Merge branch 'master' into sanchikuuus-add_more_Maya_materials
commit d9f79190b1
@@ -46,7 +46,7 @@ jobs:
          toolchain: ninja-vs-win64-cxx17

    steps:
-   - uses: actions/checkout@v3
+   - uses: actions/checkout@v4
      with:
        submodules: true

@@ -69,17 +69,11 @@ jobs:

    - name: Checkout Hunter toolchains
      if: endsWith(matrix.name, 'hunter')
-     uses: actions/checkout@v3
+     uses: actions/checkout@v4
      with:
        repository: cpp-pm/polly
        path: cmake/polly

    - name: Remove contrib directory for Hunter builds
      if: contains(matrix.name, 'hunter')
      uses: JesseTG/rm@v1.0.3
      with:
        path: contrib

    - name: Cache DX SDK
      id: dxcache
      if: contains(matrix.name, 'windows')
@@ -14,7 +14,7 @@ jobs:
    name: adress-sanitizer
    runs-on: ubuntu-latest
    steps:
-   - uses: actions/checkout@v3
+   - uses: actions/checkout@v4
    - uses: lukka/get-cmake@latest
    - uses: lukka/set-shell-env@v1
      with:

@@ -38,7 +38,7 @@ jobs:
    name: undefined-behavior-sanitizer
    runs-on: ubuntu-latest
    steps:
-   - uses: actions/checkout@v3
+   - uses: actions/checkout@v4
    - uses: lukka/get-cmake@latest
    - uses: lukka/set-shell-env@v1
      with:

@@ -46,7 +46,7 @@ jobs:
        CC: clang

    - name: configure and build
-     uses: lukka/run-cmake@v2
+     uses: lukka/run-cmake@v3
      with:
        cmakeListsOrSettingsJson: CMakeListsTxtAdvanced
        cmakeListsTxtPath: '${{ github.workspace }}/CMakeLists.txt'

@@ -62,7 +62,7 @@ jobs:
    name: printf-sanitizer
    runs-on: ubuntu-latest
    steps:
-   - uses: actions/checkout@v3
+   - uses: actions/checkout@v4

    - name: run scan_printf script
      run: ./scripts/scan_printf.sh
@@ -49,14 +49,13 @@ option(ASSIMP_HUNTER_ENABLED "Enable Hunter package manager support" OFF)
IF(ASSIMP_HUNTER_ENABLED)
  include("cmake-modules/HunterGate.cmake")
  HunterGate(
-   URL "https://github.com/cpp-pm/hunter/archive/v0.24.0.tar.gz"
-   SHA1 "a3d7f4372b1dcd52faa6ff4a3bd5358e1d0e5efd"
+   URL "https://github.com/cpp-pm/hunter/archive/v0.24.18.tar.gz"
+   SHA1 "1292e4d661e1770d6d6ca08c12c07cf34a0bf718"
  )

  add_definitions(-DASSIMP_USE_HUNTER)
ENDIF()

-PROJECT(Assimp VERSION 5.2.5)
+PROJECT(Assimp VERSION 5.3.0)

# All supported options ###############################################

@@ -201,12 +200,9 @@ SET (ASSIMP_VERSION ${ASSIMP_VERSION_MAJOR}.${ASSIMP_VERSION_MINOR}.${ASSIMP_VER
SET (ASSIMP_SOVERSION 5)

SET( ASSIMP_PACKAGE_VERSION "0" CACHE STRING "the package-specific version used for uploading the sources" )
if(NOT ASSIMP_HUNTER_ENABLED)
  # Enable C++17 support globally
  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CXX_STANDARD_REQUIRED ON)
  set(CMAKE_C_STANDARD 99)
endif()
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_C_STANDARD 99)

IF(NOT ASSIMP_IGNORE_GIT_HASH)
  # Get the current working branch

@@ -254,8 +250,7 @@ IF( UNIX )
  # Use GNUInstallDirs for Unix predefined directories
  INCLUDE(GNUInstallDirs)
  # Ensure that we do not run into issues like http://www.tcm.phy.cam.ac.uk/sw/inodes64.html on 32 bit linux
-  IF( ${OPERATING_SYSTEM} MATCHES "Android")
-  ELSE()
+  IF(NOT ${OPERATING_SYSTEM} MATCHES "Android")
    IF ( CMAKE_SIZEOF_VOID_P EQUAL 4) # only necessary for 32-bit linux
      ADD_DEFINITIONS(-D_FILE_OFFSET_BITS=64 )
    ENDIF()

@@ -263,11 +258,15 @@ IF( UNIX )
ENDIF()

# Grouped compiler settings ########################################
-IF ((CMAKE_C_COMPILER_ID MATCHES "GNU") AND NOT MINGW)
+IF ((CMAKE_C_COMPILER_ID MATCHES "GNU") AND NOT MINGW AND NOT HAIKU)
  IF(NOT ASSIMP_HUNTER_ENABLED)
    SET(CMAKE_CXX_STANDARD 17)
    SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
  ENDIF()

  IF(CMAKE_CXX_COMPILER_VERSION GREATER_EQUAL 13)
    MESSAGE(STATUS "GCC13 detected disabling \"-Wdangling-reference\" in Cpp files as it appears to be a false positive")
    ADD_COMPILE_OPTIONS("$<$<COMPILE_LANGUAGE:CXX>:-Wno-dangling-reference>")
  ENDIF()
  # hide all not-exported symbols
  IF(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "mips64" )
    SET(CMAKE_CXX_FLAGS "-mxgot -fvisibility=hidden -fno-strict-aliasing -Wall ${CMAKE_CXX_FLAGS}")

@@ -281,9 +280,9 @@ IF ((CMAKE_C_COMPILER_ID MATCHES "GNU") AND NOT MINGW)
ELSEIF(MSVC)
  # enable multi-core compilation with MSVC
  IF(CMAKE_CXX_COMPILER_ID MATCHES "Clang" ) # clang-cl
-   ADD_COMPILE_OPTIONS(/bigobj /W4 /WX )
+   ADD_COMPILE_OPTIONS(/bigobj)
  ELSE() # msvc
-   ADD_COMPILE_OPTIONS(/MP /bigobj /W4 /WX)
+   ADD_COMPILE_OPTIONS(/MP /bigobj)
  ENDIF()

  # disable "elements of array '' will be default initialized" warning on MSVC2013

@@ -297,7 +296,6 @@ ELSEIF(MSVC)
  SET(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DEBUG:FULL /PDBALTPATH:%_PDB% /OPT:REF /OPT:ICF")
ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang" )
  IF(NOT ASSIMP_HUNTER_ENABLED)
    SET(CMAKE_CXX_STANDARD 17)
    SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
  ENDIF()
  SET(CMAKE_CXX_FLAGS "-fvisibility=hidden -fno-strict-aliasing -Wall -Wno-long-long ${CMAKE_CXX_FLAGS}" )

@@ -322,17 +320,17 @@ ENDIF()

IF ( IOS AND NOT ASSIMP_HUNTER_ENABLED)
  IF (CMAKE_BUILD_TYPE STREQUAL "Debug")
    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fembed-bitcode -Og")
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fembed-bitcode -Og")
  ELSE()
    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fembed-bitcode -O3")
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fembed-bitcode -O3")
    # Experimental for pdb generation
  ENDIF()
ENDIF()

IF (ASSIMP_COVERALLS)
  MESSAGE(STATUS "Coveralls enabled")

  INCLUDE(Coveralls)
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")

@@ -340,14 +338,16 @@ ENDIF()

IF (ASSIMP_ASAN)
  MESSAGE(STATUS "AddressSanitizer enabled")

  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
ENDIF()

IF (ASSIMP_UBSAN)
  MESSAGE(STATUS "Undefined Behavior sanitizer enabled")

  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined,shift,shift-exponent,integer-divide-by-zero,unreachable,vla-bound,null,return,signed-integer-overflow,bounds,float-divide-by-zero,float-cast-overflow,nonnull-attribute,returns-nonnull-attribute,bool,enum,vptr,pointer-overflow,builtin -fno-sanitize-recover=all")
  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined,shift,shift-exponent,integer-divide-by-zero,unreachable,vla-bound,null,return,signed-integer-overflow,bounds,float-divide-by-zero,float-cast-overflow,nonnull-attribute,returns-nonnull-attribute,bool,enum,vptr,pointer-overflow,builtin -fno-sanitize-recover=all")
ENDIF()

INCLUDE (FindPkgMacros)

@@ -668,13 +668,13 @@ ELSE()
    set_target_properties(draco_encoder draco_decoder PROPERTIES
      EXCLUDE_FROM_ALL TRUE
      EXCLUDE_FROM_DEFAULT_BUILD TRUE
    )

    # Do build the draco shared library
    set_target_properties(${draco_LIBRARIES} PROPERTIES
      EXCLUDE_FROM_ALL FALSE
      EXCLUDE_FROM_DEFAULT_BUILD FALSE
    )

    TARGET_USE_COMMON_OUTPUT_DIRECTORY(${draco_LIBRARIES})
    TARGET_USE_COMMON_OUTPUT_DIRECTORY(draco_encoder)

@@ -691,8 +691,7 @@ ELSE()
        FRAMEWORK DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
        COMPONENT ${LIBASSIMP_COMPONENT}
        INCLUDES DESTINATION include
      )
    ENDIF()
  ENDIF()
ENDIF()

@@ -778,7 +777,7 @@ IF ( ASSIMP_INSTALL )
  SET(CPACK_DEBIAN_PACKAGE_SECTION "libs" )
  SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_COMPONENTS_ALL}")
  SET(CPACK_DEBIAN_PACKAGE_SUGGESTS)
- SET(cPACK_DEBIAN_PACKAGE_NAME "assimp")
+ SET(CPACK_DEBIAN_PACKAGE_NAME "assimp")
  SET(CPACK_DEBIAN_PACKAGE_REMOVE_SOURCE_FILES contrib/gtest contrib/zlib workspaces test doc obj samples packaging)
  SET(CPACK_DEBIAN_PACKAGE_SOURCE_COPY svn export --force)
  SET(CPACK_DEBIAN_CHANGELOG)
Dockerfile (16 changed lines)
@@ -1,14 +1,9 @@
-FROM ubuntu:14.04
+FROM ubuntu:22.04

-RUN apt-get update && apt-get install -y \
+RUN apt-get update && apt-get install -y ninja-build \
    git cmake build-essential software-properties-common

-RUN add-apt-repository ppa:ubuntu-toolchain-r/test && apt-get update && apt-get install -y gcc-4.9 g++-4.9 && \
-    cd /usr/bin && \
-    rm gcc g++ cpp && \
-    ln -s gcc-4.9 gcc && \
-    ln -s g++-4.9 g++ && \
-    ln -s cpp-4.9 cpp
+RUN add-apt-repository ppa:ubuntu-toolchain-r/test && apt-get update

WORKDIR /opt

@@ -19,7 +14,8 @@ WORKDIR /opt/assimp

RUN git checkout master \
    && mkdir build && cd build && \
-   cmake \
+   cmake -G 'Ninja' \
    -DCMAKE_BUILD_TYPE=Release \
    -DASSIMP_BUILD_ASSIMP_TOOLS=ON \
    .. && \
-   make && make install
+   ninja -j4 && ninja install
@@ -479,6 +479,11 @@ void Parser::ParseLV1MaterialListBlock() {
        if (TokenMatch(filePtr, "MATERIAL_COUNT", 14)) {
            ParseLV4MeshLong(iMaterialCount);

            if (UINT_MAX - iOldMaterialCount < iMaterialCount) {
                LogWarning("Out of range: material index is too large");
                return;
            }

            // now allocate enough storage to hold all materials
            m_vMaterials.resize(iOldMaterialCount + iMaterialCount, Material("INVALID"));
            continue;
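A side note on the MATERIAL_COUNT guard above: it rejects counts that would make iOldMaterialCount + iMaterialCount wrap around an unsigned int before the resize is attempted. A minimal standalone sketch of that overflow-safe pattern (names are illustrative, not taken from the parser):

#include <climits>
#include <cstdio>

// True only if a + b still fits into unsigned int; the test itself can never overflow.
static bool FitsInUnsignedInt(unsigned int a, unsigned int b) {
    return UINT_MAX - a >= b;
}

int main() {
    const unsigned int oldCount = 10u;
    const unsigned int parsedCount = UINT_MAX - 5u; // hostile value from a malformed file
    if (!FitsInUnsignedInt(oldCount, parsedCount)) {
        std::printf("material count out of range, skipping resize\n");
        return 0;
    }
    // safe to compute oldCount + parsedCount here
    return 0;
}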
@@ -115,15 +115,12 @@ BlenderImporter::~BlenderImporter() {
    delete modifier_cache;
}

-static const char * const Tokens[] = { "BLENDER" };
+static const char Token[] = "BLENDER";

// ------------------------------------------------------------------------------------------------
// Returns whether the class can handle the format of the given file.
bool BlenderImporter::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool /*checkSig*/) const {
-   // note: this won't catch compressed files
-   static const char *tokens[] = { "<BLENDER", "blender" };
-
-   return SearchFileHeaderForToken(pIOHandler, pFile, tokens, AI_COUNT_OF(tokens));
+   return ParseMagicToken(pFile, pIOHandler).error.empty();
}

// ------------------------------------------------------------------------------------------------

@@ -142,63 +139,21 @@ void BlenderImporter::SetupProperties(const Importer * /*pImp*/) {
// Imports the given file into the given scene structure.
void BlenderImporter::InternReadFile(const std::string &pFile,
        aiScene *pScene, IOSystem *pIOHandler) {
#ifndef ASSIMP_BUILD_NO_COMPRESSED_BLEND
    std::vector<char> uncompressed;
#endif

    FileDatabase file;
    std::shared_ptr<IOStream> stream(pIOHandler->Open(pFile, "rb"));
    if (!stream) {
        ThrowException("Could not open file for reading");
    StreamOrError streamOrError = ParseMagicToken(pFile, pIOHandler);
    if (!streamOrError.error.empty()) {
        ThrowException(streamOrError.error);
    }
    std::shared_ptr<IOStream> stream = std::move(streamOrError.stream);

    char magic[8] = { 0 };
    stream->Read(magic, 7, 1);
    if (strcmp(magic, Tokens[0])) {
        // Check for presence of the gzip header. If yes, assume it is a
        // compressed blend file and try uncompressing it, else fail. This is to
        // avoid uncompressing random files which our loader might end up with.
#ifdef ASSIMP_BUILD_NO_COMPRESSED_BLEND
        ThrowException("BLENDER magic bytes are missing, is this file compressed (Assimp was built without decompression support)?");
#else
        if (magic[0] != 0x1f || static_cast<uint8_t>(magic[1]) != 0x8b) {
            ThrowException("BLENDER magic bytes are missing, couldn't find GZIP header either");
        }
    char version[4] = { 0 };
    file.i64bit = (stream->Read(version, 1, 1), version[0] == '-');
    file.little = (stream->Read(version, 1, 1), version[0] == 'v');

        LogDebug("Found no BLENDER magic word but a GZIP header, might be a compressed file");
        if (magic[2] != 8) {
            ThrowException("Unsupported GZIP compression method");
        }
    stream->Read(version, 3, 1);
    version[3] = '\0';

        // http://www.gzip.org/zlib/rfc-gzip.html#header-trailer
        stream->Seek(0L, aiOrigin_SET);
        std::shared_ptr<StreamReaderLE> reader = std::shared_ptr<StreamReaderLE>(new StreamReaderLE(stream));

        size_t total = 0;
        Compression compression;
        if (compression.open(Compression::Format::Binary, Compression::FlushMode::NoFlush, 16 + Compression::MaxWBits)) {
            total = compression.decompress((unsigned char *)reader->GetPtr(), reader->GetRemainingSize(), uncompressed);
            compression.close();
        }

        // replace the input stream with a memory stream
        stream = std::make_shared<MemoryIOStream>(reinterpret_cast<uint8_t *>(uncompressed.data()), total);

        // .. and retry
        stream->Read(magic, 7, 1);
        if (strcmp(magic, "BLENDER")) {
            ThrowException("Found no BLENDER magic word in decompressed GZIP file");
        }
#endif
    }

-   LogInfo("Blender version is ", magic[0], ".", magic + 1,
+   LogInfo("Blender version is ", version[0], ".", version + 1,
        " (64bit: ", file.i64bit ? "true" : "false",
        ", little endian: ", file.little ? "true" : "false", ")");

@@ -1338,4 +1293,55 @@ aiNode *BlenderImporter::ConvertNode(const Scene &in, const Object *obj, Convers
    return node.release();
}

BlenderImporter::StreamOrError BlenderImporter::ParseMagicToken(const std::string &pFile, IOSystem *pIOHandler) const {
    std::shared_ptr<IOStream> stream(pIOHandler->Open(pFile, "rb"));
    if (stream == nullptr) {
        return {{}, {}, "Could not open file for reading"};
    }

    char magic[8] = { 0 };
    stream->Read(magic, 7, 1);
    if (strcmp(magic, Token) == 0) {
        return {stream, {}, {}};
    }

    // Check for presence of the gzip header. If yes, assume it is a
    // compressed blend file and try uncompressing it, else fail. This is to
    // avoid uncompressing random files which our loader might end up with.
#ifdef ASSIMP_BUILD_NO_COMPRESSED_BLEND
    return {{}, {}, "BLENDER magic bytes are missing, is this file compressed (Assimp was built without decompression support)?"};
#else
    if (magic[0] != 0x1f || static_cast<uint8_t>(magic[1]) != 0x8b) {
        return {{}, {}, "BLENDER magic bytes are missing, couldn't find GZIP header either"};
    }

    LogDebug("Found no BLENDER magic word but a GZIP header, might be a compressed file");
    if (magic[2] != 8) {
        return {{}, {}, "Unsupported GZIP compression method"};
    }

    // http://www.gzip.org/zlib/rfc-gzip.html#header-trailer
    stream->Seek(0L, aiOrigin_SET);
    std::shared_ptr<StreamReaderLE> reader = std::shared_ptr<StreamReaderLE>(new StreamReaderLE(stream));

    size_t total = 0;
    Compression compression;
    auto uncompressed = std::make_shared<std::vector<char>>();
    if (compression.open(Compression::Format::Binary, Compression::FlushMode::NoFlush, 16 + Compression::MaxWBits)) {
        total = compression.decompress((unsigned char *)reader->GetPtr(), reader->GetRemainingSize(), *uncompressed);
        compression.close();
    }

    // replace the input stream with a memory stream
    stream = std::make_shared<MemoryIOStream>(reinterpret_cast<uint8_t *>(uncompressed->data()), total);

    // .. and retry
    stream->Read(magic, 7, 1);
    if (strcmp(magic, Token) == 0) {
        return {stream, uncompressed, {}};
    }
    return {{}, {}, "Found no BLENDER magic word in decompressed GZIP file"};
#endif
}

#endif // ASSIMP_BUILD_NO_BLEND_IMPORTER
@@ -180,6 +180,19 @@ private:
            const Blender::MTex *tex,
            Blender::ConversionData &conv_data);

    // TODO: Move to a std::variant, once c++17 is supported.
    struct StreamOrError {
        std::shared_ptr<IOStream> stream;
        std::shared_ptr<std::vector<char>> input;
        std::string error;
    };

    // Returns either a stream (and optional input data for the stream) or
    // an error if it can't parse the magic token.
    StreamOrError ParseMagicToken(
            const std::string &pFile,
            IOSystem *pIOHandler) const;

private: // static stuff, mostly logging and error reporting.
    // --------------------
    static void CheckActualType(const Blender::ElemBase *dt,
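The StreamOrError struct above is a hand-rolled "stream or error" result type, and its TODO points at std::variant once C++17 is available. A hedged sketch of what that variant-based shape could look like (type and helper names here are assumptions, not part of this change):

#include <memory>
#include <string>
#include <variant>
#include <vector>

// Hypothetical stand-ins for Assimp's IOStream-based fields.
struct ParsedStream {
    std::shared_ptr<void> stream;              // would be std::shared_ptr<IOStream>
    std::shared_ptr<std::vector<char>> input;  // keeps decompressed bytes alive
};

// Exactly one alternative is active: a usable stream or an error message.
using StreamOrErrorVariant = std::variant<ParsedStream, std::string>;

inline bool HasError(const StreamOrErrorVariant &result) {
    return std::holds_alternative<std::string>(result);
}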
@@ -102,10 +102,6 @@ void Structure::Convert<CollectionObject>(

    ReadFieldPtr<ErrorPolicy_Fail>(dest.next, "*next", db);
    {
-       //std::shared_ptr<CollectionObject> prev;
-       //ReadFieldPtr<ErrorPolicy_Fail>(prev, "*prev", db);
-       //dest.prev = prev.get();
-
        std::shared_ptr<Object> ob;
        ReadFieldPtr<ErrorPolicy_Igno>(ob, "*ob", db);
        dest.ob = ob.get();
@@ -4,7 +4,6 @@ Open Asset Import Library (assimp)

Copyright (c) 2006-2022, assimp team

All rights reserved.

Redistribution and use of this software in source and binary forms,

@@ -40,10 +39,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/

-/** @file BlenderTessellator.cpp
- * @brief A simple tessellation wrapper
- */
+/// @file BlenderTessellator.cpp
+/// @brief A simple tessellation wrapper

#ifndef ASSIMP_BUILD_NO_BLEND_IMPORTER

@@ -4,7 +4,6 @@ Open Asset Import Library (assimp)

Copyright (c) 2006-2022, assimp team

All rights reserved.

Redistribution and use of this software in source and binary forms,

@@ -144,11 +143,7 @@ namespace Assimp

#if ASSIMP_BLEND_WITH_POLY_2_TRI

-#ifdef ASSIMP_USE_HUNTER
-# include <poly2tri/poly2tri.h>
-#else
-# include "../contrib/poly2tri/poly2tri/poly2tri.h"
-#endif
+#include "contrib/poly2tri/poly2tri/poly2tri.h"

namespace Assimp
{
@@ -95,6 +95,7 @@ ColladaLoader::ColladaLoader() :
        noSkeletonMesh(false),
        removeEmptyBones(false),
        ignoreUpDirection(false),
+       ignoreUnitSize(false),
        useColladaName(false),
        mNodeNameCounter(0) {
    // empty

@@ -122,6 +123,7 @@ void ColladaLoader::SetupProperties(const Importer *pImp) {
    noSkeletonMesh = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_NO_SKELETON_MESHES, 0) != 0;
    removeEmptyBones = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, true) != 0;
    ignoreUpDirection = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION, 0) != 0;
+   ignoreUnitSize = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UNIT_SIZE, 0) != 0;
    useColladaName = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_USE_COLLADA_NAMES, 0) != 0;
}

@@ -170,12 +172,15 @@ void ColladaLoader::InternReadFile(const std::string &pFile, aiScene *pScene, IO
    // ... then fill the materials with the now adjusted settings
    FillMaterials(parser, pScene);

-   // Apply unit-size scale calculation
-   pScene->mRootNode->mTransformation *= aiMatrix4x4(parser.mUnitSize, 0, 0, 0,
-       0, parser.mUnitSize, 0, 0,
-       0, 0, parser.mUnitSize, 0,
-       0, 0, 0, 1);
+   if (!ignoreUnitSize) {
+       // Apply unit-size scale calculation
+       pScene->mRootNode->mTransformation *= aiMatrix4x4(
+           parser.mUnitSize, 0, 0, 0,
+           0, parser.mUnitSize, 0, 0,
+           0, 0, parser.mUnitSize, 0,
+           0, 0, 0, 1);
+   }

    if (!ignoreUpDirection) {
        // Convert to Y_UP, if different orientation
        if (parser.mUpDirection == ColladaParser::UP_X) {
@@ -239,6 +239,7 @@ protected:
    bool noSkeletonMesh;
    bool removeEmptyBones;
    bool ignoreUpDirection;
+   bool ignoreUnitSize;
    bool useColladaName;

    /** Used by FindNameForNode() to generate unique node names */
@@ -71,7 +71,7 @@ static const aiColor4D AI_DXF_DEFAULT_COLOR(aiColor4D(0.6f, 0.6f, 0.6f, 0.6f));
// color indices for DXF - 16 are supported, the table is
// taken directly from the DXF spec.
static aiColor4D g_aclrDxfIndexColors[] = {
-   aiColor4D (0.6f, 0.6f, 0.6f, 1.0f),
+   aiColor4D(0.6f, 0.6f, 0.6f, 1.0f),
    aiColor4D (1.0f, 0.0f, 0.0f, 1.0f), // red
    aiColor4D (0.0f, 1.0f, 0.0f, 1.0f), // green
    aiColor4D (0.0f, 0.0f, 1.0f, 1.0f), // blue

@@ -88,6 +88,7 @@ static aiColor4D g_aclrDxfIndexColors[] = {
    aiColor4D (1.0f, 1.0f, 1.0f, 1.0f), // white
    aiColor4D (0.6f, 0.0f, 1.0f, 1.0f) // violet
};

#define AI_DXF_NUM_INDEX_COLORS (sizeof(g_aclrDxfIndexColors)/sizeof(g_aclrDxfIndexColors[0]))
#define AI_DXF_ENTITIES_MAGIC_BLOCK "$ASSIMP_ENTITIES_MAGIC"

@@ -109,14 +110,6 @@ static const aiImporterDesc desc = {
    "dxf"
};

-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-DXFImporter::DXFImporter() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-DXFImporter::~DXFImporter() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the class can handle the format of the given file.
bool DXFImporter::CanRead( const std::string& filename, IOSystem* pIOHandler, bool /*checkSig*/ ) const {

@@ -229,7 +222,7 @@ void DXFImporter::ConvertMeshes(aiScene* pScene, DXF::FileData& output) {
        ASSIMP_LOG_VERBOSE_DEBUG("DXF: Unexpanded polycount is ", icount, ", vertex count is ", vcount);
    }

-   if (! output.blocks.size() ) {
+   if (output.blocks.empty()) {
        throw DeadlyImportError("DXF: no data blocks loaded");
    }

@@ -587,10 +580,11 @@ void DXFImporter::ParseInsertion(DXF::LineReader& reader, DXF::FileData& output)
    }
}

-#define DXF_POLYLINE_FLAG_CLOSED 0x1
-#define DXF_POLYLINE_FLAG_3D_POLYLINE 0x8
-#define DXF_POLYLINE_FLAG_3D_POLYMESH 0x10
-#define DXF_POLYLINE_FLAG_POLYFACEMESH 0x40
+static constexpr unsigned int DXF_POLYLINE_FLAG_CLOSED = 0x1;
+// Currently unused
+//static constexpr unsigned int DXF_POLYLINE_FLAG_3D_POLYLINE = 0x8;
+//static constexpr unsigned int DXF_POLYLINE_FLAG_3D_POLYMESH = 0x10;
+static constexpr unsigned int DXF_POLYLINE_FLAG_POLYFACEMESH = 0x40;

// ------------------------------------------------------------------------------------------------
void DXFImporter::ParsePolyLine(DXF::LineReader& reader, DXF::FileData& output) {

@@ -639,12 +633,6 @@ void DXFImporter::ParsePolyLine(DXF::LineReader& reader, DXF::FileData& output)
        reader++;
    }

-   //if (!(line.flags & DXF_POLYLINE_FLAG_POLYFACEMESH)) {
-   //   DefaultLogger::get()->warn((Formatter::format("DXF: polyline not currently supported: "),line.flags));
-   //   output.blocks.back().lines.pop_back();
-   //   return;
-   //}
-
    if (vguess && line.positions.size() != vguess) {
        ASSIMP_LOG_WARN("DXF: unexpected vertex count in polymesh: ",
            line.positions.size(),", expected ", vguess );

@@ -734,12 +722,18 @@ void DXFImporter::ParsePolyLineVertex(DXF::LineReader& reader, DXF::PolyLine& li
    case 71:
    case 72:
    case 73:
-   case 74:
+   case 74: {
        if (cnti == 4) {
            ASSIMP_LOG_WARN("DXF: more than 4 indices per face not supported; ignoring");
            break;
        }
+       const int index = reader.ValueAsSignedInt();
+       if (index >= 0) {
+           indices[cnti++] = static_cast<unsigned int>(index);
+       } else {
+           indices[cnti++] = static_cast<unsigned int>(-index);
+       }
+   }
-       indices[cnti++] = reader.ValueAsUnsignedInt();
        break;

    // color

@@ -777,8 +771,7 @@ void DXFImporter::ParsePolyLineVertex(DXF::LineReader& reader, DXF::PolyLine& li
    }

// ------------------------------------------------------------------------------------------------
-void DXFImporter::Parse3DFace(DXF::LineReader& reader, DXF::FileData& output)
-{
+void DXFImporter::Parse3DFace(DXF::LineReader& reader, DXF::FileData& output) {
    // (note) this is also used for for parsing line entities, so we
    // must handle the vertex_count == 2 case as well.

@@ -795,8 +788,7 @@ void DXFImporter::Parse3DFace(DXF::LineReader& reader, DXF::FileData& output)
    if (reader.GroupCode() == 0) {
        break;
    }
-   switch (reader.GroupCode())
-   {
+   switch (reader.GroupCode()) {

    // 8 specifies the layer
    case 8:

@@ -68,8 +68,8 @@ namespace DXF {
*/
class DXFImporter : public BaseImporter {
public:
-   DXFImporter();
-   ~DXFImporter() override;
+   DXFImporter() = default;
+   ~DXFImporter() override = default;

    // -------------------------------------------------------------------
    /** Returns whether the class can handle the format of the given file.
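Regarding the new signed handling of group codes 71-74 above: in DXF polyface meshes a negative vertex index conventionally marks the adjoining edge as invisible, while its magnitude is still the 1-based vertex index, which is why the importer folds negatives back to their absolute value. A minimal sketch of that decoding (the struct and function names are hypothetical):

#include <cstdlib>

// Hypothetical decoder for a DXF polyface-mesh vertex index (group codes 71-74):
// a negative raw value flags the edge as hidden, the vertex itself is |raw|.
struct PolyfaceIndex {
    unsigned int vertex;  // 1-based index into the mesh vertex table
    bool hiddenEdge;
};

static PolyfaceIndex DecodePolyfaceIndex(int raw) {
    PolyfaceIndex out;
    out.hiddenEdge = raw < 0;
    out.vertex = static_cast<unsigned int>(std::abs(raw));
    return out;
}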
@@ -93,6 +93,8 @@ FBXConverter::FBXConverter(aiScene *out, const Document &doc, bool removeEmptyBo
        mSceneOut(out),
        doc(doc),
        mRemoveEmptyBones(removeEmptyBones) {

    // animations need to be converted first since this will
    // populate the node_anim_chain_bits map, which is needed
    // to determine which nodes need to be generated.

@@ -427,12 +429,26 @@ void FBXConverter::ConvertCamera(const Camera &cam, const std::string &orig_name
    out_camera->mLookAt = aiVector3D(1.0f, 0.0f, 0.0f);
    out_camera->mUp = aiVector3D(0.0f, 1.0f, 0.0f);

-   out_camera->mHorizontalFOV = AI_DEG_TO_RAD(cam.FieldOfView());
-   out_camera->mClipPlaneNear = cam.NearPlane();
-   out_camera->mClipPlaneFar = cam.FarPlane();
+   // NOTE: Some software (maya) does not put FieldOfView in FBX, so we compute
+   // mHorizontalFOV from FocalLength and FilmWidth with unit conversion.
+
+   // TODO: This is not a complete solution for how FBX cameras can be stored.
+   // TODO: Incorporate non-square pixel aspect ratio.
+   // TODO: FBX aperture mode might be storing vertical FOV in need of conversion with aspect ratio.
+
+   float fov_deg = cam.FieldOfView();
+   // If FOV not specified in file, compute using FilmWidth and FocalLength.
+   if (fov_deg == kFovUnknown) {
+       float film_width_inches = cam.FilmWidth();
+       float focal_length_mm = cam.FocalLength();
+       ASSIMP_LOG_VERBOSE_DEBUG("FBX FOV unspecified. Computing from FilmWidth (", film_width_inches, "inches) and FocalLength (", focal_length_mm, "mm).");
+       double half_fov_rad = std::atan2(film_width_inches * 25.4 * 0.5, focal_length_mm);
+       out_camera->mHorizontalFOV = static_cast<float>(half_fov_rad);
+   } else {
+       // FBX fov is full-view degrees. We want half-view radians.
+       out_camera->mHorizontalFOV = AI_DEG_TO_RAD(fov_deg) * 0.5f;
+   }
+
+   out_camera->mClipPlaneNear = cam.NearPlane();
+   out_camera->mClipPlaneFar = cam.FarPlane();
}
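For the FOV fallback introduced above: when the FBX file carries no FieldOfView, the importer derives a half-angle from FilmWidth (inches) and FocalLength (mm). A small self-contained sketch of the same arithmetic (the sample values are only illustrative):

#include <cmath>
#include <cstdio>

// Half of the horizontal FOV in radians, from film width (inches) and focal
// length (mm); the factor 25.4 converts inches to millimetres.
static double HalfHorizontalFovRad(double filmWidthInches, double focalLengthMm) {
    return std::atan2(filmWidthInches * 25.4 * 0.5, focalLengthMm);
}

int main() {
    // A 36 mm wide aperture (about 1.417 in) behind a 50 mm lens gives roughly 0.346 rad.
    std::printf("%f\n", HalfHorizontalFovRad(1.417, 50.0));
    return 0;
}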
@@ -55,9 +55,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define _AI_CONCAT(a,b) a ## b
#define AI_CONCAT(a,b) _AI_CONCAT(a,b)

namespace Assimp {
namespace FBX {

+// Use an 'illegal' default FOV value to detect if the FBX camera has set the FOV.
+static const float kFovUnknown = -1.0f;

class Parser;
class Object;
struct ImportSettings;

@@ -247,7 +252,7 @@ public:
    fbx_simple_property(FilmAspectRatio, float, 1.0f)
    fbx_simple_property(ApertureMode, int, 0)

-   fbx_simple_property(FieldOfView, float, 1.0f)
+   fbx_simple_property(FieldOfView, float, kFovUnknown)
    fbx_simple_property(FocalLength, float, 1.0f)
};
@@ -1391,7 +1391,7 @@ void FBXExporter::WriteObjects ()
    aiMaterial* m = mScene->mMaterials[i];

    // these are used to receive material data
-   float f; aiColor3D c;
+   ai_real f; aiColor3D c;

    // start the node record
    FBX::Node n("Material");
@@ -211,7 +211,7 @@ Scope::Scope(Parser& parser,bool topLevel)
            elements.insert(ElementMap::value_type(str, element));
            return;
        }
-       delete element;
+       delete_Element(element);
        ParseError("unexpected end of file",parser.LastToken());
    } else {
        elements.insert(ElementMap::value_type(str, element));
@@ -115,7 +115,9 @@ void HMPImporter::InternReadFile(const std::string &pFile,
        throw DeadlyImportError("HMP File is too small.");

    // Allocate storage and copy the contents of the file to a memory buffer
-   mBuffer = new uint8_t[fileSize];
+   auto deleter=[this](uint8_t* ptr){ delete[] ptr; mBuffer = nullptr; };
+   std::unique_ptr<uint8_t[], decltype(deleter)> buffer(new uint8_t[fileSize], deleter);
+   mBuffer = buffer.get();
    file->Read((void *)mBuffer, 1, fileSize);
    iFileSize = (unsigned int)fileSize;

@@ -143,9 +145,6 @@ void HMPImporter::InternReadFile(const std::string &pFile,
    // Print the magic word to the logger
    std::string szBuffer = ai_str_toprintable((const char *)&iMagic, sizeof(iMagic));

-   delete[] mBuffer;
-   mBuffer = nullptr;
-
    // We're definitely unable to load this file
    throw DeadlyImportError("Unknown HMP subformat ", pFile,
        ". Magic word (", szBuffer, ") is not known");

@@ -153,9 +152,6 @@ void HMPImporter::InternReadFile(const std::string &pFile,

    // Set the AI_SCENE_FLAGS_TERRAIN bit
    pScene->mFlags |= AI_SCENE_FLAGS_TERRAIN;

-   delete[] mBuffer;
-   mBuffer = nullptr;
}

@@ -445,11 +441,11 @@ void HMPImporter::ReadFirstSkin(unsigned int iNumSkins, const unsigned char *szC
    szCursor += sizeof(uint32_t);

    // allocate an output material
-   aiMaterial *pcMat = new aiMaterial();
+   std::unique_ptr<aiMaterial> pcMat(new aiMaterial());

    // read the skin, this works exactly as for MDL7
    ParseSkinLump_3DGS_MDL7(szCursor, &szCursor,
-       pcMat, iType, iWidth, iHeight);
+       pcMat.get(), iType, iWidth, iHeight);

    // now we need to skip any other skins ...
    for (unsigned int i = 1; i < iNumSkins; ++i) {

@@ -468,7 +464,7 @@ void HMPImporter::ReadFirstSkin(unsigned int iNumSkins, const unsigned char *szC
    // setup the material ...
    pScene->mNumMaterials = 1;
    pScene->mMaterials = new aiMaterial *[1];
-   pScene->mMaterials[0] = pcMat;
+   pScene->mMaterials[0] = pcMat.release();

    *szCursorOut = szCursor;
}
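The buffer change above hands the allocation to a std::unique_ptr with a custom deleter so mBuffer is freed and reset on every exit path, including the DeadlyImportError throws that previously needed manual delete[] calls. A standalone sketch of the same idiom (class and member names are placeholders):

#include <cstddef>
#include <cstdint>
#include <memory>

class Loader {
public:
    void Read(std::size_t fileSize) {
        // The deleter frees the array and clears the raw observer pointer, so any
        // exit path (including thrown exceptions) leaves no dangling mBuffer.
        auto deleter = [this](std::uint8_t *ptr) { delete[] ptr; mBuffer = nullptr; };
        std::unique_ptr<std::uint8_t[], decltype(deleter)> buffer(new std::uint8_t[fileSize], deleter);
        mBuffer = buffer.get();
        // ... parse via mBuffer; the buffer releases itself automatically ...
    }

private:
    std::uint8_t *mBuffer = nullptr;
};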
@@ -38,9 +38,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/

-/** @file IFCBoolean.cpp
- * @brief Implements a subset of Ifc boolean operations
- */
+/// @file IFCBoolean.cpp
+/// @brief Implements a subset of Ifc boolean operations

#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER

@@ -48,7 +47,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "Common/PolyTools.h"
#include "PostProcessing/ProcessHelper.h"

#include <iterator>
#include <tuple>
#include <utility>

@@ -67,8 +65,9 @@ bool IntersectSegmentPlane(const IfcVector3 &p, const IfcVector3 &n, const IfcVe

    // if segment ends on plane, do not report a hit. We stay on that side until a following segment starting at this
    // point leaves the plane through the other side
-   if (std::abs(dotOne + dotTwo) < ai_epsilon)
+   if (std::abs(dotOne + dotTwo) < ai_epsilon) {
        return false;
+   }

    // if segment starts on the plane, report a hit only if the end lies on the *other* side
    if (std::abs(dotTwo) < ai_epsilon) {

@@ -82,13 +81,15 @@ bool IntersectSegmentPlane(const IfcVector3 &p, const IfcVector3 &n, const IfcVe

    // ignore if segment is parallel to plane and far away from it on either side
    // Warning: if there's a few thousand of such segments which slowly accumulate beyond the epsilon, no hit would be registered
-   if (std::abs(dotOne) < ai_epsilon)
+   if (std::abs(dotOne) < ai_epsilon) {
        return false;
+   }

    // t must be in [0..1] if the intersection point is within the given segment
    const IfcFloat t = dotTwo / dotOne;
-   if (t > 1.0 || t < 0.0)
+   if (t > 1.0 || t < 0.0) {
        return false;
+   }

    out = e0 + t * seg;
    return true;

@@ -110,11 +111,13 @@ void FilterPolygon(std::vector<IfcVector3> &resultpoly) {
    FuzzyVectorCompare fz(epsilon);
    std::vector<IfcVector3>::iterator e = std::unique(resultpoly.begin(), resultpoly.end(), fz);

-   if (e != resultpoly.end())
+   if (e != resultpoly.end()) {
        resultpoly.erase(e, resultpoly.end());
+   }

-   if (!resultpoly.empty() && fz(resultpoly.front(), resultpoly.back()))
+   if (!resultpoly.empty() && fz(resultpoly.front(), resultpoly.back())) {
        resultpoly.pop_back();
+   }
}

// ------------------------------------------------------------------------------------------------

@@ -291,8 +294,9 @@ bool IntersectsBoundaryProfile(const IfcVector3 &e0, const IfcVector3 &e1, const
    }

    // Line segment ends at boundary -> ignore any hit, it will be handled by possibly following segments
-   if (endsAtSegment && !halfOpen)
+   if (endsAtSegment && !halfOpen) {
        continue;
+   }

    // Line segment starts at boundary -> generate a hit only if following that line would change the INSIDE/OUTSIDE
    // state. This should catch the case where a connected set of segments has a point directly on the boundary,

@@ -301,15 +305,17 @@ bool IntersectsBoundaryProfile(const IfcVector3 &e0, const IfcVector3 &e1, const
    if (startsAtSegment) {
        IfcVector3 inside_dir = IfcVector3(b.y, -b.x, 0.0) * windingOrder;
        bool isGoingInside = (inside_dir * e) > 0.0;
-       if (isGoingInside == isStartAssumedInside)
+       if (isGoingInside == isStartAssumedInside) {
            continue;
+       }

        // only insert the point into the list if it is sufficiently far away from the previous intersection point.
        // This way, we avoid duplicate detection if the intersection is directly on the vertex between two segments.
        if (!intersect_results.empty() && intersect_results.back().first == i - 1) {
            const IfcVector3 diff = intersect_results.back().second - e0;
-           if (IfcVector2(diff.x, diff.y).SquareLength() < 1e-10)
+           if (IfcVector2(diff.x, diff.y).SquareLength() < 1e-10) {
                continue;
+           }
        }
        intersect_results.emplace_back(i, e0);
        continue;

@@ -322,8 +328,9 @@ bool IntersectsBoundaryProfile(const IfcVector3 &e0, const IfcVector3 &e1, const
        // This way, we avoid duplicate detection if the intersection is directly on the vertex between two segments.
        if (!intersect_results.empty() && intersect_results.back().first == i - 1) {
            const IfcVector3 diff = intersect_results.back().second - p;
-           if (IfcVector2(diff.x, diff.y).SquareLength() < 1e-10)
+           if (IfcVector2(diff.x, diff.y).SquareLength() < 1e-10) {
                continue;
+           }
        }
        intersect_results.emplace_back(i, p);
    }

@@ -662,7 +669,8 @@ void ProcessPolygonalBoundedBooleanHalfSpaceDifference(const Schema_2x3::IfcPoly
}

// ------------------------------------------------------------------------------------------------
-void ProcessBooleanExtrudedAreaSolidDifference(const Schema_2x3::IfcExtrudedAreaSolid *as, TempMesh &result,
+void ProcessBooleanExtrudedAreaSolidDifference(const Schema_2x3::IfcExtrudedAreaSolid *as,
+       TempMesh &result,
        const TempMesh &first_operand,
        ConversionData &conv) {
    ai_assert(as != nullptr);

@@ -763,4 +771,4 @@ void ProcessBoolean(const Schema_2x3::IfcBooleanResult &boolean, TempMesh &resul
} // namespace IFC
} // namespace Assimp

-#endif
+#endif // ASSIMP_BUILD_NO_IFC_IMPORTER
@@ -39,15 +39,15 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/

-/** @file IFCProfile.cpp
- * @brief Read profile and curves entities from IFC files
- */
+/// @file IFCProfile.cpp
+/// @brief Read profile and curves entities from IFC files

#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
#include "IFCUtil.h"

namespace Assimp {
namespace IFC {

namespace {

// --------------------------------------------------------------------------------

@@ -56,8 +56,7 @@ namespace {
class Conic : public Curve {
public:
    // --------------------------------------------------
-   Conic(const Schema_2x3::IfcConic& entity, ConversionData& conv)
-   : Curve(entity,conv) {
+   Conic(const Schema_2x3::IfcConic& entity, ConversionData& conv) : Curve(entity,conv) {
        IfcMatrix4 trafo;
        ConvertAxisPlacement(trafo,*entity.Position,conv);

@@ -69,12 +68,12 @@ public:
    }

    // --------------------------------------------------
-   bool IsClosed() const {
+   bool IsClosed() const override {
        return true;
    }

    // --------------------------------------------------
-   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const {
+   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );

@@ -88,7 +87,7 @@ public:
    }

    // --------------------------------------------------
-   ParamRange GetParametricRange() const {
+   ParamRange GetParametricRange() const override {
        return std::make_pair(static_cast<IfcFloat>( 0. ), static_cast<IfcFloat>( AI_MATH_TWO_PI / conv.angle_scale ));
    }

@@ -102,14 +101,13 @@ protected:
class Circle : public Conic {
public:
    // --------------------------------------------------
-   Circle(const Schema_2x3::IfcCircle& entity, ConversionData& conv)
-   : Conic(entity,conv)
-   , entity(entity)
-   {
-   }
+   Circle(const Schema_2x3::IfcCircle& entity, ConversionData& conv) : Conic(entity,conv) , entity(entity) {}
+
+   ~Circle() override = default;

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat u) const {
+   IfcVector3 Eval(IfcFloat u) const override {
        u = -conv.angle_scale * u;
        return location + static_cast<IfcFloat>(entity.Radius)*(static_cast<IfcFloat>(std::cos(u))*p[0] +
            static_cast<IfcFloat>(std::sin(u))*p[1]);

@@ -132,7 +130,7 @@ public:
    }

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat u) const {
+   IfcVector3 Eval(IfcFloat u) const override {
        u = -conv.angle_scale * u;
        return location + static_cast<IfcFloat>(entity.SemiAxis1)*static_cast<IfcFloat>(std::cos(u))*p[0] +
            static_cast<IfcFloat>(entity.SemiAxis2)*static_cast<IfcFloat>(std::sin(u))*p[1];

@@ -155,17 +153,17 @@ public:
    }

    // --------------------------------------------------
-   bool IsClosed() const {
+   bool IsClosed() const override {
        return false;
    }

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat u) const {
+   IfcVector3 Eval(IfcFloat u) const override {
        return p + u*v;
    }

    // --------------------------------------------------
-   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const {
+   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );
        // two points are always sufficient for a line segment

@@ -174,7 +172,7 @@ public:


    // --------------------------------------------------
-   void SampleDiscrete(TempMesh& out,IfcFloat a, IfcFloat b) const {
+   void SampleDiscrete(TempMesh& out,IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );

@@ -188,7 +186,7 @@ public:
    }

    // --------------------------------------------------
-   ParamRange GetParametricRange() const {
+   ParamRange GetParametricRange() const override {
        const IfcFloat inf = std::numeric_limits<IfcFloat>::infinity();

        return std::make_pair(-inf,+inf);

@@ -234,7 +232,7 @@ public:
    }

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat u) const {
+   IfcVector3 Eval(IfcFloat u) const override {
        if (curves.empty()) {
            return IfcVector3();
        }

@@ -254,7 +252,7 @@ public:
    }

    // --------------------------------------------------
-   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const {
+   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );
        size_t cnt = 0;

@@ -275,7 +273,7 @@ public:
    }

    // --------------------------------------------------
-   void SampleDiscrete(TempMesh& out,IfcFloat a, IfcFloat b) const {
+   void SampleDiscrete(TempMesh& out,IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );

@@ -293,7 +291,7 @@ public:
    }

    // --------------------------------------------------
-   ParamRange GetParametricRange() const {
+   ParamRange GetParametricRange() const override {
        return std::make_pair(static_cast<IfcFloat>( 0. ),total);
    }

@@ -373,27 +371,27 @@ public:
    }

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat p) const {
+   IfcVector3 Eval(IfcFloat p) const override {
        ai_assert(InRange(p));
        return base->Eval( TrimParam(p) );
    }

    // --------------------------------------------------
-   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const {
+   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const override {
        ai_assert( InRange( a ) );
        ai_assert( InRange( b ) );
        return base->EstimateSampleCount(TrimParam(a),TrimParam(b));
    }

    // --------------------------------------------------
-   void SampleDiscrete(TempMesh& out,IfcFloat a,IfcFloat b) const {
+   void SampleDiscrete(TempMesh& out,IfcFloat a,IfcFloat b) const override {
        ai_assert(InRange(a));
        ai_assert(InRange(b));
        return base->SampleDiscrete(out,TrimParam(a),TrimParam(b));
    }

    // --------------------------------------------------
-   ParamRange GetParametricRange() const {
+   ParamRange GetParametricRange() const override {
        return std::make_pair(static_cast<IfcFloat>( 0. ),maxval);
    }

@@ -431,7 +429,7 @@ public:
    }

    // --------------------------------------------------
-   IfcVector3 Eval(IfcFloat p) const {
+   IfcVector3 Eval(IfcFloat p) const override {
        ai_assert(InRange(p));

        const size_t b = static_cast<size_t>(std::floor(p));

@@ -444,14 +442,14 @@ public:
    }

    // --------------------------------------------------
-   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const {
+   size_t EstimateSampleCount(IfcFloat a, IfcFloat b) const override {
        ai_assert(InRange(a));
        ai_assert(InRange(b));
        return static_cast<size_t>( std::ceil(b) - std::floor(a) );
    }

    // --------------------------------------------------
-   ParamRange GetParametricRange() const {
+   ParamRange GetParametricRange() const override {
        return std::make_pair(static_cast<IfcFloat>( 0. ),static_cast<IfcFloat>(points.size()-1));
    }

@@ -516,7 +514,7 @@ size_t Curve::EstimateSampleCount(IfcFloat a, IfcFloat b) const {
    ai_assert( InRange( a ) );
    ai_assert( InRange( b ) );

-   // arbitrary default value, deriving classes should supply better suited values
+   // arbitrary default value, deriving classes should supply better-suited values
    return 16;
}
@@ -38,24 +38,15 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/

-/** @file IFCGeometry.cpp
- * @brief Geometry conversion and synthesis for IFC
- */
+/// @file IFCGeometry.cpp
+/// @brief Geometry conversion and synthesis for IFC

#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
#include "IFCUtil.h"
#include "Common/PolyTools.h"
#include "PostProcessing/ProcessHelper.h"

-#ifdef ASSIMP_USE_HUNTER
-# include <poly2tri/poly2tri.h>
-# include <polyclipping/clipper.hpp>
-#else
-# include "../contrib/poly2tri/poly2tri/poly2tri.h"
-# include "../contrib/clipper/clipper.hpp"
-#endif
+#include "contrib/poly2tri/poly2tri/poly2tri.h"
+#include "contrib/clipper/clipper.hpp"

#include <iterator>
#include <memory>

@@ -65,8 +56,7 @@ namespace Assimp {
namespace IFC {

// ------------------------------------------------------------------------------------------------
-bool ProcessPolyloop(const Schema_2x3::IfcPolyLoop& loop, TempMesh& meshout, ConversionData& /*conv*/)
-{
+bool ProcessPolyloop(const Schema_2x3::IfcPolyLoop& loop, TempMesh& meshout, ConversionData& /*conv*/) {
    size_t cnt = 0;
    for(const Schema_2x3::IfcCartesianPoint& c : loop.Polygon) {
        IfcVector3 tmp;

@@ -91,8 +81,7 @@ bool ProcessPolyloop(const Schema_2x3::IfcPolyLoop& loop, TempMesh& meshout, Con
}

// ------------------------------------------------------------------------------------------------
-void ProcessPolygonBoundaries(TempMesh& result, const TempMesh& inmesh, size_t master_bounds = (size_t)-1)
-{
+void ProcessPolygonBoundaries(TempMesh& result, const TempMesh& inmesh, size_t master_bounds = (size_t)-1) {
    // handle all trivial cases
    if(inmesh.mVertcnt.empty()) {
        return;

@@ -127,8 +116,7 @@ void ProcessPolygonBoundaries(TempMesh& result, const TempMesh& inmesh, size_t m
    if (master_bounds != (size_t)-1) {
        ai_assert(master_bounds < inmesh.mVertcnt.size());
        outer_polygon_it = begin + master_bounds;
-   }
-   else {
+   } else {
        for(iit = begin; iit != end; ++iit) {
            // find the polygon with the largest area and take it as the outer bound.
            IfcVector3& n = normals[std::distance(begin,iit)];

@@ -139,7 +127,8 @@ void ProcessPolygonBoundaries(TempMesh& result, const TempMesh& inmesh, size_t m
            }
        }
    }

    if (outer_polygon_it == end) {
        return;
    }

@@ -205,40 +194,20 @@ void ProcessConnectedFaceSet(const Schema_2x3::IfcConnectedFaceSet& fset, TempMe

        if(const Schema_2x3::IfcPolyLoop* const polyloop = bound.Bound->ToPtr<Schema_2x3::IfcPolyLoop>()) {
            if(ProcessPolyloop(*polyloop, meshout,conv)) {
-               // The outer boundary is better determined by checking which
-               // polygon covers the largest area.
-
-               //if(bound.ToPtr<IfcFaceOuterBound>()) {
-               //  ob = cnt;
-               //}
-               //++cnt;
            }
-       }
-       else {
+       } else {
            IFCImporter::LogWarn("skipping unknown IfcFaceBound entity, type is ", bound.Bound->GetClassName());
            continue;
        }
-
-       // And this, even though it is sometimes TRUE and sometimes FALSE,
-       // does not really improve results.
-
-       /*if(!IsTrue(bound.Orientation)) {
-           size_t c = 0;
-           for(unsigned int& c : meshout.vertcnt) {
-               std::reverse(result.verts.begin() + cnt,result.verts.begin() + cnt + c);
-               cnt += c;
-           }
-       }*/
    }
    ProcessPolygonBoundaries(result, meshout);
}

// ------------------------------------------------------------------------------------------------
-void ProcessRevolvedAreaSolid(const Schema_2x3::IfcRevolvedAreaSolid& solid, TempMesh& result, ConversionData& conv)
-{
+void ProcessRevolvedAreaSolid(const Schema_2x3::IfcRevolvedAreaSolid& solid, TempMesh& result, ConversionData& conv) {
    TempMesh meshout;

    // first read the profile description

@@ -265,7 +234,8 @@ void ProcessRevolvedAreaSolid(const Schema_2x3::IfcRevolvedAreaSolid& solid, Tem
        return;
    }

-   const unsigned int cnt_segments = std::max(2u,static_cast<unsigned int>(conv.settings.cylindricalTessellation * std::fabs(max_angle)/AI_MATH_HALF_PI_F));
+   const unsigned int cnt_segments =
+       std::max(2u,static_cast<unsigned int>(conv.settings.cylindricalTessellation * std::fabs(max_angle)/AI_MATH_HALF_PI_F));
    const IfcFloat delta = max_angle/cnt_segments;

    has_area = has_area && std::fabs(max_angle) < AI_MATH_TWO_PI_F*0.99;

@@ -324,8 +294,9 @@ void ProcessRevolvedAreaSolid(const Schema_2x3::IfcRevolvedAreaSolid& solid, Tem
}

// ------------------------------------------------------------------------------------------------
-void ProcessSweptDiskSolid(const Schema_2x3::IfcSweptDiskSolid &solid, TempMesh& result, ConversionData& conv)
-{
+void ProcessSweptDiskSolid(const Schema_2x3::IfcSweptDiskSolid &solid,
+       TempMesh& result,
+       ConversionData& conv) {
    const Curve* const curve = Curve::Convert(*solid.Directrix, conv);
    if(!curve) {
        IFCImporter::LogError("failed to convert Directrix curve (IfcSweptDiskSolid)");

@@ -460,8 +431,7 @@ void ProcessSweptDiskSolid(const Schema_2x3::IfcSweptDiskSolid &solid, TempMesh&
}

// ------------------------------------------------------------------------------------------------
-IfcMatrix3 DerivePlaneCoordinateSpace(const TempMesh& curmesh, bool& ok, IfcVector3& norOut)
-{
+IfcMatrix3 DerivePlaneCoordinateSpace(const TempMesh& curmesh, bool& ok, IfcVector3& norOut) {
    const std::vector<IfcVector3>& out = curmesh.mVerts;
    IfcMatrix3 m;

@@ -504,10 +474,6 @@ IfcMatrix3 DerivePlaneCoordinateSpace(const TempMesh& curmesh, bool& ok, IfcVect
    IfcVector3 r = (out[idx]-any_point);
    r.Normalize();

-   //if(d) {
-   //  *d = -any_point * nor;
-   //}
-
    // Reconstruct orthonormal basis
    // XXX use Gram Schmidt for increased robustness
    IfcVector3 u = r ^ nor;

@@ -531,8 +497,7 @@ IfcMatrix3 DerivePlaneCoordinateSpace(const TempMesh& curmesh, bool& ok, IfcVect
const auto closeDistance = ai_epsilon;

bool areClose(Schema_2x3::IfcCartesianPoint pt1,Schema_2x3::IfcCartesianPoint pt2) {
-   if(pt1.Coordinates.size() != pt2.Coordinates.size())
-   {
+   if(pt1.Coordinates.size() != pt2.Coordinates.size()) {
        IFCImporter::LogWarn("unable to compare differently-dimensioned points");
        return false;
    }

@@ -540,10 +505,10 @@ bool areClose(Schema_2x3::IfcCartesianPoint pt1,Schema_2x3::IfcCartesianPoint pt
    auto coord2 = pt2.Coordinates.begin();
    // we're just testing each dimension separately rather than doing euclidean distance, as we're
    // looking for very close coordinates
-   for(; coord1 != pt1.Coordinates.end(); coord1++,coord2++)
-   {
-       if(std::fabs(*coord1 - *coord2) > closeDistance)
+   for(; coord1 != pt1.Coordinates.end(); coord1++,coord2++) {
+       if(std::fabs(*coord1 - *coord2) > closeDistance) {
            return false;
+       }
    }
    return true;
}

@@ -553,6 +518,7 @@ bool areClose(IfcVector3 pt1,IfcVector3 pt2) {
        std::fabs(pt1.y - pt2.y) < closeDistance &&
        std::fabs(pt1.z - pt2.z) < closeDistance);
}

+// Extrudes the given polygon along the direction, converts it into an opening or applies all openings as necessary.
void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const TempMesh& curve,
    const IfcVector3& extrusionDir, TempMesh& result, ConversionData &conv, bool collect_openings)

@@ -590,8 +556,9 @@ void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const Te

    // reverse profile polygon if it's winded in the wrong direction in relation to the extrusion direction
    IfcVector3 profileNormal = TempMesh::ComputePolygonNormal(in.data(), in.size());
-   if( profileNormal * dir < 0.0 )
+   if( profileNormal * dir < 0.0 ) {
        std::reverse(in.begin(), in.end());
+   }

    std::vector<IfcVector3> nors;
    const bool openings = !!conv.apply_openings && conv.apply_openings->size();

@@ -678,8 +645,7 @@ void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const Te
    if(n > 0) {
        for(size_t i = 0; i < in.size(); ++i)
            out.push_back(in[i] + dir);
-   }
-   else {
+   } else {
        for(size_t i = in.size(); i--; )
            out.push_back(in[i]);
    }

@@ -721,9 +687,10 @@ void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const Te
}

// ------------------------------------------------------------------------------------------------
-void ProcessExtrudedAreaSolid(const Schema_2x3::IfcExtrudedAreaSolid& solid, TempMesh& result,
-   ConversionData& conv, bool collect_openings)
-{
+void ProcessExtrudedAreaSolid(const Schema_2x3::IfcExtrudedAreaSolid& solid,
+       TempMesh& result,
+       ConversionData& conv,
+       bool collect_openings) {
    TempMesh meshout;

    // First read the profile description.

@@ -761,24 +728,23 @@ void ProcessExtrudedAreaSolid(const Schema_2x3::IfcExtrudedAreaSolid& solid, Tem
}

// ------------------------------------------------------------------------------------------------
-void ProcessSweptAreaSolid(const Schema_2x3::IfcSweptAreaSolid& swept, TempMesh& meshout,
-   ConversionData& conv)
-{
+void ProcessSweptAreaSolid(const Schema_2x3::IfcSweptAreaSolid& swept,
+       TempMesh& meshout,
+       ConversionData& conv) {
    if(const Schema_2x3::IfcExtrudedAreaSolid* const solid = swept.ToPtr<Schema_2x3::IfcExtrudedAreaSolid>()) {
        ProcessExtrudedAreaSolid(*solid,meshout,conv, !!conv.collect_openings);
-   }
-   else if(const Schema_2x3::IfcRevolvedAreaSolid* const rev = swept.ToPtr<Schema_2x3::IfcRevolvedAreaSolid>()) {
+   } else if(const Schema_2x3::IfcRevolvedAreaSolid* const rev = swept.ToPtr<Schema_2x3::IfcRevolvedAreaSolid>()) {
        ProcessRevolvedAreaSolid(*rev,meshout,conv);
-   }
-   else {
+   } else {
        IFCImporter::LogWarn("skipping unknown IfcSweptAreaSolid entity, type is ", swept.GetClassName());
    }
}

// ------------------------------------------------------------------------------------------------
-bool ProcessGeometricItem(const Schema_2x3::IfcRepresentationItem& geo, unsigned int matid, std::set<unsigned int>& mesh_indices,
-   ConversionData& conv)
-{
+bool ProcessGeometricItem(const Schema_2x3::IfcRepresentationItem& geo,
+       unsigned int matid,
+       std::set<unsigned int>& mesh_indices,
+       ConversionData& conv) {
    bool fix_orientation = false;
    std::shared_ptr< TempMesh > meshtmp = std::make_shared<TempMesh>();
    if(const Schema_2x3::IfcShellBasedSurfaceModel* shellmod = geo.ToPtr<Schema_2x3::IfcShellBasedSurfaceModel>()) {

@@ -788,41 +754,32 @@ bool ProcessGeometricItem(const Schema_2x3::IfcRepresentationItem& geo, unsigned
                const Schema_2x3::IfcConnectedFaceSet& fs = conv.db.MustGetObject(e).To<Schema_2x3::IfcConnectedFaceSet>();

                ProcessConnectedFaceSet(fs, *meshtmp, conv);
-           }
-           catch(std::bad_cast&) {
+           } catch(std::bad_cast&) {
                IFCImporter::LogWarn("unexpected type error, IfcShell ought to inherit from IfcConnectedFaceSet");
            }
        }
        fix_orientation = true;
-   }
-   else if(const Schema_2x3::IfcConnectedFaceSet* fset = geo.ToPtr<Schema_2x3::IfcConnectedFaceSet>()) {
+   } else if(const Schema_2x3::IfcConnectedFaceSet* fset = geo.ToPtr<Schema_2x3::IfcConnectedFaceSet>()) {
        ProcessConnectedFaceSet(*fset, *meshtmp, conv);
        fix_orientation = true;
-   }
-   else if(const Schema_2x3::IfcSweptAreaSolid* swept = geo.ToPtr<Schema_2x3::IfcSweptAreaSolid>()) {
+   } else if(const Schema_2x3::IfcSweptAreaSolid* swept = geo.ToPtr<Schema_2x3::IfcSweptAreaSolid>()) {
        ProcessSweptAreaSolid(*swept, *meshtmp, conv);
    }
    else if(const Schema_2x3::IfcSweptDiskSolid* disk = geo.ToPtr<Schema_2x3::IfcSweptDiskSolid>()) {
|
||||
} else if(const Schema_2x3::IfcSweptDiskSolid* disk = geo.ToPtr<Schema_2x3::IfcSweptDiskSolid>()) {
|
||||
ProcessSweptDiskSolid(*disk, *meshtmp, conv);
|
||||
}
|
||||
else if(const Schema_2x3::IfcManifoldSolidBrep* brep = geo.ToPtr<Schema_2x3::IfcManifoldSolidBrep>()) {
|
||||
} else if(const Schema_2x3::IfcManifoldSolidBrep* brep = geo.ToPtr<Schema_2x3::IfcManifoldSolidBrep>()) {
|
||||
ProcessConnectedFaceSet(brep->Outer, *meshtmp, conv);
|
||||
fix_orientation = true;
|
||||
}
|
||||
else if(const Schema_2x3::IfcFaceBasedSurfaceModel* surf = geo.ToPtr<Schema_2x3::IfcFaceBasedSurfaceModel>()) {
|
||||
} else if(const Schema_2x3::IfcFaceBasedSurfaceModel* surf = geo.ToPtr<Schema_2x3::IfcFaceBasedSurfaceModel>()) {
|
||||
for(const Schema_2x3::IfcConnectedFaceSet& fc : surf->FbsmFaces) {
|
||||
ProcessConnectedFaceSet(fc, *meshtmp, conv);
|
||||
}
|
||||
fix_orientation = true;
|
||||
}
|
||||
else if(const Schema_2x3::IfcBooleanResult* boolean = geo.ToPtr<Schema_2x3::IfcBooleanResult>()) {
|
||||
} else if(const Schema_2x3::IfcBooleanResult* boolean = geo.ToPtr<Schema_2x3::IfcBooleanResult>()) {
|
||||
ProcessBoolean(*boolean, *meshtmp, conv);
|
||||
}
|
||||
else if(geo.ToPtr<Schema_2x3::IfcBoundingBox>()) {
|
||||
} else if(geo.ToPtr<Schema_2x3::IfcBoundingBox>()) {
|
||||
// silently skip over bounding boxes
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
std::stringstream toLog;
|
||||
toLog << "skipping unknown IfcGeometricRepresentationItem entity, type is " << geo.GetClassName() << " id is " << geo.GetID();
|
||||
IFCImporter::LogWarn(toLog.str().c_str());
|
||||
|
@ -868,9 +825,7 @@ bool ProcessGeometricItem(const Schema_2x3::IfcRepresentationItem& geo, unsigned
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void AssignAddedMeshes(std::set<unsigned int>& mesh_indices,aiNode* nd,
|
||||
ConversionData& /*conv*/)
|
||||
{
|
||||
void AssignAddedMeshes(std::set<unsigned int>& mesh_indices,aiNode* nd, ConversionData& /*conv*/) {
|
||||
if (!mesh_indices.empty()) {
|
||||
std::set<unsigned int>::const_iterator it = mesh_indices.cbegin();
|
||||
std::set<unsigned int>::const_iterator end = mesh_indices.cend();
|
||||
|
@ -886,9 +841,9 @@ void AssignAddedMeshes(std::set<unsigned int>& mesh_indices,aiNode* nd,
|
|||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
bool TryQueryMeshCache(const Schema_2x3::IfcRepresentationItem& item,
|
||||
std::set<unsigned int>& mesh_indices, unsigned int mat_index,
|
||||
ConversionData& conv)
|
||||
{
|
||||
std::set<unsigned int>& mesh_indices,
|
||||
unsigned int mat_index,
|
||||
ConversionData& conv) {
|
||||
ConversionData::MeshCacheIndex idx(&item, mat_index);
|
||||
ConversionData::MeshCache::const_iterator it = conv.cached_meshes.find(idx);
|
||||
if (it != conv.cached_meshes.end()) {
|
||||
|
@ -900,18 +855,18 @@ bool TryQueryMeshCache(const Schema_2x3::IfcRepresentationItem& item,
|
|||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void PopulateMeshCache(const Schema_2x3::IfcRepresentationItem& item,
|
||||
const std::set<unsigned int>& mesh_indices, unsigned int mat_index,
|
||||
ConversionData& conv)
|
||||
{
|
||||
const std::set<unsigned int>& mesh_indices,
|
||||
unsigned int mat_index,
|
||||
ConversionData& conv) {
|
||||
ConversionData::MeshCacheIndex idx(&item, mat_index);
|
||||
conv.cached_meshes[idx] = mesh_indices;
|
||||
}
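To make the cache round trip concrete, here is a simplified sketch of the lookup-or-convert-then-store flow that ProcessRepresentationItem (below) builds around these two helpers. The wrapper name ConvertCachedItem is made up for the illustration, and the material-resolution step (ProcessMaterials) is omitted; all other names come from this file.

    // Sketch only: assumes the declarations shown in this file.
    bool ConvertCachedItem(const Schema_2x3::IfcRepresentationItem &item,
                           unsigned int matid,
                           std::set<unsigned int> &mesh_indices,
                           ConversionData &conv) {
        // fast path: meshes already converted for this (item, material) pair
        if (TryQueryMeshCache(item, mesh_indices, matid, conv)) {
            return true;
        }
        // slow path: convert the representation item ...
        if (!ProcessGeometricItem(item, matid, mesh_indices, conv)) {
            return false;
        }
        // ... and remember the result for later occurrences of the same item
        if (!mesh_indices.empty()) {
            PopulateMeshCache(item, mesh_indices, matid, conv);
        }
        return true;
    }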
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
bool ProcessRepresentationItem(const Schema_2x3::IfcRepresentationItem& item, unsigned int matid,
|
||||
std::set<unsigned int>& mesh_indices,
|
||||
ConversionData& conv)
|
||||
{
|
||||
bool ProcessRepresentationItem(const Schema_2x3::IfcRepresentationItem& item,
|
||||
unsigned int matid,
|
||||
std::set<unsigned int>& mesh_indices,
|
||||
ConversionData& conv) {
|
||||
// determine material
|
||||
unsigned int localmatid = ProcessMaterials(item.GetID(), matid, conv, true);
|
||||
|
||||
|
@ -920,8 +875,9 @@ bool ProcessRepresentationItem(const Schema_2x3::IfcRepresentationItem& item, un
|
|||
if(mesh_indices.size()) {
|
||||
PopulateMeshCache(item,mesh_indices,localmatid,conv);
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
else return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -930,4 +886,4 @@ bool ProcessRepresentationItem(const Schema_2x3::IfcRepresentationItem& item, un
|
|||
} // ! IFC
|
||||
} // ! Assimp
|
||||
|
||||
#endif
|
||||
#endif // ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
|
|
@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
|
|||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use of this software in source and binary forms,
|
||||
|
@ -40,9 +39,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/** @file IFCLoad.cpp
|
||||
* @brief Implementation of the Industry Foundation Classes loader.
|
||||
*/
|
||||
/// @file IFCLoad.cpp
|
||||
/// @brief Implementation of the Industry Foundation Classes loader.
|
||||
|
||||
#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
||||
|
@ -92,7 +90,6 @@ using namespace Assimp::IFC;
|
|||
IfcUnitAssignment
|
||||
IfcClosedShell
|
||||
IfcDoor
|
||||
|
||||
*/
|
||||
|
||||
namespace {
|
||||
|
@ -119,14 +116,6 @@ static const aiImporterDesc desc = {
|
|||
"ifc ifczip step stp"
|
||||
};
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Constructor to be privately used by Importer
|
||||
IFCImporter::IFCImporter() = default;
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Destructor, private as well
|
||||
IFCImporter::~IFCImporter() = default;
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Returns whether the class can handle the format of the given file.
|
||||
bool IFCImporter::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool /*checkSig*/) const {
|
||||
|
@ -256,7 +245,12 @@ void IFCImporter::InternReadFile(const std::string &pFile, aiScene *pScene, IOSy
|
|||
|
||||
// tell the reader for which types we need to simulate STEPs reverse indices
|
||||
static const char *const inverse_indices_to_track[] = {
|
||||
"ifcrelcontainedinspatialstructure", "ifcrelaggregates", "ifcrelvoidselement", "ifcreldefinesbyproperties", "ifcpropertyset", "ifcstyleditem"
|
||||
"ifcrelcontainedinspatialstructure",
|
||||
"ifcrelaggregates",
|
||||
"ifcrelvoidselement",
|
||||
"ifcreldefinesbyproperties",
|
||||
"ifcpropertyset",
|
||||
"ifcstyleditem"
|
||||
};
|
||||
|
||||
// feed the IFC schema into the reader and pre-parse all lines
|
||||
|
@ -928,4 +922,4 @@ void MakeTreeRelative(ConversionData &conv) {
|
|||
|
||||
} // namespace
|
||||
|
||||
#endif
|
||||
#endif // ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
|
|
@ -87,8 +87,8 @@ public:
|
|||
int cylindricalTessellation;
|
||||
};
|
||||
|
||||
IFCImporter();
|
||||
~IFCImporter() override;
|
||||
IFCImporter() = default;
|
||||
~IFCImporter() override = default;
|
||||
|
||||
// --------------------
|
||||
bool CanRead(const std::string &pFile,
|
||||
|
|
|
@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
|
|||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use of this software in source and binary forms,
|
||||
|
@ -40,9 +39,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/** @file IFCMaterial.cpp
|
||||
* @brief Implementation of conversion routines to convert IFC materials to aiMaterial
|
||||
*/
|
||||
/// @file IFCMaterial.cpp
|
||||
/// @brief Implementation of conversion routines to convert IFC materials to aiMaterial
|
||||
|
||||
#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
||||
|
@ -174,7 +172,6 @@ unsigned int ProcessMaterials(uint64_t id, unsigned int prevMatId, ConversionDat
|
|||
|
||||
aiString name;
|
||||
name.Set("<IFCDefault>");
|
||||
// ConvertColorToString( color, name);
|
||||
|
||||
// look if there's already a default material with this base color
|
||||
for( size_t a = 0; a < conv.materials.size(); ++a ) {
|
||||
|
|
File diff suppressed because it is too large
|
@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
|
|||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use of this software in source and binary forms,
|
||||
|
@ -40,9 +39,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/** @file IFCProfile.cpp
|
||||
* @brief Read profile and curves entities from IFC files
|
||||
*/
|
||||
/// @file IFCProfile.cpp
|
||||
/// @brief Read profile and curves entities from IFC files
|
||||
|
||||
#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
||||
|
@ -52,8 +50,9 @@ namespace Assimp {
|
|||
namespace IFC {
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ProcessPolyLine(const Schema_2x3::IfcPolyline& def, TempMesh& meshout, ConversionData& /*conv*/)
|
||||
{
|
||||
void ProcessPolyLine(const Schema_2x3::IfcPolyline& def,
|
||||
TempMesh& meshout,
|
||||
ConversionData& /*conv*/) {
|
||||
// this won't produce a valid mesh, it just spits out a list of vertices
|
||||
IfcVector3 t;
|
||||
for(const Schema_2x3::IfcCartesianPoint& cp : def.Points) {
|
||||
|
@ -64,8 +63,9 @@ void ProcessPolyLine(const Schema_2x3::IfcPolyline& def, TempMesh& meshout, Conv
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
bool ProcessCurve(const Schema_2x3::IfcCurve& curve, TempMesh& meshout, ConversionData& conv)
|
||||
{
|
||||
bool ProcessCurve(const Schema_2x3::IfcCurve& curve,
|
||||
TempMesh& meshout,
|
||||
ConversionData& conv) {
|
||||
std::unique_ptr<const Curve> cv(Curve::Convert(curve,conv));
|
||||
if (!cv) {
|
||||
IFCImporter::LogWarn("skipping unknown IfcCurve entity, type is ", curve.GetClassName());
|
||||
|
@ -90,20 +90,23 @@ bool ProcessCurve(const Schema_2x3::IfcCurve& curve, TempMesh& meshout, Convers
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ProcessClosedProfile(const Schema_2x3::IfcArbitraryClosedProfileDef& def, TempMesh& meshout, ConversionData& conv)
|
||||
{
|
||||
void ProcessClosedProfile(const Schema_2x3::IfcArbitraryClosedProfileDef& def,
|
||||
TempMesh& meshout,
|
||||
ConversionData& conv) {
|
||||
ProcessCurve(def.OuterCurve,meshout,conv);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ProcessOpenProfile(const Schema_2x3::IfcArbitraryOpenProfileDef& def, TempMesh& meshout, ConversionData& conv)
|
||||
{
|
||||
void ProcessOpenProfile(const Schema_2x3::IfcArbitraryOpenProfileDef& def,
|
||||
TempMesh& meshout,
|
||||
ConversionData& conv) {
|
||||
ProcessCurve(def.Curve,meshout,conv);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& def, TempMesh& meshout, ConversionData& conv)
|
||||
{
|
||||
void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& def,
|
||||
TempMesh& meshout,
|
||||
ConversionData& conv) {
|
||||
if(const Schema_2x3::IfcRectangleProfileDef* const cprofile = def.ToPtr<Schema_2x3::IfcRectangleProfileDef>()) {
|
||||
const IfcFloat x = cprofile->XDim*0.5f, y = cprofile->YDim*0.5f;
|
||||
|
||||
|
@ -113,8 +116,7 @@ void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& de
|
|||
meshout.mVerts.emplace_back(-x,-y, 0.f );
|
||||
meshout.mVerts.emplace_back( x,-y, 0.f );
|
||||
meshout.mVertcnt.push_back(4);
|
||||
}
|
||||
else if( const Schema_2x3::IfcCircleProfileDef* const circle = def.ToPtr<Schema_2x3::IfcCircleProfileDef>()) {
|
||||
} else if( const Schema_2x3::IfcCircleProfileDef* const circle = def.ToPtr<Schema_2x3::IfcCircleProfileDef>()) {
|
||||
if(def.ToPtr<Schema_2x3::IfcCircleHollowProfileDef>()) {
|
||||
// TODO
|
||||
}
|
||||
|
@ -129,8 +131,7 @@ void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& de
|
|||
}
|
||||
|
||||
meshout.mVertcnt.push_back(static_cast<unsigned int>(segments));
|
||||
}
|
||||
else if( const Schema_2x3::IfcIShapeProfileDef* const ishape = def.ToPtr<Schema_2x3::IfcIShapeProfileDef>()) {
|
||||
} else if( const Schema_2x3::IfcIShapeProfileDef* const ishape = def.ToPtr<Schema_2x3::IfcIShapeProfileDef>()) {
|
||||
// construct simplified IBeam shape
|
||||
const IfcFloat offset = (ishape->OverallWidth - ishape->WebThickness) / 2;
|
||||
const IfcFloat inner_height = ishape->OverallDepth - ishape->FlangeThickness * 2;
|
||||
|
@ -150,8 +151,7 @@ void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& de
|
|||
meshout.mVerts.emplace_back(ishape->OverallWidth,0,0);
|
||||
|
||||
meshout.mVertcnt.push_back(12);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IFCImporter::LogWarn("skipping unknown IfcParameterizedProfileDef entity, type is ", def.GetClassName());
|
||||
return;
|
||||
}
|
||||
|
@ -162,18 +162,14 @@ void ProcessParametrizedProfile(const Schema_2x3::IfcParameterizedProfileDef& de
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
bool ProcessProfile(const Schema_2x3::IfcProfileDef& prof, TempMesh& meshout, ConversionData& conv)
|
||||
{
|
||||
bool ProcessProfile(const Schema_2x3::IfcProfileDef& prof, TempMesh& meshout, ConversionData& conv) {
|
||||
if(const Schema_2x3::IfcArbitraryClosedProfileDef* const cprofile = prof.ToPtr<Schema_2x3::IfcArbitraryClosedProfileDef>()) {
|
||||
ProcessClosedProfile(*cprofile,meshout,conv);
|
||||
}
|
||||
else if(const Schema_2x3::IfcArbitraryOpenProfileDef* const copen = prof.ToPtr<Schema_2x3::IfcArbitraryOpenProfileDef>()) {
|
||||
} else if(const Schema_2x3::IfcArbitraryOpenProfileDef* const copen = prof.ToPtr<Schema_2x3::IfcArbitraryOpenProfileDef>()) {
|
||||
ProcessOpenProfile(*copen,meshout,conv);
|
||||
}
|
||||
else if(const Schema_2x3::IfcParameterizedProfileDef* const cparam = prof.ToPtr<Schema_2x3::IfcParameterizedProfileDef>()) {
|
||||
} else if(const Schema_2x3::IfcParameterizedProfileDef* const cparam = prof.ToPtr<Schema_2x3::IfcParameterizedProfileDef>()) {
|
||||
ProcessParametrizedProfile(*cparam,meshout,conv);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IFCImporter::LogWarn("skipping unknown IfcProfileDef entity, type is ", prof.GetClassName());
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
|
|||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use of this software in source and binary forms,
|
||||
|
@ -40,9 +39,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/** @file IFCUtil.cpp
|
||||
* @brief Implementation of conversion routines for some common Ifc helper entities.
|
||||
*/
|
||||
/// @file IFCUtil.cpp
|
||||
/// @brief Implementation of conversion routines for some common Ifc helper entities.
|
||||
|
||||
#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
||||
|
@ -66,8 +64,7 @@ void TempOpening::Transform(const IfcMatrix4& mat) {
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
aiMesh* TempMesh::ToMesh()
|
||||
{
|
||||
aiMesh* TempMesh::ToMesh() {
|
||||
ai_assert(mVerts.size() == std::accumulate(mVertcnt.begin(),mVertcnt.end(),size_t(0)));
|
||||
|
||||
if (mVerts.empty()) {
|
||||
|
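The assertion above spells out TempMesh's storage scheme: mVerts is one flat vertex array and mVertcnt stores the vertex count of each polygon, so the counts must sum to the total number of vertices. A small sketch of walking that layout (the function name and visitor parameter are illustrative, not part of Assimp):

    // Visit each polygon of a TempMesh-style flat layout.
    template <typename FaceVisitor>
    void ForEachPolygon(const std::vector<IfcVector3> &verts,
                        const std::vector<unsigned int> &vertcnt,
                        FaceVisitor &&visit) {
        size_t offset = 0;
        for (unsigned int count : vertcnt) {
            visit(&verts[offset], count); // polygon occupies verts[offset, offset + count)
            offset += count;
        }
        // by the invariant asserted in ToMesh(), offset == verts.size() here
    }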
@ -105,36 +102,31 @@ aiMesh* TempMesh::ToMesh()
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::Clear()
|
||||
{
|
||||
void TempMesh::Clear() {
|
||||
mVerts.clear();
|
||||
mVertcnt.clear();
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::Transform(const IfcMatrix4& mat)
|
||||
{
|
||||
void TempMesh::Transform(const IfcMatrix4& mat) {
|
||||
for(IfcVector3& v : mVerts) {
|
||||
v *= mat;
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
IfcVector3 TempMesh::Center() const
|
||||
{
|
||||
return (mVerts.size() == 0) ? IfcVector3(0.0f, 0.0f, 0.0f) : (std::accumulate(mVerts.begin(),mVerts.end(),IfcVector3()) / static_cast<IfcFloat>(mVerts.size()));
|
||||
IfcVector3 TempMesh::Center() const {
|
||||
return mVerts.empty() ? IfcVector3(0.0f, 0.0f, 0.0f) : (std::accumulate(mVerts.begin(),mVerts.end(),IfcVector3()) / static_cast<IfcFloat>(mVerts.size()));
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::Append(const TempMesh& other)
|
||||
{
|
||||
void TempMesh::Append(const TempMesh& other) {
|
||||
mVerts.insert(mVerts.end(),other.mVerts.begin(),other.mVerts.end());
|
||||
mVertcnt.insert(mVertcnt.end(),other.mVertcnt.begin(),other.mVertcnt.end());
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::RemoveDegenerates()
|
||||
{
|
||||
void TempMesh::RemoveDegenerates() {
|
||||
// The strategy is simple: walk the mesh and compute normals using
|
||||
// Newell's algorithm. The length of the normals gives the area
|
||||
// of the polygons, which is close to zero for lines.
|
||||
|
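For reference, a self-contained sketch of the Newell normal the comment refers to; the length of the unnormalized result equals twice the polygon's area, so a near-zero length flags a collapsed (line- or point-like) face. This illustrates the technique only and is not Assimp's actual NewellNormal implementation.

    #include <cmath>
    #include <vector>

    struct Vec3 { double x, y, z; };

    // Newell's method: accumulate cross-product terms over the polygon's edges.
    Vec3 NewellNormal(const std::vector<Vec3> &poly) {
        Vec3 n{0.0, 0.0, 0.0};
        for (size_t i = 0; i < poly.size(); ++i) {
            const Vec3 &a = poly[i];
            const Vec3 &b = poly[(i + 1) % poly.size()];
            n.x += (a.y - b.y) * (a.z + b.z);
            n.y += (a.z - b.z) * (a.x + b.x);
            n.z += (a.x - b.x) * (a.y + b.y);
        }
        return n; // |n| == 2 * polygon area for a planar polygon
    }

    bool IsDegenerate(const std::vector<Vec3> &poly, double areaEpsilon) {
        const Vec3 n = NewellNormal(poly);
        return std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z) < 2.0 * areaEpsilon;
    }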
@ -167,11 +159,9 @@ void TempMesh::RemoveDegenerates()
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
IfcVector3 TempMesh::ComputePolygonNormal(const IfcVector3* vtcs, size_t cnt, bool normalize)
|
||||
{
|
||||
IfcVector3 TempMesh::ComputePolygonNormal(const IfcVector3* vtcs, size_t cnt, bool normalize) {
|
||||
std::vector<IfcFloat> temp((cnt+2)*3);
|
||||
for( size_t vofs = 0, i = 0; vofs < cnt; ++vofs )
|
||||
{
|
||||
for( size_t vofs = 0, i = 0; vofs < cnt; ++vofs ) {
|
||||
const IfcVector3& v = vtcs[vofs];
|
||||
temp[i++] = v.x;
|
||||
temp[i++] = v.y;
|
||||
|
@ -185,9 +175,8 @@ IfcVector3 TempMesh::ComputePolygonNormal(const IfcVector3* vtcs, size_t cnt, bo
|
|||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::ComputePolygonNormals(std::vector<IfcVector3>& normals,
|
||||
bool normalize,
|
||||
size_t ofs) const
|
||||
{
|
||||
bool normalize,
|
||||
size_t ofs) const {
|
||||
size_t max_vcount = 0;
|
||||
std::vector<unsigned int>::const_iterator begin = mVertcnt.begin()+ofs, end = mVertcnt.end(), iit;
|
||||
for(iit = begin; iit != end; ++iit) {
|
||||
|
@ -250,29 +239,27 @@ struct FindVector {
|
|||
};
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::FixupFaceOrientation()
|
||||
{
|
||||
void TempMesh::FixupFaceOrientation() {
|
||||
const IfcVector3 vavg = Center();
|
||||
|
||||
// create a list of start indices for all faces to allow random access to faces
|
||||
std::vector<size_t> faceStartIndices(mVertcnt.size());
|
||||
for( size_t i = 0, a = 0; a < mVertcnt.size(); i += mVertcnt[a], ++a )
|
||||
for( size_t i = 0, a = 0; a < mVertcnt.size(); i += mVertcnt[a], ++a ) {
|
||||
faceStartIndices[a] = i;
|
||||
}
|
||||
|
||||
// list all faces on a vertex
|
||||
std::map<IfcVector3, std::vector<size_t>, CompareVector> facesByVertex;
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a )
|
||||
{
|
||||
for( size_t b = 0; b < mVertcnt[a]; ++b )
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a ) {
|
||||
for( size_t b = 0; b < mVertcnt[a]; ++b ) {
|
||||
facesByVertex[mVerts[faceStartIndices[a] + b]].push_back(a);
|
||||
}
|
||||
}
|
||||
// determine neighbourhood for all polys
|
||||
std::vector<size_t> neighbour(mVerts.size(), SIZE_MAX);
|
||||
std::vector<size_t> tempIntersect(10);
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a )
|
||||
{
|
||||
for( size_t b = 0; b < mVertcnt[a]; ++b )
|
||||
{
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a ) {
|
||||
for( size_t b = 0; b < mVertcnt[a]; ++b ) {
|
||||
size_t ib = faceStartIndices[a] + b, nib = faceStartIndices[a] + (b + 1) % mVertcnt[a];
|
||||
const std::vector<size_t>& facesOnB = facesByVertex[mVerts[ib]];
|
||||
const std::vector<size_t>& facesOnNB = facesByVertex[mVerts[nib]];
|
||||
|
@ -281,10 +268,12 @@ void TempMesh::FixupFaceOrientation()
|
|||
std::vector<size_t>::iterator sectend = std::set_intersection(
|
||||
facesOnB.begin(), facesOnB.end(), facesOnNB.begin(), facesOnNB.end(), sectstart);
|
||||
|
||||
if( std::distance(sectstart, sectend) != 2 )
|
||||
if( std::distance(sectstart, sectend) != 2 ) {
|
||||
continue;
|
||||
if( *sectstart == a )
|
||||
}
|
||||
if( *sectstart == a ) {
|
||||
++sectstart;
|
||||
}
|
||||
neighbour[ib] = *sectstart;
|
||||
}
|
||||
}
|
||||
|
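A compact illustration of the neighbour lookup in the loop above: for a directed edge (v, vNext) of face a, the faces incident to both endpoints are intersected, and the entry that is not a itself is the face across that edge. The helper below is a standalone sketch with made-up names; the real code keeps the incidence lists in facesByVertex and writes results into neighbour.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    // Given the sorted lists of faces incident to the two endpoints of an edge of
    // face `self`, return the face sharing that edge, or SIZE_MAX for a border edge.
    size_t NeighbourAcrossEdge(const std::vector<size_t> &facesOnA,
                               const std::vector<size_t> &facesOnB,
                               size_t self) {
        std::vector<size_t> shared;
        std::set_intersection(facesOnA.begin(), facesOnA.end(),
                              facesOnB.begin(), facesOnB.end(),
                              std::back_inserter(shared));
        if (shared.size() != 2) { // a manifold edge is shared by exactly two faces
            return SIZE_MAX;
        }
        return shared[0] == self ? shared[1] : shared[0];
    }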
@ -293,15 +282,14 @@ void TempMesh::FixupFaceOrientation()
|
|||
// facing outwards. So we reverse this face to point outwards in relation to the center. Then we adapt neighbouring
|
||||
// faces to have the same winding until all faces have been tested.
|
||||
std::vector<bool> faceDone(mVertcnt.size(), false);
|
||||
while( std::count(faceDone.begin(), faceDone.end(), false) != 0 )
|
||||
{
|
||||
while( std::count(faceDone.begin(), faceDone.end(), false) != 0 ) {
|
||||
// find the farthest of the remaining faces
|
||||
size_t farthestIndex = SIZE_MAX;
|
||||
IfcFloat farthestDistance = -1.0;
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a )
|
||||
{
|
||||
if( faceDone[a] )
|
||||
for( size_t a = 0; a < mVertcnt.size(); ++a ) {
|
||||
if( faceDone[a] ) {
|
||||
continue;
|
||||
}
|
||||
IfcVector3 faceCenter = std::accumulate(mVerts.begin() + faceStartIndices[a],
|
||||
mVerts.begin() + faceStartIndices[a] + mVertcnt[a], IfcVector3(0.0)) / IfcFloat(mVertcnt[a]);
|
||||
IfcFloat dst = (faceCenter - vavg).SquareLength();
|
||||
|
@ -315,8 +303,7 @@ void TempMesh::FixupFaceOrientation()
|
|||
/ IfcFloat(mVertcnt[farthestIndex]);
|
||||
// We accept a bit of negative orientation without reversing. In case of doubt, prefer the orientation given in
|
||||
// the file.
|
||||
if( (farthestNormal * (farthestCenter - vavg).Normalize()) < -0.4 )
|
||||
{
|
||||
if( (farthestNormal * (farthestCenter - vavg).Normalize()) < -0.4 ) {
|
||||
size_t fsi = faceStartIndices[farthestIndex], fvc = mVertcnt[farthestIndex];
|
||||
std::reverse(mVerts.begin() + fsi, mVerts.begin() + fsi + fvc);
|
||||
std::reverse(neighbour.begin() + fsi, neighbour.begin() + fsi + fvc);
|
||||
|
@ -333,19 +320,18 @@ void TempMesh::FixupFaceOrientation()
|
|||
todo.push_back(farthestIndex);
|
||||
|
||||
// go over its neighbour faces recursively and adapt their winding order to match the farthest face
|
||||
while( !todo.empty() )
|
||||
{
|
||||
while( !todo.empty() ) {
|
||||
size_t tdf = todo.back();
|
||||
size_t vsi = faceStartIndices[tdf], vc = mVertcnt[tdf];
|
||||
todo.pop_back();
|
||||
|
||||
// check its neighbours
|
||||
for( size_t a = 0; a < vc; ++a )
|
||||
{
|
||||
for( size_t a = 0; a < vc; ++a ) {
|
||||
// ignore neighbours if we already checked them
|
||||
size_t nbi = neighbour[vsi + a];
|
||||
if( nbi == SIZE_MAX || faceDone[nbi] )
|
||||
if( nbi == SIZE_MAX || faceDone[nbi] ) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const IfcVector3& vp = mVerts[vsi + a];
|
||||
size_t nbvsi = faceStartIndices[nbi], nbvc = mVertcnt[nbi];
|
||||
|
@ -388,32 +374,8 @@ void TempMesh::RemoveAdjacentDuplicates() {
|
|||
IfcVector3 vmin,vmax;
|
||||
ArrayBounds(&*base, cnt ,vmin,vmax);
|
||||
|
||||
|
||||
const IfcFloat epsilon = (vmax-vmin).SquareLength() / static_cast<IfcFloat>(1e9);
|
||||
//const IfcFloat dotepsilon = 1e-9;
|
||||
|
||||
//// look for vertices that lie directly on the line between their predecessor and their
|
||||
//// successor and replace them with either of them.
|
||||
|
||||
//for(size_t i = 0; i < cnt; ++i) {
|
||||
// IfcVector3& v1 = *(base+i), &v0 = *(base+(i?i-1:cnt-1)), &v2 = *(base+(i+1)%cnt);
|
||||
// const IfcVector3& d0 = (v1-v0), &d1 = (v2-v1);
|
||||
// const IfcFloat l0 = d0.SquareLength(), l1 = d1.SquareLength();
|
||||
// if (!l0 || !l1) {
|
||||
// continue;
|
||||
// }
|
||||
|
||||
// const IfcFloat d = (d0/std::sqrt(l0))*(d1/std::sqrt(l1));
|
||||
|
||||
// if ( d >= 1.f-dotepsilon ) {
|
||||
// v1 = v0;
|
||||
// }
|
||||
// else if ( d < -1.f+dotepsilon ) {
|
||||
// v2 = v1;
|
||||
// continue;
|
||||
// }
|
||||
//}
|
||||
|
||||
|
||||
// drop any identical, adjacent vertices. this pass will collect the dropouts
|
||||
// of the previous pass as a side-effect.
|
||||
FuzzyVectorCompare fz(epsilon);
|
||||
|
@ -440,78 +402,58 @@ void TempMesh::RemoveAdjacentDuplicates() {
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void TempMesh::Swap(TempMesh& other)
|
||||
{
|
||||
void TempMesh::Swap(TempMesh& other) {
|
||||
mVertcnt.swap(other.mVertcnt);
|
||||
mVerts.swap(other.mVerts);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
bool IsTrue(const ::Assimp::STEP::EXPRESS::BOOLEAN& in)
|
||||
{
|
||||
bool IsTrue(const ::Assimp::STEP::EXPRESS::BOOLEAN& in) {
|
||||
return (std::string)in == "TRUE" || (std::string)in == "T";
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
IfcFloat ConvertSIPrefix(const std::string& prefix)
|
||||
{
|
||||
IfcFloat ConvertSIPrefix(const std::string& prefix) {
|
||||
if (prefix == "EXA") {
|
||||
return 1e18f;
|
||||
}
|
||||
else if (prefix == "PETA") {
|
||||
} else if (prefix == "PETA") {
|
||||
return 1e15f;
|
||||
}
|
||||
else if (prefix == "TERA") {
|
||||
} else if (prefix == "TERA") {
|
||||
return 1e12f;
|
||||
}
|
||||
else if (prefix == "GIGA") {
|
||||
} else if (prefix == "GIGA") {
|
||||
return 1e9f;
|
||||
}
|
||||
else if (prefix == "MEGA") {
|
||||
} else if (prefix == "MEGA") {
|
||||
return 1e6f;
|
||||
}
|
||||
else if (prefix == "KILO") {
|
||||
} else if (prefix == "KILO") {
|
||||
return 1e3f;
|
||||
}
|
||||
else if (prefix == "HECTO") {
|
||||
} else if (prefix == "HECTO") {
|
||||
return 1e2f;
|
||||
}
|
||||
else if (prefix == "DECA") {
|
||||
} else if (prefix == "DECA") {
|
||||
return 1e-0f;
|
||||
}
|
||||
else if (prefix == "DECI") {
|
||||
} else if (prefix == "DECI") {
|
||||
return 1e-1f;
|
||||
}
|
||||
else if (prefix == "CENTI") {
|
||||
} else if (prefix == "CENTI") {
|
||||
return 1e-2f;
|
||||
}
|
||||
else if (prefix == "MILLI") {
|
||||
} else if (prefix == "MILLI") {
|
||||
return 1e-3f;
|
||||
}
|
||||
else if (prefix == "MICRO") {
|
||||
} else if (prefix == "MICRO") {
|
||||
return 1e-6f;
|
||||
}
|
||||
else if (prefix == "NANO") {
|
||||
} else if (prefix == "NANO") {
|
||||
return 1e-9f;
|
||||
}
|
||||
else if (prefix == "PICO") {
|
||||
} else if (prefix == "PICO") {
|
||||
return 1e-12f;
|
||||
}
|
||||
else if (prefix == "FEMTO") {
|
||||
} else if (prefix == "FEMTO") {
|
||||
return 1e-15f;
|
||||
}
|
||||
else if (prefix == "ATTO") {
|
||||
} else if (prefix == "ATTO") {
|
||||
return 1e-18f;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IFCImporter::LogError("Unrecognized SI prefix: ", prefix);
|
||||
return 1;
|
||||
}
|
||||
}
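The prefix chain above is easy to follow but long; for comparison, the same mapping expressed as a lookup table is sketched below (the LookupSIPrefix name is made up, and this is an alternative illustration, not the code the patch adopts). Note that the original returns 1e-0f, i.e. 1, for "DECA", although the SI deca prefix normally denotes 10; the table keeps the original value and flags it.

    #include <string>
    #include <unordered_map>

    double LookupSIPrefix(const std::string &prefix) {
        static const std::unordered_map<std::string, double> factors = {
            { "EXA", 1e18 },  { "PETA", 1e15 },  { "TERA", 1e12 },  { "GIGA", 1e9 },
            { "MEGA", 1e6 },  { "KILO", 1e3 },   { "HECTO", 1e2 },
            { "DECA", 1e-0 }, // kept as in the original; SI deca is normally 1e1
            { "DECI", 1e-1 }, { "CENTI", 1e-2 }, { "MILLI", 1e-3 }, { "MICRO", 1e-6 },
            { "NANO", 1e-9 }, { "PICO", 1e-12 }, { "FEMTO", 1e-15 }, { "ATTO", 1e-18 },
        };
        const auto it = factors.find(prefix);
        return it != factors.end() ? it->second : 1.0; // unknown prefix: fall back to 1
    }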
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertColor(aiColor4D& out, const Schema_2x3::IfcColourRgb& in)
|
||||
{
|
||||
void ConvertColor(aiColor4D& out, const Schema_2x3::IfcColourRgb& in) {
|
||||
out.r = static_cast<float>( in.Red );
|
||||
out.g = static_cast<float>( in.Green );
|
||||
out.b = static_cast<float>( in.Blue );
|
||||
|
@ -519,8 +461,10 @@ void ConvertColor(aiColor4D& out, const Schema_2x3::IfcColourRgb& in)
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertColor(aiColor4D& out, const Schema_2x3::IfcColourOrFactor& in,ConversionData& conv,const aiColor4D* base)
|
||||
{
|
||||
void ConvertColor(aiColor4D& out,
|
||||
const Schema_2x3::IfcColourOrFactor& in,
|
||||
ConversionData& conv,
|
||||
const aiColor4D* base) {
|
||||
if (const ::Assimp::STEP::EXPRESS::REAL* const r = in.ToPtr<::Assimp::STEP::EXPRESS::REAL>()) {
|
||||
out.r = out.g = out.b = static_cast<float>(*r);
|
||||
if(base) {
|
||||
|
@ -528,20 +472,18 @@ void ConvertColor(aiColor4D& out, const Schema_2x3::IfcColourOrFactor& in,Conver
|
|||
out.g *= static_cast<float>( base->g );
|
||||
out.b *= static_cast<float>( base->b );
|
||||
out.a = static_cast<float>( base->a );
|
||||
} else {
|
||||
out.a = 1.0;
|
||||
}
|
||||
else out.a = 1.0;
|
||||
}
|
||||
else if (const Schema_2x3::IfcColourRgb* const rgb = in.ResolveSelectPtr<Schema_2x3::IfcColourRgb>(conv.db)) {
|
||||
} else if (const Schema_2x3::IfcColourRgb* const rgb = in.ResolveSelectPtr<Schema_2x3::IfcColourRgb>(conv.db)) {
|
||||
ConvertColor(out,*rgb);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IFCImporter::LogWarn("skipping unknown IfcColourOrFactor entity");
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertCartesianPoint(IfcVector3& out, const Schema_2x3::IfcCartesianPoint& in)
|
||||
{
|
||||
void ConvertCartesianPoint(IfcVector3& out, const Schema_2x3::IfcCartesianPoint& in) {
|
||||
out = IfcVector3();
|
||||
for(size_t i = 0; i < in.Coordinates.size(); ++i) {
|
||||
out[static_cast<unsigned int>(i)] = in.Coordinates[i];
|
||||
|
@ -549,15 +491,13 @@ void ConvertCartesianPoint(IfcVector3& out, const Schema_2x3::IfcCartesianPoint&
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertVector(IfcVector3& out, const Schema_2x3::IfcVector& in)
|
||||
{
|
||||
void ConvertVector(IfcVector3& out, const Schema_2x3::IfcVector& in) {
|
||||
ConvertDirection(out,in.Orientation);
|
||||
out *= in.Magnitude;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertDirection(IfcVector3& out, const Schema_2x3::IfcDirection& in)
|
||||
{
|
||||
void ConvertDirection(IfcVector3& out, const Schema_2x3::IfcDirection& in) {
|
||||
out = IfcVector3();
|
||||
for(size_t i = 0; i < in.DirectionRatios.size(); ++i) {
|
||||
out[static_cast<unsigned int>(i)] = in.DirectionRatios[i];
|
||||
|
@ -571,8 +511,7 @@ void ConvertDirection(IfcVector3& out, const Schema_2x3::IfcDirection& in)
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void AssignMatrixAxes(IfcMatrix4& out, const IfcVector3& x, const IfcVector3& y, const IfcVector3& z)
|
||||
{
|
||||
void AssignMatrixAxes(IfcMatrix4& out, const IfcVector3& x, const IfcVector3& y, const IfcVector3& z) {
|
||||
out.a1 = x.x;
|
||||
out.b1 = x.y;
|
||||
out.c1 = x.z;
|
||||
|
@ -587,8 +526,7 @@ void AssignMatrixAxes(IfcMatrix4& out, const IfcVector3& x, const IfcVector3& y,
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement3D& in)
|
||||
{
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement3D& in) {
|
||||
IfcVector3 loc;
|
||||
ConvertCartesianPoint(loc,in.Location);
|
||||
|
||||
|
@ -612,8 +550,7 @@ void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement3D
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement2D& in)
|
||||
{
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement2D& in) {
|
||||
IfcVector3 loc;
|
||||
ConvertCartesianPoint(loc,in.Location);
|
||||
|
||||
|
@ -629,34 +566,28 @@ void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement2D
|
|||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertAxisPlacement(IfcVector3& axis, IfcVector3& pos, const Schema_2x3::IfcAxis1Placement& in)
|
||||
{
|
||||
void ConvertAxisPlacement(IfcVector3& axis, IfcVector3& pos, const Schema_2x3::IfcAxis1Placement& in) {
|
||||
ConvertCartesianPoint(pos,in.Location);
|
||||
if (in.Axis) {
|
||||
ConvertDirection(axis,in.Axis.Get());
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
axis = IfcVector3(0.f,0.f,1.f);
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement& in, ConversionData& conv)
|
||||
{
|
||||
void ConvertAxisPlacement(IfcMatrix4& out, const Schema_2x3::IfcAxis2Placement& in, ConversionData& conv) {
|
||||
if(const Schema_2x3::IfcAxis2Placement3D* pl3 = in.ResolveSelectPtr<Schema_2x3::IfcAxis2Placement3D>(conv.db)) {
|
||||
ConvertAxisPlacement(out,*pl3);
|
||||
}
|
||||
else if(const Schema_2x3::IfcAxis2Placement2D* pl2 = in.ResolveSelectPtr<Schema_2x3::IfcAxis2Placement2D>(conv.db)) {
|
||||
} else if(const Schema_2x3::IfcAxis2Placement2D* pl2 = in.ResolveSelectPtr<Schema_2x3::IfcAxis2Placement2D>(conv.db)) {
|
||||
ConvertAxisPlacement(out,*pl2);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IFCImporter::LogWarn("skipping unknown IfcAxis2Placement entity");
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
void ConvertTransformOperator(IfcMatrix4& out, const Schema_2x3::IfcCartesianTransformationOperator& op)
|
||||
{
|
||||
void ConvertTransformOperator(IfcMatrix4& out, const Schema_2x3::IfcCartesianTransformationOperator& op) {
|
||||
IfcVector3 loc;
|
||||
ConvertCartesianPoint(loc,op.LocalOrigin);
|
||||
|
||||
|
@ -677,14 +608,12 @@ void ConvertTransformOperator(IfcMatrix4& out, const Schema_2x3::IfcCartesianTra
|
|||
IfcMatrix4::Translation(loc,locm);
|
||||
AssignMatrixAxes(out,x,y,z);
|
||||
|
||||
|
||||
IfcVector3 vscale;
|
||||
if (const Schema_2x3::IfcCartesianTransformationOperator3DnonUniform* nuni = op.ToPtr<Schema_2x3::IfcCartesianTransformationOperator3DnonUniform>()) {
|
||||
vscale.x = nuni->Scale?op.Scale.Get():1.f;
|
||||
vscale.y = nuni->Scale2?nuni->Scale2.Get():1.f;
|
||||
vscale.z = nuni->Scale3?nuni->Scale3.Get():1.f;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
const IfcFloat sc = op.Scale?op.Scale.Get():1.f;
|
||||
vscale = IfcVector3(sc,sc,sc);
|
||||
}
|
||||
|
@ -695,8 +624,7 @@ void ConvertTransformOperator(IfcMatrix4& out, const Schema_2x3::IfcCartesianTra
|
|||
out = locm * out * s;
|
||||
}
|
||||
|
||||
|
||||
} // ! IFC
|
||||
} // ! Assimp
|
||||
|
||||
#endif
|
||||
#endif // ASSIMP_BUILD_NO_IFC_IMPORTER
|
||||
|
|
File diff suppressed because it is too large
|
@ -53,7 +53,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <assimp/StringUtils.h>
|
||||
#include <assimp/anim.h>
|
||||
|
||||
namespace Assimp {
|
||||
namespace Assimp {
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
/** Irr importer class.
|
||||
|
@ -71,13 +71,13 @@ public:
|
|||
/** Returns whether the class can handle the format of the given file.
|
||||
* See BaseImporter::CanRead() for details.
|
||||
*/
|
||||
bool CanRead( const std::string& pFile, IOSystem* pIOHandler,
|
||||
bool checkSig) const override;
|
||||
bool CanRead(const std::string &pFile, IOSystem *pIOHandler,
|
||||
bool checkSig) const override;
|
||||
|
||||
protected:
|
||||
const aiImporterDesc* GetInfo () const override;
|
||||
void InternReadFile( const std::string& pFile, aiScene* pScene, IOSystem* pIOHandler) override;
|
||||
void SetupProperties(const Importer* pImp) override;
|
||||
const aiImporterDesc *GetInfo() const override;
|
||||
void InternReadFile(const std::string &pFile, aiScene *pScene, IOSystem *pIOHandler) override;
|
||||
void SetupProperties(const Importer *pImp) override;
|
||||
|
||||
private:
|
||||
/** Data structure for a scene-graph node animator
|
||||
|
@ -85,27 +85,19 @@ private:
|
|||
struct Animator {
|
||||
// Type of the animator
|
||||
enum AT {
|
||||
UNKNOWN = 0x0,
|
||||
ROTATION = 0x1,
|
||||
FLY_CIRCLE = 0x2,
|
||||
FLY_STRAIGHT = 0x3,
|
||||
UNKNOWN = 0x0,
|
||||
ROTATION = 0x1,
|
||||
FLY_CIRCLE = 0x2,
|
||||
FLY_STRAIGHT = 0x3,
|
||||
FOLLOW_SPLINE = 0x4,
|
||||
OTHER = 0x5
|
||||
OTHER = 0x5
|
||||
|
||||
} type;
|
||||
|
||||
explicit Animator(AT t = UNKNOWN)
|
||||
: type (t)
|
||||
, speed ( ai_real( 0.001 ) )
|
||||
, direction ( ai_real( 0.0 ), ai_real( 1.0 ), ai_real( 0.0 ) )
|
||||
, circleRadius ( ai_real( 1.0) )
|
||||
, tightness ( ai_real( 0.5 ) )
|
||||
, loop (true)
|
||||
, timeForWay (100)
|
||||
{
|
||||
explicit Animator(AT t = UNKNOWN) :
|
||||
type(t), speed(ai_real(0.001)), direction(ai_real(0.0), ai_real(1.0), ai_real(0.0)), circleRadius(ai_real(1.0)), tightness(ai_real(0.5)), loop(true), timeForWay(100) {
|
||||
}
|
||||
|
||||
|
||||
// common parameters
|
||||
ai_real speed;
|
||||
aiVector3D direction;
|
||||
|
@ -128,11 +120,9 @@ private:
|
|||
|
||||
/** Data structure for a scene-graph node in an IRR file
|
||||
*/
|
||||
struct Node
|
||||
{
|
||||
struct Node {
|
||||
// Type of the node
|
||||
enum ET
|
||||
{
|
||||
enum ET {
|
||||
LIGHT,
|
||||
CUBE,
|
||||
MESH,
|
||||
|
@ -144,21 +134,20 @@ private:
|
|||
ANIMMESH
|
||||
} type;
|
||||
|
||||
explicit Node(ET t)
|
||||
: type (t)
|
||||
, scaling (1.0,1.0,1.0) // assume uniform scaling by default
|
||||
, parent()
|
||||
, framesPerSecond (0.0)
|
||||
, id()
|
||||
, sphereRadius (1.0)
|
||||
, spherePolyCountX (100)
|
||||
, spherePolyCountY (100)
|
||||
{
|
||||
explicit Node(ET t) :
|
||||
type(t), scaling(1.0, 1.0, 1.0) // assume uniform scaling by default
|
||||
,
|
||||
parent(),
|
||||
framesPerSecond(0.0),
|
||||
id(),
|
||||
sphereRadius(1.0),
|
||||
spherePolyCountX(100),
|
||||
spherePolyCountY(100) {
|
||||
|
||||
// Generate a default name for the node
|
||||
char buffer[128];
|
||||
static int cnt;
|
||||
ai_snprintf(buffer, 128, "IrrNode_%i",cnt++);
|
||||
ai_snprintf(buffer, 128, "IrrNode_%i", cnt++);
|
||||
name = std::string(buffer);
|
||||
|
||||
// reserve space for up to 5 materials
|
||||
|
@ -175,10 +164,10 @@ private:
|
|||
std::string name;
|
||||
|
||||
// List of all child nodes
|
||||
std::vector<Node*> children;
|
||||
std::vector<Node *> children;
|
||||
|
||||
// Parent node
|
||||
Node* parent;
|
||||
Node *parent;
|
||||
|
||||
// Animated meshes: frames per second
|
||||
// 0.f if not specified
|
||||
|
@ -190,13 +179,13 @@ private:
|
|||
|
||||
// Meshes: List of materials to be assigned
|
||||
// along with their corresponding material flags
|
||||
std::vector< std::pair<aiMaterial*, unsigned int> > materials;
|
||||
std::vector<std::pair<aiMaterial *, unsigned int>> materials;
|
||||
|
||||
// Spheres: radius of the sphere to be generated
|
||||
ai_real sphereRadius;
|
||||
|
||||
// Spheres: Number of polygons in the x,y direction
|
||||
unsigned int spherePolyCountX,spherePolyCountY;
|
||||
unsigned int spherePolyCountX, spherePolyCountY;
|
||||
|
||||
// List of all animators assigned to the node
|
||||
std::list<Animator> animators;
|
||||
|
@ -204,40 +193,54 @@ private:
|
|||
|
||||
/** Data structure for a vertex in an IRR skybox
|
||||
*/
|
||||
struct SkyboxVertex
|
||||
{
|
||||
struct SkyboxVertex {
|
||||
SkyboxVertex() = default;
|
||||
|
||||
//! Construction from single vertex components
|
||||
SkyboxVertex(ai_real px, ai_real py, ai_real pz,
|
||||
ai_real nx, ai_real ny, ai_real nz,
|
||||
ai_real uvx, ai_real uvy)
|
||||
ai_real nx, ai_real ny, ai_real nz,
|
||||
ai_real uvx, ai_real uvy)
|
||||
|
||||
: position (px,py,pz)
|
||||
, normal (nx,ny,nz)
|
||||
, uv (uvx,uvy,0.0)
|
||||
{}
|
||||
:
|
||||
position(px, py, pz), normal(nx, ny, nz), uv(uvx, uvy, 0.0) {}
|
||||
|
||||
aiVector3D position, normal, uv;
|
||||
};
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Parse <node> tag from XML file and extract child node
|
||||
// @param node XML node
|
||||
// @param guessedMeshesContained number of extra guessed meshes
|
||||
IRRImporter::Node *ParseNode(pugi::xml_node &node, BatchLoader& batch);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Parse <attributes> tags within <node> tags and apply to scene node
|
||||
// @param attributeNode XML child node
|
||||
// @param nd Attributed scene node
|
||||
void ParseNodeAttributes(pugi::xml_node &attributeNode, IRRImporter::Node *nd, BatchLoader& batch);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Parse an <animator> node and attach an animator to a node
|
||||
// @param animatorNode XML animator node
|
||||
// @param nd Animated scene node
|
||||
void ParseAnimators(pugi::xml_node &animatorNode, IRRImporter::Node *nd);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/// Fill the scene-graph recursively
|
||||
void GenerateGraph(Node* root,aiNode* rootOut ,aiScene* scene,
|
||||
BatchLoader& batch,
|
||||
std::vector<aiMesh*>& meshes,
|
||||
std::vector<aiNodeAnim*>& anims,
|
||||
std::vector<AttachmentInfo>& attach,
|
||||
std::vector<aiMaterial*>& materials,
|
||||
unsigned int& defaultMatIdx);
|
||||
void GenerateGraph(Node *root, aiNode *rootOut, aiScene *scene,
|
||||
BatchLoader &batch,
|
||||
std::vector<aiMesh *> &meshes,
|
||||
std::vector<aiNodeAnim *> &anims,
|
||||
std::vector<AttachmentInfo> &attach,
|
||||
std::vector<aiMaterial *> &materials,
|
||||
unsigned int &defaultMatIdx);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/// Generate a mesh that consists of just a single quad
|
||||
aiMesh* BuildSingleQuadMesh(const SkyboxVertex& v1,
|
||||
const SkyboxVertex& v2,
|
||||
const SkyboxVertex& v3,
|
||||
const SkyboxVertex& v4);
|
||||
aiMesh *BuildSingleQuadMesh(const SkyboxVertex &v1,
|
||||
const SkyboxVertex &v2,
|
||||
const SkyboxVertex &v3,
|
||||
const SkyboxVertex &v4);
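As an aside on the single-quad helper declared above, a minimal sketch of what building such a mesh involves with Assimp's public types (positions only, error handling omitted); the function name is made up and this is not the loader's actual implementation.

    #include <assimp/mesh.h>

    // Build an aiMesh holding one quad face from four corner positions.
    // The mesh owns its arrays; whoever ends up holding it (normally the
    // aiScene) is responsible for deleting it.
    static aiMesh *MakeQuadMesh(const aiVector3D &a, const aiVector3D &b,
                                const aiVector3D &c, const aiVector3D &d) {
        aiMesh *mesh = new aiMesh();
        mesh->mNumVertices = 4;
        mesh->mVertices = new aiVector3D[4]{ a, b, c, d };

        mesh->mNumFaces = 1;
        mesh->mFaces = new aiFace[1];
        mesh->mFaces[0].mNumIndices = 4;
        mesh->mFaces[0].mIndices = new unsigned int[4]{ 0, 1, 2, 3 };

        mesh->mPrimitiveTypes = aiPrimitiveType_POLYGON;
        return mesh;
    }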
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/// Build a sky-box
|
||||
|
@ -245,8 +248,8 @@ private:
|
|||
/// @param meshes Receives 6 output meshes
|
||||
/// @param materials The last 6 materials are assigned to the newly
|
||||
/// created meshes. The names of the materials are adjusted.
|
||||
void BuildSkybox(std::vector<aiMesh*>& meshes,
|
||||
std::vector<aiMaterial*> materials);
|
||||
void BuildSkybox(std::vector<aiMesh *> &meshes,
|
||||
std::vector<aiMaterial *> materials);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Copy a material for a mesh to the output material list
|
||||
|
@ -256,10 +259,10 @@ private:
|
|||
* @param defMatIdx Default material index - UINT_MAX if not present
|
||||
* @param mesh Mesh to work on
|
||||
*/
|
||||
void CopyMaterial(std::vector<aiMaterial*>& materials,
|
||||
std::vector< std::pair<aiMaterial*, unsigned int> >& inmaterials,
|
||||
unsigned int& defMatIdx,
|
||||
aiMesh* mesh);
|
||||
void CopyMaterial(std::vector<aiMaterial *> &materials,
|
||||
std::vector<std::pair<aiMaterial *, unsigned int>> &inmaterials,
|
||||
unsigned int &defMatIdx,
|
||||
aiMesh *mesh);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Compute animations for a specific node
|
||||
|
@ -267,8 +270,8 @@ private:
|
|||
* @param root Node to be processed
|
||||
* @param anims The list of output animations
|
||||
*/
|
||||
void ComputeAnimations(Node* root, aiNode* real,
|
||||
std::vector<aiNodeAnim*>& anims);
|
||||
void ComputeAnimations(Node *root, aiNode *real,
|
||||
std::vector<aiNodeAnim *> &anims);
|
||||
|
||||
private:
|
||||
/// Configuration option: desired output FPS
|
||||
|
@ -276,6 +279,12 @@ private:
|
|||
|
||||
/// Configuration option: speed flag was set?
|
||||
bool configSpeedFlag;
|
||||
|
||||
std::vector<aiCamera*> cameras;
|
||||
std::vector<aiLight*> lights;
|
||||
unsigned int guessedMeshCnt;
|
||||
unsigned int guessedMatCnt;
|
||||
unsigned int guessedAnimCnt;
|
||||
};
|
||||
|
||||
} // end of namespace Assimp
|
||||
|
|
|
@ -57,16 +57,16 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
using namespace Assimp;
|
||||
|
||||
static const aiImporterDesc desc = {
|
||||
"Irrlicht Mesh Reader",
|
||||
"",
|
||||
"",
|
||||
"http://irrlicht.sourceforge.net/",
|
||||
aiImporterFlags_SupportTextFlavour,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
"xml irrmesh"
|
||||
"Irrlicht Mesh Reader",
|
||||
"",
|
||||
"",
|
||||
"http://irrlicht.sourceforge.net/",
|
||||
aiImporterFlags_SupportTextFlavour,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
"xml irrmesh"
|
||||
};
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
@ -80,419 +80,443 @@ IRRMeshImporter::~IRRMeshImporter() = default;
|
|||
// ------------------------------------------------------------------------------------------------
|
||||
// Returns whether the class can handle the format of the given file.
|
||||
bool IRRMeshImporter::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool /*checkSig*/) const {
|
||||
/* NOTE: A simple check for the file extension is not enough
|
||||
* here. Irrmesh and irr are easy, but xml is too generic
|
||||
* and could be collada, too. So we need to open the file and
|
||||
* search for typical tokens.
|
||||
*/
|
||||
static const char *tokens[] = { "irrmesh" };
|
||||
return SearchFileHeaderForToken(pIOHandler, pFile, tokens, AI_COUNT_OF(tokens));
|
||||
/* NOTE: A simple check for the file extension is not enough
|
||||
* here. Irrmesh and irr are easy, but xml is too generic
|
||||
* and could be collada, too. So we need to open the file and
|
||||
* search for typical tokens.
|
||||
*/
|
||||
static const char *tokens[] = { "irrmesh" };
|
||||
return SearchFileHeaderForToken(pIOHandler, pFile, tokens, AI_COUNT_OF(tokens));
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Get a list of all file extensions which are handled by this class
|
||||
const aiImporterDesc *IRRMeshImporter::GetInfo() const {
|
||||
return &desc;
|
||||
return &desc;
|
||||
}
|
||||
|
||||
static void releaseMaterial(aiMaterial **mat) {
|
||||
if (*mat != nullptr) {
|
||||
delete *mat;
|
||||
*mat = nullptr;
|
||||
}
|
||||
if (*mat != nullptr) {
|
||||
delete *mat;
|
||||
*mat = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
static void releaseMesh(aiMesh **mesh) {
|
||||
if (*mesh != nullptr) {
|
||||
delete *mesh;
|
||||
*mesh = nullptr;
|
||||
}
|
||||
if (*mesh != nullptr) {
|
||||
delete *mesh;
|
||||
*mesh = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Imports the given file into the given scene structure.
|
||||
void IRRMeshImporter::InternReadFile(const std::string &pFile,
|
||||
aiScene *pScene, IOSystem *pIOHandler) {
|
||||
std::unique_ptr<IOStream> file(pIOHandler->Open(pFile));
|
||||
aiScene *pScene, IOSystem *pIOHandler) {
|
||||
std::unique_ptr<IOStream> file(pIOHandler->Open(pFile));
|
||||
|
||||
// Check whether we can read from the file
|
||||
if (file == nullptr)
|
||||
throw DeadlyImportError("Failed to open IRRMESH file ", pFile);
|
||||
// Check whether we can read from the file
|
||||
if (file == nullptr)
|
||||
throw DeadlyImportError("Failed to open IRRMESH file ", pFile);
|
||||
|
||||
// Construct the irrXML parser
|
||||
XmlParser parser;
|
||||
if (!parser.parse( file.get() )) {
|
||||
throw DeadlyImportError("XML parse error while loading IRRMESH file ", pFile);
|
||||
}
|
||||
XmlNode root = parser.getRootNode();
|
||||
// Construct the irrXML parser
|
||||
XmlParser parser;
|
||||
if (!parser.parse(file.get())) {
|
||||
throw DeadlyImportError("XML parse error while loading IRRMESH file ", pFile);
|
||||
}
|
||||
XmlNode root = parser.getRootNode();
|
||||
|
||||
// final data
|
||||
std::vector<aiMaterial *> materials;
|
||||
std::vector<aiMesh *> meshes;
|
||||
materials.reserve(5);
|
||||
meshes.reserve(5);
|
||||
// final data
|
||||
std::vector<aiMaterial *> materials;
|
||||
std::vector<aiMesh *> meshes;
|
||||
materials.reserve(5);
|
||||
meshes.reserve(5);
|
||||
|
||||
// temporary data - current mesh buffer
|
||||
aiMaterial *curMat = nullptr;
|
||||
aiMesh *curMesh = nullptr;
|
||||
unsigned int curMatFlags = 0;
|
||||
// temporary data - current mesh buffer
|
||||
// TODO move all these to inside loop
|
||||
aiMaterial *curMat = nullptr;
|
||||
aiMesh *curMesh = nullptr;
|
||||
unsigned int curMatFlags = 0;
|
||||
|
||||
std::vector<aiVector3D> curVertices, curNormals, curTangents, curBitangents;
|
||||
std::vector<aiColor4D> curColors;
|
||||
std::vector<aiVector3D> curUVs, curUV2s;
|
||||
std::vector<aiVector3D> curVertices, curNormals, curTangents, curBitangents;
|
||||
std::vector<aiColor4D> curColors;
|
||||
std::vector<aiVector3D> curUVs, curUV2s;
|
||||
|
||||
// some temporary variables
|
||||
int textMeaning = 0;
|
||||
int vertexFormat = 0; // 0 = normal; 1 = 2 tcoords, 2 = tangents
|
||||
bool useColors = false;
|
||||
// some temporary variables
|
||||
// textMeaning is a 15-year-old variable that could've been an enum
|
||||
// int textMeaning = 0; // 0=none? 1=vertices 2=indices
|
||||
// int vertexFormat = 0; // 0 = normal; 1 = 2 tcoords, 2 = tangents
|
||||
bool useColors = false;
|
||||
|
||||
// Parse the XML file
|
||||
for (pugi::xml_node child : root.children()) {
|
||||
if (child.type() == pugi::node_element) {
|
||||
if (!ASSIMP_stricmp(child.name(), "buffer") && (curMat || curMesh)) {
|
||||
// end of previous buffer. A material and a mesh should be there
|
||||
if (!curMat || !curMesh) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: A buffer must contain a mesh and a material");
|
||||
releaseMaterial(&curMat);
|
||||
releaseMesh(&curMesh);
|
||||
} else {
|
||||
materials.push_back(curMat);
|
||||
meshes.push_back(curMesh);
|
||||
}
|
||||
curMat = nullptr;
|
||||
curMesh = nullptr;
|
||||
/*
|
||||
** irrmesh files have a top level <mesh> owning multiple <buffer> nodes.
|
||||
** Each <buffer> contains <material>, <vertices>, and <indices>
|
||||
** <material> tags here directly owns the material data specs
|
||||
** <vertices> are a vertex per line, contains position, UV1 coords, maybe UV2, normal, tangent, bitangent
|
||||
** <boundingbox> is ignored, I think assimp recalculates those?
|
||||
*/
|
||||
|
||||
curVertices.clear();
|
||||
curColors.clear();
|
||||
curNormals.clear();
|
||||
curUV2s.clear();
|
||||
curUVs.clear();
|
||||
curTangents.clear();
|
||||
curBitangents.clear();
|
||||
}
|
||||
// Parse the XML file
|
||||
pugi::xml_node const &meshNode = root.child("mesh");
|
||||
for (pugi::xml_node bufferNode : meshNode.children()) {
|
||||
if (ASSIMP_stricmp(bufferNode.name(), "buffer")) {
|
||||
// Might be a useless warning
|
||||
ASSIMP_LOG_WARN("IRRMESH: Ignoring non buffer node <", bufferNode.name(), "> in mesh declaration");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!ASSIMP_stricmp(child.name(), "material")) {
|
||||
if (curMat) {
|
||||
ASSIMP_LOG_WARN("IRRMESH: Only one material description per buffer, please");
|
||||
releaseMaterial(&curMat);
|
||||
}
|
||||
curMat = ParseMaterial(curMatFlags);
|
||||
}
|
||||
/* no else here! */ if (!ASSIMP_stricmp(child.name(), "vertices")) {
|
||||
pugi::xml_attribute attr = child.attribute("vertexCount");
|
||||
int num = attr.as_int();
|
||||
//int num = reader->getAttributeValueAsInt("vertexCount");
|
||||
curMat = nullptr;
|
||||
curMesh = nullptr;
|
||||
|
||||
if (!num) {
|
||||
// This is possible ... remove the mesh from the list and skip further reading
|
||||
ASSIMP_LOG_WARN("IRRMESH: Found mesh with zero vertices");
|
||||
curVertices.clear();
|
||||
curColors.clear();
|
||||
curNormals.clear();
|
||||
curUV2s.clear();
|
||||
curUVs.clear();
|
||||
curTangents.clear();
|
||||
curBitangents.clear();
|
||||
|
||||
releaseMaterial(&curMat);
|
||||
releaseMesh(&curMesh);
|
||||
textMeaning = 0;
|
||||
continue;
|
||||
}
|
||||
// TODO ensure all three nodes are present and populated
|
||||
// before allocating everything
|
||||
|
||||
curVertices.reserve(num);
|
||||
curNormals.reserve(num);
|
||||
curColors.reserve(num);
|
||||
curUVs.reserve(num);
|
||||
// Get first material node
|
||||
pugi::xml_node materialNode = bufferNode.child("material");
|
||||
if (materialNode) {
|
||||
curMat = ParseMaterial(materialNode, curMatFlags);
|
||||
// Warn if there's more materials
|
||||
if (materialNode.next_sibling("material")) {
|
||||
ASSIMP_LOG_WARN("IRRMESH: Only one material description per buffer, please");
|
||||
}
|
||||
} else {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Buffer must contain one material");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Determine the file format
|
||||
//const char *t = reader->getAttributeValueSafe("type");
|
||||
pugi::xml_attribute t = child.attribute("type");
|
||||
if (!ASSIMP_stricmp("2tcoords", t.name())) {
|
||||
curUV2s.reserve(num);
|
||||
vertexFormat = 1;
|
||||
// Get first vertices node
|
||||
pugi::xml_node verticesNode = bufferNode.child("vertices");
|
||||
if (verticesNode) {
|
||||
pugi::xml_attribute vertexCountAttrib = verticesNode.attribute("vertexCount");
|
||||
int vertexCount = vertexCountAttrib.as_int();
|
||||
if (vertexCount == 0) {
|
||||
// This is possible ... remove the mesh from the list and skip further reading
|
||||
ASSIMP_LOG_WARN("IRRMESH: Found mesh with zero vertices");
|
||||
releaseMaterial(&curMat);
|
||||
// releaseMesh(&curMesh);
|
||||
continue; // Bail out early
|
||||
};
|
||||
|
||||
if (curMatFlags & AI_IRRMESH_EXTRA_2ND_TEXTURE) {
|
||||
// *********************************************************
|
||||
// We have a second texture! So use this UV channel
|
||||
// for it. The 2nd texture can be either a normal
|
||||
// texture (solid_2layer or lightmap_xxx) or a normal
|
||||
// map (normal_..., parallax_...)
|
||||
// *********************************************************
|
||||
int idx = 1;
|
||||
aiMaterial *mat = (aiMaterial *)curMat;
|
||||
curVertices.reserve(vertexCount);
|
||||
curNormals.reserve(vertexCount);
|
||||
curColors.reserve(vertexCount);
|
||||
curUVs.reserve(vertexCount);
|
||||
|
||||
if (curMatFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_LIGHTMAP(0));
|
||||
} else if (curMatFlags & AI_IRRMESH_MAT_normalmap_solid) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_NORMALS(0));
|
||||
} else if (curMatFlags & AI_IRRMESH_MAT_solid_2layer) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_DIFFUSE(1));
|
||||
}
|
||||
}
|
||||
} else if (!ASSIMP_stricmp("tangents", t.name())) {
|
||||
curTangents.reserve(num);
|
||||
curBitangents.reserve(num);
|
||||
vertexFormat = 2;
|
||||
} else if (ASSIMP_stricmp("standard", t.name())) {
|
||||
releaseMaterial(&curMat);
|
||||
ASSIMP_LOG_WARN("IRRMESH: Unknown vertex format");
|
||||
} else
|
||||
vertexFormat = 0;
|
||||
textMeaning = 1;
|
||||
} else if (!ASSIMP_stricmp(child.name(), "indices")) {
|
||||
if (curVertices.empty() && curMat) {
|
||||
releaseMaterial(&curMat);
|
||||
throw DeadlyImportError("IRRMESH: indices must come after vertices");
|
||||
}
|
||||
VertexFormat vertexFormat;
|
||||
// Determine the vertex format
|
||||
pugi::xml_attribute typeAttrib = verticesNode.attribute("type");
|
||||
if (!ASSIMP_stricmp("2tcoords", typeAttrib.value())) {
|
||||
curUV2s.reserve(vertexCount);
|
||||
vertexFormat = VertexFormat::t2coord;
|
||||
if (curMatFlags & AI_IRRMESH_EXTRA_2ND_TEXTURE) {
|
||||
// *********************************************************
|
||||
// We have a second texture! So use this UV channel
|
||||
// for it. The 2nd texture can be either a normal
|
||||
// texture (solid_2layer or lightmap_xxx) or a normal
|
||||
// map (normal_..., parallax_...)
|
||||
// *********************************************************
|
||||
int idx = 1;
|
||||
aiMaterial *mat = (aiMaterial *)curMat;
|
||||
|
||||
textMeaning = 2;
|
||||
if (curMatFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_LIGHTMAP(0));
|
||||
} else if (curMatFlags & AI_IRRMESH_MAT_normalmap_solid) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_NORMALS(0));
|
||||
} else if (curMatFlags & AI_IRRMESH_MAT_solid_2layer) {
|
||||
mat->AddProperty(&idx, 1, AI_MATKEY_UVWSRC_DIFFUSE(1));
|
||||
}
|
||||
}
|
||||
} else if (!ASSIMP_stricmp("tangents", typeAttrib.value())) {
|
||||
curTangents.reserve(vertexCount);
|
||||
curBitangents.reserve(vertexCount);
|
||||
vertexFormat = VertexFormat::tangent;
|
||||
} else if (!ASSIMP_stricmp("standard", typeAttrib.value())) {
|
||||
vertexFormat = VertexFormat::standard;
|
||||
} else {
|
||||
// Unsupported format, discard whole buffer/mesh
|
||||
// If a valid material was read, release it
|
||||
// There is certainly no valid mesh to release at this point
|
||||
releaseMaterial(&curMat);
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Unknown vertex format");
|
||||
continue; // Skip rest of buffer
|
||||
};
|
||||
|
||||
// start a new mesh
|
||||
curMesh = new aiMesh();
|
||||
// We know what format the buffer is in; collect the numbers
|
||||
ParseBufferVertices(verticesNode.text().get(), vertexFormat,
|
||||
curVertices, curNormals,
|
||||
curTangents, curBitangents,
|
||||
curUVs, curUV2s, curColors, useColors);
|
||||
}
|
||||
|
||||
// allocate storage for all faces
|
||||
pugi::xml_attribute attr = child.attribute("indexCount");
|
||||
curMesh->mNumVertices = attr.as_int();
|
||||
if (!curMesh->mNumVertices) {
|
||||
// This is possible ... remove the mesh from the list and skip further reading
|
||||
ASSIMP_LOG_WARN("IRRMESH: Found mesh with zero indices");
|
||||
// Get indices
|
||||
// At this point we have some vertices and a valid material
|
||||
// Collect indices and create aiMesh at the same time
|
||||
pugi::xml_node indicesNode = bufferNode.child("indices");
|
||||
if (indicesNode) {
|
||||
// start a new mesh
|
||||
curMesh = new aiMesh();
|
||||
|
||||
// mesh - away
|
||||
releaseMesh(&curMesh);
|
||||
// allocate storage for all faces
|
||||
pugi::xml_attribute attr = indicesNode.attribute("indexCount");
|
||||
curMesh->mNumVertices = attr.as_int();
|
||||
if (!curMesh->mNumVertices) {
|
||||
// This is possible ... remove the mesh from the list and skip further reading
|
||||
ASSIMP_LOG_WARN("IRRMESH: Found mesh with zero indices");
|
||||
|
||||
// material - away
|
||||
releaseMaterial(&curMat);
|
||||
// mesh - away
|
||||
releaseMesh(&curMesh);
|
||||
|
||||
textMeaning = 0;
|
||||
continue;
|
||||
}
|
||||
// material - away
|
||||
releaseMaterial(&curMat);
|
||||
continue; // Go to next buffer
|
||||
}
|
||||
|
||||
if (curMesh->mNumVertices % 3) {
|
||||
ASSIMP_LOG_WARN("IRRMESH: Number if indices isn't divisible by 3");
|
||||
}
|
||||
if (curMesh->mNumVertices % 3) {
|
||||
ASSIMP_LOG_WARN("IRRMESH: Number if indices isn't divisible by 3");
|
||||
}
|
||||
|
||||
curMesh->mNumFaces = curMesh->mNumVertices / 3;
|
||||
curMesh->mFaces = new aiFace[curMesh->mNumFaces];
|
||||
curMesh->mNumFaces = curMesh->mNumVertices / 3;
|
||||
curMesh->mFaces = new aiFace[curMesh->mNumFaces];
|
||||
|
||||
// setup some members
|
||||
curMesh->mMaterialIndex = (unsigned int)materials.size();
|
||||
curMesh->mPrimitiveTypes = aiPrimitiveType_TRIANGLE;
|
||||
// setup some members
|
||||
curMesh->mMaterialIndex = (unsigned int)materials.size();
|
||||
curMesh->mPrimitiveTypes = aiPrimitiveType_TRIANGLE;
|
||||
|
||||
// allocate storage for all vertices
|
||||
curMesh->mVertices = new aiVector3D[curMesh->mNumVertices];
|
||||
// allocate storage for all vertices
|
||||
curMesh->mVertices = new aiVector3D[curMesh->mNumVertices];
|
||||
|
||||
if (curNormals.size() == curVertices.size()) {
|
||||
curMesh->mNormals = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curTangents.size() == curVertices.size()) {
|
||||
curMesh->mTangents = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curBitangents.size() == curVertices.size()) {
|
||||
curMesh->mBitangents = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curColors.size() == curVertices.size() && useColors) {
|
||||
curMesh->mColors[0] = new aiColor4D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curUVs.size() == curVertices.size()) {
|
||||
curMesh->mTextureCoords[0] = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curUV2s.size() == curVertices.size()) {
|
||||
curMesh->mTextureCoords[1] = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
}
|
||||
//break;
|
||||
if (curNormals.size() == curVertices.size()) {
|
||||
curMesh->mNormals = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curTangents.size() == curVertices.size()) {
|
||||
curMesh->mTangents = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curBitangents.size() == curVertices.size()) {
|
||||
curMesh->mBitangents = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curColors.size() == curVertices.size() && useColors) {
|
||||
curMesh->mColors[0] = new aiColor4D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curUVs.size() == curVertices.size()) {
|
||||
curMesh->mTextureCoords[0] = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
if (curUV2s.size() == curVertices.size()) {
|
||||
curMesh->mTextureCoords[1] = new aiVector3D[curMesh->mNumVertices];
|
||||
}
|
||||
|
||||
//case EXN_TEXT: {
|
||||
const char *sz = child.child_value();
|
||||
if (textMeaning == 1) {
|
||||
textMeaning = 0;
|
||||
// read indices
|
||||
aiFace *curFace = curMesh->mFaces;
|
||||
aiFace *const faceEnd = curMesh->mFaces + curMesh->mNumFaces;
|
||||
|
||||
// read vertices
|
||||
do {
|
||||
SkipSpacesAndLineEnd(&sz);
|
||||
aiVector3D temp;
|
||||
aiColor4D c;
|
||||
aiVector3D *pcV = curMesh->mVertices;
|
||||
aiVector3D *pcN = curMesh->mNormals;
|
||||
aiVector3D *pcT = curMesh->mTangents;
|
||||
aiVector3D *pcB = curMesh->mBitangents;
|
||||
aiColor4D *pcC0 = curMesh->mColors[0];
|
||||
aiVector3D *pcT0 = curMesh->mTextureCoords[0];
|
||||
aiVector3D *pcT1 = curMesh->mTextureCoords[1];
|
||||
|
||||
// Read the vertex position
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
unsigned int curIdx = 0;
|
||||
unsigned int total = 0;
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
// NOTE this might explode for UTF-16 and wchars
|
||||
const char *sz = indicesNode.text().get();
|
||||
// For each index loop over aiMesh faces
|
||||
while (SkipSpacesAndLineEnd(&sz)) {
|
||||
if (curFace >= faceEnd) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Too many indices");
|
||||
break;
|
||||
}
|
||||
// if new face
|
||||
if (!curIdx) {
|
||||
curFace->mNumIndices = 3;
|
||||
curFace->mIndices = new unsigned int[3];
|
||||
}
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
curVertices.push_back(temp);
|
||||
// Read index base 10
|
||||
// function advances the pointer
|
||||
unsigned int idx = strtoul10(sz, &sz);
|
||||
if (idx >= curVertices.size()) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Index out of range");
|
||||
idx = 0;
|
||||
}
|
||||
|
||||
// Read the vertex normals
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
// make up our own indices?
|
||||
curFace->mIndices[curIdx] = total++;
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
// Copy over data to aiMesh
|
||||
*pcV++ = curVertices[idx];
|
||||
if (pcN) *pcN++ = curNormals[idx];
|
||||
if (pcT) *pcT++ = curTangents[idx];
|
||||
if (pcB) *pcB++ = curBitangents[idx];
|
||||
if (pcC0) *pcC0++ = curColors[idx];
|
||||
if (pcT0) *pcT0++ = curUVs[idx];
|
||||
if (pcT1) *pcT1++ = curUV2s[idx];
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
curNormals.push_back(temp);
|
||||
// start new face
|
||||
if (++curIdx == 3) {
|
||||
++curFace;
|
||||
curIdx = 0;
|
||||
}
|
||||
}
|
||||
// We should be at the end of mFaces
|
||||
if (curFace != faceEnd)
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Not enough indices");
|
||||
}
|
||||
|
||||
// read the vertex colors
|
||||
uint32_t clr = strtoul16(sz, &sz);
|
||||
ColorFromARGBPacked(clr, c);
|
||||
// Finish processing the mesh - do some small material workarounds
|
||||
if (curMatFlags & AI_IRRMESH_MAT_trans_vertex_alpha && !useColors) {
|
||||
// Take the opacity value of the current material
|
||||
// from the common vertex color alpha
|
||||
aiMaterial *mat = (aiMaterial *)curMat;
|
||||
mat->AddProperty(&curColors[0].a, 1, AI_MATKEY_OPACITY);
|
||||
}
|
||||
// textMeaning = 2;
|
||||
|
||||
if (!curColors.empty() && c != *(curColors.end() - 1))
|
||||
useColors = true;
|
||||
// end of previous buffer. A material and a mesh should be there
|
||||
if (!curMat || !curMesh) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: A buffer must contain a mesh and a material");
|
||||
releaseMaterial(&curMat);
|
||||
releaseMesh(&curMesh);
|
||||
} else {
|
||||
materials.push_back(curMat);
|
||||
meshes.push_back(curMesh);
|
||||
}
|
||||
}
|
||||
|
||||
curColors.push_back(c);
|
||||
SkipSpaces(&sz);
|
||||
// If one is empty then so is the other
|
||||
if (materials.empty() || meshes.empty()) {
|
||||
throw DeadlyImportError("IRRMESH: Unable to read a mesh from this file");
|
||||
}
|
||||
|
||||
// read the first UV coordinate set
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
// now generate the output scene
|
||||
pScene->mNumMeshes = (unsigned int)meshes.size();
|
||||
pScene->mMeshes = new aiMesh *[pScene->mNumMeshes];
|
||||
for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) {
|
||||
pScene->mMeshes[i] = meshes[i];
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.z = 0.f;
|
||||
temp.y = 1.f - temp.y; // DX to OGL
|
||||
curUVs.push_back(temp);
|
||||
// clean this value ...
|
||||
pScene->mMeshes[i]->mNumUVComponents[3] = 0;
|
||||
}
|
||||
|
||||
// read the (optional) second UV coordinate set
|
||||
if (vertexFormat == 1) {
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
pScene->mNumMaterials = (unsigned int)materials.size();
|
||||
pScene->mMaterials = new aiMaterial *[pScene->mNumMaterials];
|
||||
::memcpy(pScene->mMaterials, &materials[0], sizeof(void *) * pScene->mNumMaterials);
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
temp.y = 1.f - temp.y; // DX to OGL
|
||||
curUV2s.push_back(temp);
|
||||
}
|
||||
// read optional tangent and bitangent vectors
|
||||
else if (vertexFormat == 2) {
|
||||
// tangents
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
pScene->mRootNode = new aiNode();
|
||||
pScene->mRootNode->mName.Set("<IRRMesh>");
|
||||
pScene->mRootNode->mNumMeshes = pScene->mNumMeshes;
|
||||
pScene->mRootNode->mMeshes = new unsigned int[pScene->mNumMeshes];
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) {
|
||||
pScene->mRootNode->mMeshes[i] = i;
|
||||
};
|
||||
}
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.y *= -1.0f;
|
||||
curTangents.push_back(temp);
|
||||
void IRRMeshImporter::ParseBufferVertices(const char *sz, VertexFormat vertexFormat,
|
||||
std::vector<aiVector3D> &vertices, std::vector<aiVector3D> &normals,
|
||||
std::vector<aiVector3D> &tangents, std::vector<aiVector3D> &bitangents,
|
||||
std::vector<aiVector3D> &UVs, std::vector<aiVector3D> &UV2s,
|
||||
std::vector<aiColor4D> &colors, bool &useColors) {
|
||||
// read vertices
|
||||
do {
|
||||
SkipSpacesAndLineEnd(&sz);
|
||||
aiVector3D temp;
|
||||
aiColor4D c;
|
||||
|
||||
// bitangents
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
// Read the vertex position
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.y *= -1.0f;
|
||||
curBitangents.push_back(temp);
|
||||
}
|
||||
}
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
vertices.push_back(temp);
|
||||
|
||||
/* IMPORTANT: We assume that each vertex is specified in one
|
||||
line. So we can skip the rest of the line - unknown vertex
|
||||
elements are ignored.
|
||||
*/
|
||||
// Read the vertex normals
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
while (SkipLine(&sz));
|
||||
} else if (textMeaning == 2) {
|
||||
textMeaning = 0;
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
// read indices
|
||||
aiFace *curFace = curMesh->mFaces;
|
||||
aiFace *const faceEnd = curMesh->mFaces + curMesh->mNumFaces;
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
normals.push_back(temp);
|
||||
|
||||
aiVector3D *pcV = curMesh->mVertices;
|
||||
aiVector3D *pcN = curMesh->mNormals;
|
||||
aiVector3D *pcT = curMesh->mTangents;
|
||||
aiVector3D *pcB = curMesh->mBitangents;
|
||||
aiColor4D *pcC0 = curMesh->mColors[0];
|
||||
aiVector3D *pcT0 = curMesh->mTextureCoords[0];
|
||||
aiVector3D *pcT1 = curMesh->mTextureCoords[1];
|
||||
// read the vertex colors
|
||||
uint32_t clr = strtoul16(sz, &sz);
|
||||
ColorFromARGBPacked(clr, c);
|
||||
|
||||
unsigned int curIdx = 0;
|
||||
unsigned int total = 0;
|
||||
while (SkipSpacesAndLineEnd(&sz)) {
|
||||
if (curFace >= faceEnd) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Too many indices");
|
||||
break;
|
||||
}
|
||||
if (!curIdx) {
|
||||
curFace->mNumIndices = 3;
|
||||
curFace->mIndices = new unsigned int[3];
|
||||
}
|
||||
// If we're pushing more than one distinct color
|
||||
if (!colors.empty() && c != *(colors.end() - 1))
|
||||
useColors = true;
|
||||
|
||||
unsigned int idx = strtoul10(sz, &sz);
|
||||
if (idx >= curVertices.size()) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Index out of range");
|
||||
idx = 0;
|
||||
}
|
||||
colors.push_back(c);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
curFace->mIndices[curIdx] = total++;
|
||||
// read the first UV coordinate set
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
*pcV++ = curVertices[idx];
|
||||
if (pcN) *pcN++ = curNormals[idx];
|
||||
if (pcT) *pcT++ = curTangents[idx];
|
||||
if (pcB) *pcB++ = curBitangents[idx];
|
||||
if (pcC0) *pcC0++ = curColors[idx];
|
||||
if (pcT0) *pcT0++ = curUVs[idx];
|
||||
if (pcT1) *pcT1++ = curUV2s[idx];
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.z = 0.f;
|
||||
temp.y = 1.f - temp.y; // DX to OGL
|
||||
UVs.push_back(temp);
|
||||
|
||||
if (++curIdx == 3) {
|
||||
++curFace;
|
||||
curIdx = 0;
|
||||
}
|
||||
}
|
||||
// NOTE these correspond to specific S3DVertex* structs in the Irrlicht source code
|
||||
// So by definition, all buffers have either UV2 or tangents or neither
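// (Assumed Irrlicht struct names, for orientation only:
//  "standard" -> S3DVertex, "2tcoords" -> S3DVertex2TCoords,
//  "tangents" -> S3DVertexTangents.)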
|
||||
// read the (optional) second UV coordinate set
|
||||
if (vertexFormat == VertexFormat::t2coord) {
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
if (curFace != faceEnd)
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Not enough indices");
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
temp.y = 1.f - temp.y; // DX to OGL
|
||||
UV2s.push_back(temp);
|
||||
}
|
||||
// read optional tangent and bitangent vectors
|
||||
else if (vertexFormat == VertexFormat::tangent) {
|
||||
// tangents
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
// Finish processing the mesh - do some small material workarounds
|
||||
if (curMatFlags & AI_IRRMESH_MAT_trans_vertex_alpha && !useColors) {
|
||||
// Take the opacity value of the current material
|
||||
// from the common vertex color alpha
|
||||
aiMaterial *mat = (aiMaterial *)curMat;
|
||||
mat->AddProperty(&curColors[0].a, 1, AI_MATKEY_OPACITY);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
// End of the last buffer. A material and a mesh should be there
|
||||
if (curMat || curMesh) {
|
||||
if (!curMat || !curMesh) {
|
||||
ASSIMP_LOG_ERROR("IRRMESH: A buffer must contain a mesh and a material");
|
||||
releaseMaterial(&curMat);
|
||||
releaseMesh(&curMesh);
|
||||
} else {
|
||||
materials.push_back(curMat);
|
||||
meshes.push_back(curMesh);
|
||||
}
|
||||
}
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.y *= -1.0f;
|
||||
tangents.push_back(temp);
|
||||
|
||||
if (materials.empty()) {
|
||||
throw DeadlyImportError("IRRMESH: Unable to read a mesh from this file");
|
||||
}
|
||||
// bitangents
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.x);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
// now generate the output scene
|
||||
pScene->mNumMeshes = (unsigned int)meshes.size();
|
||||
pScene->mMeshes = new aiMesh *[pScene->mNumMeshes];
|
||||
for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) {
|
||||
pScene->mMeshes[i] = meshes[i];
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.z);
|
||||
SkipSpaces(&sz);
|
||||
|
||||
// clean this value ...
|
||||
pScene->mMeshes[i]->mNumUVComponents[3] = 0;
|
||||
}
|
||||
|
||||
pScene->mNumMaterials = (unsigned int)materials.size();
|
||||
pScene->mMaterials = new aiMaterial *[pScene->mNumMaterials];
|
||||
::memcpy(pScene->mMaterials, &materials[0], sizeof(void *) * pScene->mNumMaterials);
|
||||
|
||||
pScene->mRootNode = new aiNode();
|
||||
pScene->mRootNode->mName.Set("<IRRMesh>");
|
||||
pScene->mRootNode->mNumMeshes = pScene->mNumMeshes;
|
||||
pScene->mRootNode->mMeshes = new unsigned int[pScene->mNumMeshes];
|
||||
|
||||
for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) {
|
||||
pScene->mRootNode->mMeshes[i] = i;
|
||||
}
|
||||
sz = fast_atoreal_move<float>(sz, (float &)temp.y);
|
||||
SkipSpaces(&sz);
|
||||
temp.y *= -1.0f;
|
||||
bitangents.push_back(temp);
|
||||
}
|
||||
} while (SkipLine(&sz));
|
||||
/* IMPORTANT: We assume that each vertex is specified in one
|
||||
line. So we can skip the rest of the line - unknown vertex
|
||||
elements are ignored.
|
||||
*/
|
||||
}
|
||||
|
||||
#endif // !! ASSIMP_BUILD_NO_IRRMESH_IMPORTER
|
||||
|
|
|
@ -85,6 +85,19 @@ protected:
|
|||
*/
|
||||
void InternReadFile(const std::string &pFile, aiScene *pScene,
|
||||
IOSystem *pIOHandler) override;
|
||||
|
||||
private:
|
||||
enum class VertexFormat {
|
||||
standard = 0, // "standard" - also noted as 'normal' format elsewhere
|
||||
t2coord = 1, // "2tcoords" - standard + 2 UV maps
|
||||
tangent = 2, // "tangents" - standard + tangents and bitangents
|
||||
};
|
||||
|
||||
void ParseBufferVertices(const char *sz, VertexFormat vertexFormat,
|
||||
std::vector<aiVector3D> &vertices, std::vector<aiVector3D> &normals,
|
||||
std::vector<aiVector3D> &tangents, std::vector<aiVector3D> &bitangents,
|
||||
std::vector<aiVector3D> &UVs, std::vector<aiVector3D> &UV2s,
|
||||
std::vector<aiColor4D> &colors, bool &useColors);
|
||||
};
|
||||
|
||||
} // end of namespace Assimp
|
||||
|
|
|
@ -43,302 +43,302 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
* @brief Shared utilities for the IRR and IRRMESH loaders
|
||||
*/
|
||||
|
||||
//This section should be excluded only if both the Irrlicht AND the Irrlicht Mesh importers were omitted.
|
||||
// This section should be excluded only if both the Irrlicht AND the Irrlicht Mesh importers were omitted.
|
||||
#if !(defined(ASSIMP_BUILD_NO_IRR_IMPORTER) && defined(ASSIMP_BUILD_NO_IRRMESH_IMPORTER))
|
||||
|
||||
#include "IRRShared.h"
|
||||
#include <assimp/ParsingUtils.h>
|
||||
#include <assimp/fast_atof.h>
|
||||
#include <assimp/DefaultLogger.hpp>
|
||||
#include <assimp/material.h>
|
||||
#include <assimp/DefaultLogger.hpp>
|
||||
|
||||
using namespace Assimp;
|
||||
|
||||
// Transformation matrix to convert from Assimp to IRR space
|
||||
const aiMatrix4x4 Assimp::AI_TO_IRR_MATRIX = aiMatrix4x4 (
|
||||
1.0f, 0.0f, 0.0f, 0.0f,
|
||||
0.0f, 0.0f, 1.0f, 0.0f,
|
||||
0.0f, 1.0f, 0.0f, 0.0f,
|
||||
0.0f, 0.0f, 0.0f, 1.0f);
|
||||
const aiMatrix4x4 Assimp::AI_TO_IRR_MATRIX = aiMatrix4x4(
|
||||
1.0f, 0.0f, 0.0f, 0.0f,
|
||||
0.0f, 0.0f, 1.0f, 0.0f,
|
||||
0.0f, 1.0f, 0.0f, 0.0f,
|
||||
0.0f, 0.0f, 0.0f, 1.0f);
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a property in hexadecimal format (i.e. ffffffff)
|
||||
void IrrlichtBase::ReadHexProperty(HexProperty &out ) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
void IrrlichtBase::ReadHexProperty(HexProperty &out, pugi::xml_node& hexnode) {
|
||||
for (pugi::xml_attribute attrib : hexnode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string( attrib.value() );
|
||||
} else if (!ASSIMP_stricmp(attrib.name(),"value")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// parse the hexadecimal value
|
||||
out.value = strtoul16(attrib.name());
|
||||
out.value = strtoul16(attrib.value());
|
||||
}
|
||||
}
|
||||
}
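For reference, a hedged sketch of the element this reads (values illustrative); note that the fix above parses the hex number from the value attribute rather than from the attribute name:

// <color name="Diffuse" value="ffc8c8c8"/>
//   name  -> out.name  ("Diffuse")
//   value -> out.value (parsed with strtoul16; the old code mistakenly
//            passed attrib.name() here)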
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a decimal property
|
||||
void IrrlichtBase::ReadIntProperty(IntProperty & out) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.value(),"value")) {
|
||||
void IrrlichtBase::ReadIntProperty(IntProperty &out, pugi::xml_node& intnode) {
|
||||
for (pugi::xml_attribute attrib : intnode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// parse the int value
|
||||
out.value = strtol10(attrib.name());
|
||||
out.value = strtol10(attrib.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a string property
|
||||
void IrrlichtBase::ReadStringProperty( StringProperty& out) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
void IrrlichtBase::ReadStringProperty(StringProperty &out, pugi::xml_node& stringnode) {
|
||||
for (pugi::xml_attribute attrib : stringnode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// simply copy the string
|
||||
out.value = std::string(attrib.value());
|
||||
out.value = std::string(attrib.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a boolean property
|
||||
void IrrlichtBase::ReadBoolProperty(BoolProperty &out) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")){
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
void IrrlichtBase::ReadBoolProperty(BoolProperty &out, pugi::xml_node& boolnode) {
|
||||
for (pugi::xml_attribute attrib : boolnode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// true or false, case insensitive
|
||||
out.value = (ASSIMP_stricmp(attrib.value(), "true") ? false : true);
|
||||
out.value = (ASSIMP_stricmp(attrib.value(), "true") ? false : true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a float property
|
||||
void IrrlichtBase::ReadFloatProperty(FloatProperty &out) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
void IrrlichtBase::ReadFloatProperty(FloatProperty &out, pugi::xml_node &floatnode) {
|
||||
for (pugi::xml_attribute attrib : floatnode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// just parse the float
|
||||
out.value = fast_atof(attrib.value());
|
||||
out.value = fast_atof(attrib.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// read a vector property
|
||||
void IrrlichtBase::ReadVectorProperty( VectorProperty &out ) {
|
||||
for (pugi::xml_attribute attrib : mNode->attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
void IrrlichtBase::ReadVectorProperty(VectorProperty &out, pugi::xml_node& vectornode) {
|
||||
for (pugi::xml_attribute attrib : vectornode.attributes()) {
|
||||
if (!ASSIMP_stricmp(attrib.name(), "name")) {
|
||||
out.name = std::string(attrib.value());
|
||||
} else if (!ASSIMP_stricmp(attrib.name(), "value")) {
|
||||
// three floats, separated with commas
|
||||
const char *ptr = attrib.value();
|
||||
|
||||
SkipSpaces(&ptr);
|
||||
ptr = fast_atoreal_move<float>( ptr,(float&)out.value.x );
|
||||
ptr = fast_atoreal_move<float>(ptr, (float &)out.value.x);
|
||||
SkipSpaces(&ptr);
|
||||
if (',' != *ptr) {
|
||||
ASSIMP_LOG_ERROR("IRR(MESH): Expected comma in vector definition");
|
||||
} else {
|
||||
SkipSpaces(ptr + 1, &ptr);
|
||||
}
|
||||
ptr = fast_atoreal_move<float>( ptr,(float&)out.value.y );
|
||||
} else {
|
||||
SkipSpaces(ptr + 1, &ptr);
|
||||
}
|
||||
ptr = fast_atoreal_move<float>(ptr, (float &)out.value.y);
|
||||
SkipSpaces(&ptr);
|
||||
if (',' != *ptr) {
|
||||
ASSIMP_LOG_ERROR("IRR(MESH): Expected comma in vector definition");
|
||||
} else {
|
||||
SkipSpaces(ptr + 1, &ptr);
|
||||
}
|
||||
ptr = fast_atoreal_move<float>( ptr,(float&)out.value.z );
|
||||
} else {
|
||||
SkipSpaces(ptr + 1, &ptr);
|
||||
}
|
||||
ptr = fast_atoreal_move<float>(ptr, (float &)out.value.z);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Convert a string to a proper aiMappingMode
|
||||
int ConvertMappingMode(const std::string& mode) {
|
||||
int ConvertMappingMode(const std::string &mode) {
|
||||
if (mode == "texture_clamp_repeat") {
|
||||
return aiTextureMapMode_Wrap;
|
||||
} else if (mode == "texture_clamp_mirror") {
|
||||
return aiTextureMapMode_Mirror;
|
||||
}
|
||||
} else if (mode == "texture_clamp_mirror") {
|
||||
return aiTextureMapMode_Mirror;
|
||||
}
|
||||
|
||||
return aiTextureMapMode_Clamp;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Parse a material from the XML file
|
||||
aiMaterial* IrrlichtBase::ParseMaterial(unsigned int& matFlags) {
|
||||
aiMaterial* mat = new aiMaterial();
|
||||
aiMaterial *IrrlichtBase::ParseMaterial(pugi::xml_node& materialNode, unsigned int &matFlags) {
|
||||
aiMaterial *mat = new aiMaterial();
|
||||
aiColor4D clr;
|
||||
aiString s;
|
||||
|
||||
matFlags = 0; // zero output flags
|
||||
int cnt = 0; // number of used texture channels
|
||||
int cnt = 0; // number of used texture channels
|
||||
unsigned int nd = 0;
|
||||
|
||||
for (pugi::xml_node child : mNode->children()) {
|
||||
if (!ASSIMP_stricmp(child.name(), "color")) { // Hex properties
|
||||
HexProperty prop;
|
||||
ReadHexProperty(prop);
|
||||
if (prop.name == "Diffuse") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_DIFFUSE);
|
||||
} else if (prop.name == "Ambient") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_AMBIENT);
|
||||
} else if (prop.name == "Specular") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_SPECULAR);
|
||||
}
|
||||
for (pugi::xml_node child : materialNode.children()) {
|
||||
if (!ASSIMP_stricmp(child.name(), "color")) { // Hex properties
|
||||
HexProperty prop;
|
||||
ReadHexProperty(prop, child);
|
||||
if (prop.name == "Diffuse") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_DIFFUSE);
|
||||
} else if (prop.name == "Ambient") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_AMBIENT);
|
||||
} else if (prop.name == "Specular") {
|
||||
ColorFromARGBPacked(prop.value, clr);
|
||||
mat->AddProperty(&clr, 1, AI_MATKEY_COLOR_SPECULAR);
|
||||
}
|
||||
|
||||
// NOTE: The 'emissive' property causes problems. It is
|
||||
// often != 0, even if there is obviously no light
|
||||
// emitted by the described surface. In fact I think
|
||||
// IRRLICHT ignores this property, too.
|
||||
// NOTE: The 'emissive' property causes problems. It is
|
||||
// often != 0, even if there is obviously no light
|
||||
// emitted by the described surface. In fact I think
|
||||
// IRRLICHT ignores this property, too.
|
||||
#if 0
|
||||
else if (prop.name == "Emissive") {
|
||||
ColorFromARGBPacked(prop.value,clr);
|
||||
mat->AddProperty(&clr,1,AI_MATKEY_COLOR_EMISSIVE);
|
||||
}
|
||||
#endif
|
||||
} else if (!ASSIMP_stricmp(child.name(), "float")) { // Float properties
|
||||
FloatProperty prop;
|
||||
ReadFloatProperty(prop);
|
||||
if (prop.name == "Shininess") {
|
||||
mat->AddProperty(&prop.value, 1, AI_MATKEY_SHININESS);
|
||||
}
|
||||
} else if (!ASSIMP_stricmp(child.name(), "bool")) { // Bool properties
|
||||
BoolProperty prop;
|
||||
ReadBoolProperty(prop);
|
||||
if (prop.name == "Wireframe") {
|
||||
int val = (prop.value ? true : false);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_ENABLE_WIREFRAME);
|
||||
} else if (prop.name == "GouraudShading") {
|
||||
int val = (prop.value ? aiShadingMode_Gouraud : aiShadingMode_NoShading);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_SHADING_MODEL);
|
||||
} else if (prop.name == "BackfaceCulling") {
|
||||
int val = (!prop.value);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_TWOSIDED);
|
||||
}
|
||||
} else if (!ASSIMP_stricmp(child.name(), "texture") ||
|
||||
!ASSIMP_stricmp(child.name(), "enum")) { // String properties - textures and texture related properties
|
||||
StringProperty prop;
|
||||
ReadStringProperty(prop);
|
||||
if (prop.value.length()) {
|
||||
// material type (shader)
|
||||
if (prop.name == "Type") {
|
||||
if (prop.value == "solid") {
|
||||
// default material ...
|
||||
} else if (prop.value == "trans_vertex_alpha") {
|
||||
matFlags = AI_IRRMESH_MAT_trans_vertex_alpha;
|
||||
} else if (prop.value == "lightmap") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap;
|
||||
} else if (prop.value == "solid_2layer") {
|
||||
matFlags = AI_IRRMESH_MAT_solid_2layer;
|
||||
} else if (prop.value == "lightmap_m2") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_m2;
|
||||
} else if (prop.value == "lightmap_m4") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_m4;
|
||||
} else if (prop.value == "lightmap_light") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light;
|
||||
} else if (prop.value == "lightmap_light_m2") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light_m2;
|
||||
} else if (prop.value == "lightmap_light_m4") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light_m4;
|
||||
} else if (prop.value == "lightmap_add") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_add;
|
||||
} else if (prop.value == "normalmap_solid" ||
|
||||
prop.value == "parallaxmap_solid") { // Normal and parallax maps are treated equally
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_solid;
|
||||
} else if (prop.value == "normalmap_trans_vertex_alpha" ||
|
||||
prop.value == "parallaxmap_trans_vertex_alpha") {
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_tva;
|
||||
} else if (prop.value == "normalmap_trans_add" ||
|
||||
prop.value == "parallaxmap_trans_add") {
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_ta;
|
||||
} else {
|
||||
ASSIMP_LOG_WARN("IRRMat: Unrecognized material type: ", prop.value);
|
||||
}
|
||||
}
|
||||
} else if (!ASSIMP_stricmp(child.name(), "float")) { // Float properties
|
||||
FloatProperty prop;
|
||||
ReadFloatProperty(prop, child);
|
||||
if (prop.name == "Shininess") {
|
||||
mat->AddProperty(&prop.value, 1, AI_MATKEY_SHININESS);
|
||||
}
|
||||
} else if (!ASSIMP_stricmp(child.name(), "bool")) { // Bool properties
|
||||
BoolProperty prop;
|
||||
ReadBoolProperty(prop, child);
|
||||
if (prop.name == "Wireframe") {
|
||||
int val = (prop.value ? true : false);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_ENABLE_WIREFRAME);
|
||||
} else if (prop.name == "GouraudShading") {
|
||||
int val = (prop.value ? aiShadingMode_Gouraud : aiShadingMode_NoShading);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_SHADING_MODEL);
|
||||
} else if (prop.name == "BackfaceCulling") {
|
||||
int val = (!prop.value);
|
||||
mat->AddProperty(&val, 1, AI_MATKEY_TWOSIDED);
|
||||
}
|
||||
} else if (!ASSIMP_stricmp(child.name(), "texture") ||
|
||||
!ASSIMP_stricmp(child.name(), "enum")) { // String properties - textures and texture related properties
|
||||
StringProperty prop;
|
||||
ReadStringProperty(prop, child);
|
||||
if (prop.value.length()) {
|
||||
// material type (shader)
|
||||
if (prop.name == "Type") {
|
||||
if (prop.value == "solid") {
|
||||
// default material ...
|
||||
} else if (prop.value == "trans_vertex_alpha") {
|
||||
matFlags = AI_IRRMESH_MAT_trans_vertex_alpha;
|
||||
} else if (prop.value == "lightmap") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap;
|
||||
} else if (prop.value == "solid_2layer") {
|
||||
matFlags = AI_IRRMESH_MAT_solid_2layer;
|
||||
} else if (prop.value == "lightmap_m2") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_m2;
|
||||
} else if (prop.value == "lightmap_m4") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_m4;
|
||||
} else if (prop.value == "lightmap_light") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light;
|
||||
} else if (prop.value == "lightmap_light_m2") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light_m2;
|
||||
} else if (prop.value == "lightmap_light_m4") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_light_m4;
|
||||
} else if (prop.value == "lightmap_add") {
|
||||
matFlags = AI_IRRMESH_MAT_lightmap_add;
|
||||
} else if (prop.value == "normalmap_solid" ||
|
||||
prop.value == "parallaxmap_solid") { // Normal and parallax maps are treated equally
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_solid;
|
||||
} else if (prop.value == "normalmap_trans_vertex_alpha" ||
|
||||
prop.value == "parallaxmap_trans_vertex_alpha") {
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_tva;
|
||||
} else if (prop.value == "normalmap_trans_add" ||
|
||||
prop.value == "parallaxmap_trans_add") {
|
||||
matFlags = AI_IRRMESH_MAT_normalmap_ta;
|
||||
} else {
|
||||
ASSIMP_LOG_WARN("IRRMat: Unrecognized material type: ", prop.value);
|
||||
}
|
||||
}
|
||||
|
||||
// Up to 4 texture channels are supported
|
||||
if (prop.name == "Texture1") {
|
||||
// Always accept the primary texture channel
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(0));
|
||||
} else if (prop.name == "Texture2" && cnt == 1) {
|
||||
// 2-layer material lightmapped?
|
||||
if (matFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_LIGHTMAP(0));
|
||||
// Up to 4 texture channels are supported
|
||||
if (prop.name == "Texture1") {
|
||||
// Always accept the primary texture channel
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(0));
|
||||
} else if (prop.name == "Texture2" && cnt == 1) {
|
||||
// 2-layer material lightmapped?
|
||||
if (matFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_LIGHTMAP(0));
|
||||
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else if (matFlags & AI_IRRMESH_MAT_normalmap_solid) { // alternatively: normal or parallax mapping
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_NORMALS(0));
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else if (matFlags & AI_IRRMESH_MAT_normalmap_solid) { // alternatively: normal or parallax mapping
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_NORMALS(0));
|
||||
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else if (matFlags & AI_IRRMESH_MAT_solid_2layer) { // or just as second diffuse texture
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(1));
|
||||
++nd;
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else if (matFlags & AI_IRRMESH_MAT_solid_2layer) { // or just as second diffuse texture
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(1));
|
||||
++nd;
|
||||
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else {
|
||||
ASSIMP_LOG_WARN("IRRmat: Skipping second texture");
|
||||
}
|
||||
} else if (prop.name == "Texture3" && cnt == 2) {
|
||||
// Irrlicht does not seem to use these channels.
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(nd + 1));
|
||||
} else if (prop.name == "Texture4" && cnt == 3) {
|
||||
// Irrlicht does not seem to use these channels.
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(nd + 2));
|
||||
}
|
||||
// set the corresponding material flag
|
||||
matFlags |= AI_IRRMESH_EXTRA_2ND_TEXTURE;
|
||||
} else {
|
||||
ASSIMP_LOG_WARN("IRRmat: Skipping second texture");
|
||||
}
|
||||
} else if (prop.name == "Texture3" && cnt == 2) {
|
||||
// Irrlicht does not seem to use these channels.
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(nd + 1));
|
||||
} else if (prop.name == "Texture4" && cnt == 3) {
|
||||
// Irrlicht does not seem to use these channels.
|
||||
++cnt;
|
||||
s.Set(prop.value);
|
||||
mat->AddProperty(&s, AI_MATKEY_TEXTURE_DIFFUSE(nd + 2));
|
||||
}
|
||||
|
||||
// Texture mapping options
|
||||
if (prop.name == "TextureWrap1" && cnt >= 1) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(0));
|
||||
} else if (prop.name == "TextureWrap2" && cnt >= 2) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
if (matFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_LIGHTMAP(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_LIGHTMAP(0));
|
||||
} else if (matFlags & (AI_IRRMESH_MAT_normalmap_solid)) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_NORMALS(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_NORMALS(0));
|
||||
} else if (matFlags & AI_IRRMESH_MAT_solid_2layer) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(1));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(1));
|
||||
}
|
||||
} else if (prop.name == "TextureWrap3" && cnt >= 3) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(nd + 1));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(nd + 1));
|
||||
} else if (prop.name == "TextureWrap4" && cnt >= 4) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(nd + 2));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(nd + 2));
|
||||
}
|
||||
}
|
||||
}
|
||||
//break;
|
||||
/*case EXN_ELEMENT_END:
|
||||
// Texture mapping options
|
||||
if (prop.name == "TextureWrap1" && cnt >= 1) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(0));
|
||||
} else if (prop.name == "TextureWrap2" && cnt >= 2) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
if (matFlags & AI_IRRMESH_MAT_lightmap) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_LIGHTMAP(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_LIGHTMAP(0));
|
||||
} else if (matFlags & (AI_IRRMESH_MAT_normalmap_solid)) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_NORMALS(0));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_NORMALS(0));
|
||||
} else if (matFlags & AI_IRRMESH_MAT_solid_2layer) {
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(1));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(1));
|
||||
}
|
||||
} else if (prop.name == "TextureWrap3" && cnt >= 3) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(nd + 1));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(nd + 1));
|
||||
} else if (prop.name == "TextureWrap4" && cnt >= 4) {
|
||||
int map = ConvertMappingMode(prop.value);
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_U_DIFFUSE(nd + 2));
|
||||
mat->AddProperty(&map, 1, AI_MATKEY_MAPPINGMODE_V_DIFFUSE(nd + 2));
|
||||
}
|
||||
}
|
||||
}
|
||||
// break;
|
||||
/*case EXN_ELEMENT_END:
|
||||
|
||||
// Assume there are no further nested nodes in <material> elements
|
||||
if ( !ASSIMP_stricmp(reader->getNodeName(),"material") ||
|
||||
|
@ -378,8 +378,8 @@ aiMaterial* IrrlichtBase::ParseMaterial(unsigned int& matFlags) {
|
|||
break;
|
||||
}
|
||||
}*/
|
||||
}
|
||||
ASSIMP_LOG_ERROR("IRRMESH: Unexpected end of file. Material is not complete");
|
||||
}
|
||||
//ASSIMP_LOG_ERROR("IRRMESH: Unexpected end of file. Material is not complete");
|
||||
|
||||
return mat;
|
||||
}
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
|
||||
|
||||
/** @file IRRShared.h
|
||||
* @brief Shared utilities for the IRR and IRRMESH loaders
|
||||
*/
|
||||
* @brief Shared utilities for the IRR and IRRMESH loaders
|
||||
*/
|
||||
|
||||
#ifndef INCLUDED_AI_IRRSHARED_H
|
||||
#define INCLUDED_AI_IRRSHARED_H
|
||||
|
@ -58,8 +58,7 @@ extern const aiMatrix4x4 AI_TO_IRR_MATRIX;
|
|||
*/
|
||||
class IrrlichtBase {
|
||||
protected:
|
||||
IrrlichtBase() :
|
||||
mNode(nullptr) {
|
||||
IrrlichtBase() {
|
||||
// empty
|
||||
}
|
||||
|
||||
|
@ -82,25 +81,25 @@ protected:
|
|||
|
||||
/// XML reader instance
|
||||
XmlParser mParser;
|
||||
pugi::xml_node *mNode;
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Parse a material description from the XML
|
||||
* @return The created material
|
||||
* @param matFlags Receives AI_IRRMESH_MAT_XX flags
|
||||
*/
|
||||
aiMaterial *ParseMaterial(unsigned int &matFlags);
|
||||
aiMaterial *ParseMaterial(pugi::xml_node &materialNode, unsigned int &matFlags);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Read a property of the specified type from the given XML element.
|
||||
* @param out Receives output data
|
||||
* @param node XML element whose attributes contain the property data
|
||||
*/
|
||||
void ReadHexProperty(HexProperty &out);
|
||||
void ReadStringProperty(StringProperty &out);
|
||||
void ReadBoolProperty(BoolProperty &out);
|
||||
void ReadFloatProperty(FloatProperty &out);
|
||||
void ReadVectorProperty(VectorProperty &out);
|
||||
void ReadIntProperty(IntProperty &out);
|
||||
void ReadHexProperty(HexProperty &out, pugi::xml_node& hexnode);
|
||||
void ReadStringProperty(StringProperty &out, pugi::xml_node& stringnode);
|
||||
void ReadBoolProperty(BoolProperty &out, pugi::xml_node& boolnode);
|
||||
void ReadFloatProperty(FloatProperty &out, pugi::xml_node& floatnode);
|
||||
void ReadVectorProperty(VectorProperty &out, pugi::xml_node& vectornode);
|
||||
void ReadIntProperty(IntProperty &out, pugi::xml_node& intnode);
|
||||
};
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
|
|
@ -632,18 +632,17 @@ void LWSImporter::InternReadFile(const std::string &pFile, aiScene *pScene, IOSy
|
|||
nodes.push_back(d);
|
||||
}
|
||||
ASSIMP_LOG_ERROR("LWS: Unexpected keyword: \'Channel\'");
|
||||
} else {
|
||||
// important: index of channel
|
||||
nodes.back().channels.emplace_back();
|
||||
LWO::Envelope &env = nodes.back().channels.back();
|
||||
|
||||
env.index = strtoul10(c);
|
||||
|
||||
// currently we can just interpret the standard channels 0...9
|
||||
// (hack) assume that index-i yields the binary channel type from LWO
|
||||
env.type = (LWO::EnvelopeType)(env.index + 1);
|
||||
}
|
||||
|
||||
// important: index of channel
|
||||
nodes.back().channels.emplace_back();
|
||||
LWO::Envelope &env = nodes.back().channels.back();
|
||||
|
||||
env.index = strtoul10(c);
|
||||
|
||||
// currently we can just interpret the standard channels 0...9
|
||||
// (hack) assume that index-i yields the binary channel type from LWO
|
||||
env.type = (LWO::EnvelopeType)(env.index + 1);
|
||||
|
||||
}
|
||||
// 'Envelope': a single animation channel
|
||||
else if ((*it).tokens[0] == "Envelope") {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
Open Asset Import Library (assimp)
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
Copyright (c) 2006-2023, assimp team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@ -87,7 +87,7 @@ MD5Parser::MD5Parser(char *_buffer, unsigned int _fileSize) : buffer(_buffer), b
|
|||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Report error to the log stream
|
||||
/*static*/ AI_WONT_RETURN void MD5Parser::ReportError(const char *error, unsigned int line) {
|
||||
AI_WONT_RETURN void MD5Parser::ReportError(const char *error, unsigned int line) {
|
||||
char szBuffer[1024];
|
||||
::ai_snprintf(szBuffer, 1024, "[MD5] Line %u: %s", line, error);
|
||||
throw DeadlyImportError(szBuffer);
|
||||
|
@ -95,7 +95,7 @@ MD5Parser::MD5Parser(char *_buffer, unsigned int _fileSize) : buffer(_buffer), b
|
|||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Report warning to the log stream
|
||||
/*static*/ void MD5Parser::ReportWarning(const char *warn, unsigned int line) {
|
||||
void MD5Parser::ReportWarning(const char *warn, unsigned int line) {
|
||||
char szBuffer[1024];
|
||||
::snprintf(szBuffer, sizeof(szBuffer), "[MD5] Line %u: %s", line, warn);
|
||||
ASSIMP_LOG_WARN(szBuffer);
|
||||
|
@ -122,8 +122,8 @@ void MD5Parser::ParseHeader() {
|
|||
// print the command line options to the console
|
||||
// FIX: can break the log length limit, so we need to be careful
|
||||
char *sz = buffer;
|
||||
while (!IsLineEnd(*buffer++))
|
||||
;
|
||||
while (!IsLineEnd(*buffer++));
|
||||
|
||||
ASSIMP_LOG_INFO(std::string(sz, std::min((uintptr_t)MAX_LOG_MESSAGE_LENGTH, (uintptr_t)(buffer - sz))));
|
||||
SkipSpacesAndLineEnd();
|
||||
}
|
||||
|
@ -138,18 +138,31 @@ bool MD5Parser::ParseSection(Section &out) {
|
|||
char *sz = buffer;
|
||||
while (!IsSpaceOrNewLine(*buffer)) {
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
out.mName = std::string(sz, (uintptr_t)(buffer - sz));
|
||||
SkipSpaces();
|
||||
while (IsSpace(*buffer)) {
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool running = true;
|
||||
while (running) {
|
||||
if ('{' == *buffer) {
|
||||
// it is a normal section so read all lines
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
bool run = true;
|
||||
while (run) {
|
||||
if (!SkipSpacesAndLineEnd()) {
|
||||
while (IsSpaceOrNewLine(*buffer)) {
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
if ('\0' == *buffer) {
|
||||
return false; // seems this was the last section
|
||||
}
|
||||
if ('}' == *buffer) {
|
||||
|
@ -164,25 +177,39 @@ bool MD5Parser::ParseSection(Section &out) {
|
|||
elem.szStart = buffer;
|
||||
|
||||
// terminate the line with zero
|
||||
while (!IsLineEnd(*buffer))
|
||||
while (!IsLineEnd(*buffer)) {
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
if (*buffer) {
|
||||
++lineNumber;
|
||||
*buffer++ = '\0';
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
break;
|
||||
} else if (!IsSpaceOrNewLine(*buffer)) {
|
||||
// it is an element at global scope. Parse its value and go on
|
||||
sz = buffer;
|
||||
while (!IsSpaceOrNewLine(*buffer++))
|
||||
;
|
||||
while (!IsSpaceOrNewLine(*buffer++)) {
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
out.mGlobalValue = std::string(sz, (uintptr_t)(buffer - sz));
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return SkipSpacesAndLineEnd();
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
while (IsSpaceOrNewLine(*buffer)) {
|
||||
++buffer;
|
||||
if (buffer == bufferEnd)
|
||||
return false;
|
||||
}
|
||||
return '\0' != *buffer;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
@ -228,15 +255,20 @@ bool MD5Parser::ParseSection(Section &out) {
|
|||
out.data[out.length] = '\0';
|
||||
|
||||
// parse a string, enclosed in quotation marks
|
||||
#define AI_MD5_PARSE_STRING_IN_QUOTATION(out) \
|
||||
while ('\"' != *sz) \
|
||||
++sz; \
|
||||
const char *szStart = ++sz; \
|
||||
while ('\"' != *sz) \
|
||||
++sz; \
|
||||
const char *szEnd = (sz++); \
|
||||
out.length = (ai_uint32)(szEnd - szStart); \
|
||||
::memcpy(out.data, szStart, out.length); \
|
||||
#define AI_MD5_PARSE_STRING_IN_QUOTATION(out) \
|
||||
out.length = 0; \
|
||||
while ('\"' != *sz && '\0' != *sz) \
|
||||
++sz; \
|
||||
if ('\0' != *sz) { \
|
||||
const char *szStart = ++sz; \
|
||||
while ('\"' != *sz && '\0' != *sz) \
|
||||
++sz; \
|
||||
if ('\0' != *sz) { \
|
||||
const char *szEnd = (sz++); \
|
||||
out.length = (ai_uint32)(szEnd - szStart); \
|
||||
::memcpy(out.data, szStart, out.length); \
|
||||
} \
|
||||
} \
|
||||
out.data[out.length] = '\0';
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// .MD5MESH parsing function
|
||||
|
|
|
@ -2,8 +2,7 @@
|
|||
Open Asset Import Library (assimp)
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
Copyright (c) 2006-2023, assimp team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@ -93,7 +92,7 @@ struct Section {
|
|||
std::string mName;
|
||||
|
||||
//! For global elements: the value of the element as string
|
||||
//! Iif !length() the section is not a global element
|
||||
//! if !length() the section is not a global element
|
||||
std::string mGlobalValue;
|
||||
};
|
||||
|
||||
|
@ -185,7 +184,7 @@ using FrameList = std::vector<FrameDesc>;
|
|||
*/
|
||||
struct VertexDesc {
|
||||
VertexDesc() AI_NO_EXCEPT
|
||||
: mFirstWeight(0), mNumWeights(0) {
|
||||
: mFirstWeight(0), mNumWeights(0) {
|
||||
// empty
|
||||
}
|
||||
|
||||
|
@ -349,62 +348,61 @@ public:
|
|||
*/
|
||||
MD5Parser(char* buffer, unsigned int fileSize);
|
||||
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Report a specific error message and throw an exception
|
||||
* @param error Error message to be reported
|
||||
* @param line Index of the line where the error occurred
|
||||
*/
|
||||
AI_WONT_RETURN static void ReportError (const char* error, unsigned int line) AI_WONT_RETURN_SUFFIX;
|
||||
AI_WONT_RETURN static void ReportError(const char* error, unsigned int line) AI_WONT_RETURN_SUFFIX;
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Report a specific warning
|
||||
* @param warn Warn message to be reported
|
||||
* @param line Index of the line where the error occurred
|
||||
*/
|
||||
static void ReportWarning (const char* warn, unsigned int line);
|
||||
|
||||
static void ReportWarning(const char* warn, unsigned int line);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Report a specific error
|
||||
* @param error Error message to be reported
|
||||
*/
|
||||
AI_WONT_RETURN void ReportError (const char* error) AI_WONT_RETURN_SUFFIX;
|
||||
|
||||
void ReportWarning (const char* warn) {
|
||||
return ReportWarning(warn, lineNumber);
|
||||
}
|
||||
// -------------------------------------------------------------------
|
||||
/** Report a specific warning
|
||||
* @param error Warn message to be reported
|
||||
*/
|
||||
void ReportWarning (const char* warn);
|
||||
|
||||
//! List of all sections which have been read
|
||||
SectionList mSections;
|
||||
|
||||
private:
|
||||
// -------------------------------------------------------------------
|
||||
/** Parses a file section. The current file pointer must be outside
|
||||
* of a section.
|
||||
* @param out Receives the section data
|
||||
* @return true if the end of the file has been reached
|
||||
* @throws ImportErrorException if an error occurs
|
||||
*/
|
||||
bool ParseSection(Section& out);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
/** Parses the file header
|
||||
* @throws ImportErrorException if an error occurs
|
||||
*/
|
||||
void ParseHeader();
|
||||
|
||||
bool SkipLine(const char* in, const char** out);
|
||||
bool SkipLine( );
|
||||
bool SkipSpacesAndLineEnd( const char* in, const char** out);
|
||||
bool SkipSpacesAndLineEnd();
|
||||
bool SkipSpaces();
|
||||
|
||||
private:
|
||||
char* buffer;
|
||||
char* bufferEnd;
|
||||
unsigned int fileSize;
|
||||
unsigned int lineNumber;
|
||||
};
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
inline void MD5Parser::ReportWarning (const char* warn) {
|
||||
return ReportWarning(warn, lineNumber);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
inline void MD5Parser::ReportError(const char* error) {
|
||||
ReportError(error, lineNumber);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
inline bool MD5Parser::SkipLine(const char* in, const char** out) {
|
||||
++lineNumber;
|
||||
|
@ -418,18 +416,24 @@ inline bool MD5Parser::SkipLine( ) {
|
|||
|
||||
// -------------------------------------------------------------------
|
||||
inline bool MD5Parser::SkipSpacesAndLineEnd( const char* in, const char** out) {
|
||||
bool bHad = false;
|
||||
bool running = true;
|
||||
if (in == bufferEnd) {
|
||||
*out = in;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool bHad = false, running = true;
|
||||
while (running) {
|
||||
if( *in == '\r' || *in == '\n') {
|
||||
// we open files in binary mode, so there could be \r\n sequences ...
|
||||
// we open files in binary mode, so there could be \r\n sequences ...
|
||||
if (!bHad) {
|
||||
bHad = true;
|
||||
++lineNumber;
|
||||
}
|
||||
} else if (*in == '\t' || *in == ' ') {
|
||||
bHad = false;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
else if (*in == '\t' || *in == ' ')bHad = false;
|
||||
else break;
|
||||
++in;
|
||||
if (in == bufferEnd) {
|
||||
break;
|
||||
|
|
|
@@ -271,10 +271,16 @@ void MDLImporter::InternReadFile(const std::string &pFile,
    }
}

// ------------------------------------------------------------------------------------------------
// Check whether we're still inside the valid file range
bool MDLImporter::IsPosValid(const void *szPos) const {
    return szPos && (const unsigned char *)szPos <= this->mBuffer + this->iFileSize && szPos >= this->mBuffer;
}

// ------------------------------------------------------------------------------------------------
// Check whether we're still inside the valid file range
void MDLImporter::SizeCheck(const void *szPos) {
    if (!szPos || (const unsigned char *)szPos > this->mBuffer + this->iFileSize) {
    if (!IsPosValid(szPos)) {
        throw DeadlyImportError("Invalid MDL file. The file is too small "
                                "or contains invalid data.");
    }

@@ -284,7 +290,7 @@ void MDLImporter::SizeCheck(const void *szPos) {
// Just for debugging purposes
void MDLImporter::SizeCheck(const void *szPos, const char *szFile, unsigned int iLine) {
    ai_assert(nullptr != szFile);
    if (!szPos || (const unsigned char *)szPos > mBuffer + iFileSize) {
    if (!IsPosValid(szPos)) {
        // remove a directory if there is one
        const char *szFilePtr = ::strrchr(szFile, '\\');
        if (!szFilePtr) {

@@ -150,6 +150,7 @@ protected:
    */
    void SizeCheck(const void* szPos);
    void SizeCheck(const void* szPos, const char* szFile, unsigned int iLine);
    bool IsPosValid(const void* szPos) const;

    // -------------------------------------------------------------------
    /** Validate the header data structure of a game studio MDL7 file
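
The IsPosValid()/SizeCheck() pair above centralizes the pointer-range validation that was previously repeated inline. A minimal standalone sketch of the same idea, with illustrative names rather than the Assimp API:

    #include <cstddef>
    #include <stdexcept>

    // A position is valid only if it lies inside [base, base + size].
    inline bool IsInsideBuffer(const void *pos, const unsigned char *base, std::size_t size) {
        const unsigned char *p = static_cast<const unsigned char *>(pos);
        return p != nullptr && p >= base && p <= base + size;
    }

    inline void CheckInsideBuffer(const void *pos, const unsigned char *base, std::size_t size) {
        if (!IsInsideBuffer(pos, base, size)) {
            throw std::runtime_error("position outside of the mapped file");
        }
    }
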
@ -481,6 +481,8 @@ void MDLImporter::ParseSkinLump_3DGS_MDL7(
|
|||
pcNew->achFormatHint[2] = 's';
|
||||
pcNew->achFormatHint[3] = '\0';
|
||||
|
||||
SizeCheck(szCurrent + pcNew->mWidth);
|
||||
|
||||
pcNew->pcData = (aiTexel *)new unsigned char[pcNew->mWidth];
|
||||
memcpy(pcNew->pcData, szCurrent, pcNew->mWidth);
|
||||
szCurrent += iWidth;
|
||||
|
@@ -493,12 +495,12 @@ void MDLImporter::ParseSkinLump_3DGS_MDL7(

    aiString szFile;
    const size_t iLen = strlen((const char *)szCurrent);
    size_t iLen2 = iLen + 1;
    iLen2 = iLen2 > MAXLEN ? MAXLEN : iLen2;
    size_t iLen2 = iLen > (MAXLEN - 1) ? (MAXLEN - 1) : iLen;
    memcpy(szFile.data, (const char *)szCurrent, iLen2);
    szFile.data[iLen2] = '\0';
    szFile.length = static_cast<ai_uint32>(iLen2);

    szCurrent += iLen2;
    szCurrent += iLen2 + 1;

    // place this as diffuse texture
    pcMatOut->AddProperty(&szFile, AI_MATKEY_TEXTURE_DIFFUSE(0));
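
The hunk above clamps the copied length to MAXLEN - 1 so the terminating '\0' always fits into aiString::data, and then skips past the source string including its terminator. A sketch of the same clamped-copy pattern for any fixed-size buffer (names are illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    // Copy at most CAPACITY - 1 bytes and always NUL-terminate. CAPACITY must be >= 1.
    template <std::size_t CAPACITY>
    std::size_t CopyClamped(char (&dst)[CAPACITY], const char *src) {
        const std::size_t len = std::min(std::strlen(src), CAPACITY - 1);
        std::memcpy(dst, src, len);
        dst[len] = '\0';
        return len;
    }
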
@@ -703,7 +705,14 @@ void MDLImporter::SkipSkinLump_3DGS_MDL7(
    tex.pcData = bad_texel;
    tex.mHeight = iHeight;
    tex.mWidth = iWidth;
    ParseTextureColorData(szCurrent, iMasked, &iSkip, &tex);

    try {
        ParseTextureColorData(szCurrent, iMasked, &iSkip, &tex);
    } catch (...) {
        // FIX: Important, otherwise the destructor will crash
        tex.pcData = nullptr;
        throw;
    }

    // FIX: Important, otherwise the destructor will crash
    tex.pcData = nullptr;
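
The try/catch above resets tex.pcData on both the normal and the exception path, so the aiTexture destructor never frees the shared bad_texel placeholder. A scope guard expresses the same guarantee without duplicating the cleanup; this is a sketch of an alternative, not what the patch does:

    #include <utility>

    template <typename F>
    struct ScopeGuard {
        F fn;
        ~ScopeGuard() { fn(); }   // runs on normal exit and during stack unwinding
    };
    template <typename F>
    ScopeGuard<F> MakeGuard(F fn) { return ScopeGuard<F>{ std::move(fn) }; }

    // usage (hypothetical):
    //   auto guard = MakeGuard([&] { tex.pcData = nullptr; });
    //   ParseTextureColorData(szCurrent, iMasked, &iSkip, &tex);
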
@@ -52,6 +52,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <assimp/importerdesc.h>
#include <assimp/StreamReader.h>
#include <map>
#include <limits>

using namespace Assimp;

@@ -160,6 +161,9 @@ void NDOImporter::InternReadFile( const std::string& pFile,

    temp = file_format >= 12 ? reader.GetU4() : reader.GetU2();
    head = (const char*)reader.GetPtr();
    if (std::numeric_limits<unsigned int>::max() - 76 < temp) {
        throw DeadlyImportError("Invalid name length");
    }
    reader.IncPtr(temp + 76); /* skip unknown stuff */

    obj.name = std::string(head, temp);
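
The added check guards the unsigned addition temp + 76: if temp were close to UINT_MAX the sum would wrap around and IncPtr() would be called with a tiny value instead of failing. The guard in isolation:

    #include <cstdint>
    #include <limits>

    // True if value + offset would wrap around in 32-bit unsigned arithmetic.
    inline bool AddWouldOverflow(std::uint32_t value, std::uint32_t offset) {
        return std::numeric_limits<std::uint32_t>::max() - offset < value;
    }
    // e.g. AddWouldOverflow(temp, 76) must be false before reader.IncPtr(temp + 76).
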
@ -284,7 +284,7 @@ void OFFImporter::InternReadFile( const std::string& pFile, aiScene* pScene, IOS
|
|||
for (unsigned int i = 0; i < numFaces; ) {
|
||||
if(!GetNextLine(buffer,line)) {
|
||||
ASSIMP_LOG_ERROR("OFF: The number of faces in the header is incorrect");
|
||||
break;
|
||||
throw DeadlyImportError("OFF: The number of faces in the header is incorrect");
|
||||
}
|
||||
unsigned int idx;
|
||||
sz = line; SkipSpaces(&sz);
|
||||
|
|
|
@ -239,8 +239,6 @@ struct Mesh {
|
|||
unsigned int m_uiMaterialIndex;
|
||||
/// True, if normals are stored.
|
||||
bool m_hasNormals;
|
||||
/// True, if vertex colors are stored.
|
||||
bool m_hasVertexColors;
|
||||
|
||||
/// Constructor
|
||||
explicit Mesh(const std::string &name) :
|
||||
|
|
|
@@ -323,7 +323,7 @@ aiMesh *ObjFileImporter::createTopology(const ObjFile::Model *pModel, const ObjF
        return nullptr;
    }

    aiMesh *pMesh = new aiMesh;
    std::unique_ptr<aiMesh> pMesh(new aiMesh);
    if (!pObjMesh->m_name.empty()) {
        pMesh->mName.Set(pObjMesh->m_name);
    }

@@ -385,9 +385,9 @@ aiMesh *ObjFileImporter::createTopology(const ObjFile::Model *pModel, const ObjF
    }

    // Create mesh vertices
    createVertexArray(pModel, pData, meshIndex, pMesh, uiIdxCount);
    createVertexArray(pModel, pData, meshIndex, pMesh.get(), uiIdxCount);

    return pMesh;
    return pMesh.release();
}

// ------------------------------------------------------------------------------------------------
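
Holding the new aiMesh in a std::unique_ptr means an exception thrown while the mesh is being filled (for example a DeadlyImportError from createVertexArray) no longer leaks it; release() hands ownership to the caller only on the success path. The same pattern in isolation, with illustrative names:

    #include <memory>

    // Build an object, let 'fill' populate it (it may throw), and release ownership
    // to a raw-pointer interface only if filling succeeded.
    template <typename T, typename Fill>
    T *BuildAndRelease(Fill fill) {
        std::unique_ptr<T> obj(new T());
        fill(*obj);          // on throw, unique_ptr deletes the object
        return obj.release();
    }
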
@ -498,6 +498,10 @@ void ObjFileImporter::createVertexArray(const ObjFile::Model *pModel,
|
|||
|
||||
if (vertexIndex) {
|
||||
if (!last) {
|
||||
if (pMesh->mNumVertices <= newIndex + 1) {
|
||||
throw DeadlyImportError("OBJ: bad vertex index");
|
||||
}
|
||||
|
||||
pMesh->mVertices[newIndex + 1] = pMesh->mVertices[newIndex];
|
||||
if (!sourceFace->m_normals.empty() && !pModel->mNormals.empty()) {
|
||||
pMesh->mNormals[newIndex + 1] = pMesh->mNormals[newIndex];
|
||||
|
|
|
@ -252,9 +252,9 @@ void ObjFileMtlImporter::load() {
|
|||
case 'a': // Anisotropy
|
||||
{
|
||||
++m_DataIt;
|
||||
getFloatValue(m_pModel->mCurrentMaterial->anisotropy);
|
||||
if (m_pModel->mCurrentMaterial != nullptr)
|
||||
m_DataIt = skipLine<DataArrayIt>(m_DataIt, m_DataItEnd, m_uiLine);
|
||||
getFloatValue(m_pModel->mCurrentMaterial->anisotropy);
|
||||
m_DataIt = skipLine<DataArrayIt>(m_DataIt, m_DataItEnd, m_uiLine);
|
||||
} break;
|
||||
|
||||
default: {
|
||||
|
@ -371,6 +371,7 @@ void ObjFileMtlImporter::getTexture() {
|
|||
if (m_pModel->mCurrentMaterial == nullptr) {
|
||||
m_pModel->mCurrentMaterial = new ObjFile::Material();
|
||||
m_pModel->mCurrentMaterial->MaterialName.Set("Empty_Material");
|
||||
m_pModel->mMaterialMap["Empty_Material"] = m_pModel->mCurrentMaterial;
|
||||
}
|
||||
|
||||
const char *pPtr(&(*m_DataIt));
|
||||
|
|
|
@ -156,9 +156,17 @@ void ObjFileParser::parseFile(IOStreamBuffer<char> &streamBuffer) {
|
|||
// read in vertex definition (homogeneous coords)
|
||||
getHomogeneousVector3(m_pModel->mVertices);
|
||||
} else if (numComponents == 6) {
|
||||
// fill previous omitted vertex-colors by default
|
||||
if (m_pModel->mVertexColors.size() < m_pModel->mVertices.size()) {
|
||||
m_pModel->mVertexColors.resize(m_pModel->mVertices.size(), aiVector3D(0, 0, 0));
|
||||
}
|
||||
// read vertex and vertex-color
|
||||
getTwoVectors3(m_pModel->mVertices, m_pModel->mVertexColors);
|
||||
}
|
||||
// append omitted vertex-colors as default for the end if any vertex-color exists
|
||||
if (!m_pModel->mVertexColors.empty() && m_pModel->mVertexColors.size() < m_pModel->mVertices.size()) {
|
||||
m_pModel->mVertexColors.resize(m_pModel->mVertices.size(), aiVector3D(0, 0, 0));
|
||||
}
|
||||
} else if (*m_DataIt == 't') {
|
||||
// read in texture coordinate ( 2D or 3D )
|
||||
++m_DataIt;
|
||||
|
@@ -456,8 +464,19 @@ void ObjFileParser::getFace(aiPrimitiveType type) {
        iPos = 0;
    } else {
        //OBJ USES 1 Base ARRAYS!!!!
        const char *token = &(*m_DataIt);
        const int iVal = ::atoi(token);
        int iVal;
        auto end = m_DataIt;
        // find either the buffer end or the '\0'
        while (end < m_DataItEnd && *end != '\0')
            ++end;
        // avoid temporary string allocation if there is a zero
        if (end != m_DataItEnd) {
            iVal = ::atoi(&(*m_DataIt));
        } else {
            // otherwise make a zero terminated copy, which is safe to pass to atoi
            std::string number(&(*m_DataIt), m_DataItEnd - m_DataIt);
            iVal = ::atoi(number.c_str());
        }

        // increment iStep position based off of the sign and # of digits
        int tmp = iVal;
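
An alternative sketch (not what the patch does): std::from_chars parses a bounded [first, last) range directly, so neither a zero terminator nor a temporary std::string is needed. Assumes C++17's <charconv> is available:

    #include <charconv>

    inline int ParseIntBounded(const char *first, const char *last) {
        int value = 0;
        const auto res = std::from_chars(first, last, value);
        (void)res;   // value stays 0 if nothing was parsed, similar to atoi("")
        return value;
    }
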
@ -837,7 +837,10 @@ void SMDImporter::ParseNodeInfo(const char* szCurrent, const char** szCurrentOut
|
|||
unsigned int iBone = 0;
|
||||
SkipSpacesAndLineEnd(szCurrent,&szCurrent);
|
||||
if ( !ParseUnsignedInt(szCurrent,&szCurrent,iBone) || !SkipSpaces(szCurrent,&szCurrent)) {
|
||||
LogErrorNoThrow("Unexpected EOF/EOL while parsing bone index");
|
||||
throw DeadlyImportError("Unexpected EOF/EOL while parsing bone index");
|
||||
}
|
||||
if (iBone == UINT_MAX) {
|
||||
LogErrorNoThrow("Invalid bone number while parsing bone index");
|
||||
SMDI_PARSE_RETURN;
|
||||
}
|
||||
// add our bone to the list
|
||||
|
|
|
@ -93,7 +93,10 @@ const aiImporterDesc *glTFImporter::GetInfo() const {
|
|||
bool glTFImporter::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool /* checkSig */) const {
|
||||
glTF::Asset asset(pIOHandler);
|
||||
try {
|
||||
asset.Load(pFile, GetExtension(pFile) == "glb");
|
||||
asset.Load(pFile,
|
||||
CheckMagicToken(
|
||||
pIOHandler, pFile, AI_GLB_MAGIC_NUMBER, 1, 0,
|
||||
static_cast<unsigned int>(strlen(AI_GLB_MAGIC_NUMBER))));
|
||||
return asset.asset;
|
||||
} catch (...) {
|
||||
return false;
|
||||
|
@ -697,7 +700,10 @@ void glTFImporter::InternReadFile(const std::string &pFile, aiScene *pScene, IOS
|
|||
|
||||
// read the asset file
|
||||
glTF::Asset asset(pIOHandler);
|
||||
asset.Load(pFile, GetExtension(pFile) == "glb");
|
||||
asset.Load(pFile,
|
||||
CheckMagicToken(
|
||||
pIOHandler, pFile, AI_GLB_MAGIC_NUMBER, 1, 0,
|
||||
static_cast<unsigned int>(strlen(AI_GLB_MAGIC_NUMBER))));
|
||||
|
||||
//
|
||||
// Copy the data out
|
||||
|
|
|
@ -371,6 +371,15 @@ struct CustomExtension {
|
|||
CustomExtension& operator=(const CustomExtension&) = default;
|
||||
};
|
||||
|
||||
//! Represents metadata in an glTF2 object
|
||||
struct Extras {
|
||||
std::vector<CustomExtension> mValues;
|
||||
|
||||
inline bool HasExtras() const {
|
||||
return !mValues.empty();
|
||||
}
|
||||
};
|
||||
|
||||
//! Base class for all glTF top-level objects
|
||||
struct Object {
|
||||
int index; //!< The index of this object within its property container
|
||||
|
@ -379,7 +388,7 @@ struct Object {
|
|||
std::string name; //!< The user-defined name of this object
|
||||
|
||||
CustomExtension customExtensions;
|
||||
CustomExtension extras;
|
||||
Extras extras;
|
||||
|
||||
//! Objects marked as special are not exported (used to emulate the binary body buffer)
|
||||
virtual bool IsSpecial() const { return false; }
|
||||
|
|
|
@ -45,6 +45,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <assimp/StringUtils.h>
|
||||
#include <assimp/DefaultLogger.hpp>
|
||||
#include <assimp/Base64.hpp>
|
||||
#include <rapidjson/document.h>
|
||||
#include <rapidjson/schema.h>
|
||||
#include <rapidjson/stringbuffer.h>
|
||||
|
||||
// clang-format off
|
||||
#ifdef ASSIMP_ENABLE_DRACO
|
||||
|
@ -139,6 +142,18 @@ inline CustomExtension ReadExtensions(const char *name, Value &obj) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
inline Extras ReadExtras(Value &obj) {
|
||||
Extras ret;
|
||||
|
||||
ret.mValues.reserve(obj.MemberCount());
|
||||
for (auto it = obj.MemberBegin(); it != obj.MemberEnd(); ++it) {
|
||||
auto &val = it->value;
|
||||
ret.mValues.emplace_back(ReadExtensions(it->name.GetString(), val));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline void CopyData(size_t count, const uint8_t *src, size_t src_stride,
|
||||
uint8_t *dst, size_t dst_stride) {
|
||||
if (src_stride == dst_stride) {
|
||||
|
@ -248,7 +263,7 @@ inline void Object::ReadExtensions(Value &val) {
|
|||
|
||||
inline void Object::ReadExtras(Value &val) {
|
||||
if (Value *curExtras = FindObject(val, "extras")) {
|
||||
this->extras = glTF2::ReadExtensions("extras", *curExtras);
|
||||
this->extras = glTF2::ReadExtras(*curExtras);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -654,6 +654,44 @@ namespace glTF2 {
|
|||
}
|
||||
}
|
||||
|
||||
inline void WriteExtrasValue(Value &parent, const CustomExtension &value, AssetWriter &w) {
|
||||
Value valueNode;
|
||||
|
||||
if (value.mStringValue.isPresent) {
|
||||
MakeValue(valueNode, value.mStringValue.value.c_str(), w.mAl);
|
||||
} else if (value.mDoubleValue.isPresent) {
|
||||
MakeValue(valueNode, value.mDoubleValue.value, w.mAl);
|
||||
} else if (value.mUint64Value.isPresent) {
|
||||
MakeValue(valueNode, value.mUint64Value.value, w.mAl);
|
||||
} else if (value.mInt64Value.isPresent) {
|
||||
MakeValue(valueNode, value.mInt64Value.value, w.mAl);
|
||||
} else if (value.mBoolValue.isPresent) {
|
||||
MakeValue(valueNode, value.mBoolValue.value, w.mAl);
|
||||
} else if (value.mValues.isPresent) {
|
||||
valueNode.SetObject();
|
||||
for (auto const &subvalue : value.mValues.value) {
|
||||
WriteExtrasValue(valueNode, subvalue, w);
|
||||
}
|
||||
}
|
||||
|
||||
parent.AddMember(StringRef(value.name), valueNode, w.mAl);
|
||||
}
|
||||
|
||||
inline void WriteExtras(Value &obj, const Extras &extras, AssetWriter &w) {
|
||||
if (!extras.HasExtras()) {
|
||||
return;
|
||||
}
|
||||
|
||||
Value extrasNode;
|
||||
extrasNode.SetObject();
|
||||
|
||||
for (auto const &value : extras.mValues) {
|
||||
WriteExtrasValue(extrasNode, value, w);
|
||||
}
|
||||
|
||||
obj.AddMember("extras", extrasNode, w.mAl);
|
||||
}
|
||||
|
||||
inline void Write(Value& obj, Node& n, AssetWriter& w)
|
||||
{
|
||||
if (n.matrix.isPresent) {
|
||||
|
@ -689,6 +727,8 @@ namespace glTF2 {
|
|||
if(n.skeletons.size()) {
|
||||
AddRefsVector(obj, "skeletons", n.skeletons, w.mAl);
|
||||
}
|
||||
|
||||
WriteExtras(obj, n.extras, w);
|
||||
}
|
||||
|
||||
inline void Write(Value& /*obj*/, Program& /*b*/, AssetWriter& /*w*/)
|
||||
|
@ -762,7 +802,6 @@ namespace glTF2 {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
inline AssetWriter::AssetWriter(Asset& a)
|
||||
: mDoc()
|
||||
, mAsset(a)
|
||||
|
|
|
@ -172,22 +172,6 @@ static void IdentityMatrix4(mat4 &o) {
|
|||
o[15] = 1;
|
||||
}
|
||||
|
||||
static bool IsBoneWeightFitted(vec4 &weight) {
|
||||
return weight[0] + weight[1] + weight[2] + weight[3] >= 1.f;
|
||||
}
|
||||
|
||||
static int FitBoneWeight(vec4 &weight, float value) {
|
||||
int i = 0;
|
||||
for (; i < 4; ++i) {
|
||||
if (weight[i] < value) {
|
||||
weight[i] = value;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void SetAccessorRange(Ref<Accessor> acc, void *data, size_t count,
|
||||
unsigned int numCompsIn, unsigned int numCompsOut) {
|
||||
|
@ -263,7 +247,7 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
|
|||
for (short idx = 0; bufferData_ptr < bufferData_end; idx += 1, bufferData_ptr += numCompsIn) {
|
||||
bool bNonZero = false;
|
||||
|
||||
//for the data, check any component Non Zero
|
||||
// for the data, check any component Non Zero
|
||||
for (unsigned int j = 0; j < numCompsOut; j++) {
|
||||
double valueData = bufferData_ptr[j];
|
||||
double valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
|
||||
|
@ -273,11 +257,11 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
|
|||
}
|
||||
}
|
||||
|
||||
//all zeros, continue
|
||||
// all zeros, continue
|
||||
if (!bNonZero)
|
||||
continue;
|
||||
|
||||
//non zero, store the data
|
||||
// non zero, store the data
|
||||
for (unsigned int j = 0; j < numCompsOut; j++) {
|
||||
T valueData = bufferData_ptr[j];
|
||||
T valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
|
||||
|
@ -286,14 +270,14 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
|
|||
vNZIdx.push_back(idx);
|
||||
}
|
||||
|
||||
//avoid all-0, put 1 item
|
||||
// avoid all-0, put 1 item
|
||||
if (vNZDiff.size() == 0) {
|
||||
for (unsigned int j = 0; j < numCompsOut; j++)
|
||||
vNZDiff.push_back(0);
|
||||
vNZIdx.push_back(0);
|
||||
}
|
||||
|
||||
//process data
|
||||
// process data
|
||||
outputNZDiff = new T[vNZDiff.size()];
|
||||
memcpy(outputNZDiff, vNZDiff.data(), vNZDiff.size() * sizeof(T));
|
||||
|
||||
|
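
The NZDiff helper above feeds glTF sparse accessors: only the elements that differ from their base values are kept, together with their indices, and those two arrays become the accessor's "indices" and "values" buffer views. A simplified, illustrative sketch of that collection step:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Keep (index, value) for every element that differs from its base value.
    template <typename T>
    std::vector<std::pair<std::size_t, T>> CollectSparseDiff(const std::vector<T> &data,
                                                             const std::vector<T> &base) {
        std::vector<std::pair<std::size_t, T>> diff;
        for (std::size_t i = 0; i < data.size(); ++i) {
            const T baseValue = i < base.size() ? base[i] : T(0);
            if (data[i] != baseValue) {
                diff.emplace_back(i, data[i]);
            }
        }
        return diff;
    }
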
@ -361,7 +345,7 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
|
|||
acc->sparse.reset(new Accessor::Sparse);
|
||||
acc->sparse->count = nzCount;
|
||||
|
||||
//indices
|
||||
// indices
|
||||
unsigned int bytesPerIdx = sizeof(unsigned short);
|
||||
size_t indices_offset = buffer->byteLength;
|
||||
size_t indices_padding = indices_offset % bytesPerIdx;
|
||||
|
@ -379,7 +363,7 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
|
|||
acc->sparse->indicesByteOffset = 0;
|
||||
acc->WriteSparseIndices(nzCount, nzIdx, 1 * bytesPerIdx);
|
||||
|
||||
//values
|
||||
// values
|
||||
size_t values_offset = buffer->byteLength;
|
||||
size_t values_padding = values_offset % bytesPerComp;
|
||||
values_offset += values_padding;
|
||||
|
@ -395,9 +379,9 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
|
|||
acc->sparse->valuesByteOffset = 0;
|
||||
acc->WriteSparseValues(nzCount, nzDiff, numCompsIn * bytesPerComp);
|
||||
|
||||
//clear
|
||||
delete[](char *) nzDiff;
|
||||
delete[](char *) nzIdx;
|
||||
// clear
|
||||
delete[] (char *)nzDiff;
|
||||
delete[] (char *)nzIdx;
|
||||
}
|
||||
return acc;
|
||||
}
|
||||
|
@ -443,6 +427,61 @@ inline Ref<Accessor> ExportData(Asset &a, std::string &meshName, Ref<Buffer> &bu
|
|||
return acc;
|
||||
}
|
||||
|
||||
inline void ExportNodeExtras(const aiMetadataEntry &metadataEntry, aiString name, CustomExtension &value) {
|
||||
|
||||
value.name = name.C_Str();
|
||||
switch (metadataEntry.mType) {
|
||||
case AI_BOOL:
|
||||
value.mBoolValue.value = *static_cast<bool *>(metadataEntry.mData);
|
||||
value.mBoolValue.isPresent = true;
|
||||
break;
|
||||
case AI_INT32:
|
||||
value.mInt64Value.value = *static_cast<int32_t *>(metadataEntry.mData);
|
||||
value.mInt64Value.isPresent = true;
|
||||
break;
|
||||
case AI_UINT64:
|
||||
value.mUint64Value.value = *static_cast<uint64_t *>(metadataEntry.mData);
|
||||
value.mUint64Value.isPresent = true;
|
||||
break;
|
||||
case AI_FLOAT:
|
||||
value.mDoubleValue.value = *static_cast<float *>(metadataEntry.mData);
|
||||
value.mDoubleValue.isPresent = true;
|
||||
break;
|
||||
case AI_DOUBLE:
|
||||
value.mDoubleValue.value = *static_cast<double *>(metadataEntry.mData);
|
||||
value.mDoubleValue.isPresent = true;
|
||||
break;
|
||||
case AI_AISTRING:
|
||||
value.mStringValue.value = static_cast<aiString *>(metadataEntry.mData)->C_Str();
|
||||
value.mStringValue.isPresent = true;
|
||||
break;
|
||||
case AI_AIMETADATA: {
|
||||
const aiMetadata *subMetadata = static_cast<aiMetadata *>(metadataEntry.mData);
|
||||
value.mValues.value.resize(subMetadata->mNumProperties);
|
||||
value.mValues.isPresent = true;
|
||||
|
||||
for (unsigned i = 0; i < subMetadata->mNumProperties; ++i) {
|
||||
ExportNodeExtras(subMetadata->mValues[i], subMetadata->mKeys[i], value.mValues.value.at(i));
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
// AI_AIVECTOR3D not handled
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
inline void ExportNodeExtras(const aiMetadata *metadata, Extras &extras) {
|
||||
if (metadata == nullptr || metadata->mNumProperties == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
extras.mValues.resize(metadata->mNumProperties);
|
||||
for (unsigned int i = 0; i < metadata->mNumProperties; ++i) {
|
||||
ExportNodeExtras(metadata->mValues[i], metadata->mKeys[i], extras.mValues.at(i));
|
||||
}
|
||||
}
|
||||
|
||||
inline void SetSamplerWrap(SamplerWrap &wrap, aiTextureMapMode map) {
|
||||
switch (map) {
|
||||
case aiTextureMapMode_Clamp:
|
||||
|
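
With the ExportNodeExtras() overloads above, aiMetadata attached to an aiNode ends up in that node's glTF "extras" object. A small usage sketch (the key names are arbitrary examples, and the node is assumed to carry no metadata yet):

    #include <assimp/scene.h>
    #include <cstdint>

    void TagNodeForExport(aiNode *node) {
        node->mMetaData = aiMetadata::Alloc(2);
        aiString author;
        author.Set("example");
        node->mMetaData->Set(0, "author", author);
        node->mMetaData->Set(1, "lodLevel", std::int32_t(2));
    }
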
@ -544,7 +583,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
|
|||
if (curTex != nullptr) { // embedded
|
||||
texture->source->name = curTex->mFilename.C_Str();
|
||||
|
||||
//basisu: embedded ktx2, bu
|
||||
// basisu: embedded ktx2, bu
|
||||
if (curTex->achFormatHint[0]) {
|
||||
std::string mimeType = "image/";
|
||||
if (memcmp(curTex->achFormatHint, "jpg", 3) == 0)
|
||||
|
@ -564,7 +603,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
|
|||
}
|
||||
|
||||
// The asset has its own buffer, see Image::SetData
|
||||
//basisu: "image/ktx2", "image/basis" as is
|
||||
// basisu: "image/ktx2", "image/basis" as is
|
||||
texture->source->SetData(reinterpret_cast<uint8_t *>(curTex->pcData), curTex->mWidth, *mAsset);
|
||||
} else {
|
||||
texture->source->uri = path;
|
||||
|
@ -574,7 +613,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
|
|||
}
|
||||
}
|
||||
|
||||
//basisu
|
||||
// basisu
|
||||
if (useBasisUniversal) {
|
||||
mAsset->extensionsUsed.KHR_texture_basisu = true;
|
||||
mAsset->extensionsRequired.KHR_texture_basisu = true;
|
||||
|
@ -597,7 +636,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, NormalTextureInfo &prop, ai
|
|||
GetMatTex(mat, texture, prop.texCoord, tt, slot);
|
||||
|
||||
if (texture) {
|
||||
//GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
|
||||
// GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
|
||||
GetMatTexProp(mat, prop.scale, "scale", tt, slot);
|
||||
}
|
||||
}
|
||||
|
@ -608,7 +647,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, OcclusionTextureInfo &prop,
|
|||
GetMatTex(mat, texture, prop.texCoord, tt, slot);
|
||||
|
||||
if (texture) {
|
||||
//GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
|
||||
// GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
|
||||
GetMatTexProp(mat, prop.strength, "strength", tt, slot);
|
||||
}
|
||||
}
|
||||
|
@ -675,7 +714,7 @@ bool glTF2Exporter::GetMatSpecGloss(const aiMaterial &mat, glTF2::PbrSpecularGlo
|
|||
|
||||
bool glTF2Exporter::GetMatSpecular(const aiMaterial &mat, glTF2::MaterialSpecular &specular) {
|
||||
// Specular requires either/or, default factors of zero disables specular, so do not export
|
||||
if (GetMatColor(mat, specular.specularColorFactor, AI_MATKEY_COLOR_SPECULAR) != AI_SUCCESS || mat.Get(AI_MATKEY_SPECULAR_FACTOR, specular.specularFactor) != AI_SUCCESS) {
|
||||
if (GetMatColor(mat, specular.specularColorFactor, AI_MATKEY_COLOR_SPECULAR) != AI_SUCCESS && mat.Get(AI_MATKEY_SPECULAR_FACTOR, specular.specularFactor) != AI_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
// The spec states that the default is 1.0 and [1.0, 1.0, 1.0]. We if both are 0, which should disable specular. Otherwise, if one is 0, set to 1.0
|
||||
|
@ -777,20 +816,30 @@ void glTF2Exporter::ExportMaterials() {
|
|||
GetMatTex(mat, m->pbrMetallicRoughness.baseColorTexture, aiTextureType_BASE_COLOR);
|
||||
|
||||
if (!m->pbrMetallicRoughness.baseColorTexture.texture) {
|
||||
//if there wasn't a baseColorTexture defined in the source, fallback to any diffuse texture
|
||||
// if there wasn't a baseColorTexture defined in the source, fallback to any diffuse texture
|
||||
GetMatTex(mat, m->pbrMetallicRoughness.baseColorTexture, aiTextureType_DIFFUSE);
|
||||
}
|
||||
|
||||
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE);
|
||||
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, aiTextureType_DIFFUSE_ROUGHNESS);
|
||||
|
||||
if (!m->pbrMetallicRoughness.metallicRoughnessTexture.texture) {
|
||||
// if there wasn't a aiTextureType_DIFFUSE_ROUGHNESS defined in the source, fallback to aiTextureType_METALNESS
|
||||
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, aiTextureType_METALNESS);
|
||||
}
|
||||
|
||||
if (!m->pbrMetallicRoughness.metallicRoughnessTexture.texture) {
|
||||
// if there still wasn't a aiTextureType_METALNESS defined in the source, fallback to AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE
|
||||
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE);
|
||||
}
|
||||
|
||||
if (GetMatColor(mat, m->pbrMetallicRoughness.baseColorFactor, AI_MATKEY_BASE_COLOR) != AI_SUCCESS) {
|
||||
// if baseColorFactor wasn't defined, then the source is likely not a metallic roughness material.
|
||||
//a fallback to any diffuse color should be used instead
|
||||
// a fallback to any diffuse color should be used instead
|
||||
GetMatColor(mat, m->pbrMetallicRoughness.baseColorFactor, AI_MATKEY_COLOR_DIFFUSE);
|
||||
}
|
||||
|
||||
if (mat.Get(AI_MATKEY_METALLIC_FACTOR, m->pbrMetallicRoughness.metallicFactor) != AI_SUCCESS) {
|
||||
//if metallicFactor wasn't defined, then the source is likely not a PBR file, and the metallicFactor should be 0
|
||||
// if metallicFactor wasn't defined, then the source is likely not a PBR file, and the metallicFactor should be 0
|
||||
m->pbrMetallicRoughness.metallicFactor = 0;
|
||||
}
|
||||
|
||||
|
@ -803,10 +852,10 @@ void glTF2Exporter::ExportMaterials() {
|
|||
if (mat.Get(AI_MATKEY_COLOR_SPECULAR, specularColor) == AI_SUCCESS && mat.Get(AI_MATKEY_SHININESS, shininess) == AI_SUCCESS) {
|
||||
// convert specular color to luminance
|
||||
float specularIntensity = specularColor[0] * 0.2125f + specularColor[1] * 0.7154f + specularColor[2] * 0.0721f;
|
||||
//normalize shininess (assuming max is 1000) with an inverse exponentional curve
|
||||
// normalize shininess (assuming max is 1000) with an inverse exponentional curve
|
||||
float normalizedShininess = std::sqrt(shininess / 1000);
|
||||
|
||||
//clamp the shininess value between 0 and 1
|
||||
// clamp the shininess value between 0 and 1
|
||||
normalizedShininess = std::min(std::max(normalizedShininess, 0.0f), 1.0f);
|
||||
// low specular intensity values should produce a rough material even if shininess is high.
|
||||
normalizedShininess = normalizedShininess * specularIntensity;
|
||||
|
@ -944,23 +993,29 @@ Ref<Node> FindSkeletonRootJoint(Ref<Skin> &skinRef) {
|
|||
return parentNodeRef;
|
||||
}
|
||||
|
||||
struct boneIndexWeightPair {
|
||||
unsigned int indexJoint;
|
||||
float weight;
|
||||
bool operator()(boneIndexWeightPair &a, boneIndexWeightPair &b) {
|
||||
return a.weight > b.weight;
|
||||
}
|
||||
};
|
||||
|
||||
void ExportSkin(Asset &mAsset, const aiMesh *aimesh, Ref<Mesh> &meshRef, Ref<Buffer> &bufferRef, Ref<Skin> &skinRef,
|
||||
std::vector<aiMatrix4x4> &inverseBindMatricesData) {
|
||||
std::vector<aiMatrix4x4> &inverseBindMatricesData, bool unlimitedBonesPerVertex) {
|
||||
if (aimesh->mNumBones < 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Store the vertex joint and weight data.
|
||||
const size_t NumVerts(aimesh->mNumVertices);
|
||||
vec4 *vertexJointData = new vec4[NumVerts];
|
||||
vec4 *vertexWeightData = new vec4[NumVerts];
|
||||
int *jointsPerVertex = new int[NumVerts];
|
||||
std::vector<std::vector<boneIndexWeightPair>> allVerticesPairs;
|
||||
int maxJointsPerVertex = 0;
|
||||
for (size_t i = 0; i < NumVerts; ++i) {
|
||||
jointsPerVertex[i] = 0;
|
||||
for (size_t j = 0; j < 4; ++j) {
|
||||
vertexJointData[i][j] = 0;
|
||||
vertexWeightData[i][j] = 0;
|
||||
}
|
||||
std::vector<boneIndexWeightPair> vertexPair;
|
||||
allVerticesPairs.push_back(vertexPair);
|
||||
}
|
||||
|
||||
for (unsigned int idx_bone = 0; idx_bone < aimesh->mNumBones; ++idx_bone) {
|
||||
|
@ -990,61 +1045,88 @@ void ExportSkin(Asset &mAsset, const aiMesh *aimesh, Ref<Mesh> &meshRef, Ref<Buf
|
|||
jointNamesIndex = static_cast<unsigned int>(inverseBindMatricesData.size() - 1);
|
||||
}
|
||||
|
||||
// aib->mWeights =====> vertexWeightData
|
||||
for (unsigned int idx_weights = 0; idx_weights < aib->mNumWeights; ++idx_weights) {
|
||||
// aib->mWeights =====> temp pairs data
|
||||
for (unsigned int idx_weights = 0; idx_weights < aib->mNumWeights;
|
||||
++idx_weights) {
|
||||
unsigned int vertexId = aib->mWeights[idx_weights].mVertexId;
|
||||
float vertWeight = aib->mWeights[idx_weights].mWeight;
|
||||
|
||||
// A vertex can only have at most four joint weights, which ideally sum up to 1
|
||||
if (IsBoneWeightFitted(vertexWeightData[vertexId])) {
|
||||
continue;
|
||||
}
|
||||
if (jointsPerVertex[vertexId] > 3) {
|
||||
int boneIndexFitted = FitBoneWeight(vertexWeightData[vertexId], vertWeight);
|
||||
if (boneIndexFitted != -1) {
|
||||
vertexJointData[vertexId][boneIndexFitted] = static_cast<float>(jointNamesIndex);
|
||||
}
|
||||
}else {
|
||||
vertexJointData[vertexId][jointsPerVertex[vertexId]] = static_cast<float>(jointNamesIndex);
|
||||
vertexWeightData[vertexId][jointsPerVertex[vertexId]] = vertWeight;
|
||||
|
||||
jointsPerVertex[vertexId] += 1;
|
||||
}
|
||||
allVerticesPairs[vertexId].push_back({jointNamesIndex, vertWeight});
|
||||
jointsPerVertex[vertexId] += 1;
|
||||
maxJointsPerVertex =
|
||||
std::max(maxJointsPerVertex, jointsPerVertex[vertexId]);
|
||||
}
|
||||
|
||||
} // End: for-loop mNumMeshes
|
||||
|
||||
Mesh::Primitive &p = meshRef->primitives.back();
|
||||
Ref<Accessor> vertexJointAccessor = ExportData(mAsset, skinRef->id, bufferRef, aimesh->mNumVertices,
|
||||
vertexJointData, AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT);
|
||||
if (vertexJointAccessor) {
|
||||
size_t offset = vertexJointAccessor->bufferView->byteOffset;
|
||||
size_t bytesLen = vertexJointAccessor->bufferView->byteLength;
|
||||
unsigned int s_bytesPerComp = ComponentTypeSize(ComponentType_UNSIGNED_SHORT);
|
||||
unsigned int bytesPerComp = ComponentTypeSize(vertexJointAccessor->componentType);
|
||||
size_t s_bytesLen = bytesLen * s_bytesPerComp / bytesPerComp;
|
||||
Ref<Buffer> buf = vertexJointAccessor->bufferView->buffer;
|
||||
uint8_t *arrys = new uint8_t[bytesLen];
|
||||
unsigned int i = 0;
|
||||
for (unsigned int j = 0; j < bytesLen; j += bytesPerComp) {
|
||||
size_t len_p = offset + j;
|
||||
float f_value = *(float *)&buf->GetPointer()[len_p];
|
||||
unsigned short c = static_cast<unsigned short>(f_value);
|
||||
memcpy(&arrys[i * s_bytesPerComp], &c, s_bytesPerComp);
|
||||
++i;
|
||||
}
|
||||
buf->ReplaceData_joint(offset, bytesLen, arrys, bytesLen);
|
||||
vertexJointAccessor->componentType = ComponentType_UNSIGNED_SHORT;
|
||||
vertexJointAccessor->bufferView->byteLength = s_bytesLen;
|
||||
|
||||
p.attributes.joint.push_back(vertexJointAccessor);
|
||||
delete[] arrys;
|
||||
if (!unlimitedBonesPerVertex){
|
||||
// skinning limited only for 4 bones per vertex, default
|
||||
maxJointsPerVertex = 4;
|
||||
}
|
||||
|
||||
Ref<Accessor> vertexWeightAccessor = ExportData(mAsset, skinRef->id, bufferRef, aimesh->mNumVertices,
|
||||
vertexWeightData, AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT);
|
||||
if (vertexWeightAccessor) {
|
||||
p.attributes.weight.push_back(vertexWeightAccessor);
|
||||
// temp pairs data =====> vertexWeightData
|
||||
size_t numGroups = (maxJointsPerVertex - 1) / 4 + 1;
|
||||
vec4 *vertexJointData = new vec4[NumVerts * numGroups];
|
||||
vec4 *vertexWeightData = new vec4[NumVerts * numGroups];
|
||||
for (size_t indexVertex = 0; indexVertex < NumVerts; ++indexVertex) {
|
||||
// order pairs by weight for each vertex
|
||||
std::sort(allVerticesPairs[indexVertex].begin(),
|
||||
allVerticesPairs[indexVertex].end(),
|
||||
boneIndexWeightPair());
|
||||
for (size_t indexGroup = 0; indexGroup < numGroups; ++indexGroup) {
|
||||
for (size_t indexJoint = 0; indexJoint < 4; ++indexJoint) {
|
||||
size_t indexBone = indexGroup * 4 + indexJoint;
|
||||
size_t indexData = indexVertex + NumVerts * indexGroup;
|
||||
if (indexBone >= allVerticesPairs[indexVertex].size()) {
|
||||
vertexJointData[indexData][indexJoint] = 0.f;
|
||||
vertexWeightData[indexData][indexJoint] = 0.f;
|
||||
} else {
|
||||
vertexJointData[indexData][indexJoint] =
|
||||
static_cast<float>(
|
||||
allVerticesPairs[indexVertex][indexBone].indexJoint);
|
||||
vertexWeightData[indexData][indexJoint] =
|
||||
allVerticesPairs[indexVertex][indexBone].weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t idx_group = 0; idx_group < numGroups; ++idx_group) {
|
||||
Mesh::Primitive &p = meshRef->primitives.back();
|
||||
Ref<Accessor> vertexJointAccessor = ExportData(
|
||||
mAsset, skinRef->id, bufferRef, aimesh->mNumVertices,
|
||||
vertexJointData + idx_group * NumVerts,
|
||||
AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT);
|
||||
if (vertexJointAccessor) {
|
||||
size_t offset = vertexJointAccessor->bufferView->byteOffset;
|
||||
size_t bytesLen = vertexJointAccessor->bufferView->byteLength;
|
||||
unsigned int s_bytesPerComp =
|
||||
ComponentTypeSize(ComponentType_UNSIGNED_SHORT);
|
||||
unsigned int bytesPerComp =
|
||||
ComponentTypeSize(vertexJointAccessor->componentType);
|
||||
size_t s_bytesLen = bytesLen * s_bytesPerComp / bytesPerComp;
|
||||
Ref<Buffer> buf = vertexJointAccessor->bufferView->buffer;
|
||||
uint8_t *arrys = new uint8_t[bytesLen];
|
||||
unsigned int i = 0;
|
||||
for (unsigned int j = 0; j < bytesLen; j += bytesPerComp) {
|
||||
size_t len_p = offset + j;
|
||||
float f_value = *(float *)&buf->GetPointer()[len_p];
|
||||
unsigned short c = static_cast<unsigned short>(f_value);
|
||||
memcpy(&arrys[i * s_bytesPerComp], &c, s_bytesPerComp);
|
||||
++i;
|
||||
}
|
||||
buf->ReplaceData_joint(offset, bytesLen, arrys, bytesLen);
|
||||
vertexJointAccessor->componentType = ComponentType_UNSIGNED_SHORT;
|
||||
vertexJointAccessor->bufferView->byteLength = s_bytesLen;
|
||||
|
||||
p.attributes.joint.push_back(vertexJointAccessor);
|
||||
delete[] arrys;
|
||||
}
|
||||
Ref<Accessor> vertexWeightAccessor = ExportData(
|
||||
mAsset, skinRef->id, bufferRef, aimesh->mNumVertices,
|
||||
vertexWeightData + idx_group * NumVerts,
|
||||
AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT);
|
||||
if (vertexWeightAccessor) {
|
||||
p.attributes.weight.push_back(vertexWeightAccessor);
|
||||
}
|
||||
}
|
||||
delete[] jointsPerVertex;
|
||||
delete[] vertexWeightData;
|
||||
|
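
The reworked ExportSkin() no longer drops influences past the fourth: per vertex it sorts all (joint, weight) pairs by descending weight and emits them in groups of four, one group per JOINTS_n/WEIGHTS_n accessor pair, with numGroups = (maxJointsPerVertex - 1) / 4 + 1. A hedged sketch of that grouping in isolation:

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <vector>

    struct JointWeight {
        unsigned int joint;
        float weight;
    };

    // Sort influences by weight and split them into groups of four (zero-padded).
    std::vector<std::array<JointWeight, 4>> GroupInfluences(std::vector<JointWeight> influences,
                                                            std::size_t maxJoints) {
        if (maxJoints == 0) {
            return {};
        }
        std::sort(influences.begin(), influences.end(),
                  [](const JointWeight &a, const JointWeight &b) { return a.weight > b.weight; });
        const std::size_t numGroups = (maxJoints - 1) / 4 + 1;       // ceil(maxJoints / 4)
        std::vector<std::array<JointWeight, 4>> groups(numGroups);   // value-initialized to zeros
        for (std::size_t i = 0; i < influences.size() && i < numGroups * 4; ++i) {
            groups[i / 4][i % 4] = influences[i];
        }
        return groups;
    }
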
@ -1100,7 +1182,7 @@ void glTF2Exporter::ExportMeshes() {
|
|||
|
||||
/******************* Vertices ********************/
|
||||
Ref<Accessor> v = ExportData(*mAsset, meshId, b, aim->mNumVertices, aim->mVertices, AttribType::VEC3,
|
||||
AttribType::VEC3, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
AttribType::VEC3, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
if (v) {
|
||||
p.attributes.position.push_back(v);
|
||||
}
|
||||
|
@ -1114,7 +1196,7 @@ void glTF2Exporter::ExportMeshes() {
|
|||
}
|
||||
|
||||
Ref<Accessor> n = ExportData(*mAsset, meshId, b, aim->mNumVertices, aim->mNormals, AttribType::VEC3,
|
||||
AttribType::VEC3, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
AttribType::VEC3, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
if (n) {
|
||||
p.attributes.normal.push_back(n);
|
||||
}
|
||||
|
@ -1136,7 +1218,7 @@ void glTF2Exporter::ExportMeshes() {
|
|||
AttribType::Value type = (aim->mNumUVComponents[i] == 2) ? AttribType::VEC2 : AttribType::VEC3;
|
||||
|
||||
Ref<Accessor> tc = ExportData(*mAsset, meshId, b, aim->mNumVertices, aim->mTextureCoords[i],
|
||||
AttribType::VEC3, type, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
AttribType::VEC3, type, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
if (tc) {
|
||||
p.attributes.texcoord.push_back(tc);
|
||||
}
|
||||
|
@ -1146,7 +1228,7 @@ void glTF2Exporter::ExportMeshes() {
|
|||
/*************** Vertex colors ****************/
|
||||
for (unsigned int indexColorChannel = 0; indexColorChannel < aim->GetNumColorChannels(); ++indexColorChannel) {
|
||||
Ref<Accessor> c = ExportData(*mAsset, meshId, b, aim->mNumVertices, aim->mColors[indexColorChannel],
|
||||
AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
AttribType::VEC4, AttribType::VEC4, ComponentType_FLOAT, BufferViewTarget_ARRAY_BUFFER);
|
||||
if (c) {
|
||||
p.attributes.color.push_back(c);
|
||||
}
|
||||
|
@ -1164,7 +1246,7 @@ void glTF2Exporter::ExportMeshes() {
|
|||
}
|
||||
|
||||
p.indices = ExportData(*mAsset, meshId, b, indices.size(), &indices[0], AttribType::SCALAR, AttribType::SCALAR,
|
||||
ComponentType_UNSIGNED_INT, BufferViewTarget_ELEMENT_ARRAY_BUFFER);
|
||||
ComponentType_UNSIGNED_INT, BufferViewTarget_ELEMENT_ARRAY_BUFFER);
|
||||
}
|
||||
|
||||
switch (aim->mPrimitiveTypes) {
|
||||
|
@ -1182,9 +1264,19 @@ void glTF2Exporter::ExportMeshes() {
|
|||
break;
|
||||
}
|
||||
|
||||
// /*************** Skins ****************/
|
||||
// if (aim->HasBones()) {
|
||||
// ExportSkin(*mAsset, aim, m, b, skinRef, inverseBindMatricesData);
|
||||
// }
|
||||
/*************** Skins ****************/
|
||||
if (aim->HasBones()) {
|
||||
ExportSkin(*mAsset, aim, m, b, skinRef, inverseBindMatricesData);
|
||||
bool unlimitedBonesPerVertex =
|
||||
this->mProperties->HasPropertyBool(
|
||||
AI_CONFIG_EXPORT_GLTF_UNLIMITED_SKINNING_BONES_PER_VERTEX) &&
|
||||
this->mProperties->GetPropertyBool(
|
||||
AI_CONFIG_EXPORT_GLTF_UNLIMITED_SKINNING_BONES_PER_VERTEX);
|
||||
ExportSkin(*mAsset, aim, m, b, skinRef, inverseBindMatricesData,
|
||||
unlimitedBonesPerVertex);
|
||||
}
|
||||
|
||||
/*************** Targets for blendshapes ****************/
|
||||
|
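
A usage sketch for the export flag consumed above, assuming the AI_CONFIG_EXPORT_GLTF_UNLIMITED_SKINNING_BONES_PER_VERTEX define introduced by this change is visible via assimp/config.h:

    #include <assimp/Exporter.hpp>
    #include <assimp/config.h>
    #include <assimp/scene.h>

    // Opt in to more than four bone influences per vertex; the exporter then spreads
    // them across JOINTS_0/WEIGHTS_0, JOINTS_1/WEIGHTS_1, ... attribute pairs.
    aiReturn ExportWithUnlimitedBones(const aiScene *scene, const char *path) {
        Assimp::ExportProperties props;
        props.SetPropertyBool(AI_CONFIG_EXPORT_GLTF_UNLIMITED_SKINNING_BONES_PER_VERTEX, true);
        Assimp::Exporter exporter;
        return exporter.Export(scene, "gltf2", path, 0u, &props);
    }
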
@ -1307,24 +1399,24 @@ void glTF2Exporter::MergeMeshes() {
|
|||
|
||||
unsigned int nMeshes = static_cast<unsigned int>(node->meshes.size());
|
||||
|
||||
//skip if it's 1 or less meshes per node
|
||||
// skip if it's 1 or less meshes per node
|
||||
if (nMeshes > 1) {
|
||||
Ref<Mesh> firstMesh = node->meshes.at(0);
|
||||
|
||||
//loop backwards to allow easy removal of a mesh from a node once it's merged
|
||||
// loop backwards to allow easy removal of a mesh from a node once it's merged
|
||||
for (unsigned int m = nMeshes - 1; m >= 1; --m) {
|
||||
Ref<Mesh> mesh = node->meshes.at(m);
|
||||
|
||||
//append this mesh's primitives to the first mesh's primitives
|
||||
// append this mesh's primitives to the first mesh's primitives
|
||||
firstMesh->primitives.insert(
|
||||
firstMesh->primitives.end(),
|
||||
mesh->primitives.begin(),
|
||||
mesh->primitives.end());
|
||||
|
||||
//remove the mesh from the list of meshes
|
||||
// remove the mesh from the list of meshes
|
||||
unsigned int removedIndex = mAsset->meshes.Remove(mesh->id.c_str());
|
||||
|
||||
//find the presence of the removed mesh in other nodes
|
||||
// find the presence of the removed mesh in other nodes
|
||||
for (unsigned int nn = 0; nn < mAsset->nodes.Size(); ++nn) {
|
||||
Ref<Node> curNode = mAsset->nodes.Get(nn);
|
||||
|
||||
|
@ -1343,7 +1435,7 @@ void glTF2Exporter::MergeMeshes() {
|
|||
}
|
||||
}
|
||||
|
||||
//since we were looping backwards, reverse the order of merged primitives to their original order
|
||||
// since we were looping backwards, reverse the order of merged primitives to their original order
|
||||
std::reverse(firstMesh->primitives.begin() + 1, firstMesh->primitives.end());
|
||||
}
|
||||
}
|
||||
|
@ -1386,6 +1478,8 @@ unsigned int glTF2Exporter::ExportNode(const aiNode *n, Ref<Node> &parent) {
|
|||
node->parent = parent;
|
||||
node->name = name;
|
||||
|
||||
ExportNodeExtras(n->mMetaData, node->extras);
|
||||
|
||||
if (!n->mTransformation.IsIdentity()) {
|
||||
if (mScene->mNumAnimations > 0 || (mProperties && mProperties->HasPropertyBool("GLTF2_NODE_IN_TRS"))) {
|
||||
aiQuaternion quaternion;
|
||||
|
@ -1468,9 +1562,9 @@ inline void ExtractTranslationSampler(Asset &asset, std::string &animId, Ref<Buf
|
|||
const aiVectorKey &key = nodeChannel->mPositionKeys[i];
|
||||
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
|
||||
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
|
||||
values[(i * 3) + 0] = (ai_real) key.mValue.x;
|
||||
values[(i * 3) + 1] = (ai_real) key.mValue.y;
|
||||
values[(i * 3) + 2] = (ai_real) key.mValue.z;
|
||||
values[(i * 3) + 0] = (ai_real)key.mValue.x;
|
||||
values[(i * 3) + 1] = (ai_real)key.mValue.y;
|
||||
values[(i * 3) + 2] = (ai_real)key.mValue.z;
|
||||
}
|
||||
|
||||
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);
|
||||
|
@ -1487,9 +1581,9 @@ inline void ExtractScaleSampler(Asset &asset, std::string &animId, Ref<Buffer> &
|
|||
const aiVectorKey &key = nodeChannel->mScalingKeys[i];
|
||||
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
|
||||
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
|
||||
values[(i * 3) + 0] = (ai_real) key.mValue.x;
|
||||
values[(i * 3) + 1] = (ai_real) key.mValue.y;
|
||||
values[(i * 3) + 2] = (ai_real) key.mValue.z;
|
||||
values[(i * 3) + 0] = (ai_real)key.mValue.x;
|
||||
values[(i * 3) + 1] = (ai_real)key.mValue.y;
|
||||
values[(i * 3) + 2] = (ai_real)key.mValue.z;
|
||||
}
|
||||
|
||||
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);
|
||||
|
@ -1506,10 +1600,10 @@ inline void ExtractRotationSampler(Asset &asset, std::string &animId, Ref<Buffer
|
|||
const aiQuatKey &key = nodeChannel->mRotationKeys[i];
|
||||
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
|
||||
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
|
||||
values[(i * 4) + 0] = (ai_real) key.mValue.x;
|
||||
values[(i * 4) + 1] = (ai_real) key.mValue.y;
|
||||
values[(i * 4) + 2] = (ai_real) key.mValue.z;
|
||||
values[(i * 4) + 3] = (ai_real) key.mValue.w;
|
||||
values[(i * 4) + 0] = (ai_real)key.mValue.x;
|
||||
values[(i * 4) + 1] = (ai_real)key.mValue.y;
|
||||
values[(i * 4) + 2] = (ai_real)key.mValue.z;
|
||||
values[(i * 4) + 3] = (ai_real)key.mValue.w;
|
||||
}
|
||||
|
||||
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);
|
||||
|
|
|
@ -100,8 +100,6 @@ glTF2Importer::glTF2Importer() :
|
|||
// empty
|
||||
}
|
||||
|
||||
glTF2Importer::~glTF2Importer() = default;
|
||||
|
||||
const aiImporterDesc *glTF2Importer::GetInfo() const {
|
||||
return &desc;
|
||||
}
|
||||
|
@ -114,7 +112,11 @@ bool glTF2Importer::CanRead(const std::string &filename, IOSystem *pIOHandler, b
|
|||
|
||||
if (pIOHandler) {
|
||||
glTF2::Asset asset(pIOHandler);
|
||||
return asset.CanRead(filename, extension == "glb");
|
||||
return asset.CanRead(
|
||||
filename,
|
||||
CheckMagicToken(
|
||||
pIOHandler, filename, AI_GLB_MAGIC_NUMBER, 1, 0,
|
||||
static_cast<unsigned int>(strlen(AI_GLB_MAGIC_NUMBER))));
|
||||
}
|
||||
|
||||
return false;
|
||||
|
@ -232,7 +234,8 @@ inline void SetMaterialTextureProperty(std::vector<int> &embeddedTexIdxs, Asset
|
|||
SetMaterialTextureProperty(embeddedTexIdxs, r, (glTF2::TextureInfo)prop, mat, texType, texSlot);
|
||||
|
||||
if (prop.texture && prop.texture->source) {
|
||||
mat->AddProperty(&prop.strength, 1, AI_MATKEY_GLTF_TEXTURE_STRENGTH(texType, texSlot));
|
||||
std::string textureStrengthKey = std::string(_AI_MATKEY_TEXTURE_BASE) + "." + "strength";
|
||||
mat->AddProperty(&prop.strength, 1, textureStrengthKey.c_str(), texType, texSlot);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -443,10 +446,10 @@ static inline bool CheckValidFacesIndices(aiFace *faces, unsigned nFaces, unsign
|
|||
#endif // ASSIMP_BUILD_DEBUG
|
||||
|
||||
template <typename T>
|
||||
aiColor4D *GetVertexColorsForType(Ref<Accessor> input) {
|
||||
aiColor4D *GetVertexColorsForType(Ref<Accessor> input, std::vector<unsigned int> *vertexRemappingTable) {
|
||||
constexpr float max = std::numeric_limits<T>::max();
|
||||
aiColor4t<T> *colors;
|
||||
input->ExtractData(colors);
|
||||
input->ExtractData(colors, vertexRemappingTable);
|
||||
auto output = new aiColor4D[input->count];
|
||||
for (size_t i = 0; i < input->count; i++) {
|
||||
output[i] = aiColor4D(
|
||||
|
@ -461,20 +464,26 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) {
|
|||
ASSIMP_LOG_DEBUG("Importing ", r.meshes.Size(), " meshes");
|
||||
std::vector<std::unique_ptr<aiMesh>> meshes;
|
||||
|
||||
unsigned int k = 0;
|
||||
meshOffsets.clear();
|
||||
meshOffsets.reserve(r.meshes.Size() + 1);
|
||||
mVertexRemappingTables.clear();
|
||||
|
||||
// Count the number of aiMeshes
|
||||
unsigned int num_aiMeshes = 0;
|
||||
for (unsigned int m = 0; m < r.meshes.Size(); ++m) {
|
||||
meshOffsets.push_back(num_aiMeshes);
|
||||
num_aiMeshes += unsigned(r.meshes[m].primitives.size());
|
||||
}
|
||||
meshOffsets.push_back(num_aiMeshes); // add a last element so we can always do meshOffsets[n+1] - meshOffsets[n]
|
||||
|
||||
std::vector<unsigned int> usedVertexIndices;
|
||||
std::vector<unsigned int> reverseMappingIndices;
|
||||
std::vector<unsigned int> indexBuffer;
|
||||
meshes.reserve(num_aiMeshes);
|
||||
mVertexRemappingTables.resize(num_aiMeshes);
|
||||
|
||||
for (unsigned int m = 0; m < r.meshes.Size(); ++m) {
|
||||
Mesh &mesh = r.meshes[m];
|
||||
|
||||
meshOffsets.push_back(k);
|
||||
k += unsigned(mesh.primitives.size());
|
||||
|
||||
for (unsigned int p = 0; p < mesh.primitives.size(); ++p) {
|
||||
Mesh::Primitive &prim = mesh.primitives[p];
|
||||
|
||||
|
@ -488,14 +497,14 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) {
|
|||
|
||||
// Extract used vertices:
|
||||
bool useIndexBuffer = prim.indices;
|
||||
std::vector<unsigned int>* vertexRemappingTable = nullptr;
|
||||
std::vector<unsigned int> *vertexRemappingTable = nullptr;
|
||||
|
||||
if (useIndexBuffer) {
|
||||
size_t count = prim.indices->count;
|
||||
indexBuffer.resize(count);
|
||||
usedVertexIndices.clear();
|
||||
reverseMappingIndices.clear();
|
||||
usedVertexIndices.reserve(count / 3); // this is a very rough heuristic to reduce re-allocations
|
||||
vertexRemappingTable = &usedVertexIndices;
|
||||
vertexRemappingTable = &mVertexRemappingTables[meshes.size()];
|
||||
vertexRemappingTable->reserve(count / 3); // this is a very rough heuristic to reduce re-allocations
|
||||
Accessor::Indexer data = prim.indices->GetIndexer();
|
||||
if (!data.IsValid()) {
|
||||
throw DeadlyImportError("GLTF: Invalid accessor without data in mesh ", getContextForErrorMessages(mesh.id, mesh.name));
|
||||
|
@ -515,8 +524,8 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) {
|
|||
reverseMappingIndices.resize(index + 1, unusedIndex);
|
||||
}
|
||||
if (reverseMappingIndices[index] == unusedIndex) {
|
||||
reverseMappingIndices[index] = static_cast<unsigned int>(usedVertexIndices.size());
|
||||
usedVertexIndices.push_back(index);
|
||||
reverseMappingIndices[index] = static_cast<unsigned int>(vertexRemappingTable->size());
|
||||
vertexRemappingTable->push_back(index);
|
||||
}
|
||||
indexBuffer[i] = reverseMappingIndices[index];
|
||||
}
|
||||
|
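
The remapping built above compacts each primitive to the vertices it actually references: walk the index buffer once, give every vertex a new index the first time it is seen, and rewrite the indices accordingly. The same logic in isolation:

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // remapTable maps new -> old vertex index; newIndices is the rewritten index buffer.
    void BuildRemap(const std::vector<std::uint32_t> &indices,
                    std::vector<std::uint32_t> &remapTable,
                    std::vector<std::uint32_t> &newIndices) {
        const std::uint32_t unused = std::numeric_limits<std::uint32_t>::max();
        std::vector<std::uint32_t> reverse;   // old -> new vertex index
        newIndices.resize(indices.size());
        for (std::size_t i = 0; i < indices.size(); ++i) {
            const std::uint32_t oldIndex = indices[i];
            if (oldIndex >= reverse.size()) {
                reverse.resize(oldIndex + 1, unused);
            }
            if (reverse[oldIndex] == unused) {
                reverse[oldIndex] = static_cast<std::uint32_t>(remapTable.size());
                remapTable.push_back(oldIndex);
            }
            newIndices[i] = reverse[oldIndex];
        }
    }
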
@ -597,9 +606,9 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) {
|
|||
attr.color[c]->ExtractData(aim->mColors[c], vertexRemappingTable);
|
||||
} else {
|
||||
if (componentType == glTF2::ComponentType_UNSIGNED_BYTE) {
|
||||
aim->mColors[c] = GetVertexColorsForType<unsigned char>(attr.color[c]);
|
||||
aim->mColors[c] = GetVertexColorsForType<unsigned char>(attr.color[c], vertexRemappingTable);
|
||||
} else if (componentType == glTF2::ComponentType_UNSIGNED_SHORT) {
|
||||
aim->mColors[c] = GetVertexColorsForType<unsigned short>(attr.color[c]);
|
||||
aim->mColors[c] = GetVertexColorsForType<unsigned short>(attr.color[c], vertexRemappingTable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -875,8 +884,6 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) {
|
|||
}
|
||||
}
|
||||
|
||||
meshOffsets.push_back(k);
|
||||
|
||||
CopyVector(meshes, mScene->mMeshes, mScene->mNumMeshes);
|
||||
}
|
||||
|
||||
|
@ -1009,7 +1016,8 @@ static void GetNodeTransform(aiMatrix4x4 &matrix, const glTF2::Node &node) {
|
|||
}
|
||||
}
|
||||
|
||||
static void BuildVertexWeightMapping(Mesh::Primitive &primitive, std::vector<std::vector<aiVertexWeight>> &map) {
|
||||
static void BuildVertexWeightMapping(Mesh::Primitive &primitive, std::vector<std::vector<aiVertexWeight>> &map, std::vector<unsigned int>* vertexRemappingTablePtr) {
|
||||
|
||||
Mesh::Primitive::Attributes &attr = primitive.attributes;
|
||||
if (attr.weight.empty() || attr.joint.empty()) {
|
||||
return;
|
||||
|
@ -1018,14 +1026,14 @@ static void BuildVertexWeightMapping(Mesh::Primitive &primitive, std::vector<std
|
|||
return;
|
||||
}
|
||||
|
||||
size_t num_vertices = attr.weight[0]->count;
|
||||
size_t num_vertices = 0;
|
||||
|
||||
struct Weights {
|
||||
float values[4];
|
||||
};
|
||||
Weights **weights = new Weights*[attr.weight.size()];
|
||||
for (size_t w = 0; w < attr.weight.size(); ++w) {
|
||||
attr.weight[w]->ExtractData(weights[w]);
|
||||
num_vertices = attr.weight[w]->ExtractData(weights[w], vertexRemappingTablePtr);
|
||||
}
|
||||
|
||||
struct Indices8 {
|
||||
|
@ -1039,12 +1047,12 @@ static void BuildVertexWeightMapping(Mesh::Primitive &primitive, std::vector<std
|
|||
if (attr.joint[0]->GetElementSize() == 4) {
|
||||
indices8 = new Indices8*[attr.joint.size()];
|
||||
for (size_t j = 0; j < attr.joint.size(); ++j) {
|
||||
attr.joint[j]->ExtractData(indices8[j]);
|
||||
attr.joint[j]->ExtractData(indices8[j], vertexRemappingTablePtr);
|
||||
}
|
||||
} else {
|
||||
indices16 = new Indices16 *[attr.joint.size()];
|
||||
for (size_t j = 0; j < attr.joint.size(); ++j) {
|
||||
attr.joint[j]->ExtractData(indices16[j]);
|
||||
attr.joint[j]->ExtractData(indices16[j], vertexRemappingTablePtr);
|
||||
}
|
||||
}
|
||||
//
|
||||
|
@ -1103,15 +1111,13 @@ void ParseExtensions(aiMetadata *metadata, const CustomExtension &extension) {
|
|||
}
|
||||
}
|
||||
|
||||
void ParseExtras(aiMetadata *metadata, const CustomExtension &extension) {
|
||||
if (extension.mValues.isPresent) {
|
||||
for (auto const &subExtension : extension.mValues.value) {
|
||||
ParseExtensions(metadata, subExtension);
|
||||
}
|
||||
void ParseExtras(aiMetadata* metadata, const Extras& extras) {
|
||||
for (auto const &value : extras.mValues) {
|
||||
ParseExtensions(metadata, value);
|
||||
}
|
||||
}
|
||||
|
||||
aiNode *ImportNode(aiScene *pScene, glTF2::Asset &r, std::vector<unsigned int> &meshOffsets, glTF2::Ref<glTF2::Node> &ptr) {
|
||||
aiNode *glTF2Importer::ImportNode(glTF2::Asset &r, glTF2::Ref<glTF2::Node> &ptr) {
|
||||
Node &node = *ptr;
|
||||
|
||||
aiNode *ainode = new aiNode(GetNodeName(node));
|
||||
|
@ -1123,18 +1129,18 @@ aiNode *ImportNode(aiScene *pScene, glTF2::Asset &r, std::vector<unsigned int> &
|
|||
std::fill(ainode->mChildren, ainode->mChildren + ainode->mNumChildren, nullptr);
|
||||
|
||||
for (unsigned int i = 0; i < ainode->mNumChildren; ++i) {
|
||||
aiNode *child = ImportNode(pScene, r, meshOffsets, node.children[i]);
|
||||
aiNode *child = ImportNode(r, node.children[i]);
|
||||
child->mParent = ainode;
|
||||
ainode->mChildren[i] = child;
|
||||
}
|
||||
}
|
||||
|
||||
if (node.customExtensions || node.extras) {
|
||||
if (node.customExtensions || node.extras.HasExtras()) {
|
||||
ainode->mMetaData = new aiMetadata;
|
||||
if (node.customExtensions) {
|
||||
ParseExtensions(ainode->mMetaData, node.customExtensions);
|
||||
}
|
||||
if (node.extras) {
|
||||
if (node.extras.HasExtras()) {
|
||||
ParseExtras(ainode->mMetaData, node.extras);
|
||||
}
|
||||
}
|
||||
|
@ -1156,11 +1162,13 @@ aiNode *ImportNode(aiScene *pScene, glTF2::Asset &r, std::vector<unsigned int> &
|
|||
|
||||
if (node.skin) {
|
||||
for (int primitiveNo = 0; primitiveNo < count; ++primitiveNo) {
|
||||
aiMesh *mesh = pScene->mMeshes[meshOffsets[mesh_idx] + primitiveNo];
|
||||
unsigned int aiMeshIdx = meshOffsets[mesh_idx] + primitiveNo;
|
||||
aiMesh *mesh = mScene->mMeshes[aiMeshIdx];
|
||||
unsigned int numBones = static_cast<unsigned int>(node.skin->jointNames.size());
|
||||
std::vector<unsigned int> *vertexRemappingTablePtr = mVertexRemappingTables[aiMeshIdx].empty() ? nullptr : &mVertexRemappingTables[aiMeshIdx];
|
||||
|
||||
std::vector<std::vector<aiVertexWeight>> weighting(numBones);
|
||||
BuildVertexWeightMapping(node.meshes[0]->primitives[primitiveNo], weighting);
|
||||
BuildVertexWeightMapping(node.meshes[0]->primitives[primitiveNo], weighting, vertexRemappingTablePtr);
|
||||
|
||||
mesh->mNumBones = static_cast<unsigned int>(numBones);
|
||||
mesh->mBones = new aiBone *[mesh->mNumBones];
|
||||
|
@ -1177,7 +1185,7 @@ aiNode *ImportNode(aiScene *pScene, glTF2::Asset &r, std::vector<unsigned int> &
|
|||
// mapping which makes things doubly-slow.
|
||||
|
||||
mat4 *pbindMatrices = nullptr;
|
||||
node.skin->inverseBindMatrices->ExtractData(pbindMatrices);
|
||||
node.skin->inverseBindMatrices->ExtractData(pbindMatrices, nullptr);
|
||||
|
||||
for (uint32_t i = 0; i < numBones; ++i) {
|
||||
const std::vector<aiVertexWeight> &weights = weighting[i];
|
||||
|
@ -1223,11 +1231,11 @@ aiNode *ImportNode(aiScene *pScene, glTF2::Asset &r, std::vector<unsigned int> &
|
|||
}
|
||||
|
||||
if (node.camera) {
|
||||
pScene->mCameras[node.camera.GetIndex()]->mName = ainode->mName;
|
||||
mScene->mCameras[node.camera.GetIndex()]->mName = ainode->mName;
|
||||
}
|
||||
|
||||
if (node.light) {
|
||||
pScene->mLights[node.light.GetIndex()]->mName = ainode->mName;
|
||||
mScene->mLights[node.light.GetIndex()]->mName = ainode->mName;
|
||||
|
||||
// range is optional - see https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_lights_punctual
|
||||
// it is added to meta data of parent node, because there is no other place to put it
|
||||
|
@ -1259,7 +1267,7 @@ void glTF2Importer::ImportNodes(glTF2::Asset &r) {
|
|||
// The root nodes
|
||||
unsigned int numRootNodes = unsigned(rootNodes.size());
|
||||
if (numRootNodes == 1) { // a single root node: use it
|
||||
mScene->mRootNode = ImportNode(mScene, r, meshOffsets, rootNodes[0]);
|
||||
mScene->mRootNode = ImportNode(r, rootNodes[0]);
|
||||
} else if (numRootNodes > 1) { // more than one root node: create a fake root
|
||||
aiNode *root = mScene->mRootNode = new aiNode("ROOT");
|
||||
|
||||
|
@ -1267,7 +1275,7 @@ void glTF2Importer::ImportNodes(glTF2::Asset &r) {
|
|||
std::fill(root->mChildren, root->mChildren + numRootNodes, nullptr);
|
||||
|
||||
for (unsigned int i = 0; i < numRootNodes; ++i) {
|
||||
aiNode *node = ImportNode(mScene, r, meshOffsets, rootNodes[i]);
|
||||
aiNode *node = ImportNode(r, rootNodes[i]);
|
||||
node->mParent = root;
|
||||
root->mChildren[root->mNumChildren++] = node;
|
||||
}
|
||||
|
@ -1668,13 +1676,17 @@ void glTF2Importer::InternReadFile(const std::string &pFile, aiScene *pScene, IO
|
|||
|
||||
// clean all member arrays
|
||||
meshOffsets.clear();
|
||||
mVertexRemappingTables.clear();
|
||||
mEmbeddedTexIdxs.clear();
|
||||
|
||||
this->mScene = pScene;
|
||||
|
||||
// read the asset file
|
||||
glTF2::Asset asset(pIOHandler, static_cast<rapidjson::IRemoteSchemaDocumentProvider *>(mSchemaDocumentProvider));
|
||||
asset.Load(pFile, GetExtension(pFile) == "glb");
|
||||
asset.Load(pFile,
|
||||
CheckMagicToken(
|
||||
pIOHandler, pFile, AI_GLB_MAGIC_NUMBER, 1, 0,
|
||||
static_cast<unsigned int>(strlen(AI_GLB_MAGIC_NUMBER))));
|
||||
if (asset.scene) {
|
||||
pScene->mName = asset.scene->name;
|
||||
}
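Instead of trusting the file extension, the loader now asks CheckMagicToken whether the file starts with the GLB magic. A hedged sketch of that decision, assuming AI_GLB_MAGIC_NUMBER expands to the four ASCII bytes "glTF" as in the binary glTF container spec:

#include <cstdio>
#include <cstring>

// Returns true when the first four bytes match the GLB container magic.
static bool LooksLikeGlb(const char *path) {
    char magic[4] = {};
    if (FILE *f = std::fopen(path, "rb")) {
        const size_t got = std::fread(magic, 1, sizeof(magic), f);
        std::fclose(f);
        return got == sizeof(magic) && std::memcmp(magic, "glTF", sizeof(magic)) == 0;
    }
    return false;
}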
|
||||
|
|
|
@ -43,6 +43,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#define AI_GLTF2IMPORTER_H_INC
|
||||
|
||||
#include <assimp/BaseImporter.h>
|
||||
#include <AssetLib/glTF2/glTF2Asset.h>
|
||||
|
||||
struct aiNode;
|
||||
|
||||
|
@ -59,7 +60,7 @@ namespace Assimp {
|
|||
class glTF2Importer : public BaseImporter {
|
||||
public:
|
||||
glTF2Importer();
|
||||
~glTF2Importer() override;
|
||||
~glTF2Importer() override = default;
|
||||
bool CanRead(const std::string &pFile, IOSystem *pIOHandler, bool checkSig) const override;
|
||||
|
||||
protected:
|
||||
|
@ -76,10 +77,12 @@ private:
|
|||
void ImportNodes(glTF2::Asset &a);
|
||||
void ImportAnimations(glTF2::Asset &a);
|
||||
void ImportCommonMetadata(glTF2::Asset &a);
|
||||
aiNode *ImportNode(glTF2::Asset &r, glTF2::Ref<glTF2::Node> &ptr);
|
||||
|
||||
private:
|
||||
std::vector<unsigned int> meshOffsets;
|
||||
std::vector<int> mEmbeddedTexIdxs;
|
||||
std::vector<std::vector<unsigned int>> mVertexRemappingTables; // for each converted aiMesh in the scene, it stores a list of vertices that are actually used
|
||||
aiScene *mScene;
|
||||
|
||||
/// An instance of rapidjson::IRemoteSchemaDocumentProvider
|
||||
|
|
|
@ -927,22 +927,22 @@ ELSE()
|
|||
ENDIF()
|
||||
|
||||
# polyclipping
|
||||
IF(ASSIMP_HUNTER_ENABLED)
|
||||
hunter_add_package(polyclipping)
|
||||
find_package(polyclipping CONFIG REQUIRED)
|
||||
ELSE()
|
||||
#IF(ASSIMP_HUNTER_ENABLED)
|
||||
# hunter_add_package(polyclipping)
|
||||
# find_package(polyclipping CONFIG REQUIRED)
|
||||
#ELSE()
|
||||
SET( Clipper_SRCS
|
||||
../contrib/clipper/clipper.hpp
|
||||
../contrib/clipper/clipper.cpp
|
||||
)
|
||||
SOURCE_GROUP( Contrib\\Clipper FILES ${Clipper_SRCS})
|
||||
ENDIF()
|
||||
#ENDIF()
|
||||
|
||||
# poly2tri
|
||||
IF(ASSIMP_HUNTER_ENABLED)
|
||||
hunter_add_package(poly2tri)
|
||||
find_package(poly2tri CONFIG REQUIRED)
|
||||
ELSE()
|
||||
#IF(ASSIMP_HUNTER_ENABLED)
|
||||
# hunter_add_package(poly2tri)
|
||||
# find_package(poly2tri CONFIG REQUIRED)
|
||||
#ELSE()
|
||||
SET( Poly2Tri_SRCS
|
||||
../contrib/poly2tri/poly2tri/common/shapes.cc
|
||||
../contrib/poly2tri/poly2tri/common/shapes.h
|
||||
|
@ -957,7 +957,7 @@ ELSE()
|
|||
../contrib/poly2tri/poly2tri/sweep/sweep_context.h
|
||||
)
|
||||
SOURCE_GROUP( Contrib\\Poly2Tri FILES ${Poly2Tri_SRCS})
|
||||
ENDIF()
|
||||
#ENDIF()
|
||||
|
||||
# minizip/unzip
|
||||
IF(ASSIMP_HUNTER_ENABLED)
|
||||
|
@ -965,7 +965,6 @@ IF(ASSIMP_HUNTER_ENABLED)
|
|||
find_package(minizip CONFIG REQUIRED)
|
||||
ELSE()
|
||||
SET( unzip_SRCS
|
||||
../contrib/unzip/crypt.c
|
||||
../contrib/unzip/crypt.h
|
||||
../contrib/unzip/ioapi.c
|
||||
../contrib/unzip/ioapi.h
|
||||
|
@ -1268,9 +1267,9 @@ TARGET_INCLUDE_DIRECTORIES ( assimp PUBLIC
|
|||
IF(ASSIMP_HUNTER_ENABLED)
|
||||
TARGET_LINK_LIBRARIES(assimp
|
||||
PUBLIC
|
||||
polyclipping::polyclipping
|
||||
#polyclipping::polyclipping
|
||||
openddlparser::openddl_parser
|
||||
poly2tri::poly2tri
|
||||
#poly2tri::poly2tri
|
||||
minizip::minizip
|
||||
ZLIB::zlib
|
||||
RapidJSON::rapidjson
|
||||
|
@ -1419,25 +1418,29 @@ if(MSVC AND ASSIMP_INSTALL_PDB)
|
|||
COMPILE_PDB_NAME assimp${LIBRARY_SUFFIX}
|
||||
COMPILE_PDB_NAME_DEBUG assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}
|
||||
)
|
||||
ENDIF()
|
||||
|
||||
IF(CMAKE_GENERATOR MATCHES "^Visual Studio")
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/Debug/assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS Debug
|
||||
)
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/RelWithDebInfo/assimp${LIBRARY_SUFFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS RelWithDebInfo
|
||||
)
|
||||
IF(GENERATOR_IS_MULTI_CONFIG)
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/Debug/assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS Debug
|
||||
)
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/RelWithDebInfo/assimp${LIBRARY_SUFFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS RelWithDebInfo
|
||||
)
|
||||
ELSE()
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS Debug
|
||||
)
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/assimp${LIBRARY_SUFFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS RelWithDebInfo
|
||||
)
|
||||
ENDIF()
|
||||
ELSE()
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}.pdb
|
||||
install(FILES $<TARGET_PDB_FILE:assimp>
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS Debug
|
||||
)
|
||||
install(FILES ${Assimp_BINARY_DIR}/code/assimp${LIBRARY_SUFFIX}.pdb
|
||||
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
|
||||
CONFIGURATIONS RelWithDebInfo
|
||||
)
|
||||
ENDIF()
|
||||
ENDIF ()
|
||||
|
|
|
@ -59,6 +59,31 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <memory>
|
||||
#include <sstream>
|
||||
|
||||
namespace {
|
||||
// Checks whether the passed string is a gcs version.
|
||||
bool IsGcsVersion(const std::string &s) {
|
||||
if (s.empty()) return false;
|
||||
return std::all_of(s.cbegin(), s.cend(), [](const char c) {
|
||||
// gcs only permits numeric characters.
|
||||
return std::isdigit(static_cast<int>(c));
|
||||
});
|
||||
}
|
||||
|
||||
// Removes a possible version hash from a filename, as found for example in
|
||||
// gcs uris (e.g. `gs://bucket/model.glb#1234`), see also
|
||||
// https://github.com/GoogleCloudPlatform/gsutil/blob/c80f329bc3c4011236c78ce8910988773b2606cb/gslib/storage_url.py#L39.
|
||||
std::string StripVersionHash(const std::string &filename) {
|
||||
const std::string::size_type pos = filename.find_last_of('#');
|
||||
// Only strip if the hash is behind a possible file extension and the part
|
||||
// behind the hash is a version string.
|
||||
if (pos != std::string::npos && pos > filename.find_last_of('.') &&
|
||||
IsGcsVersion(filename.substr(pos + 1))) {
|
||||
return filename.substr(0, pos);
|
||||
}
|
||||
return filename;
|
||||
}
|
||||
} // namespace
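A few worked examples for the two helpers above (illustrative only; they sit in an anonymous namespace, so calls like these would have to live in the same translation unit):

#include <cassert>

// numeric hash behind the extension -> stripped
assert(StripVersionHash("gs://bucket/model.glb#1234") == "gs://bucket/model.glb");
// suffix is not purely numeric -> left untouched
assert(StripVersionHash("model.glb#backup") == "model.glb#backup");
// '#' appears before the last '.' -> left untouched
assert(StripVersionHash("archive#1.glb") == "archive#1.glb");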
|
||||
|
||||
using namespace Assimp;
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
@ -158,7 +183,7 @@ void BaseImporter::GetExtensionList(std::set<std::string> &extensions) {
|
|||
std::size_t numTokens,
|
||||
unsigned int searchBytes /* = 200 */,
|
||||
bool tokensSol /* false */,
|
||||
bool noAlphaBeforeTokens /* false */) {
|
||||
bool noGraphBeforeTokens /* false */) {
|
||||
ai_assert(nullptr != tokens);
|
||||
ai_assert(0 != numTokens);
|
||||
ai_assert(0 != searchBytes);
|
||||
|
@ -207,8 +232,9 @@ void BaseImporter::GetExtensionList(std::set<std::string> &extensions) {
|
|||
continue;
|
||||
}
|
||||
// We need to make sure that we didn't accidentally identify the end of another token as our token,
|
||||
// e.g. in a previous version the "gltf " present in some gltf files was detected as "f "
|
||||
if (noAlphaBeforeTokens && (r != buffer && isalpha(static_cast<unsigned char>(r[-1])))) {
|
||||
// e.g. in a previous version the "gltf " present in some gltf files was detected as "f ", or a
|
||||
// Blender-exported glb file containing "Khronos glTF Blender I/O " was detected as "o "
|
||||
if (noGraphBeforeTokens && (r != buffer && isgraph(static_cast<unsigned char>(r[-1])))) {
|
||||
continue;
|
||||
}
|
||||
// We got a match, either we don't care where it is, or it happens to
|
||||
|
@ -229,33 +255,38 @@ void BaseImporter::GetExtensionList(std::set<std::string> &extensions) {
|
|||
const char *ext0,
|
||||
const char *ext1,
|
||||
const char *ext2) {
|
||||
std::string::size_type pos = pFile.find_last_of('.');
|
||||
|
||||
// no file extension - can't read
|
||||
if (pos == std::string::npos) {
|
||||
return false;
|
||||
std::set<std::string> extensions;
|
||||
for (const char* ext : {ext0, ext1, ext2}) {
|
||||
if (ext == nullptr) continue;
|
||||
extensions.emplace(ext);
|
||||
}
|
||||
return HasExtension(pFile, extensions);
|
||||
}
|
||||
|
||||
const char *ext_real = &pFile[pos + 1];
|
||||
if (!ASSIMP_stricmp(ext_real, ext0)) {
|
||||
return true;
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Check for file extension
|
||||
/*static*/ bool BaseImporter::HasExtension(const std::string &pFile, const std::set<std::string> &extensions) {
|
||||
const std::string file = StripVersionHash(pFile);
|
||||
// CAUTION: Do not just search for the extension!
|
||||
// GetExtension() returns the part after the *last* dot, but some extensions
|
||||
// have dots inside them, e.g. ogre.mesh.xml. Compare the entire end of the
|
||||
// string.
|
||||
for (const std::string& ext : extensions) {
|
||||
// Yay for C++<20 not having std::string::ends_with()
|
||||
const std::string dotExt = "." + ext;
|
||||
if (dotExt.length() > file.length()) continue;
|
||||
// Possible optimization: Fetch the lowercase filename!
|
||||
if (0 == ASSIMP_stricmp(file.c_str() + file.length() - dotExt.length(), dotExt.c_str())) {
|
||||
return true;
|
||||
}
|
||||
}
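Illustrative calls showing what the rewritten check accepts; this assumes HasExtension is publicly accessible, as its use from Importer::ReadFile further below suggests:

#include <assimp/BaseImporter.h>
#include <cassert>

void CheckExtensionMatching() {
    using Assimp::BaseImporter;
    // Multi-dot extensions now match against the end of the file name.
    assert(BaseImporter::HasExtension("scene.ogre.mesh.xml", { "mesh.xml" }));
    // The comparison is case-insensitive and a trailing gcs version hash is ignored.
    assert(BaseImporter::HasExtension("MODEL.GLB#1234", { "glb" }));
    assert(!BaseImporter::HasExtension("model.gltf", { "glb" }));
}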
|
||||
|
||||
// check for other, optional, file extensions
|
||||
if (ext1 && !ASSIMP_stricmp(ext_real, ext1)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (ext2 && !ASSIMP_stricmp(ext_real, ext2)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Get file extension from path
|
||||
std::string BaseImporter::GetExtension(const std::string &file) {
|
||||
std::string BaseImporter::GetExtension(const std::string &pFile) {
|
||||
const std::string file = StripVersionHash(pFile);
|
||||
std::string::size_type pos = file.find_last_of('.');
|
||||
|
||||
// no file extension at all
|
||||
|
@ -281,12 +312,7 @@ std::string BaseImporter::GetExtension(const std::string &file) {
|
|||
if (!pIOHandler) {
|
||||
return false;
|
||||
}
|
||||
union {
|
||||
const char *magic;
|
||||
const uint16_t *magic_u16;
|
||||
const uint32_t *magic_u32;
|
||||
};
|
||||
magic = reinterpret_cast<const char *>(_magic);
|
||||
const char *magic = reinterpret_cast<const char *>(_magic);
|
||||
std::unique_ptr<IOStream> pStream(pIOHandler->Open(pFile));
|
||||
if (pStream) {
|
||||
|
||||
|
@ -308,15 +334,15 @@ std::string BaseImporter::GetExtension(const std::string &file) {
|
|||
// that's just for convenience, the chance that we cause conflicts
|
||||
// is quite low and it can save some lines and prevent nasty bugs
|
||||
if (2 == size) {
|
||||
uint16_t rev = *magic_u16;
|
||||
ByteSwap::Swap(&rev);
|
||||
if (data_u16[0] == *magic_u16 || data_u16[0] == rev) {
|
||||
uint16_t magic_u16;
|
||||
memcpy(&magic_u16, magic, 2);
|
||||
if (data_u16[0] == magic_u16 || data_u16[0] == ByteSwap::Swapped(magic_u16)) {
|
||||
return true;
|
||||
}
|
||||
} else if (4 == size) {
|
||||
uint32_t rev = *magic_u32;
|
||||
ByteSwap::Swap(&rev);
|
||||
if (data_u32[0] == *magic_u32 || data_u32[0] == rev) {
|
||||
uint32_t magic_u32;
|
||||
memcpy(&magic_u32, magic, 4);
|
||||
if (data_u32[0] == magic_u32 || data_u32[0] == ByteSwap::Swapped(magic_u32)) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -297,7 +297,7 @@ private:
|
|||
}
|
||||
|
||||
const char separator = getOsSeparator();
|
||||
for (it = in.begin(); it != in.end(); ++it) {
|
||||
for (it = in.begin(); it < in.end(); ++it) {
|
||||
const size_t remaining = std::distance(in.end(), it);
|
||||
// Exclude :// and \\, which remain untouched.
|
||||
// https://sourceforge.net/tracker/?func=detail&aid=3031725&group_id=226462&atid=1067632
|
||||
|
|
|
@ -637,24 +637,10 @@ const aiScene* Importer::ReadFile( const char* _pFile, unsigned int pFlags) {
|
|||
std::set<std::string> extensions;
|
||||
pimpl->mImporter[a]->GetExtensionList(extensions);
|
||||
|
||||
// CAUTION: Do not just search for the extension!
|
||||
// GetExtension() returns the part after the *last* dot, but some extensions have dots
|
||||
// inside them, e.g. ogre.mesh.xml. Compare the entire end of the string.
|
||||
for (std::set<std::string>::const_iterator it = extensions.cbegin(); it != extensions.cend(); ++it) {
|
||||
|
||||
// Yay for C++<20 not having std::string::ends_with()
|
||||
std::string extension = "." + *it;
|
||||
if (extension.length() <= pFile.length()) {
|
||||
// Possible optimization: Fetch the lowercase filename!
|
||||
if (0 == ASSIMP_stricmp(pFile.c_str() + pFile.length() - extension.length(), extension.c_str())) {
|
||||
ImporterAndIndex candidate = { pimpl->mImporter[a], a };
|
||||
possibleImporters.push_back(candidate);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (BaseImporter::HasExtension(pFile, extensions)) {
|
||||
ImporterAndIndex candidate = { pimpl->mImporter[a], a };
|
||||
possibleImporters.push_back(candidate);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If just one importer supports this extension, pick it and close the case.
|
||||
|
|
|
@ -64,8 +64,14 @@ inline double GetArea2D(const T& v1, const T& v2, const T& v3) {
|
|||
* The function accepts an unconstrained template parameter for use with
|
||||
* both aiVector3D and aiVector2D, but generally ignores the third coordinate.*/
|
||||
template <typename T>
|
||||
inline bool OnLeftSideOfLine2D(const T& p0, const T& p1,const T& p2) {
|
||||
return GetArea2D(p0,p2,p1) > 0;
|
||||
inline int OnLeftSideOfLine2D(const T& p0, const T& p1,const T& p2) {
|
||||
double area = GetArea2D(p0,p2,p1);
|
||||
if(std::abs(area) < ai_epsilon)
|
||||
return 0;
|
||||
else if(area > 0)
|
||||
return 1;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------------
|
||||
|
@ -75,7 +81,10 @@ inline bool OnLeftSideOfLine2D(const T& p0, const T& p1,const T& p2) {
|
|||
template <typename T>
|
||||
inline bool PointInTriangle2D(const T& p0, const T& p1,const T& p2, const T& pp) {
|
||||
// pp should be left side of the three triangle side, by ccw arrow
|
||||
return OnLeftSideOfLine2D(p0, p1, pp) && OnLeftSideOfLine2D(p1, p2, pp) && OnLeftSideOfLine2D(p2, p0, pp);
|
||||
int c1 = OnLeftSideOfLine2D(p0, p1, pp);
|
||||
int c2 = OnLeftSideOfLine2D(p1, p2, pp);
|
||||
int c3 = OnLeftSideOfLine2D(p2, p0, pp);
|
||||
return (c1 >= 0) && (c2 >= 0) && (c3 >= 0);
|
||||
}
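The side test now reports three states instead of two, so points lying exactly on a triangle edge count as inside. A standalone sketch of the idea with a simplified sign convention (not the library's GetArea2D helper):

#include <cmath>
#include <cstdio>

struct V2 { double x, y; };

// 1 = pp left of p0->p1, -1 = right, 0 = (nearly) collinear.
static int SideOfLine(const V2 &p0, const V2 &p1, const V2 &pp) {
    const double cross = (p1.x - p0.x) * (pp.y - p0.y) - (p1.y - p0.y) * (pp.x - p0.x);
    const double eps = 1e-9; // stands in for ai_epsilon
    if (std::abs(cross) < eps) return 0;
    return cross > 0 ? 1 : -1;
}

// For a CCW triangle, "inside" now includes points on an edge (side result 0).
static bool PointInTriangle(const V2 &a, const V2 &b, const V2 &c, const V2 &pp) {
    return SideOfLine(a, b, pp) >= 0 && SideOfLine(b, c, pp) >= 0 && SideOfLine(c, a, pp) >= 0;
}

int main() {
    const V2 a{0, 0}, b{2, 0}, c{0, 2};
    std::printf("%d\n", PointInTriangle(a, b, c, V2{1, 0})); // on edge ab -> 1
    std::printf("%d\n", PointInTriangle(a, b, c, V2{3, 3})); // outside    -> 0
}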
|
||||
|
||||
|
||||
|
@ -110,7 +119,7 @@ inline bool IsCCW(T* in, size_t npoints) {
|
|||
c = std::sqrt(cc);
|
||||
theta = std::acos((bb + cc - aa) / (2 * b * c));
|
||||
|
||||
if (OnLeftSideOfLine2D(in[i],in[i+2],in[i+1])) {
|
||||
if (OnLeftSideOfLine2D(in[i],in[i+2],in[i+1]) == 1) {
|
||||
// if (convex(in[i].x, in[i].y,
|
||||
// in[i+1].x, in[i+1].y,
|
||||
// in[i+2].x, in[i+2].y)) {
|
||||
|
@ -140,7 +149,7 @@ inline bool IsCCW(T* in, size_t npoints) {
|
|||
//if (convex(in[npoints-2].x, in[npoints-2].y,
|
||||
// in[0].x, in[0].y,
|
||||
// in[1].x, in[1].y)) {
|
||||
if (OnLeftSideOfLine2D(in[npoints-2],in[1],in[0])) {
|
||||
if (OnLeftSideOfLine2D(in[npoints-2],in[1],in[0]) == 1) {
|
||||
convex_turn = AI_MATH_PI_F - theta;
|
||||
convex_sum += convex_turn;
|
||||
} else {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
Open Asset Import Library (assimp)
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
Copyright (c) 2006-2023, assimp team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@ -49,10 +49,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
||||
// --------------------------------------------------------------------------------
|
||||
// Legal information string - don't remove this.
|
||||
static const char *LEGAL_INFORMATION =
|
||||
static constexpr char LEGAL_INFORMATION[] =
|
||||
"Open Asset Import Library (Assimp).\n"
|
||||
"A free C/C++ library to import various 3D file formats into applications\n\n"
|
||||
"(c) 2006-2022, Assimp team\n"
|
||||
"(c) 2006-2023, Assimp team\n"
|
||||
"License under the terms and conditions of the 3-clause BSD license\n"
|
||||
"https://www.assimp.org\n";
|
||||
|
||||
|
@ -150,9 +150,11 @@ ASSIMP_API aiScene::~aiScene() {
|
|||
// To make sure we won't crash if the data is invalid it's
|
||||
// much better to check whether both mNumXXX and mXXX are
|
||||
// valid instead of relying on just one of them.
|
||||
if (mNumMeshes && mMeshes)
|
||||
for (unsigned int a = 0; a < mNumMeshes; a++)
|
||||
if (mNumMeshes && mMeshes) {
|
||||
for (unsigned int a = 0; a < mNumMeshes; ++a) {
|
||||
delete mMeshes[a];
|
||||
}
|
||||
}
|
||||
delete[] mMeshes;
|
||||
|
||||
if (mNumMaterials && mMaterials) {
|
||||
|
@ -162,24 +164,32 @@ ASSIMP_API aiScene::~aiScene() {
|
|||
}
|
||||
delete[] mMaterials;
|
||||
|
||||
if (mNumAnimations && mAnimations)
|
||||
for (unsigned int a = 0; a < mNumAnimations; a++)
|
||||
if (mNumAnimations && mAnimations) {
|
||||
for (unsigned int a = 0; a < mNumAnimations; ++a) {
|
||||
delete mAnimations[a];
|
||||
}
|
||||
}
|
||||
delete[] mAnimations;
|
||||
|
||||
if (mNumTextures && mTextures)
|
||||
for (unsigned int a = 0; a < mNumTextures; a++)
|
||||
if (mNumTextures && mTextures) {
|
||||
for (unsigned int a = 0; a < mNumTextures; ++a) {
|
||||
delete mTextures[a];
|
||||
}
|
||||
}
|
||||
delete[] mTextures;
|
||||
|
||||
if (mNumLights && mLights)
|
||||
for (unsigned int a = 0; a < mNumLights; a++)
|
||||
if (mNumLights && mLights) {
|
||||
for (unsigned int a = 0; a < mNumLights; ++a) {
|
||||
delete mLights[a];
|
||||
}
|
||||
}
|
||||
delete[] mLights;
|
||||
|
||||
if (mNumCameras && mCameras)
|
||||
for (unsigned int a = 0; a < mNumCameras; a++)
|
||||
if (mNumCameras && mCameras) {
|
||||
for (unsigned int a = 0; a < mNumCameras; ++a) {
|
||||
delete mCameras[a];
|
||||
}
|
||||
}
|
||||
delete[] mCameras;
|
||||
|
||||
aiMetadata::Dealloc(mMetaData);
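The destructor above repeats the same guarded delete loop six times; a hedged sketch of a helper it could be folded into (not part of the patch, just a design note):

// Delete every element of a (possibly null) pointer array, then the array itself.
template <typename T>
static void DeleteArrayOfPointers(T **&arr, unsigned int &num) {
    if (num && arr) {
        for (unsigned int i = 0; i < num; ++i) {
            delete arr[i];
        }
    }
    delete[] arr;
    arr = nullptr;
    num = 0;
}
// e.g. DeleteArrayOfPointers(mMeshes, mNumMeshes); and likewise for materials,
// animations, textures, lights and cameras.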
|
||||
|
|
|
@ -51,6 +51,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <assimp/material.h>
|
||||
#include <assimp/types.h>
|
||||
#include <assimp/DefaultLogger.hpp>
|
||||
#include <memory>
|
||||
|
||||
using namespace Assimp;
|
||||
|
||||
|
@ -473,7 +474,7 @@ aiReturn aiMaterial::AddBinaryProperty(const void *pInput,
|
|||
}
|
||||
|
||||
// Allocate a new material property
|
||||
aiMaterialProperty *pcNew = new aiMaterialProperty();
|
||||
std::unique_ptr<aiMaterialProperty> pcNew(new aiMaterialProperty());
|
||||
|
||||
// .. and fill it
|
||||
pcNew->mType = pType;
|
||||
|
@ -489,7 +490,7 @@ aiReturn aiMaterial::AddBinaryProperty(const void *pInput,
|
|||
strcpy(pcNew->mKey.data, pKey);
|
||||
|
||||
if (UINT_MAX != iOutIndex) {
|
||||
mProperties[iOutIndex] = pcNew;
|
||||
mProperties[iOutIndex] = pcNew.release();
|
||||
return AI_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -502,7 +503,6 @@ aiReturn aiMaterial::AddBinaryProperty(const void *pInput,
|
|||
try {
|
||||
ppTemp = new aiMaterialProperty *[mNumAllocated];
|
||||
} catch (std::bad_alloc &) {
|
||||
delete pcNew;
|
||||
return AI_OUTOFMEMORY;
|
||||
}
|
||||
|
||||
|
@ -513,7 +513,7 @@ aiReturn aiMaterial::AddBinaryProperty(const void *pInput,
|
|||
mProperties = ppTemp;
|
||||
}
|
||||
// push back ...
|
||||
mProperties[mNumProperties++] = pcNew;
|
||||
mProperties[mNumProperties++] = pcNew.release();
|
||||
|
||||
return AI_SUCCESS;
|
||||
}
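The switch to std::unique_ptr above follows the "own until committed" pattern: the new property stays owned by the smart pointer until it is stored in the raw array, so an exception on the way (such as the reallocation) cannot leak it. A minimal generic sketch of the same pattern, with hypothetical names:

#include <memory>
#include <vector>

struct Prop { int value = 0; };

void AddProp(std::vector<Prop *> &props) {
    std::unique_ptr<Prop> p(new Prop());
    props.push_back(p.get()); // may throw std::bad_alloc; p still owns the object
    p.release();              // give up ownership only once storage succeeded
}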
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
Open Asset Import Library (assimp)
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
Copyright (c) 2006-2023, assimp team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@ -82,6 +82,9 @@ void UpdateMeshReferences(aiNode *node, const std::vector<unsigned int> &meshMap
|
|||
for (unsigned int a = 0; a < node->mNumMeshes; ++a) {
|
||||
|
||||
unsigned int ref = node->mMeshes[a];
|
||||
if (ref >= meshMapping.size())
|
||||
throw DeadlyImportError("Invalid mesh ref");
|
||||
|
||||
if (UINT_MAX != (ref = meshMapping[ref])) {
|
||||
node->mMeshes[out++] = ref;
|
||||
}
|
||||
|
@ -143,7 +146,13 @@ void FindInvalidDataProcess::Execute(aiScene *pScene) {
|
|||
// we need to remove some meshes.
|
||||
// therefore we'll also need to remove all references
|
||||
// to them from the scenegraph
|
||||
UpdateMeshReferences(pScene->mRootNode, meshMapping);
|
||||
try {
|
||||
UpdateMeshReferences(pScene->mRootNode, meshMapping);
|
||||
} catch (const std::exception&) {
|
||||
// fix the real number of meshes otherwise we'll get double free in the scene destructor
|
||||
pScene->mNumMeshes = real;
|
||||
throw;
|
||||
}
|
||||
pScene->mNumMeshes = real;
|
||||
}
|
||||
|
||||
|
@ -264,7 +273,8 @@ void FindInvalidDataProcess::ProcessAnimation(aiAnimation *anim) {
|
|||
void FindInvalidDataProcess::ProcessAnimationChannel(aiNodeAnim *anim) {
|
||||
ai_assert(nullptr != anim);
|
||||
if (anim->mNumPositionKeys == 0 && anim->mNumRotationKeys == 0 && anim->mNumScalingKeys == 0) {
|
||||
ai_assert_entry();
|
||||
ASSIMP_LOG_ERROR("Invalid node anuimation instance detected.");
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -5,8 +5,6 @@ Open Asset Import Library (assimp)
|
|||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use of this software in source and binary forms,
|
||||
|
@ -109,6 +107,7 @@ bool GenVertexNormalsProcess::GenMeshVertexNormals(aiMesh *pMesh, unsigned int m
|
|||
return false;
|
||||
}
|
||||
delete[] pMesh->mNormals;
|
||||
pMesh->mNormals = nullptr;
|
||||
}
|
||||
|
||||
// If the mesh consists of lines and/or points but not of
|
||||
|
|
|
@ -3,9 +3,7 @@
|
|||
Open Asset Import Library (assimp)
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2006-2022, assimp team
|
||||
|
||||
|
||||
Copyright (c) 2006-2023, assimp team
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
@ -59,31 +57,31 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <stdio.h>
|
||||
#include <stack>
|
||||
|
||||
using namespace Assimp;
|
||||
namespace Assimp {
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Constructor to be privately used by Importer
|
||||
ImproveCacheLocalityProcess::ImproveCacheLocalityProcess()
|
||||
: mConfigCacheDepth(PP_ICL_PTCACHE_SIZE) {
|
||||
ImproveCacheLocalityProcess::ImproveCacheLocalityProcess() :
|
||||
mConfigCacheDepth(PP_ICL_PTCACHE_SIZE) {
|
||||
// empty
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Returns whether the processing step is present in the given flag field.
|
||||
bool ImproveCacheLocalityProcess::IsActive( unsigned int pFlags) const {
|
||||
bool ImproveCacheLocalityProcess::IsActive(unsigned int pFlags) const {
|
||||
return (pFlags & aiProcess_ImproveCacheLocality) != 0;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Setup configuration
|
||||
void ImproveCacheLocalityProcess::SetupProperties(const Importer* pImp) {
|
||||
void ImproveCacheLocalityProcess::SetupProperties(const Importer *pImp) {
|
||||
// AI_CONFIG_PP_ICL_PTCACHE_SIZE controls the target cache size for the optimizer
|
||||
mConfigCacheDepth = pImp->GetPropertyInteger(AI_CONFIG_PP_ICL_PTCACHE_SIZE,PP_ICL_PTCACHE_SIZE);
|
||||
mConfigCacheDepth = pImp->GetPropertyInteger(AI_CONFIG_PP_ICL_PTCACHE_SIZE, PP_ICL_PTCACHE_SIZE);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Executes the post processing step on the given imported data.
|
||||
void ImproveCacheLocalityProcess::Execute( aiScene* pScene) {
|
||||
void ImproveCacheLocalityProcess::Execute(aiScene *pScene) {
|
||||
if (!pScene->mNumMeshes) {
|
||||
ASSIMP_LOG_DEBUG("ImproveCacheLocalityProcess skipped; there are no meshes");
|
||||
return;
|
||||
|
@ -93,11 +91,11 @@ void ImproveCacheLocalityProcess::Execute( aiScene* pScene) {
|
|||
|
||||
float out = 0.f;
|
||||
unsigned int numf = 0, numm = 0;
|
||||
for( unsigned int a = 0; a < pScene->mNumMeshes; ++a ){
|
||||
const float res = ProcessMesh( pScene->mMeshes[a],a);
|
||||
for (unsigned int a = 0; a < pScene->mNumMeshes; ++a) {
|
||||
const float res = ProcessMesh(pScene->mMeshes[a], a);
|
||||
if (res) {
|
||||
numf += pScene->mMeshes[a]->mNumFaces;
|
||||
out += res;
|
||||
out += res;
|
||||
++numm;
|
||||
}
|
||||
}
|
||||
|
@ -109,9 +107,54 @@ void ImproveCacheLocalityProcess::Execute( aiScene* pScene) {
|
|||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
static ai_real calculateInputACMR(aiMesh *pMesh, const aiFace *const pcEnd,
|
||||
unsigned int configCacheDepth, unsigned int meshNum) {
|
||||
ai_real fACMR = 0.0f;
|
||||
unsigned int *piFIFOStack = new unsigned int[configCacheDepth];
|
||||
memset(piFIFOStack, 0xff, configCacheDepth * sizeof(unsigned int));
|
||||
unsigned int *piCur = piFIFOStack;
|
||||
const unsigned int *const piCurEnd = piFIFOStack + configCacheDepth;
|
||||
|
||||
// count the number of cache misses
|
||||
unsigned int iCacheMisses = 0;
|
||||
for (const aiFace *pcFace = pMesh->mFaces; pcFace != pcEnd; ++pcFace) {
|
||||
for (unsigned int qq = 0; qq < 3; ++qq) {
|
||||
bool bInCache = false;
|
||||
for (unsigned int *pp = piFIFOStack; pp < piCurEnd; ++pp) {
|
||||
if (*pp == pcFace->mIndices[qq]) {
|
||||
// the vertex is in cache
|
||||
bInCache = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!bInCache) {
|
||||
++iCacheMisses;
|
||||
if (piCurEnd == piCur) {
|
||||
piCur = piFIFOStack;
|
||||
}
|
||||
*piCur++ = pcFace->mIndices[qq];
|
||||
}
|
||||
}
|
||||
}
|
||||
delete[] piFIFOStack;
|
||||
fACMR = (ai_real)iCacheMisses / pMesh->mNumFaces;
|
||||
if (3.0 == fACMR) {
|
||||
char szBuff[128]; // should be sufficiently large in every case
|
||||
|
||||
// the JoinIdenticalVertices process has not been executed on this
|
||||
// mesh, otherwise this value would normally be at least minimally
|
||||
// smaller than 3.0 ...
|
||||
ai_snprintf(szBuff, 128, "Mesh %u: Not suitable for vcache optimization", meshNum);
|
||||
ASSIMP_LOG_WARN(szBuff);
|
||||
return static_cast<ai_real>(0.f);
|
||||
}
|
||||
return fACMR;
|
||||
}
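calculateInputACMR only hoists the pre-existing cache simulation into its own function: ACMR is cache misses divided by triangle count, 3.0 being the worst case where no vertex is ever reused. A standalone sketch with a tiny worked example, not the library code:

#include <algorithm>
#include <cstdio>
#include <deque>
#include <vector>

// Average cache miss ratio for a FIFO vertex cache of the given depth.
static double Acmr(const std::vector<unsigned> &indices, size_t cacheDepth) {
    std::deque<unsigned> cache;
    unsigned misses = 0;
    for (unsigned idx : indices) {
        if (std::find(cache.begin(), cache.end(), idx) == cache.end()) {
            ++misses;
            cache.push_back(idx);
            if (cache.size() > cacheDepth) cache.pop_front();
        }
    }
    return static_cast<double>(misses) / (indices.size() / 3);
}

int main() {
    // Two triangles sharing an edge: 4 unique vertices over 2 faces.
    // 4 misses / 2 faces = 2.0, comfortably below the 3.0 worst case.
    const std::vector<unsigned> quad = { 0, 1, 2, 2, 1, 3 };
    std::printf("ACMR = %.2f\n", Acmr(quad, 16));
}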
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// Improves the cache coherency of a specific mesh
|
||||
ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshNum) {
|
||||
ai_real ImproveCacheLocalityProcess::ProcessMesh(aiMesh *pMesh, unsigned int meshNum) {
|
||||
// TODO: rewrite this to use std::vector or boost::shared_array
|
||||
ai_assert(nullptr != pMesh);
|
||||
|
||||
|
@ -126,91 +169,57 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
return static_cast<ai_real>(0.f);
|
||||
}
|
||||
|
||||
if(pMesh->mNumVertices <= mConfigCacheDepth) {
|
||||
if (pMesh->mNumVertices <= mConfigCacheDepth) {
|
||||
return static_cast<ai_real>(0.f);
|
||||
}
|
||||
|
||||
ai_real fACMR = 3.f;
|
||||
const aiFace* const pcEnd = pMesh->mFaces+pMesh->mNumFaces;
|
||||
const aiFace *const pcEnd = pMesh->mFaces + pMesh->mNumFaces;
|
||||
|
||||
// Input ACMR is for logging purposes only
|
||||
if (!DefaultLogger::isNullLogger()) {
|
||||
|
||||
unsigned int* piFIFOStack = new unsigned int[mConfigCacheDepth];
|
||||
memset(piFIFOStack,0xff,mConfigCacheDepth*sizeof(unsigned int));
|
||||
unsigned int* piCur = piFIFOStack;
|
||||
const unsigned int* const piCurEnd = piFIFOStack + mConfigCacheDepth;
|
||||
|
||||
// count the number of cache misses
|
||||
unsigned int iCacheMisses = 0;
|
||||
for (const aiFace* pcFace = pMesh->mFaces;pcFace != pcEnd;++pcFace) {
|
||||
for (unsigned int qq = 0; qq < 3;++qq) {
|
||||
bool bInCache = false;
|
||||
for (unsigned int* pp = piFIFOStack;pp < piCurEnd;++pp) {
|
||||
if (*pp == pcFace->mIndices[qq]) {
|
||||
// the vertex is in cache
|
||||
bInCache = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!bInCache) {
|
||||
++iCacheMisses;
|
||||
if (piCurEnd == piCur) {
|
||||
piCur = piFIFOStack;
|
||||
}
|
||||
*piCur++ = pcFace->mIndices[qq];
|
||||
}
|
||||
}
|
||||
}
|
||||
delete[] piFIFOStack;
|
||||
fACMR = (ai_real) iCacheMisses / pMesh->mNumFaces;
|
||||
if (3.0 == fACMR) {
|
||||
char szBuff[128]; // should be sufficiently large in every case
|
||||
|
||||
// the JoinIdenticalVertices process has not been executed on this
|
||||
// mesh, otherwise this value would normally be at least minimally
|
||||
// smaller than 3.0 ...
|
||||
ai_snprintf(szBuff,128,"Mesh %u: Not suitable for vcache optimization",meshNum);
|
||||
ASSIMP_LOG_WARN(szBuff);
|
||||
return static_cast<ai_real>(0.f);
|
||||
}
|
||||
if (!DefaultLogger::isNullLogger()) {
|
||||
fACMR = calculateInputACMR(pMesh, pcEnd, mConfigCacheDepth, meshNum);
|
||||
}
|
||||
|
||||
// first we need to build a vertex-triangle adjacency list
|
||||
VertexTriangleAdjacency adj(pMesh->mFaces,pMesh->mNumFaces, pMesh->mNumVertices,true);
|
||||
VertexTriangleAdjacency adj(pMesh->mFaces, pMesh->mNumFaces, pMesh->mNumVertices, true);
|
||||
|
||||
// build a list to store per-vertex caching time stamps
|
||||
unsigned int* const piCachingStamps = new unsigned int[pMesh->mNumVertices];
|
||||
memset(piCachingStamps,0x0,pMesh->mNumVertices*sizeof(unsigned int));
|
||||
std::vector<unsigned int> piCachingStamps;
|
||||
piCachingStamps.resize(pMesh->mNumVertices);
|
||||
memset(&piCachingStamps[0], 0x0, pMesh->mNumVertices * sizeof(unsigned int));
|
||||
|
||||
// allocate an empty output index buffer. We store the output indices in one large array.
|
||||
// Since the number of triangles won't change the input faces can be reused. This is how
|
||||
// we save thousands of redundant mini allocations for aiFace::mIndices
|
||||
const unsigned int iIdxCnt = pMesh->mNumFaces*3;
|
||||
unsigned int* const piIBOutput = new unsigned int[iIdxCnt];
|
||||
unsigned int* piCSIter = piIBOutput;
|
||||
const unsigned int iIdxCnt = pMesh->mNumFaces * 3;
|
||||
std::vector<unsigned int> piIBOutput;
|
||||
piIBOutput.resize(iIdxCnt);
|
||||
std::vector<unsigned int>::iterator piCSIter = piIBOutput.begin();
|
||||
|
||||
// allocate the flag array to hold the information
|
||||
// whether a face has already been emitted or not
|
||||
std::vector<bool> abEmitted(pMesh->mNumFaces,false);
|
||||
std::vector<bool> abEmitted(pMesh->mNumFaces, false);
|
||||
|
||||
// dead-end vertex index stack
|
||||
std::stack<unsigned int, std::vector<unsigned int> > sDeadEndVStack;
|
||||
std::stack<unsigned int, std::vector<unsigned int>> sDeadEndVStack;
|
||||
|
||||
// create a copy of the piNumTriPtr buffer
|
||||
unsigned int* const piNumTriPtr = adj.mLiveTriangles;
|
||||
unsigned int *const piNumTriPtr = adj.mLiveTriangles;
|
||||
const std::vector<unsigned int> piNumTriPtrNoModify(piNumTriPtr, piNumTriPtr + pMesh->mNumVertices);
|
||||
|
||||
// get the largest number of referenced triangles and allocate the "candidate buffer"
|
||||
unsigned int iMaxRefTris = 0; {
|
||||
const unsigned int* piCur = adj.mLiveTriangles;
|
||||
const unsigned int* const piCurEnd = adj.mLiveTriangles+pMesh->mNumVertices;
|
||||
for (;piCur != piCurEnd;++piCur) {
|
||||
iMaxRefTris = std::max(iMaxRefTris,*piCur);
|
||||
unsigned int iMaxRefTris = 0;
|
||||
{
|
||||
const unsigned int *piCur = adj.mLiveTriangles;
|
||||
const unsigned int *const piCurEnd = adj.mLiveTriangles + pMesh->mNumVertices;
|
||||
for (; piCur != piCurEnd; ++piCur) {
|
||||
iMaxRefTris = std::max(iMaxRefTris, *piCur);
|
||||
}
|
||||
}
|
||||
ai_assert(iMaxRefTris > 0);
|
||||
unsigned int* piCandidates = new unsigned int[iMaxRefTris*3];
|
||||
std::vector<unsigned int> piCandidates;
|
||||
piCandidates.resize(iMaxRefTris * 3);
|
||||
unsigned int iCacheMisses = 0;
|
||||
|
||||
// ...................................................................................
|
||||
|
@ -245,23 +254,23 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
|
||||
int ivdx = 0;
|
||||
int ics = 1;
|
||||
int iStampCnt = mConfigCacheDepth+1;
|
||||
while (ivdx >= 0) {
|
||||
int iStampCnt = mConfigCacheDepth + 1;
|
||||
while (ivdx >= 0) {
|
||||
|
||||
unsigned int icnt = piNumTriPtrNoModify[ivdx];
|
||||
unsigned int* piList = adj.GetAdjacentTriangles(ivdx);
|
||||
unsigned int* piCurCandidate = piCandidates;
|
||||
unsigned int *piList = adj.GetAdjacentTriangles(ivdx);
|
||||
std::vector<unsigned int>::iterator piCurCandidate = piCandidates.begin();
|
||||
|
||||
// get all triangles in the neighborhood
|
||||
for (unsigned int tri = 0; tri < icnt;++tri) {
|
||||
for (unsigned int tri = 0; tri < icnt; ++tri) {
|
||||
|
||||
// if they have not yet been emitted, add them to the output IB
|
||||
const unsigned int fidx = *piList++;
|
||||
if (!abEmitted[fidx]) {
|
||||
if (!abEmitted[fidx]) {
|
||||
|
||||
// so iterate through all vertices of the current triangle
|
||||
const aiFace* pcFace = &pMesh->mFaces[ fidx ];
|
||||
unsigned nind = pcFace->mNumIndices;
|
||||
const aiFace *pcFace = &pMesh->mFaces[fidx];
|
||||
const unsigned nind = pcFace->mNumIndices;
|
||||
for (unsigned ind = 0; ind < nind; ind++) {
|
||||
unsigned dp = pcFace->mIndices[ind];
|
||||
|
||||
|
@ -281,7 +290,7 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
*piCSIter++ = dp;
|
||||
|
||||
// if the vertex is not yet in cache, set its cache count
|
||||
if (iStampCnt-piCachingStamps[dp] > mConfigCacheDepth) {
|
||||
if (iStampCnt - piCachingStamps[dp] > mConfigCacheDepth) {
|
||||
piCachingStamps[dp] = iStampCnt++;
|
||||
++iCacheMisses;
|
||||
}
|
||||
|
@ -297,16 +306,16 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
// get next fanning vertex
|
||||
ivdx = -1;
|
||||
int max_priority = -1;
|
||||
for (unsigned int* piCur = piCandidates;piCur != piCurCandidate;++piCur) {
|
||||
for (std::vector<unsigned int>::iterator piCur = piCandidates.begin(); piCur != piCurCandidate; ++piCur) {
|
||||
const unsigned int dp = *piCur;
|
||||
|
||||
// must have live triangles
|
||||
if (piNumTriPtr[dp] > 0) {
|
||||
if (piNumTriPtr[dp] > 0) {
|
||||
int priority = 0;
|
||||
|
||||
// will the vertex be in cache, even after fanning occurs?
|
||||
unsigned int tmp;
|
||||
if ((tmp = iStampCnt-piCachingStamps[dp]) + 2*piNumTriPtr[dp] <= mConfigCacheDepth) {
|
||||
if ((tmp = iStampCnt - piCachingStamps[dp]) + 2 * piNumTriPtr[dp] <= mConfigCacheDepth) {
|
||||
priority = tmp;
|
||||
}
|
||||
|
||||
|
@ -324,7 +333,7 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
while (!sDeadEndVStack.empty()) {
|
||||
unsigned int iCachedIdx = sDeadEndVStack.top();
|
||||
sDeadEndVStack.pop();
|
||||
if (piNumTriPtr[ iCachedIdx ] > 0) {
|
||||
if (piNumTriPtr[iCachedIdx] > 0) {
|
||||
ivdx = iCachedIdx;
|
||||
break;
|
||||
}
|
||||
|
@ -333,9 +342,9 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
if (-1 == ivdx) {
|
||||
// well, there isn't such a vertex. Simply get the next vertex in input order and
|
||||
// hope it is not too bad ...
|
||||
while (ics < (int)pMesh->mNumVertices) {
|
||||
while (ics < (int)pMesh->mNumVertices) {
|
||||
++ics;
|
||||
if (piNumTriPtr[ics] > 0) {
|
||||
if (piNumTriPtr[ics] > 0) {
|
||||
ivdx = ics;
|
||||
break;
|
||||
}
|
||||
|
@ -345,29 +354,29 @@ ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int me
|
|||
}
|
||||
ai_real fACMR2 = 0.0f;
|
||||
if (!DefaultLogger::isNullLogger()) {
|
||||
fACMR2 = (float)iCacheMisses / pMesh->mNumFaces;
|
||||
|
||||
fACMR2 = static_cast<ai_real>(iCacheMisses) / pMesh->mNumFaces;
|
||||
const ai_real averageACMR = ((fACMR - fACMR2) / fACMR) * 100.f;
|
||||
// very intense verbose logging ... prepare for much text if there are many meshes
|
||||
if ( DefaultLogger::get()->getLogSeverity() == Logger::VERBOSE) {
|
||||
ASSIMP_LOG_VERBOSE_DEBUG("Mesh %u | ACMR in: ", meshNum, " out: ", fACMR, " | ~", fACMR2, ((fACMR - fACMR2) / fACMR) * 100.f);
|
||||
if (DefaultLogger::get()->getLogSeverity() == Logger::VERBOSE) {
|
||||
ASSIMP_LOG_VERBOSE_DEBUG("Mesh ", meshNum, "| ACMR in: ", fACMR, " out: ", fACMR2, " | average ACMR ", averageACMR);
|
||||
}
|
||||
|
||||
fACMR2 *= pMesh->mNumFaces;
|
||||
}
|
||||
// sort the output index buffer back to the input array
|
||||
piCSIter = piIBOutput;
|
||||
for (aiFace* pcFace = pMesh->mFaces; pcFace != pcEnd;++pcFace) {
|
||||
unsigned nind = pcFace->mNumIndices;
|
||||
unsigned * ind = pcFace->mIndices;
|
||||
if (nind > 0) ind[0] = *piCSIter++;
|
||||
if (nind > 1) ind[1] = *piCSIter++;
|
||||
if (nind > 2) ind[2] = *piCSIter++;
|
||||
}
|
||||
|
||||
// delete temporary storage
|
||||
delete[] piCachingStamps;
|
||||
delete[] piIBOutput;
|
||||
delete[] piCandidates;
|
||||
// sort the output index buffer back to the input array
|
||||
piCSIter = piIBOutput.begin();
|
||||
for (aiFace *pcFace = pMesh->mFaces; pcFace != pcEnd; ++pcFace) {
|
||||
unsigned nind = pcFace->mNumIndices;
|
||||
unsigned *ind = pcFace->mIndices;
|
||||
if (nind > 0)
|
||||
ind[0] = *piCSIter++;
|
||||
if (nind > 1)
|
||||
ind[1] = *piCSIter++;
|
||||
if (nind > 2)
|
||||
ind[2] = *piCSIter++;
|
||||
}
|
||||
|
||||
return fACMR2;
|
||||
}
|
||||
|
||||
} // namespace Assimp
|
||||
|
|
|
@ -53,6 +53,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#include <stdio.h>
|
||||
#include <unordered_set>
|
||||
#include <unordered_map>
|
||||
#include <memory>
|
||||
|
||||
using namespace Assimp;
|
||||
|
||||
|
@ -145,7 +146,7 @@ bool areVerticesEqual(
|
|||
}
|
||||
|
||||
template<class XMesh>
|
||||
void updateXMeshVertices(XMesh *pMesh, std::vector<Vertex> &uniqueVertices) {
|
||||
void updateXMeshVertices(XMesh *pMesh, std::vector<int> &uniqueVertices) {
|
||||
// replace vertex data with the unique data sets
|
||||
pMesh->mNumVertices = (unsigned int)uniqueVertices.size();
|
||||
|
||||
|
@ -156,53 +157,47 @@ void updateXMeshVertices(XMesh *pMesh, std::vector<Vertex> &uniqueVertices) {
|
|||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Position, if present (check made for aiAnimMesh)
|
||||
if (pMesh->mVertices) {
|
||||
delete [] pMesh->mVertices;
|
||||
if (pMesh->mVertices) {
|
||||
std::unique_ptr<aiVector3D[]> oldVertices(pMesh->mVertices);
|
||||
pMesh->mVertices = new aiVector3D[pMesh->mNumVertices];
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++) {
|
||||
pMesh->mVertices[a] = uniqueVertices[a].position;
|
||||
}
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++)
|
||||
pMesh->mVertices[a] = oldVertices[uniqueVertices[a]];
|
||||
}
|
||||
|
||||
// Normals, if present
|
||||
if (pMesh->mNormals) {
|
||||
delete [] pMesh->mNormals;
|
||||
std::unique_ptr<aiVector3D[]> oldNormals(pMesh->mNormals);
|
||||
pMesh->mNormals = new aiVector3D[pMesh->mNumVertices];
|
||||
for( unsigned int a = 0; a < pMesh->mNumVertices; a++) {
|
||||
pMesh->mNormals[a] = uniqueVertices[a].normal;
|
||||
}
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++)
|
||||
pMesh->mNormals[a] = oldNormals[uniqueVertices[a]];
|
||||
}
|
||||
// Tangents, if present
|
||||
if (pMesh->mTangents) {
|
||||
delete [] pMesh->mTangents;
|
||||
std::unique_ptr<aiVector3D[]> oldTangents(pMesh->mTangents);
|
||||
pMesh->mTangents = new aiVector3D[pMesh->mNumVertices];
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++) {
|
||||
pMesh->mTangents[a] = uniqueVertices[a].tangent;
|
||||
}
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++)
|
||||
pMesh->mTangents[a] = oldTangents[uniqueVertices[a]];
|
||||
}
|
||||
// Bitangents as well
|
||||
if (pMesh->mBitangents) {
|
||||
delete [] pMesh->mBitangents;
|
||||
std::unique_ptr<aiVector3D[]> oldBitangents(pMesh->mBitangents);
|
||||
pMesh->mBitangents = new aiVector3D[pMesh->mNumVertices];
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++) {
|
||||
pMesh->mBitangents[a] = uniqueVertices[a].bitangent;
|
||||
}
|
||||
for (unsigned int a = 0; a < pMesh->mNumVertices; a++)
|
||||
pMesh->mBitangents[a] = oldBitangents[uniqueVertices[a]];
|
||||
}
|
||||
// Vertex colors
|
||||
for (unsigned int a = 0; pMesh->HasVertexColors(a); a++) {
|
||||
delete [] pMesh->mColors[a];
|
||||
std::unique_ptr<aiColor4D[]> oldColors(pMesh->mColors[a]);
|
||||
pMesh->mColors[a] = new aiColor4D[pMesh->mNumVertices];
|
||||
for( unsigned int b = 0; b < pMesh->mNumVertices; b++) {
|
||||
pMesh->mColors[a][b] = uniqueVertices[b].colors[a];
|
||||
}
|
||||
for (unsigned int b = 0; b < pMesh->mNumVertices; b++)
|
||||
pMesh->mColors[a][b] = oldColors[uniqueVertices[b]];
|
||||
}
|
||||
// Texture coords
|
||||
for (unsigned int a = 0; pMesh->HasTextureCoords(a); a++) {
|
||||
delete [] pMesh->mTextureCoords[a];
|
||||
std::unique_ptr<aiVector3D[]> oldTextureCoords(pMesh->mTextureCoords[a]);
|
||||
pMesh->mTextureCoords[a] = new aiVector3D[pMesh->mNumVertices];
|
||||
for (unsigned int b = 0; b < pMesh->mNumVertices; b++) {
|
||||
pMesh->mTextureCoords[a][b] = uniqueVertices[b].texcoords[a];
|
||||
}
|
||||
for (unsigned int b = 0; b < pMesh->mNumVertices; b++)
|
||||
pMesh->mTextureCoords[a][b] = oldTextureCoords[uniqueVertices[b]];
|
||||
}
|
||||
}
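updateXMeshVertices now receives the original indices of the surviving vertices instead of copies of whole Vertex structs, and rebuilds each attribute array by gathering from the old one. A hedged sketch of that gather step, with an illustrative helper name:

#include <memory>
#include <vector>

// Rebuild one attribute array so it contains only the kept vertices, in their new order.
template <typename T>
static void GatherByIndex(T *&array, const std::vector<int> &keptIndices) {
    if (array == nullptr) {
        return;
    }
    std::unique_ptr<T[]> old(array); // take ownership of the old data
    array = new T[keptIndices.size()];
    for (size_t i = 0; i < keptIndices.size(); ++i) {
        array[i] = old[keptIndices[i]]; // copy only the surviving entries
    }
}
// e.g. GatherByIndex(pMesh->mVertices, uniqueVertices);
//      GatherByIndex(pMesh->mNormals,  uniqueVertices); ...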
|
||||
|
||||
|
@ -270,7 +265,7 @@ int JoinVerticesProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshIndex) {
|
|||
}
|
||||
|
||||
// We'll never have more vertices afterwards.
|
||||
std::vector<Vertex> uniqueVertices;
|
||||
std::vector<int> uniqueVertices;
|
||||
uniqueVertices.reserve( pMesh->mNumVertices);
|
||||
|
||||
// For each vertex the index of the vertex it was replaced by.
|
||||
|
@ -311,7 +306,7 @@ int JoinVerticesProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshIndex) {
|
|||
const bool hasAnimMeshes = pMesh->mNumAnimMeshes > 0;
|
||||
|
||||
// We'll never have more vertices afterwards.
|
||||
std::vector<std::vector<Vertex>> uniqueAnimatedVertices;
|
||||
std::vector<std::vector<int>> uniqueAnimatedVertices;
|
||||
if (hasAnimMeshes) {
|
||||
uniqueAnimatedVertices.resize(pMesh->mNumAnimMeshes);
|
||||
for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) {
|
||||
|
@ -345,10 +340,10 @@ int JoinVerticesProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshIndex) {
|
|||
//keep track of its index and increment 1
|
||||
replaceIndex[a] = newIndex++;
|
||||
// add the vertex to the unique vertices
|
||||
uniqueVertices.push_back(v);
|
||||
uniqueVertices.push_back(a);
|
||||
if (hasAnimMeshes) {
|
||||
for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) {
|
||||
uniqueAnimatedVertices[animMeshIndex].emplace_back(pMesh->mAnimMeshes[animMeshIndex], a);
|
||||
uniqueAnimatedVertices[animMeshIndex].emplace_back(a);
|
||||
}
|
||||
}
|
||||
} else{
|
||||
|
|
|
@ -81,6 +81,7 @@ void LimitBoneWeightsProcess::Execute( aiScene* pScene) {
|
|||
// Executes the post processing step on the given imported data.
|
||||
void LimitBoneWeightsProcess::SetupProperties(const Importer* pImp) {
|
||||
this->mMaxWeights = pImp->GetPropertyInteger(AI_CONFIG_PP_LBW_MAX_WEIGHTS,AI_LMW_MAX_WEIGHTS);
|
||||
this->mRemoveEmptyBones = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, 1) != 0;
|
||||
}
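The compile-time #ifdef is replaced by the existing AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES importer property, read here with a default of 1 (enabled). A short usage sketch from the client side; the file path is a placeholder:

#include <assimp/Importer.hpp>
#include <assimp/config.h>
#include <assimp/postprocess.h>

void LoadKeepingEmptyBones(const char *path) {
    Assimp::Importer importer;
    // Keep bones even if weight limiting leaves them with zero weights.
    importer.SetPropertyBool(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, false);
    importer.ReadFile(path, aiProcess_LimitBoneWeights);
}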
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
@ -172,9 +173,9 @@ void LimitBoneWeightsProcess::ProcessMesh(aiMesh* pMesh) {
|
|||
}
|
||||
|
||||
// remove empty bones
|
||||
#ifdef AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES
|
||||
pMesh->mNumBones = removeEmptyBones(pMesh);
|
||||
#endif // AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES
|
||||
if (mRemoveEmptyBones) {
|
||||
pMesh->mNumBones = removeEmptyBones(pMesh);
|
||||
}
|
||||
|
||||
if (!DefaultLogger::isNullLogger()) {
|
||||
ASSIMP_LOG_INFO("Removed ", removed, " weights. Input bones: ", old_bones, ". Output bones: ", pMesh->mNumBones);
|
||||
|
|
|
@ -133,6 +133,7 @@ public:
|
|||
|
||||
/** Maximum number of bones influencing any single vertex. */
|
||||
unsigned int mMaxWeights;
|
||||
bool mRemoveEmptyBones;
|
||||
};
|
||||
|
||||
} // end of namespace Assimp
|
||||
|
|
|
@ -451,7 +451,7 @@ bool TriangulateProcess::TriangulateMesh( aiMesh* pMesh) {
|
|||
*pnt2 = &temp_verts[next];
|
||||
|
||||
// Must be a convex point. Assuming ccw winding, it must be on the right of the line between p-1 and p+1.
|
||||
if (OnLeftSideOfLine2D(*pnt0,*pnt2,*pnt1)) {
|
||||
if (OnLeftSideOfLine2D(*pnt0,*pnt2,*pnt1) == 1) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
The Clipper code library, the "Software" (that includes Delphi, C++ & C#
|
||||
source code, accompanying samples and documentation), has been released
|
||||
under the following license, terms and conditions:
|
||||
|
||||
Boost Software License - Version 1.0 - August 17th, 2003
|
||||
http://www.boost.org/LICENSE_1_0.txt
|
||||
|
||||
|
@ -25,5 +21,4 @@ FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
|
|||
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
|
||||
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
|
||||
DEALINGS IN THE SOFTWARE.
|
File diff suppressed because it is too large
|
@ -1,10 +1,10 @@
|
|||
/*******************************************************************************
|
||||
* *
|
||||
* Author : Angus Johnson *
|
||||
* Version : 4.8.8 *
|
||||
* Date : 30 August 2012 *
|
||||
* Version : 6.4.2 *
|
||||
* Date : 27 February 2017 *
|
||||
* Website : http://www.angusj.com *
|
||||
* Copyright : Angus Johnson 2010-2012 *
|
||||
* Copyright : Angus Johnson 2010-2017 *
|
||||
* *
|
||||
* License: *
|
||||
* Use, modification & distribution is subject to Boost Software License Ver 1. *
|
||||
|
@ -34,11 +34,30 @@
|
|||
#ifndef clipper_hpp
|
||||
#define clipper_hpp
|
||||
|
||||
#define CLIPPER_VERSION "6.4.2"
|
||||
|
||||
//use_int32: When enabled 32bit ints are used instead of 64bit ints. This
|
||||
//improve performance but coordinate values are limited to the range +/- 46340
|
||||
//#define use_int32
|
||||
|
||||
//use_xyz: adds a Z member to IntPoint. Adds a minor cost to perfomance.
|
||||
//#define use_xyz
|
||||
|
||||
//use_lines: Enables line clipping. Adds a very minor cost to performance.
|
||||
#define use_lines
|
||||
|
||||
//use_deprecated: Enables temporary support for the obsolete functions
|
||||
//#define use_deprecated
|
||||
|
||||
#include <vector>
|
||||
#include <list>
|
||||
#include <set>
|
||||
#include <stdexcept>
|
||||
#include <cstring>
|
||||
#include <cstdlib>
|
||||
#include <ostream>
|
||||
#include <functional>
|
||||
#include <queue>
|
||||
|
||||
namespace ClipperLib {
|
||||
|
||||
|
@ -50,129 +69,150 @@ enum PolyType { ptSubject, ptClip };
|
|||
//see http://glprogramming.com/red/chapter11.html
|
||||
enum PolyFillType { pftEvenOdd, pftNonZero, pftPositive, pftNegative };
|
||||
|
||||
typedef signed long long long64;
|
||||
typedef unsigned long long ulong64;
|
||||
#ifdef use_int32
|
||||
typedef int cInt;
|
||||
static cInt const loRange = 0x7FFF;
|
||||
static cInt const hiRange = 0x7FFF;
|
||||
#else
|
||||
typedef signed long long cInt;
|
||||
static cInt const loRange = 0x3FFFFFFF;
|
||||
static cInt const hiRange = 0x3FFFFFFFFFFFFFFFLL;
|
||||
typedef signed long long long64; //used by Int128 class
|
||||
typedef unsigned long long ulong64;
|
||||
|
||||
#endif
|
||||
|
||||
struct IntPoint {
|
||||
cInt X;
|
||||
cInt Y;
|
||||
#ifdef use_xyz
|
||||
cInt Z;
|
||||
IntPoint(cInt x = 0, cInt y = 0, cInt z = 0): X(x), Y(y), Z(z) {};
|
||||
#else
|
||||
IntPoint(cInt x = 0, cInt y = 0): X(x), Y(y) {};
|
||||
#endif
|
||||
|
||||
friend inline bool operator== (const IntPoint& a, const IntPoint& b)
|
||||
{
|
||||
return a.X == b.X && a.Y == b.Y;
|
||||
}
|
||||
friend inline bool operator!= (const IntPoint& a, const IntPoint& b)
|
||||
{
|
||||
return a.X != b.X || a.Y != b.Y;
|
||||
}
|
||||
};
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
typedef std::vector< IntPoint > Path;
|
||||
typedef std::vector< Path > Paths;
|
||||
|
||||
inline Path& operator <<(Path& poly, const IntPoint& p) {poly.push_back(p); return poly;}
|
||||
inline Paths& operator <<(Paths& polys, const Path& p) {polys.push_back(p); return polys;}
|
||||
|
||||
std::ostream& operator <<(std::ostream &s, const IntPoint &p);
|
||||
std::ostream& operator <<(std::ostream &s, const Path &p);
|
||||
std::ostream& operator <<(std::ostream &s, const Paths &p);
|
||||
|
||||
struct DoublePoint
|
||||
{
|
||||
double X;
|
||||
double Y;
|
||||
DoublePoint(double x = 0, double y = 0) : X(x), Y(y) {}
|
||||
DoublePoint(IntPoint ip) : X((double)ip.X), Y((double)ip.Y) {}
|
||||
};
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
#ifdef use_xyz
|
||||
typedef void (*ZFillCallback)(IntPoint& e1bot, IntPoint& e1top, IntPoint& e2bot, IntPoint& e2top, IntPoint& pt);
|
||||
#endif
|
||||
|
||||
enum InitOptions {ioReverseSolution = 1, ioStrictlySimple = 2, ioPreserveCollinear = 4};
|
||||
enum JoinType {jtSquare, jtRound, jtMiter};
|
||||
enum EndType {etClosedPolygon, etClosedLine, etOpenButt, etOpenSquare, etOpenRound};
|
||||
|
||||
class PolyNode;
|
||||
typedef std::vector< PolyNode* > PolyNodes;
|
||||
|
||||
class PolyNode
|
||||
{
|
||||
public:
|
||||
long64 X;
|
||||
long64 Y;
|
||||
IntPoint(long64 x = 0, long64 y = 0): X(x), Y(y) {};
|
||||
friend std::ostream& operator <<(std::ostream &s, IntPoint &p);
|
||||
PolyNode();
|
||||
virtual ~PolyNode(){};
|
||||
Path Contour;
|
||||
PolyNodes Childs;
|
||||
PolyNode* Parent;
|
||||
PolyNode* GetNext() const;
|
||||
bool IsHole() const;
|
||||
bool IsOpen() const;
|
||||
int ChildCount() const;
|
||||
private:
|
||||
//PolyNode& operator =(PolyNode& other);
|
||||
unsigned Index; //node index in Parent.Childs
|
||||
bool m_IsOpen;
|
||||
JoinType m_jointype;
|
||||
EndType m_endtype;
|
||||
PolyNode* GetNextSiblingUp() const;
|
||||
void AddChild(PolyNode& child);
|
||||
friend class Clipper; //to access Index
|
||||
friend class ClipperOffset;
|
||||
};
|
||||
|
||||
typedef std::vector< IntPoint > Polygon;
|
||||
typedef std::vector< Polygon > Polygons;
|
||||
|
||||
std::ostream& operator <<(std::ostream &s, Polygon &p);
|
||||
std::ostream& operator <<(std::ostream &s, Polygons &p);
|
||||
|
||||
struct ExPolygon {
|
||||
Polygon outer;
|
||||
Polygons holes;
|
||||
};
|
||||
typedef std::vector< ExPolygon > ExPolygons;
|
||||
|
||||
enum JoinType { jtSquare, jtRound, jtMiter };
|
||||
|
||||
bool Orientation(const Polygon &poly);
|
||||
double Area(const Polygon &poly);
|
||||
void OffsetPolygons(const Polygons &in_polys, Polygons &out_polys,
|
||||
double delta, JoinType jointype = jtSquare, double MiterLimit = 2);
|
||||
void SimplifyPolygon(const Polygon &in_poly, Polygons &out_polys, PolyFillType fillType = pftEvenOdd);
|
||||
void SimplifyPolygons(const Polygons &in_polys, Polygons &out_polys, PolyFillType fillType = pftEvenOdd);
|
||||
void SimplifyPolygons(Polygons &polys, PolyFillType fillType = pftEvenOdd);
|
||||
|
||||
void ReversePolygon(Polygon& p);
|
||||
void ReversePolygons(Polygons& p);
|
||||
|
||||
//used internally ...
|
||||
enum EdgeSide { esNeither = 0, esLeft = 1, esRight = 2, esBoth = 3 };
|
||||
enum IntersectProtects { ipNone = 0, ipLeft = 1, ipRight = 2, ipBoth = 3 };
|
||||
|
||||
struct TEdge {
|
||||
long64 xbot;
|
||||
long64 ybot;
|
||||
long64 xcurr;
|
||||
long64 ycurr;
|
||||
long64 xtop;
|
||||
long64 ytop;
|
||||
double dx;
|
||||
long64 tmpX;
|
||||
PolyType polyType;
|
||||
EdgeSide side;
|
||||
int windDelta; //1 or -1 depending on winding direction
|
||||
int windCnt;
|
||||
int windCnt2; //winding count of the opposite polytype
|
||||
int outIdx;
|
||||
TEdge *next;
|
||||
TEdge *prev;
|
||||
TEdge *nextInLML;
|
||||
TEdge *nextInAEL;
|
||||
TEdge *prevInAEL;
|
||||
TEdge *nextInSEL;
|
||||
TEdge *prevInSEL;
|
||||
class PolyTree: public PolyNode
|
||||
{
|
||||
public:
|
||||
~PolyTree(){ Clear(); };
|
||||
PolyNode* GetFirst() const;
|
||||
void Clear();
|
||||
int Total() const;
|
||||
private:
|
||||
//PolyTree& operator =(PolyTree& other);
|
||||
PolyNodes AllNodes;
|
||||
friend class Clipper; //to access AllNodes
|
||||
};
|
||||
|
||||
struct IntersectNode {
|
||||
TEdge *edge1;
|
||||
TEdge *edge2;
|
||||
IntPoint pt;
|
||||
IntersectNode *next;
|
||||
};
|
||||
bool Orientation(const Path &poly);
|
||||
double Area(const Path &poly);
|
||||
int PointInPolygon(const IntPoint &pt, const Path &path);
|
||||
|
||||
struct LocalMinima {
|
||||
long64 Y;
|
||||
TEdge *leftBound;
|
||||
TEdge *rightBound;
|
||||
LocalMinima *next;
|
||||
};
|
||||
void SimplifyPolygon(const Path &in_poly, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
|
||||
void SimplifyPolygons(const Paths &in_polys, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
|
||||
void SimplifyPolygons(Paths &polys, PolyFillType fillType = pftEvenOdd);
|
||||
|
||||
struct Scanbeam {
|
||||
long64 Y;
|
||||
Scanbeam *next;
|
||||
};
|
||||
void CleanPolygon(const Path& in_poly, Path& out_poly, double distance = 1.415);
|
||||
void CleanPolygon(Path& poly, double distance = 1.415);
|
||||
void CleanPolygons(const Paths& in_polys, Paths& out_polys, double distance = 1.415);
|
||||
void CleanPolygons(Paths& polys, double distance = 1.415);
|
||||
|
||||
struct OutPt; //forward declaration
|
||||
void MinkowskiSum(const Path& pattern, const Path& path, Paths& solution, bool pathIsClosed);
|
||||
void MinkowskiSum(const Path& pattern, const Paths& paths, Paths& solution, bool pathIsClosed);
|
||||
void MinkowskiDiff(const Path& poly1, const Path& poly2, Paths& solution);
|
||||
|
||||
struct OutRec {
|
||||
int idx;
|
||||
bool isHole;
|
||||
OutRec *FirstLeft;
|
||||
OutRec *AppendLink;
|
||||
OutPt *pts;
|
||||
OutPt *bottomPt;
|
||||
OutPt *bottomFlag;
|
||||
EdgeSide sides;
|
||||
};
|
||||
void PolyTreeToPaths(const PolyTree& polytree, Paths& paths);
|
||||
void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths);
|
||||
void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths);
|
||||
|
||||
struct OutPt {
|
||||
int idx;
|
||||
IntPoint pt;
|
||||
OutPt *next;
|
||||
OutPt *prev;
|
||||
};
|
||||
void ReversePath(Path& p);
|
||||
void ReversePaths(Paths& p);
|
||||
|
||||
struct JoinRec {
|
||||
IntPoint pt1a;
|
||||
IntPoint pt1b;
|
||||
int poly1Idx;
|
||||
IntPoint pt2a;
|
||||
IntPoint pt2b;
|
||||
int poly2Idx;
|
||||
};
|
||||
struct IntRect { cInt left; cInt top; cInt right; cInt bottom; };
|
||||
|
||||
struct HorzJoinRec {
|
||||
TEdge *edge;
|
||||
int savedIdx;
|
||||
};
|
||||
//enums that are used internally ...
|
||||
enum EdgeSide { esLeft = 1, esRight = 2};
|
||||
|
||||
struct IntRect { long64 left; long64 top; long64 right; long64 bottom; };
|
||||
//forward declarations (for stuff used internally) ...
|
||||
struct TEdge;
|
||||
struct IntersectNode;
|
||||
struct LocalMinimum;
|
||||
struct OutPt;
|
||||
struct OutRec;
|
||||
struct Join;
|
||||
|
||||
typedef std::vector < OutRec* > PolyOutList;
|
||||
typedef std::vector < TEdge* > EdgeList;
|
||||
typedef std::vector < JoinRec* > JoinList;
|
||||
typedef std::vector < HorzJoinRec* > HorzJoinList;
|
||||
typedef std::vector < Join* > JoinList;
|
||||
typedef std::vector < IntersectNode* > IntersectList;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
//ClipperBase is the ancestor to the Clipper class. It should not be
|
||||
//instantiated directly. This class simply abstracts the conversion of sets of
|
||||
|
@ -182,110 +222,170 @@ class ClipperBase
|
|||
public:
|
||||
ClipperBase();
|
||||
virtual ~ClipperBase();
|
||||
bool AddPolygon(const Polygon &pg, PolyType polyType);
|
||||
bool AddPolygons( const Polygons &ppg, PolyType polyType);
|
||||
virtual bool AddPath(const Path &pg, PolyType PolyTyp, bool Closed);
|
||||
bool AddPaths(const Paths &ppg, PolyType PolyTyp, bool Closed);
|
||||
virtual void Clear();
|
||||
IntRect GetBounds();
|
||||
bool PreserveCollinear() {return m_PreserveCollinear;};
|
||||
void PreserveCollinear(bool value) {m_PreserveCollinear = value;};
|
||||
protected:
|
||||
void DisposeLocalMinimaList();
|
||||
TEdge* AddBoundsToLML(TEdge *e);
|
||||
void PopLocalMinima();
|
||||
TEdge* AddBoundsToLML(TEdge *e, bool IsClosed);
|
||||
virtual void Reset();
|
||||
void InsertLocalMinima(LocalMinima *newLm);
|
||||
LocalMinima *m_CurrentLM;
|
||||
LocalMinima *m_MinimaList;
|
||||
TEdge* ProcessBound(TEdge* E, bool IsClockwise);
|
||||
void InsertScanbeam(const cInt Y);
|
||||
bool PopScanbeam(cInt &Y);
|
||||
bool LocalMinimaPending();
|
||||
bool PopLocalMinima(cInt Y, const LocalMinimum *&locMin);
|
||||
OutRec* CreateOutRec();
|
||||
void DisposeAllOutRecs();
|
||||
void DisposeOutRec(PolyOutList::size_type index);
|
||||
void SwapPositionsInAEL(TEdge *edge1, TEdge *edge2);
|
||||
void DeleteFromAEL(TEdge *e);
|
||||
void UpdateEdgeIntoAEL(TEdge *&e);
|
||||
|
||||
typedef std::vector<LocalMinimum> MinimaList;
|
||||
MinimaList::iterator m_CurrentLM;
|
||||
MinimaList m_MinimaList;
|
||||
|
||||
bool m_UseFullRange;
|
||||
EdgeList m_edges;
|
||||
bool m_PreserveCollinear;
|
||||
bool m_HasOpenPaths;
|
||||
PolyOutList m_PolyOuts;
|
||||
TEdge *m_ActiveEdges;
|
||||
|
||||
typedef std::priority_queue<cInt> ScanbeamList;
|
||||
ScanbeamList m_Scanbeam;
|
||||
};
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class Clipper : public virtual ClipperBase
|
||||
{
|
||||
public:
|
||||
Clipper();
|
||||
~Clipper();
|
||||
Clipper(int initOptions = 0);
|
||||
bool Execute(ClipType clipType,
|
||||
Polygons &solution,
|
||||
PolyFillType subjFillType = pftEvenOdd,
|
||||
PolyFillType clipFillType = pftEvenOdd);
|
||||
Paths &solution,
|
||||
PolyFillType fillType = pftEvenOdd);
|
||||
bool Execute(ClipType clipType,
|
||||
ExPolygons &solution,
|
||||
PolyFillType subjFillType = pftEvenOdd,
|
||||
PolyFillType clipFillType = pftEvenOdd);
|
||||
void Clear();
|
||||
bool ReverseSolution() {return m_ReverseOutput;};
|
||||
Paths &solution,
|
||||
PolyFillType subjFillType,
|
||||
PolyFillType clipFillType);
|
||||
bool Execute(ClipType clipType,
|
||||
PolyTree &polytree,
|
||||
PolyFillType fillType = pftEvenOdd);
|
||||
bool Execute(ClipType clipType,
|
||||
PolyTree &polytree,
|
||||
PolyFillType subjFillType,
|
||||
PolyFillType clipFillType);
|
||||
bool ReverseSolution() { return m_ReverseOutput; };
|
||||
void ReverseSolution(bool value) {m_ReverseOutput = value;};
|
||||
bool StrictlySimple() {return m_StrictSimple;};
|
||||
void StrictlySimple(bool value) {m_StrictSimple = value;};
|
||||
//set the callback function for z value filling on intersections (otherwise Z is 0)
|
||||
#ifdef use_xyz
|
||||
void ZFillFunction(ZFillCallback zFillFunc);
|
||||
#endif
|
||||
protected:
|
||||
void Reset();
|
||||
virtual bool ExecuteInternal(bool fixHoleLinkages);
|
||||
virtual bool ExecuteInternal();
|
||||
private:
|
||||
PolyOutList m_PolyOuts;
|
||||
JoinList m_Joins;
|
||||
HorzJoinList m_HorizJoins;
|
||||
ClipType m_ClipType;
|
||||
Scanbeam *m_Scanbeam;
|
||||
TEdge *m_ActiveEdges;
|
||||
JoinList m_Joins;
|
||||
JoinList m_GhostJoins;
|
||||
IntersectList m_IntersectList;
|
||||
ClipType m_ClipType;
|
||||
typedef std::list<cInt> MaximaList;
|
||||
MaximaList m_Maxima;
|
||||
TEdge *m_SortedEdges;
|
||||
IntersectNode *m_IntersectNodes;
|
||||
bool m_ExecuteLocked;
|
||||
PolyFillType m_ClipFillType;
|
||||
PolyFillType m_SubjFillType;
|
||||
bool m_ReverseOutput;
|
||||
void DisposeScanbeamList();
|
||||
bool m_ExecuteLocked;
|
||||
PolyFillType m_ClipFillType;
|
||||
PolyFillType m_SubjFillType;
|
||||
bool m_ReverseOutput;
|
||||
bool m_UsingPolyTree;
|
||||
bool m_StrictSimple;
|
||||
#ifdef use_xyz
|
||||
ZFillCallback m_ZFill; //custom callback
|
||||
#endif
|
||||
void SetWindingCount(TEdge& edge);
|
||||
bool IsEvenOddFillType(const TEdge& edge) const;
|
||||
bool IsEvenOddAltFillType(const TEdge& edge) const;
|
||||
void InsertScanbeam(const long64 Y);
|
||||
long64 PopScanbeam();
|
||||
void InsertLocalMinimaIntoAEL(const long64 botY);
|
||||
void InsertEdgeIntoAEL(TEdge *edge);
|
||||
void InsertLocalMinimaIntoAEL(const cInt botY);
|
||||
void InsertEdgeIntoAEL(TEdge *edge, TEdge* startEdge);
|
||||
void AddEdgeToSEL(TEdge *edge);
|
||||
bool PopEdgeFromSEL(TEdge *&edge);
|
||||
void CopyAELToSEL();
|
||||
void DeleteFromSEL(TEdge *e);
|
||||
void DeleteFromAEL(TEdge *e);
|
||||
void UpdateEdgeIntoAEL(TEdge *&e);
|
||||
void SwapPositionsInSEL(TEdge *edge1, TEdge *edge2);
|
||||
bool IsContributing(const TEdge& edge) const;
|
||||
bool IsTopHorz(const long64 XPos);
|
||||
void SwapPositionsInAEL(TEdge *edge1, TEdge *edge2);
|
||||
void DoMaxima(TEdge *e, long64 topY);
|
||||
bool IsTopHorz(const cInt XPos);
|
||||
void DoMaxima(TEdge *e);
|
||||
void ProcessHorizontals();
|
||||
void ProcessHorizontal(TEdge *horzEdge);
|
||||
void AddLocalMaxPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
|
||||
void AddLocalMinPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
|
||||
OutPt* AddLocalMinPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
|
||||
OutRec* GetOutRec(int idx);
|
||||
void AppendPolygon(TEdge *e1, TEdge *e2);
|
||||
void DoEdge1(TEdge *edge1, TEdge *edge2, const IntPoint &pt);
|
||||
void DoEdge2(TEdge *edge1, TEdge *edge2, const IntPoint &pt);
|
||||
void DoBothEdges(TEdge *edge1, TEdge *edge2, const IntPoint &pt);
|
||||
void IntersectEdges(TEdge *e1, TEdge *e2,
|
||||
const IntPoint &pt, IntersectProtects protects);
|
||||
OutRec* CreateOutRec();
|
||||
void AddOutPt(TEdge *e, const IntPoint &pt);
|
||||
void DisposeBottomPt(OutRec &outRec);
|
||||
void DisposeAllPolyPts();
|
||||
void DisposeOutRec(PolyOutList::size_type index);
|
||||
bool ProcessIntersections(const long64 botY, const long64 topY);
|
||||
void AddIntersectNode(TEdge *e1, TEdge *e2, const IntPoint &pt);
|
||||
void BuildIntersectList(const long64 botY, const long64 topY);
|
||||
void IntersectEdges(TEdge *e1, TEdge *e2, IntPoint &pt);
|
||||
OutPt* AddOutPt(TEdge *e, const IntPoint &pt);
|
||||
OutPt* GetLastOutPt(TEdge *e);
|
||||
bool ProcessIntersections(const cInt topY);
|
||||
void BuildIntersectList(const cInt topY);
|
||||
void ProcessIntersectList();
|
||||
void ProcessEdgesAtTopOfScanbeam(const long64 topY);
|
||||
void BuildResult(Polygons& polys);
|
||||
void BuildResultEx(ExPolygons& polys);
|
||||
void SetHoleState(TEdge *e, OutRec *OutRec);
|
||||
void ProcessEdgesAtTopOfScanbeam(const cInt topY);
|
||||
void BuildResult(Paths& polys);
|
||||
void BuildResult2(PolyTree& polytree);
|
||||
void SetHoleState(TEdge *e, OutRec *outrec);
|
||||
void DisposeIntersectNodes();
|
||||
bool FixupIntersections();
|
||||
void FixupOutPolygon(OutRec &outRec);
|
||||
bool FixupIntersectionOrder();
|
||||
void FixupOutPolygon(OutRec &outrec);
|
||||
void FixupOutPolyline(OutRec &outrec);
|
||||
bool IsHole(TEdge *e);
|
||||
void FixHoleLinkage(OutRec *outRec);
|
||||
void CheckHoleLinkages1(OutRec *outRec1, OutRec *outRec2);
|
||||
void CheckHoleLinkages2(OutRec *outRec1, OutRec *outRec2);
|
||||
void AddJoin(TEdge *e1, TEdge *e2, int e1OutIdx = -1, int e2OutIdx = -1);
|
||||
bool FindOwnerFromSplitRecs(OutRec &outRec, OutRec *&currOrfl);
|
||||
void FixHoleLinkage(OutRec &outrec);
|
||||
void AddJoin(OutPt *op1, OutPt *op2, const IntPoint offPt);
|
||||
void ClearJoins();
|
||||
void AddHorzJoin(TEdge *e, int idx);
|
||||
void ClearHorzJoins();
|
||||
void JoinCommonEdges(bool fixHoleLinkages);
|
||||
void ClearGhostJoins();
|
||||
void AddGhostJoin(OutPt *op, const IntPoint offPt);
|
||||
bool JoinPoints(Join *j, OutRec* outRec1, OutRec* outRec2);
|
||||
void JoinCommonEdges();
|
||||
void DoSimplePolygons();
|
||||
void FixupFirstLefts1(OutRec* OldOutRec, OutRec* NewOutRec);
|
||||
void FixupFirstLefts2(OutRec* InnerOutRec, OutRec* OuterOutRec);
|
||||
void FixupFirstLefts3(OutRec* OldOutRec, OutRec* NewOutRec);
|
||||
#ifdef use_xyz
|
||||
void SetZ(IntPoint& pt, TEdge& e1, TEdge& e2);
|
||||
#endif
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class ClipperOffset
|
||||
{
|
||||
public:
|
||||
ClipperOffset(double miterLimit = 2.0, double roundPrecision = 0.25);
|
||||
~ClipperOffset();
|
||||
void AddPath(const Path& path, JoinType joinType, EndType endType);
|
||||
void AddPaths(const Paths& paths, JoinType joinType, EndType endType);
|
||||
void Execute(Paths& solution, double delta);
|
||||
void Execute(PolyTree& solution, double delta);
|
||||
void Clear();
|
||||
double MiterLimit;
|
||||
double ArcTolerance;
|
||||
private:
|
||||
Paths m_destPolys;
|
||||
Path m_srcPoly;
|
||||
Path m_destPoly;
|
||||
std::vector<DoublePoint> m_normals;
|
||||
double m_delta, m_sinA, m_sin, m_cos;
|
||||
double m_miterLim, m_StepsPerRad;
|
||||
IntPoint m_lowest;
|
||||
PolyNode m_polyNodes;
|
||||
|
||||
void FixOrientations();
|
||||
void DoOffset(double delta);
|
||||
void OffsetPoint(int j, int& k, JoinType jointype);
|
||||
void DoSquare(int j, int k);
|
||||
void DoMiter(int j, int k, double r);
|
||||
void DoRound(int j, int k);
|
||||
};
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class clipperException : public std::exception
|
||||
|
|
|
@ -21,7 +21,7 @@ endif()
|
|||
|
||||
set(draco_root "${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
set(draco_src_root "${draco_root}/src/draco")
|
||||
set(draco_build "${CMAKE_BINARY_DIR}")
|
||||
set(draco_build "${Assimp_BINARY_DIR}")
|
||||
|
||||
if("${draco_root}" STREQUAL "${draco_build}")
|
||||
message(
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
# Run manually to reformat a file:
|
||||
# clang-format -i --style=file <file>
|
||||
Language: Cpp
|
||||
BasedOnStyle: Google
|
|
@ -0,0 +1,53 @@
|
|||
name: Bug Report
|
||||
description: Let us know that something does not work as expected.
|
||||
title: "[Bug]: Please title this bug report"
|
||||
body:
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: Describe the issue
|
||||
description: What happened, and what did you expect to happen?
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: steps
|
||||
attributes:
|
||||
label: Steps to reproduce the problem
|
||||
description: It is important that we are able to reproduce the problem that you are experiencing. Please provide all code and relevant steps to reproduce the problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the problem are also helpful.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: What version of GoogleTest are you using?
|
||||
description: Please include the output of `git rev-parse HEAD` or the GoogleTest release version number that you are using.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: os
|
||||
attributes:
|
||||
label: What operating system and version are you using?
|
||||
description: If you are using a Linux distribution please include the name and version of the distribution as well.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: compiler
|
||||
attributes:
|
||||
label: What compiler and version are you using?
|
||||
description: Please include the output of `gcc -v` or `clang -v`, or the equivalent for your compiler.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: buildsystem
|
||||
attributes:
|
||||
label: What build system are you using?
|
||||
description: Please include the output of `bazel --version` or `cmake --version`, or the equivalent for your build system.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Add any other context about the problem here.
|
||||
validations:
|
||||
required: false
|
|
@ -0,0 +1,33 @@
|
|||
name: Feature request
|
||||
description: Propose a new feature.
|
||||
title: "[FR]: Please title this feature request"
|
||||
labels: "enhancement"
|
||||
body:
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: Does the feature exist in the most recent commit?
|
||||
description: We recommend using the latest commit from GitHub in your projects.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: why
|
||||
attributes:
|
||||
label: Why do we need this feature?
|
||||
description: Ideally, explain why a combination of existing features cannot be used instead.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: proposal
|
||||
attributes:
|
||||
label: Describe the proposal.
|
||||
description: Include a detailed description of the feature, with usage examples.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: platform
|
||||
attributes:
|
||||
label: Is the feature specific to an operating system, compiler, or build system version?
|
||||
description: If it is, please specify which versions.
|
||||
validations:
|
||||
required: true
|
|
@ -0,0 +1,5 @@
|
|||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Get Help
|
||||
url: https://github.com/google/googletest/discussions
|
||||
about: Please ask and answer questions here.
|
|
@ -0,0 +1,43 @@
|
|||
name: ci
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
BAZEL_CXXOPTS: -std=c++14
|
||||
|
||||
jobs:
|
||||
Linux:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Tests
|
||||
run: bazel test --cxxopt=-std=c++14 --features=external_include_paths --test_output=errors ...
|
||||
|
||||
macOS:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Tests
|
||||
run: bazel test --cxxopt=-std=c++14 --features=external_include_paths --test_output=errors ...
|
||||
|
||||
|
||||
Windows:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Tests
|
||||
run: bazel test --cxxopt=/std:c++14 --features=external_include_paths --test_output=errors ...
|
|
@ -0,0 +1,88 @@
|
|||
# Ignore CI build directory
|
||||
build/
|
||||
xcuserdata
|
||||
cmake-build-debug/
|
||||
.idea/
|
||||
bazel-bin
|
||||
bazel-genfiles
|
||||
bazel-googletest
|
||||
bazel-out
|
||||
bazel-testlogs
|
||||
# python
|
||||
*.pyc
|
||||
|
||||
# Visual Studio files
|
||||
.vs
|
||||
*.sdf
|
||||
*.opensdf
|
||||
*.VC.opendb
|
||||
*.suo
|
||||
*.user
|
||||
_ReSharper.Caches/
|
||||
Win32-Debug/
|
||||
Win32-Release/
|
||||
x64-Debug/
|
||||
x64-Release/
|
||||
|
||||
# VSCode files
|
||||
.cache/
|
||||
cmake-variants.yaml
|
||||
|
||||
# Ignore autoconf / automake files
|
||||
Makefile.in
|
||||
aclocal.m4
|
||||
configure
|
||||
build-aux/
|
||||
autom4te.cache/
|
||||
googletest/m4/libtool.m4
|
||||
googletest/m4/ltoptions.m4
|
||||
googletest/m4/ltsugar.m4
|
||||
googletest/m4/ltversion.m4
|
||||
googletest/m4/lt~obsolete.m4
|
||||
googlemock/m4
|
||||
|
||||
# Ignore generated directories.
|
||||
googlemock/fused-src/
|
||||
googletest/fused-src/
|
||||
|
||||
# macOS files
|
||||
.DS_Store
|
||||
googletest/.DS_Store
|
||||
googletest/xcode/.DS_Store
|
||||
|
||||
# Ignore cmake generated directories and files.
|
||||
CMakeFiles
|
||||
CTestTestfile.cmake
|
||||
Makefile
|
||||
cmake_install.cmake
|
||||
googlemock/CMakeFiles
|
||||
googlemock/CTestTestfile.cmake
|
||||
googlemock/Makefile
|
||||
googlemock/cmake_install.cmake
|
||||
googlemock/gtest
|
||||
/bin
|
||||
/googlemock/gmock.dir
|
||||
/googlemock/gmock_main.dir
|
||||
/googlemock/RUN_TESTS.vcxproj.filters
|
||||
/googlemock/RUN_TESTS.vcxproj
|
||||
/googlemock/INSTALL.vcxproj.filters
|
||||
/googlemock/INSTALL.vcxproj
|
||||
/googlemock/gmock_main.vcxproj.filters
|
||||
/googlemock/gmock_main.vcxproj
|
||||
/googlemock/gmock.vcxproj.filters
|
||||
/googlemock/gmock.vcxproj
|
||||
/googlemock/gmock.sln
|
||||
/googlemock/ALL_BUILD.vcxproj.filters
|
||||
/googlemock/ALL_BUILD.vcxproj
|
||||
/lib
|
||||
/Win32
|
||||
/ZERO_CHECK.vcxproj.filters
|
||||
/ZERO_CHECK.vcxproj
|
||||
/RUN_TESTS.vcxproj.filters
|
||||
/RUN_TESTS.vcxproj
|
||||
/INSTALL.vcxproj.filters
|
||||
/INSTALL.vcxproj
|
||||
/googletest-distribution.sln
|
||||
/CMakeCache.txt
|
||||
/ALL_BUILD.vcxproj.filters
|
||||
/ALL_BUILD.vcxproj
|
|
@ -0,0 +1,219 @@
|
|||
# Copyright 2017 Google Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Bazel Build for Google C++ Testing Framework(Google Test)
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
config_setting(
|
||||
name = "qnx",
|
||||
constraint_values = ["@platforms//os:qnx"],
|
||||
)
|
||||
|
||||
config_setting(
|
||||
name = "windows",
|
||||
constraint_values = ["@platforms//os:windows"],
|
||||
)
|
||||
|
||||
config_setting(
|
||||
name = "freebsd",
|
||||
constraint_values = ["@platforms//os:freebsd"],
|
||||
)
|
||||
|
||||
config_setting(
|
||||
name = "openbsd",
|
||||
constraint_values = ["@platforms//os:openbsd"],
|
||||
)
|
||||
|
||||
config_setting(
|
||||
name = "msvc_compiler",
|
||||
flag_values = {
|
||||
"@bazel_tools//tools/cpp:compiler": "msvc-cl",
|
||||
},
|
||||
visibility = [":__subpackages__"],
|
||||
)
|
||||
|
||||
config_setting(
|
||||
name = "has_absl",
|
||||
values = {"define": "absl=1"},
|
||||
)
|
||||
|
||||
# Library that defines the FRIEND_TEST macro.
|
||||
cc_library(
|
||||
name = "gtest_prod",
|
||||
hdrs = ["googletest/include/gtest/gtest_prod.h"],
|
||||
includes = ["googletest/include"],
|
||||
)
|
||||
|
||||
# Google Test including Google Mock
|
||||
cc_library(
|
||||
name = "gtest",
|
||||
srcs = glob(
|
||||
include = [
|
||||
"googletest/src/*.cc",
|
||||
"googletest/src/*.h",
|
||||
"googletest/include/gtest/**/*.h",
|
||||
"googlemock/src/*.cc",
|
||||
"googlemock/include/gmock/**/*.h",
|
||||
],
|
||||
exclude = [
|
||||
"googletest/src/gtest-all.cc",
|
||||
"googletest/src/gtest_main.cc",
|
||||
"googlemock/src/gmock-all.cc",
|
||||
"googlemock/src/gmock_main.cc",
|
||||
],
|
||||
),
|
||||
hdrs = glob([
|
||||
"googletest/include/gtest/*.h",
|
||||
"googlemock/include/gmock/*.h",
|
||||
]),
|
||||
copts = select({
|
||||
":qnx": [],
|
||||
":windows": [],
|
||||
"//conditions:default": ["-pthread"],
|
||||
}),
|
||||
defines = select({
|
||||
":has_absl": ["GTEST_HAS_ABSL=1"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
features = select({
|
||||
":windows": ["windows_export_all_symbols"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
includes = [
|
||||
"googlemock",
|
||||
"googlemock/include",
|
||||
"googletest",
|
||||
"googletest/include",
|
||||
],
|
||||
linkopts = select({
|
||||
":qnx": ["-lregex"],
|
||||
":windows": [],
|
||||
":freebsd": [
|
||||
"-lm",
|
||||
"-pthread",
|
||||
],
|
||||
":openbsd": [
|
||||
"-lm",
|
||||
"-pthread",
|
||||
],
|
||||
"//conditions:default": ["-pthread"],
|
||||
}),
|
||||
deps = select({
|
||||
":has_absl": [
|
||||
"@com_google_absl//absl/container:flat_hash_set",
|
||||
"@com_google_absl//absl/debugging:failure_signal_handler",
|
||||
"@com_google_absl//absl/debugging:stacktrace",
|
||||
"@com_google_absl//absl/debugging:symbolize",
|
||||
"@com_google_absl//absl/flags:flag",
|
||||
"@com_google_absl//absl/flags:parse",
|
||||
"@com_google_absl//absl/flags:reflection",
|
||||
"@com_google_absl//absl/flags:usage",
|
||||
"@com_google_absl//absl/strings",
|
||||
"@com_google_absl//absl/types:any",
|
||||
"@com_google_absl//absl/types:optional",
|
||||
"@com_google_absl//absl/types:variant",
|
||||
"@com_googlesource_code_re2//:re2",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "gtest_main",
|
||||
srcs = ["googlemock/src/gmock_main.cc"],
|
||||
features = select({
|
||||
":windows": ["windows_export_all_symbols"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
deps = [":gtest"],
|
||||
)
|
||||
|
||||
# The following rules build samples of how to use gTest.
|
||||
cc_library(
|
||||
name = "gtest_sample_lib",
|
||||
srcs = [
|
||||
"googletest/samples/sample1.cc",
|
||||
"googletest/samples/sample2.cc",
|
||||
"googletest/samples/sample4.cc",
|
||||
],
|
||||
hdrs = [
|
||||
"googletest/samples/prime_tables.h",
|
||||
"googletest/samples/sample1.h",
|
||||
"googletest/samples/sample2.h",
|
||||
"googletest/samples/sample3-inl.h",
|
||||
"googletest/samples/sample4.h",
|
||||
],
|
||||
features = select({
|
||||
":windows": ["windows_export_all_symbols"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "gtest_samples",
|
||||
size = "small",
|
||||
# All Samples except:
|
||||
# sample9 (main)
|
||||
# sample10 (main and takes a command line option and needs to be separate)
|
||||
srcs = [
|
||||
"googletest/samples/sample1_unittest.cc",
|
||||
"googletest/samples/sample2_unittest.cc",
|
||||
"googletest/samples/sample3_unittest.cc",
|
||||
"googletest/samples/sample4_unittest.cc",
|
||||
"googletest/samples/sample5_unittest.cc",
|
||||
"googletest/samples/sample6_unittest.cc",
|
||||
"googletest/samples/sample7_unittest.cc",
|
||||
"googletest/samples/sample8_unittest.cc",
|
||||
],
|
||||
linkstatic = 0,
|
||||
deps = [
|
||||
"gtest_sample_lib",
|
||||
":gtest_main",
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "sample9_unittest",
|
||||
size = "small",
|
||||
srcs = ["googletest/samples/sample9_unittest.cc"],
|
||||
deps = [":gtest"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "sample10_unittest",
|
||||
size = "small",
|
||||
srcs = ["googletest/samples/sample10_unittest.cc"],
|
||||
deps = [":gtest"],
|
||||
)
|
|
@ -0,0 +1,27 @@
|
|||
# Note: CMake support is community-based. The maintainers do not use CMake
|
||||
# internally.
|
||||
|
||||
cmake_minimum_required(VERSION 3.13)
|
||||
|
||||
project(googletest-distribution)
|
||||
set(GOOGLETEST_VERSION 1.14.0)
|
||||
|
||||
if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
endif()
|
||||
|
||||
enable_testing()
|
||||
|
||||
include(CMakeDependentOption)
|
||||
include(GNUInstallDirs)
|
||||
|
||||
# Note that the googlemock target already builds googletest
|
||||
option(BUILD_GMOCK "Builds the googlemock subproject" ON)
|
||||
option(INSTALL_GTEST "Enable installation of googletest. (Projects embedding googletest may want to turn this OFF.)" ON)
|
||||
option(GTEST_HAS_ABSL "Use Abseil and RE2. Requires Abseil and RE2 to be separately added to the build." OFF)
|
||||
|
||||
if(BUILD_GMOCK)
|
||||
add_subdirectory( googlemock )
|
||||
else()
|
||||
add_subdirectory( googletest )
|
||||
endif()
|
|
@ -0,0 +1,141 @@
|
|||
# How to become a contributor and submit your own code
|
||||
|
||||
## Contributor License Agreements
|
||||
|
||||
We'd love to accept your patches! Before we can take them, we have to jump a
|
||||
couple of legal hurdles.
|
||||
|
||||
Please fill out either the individual or corporate Contributor License Agreement
|
||||
(CLA).
|
||||
|
||||
* If you are an individual writing original source code and you're sure you
|
||||
own the intellectual property, then you'll need to sign an
|
||||
[individual CLA](https://developers.google.com/open-source/cla/individual).
|
||||
* If you work for a company that wants to allow you to contribute your work,
|
||||
then you'll need to sign a
|
||||
[corporate CLA](https://developers.google.com/open-source/cla/corporate).
|
||||
|
||||
Follow either of the two links above to access the appropriate CLA and
|
||||
instructions for how to sign and return it. Once we receive it, we'll be able to
|
||||
accept your pull requests.
|
||||
|
||||
## Are you a Googler?
|
||||
|
||||
If you are a Googler, please make an attempt to submit an internal contribution
|
||||
rather than a GitHub Pull Request. If you are not able to submit internally, a
|
||||
PR is acceptable as an alternative.
|
||||
|
||||
## Contributing A Patch
|
||||
|
||||
1. Submit an issue describing your proposed change to the
|
||||
[issue tracker](https://github.com/google/googletest/issues).
|
||||
2. Please don't mix more than one logical change per submittal, because it
|
||||
makes the history hard to follow. If you want to make a change that doesn't
|
||||
have a corresponding issue in the issue tracker, please create one.
|
||||
3. Also, coordinate with team members that are listed on the issue in question.
|
||||
This ensures that work isn't being duplicated, and communicating your plan
|
||||
early also generally leads to better patches.
|
||||
4. If your proposed change is accepted, and you haven't already done so, sign a
|
||||
Contributor License Agreement
|
||||
([see details above](#contributor-license-agreements)).
|
||||
5. Fork the desired repo, develop and test your code changes.
|
||||
6. Ensure that your code adheres to the existing style in the sample to which
|
||||
you are contributing.
|
||||
7. Ensure that your code has an appropriate set of unit tests which all pass.
|
||||
8. Submit a pull request.
|
||||
|
||||
## The Google Test and Google Mock Communities
|
||||
|
||||
The Google Test community exists primarily through the
|
||||
[discussion group](http://groups.google.com/group/googletestframework) and the
|
||||
GitHub repository. Likewise, the Google Mock community exists primarily through
|
||||
its own [discussion group](http://groups.google.com/group/googlemock). You are
|
||||
definitely encouraged to contribute to the discussion and you can also help us
|
||||
to keep the effectiveness of the group high by following and promoting the
|
||||
guidelines listed here.
|
||||
|
||||
### Please Be Friendly
|
||||
|
||||
Showing courtesy and respect to others is a vital part of the Google culture,
|
||||
and we strongly encourage everyone participating in Google Test development to
|
||||
join us in accepting nothing less. Of course, being courteous is not the same as
|
||||
failing to constructively disagree with each other, but it does mean that we
|
||||
should be respectful of each other when enumerating the 42 technical reasons
|
||||
that a particular proposal may not be the best choice. There's never a reason to
|
||||
be antagonistic or dismissive toward anyone who is sincerely trying to
|
||||
contribute to a discussion.
|
||||
|
||||
Sure, C++ testing is serious business and all that, but it's also a lot of fun.
|
||||
Let's keep it that way. Let's strive to be one of the friendliest communities in
|
||||
all of open source.
|
||||
|
||||
As always, discuss Google Test in the official GoogleTest discussion group. You
|
||||
don't have to actually submit code in order to sign up. Your participation
|
||||
itself is a valuable contribution.
|
||||
|
||||
## Style
|
||||
|
||||
To keep the source consistent, readable, diffable and easy to merge, we use a
|
||||
fairly rigid coding style, as defined by the
|
||||
[google-styleguide](https://github.com/google/styleguide) project. All patches
|
||||
will be expected to conform to the style outlined
|
||||
[here](https://google.github.io/styleguide/cppguide.html). Use
|
||||
[.clang-format](https://github.com/google/googletest/blob/main/.clang-format) to
|
||||
check your formatting.
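
As the comment at the top of `.clang-format` suggests, you can reformat a file in
place before sending a patch (substitute the file you touched for the placeholder
path):

```
clang-format -i --style=file path/to/your_file.cc
```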
|
||||
|
||||
## Requirements for Contributors
|
||||
|
||||
If you plan to contribute a patch, you need to build Google Test, Google Mock,
|
||||
and their own tests from a git checkout, which has further requirements:
|
||||
|
||||
* [Python](https://www.python.org/) v3.6 or newer (for running some of the
|
||||
tests and re-generating certain source files from templates)
|
||||
* [CMake](https://cmake.org/) v2.8.12 or newer
|
||||
|
||||
## Developing Google Test and Google Mock
|
||||
|
||||
This section discusses how to make your own changes to the Google Test project.
|
||||
|
||||
### Testing Google Test and Google Mock Themselves
|
||||
|
||||
To make sure your changes work as intended and don't break existing
|
||||
functionality, you'll want to compile and run Google Test and GoogleMock's own
|
||||
tests. For that you can use CMake:
|
||||
|
||||
```
|
||||
mkdir mybuild
|
||||
cd mybuild
|
||||
cmake -Dgtest_build_tests=ON -Dgmock_build_tests=ON ${GTEST_REPO_DIR}
|
||||
```
|
||||
|
||||
To choose between building only Google Test or Google Mock, you may modify your
|
||||
cmake command to be one of the following:
|
||||
|
||||
```
|
||||
cmake -Dgtest_build_tests=ON ${GTEST_DIR} # sets up Google Test tests
|
||||
cmake -Dgmock_build_tests=ON ${GMOCK_DIR} # sets up Google Mock tests
|
||||
```
|
||||
|
||||
Make sure you have Python installed, as some of Google Test's tests are written
|
||||
in Python. If the cmake command complains about not being able to find Python
|
||||
(`Could NOT find PythonInterp (missing: PYTHON_EXECUTABLE)`), try telling it
|
||||
explicitly where your Python executable can be found:
|
||||
|
||||
```
|
||||
cmake -DPYTHON_EXECUTABLE=path/to/python ...
|
||||
```
|
||||
|
||||
Next, you can build Google Test and / or Google Mock and all desired tests. On
|
||||
\*nix, this is usually done by
|
||||
|
||||
```
|
||||
make
|
||||
```
|
||||
|
||||
To run the tests, do
|
||||
|
||||
```
|
||||
make test
|
||||
```
|
||||
|
||||
All tests should pass.
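
If you only want to re-run a subset of the tests while iterating, you can either let
CTest filter by name or pass GoogleTest's standard `--gtest_filter` flag to a single
test binary (the binary path and test name below are illustrative; use whichever test
binary you are working on):

```
ctest -R gmock                                          # run only tests whose names match the regex
./googletest/gtest_unittest --gtest_filter='FooTest.*'  # run only matching TEST()s in one binary
```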
|
|
@ -5,33 +5,61 @@
|
|||
|
||||
Ajay Joshi <jaj@google.com>
|
||||
Balázs Dán <balazs.dan@gmail.com>
|
||||
Benoit Sigoure <tsuna@google.com>
|
||||
Bharat Mediratta <bharat@menalto.com>
|
||||
Bogdan Piloca <boo@google.com>
|
||||
Chandler Carruth <chandlerc@google.com>
|
||||
Chris Prince <cprince@google.com>
|
||||
Chris Taylor <taylorc@google.com>
|
||||
Dan Egnor <egnor@google.com>
|
||||
Dave MacLachlan <dmaclach@gmail.com>
|
||||
David Anderson <danderson@google.com>
|
||||
Dean Sturtevant
|
||||
Eric Roman <eroman@chromium.org>
|
||||
Gene Volovich <gv@cite.com>
|
||||
Hady Zalek <hady.zalek@gmail.com>
|
||||
Hal Burch <gmock@hburch.com>
|
||||
Jeffrey Yasskin <jyasskin@google.com>
|
||||
Jim Keller <jimkeller@google.com>
|
||||
Joe Walnes <joe@truemesh.com>
|
||||
Jon Wray <jwray@google.com>
|
||||
Jói Sigurðsson <joi@google.com>
|
||||
Keir Mierle <mierle@gmail.com>
|
||||
Keith Ray <keith.ray@gmail.com>
|
||||
Kenton Varda <kenton@google.com>
|
||||
Kostya Serebryany <kcc@google.com>
|
||||
Krystian Kuzniarek <krystian.kuzniarek@gmail.com>
|
||||
Lev Makhlis
|
||||
Manuel Klimek <klimek@google.com>
|
||||
Mario Tanev <radix@google.com>
|
||||
Mark Paskin
|
||||
Markus Heule <markus.heule@gmail.com>
|
||||
Martijn Vels <mvels@google.com>
|
||||
Matthew Simmons <simmonmt@acm.org>
|
||||
Mika Raento <mikie@iki.fi>
|
||||
Mike Bland <mbland@google.com>
|
||||
Miklós Fazekas <mfazekas@szemafor.com>
|
||||
Neal Norwitz <nnorwitz@gmail.com>
|
||||
Nermin Ozkiranartli <nermin@google.com>
|
||||
Owen Carlsen <ocarlsen@google.com>
|
||||
Paneendra Ba <paneendra@google.com>
|
||||
Pasi Valminen <pasi.valminen@gmail.com>
|
||||
Patrick Hanna <phanna@google.com>
|
||||
Patrick Riley <pfr@google.com>
|
||||
Paul Menage <menage@google.com>
|
||||
Peter Kaminski <piotrk@google.com>
|
||||
Piotr Kaminski <piotrk@google.com>
|
||||
Preston Jackson <preston.a.jackson@gmail.com>
|
||||
Rainer Klaffenboeck <rainer.klaffenboeck@dynatrace.com>
|
||||
Russ Cox <rsc@google.com>
|
||||
Russ Rufer <russ@pentad.com>
|
||||
Sean Mcafee <eefacm@gmail.com>
|
||||
Sigurður Ásgeirsson <siggi@google.com>
|
||||
Sverre Sundsdal <sundsdal@gmail.com>
|
||||
Szymon Sobik <sobik.szymon@gmail.com>
|
||||
Takeshi Yoshino <tyoshino@google.com>
|
||||
Tracy Bialik <tracy@pentad.com>
|
||||
Vadim Berman <vadimb@google.com>
|
||||
Vlad Losev <vladl@google.com>
|
||||
Wolfgang Klier <wklier@google.com>
|
||||
Zhanyong Wan <wan@google.com>
|
|
@ -0,0 +1,146 @@
|
|||
# GoogleTest
|
||||
|
||||
### Announcements
|
||||
|
||||
#### Live at Head
|
||||
|
||||
GoogleTest now follows the
|
||||
[Abseil Live at Head philosophy](https://abseil.io/about/philosophy#upgrade-support).
|
||||
We recommend
|
||||
[updating to the latest commit in the `main` branch as often as possible](https://github.com/abseil/abseil-cpp/blob/master/FAQ.md#what-is-live-at-head-and-how-do-i-do-it).
|
||||
We do publish occasional semantic versions, tagged with
|
||||
`v${major}.${minor}.${patch}` (e.g. `v1.13.0`).
|
||||
|
||||
#### Documentation Updates
|
||||
|
||||
Our documentation is now live on GitHub Pages at
|
||||
https://google.github.io/googletest/. We recommend browsing the documentation on
|
||||
GitHub Pages rather than directly in the repository.
|
||||
|
||||
#### Release 1.13.0
|
||||
|
||||
[Release 1.13.0](https://github.com/google/googletest/releases/tag/v1.13.0) is
|
||||
now available.
|
||||
|
||||
The 1.13.x branch requires at least C++14.
|
||||
|
||||
#### Continuous Integration
|
||||
|
||||
We use Google's internal systems for continuous integration. \
|
||||
GitHub Actions were added for the convenience of open-source contributors. They
|
||||
are exclusively maintained by the open-source community and not used by the
|
||||
GoogleTest team.
|
||||
|
||||
#### Coming Soon
|
||||
|
||||
* We are planning to take a dependency on
|
||||
[Abseil](https://github.com/abseil/abseil-cpp).
|
||||
* More documentation improvements are planned.
|
||||
|
||||
## Welcome to **GoogleTest**, Google's C++ test framework!
|
||||
|
||||
This repository is a merger of the formerly separate GoogleTest and GoogleMock
|
||||
projects. These were so closely related that it makes sense to maintain and
|
||||
release them together.
|
||||
|
||||
### Getting Started
|
||||
|
||||
See the [GoogleTest User's Guide](https://google.github.io/googletest/) for
|
||||
documentation. We recommend starting with the
|
||||
[GoogleTest Primer](https://google.github.io/googletest/primer.html).
|
||||
|
||||
More information about building GoogleTest can be found at
|
||||
[googletest/README.md](googletest/README.md).
|
||||
|
||||
## Features
|
||||
|
||||
* xUnit test framework: \
|
||||
Googletest is based on the [xUnit](https://en.wikipedia.org/wiki/XUnit)
|
||||
testing framework, a popular architecture for unit testing
|
||||
* Test discovery: \
|
||||
Googletest automatically discovers and runs your tests, eliminating the need
|
||||
to register them manually
|
||||
* Rich set of assertions: \
|
||||
Googletest provides a variety of assertions, such as equality, inequality,
|
||||
exceptions, and more, making it easy to test your code (see the sketch after this list)
|
||||
* User-defined assertions: \
|
||||
You can define your own assertions with Googletest, making it simple to
|
||||
write tests that are specific to your code
|
||||
* Death tests: \
|
||||
Googletest supports death tests, which verify that your code exits in a
|
||||
certain way, making it useful for testing error-handling code
|
||||
* Fatal and non-fatal failures: \
|
||||
You can specify whether a test failure should be treated as fatal or
|
||||
non-fatal with Googletest, allowing tests to continue running even if a
|
||||
failure occurs
|
||||
* Value-parameterized tests: \
|
||||
Googletest supports value-parameterized tests, which run multiple times with
|
||||
different input values, making it useful for testing functions that take
|
||||
different inputs
|
||||
* Type-parameterized tests: \
|
||||
Googletest also supports type-parameterized tests, which run with different
|
||||
data types, making it useful for testing functions that work with different
|
||||
data types
|
||||
* Various options for running tests: \
|
||||
Googletest provides many options for running tests including running
|
||||
individual tests, running tests in a specific order and running tests in
|
||||
parallel
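
To give a feel for the assertion and test-discovery features listed above, here is a
minimal sketch of a test (the `Add` function and the test names are illustrative and
not part of GoogleTest itself):

```c++
#include "gtest/gtest.h"

// Illustrative function under test (not part of GoogleTest).
int Add(int a, int b) { return a + b; }

// TEST() both defines and registers the test; no manual registration is needed.
TEST(AddTest, HandlesSmallValues) {
  EXPECT_EQ(Add(2, 2), 4);  // non-fatal assertion: the test continues on failure
  ASSERT_GT(Add(1, 1), 0);  // fatal assertion: the test aborts here on failure
}
```

Linking the test file against `gtest_main` provides a ready-made `main()` that runs
every registered test.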
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
GoogleTest follows Google's
|
||||
[Foundational C++ Support Policy](https://opensource.google/documentation/policies/cplusplus-support).
|
||||
See
|
||||
[this table](https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md)
|
||||
for a list of currently supported versions of compilers, platforms, and build
|
||||
tools.
|
||||
|
||||
## Who Is Using GoogleTest?
|
||||
|
||||
In addition to many internal projects at Google, GoogleTest is also used by the
|
||||
following notable projects:
|
||||
|
||||
* The [Chromium projects](http://www.chromium.org/) (behind the Chrome browser
|
||||
and Chrome OS).
|
||||
* The [LLVM](http://llvm.org/) compiler.
|
||||
* [Protocol Buffers](https://github.com/google/protobuf), Google's data
|
||||
interchange format.
|
||||
* The [OpenCV](http://opencv.org/) computer vision library.
|
||||
|
||||
## Related Open Source Projects
|
||||
|
||||
[GTest Runner](https://github.com/nholthaus/gtest-runner) is a Qt5 based
|
||||
automated test-runner and Graphical User Interface with powerful features for
|
||||
Windows and Linux platforms.
|
||||
|
||||
[GoogleTest UI](https://github.com/ospector/gtest-gbar) is a test runner that
|
||||
runs your test binary, allows you to track its progress via a progress bar, and
|
||||
displays a list of test failures. Clicking on one shows failure text. GoogleTest
|
||||
UI is written in C#.
|
||||
|
||||
[GTest TAP Listener](https://github.com/kinow/gtest-tap-listener) is an event
|
||||
listener for GoogleTest that implements the
|
||||
[TAP protocol](https://en.wikipedia.org/wiki/Test_Anything_Protocol) for test
|
||||
result output. If your test runner understands TAP, you may find it useful.
|
||||
|
||||
[gtest-parallel](https://github.com/google/gtest-parallel) is a test runner that
|
||||
runs tests from your binary in parallel to provide significant speed-up.
|
||||
|
||||
[GoogleTest Adapter](https://marketplace.visualstudio.com/items?itemName=DavidSchuldenfrei.gtest-adapter)
|
||||
is a VS Code extension that lets you view GoogleTest tests in a tree view and run/debug
|
||||
your tests.
|
||||
|
||||
[C++ TestMate](https://github.com/matepek/vscode-catch2-test-adapter) is a VS
|
||||
Code extension that lets you view GoogleTest tests in a tree view and run/debug your
|
||||
tests.
|
||||
|
||||
[Cornichon](https://pypi.org/project/cornichon/) is a small Gherkin DSL parser
|
||||
that generates stub code for GoogleTest.
|
||||
|
||||
## Contributing Changes
|
||||
|
||||
Please read
|
||||
[`CONTRIBUTING.md`](https://github.com/google/googletest/blob/main/CONTRIBUTING.md)
|
||||
for details on how to contribute to this project.
|
||||
|
||||
Happy testing!
|
|
@ -0,0 +1,27 @@
|
|||
workspace(name = "com_google_googletest")
|
||||
|
||||
load("//:googletest_deps.bzl", "googletest_deps")
|
||||
googletest_deps()
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "rules_python", # 2023-07-31T20:39:27Z
|
||||
sha256 = "1250b59a33c591a1c4ba68c62e95fc88a84c334ec35a2e23f46cbc1b9a5a8b55",
|
||||
strip_prefix = "rules_python-e355becc30275939d87116a4ec83dad4bb50d9e1",
|
||||
urls = ["https://github.com/bazelbuild/rules_python/archive/e355becc30275939d87116a4ec83dad4bb50d9e1.zip"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_skylib", # 2023-05-31T19:24:07Z
|
||||
sha256 = "08c0386f45821ce246bbbf77503c973246ed6ee5c3463e41efc197fa9bc3a7f4",
|
||||
strip_prefix = "bazel-skylib-288731ef9f7f688932bd50e704a91a45ec185f9b",
|
||||
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/288731ef9f7f688932bd50e704a91a45ec185f9b.zip"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "platforms", # 2023-07-28T19:44:27Z
|
||||
sha256 = "40eb313613ff00a5c03eed20aba58890046f4d38dec7344f00bb9a8867853526",
|
||||
strip_prefix = "platforms-4ad40ef271da8176d4fc0194d2089b8a76e19d7b",
|
||||
urls = ["https://github.com/bazelbuild/platforms/archive/4ad40ef271da8176d4fc0194d2089b8a76e19d7b.zip"],
|
||||
)
|
|
@ -0,0 +1,137 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2020, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
set -euox pipefail
|
||||
|
||||
readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230217"
|
||||
readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20230120"
|
||||
|
||||
if [[ -z ${GTEST_ROOT:-} ]]; then
|
||||
GTEST_ROOT="$(realpath $(dirname ${0})/..)"
|
||||
fi
|
||||
|
||||
if [[ -z ${STD:-} ]]; then
|
||||
STD="c++14 c++17 c++20"
|
||||
fi
|
||||
|
||||
# Test the CMake build
|
||||
for cc in /usr/local/bin/gcc /opt/llvm/clang/bin/clang; do
|
||||
for cmake_off_on in OFF ON; do
|
||||
time docker run \
|
||||
--volume="${GTEST_ROOT}:/src:ro" \
|
||||
--tmpfs="/build:exec" \
|
||||
--workdir="/build" \
|
||||
--rm \
|
||||
--env="CC=${cc}" \
|
||||
--env=CXXFLAGS="-Werror -Wdeprecated" \
|
||||
${LINUX_LATEST_CONTAINER} \
|
||||
/bin/bash -c "
|
||||
cmake /src \
|
||||
-DCMAKE_CXX_STANDARD=14 \
|
||||
-Dgtest_build_samples=ON \
|
||||
-Dgtest_build_tests=ON \
|
||||
-Dgmock_build_tests=ON \
|
||||
-Dcxx_no_exception=${cmake_off_on} \
|
||||
-Dcxx_no_rtti=${cmake_off_on} && \
|
||||
make -j$(nproc) && \
|
||||
ctest -j$(nproc) --output-on-failure"
|
||||
done
|
||||
done
|
||||
|
||||
# Do one test with an older version of GCC
|
||||
time docker run \
|
||||
--volume="${GTEST_ROOT}:/src:ro" \
|
||||
--workdir="/src" \
|
||||
--rm \
|
||||
--env="CC=/usr/local/bin/gcc" \
|
||||
--env="BAZEL_CXXOPTS=-std=c++14" \
|
||||
${LINUX_GCC_FLOOR_CONTAINER} \
|
||||
/usr/local/bin/bazel test ... \
|
||||
--copt="-Wall" \
|
||||
--copt="-Werror" \
|
||||
--copt="-Wuninitialized" \
|
||||
--copt="-Wundef" \
|
||||
--copt="-Wno-error=pragmas" \
|
||||
--distdir="/bazel-distdir" \
|
||||
--features=external_include_paths \
|
||||
--keep_going \
|
||||
--show_timestamps \
|
||||
--test_output=errors
|
||||
|
||||
# Test GCC
|
||||
for std in ${STD}; do
|
||||
for absl in 0 1; do
|
||||
time docker run \
|
||||
--volume="${GTEST_ROOT}:/src:ro" \
|
||||
--workdir="/src" \
|
||||
--rm \
|
||||
--env="CC=/usr/local/bin/gcc" \
|
||||
--env="BAZEL_CXXOPTS=-std=${std}" \
|
||||
${LINUX_LATEST_CONTAINER} \
|
||||
/usr/local/bin/bazel test ... \
|
||||
--copt="-Wall" \
|
||||
--copt="-Werror" \
|
||||
--copt="-Wuninitialized" \
|
||||
--copt="-Wundef" \
|
||||
--define="absl=${absl}" \
|
||||
--distdir="/bazel-distdir" \
|
||||
--features=external_include_paths \
|
||||
--keep_going \
|
||||
--show_timestamps \
|
||||
--test_output=errors
|
||||
done
|
||||
done
|
||||
|
||||
# Test Clang
|
||||
for std in ${STD}; do
|
||||
for absl in 0 1; do
|
||||
time docker run \
|
||||
--volume="${GTEST_ROOT}:/src:ro" \
|
||||
--workdir="/src" \
|
||||
--rm \
|
||||
--env="CC=/opt/llvm/clang/bin/clang" \
|
||||
--env="BAZEL_CXXOPTS=-std=${std}" \
|
||||
${LINUX_LATEST_CONTAINER} \
|
||||
/usr/local/bin/bazel test ... \
|
||||
--copt="--gcc-toolchain=/usr/local" \
|
||||
--copt="-Wall" \
|
||||
--copt="-Werror" \
|
||||
--copt="-Wuninitialized" \
|
||||
--copt="-Wundef" \
|
||||
--define="absl=${absl}" \
|
||||
--distdir="/bazel-distdir" \
|
||||
--features=external_include_paths \
|
||||
--keep_going \
|
||||
--linkopt="--gcc-toolchain=/usr/local" \
|
||||
--show_timestamps \
|
||||
--test_output=errors
|
||||
done
|
||||
done
|
|
@ -0,0 +1,76 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2020, Google Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
set -euox pipefail
|
||||
|
||||
if [[ -z ${GTEST_ROOT:-} ]]; then
|
||||
GTEST_ROOT="$(realpath $(dirname ${0})/..)"
|
||||
fi
|
||||
|
||||
# Test the CMake build
|
||||
for cmake_off_on in OFF ON; do
|
||||
BUILD_DIR=$(mktemp -d build_dir.XXXXXXXX)
|
||||
cd ${BUILD_DIR}
|
||||
time cmake ${GTEST_ROOT} \
|
||||
-DCMAKE_CXX_STANDARD=14 \
|
||||
-Dgtest_build_samples=ON \
|
||||
-Dgtest_build_tests=ON \
|
||||
-Dgmock_build_tests=ON \
|
||||
-Dcxx_no_exception=${cmake_off_on} \
|
||||
-Dcxx_no_rtti=${cmake_off_on}
|
||||
time make
|
||||
time ctest -j$(nproc) --output-on-failure
|
||||
done
|
||||
|
||||
# Test the Bazel build
|
||||
|
||||
# If we are running on Kokoro, check for a versioned Bazel binary.
|
||||
KOKORO_GFILE_BAZEL_BIN="bazel-5.1.1-darwin-x86_64"
|
||||
if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then
|
||||
BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}"
|
||||
chmod +x ${BAZEL_BIN}
|
||||
else
|
||||
BAZEL_BIN="bazel"
|
||||
fi
|
||||
|
||||
cd ${GTEST_ROOT}
|
||||
for absl in 0 1; do
|
||||
${BAZEL_BIN} test ... \
|
||||
--copt="-Wall" \
|
||||
--copt="-Werror" \
|
||||
--copt="-Wundef" \
|
||||
--cxxopt="-std=c++14" \
|
||||
--define="absl=${absl}" \
|
||||
--features=external_include_paths \
|
||||
--keep_going \
|
||||
--show_timestamps \
|
||||
--test_output=errors
|
||||
done
|
|
@ -0,0 +1,58 @@
|
|||
SETLOCAL ENABLEDELAYEDEXPANSION
|
||||
|
||||
SET BAZEL_EXE=%KOKORO_GFILE_DIR%\bazel-5.1.1-windows-x86_64.exe
|
||||
|
||||
SET PATH=C:\Python34;%PATH%
|
||||
SET BAZEL_PYTHON=C:\python34\python.exe
|
||||
SET BAZEL_SH=C:\tools\msys64\usr\bin\bash.exe
|
||||
SET CMAKE_BIN="cmake.exe"
|
||||
SET CTEST_BIN="ctest.exe"
|
||||
SET CTEST_OUTPUT_ON_FAILURE=1
|
||||
SET CMAKE_BUILD_PARALLEL_LEVEL=16
|
||||
SET CTEST_PARALLEL_LEVEL=16
|
||||
|
||||
IF EXIST git\googletest (
|
||||
CD git\googletest
|
||||
) ELSE IF EXIST github\googletest (
|
||||
CD github\googletest
|
||||
)
|
||||
|
||||
IF %errorlevel% neq 0 EXIT /B 1
|
||||
|
||||
:: ----------------------------------------------------------------------------
|
||||
:: CMake
|
||||
MKDIR cmake_msvc2022
|
||||
CD cmake_msvc2022
|
||||
|
||||
%CMAKE_BIN% .. ^
|
||||
-G "Visual Studio 17 2022" ^
|
||||
-DPYTHON_EXECUTABLE:FILEPATH=c:\python37\python.exe ^
|
||||
-DPYTHON_INCLUDE_DIR:PATH=c:\python37\include ^
|
||||
-DPYTHON_LIBRARY:FILEPATH=c:\python37\lib\site-packages\pip ^
|
||||
-Dgtest_build_samples=ON ^
|
||||
-Dgtest_build_tests=ON ^
|
||||
-Dgmock_build_tests=ON
|
||||
IF %errorlevel% neq 0 EXIT /B 1
|
||||
|
||||
%CMAKE_BIN% --build . --target ALL_BUILD --config Debug -- -maxcpucount
|
||||
IF %errorlevel% neq 0 EXIT /B 1
|
||||
|
||||
%CTEST_BIN% -C Debug --timeout 600
|
||||
IF %errorlevel% neq 0 EXIT /B 1
|
||||
|
||||
CD ..
|
||||
RMDIR /S /Q cmake_msvc2022
|
||||
|
||||
:: ----------------------------------------------------------------------------
|
||||
:: Bazel
|
||||
|
||||
SET BAZEL_VS=C:\Program Files\Microsoft Visual Studio\2022\Community
|
||||
%BAZEL_EXE% test ... ^
|
||||
--compilation_mode=dbg ^
|
||||
--copt=/std:c++14 ^
|
||||
--copt=/WX ^
|
||||
--features=external_include_paths ^
|
||||
--keep_going ^
|
||||
--test_output=errors ^
|
||||
--test_tag_filters=-no_test_msvc2017
|
||||
IF %errorlevel% neq 0 EXIT /B 1
|
|
@ -0,0 +1 @@
|
|||
title: GoogleTest
|
|
@ -0,0 +1,43 @@
|
|||
nav:
|
||||
- section: "Get Started"
|
||||
items:
|
||||
- title: "Supported Platforms"
|
||||
url: "/platforms.html"
|
||||
- title: "Quickstart: Bazel"
|
||||
url: "/quickstart-bazel.html"
|
||||
- title: "Quickstart: CMake"
|
||||
url: "/quickstart-cmake.html"
|
||||
- section: "Guides"
|
||||
items:
|
||||
- title: "GoogleTest Primer"
|
||||
url: "/primer.html"
|
||||
- title: "Advanced Topics"
|
||||
url: "/advanced.html"
|
||||
- title: "Mocking for Dummies"
|
||||
url: "/gmock_for_dummies.html"
|
||||
- title: "Mocking Cookbook"
|
||||
url: "/gmock_cook_book.html"
|
||||
- title: "Mocking Cheat Sheet"
|
||||
url: "/gmock_cheat_sheet.html"
|
||||
- section: "References"
|
||||
items:
|
||||
- title: "Testing Reference"
|
||||
url: "/reference/testing.html"
|
||||
- title: "Mocking Reference"
|
||||
url: "/reference/mocking.html"
|
||||
- title: "Assertions"
|
||||
url: "/reference/assertions.html"
|
||||
- title: "Matchers"
|
||||
url: "/reference/matchers.html"
|
||||
- title: "Actions"
|
||||
url: "/reference/actions.html"
|
||||
- title: "Testing FAQ"
|
||||
url: "/faq.html"
|
||||
- title: "Mocking FAQ"
|
||||
url: "/gmock_faq.html"
|
||||
- title: "Code Samples"
|
||||
url: "/samples.html"
|
||||
- title: "Using pkg-config"
|
||||
url: "/pkgconfig.html"
|
||||
- title: "Community Documentation"
|
||||
url: "/community_created_documentation.html"
|
|
@ -0,0 +1,58 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="{{ site.lang | default: "en-US" }}">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
{% seo %}
|
||||
<link rel="stylesheet" href="{{ "/assets/css/style.css?v=" | append: site.github.build_revision | relative_url }}">
|
||||
<script>
|
||||
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
|
||||
ga('create', 'UA-197576187-1', { 'storage': 'none' });
|
||||
ga('set', 'referrer', document.referrer.split('?')[0]);
|
||||
ga('set', 'location', window.location.href.split('?')[0]);
|
||||
ga('set', 'anonymizeIp', true);
|
||||
ga('send', 'pageview');
|
||||
</script>
|
||||
<script async src='https://www.google-analytics.com/analytics.js'></script>
|
||||
</head>
|
||||
<body>
|
||||
<div class="sidebar">
|
||||
<div class="header">
|
||||
<h1><a href="{{ "/" | relative_url }}">{{ site.title | default: "Documentation" }}</a></h1>
|
||||
</div>
|
||||
<input type="checkbox" id="nav-toggle" class="nav-toggle">
|
||||
<label for="nav-toggle" class="expander">
|
||||
<span class="arrow"></span>
|
||||
</label>
|
||||
<nav>
|
||||
{% for item in site.data.navigation.nav %}
|
||||
<h2>{{ item.section }}</h2>
|
||||
<ul>
|
||||
{% for subitem in item.items %}
|
||||
<a href="{{subitem.url | relative_url }}">
|
||||
<li class="{% if subitem.url == page.url %}active{% endif %}">
|
||||
{{ subitem.title }}
|
||||
</li>
|
||||
</a>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endfor %}
|
||||
</nav>
|
||||
</div>
|
||||
<div class="main markdown-body">
|
||||
<div class="main-inner">
|
||||
{{ content }}
|
||||
</div>
|
||||
<div class="footer">
|
||||
GoogleTest ·
|
||||
<a href="https://github.com/google/googletest">GitHub Repository</a> ·
|
||||
<a href="https://github.com/google/googletest/blob/main/LICENSE">License</a> ·
|
||||
<a href="https://policies.google.com/privacy">Privacy Policy</a>
|
||||
</div>
|
||||
</div>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/anchor-js/4.1.0/anchor.min.js" integrity="sha256-lZaRhKri35AyJSypXXs4o6OPFTbTmUoltBbDCbdzegg=" crossorigin="anonymous"></script>
|
||||
<script>anchors.add('.main h2, .main h3, .main h4, .main h5, .main h6');</script>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,200 @@
|
|||
// Styles for GoogleTest docs website on GitHub Pages.
|
||||
// Color variables are defined in
|
||||
// https://github.com/pages-themes/primer/tree/master/_sass/primer-support/lib/variables
|
||||
|
||||
$sidebar-width: 260px;
|
||||
|
||||
body {
|
||||
display: flex;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
background: $black;
|
||||
color: $text-white;
|
||||
flex-shrink: 0;
|
||||
height: 100vh;
|
||||
overflow: auto;
|
||||
position: sticky;
|
||||
top: 0;
|
||||
width: $sidebar-width;
|
||||
}
|
||||
|
||||
.sidebar h1 {
|
||||
font-size: 1.5em;
|
||||
}
|
||||
|
||||
.sidebar h2 {
|
||||
color: $gray-light;
|
||||
font-size: 0.8em;
|
||||
font-weight: normal;
|
||||
margin-bottom: 0.8em;
|
||||
padding-left: 2.5em;
|
||||
text-transform: uppercase;
|
||||
}
|
||||
|
||||
.sidebar .header {
|
||||
background: $black;
|
||||
padding: 2em;
|
||||
position: sticky;
|
||||
top: 0;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.sidebar .header a {
|
||||
color: $text-white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.sidebar .nav-toggle {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.sidebar .expander {
|
||||
cursor: pointer;
|
||||
display: none;
|
||||
height: 3em;
|
||||
position: absolute;
|
||||
right: 1em;
|
||||
top: 1.5em;
|
||||
width: 3em;
|
||||
}
|
||||
|
||||
.sidebar .expander .arrow {
|
||||
border: solid $white;
|
||||
border-width: 0 3px 3px 0;
|
||||
display: block;
|
||||
height: 0.7em;
|
||||
margin: 1em auto;
|
||||
transform: rotate(45deg);
|
||||
transition: transform 0.5s;
|
||||
width: 0.7em;
|
||||
}
|
||||
|
||||
.sidebar nav {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.sidebar nav ul {
|
||||
list-style-type: none;
|
||||
margin-bottom: 1em;
|
||||
padding: 0;
|
||||
|
||||
&:last-child {
|
||||
margin-bottom: 2em;
|
||||
}
|
||||
|
||||
a {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
li {
|
||||
color: $text-white;
|
||||
padding-left: 2em;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
li.active {
|
||||
background: $border-gray-darker;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
li:hover {
|
||||
background: $border-gray-darker;
|
||||
}
|
||||
}
|
||||
|
||||
.main {
|
||||
background-color: $bg-gray;
|
||||
width: calc(100% - #{$sidebar-width});
|
||||
}
|
||||
|
||||
.main .main-inner {
|
||||
background-color: $white;
|
||||
padding: 2em;
|
||||
}
|
||||
|
||||
.main .footer {
|
||||
margin: 0;
|
||||
padding: 2em;
|
||||
}
|
||||
|
||||
.main table th {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.main .callout {
|
||||
border-left: 0.25em solid $white;
|
||||
padding: 1em;
|
||||
|
||||
a {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
&.important {
|
||||
background-color: $bg-yellow-light;
|
||||
border-color: $bg-yellow;
|
||||
color: $black;
|
||||
}
|
||||
|
||||
&.note {
|
||||
background-color: $bg-blue-light;
|
||||
border-color: $text-blue;
|
||||
color: $text-blue;
|
||||
}
|
||||
|
||||
&.tip {
|
||||
background-color: $green-000;
|
||||
border-color: $green-700;
|
||||
color: $green-700;
|
||||
}
|
||||
|
||||
&.warning {
|
||||
background-color: $red-000;
|
||||
border-color: $text-red;
|
||||
color: $text-red;
|
||||
}
|
||||
}
|
||||
|
||||
.main .good pre {
|
||||
background-color: $bg-green-light;
|
||||
}
|
||||
|
||||
.main .bad pre {
|
||||
background-color: $red-000;
|
||||
}
|
||||
|
||||
@media all and (max-width: 768px) {
|
||||
body {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
height: auto;
|
||||
position: relative;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.sidebar .expander {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.sidebar nav {
|
||||
height: 0;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.sidebar .nav-toggle:checked {
|
||||
& ~ nav {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
& + .expander .arrow {
|
||||
transform: rotate(-135deg);
|
||||
}
|
||||
}
|
||||
|
||||
.main {
|
||||
width: 100%;
|
||||
}
|
||||
}
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
---
|
||||
|
||||
@import "jekyll-theme-primer";
|
||||
@import "main";
|
|
@ -0,0 +1,7 @@
|
|||
# Community-Created Documentation
|
||||
|
||||
The following is a list, in no particular order, of links to documentation
|
||||
created by the Googletest community.
|
||||
|
||||
* [Googlemock Insights](https://github.com/ElectricRCAircraftGuy/eRCaGuy_dotfiles/blob/master/googletest/insights.md),
|
||||
by [ElectricRCAircraftGuy](https://github.com/ElectricRCAircraftGuy)
|
|
@ -0,0 +1,692 @@
|
|||
# GoogleTest FAQ
|
||||
|
||||
## Why should test suite names and test names not contain underscore?
|
||||
|
||||
{: .callout .note}
|
||||
Note: GoogleTest reserves underscore (`_`) for special purpose keywords, such as
|
||||
[the `DISABLED_` prefix](advanced.md#temporarily-disabling-tests), in addition
|
||||
to the following rationale.
|
||||
|
||||
Underscore (`_`) is special, as C++ reserves the following to be used by the
|
||||
compiler and the standard library:
|
||||
|
||||
1. any identifier that starts with an `_` followed by an upper-case letter, and
|
||||
2. any identifier that contains two consecutive underscores (i.e. `__`)
|
||||
*anywhere* in its name.
|
||||
|
||||
User code is *prohibited* from using such identifiers.
|
||||
|
||||
Now let's look at what this means for `TEST` and `TEST_F`.
|
||||
|
||||
Currently `TEST(TestSuiteName, TestName)` generates a class named
|
||||
`TestSuiteName_TestName_Test`. What happens if `TestSuiteName` or `TestName`
|
||||
contains `_`?
|
||||
|
||||
1. If `TestSuiteName` starts with an `_` followed by an upper-case letter (say,
|
||||
`_Foo`), we end up with `_Foo_TestName_Test`, which is reserved and thus
|
||||
invalid.
|
||||
2. If `TestSuiteName` ends with an `_` (say, `Foo_`), we get
|
||||
`Foo__TestName_Test`, which is invalid.
|
||||
3. If `TestName` starts with an `_` (say, `_Bar`), we get
|
||||
`TestSuiteName__Bar_Test`, which is invalid.
|
||||
4. If `TestName` ends with an `_` (say, `Bar_`), we get
|
||||
`TestSuiteName_Bar__Test`, which is invalid.
|
||||
|
||||
So clearly `TestSuiteName` and `TestName` cannot start or end with `_`
|
||||
(Actually, `TestSuiteName` can start with `_` -- as long as the `_` isn't
|
||||
followed by an upper-case letter. But that's getting complicated. So for
|
||||
simplicity we just say that it cannot start with `_`.).
|
||||
|
||||
It may seem fine for `TestSuiteName` and `TestName` to contain `_` in the
|
||||
middle. However, consider this:
|
||||
|
||||
```c++
|
||||
TEST(Time, Flies_Like_An_Arrow) { ... }
|
||||
TEST(Time_Flies, Like_An_Arrow) { ... }
|
||||
```
|
||||
|
||||
Now, the two `TEST`s will both generate the same class
|
||||
(`Time_Flies_Like_An_Arrow_Test`). That's not good.
|
||||
|
||||
So for simplicity, we just ask the users to avoid `_` in `TestSuiteName` and
|
||||
`TestName`. The rule is more constraining than necessary, but it's simple and
|
||||
easy to remember. It also gives GoogleTest some wiggle room in case its
|
||||
implementation needs to change in the future.
|
||||
|
||||
If you violate the rule, there may not be immediate consequences, but your test
|
||||
may (just may) break with a new compiler (or a new version of the compiler you
|
||||
are using) or with a new version of GoogleTest. Therefore it's best to follow
|
||||
the rule.
|
||||
|
||||
## Why does GoogleTest support `EXPECT_EQ(NULL, ptr)` and `ASSERT_EQ(NULL, ptr)` but not `EXPECT_NE(NULL, ptr)` and `ASSERT_NE(NULL, ptr)`?
|
||||
|
||||
First of all, you can use `nullptr` with each of these macros, e.g.
|
||||
`EXPECT_EQ(ptr, nullptr)`, `EXPECT_NE(ptr, nullptr)`, `ASSERT_EQ(ptr, nullptr)`,
|
||||
`ASSERT_NE(ptr, nullptr)`. This is the preferred syntax in the style guide
|
||||
because `nullptr` does not have the type problems that `NULL` does.
|
||||
|
||||
Due to some peculiarity of C++, it requires some non-trivial template meta
|
||||
programming tricks to support using `NULL` as an argument of the `EXPECT_XX()`
|
||||
and `ASSERT_XX()` macros. Therefore we only do it where it's most needed
|
||||
(otherwise we make the implementation of GoogleTest harder to maintain and more
|
||||
error-prone than necessary).
|
||||
|
||||
Historically, the `EXPECT_EQ()` macro took the *expected* value as its first
|
||||
argument and the *actual* value as the second, though this argument order is now
|
||||
discouraged. It was reasonable that someone wanted
|
||||
to write `EXPECT_EQ(NULL, some_expression)`, and this indeed was requested
|
||||
several times. Therefore we implemented it.
|
||||
|
||||
The need for `EXPECT_NE(NULL, ptr)` wasn't nearly as strong. When the assertion
|
||||
fails, you already know that `ptr` must be `NULL`, so it doesn't add any
|
||||
information to print `ptr` in this case. That means `EXPECT_TRUE(ptr != NULL)`
|
||||
works just as well.
|
||||
|
||||
If we were to support `EXPECT_NE(NULL, ptr)`, for consistency we'd have to
|
||||
support `EXPECT_NE(ptr, NULL)` as well. This means using the template meta
|
||||
programming tricks twice in the implementation, making it even harder to
|
||||
understand and maintain. We believe the benefit doesn't justify the cost.
|
||||
|
||||
Finally, with the growth of the gMock matcher library, we are encouraging people
|
||||
to use the unified `EXPECT_THAT(value, matcher)` syntax more often in tests. One
|
||||
significant advantage of the matcher approach is that matchers can be easily
|
||||
combined to form new matchers, while the `EXPECT_NE`, etc, macros cannot be
|
||||
easily combined. Therefore we want to invest more in the matchers than in the
|
||||
`EXPECT_XX()` macros.
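
For concreteness, a minimal sketch of the preferred spellings, assuming a
hypothetical `GetBuffer()` function under test (`EXPECT_THAT` and
`::testing::NotNull` come from gMock):

```c++
#include <gmock/gmock.h>  // EXPECT_THAT, ::testing::NotNull
#include <gtest/gtest.h>

char* GetBuffer();  // hypothetical function under test

TEST(BufferTest, ReturnsNonNull) {
  char* buf = GetBuffer();
  EXPECT_NE(buf, nullptr);                 // preferred over EXPECT_NE(NULL, buf)
  EXPECT_THAT(buf, ::testing::NotNull());  // matcher form, composes with other matchers
}
```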
|
||||
|
||||
## I need to test that different implementations of an interface satisfy some common requirements. Should I use typed tests or value-parameterized tests?
|
||||
|
||||
For testing various implementations of the same interface, either typed tests or
|
||||
value-parameterized tests can get it done. It's really up to you, the user, to
decide which is more convenient, depending on your particular case. Some
rough guidelines:
|
||||
|
||||
* Typed tests can be easier to write if instances of the different
|
||||
implementations can be created the same way, modulo the type. For example,
|
||||
if all these implementations have a public default constructor (such that
|
||||
you can write `new TypeParam`), or if their factory functions have the same
|
||||
form (e.g. `CreateInstance<TypeParam>()`).
|
||||
* Value-parameterized tests can be easier to write if you need different code
|
||||
patterns to create different implementations' instances, e.g. `new Foo` vs
|
||||
`new Bar(5)`. To accommodate the differences, you can write factory
|
||||
function wrappers and pass these function pointers to the tests as their
|
||||
parameters.
|
||||
* When a typed test fails, the default output includes the name of the type,
|
||||
which can help you quickly identify which implementation is wrong.
|
||||
Value-parameterized tests only show the number of the failed iteration by
|
||||
default. You will need to define a function that returns the iteration name
|
||||
and pass it as the third parameter to INSTANTIATE_TEST_SUITE_P to have more
|
||||
useful output.
|
||||
* When using typed tests, you need to make sure you are testing against the
|
||||
interface type, not the concrete types (in other words, you want to make
|
||||
sure `implicit_cast<MyInterface*>(my_concrete_impl)` works, not just that
|
||||
`my_concrete_impl` works). It's less likely to make mistakes in this area
|
||||
when using value-parameterized tests.
|
||||
|
||||
I hope I didn't confuse you more. :-) If you don't mind, I'd suggest you give
both approaches a try. Practice is a much better way to grasp the subtle
|
||||
differences between the two tools. Once you have some concrete experience, you
|
||||
can much more easily decide which one to use the next time.
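
As an illustration only (the `Queue` interface, its implementations, and the
factory functions below are hypothetical), the two approaches might look
roughly like this:

```c++
#include <memory>
#include <gtest/gtest.h>

// Typed tests: every implementation is created the same way (default constructor).
template <typename T>
class QueueTest : public ::testing::Test {
 protected:
  T queue_;
};
using Implementations = ::testing::Types<VectorQueue, ListQueue>;  // hypothetical types
TYPED_TEST_SUITE(QueueTest, Implementations);

TYPED_TEST(QueueTest, StartsEmpty) { EXPECT_EQ(this->queue_.Size(), 0); }

// Value-parameterized tests: each implementation gets its own factory function.
class QueueFactoryTest : public ::testing::TestWithParam<Queue* (*)()> {};

TEST_P(QueueFactoryTest, StartsEmpty) {
  std::unique_ptr<Queue> q(GetParam()());  // call the factory passed as the parameter
  EXPECT_EQ(q->Size(), 0);
}
INSTANTIATE_TEST_SUITE_P(AllQueues, QueueFactoryTest,
                         ::testing::Values(&MakeVectorQueue, &MakeListQueue));
```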
|
||||
|
||||
## I got some run-time errors about invalid proto descriptors when using `ProtocolMessageEquals`. Help!
|
||||
|
||||
{: .callout .note}
|
||||
**Note:** `ProtocolMessageEquals` and `ProtocolMessageEquiv` are *deprecated*
|
||||
now. Please use `EqualsProto`, etc instead.
|
||||
|
||||
`ProtocolMessageEquals` and `ProtocolMessageEquiv` were redefined recently and
|
||||
are now less tolerant of invalid protocol buffer definitions. In particular, if
|
||||
you have a `foo.proto` that doesn't fully qualify the type of a protocol message
|
||||
it references (e.g. `message<Bar>` where it should be `message<blah.Bar>`), you
|
||||
will now get run-time errors like:
|
||||
|
||||
```
|
||||
... descriptor.cc:...] Invalid proto descriptor for file "path/to/foo.proto":
|
||||
... descriptor.cc:...] blah.MyMessage.my_field: ".Bar" is not defined.
|
||||
```
|
||||
|
||||
If you see this, your `.proto` file is broken and needs to be fixed by making
|
||||
the types fully qualified. The new definition of `ProtocolMessageEquals` and
|
||||
`ProtocolMessageEquiv` just happen to reveal your bug.
|
||||
|
||||
## My death test modifies some state, but the change seems lost after the death test finishes. Why?
|
||||
|
||||
Death tests (`EXPECT_DEATH`, etc) are executed in a sub-process so that the
expected crash won't kill the test program (i.e. the parent process). As a
|
||||
result, any in-memory side effects they incur are observable in their respective
|
||||
sub-processes, but not in the parent process. You can think of them as running
|
||||
in a parallel universe, more or less.
|
||||
|
||||
In particular, if you use mocking and the death test statement invokes some mock
|
||||
methods, the parent process will think the calls have never occurred. Therefore,
|
||||
you may want to move your `EXPECT_CALL` statements inside the `EXPECT_DEATH`
|
||||
macro.
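
For example, a minimal sketch of that workaround (`MockBar` and `DieWith()`
are hypothetical names):

```c++
TEST(MyDeathTest, CallsBarBeforeDying) {
  MockBar bar;  // hypothetical mock class
  EXPECT_DEATH({
    // Set the expectation inside the statement so it lives in the child
    // process, where the mocked calls actually happen.
    EXPECT_CALL(bar, Prepare());
    DieWith(&bar);  // expected to call bar.Prepare() and then crash
  }, "fatal error");
}
```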
|
||||
|
||||
## EXPECT_EQ(htonl(blah), blah_blah) generates weird compiler errors in opt mode. Is this a GoogleTest bug?
|
||||
|
||||
Actually, the bug is in `htonl()`.
|
||||
|
||||
According to `'man htonl'`, `htonl()` is a *function*, which means it's valid to
|
||||
use `htonl` as a function pointer. However, in opt mode `htonl()` is defined as
|
||||
a *macro*, which breaks this usage.
|
||||
|
||||
Worse, the macro definition of `htonl()` uses a `gcc` extension and is *not*
|
||||
standard C++. That hacky implementation has some ad hoc limitations. In
|
||||
particular, it prevents you from writing `Foo<sizeof(htonl(x))>()`, where `Foo`
|
||||
is a template that has an integral argument.
|
||||
|
||||
The implementation of `EXPECT_EQ(a, b)` uses `sizeof(... a ...)` inside a
|
||||
template argument, and thus doesn't compile in opt mode when `a` contains a call
|
||||
to `htonl()`. It is difficult to make `EXPECT_EQ` bypass the `htonl()` bug, as
|
||||
the solution must work with different compilers on various platforms.
|
||||
|
||||
## The compiler complains about "undefined references" to some static const member variables, but I did define them in the class body. What's wrong?
|
||||
|
||||
If your class has a static data member:
|
||||
|
||||
```c++
|
||||
// foo.h
|
||||
class Foo {
|
||||
...
|
||||
static const int kBar = 100;
|
||||
};
|
||||
```
|
||||
|
||||
You also need to define it *outside* of the class body in `foo.cc`:
|
||||
|
||||
```c++
|
||||
const int Foo::kBar; // No initializer here.
|
||||
```
|
||||
|
||||
Otherwise your code is **invalid C++**, and may break in unexpected ways. In
|
||||
particular, using it in GoogleTest comparison assertions (`EXPECT_EQ`, etc) will
|
||||
generate an "undefined reference" linker error. The fact that "it used to work"
|
||||
doesn't mean it's valid. It just means that you were lucky. :-)
|
||||
|
||||
If the declaration of the static data member is `constexpr` then it is
|
||||
implicitly an `inline` definition, and a separate definition in `foo.cc` is not
|
||||
needed:
|
||||
|
||||
```c++
|
||||
// foo.h
|
||||
class Foo {
|
||||
...
|
||||
static constexpr int kBar = 100; // Defines kBar, no need to do it in foo.cc.
|
||||
};
|
||||
```
|
||||
|
||||
## Can I derive a test fixture from another?
|
||||
|
||||
Yes.
|
||||
|
||||
Each test fixture has a corresponding and same-named test suite. This means only
|
||||
one test suite can use a particular fixture. Sometimes, however, multiple test
|
||||
cases may want to use the same or slightly different fixtures. For example, you
|
||||
may want to make sure that all of a GUI library's test suites don't leak
|
||||
important system resources like fonts and brushes.
|
||||
|
||||
In GoogleTest, you share a fixture among test suites by putting the shared logic
|
||||
in a base test fixture, then deriving from that base a separate fixture for each
|
||||
test suite that wants to use this common logic. You then use `TEST_F()` to write
|
||||
tests using each derived fixture.
|
||||
|
||||
Typically, your code looks like this:
|
||||
|
||||
```c++
|
||||
// Defines a base test fixture.
|
||||
class BaseTest : public ::testing::Test {
|
||||
protected:
|
||||
...
|
||||
};
|
||||
|
||||
// Derives a fixture FooTest from BaseTest.
|
||||
class FooTest : public BaseTest {
|
||||
protected:
|
||||
void SetUp() override {
|
||||
BaseTest::SetUp(); // Sets up the base fixture first.
|
||||
... additional set-up work ...
|
||||
}
|
||||
|
||||
void TearDown() override {
|
||||
... clean-up work for FooTest ...
|
||||
BaseTest::TearDown(); // Remember to tear down the base fixture
|
||||
// after cleaning up FooTest!
|
||||
}
|
||||
|
||||
... functions and variables for FooTest ...
|
||||
};
|
||||
|
||||
// Tests that use the fixture FooTest.
|
||||
TEST_F(FooTest, Bar) { ... }
|
||||
TEST_F(FooTest, Baz) { ... }
|
||||
|
||||
... additional fixtures derived from BaseTest ...
|
||||
```
|
||||
|
||||
If necessary, you can continue to derive test fixtures from a derived fixture.
|
||||
GoogleTest has no limit on how deep the hierarchy can be.
|
||||
|
||||
For a complete example using derived test fixtures, see
|
||||
[sample5_unittest.cc](https://github.com/google/googletest/blob/main/googletest/samples/sample5_unittest.cc).
|
||||
|
||||
## My compiler complains "void value not ignored as it ought to be." What does this mean?
|
||||
|
||||
You're probably using an `ASSERT_*()` in a function that doesn't return `void`.
|
||||
`ASSERT_*()` can only be used in `void` functions, due to exceptions being
|
||||
disabled by our build system. Please see more details
|
||||
[here](advanced.md#assertion-placement).
|
||||
|
||||
## My death test hangs (or seg-faults). How do I fix it?
|
||||
|
||||
In GoogleTest, death tests are run in a child process and the way they work is
|
||||
delicate. To write death tests you really need to understand how they work—see
|
||||
the details at [Death Assertions](reference/assertions.md#death) in the
|
||||
Assertions Reference.
|
||||
|
||||
In particular, death tests don't like having multiple threads in the parent
|
||||
process. So the first thing you can try is to eliminate creating threads outside
|
||||
of `EXPECT_DEATH()`. For example, you may want to use mocks or fake objects
|
||||
instead of real ones in your tests.
|
||||
|
||||
Sometimes this is impossible as some library you must use may be creating
|
||||
threads before `main()` is even reached. In this case, you can try to minimize
|
||||
the chance of conflicts by either moving as many activities as possible inside
|
||||
`EXPECT_DEATH()` (in the extreme case, you want to move everything inside), or
|
||||
leaving as few things as possible in it. Also, you can try to set the death test
|
||||
style to `"threadsafe"`, which is safer but slower, and see if it helps.
|
||||
|
||||
If you go with thread-safe death tests, remember that they rerun the test
|
||||
program from the beginning in the child process. Therefore make sure your
|
||||
program can run side-by-side with itself and is deterministic.
|
||||
|
||||
In the end, this boils down to good concurrent programming. You have to make
|
||||
sure that there are no race conditions or deadlocks in your program. No silver
|
||||
bullet - sorry!
|
||||
|
||||
## Should I use the constructor/destructor of the test fixture or SetUp()/TearDown()? {#CtorVsSetUp}
|
||||
|
||||
The first thing to remember is that GoogleTest does **not** reuse the same test
|
||||
fixture object across multiple tests. For each `TEST_F`, GoogleTest will create
|
||||
a **fresh** test fixture object, immediately call `SetUp()`, run the test body,
|
||||
call `TearDown()`, and then delete the test fixture object.
|
||||
|
||||
When you need to write per-test set-up and tear-down logic, you have the choice
|
||||
between using the test fixture constructor/destructor or `SetUp()/TearDown()`.
|
||||
The former is usually preferred, as it has the following benefits:
|
||||
|
||||
* By initializing a member variable in the constructor, we have the option to
|
||||
make it `const`, which helps prevent accidental changes to its value and
|
||||
makes the tests more obviously correct.
|
||||
* In case we need to subclass the test fixture class, the subclass'
|
||||
constructor is guaranteed to call the base class' constructor *first*, and
|
||||
the subclass' destructor is guaranteed to call the base class' destructor
|
||||
*afterward*. With `SetUp()/TearDown()`, a subclass may make the mistake of
|
||||
forgetting to call the base class' `SetUp()/TearDown()` or call them at the
|
||||
wrong time.
|
||||
|
||||
You may still want to use `SetUp()/TearDown()` in the following cases:
|
||||
|
||||
* C++ does not allow virtual function calls in constructors and destructors.
|
||||
You can call a method declared as virtual, but it will not use dynamic
|
||||
dispatch. It will use the definition from the class whose constructor is
currently executing. This is because calling a virtual method before the
|
||||
derived class constructor has a chance to run is very dangerous - the
|
||||
virtual method might operate on uninitialized data. Therefore, if you need
|
||||
to call a method that will be overridden in a derived class, you have to use
|
||||
`SetUp()/TearDown()`.
|
||||
* In the body of a constructor (or destructor), it's not possible to use the
|
||||
`ASSERT_xx` macros. Therefore, if the set-up operation could cause a fatal
|
||||
test failure that should prevent the test from running, it's necessary to
|
||||
call `abort()` and abort the whole test executable, or to use `SetUp()`
instead of a constructor.
|
||||
* If the tear-down operation could throw an exception, you must use
|
||||
`TearDown()` as opposed to the destructor, as throwing in a destructor leads
|
||||
to undefined behavior and usually will kill your program right away. Note
|
||||
that many standard libraries (like STL) may throw when exceptions are
|
||||
enabled in the compiler. Therefore you should prefer `TearDown()` if you
|
||||
want to write portable tests that work with or without exceptions.
|
||||
* The GoogleTest team is considering making the assertion macros throw on
|
||||
platforms where exceptions are enabled (e.g. Windows, Mac OS, and Linux
|
||||
client-side), which will eliminate the need for the user to propagate
|
||||
failures from a subroutine to its caller. Therefore, you shouldn't use
|
||||
GoogleTest assertions in a destructor if your code could run on such a
|
||||
platform.
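
A minimal sketch contrasting the two styles under the points above (the
fixture names and the `OpenResource()`/`CloseResource()` helpers are
hypothetical):

```c++
#include <string>
#include <gtest/gtest.h>

bool OpenResource();   // hypothetical set-up helper
void CloseResource();  // hypothetical tear-down helper

class NameTest : public ::testing::Test {
 protected:
  NameTest() : name_("foo") {}  // constructor style: the member can be const
  const std::string name_;
};

class ResourceTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // SetUp() style: ASSERT_* is allowed here; a fatal failure prevents
    // the test body from running.
    ASSERT_TRUE(OpenResource());
  }
  void TearDown() override { CloseResource(); }  // clean-up that might throw belongs here
};
```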
|
||||
|
||||
## The compiler complains "no matching function to call" when I use ASSERT_PRED*. How do I fix it?
|
||||
|
||||
See details for [`EXPECT_PRED*`](reference/assertions.md#EXPECT_PRED) in the
|
||||
Assertions Reference.
|
||||
|
||||
## My compiler complains about "ignoring return value" when I call RUN_ALL_TESTS(). Why?
|
||||
|
||||
Some people had been ignoring the return value of `RUN_ALL_TESTS()`. That is,
|
||||
instead of
|
||||
|
||||
```c++
|
||||
return RUN_ALL_TESTS();
|
||||
```
|
||||
|
||||
they write
|
||||
|
||||
```c++
|
||||
RUN_ALL_TESTS();
|
||||
```
|
||||
|
||||
This is **wrong and dangerous**. The testing service needs to see the return
|
||||
value of `RUN_ALL_TESTS()` in order to determine if a test has passed. If your
|
||||
`main()` function ignores it, your test will be considered successful even if it
|
||||
has a GoogleTest assertion failure. Very bad.
|
||||
|
||||
We have decided to fix this (thanks to Michael Chastain for the idea). Now, your
|
||||
code will no longer be able to ignore `RUN_ALL_TESTS()` when compiled with
|
||||
`gcc`. If you do so, you'll get a compiler error.
|
||||
|
||||
If you see the compiler complaining about you ignoring the return value of
|
||||
`RUN_ALL_TESTS()`, the fix is simple: just make sure its value is used as the
|
||||
return value of `main()`.
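
A typical `main()` that does this (the standard GoogleTest boilerplate, shown
here for convenience):

```c++
#include <gtest/gtest.h>

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();  // the result must be returned from main()
}
```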
|
||||
|
||||
But how could we introduce a change that breaks existing tests? Well, in this
|
||||
case, the code was already broken in the first place, so we didn't break it. :-)
|
||||
|
||||
## My compiler complains that a constructor (or destructor) cannot return a value. What's going on?
|
||||
|
||||
Due to a peculiarity of C++, in order to support the syntax for streaming
|
||||
messages to an `ASSERT_*`, e.g.
|
||||
|
||||
```c++
|
||||
ASSERT_EQ(1, Foo()) << "blah blah" << foo;
|
||||
```
|
||||
|
||||
we had to give up using `ASSERT*` and `FAIL*` (but not `EXPECT*` and
|
||||
`ADD_FAILURE*`) in constructors and destructors. The workaround is to move the
|
||||
content of your constructor/destructor to a private void member function, or
|
||||
switch to `EXPECT_*()` if that works. This
|
||||
[section](advanced.md#assertion-placement) in the user's guide explains it.
|
||||
|
||||
## My SetUp() function is not called. Why?
|
||||
|
||||
C++ is case-sensitive. Did you spell it as `Setup()`?
|
||||
|
||||
Similarly, sometimes people spell `SetUpTestSuite()` as `SetupTestSuite()` and
|
||||
wonder why it's never called.
|
||||
|
||||
## I have several test suites which share the same test fixture logic, do I have to define a new test fixture class for each of them? This seems pretty tedious.
|
||||
|
||||
You don't have to. Instead of
|
||||
|
||||
```c++
|
||||
class FooTest : public BaseTest {};
|
||||
|
||||
TEST_F(FooTest, Abc) { ... }
|
||||
TEST_F(FooTest, Def) { ... }
|
||||
|
||||
class BarTest : public BaseTest {};
|
||||
|
||||
TEST_F(BarTest, Abc) { ... }
|
||||
TEST_F(BarTest, Def) { ... }
|
||||
```
|
||||
|
||||
you can simply `typedef` the test fixtures:
|
||||
|
||||
```c++
|
||||
typedef BaseTest FooTest;
|
||||
|
||||
TEST_F(FooTest, Abc) { ... }
|
||||
TEST_F(FooTest, Def) { ... }
|
||||
|
||||
typedef BaseTest BarTest;
|
||||
|
||||
TEST_F(BarTest, Abc) { ... }
|
||||
TEST_F(BarTest, Def) { ... }
|
||||
```
|
||||
|
||||
## GoogleTest output is buried in a whole bunch of LOG messages. What do I do?
|
||||
|
||||
The GoogleTest output is meant to be a concise and human-friendly report. If
|
||||
your test generates textual output itself, it will mix with the GoogleTest
|
||||
output, making it hard to read. However, there is an easy solution to this
|
||||
problem.
|
||||
|
||||
Since `LOG` messages go to stderr, we decided to let GoogleTest output go to
|
||||
stdout. This way, you can easily separate the two using redirection. For
|
||||
example:
|
||||
|
||||
```shell
|
||||
$ ./my_test > gtest_output.txt
|
||||
```
|
||||
|
||||
## Why should I prefer test fixtures over global variables?
|
||||
|
||||
There are several good reasons:
|
||||
|
||||
1. It's likely your test needs to change the states of its global variables.
|
||||
This makes it difficult to keep side effects from escaping one test and
|
||||
contaminating others, making debugging difficult. By using fixtures, each
|
||||
test has a fresh set of variables that's different (but with the same
|
||||
names). Thus, tests are kept independent of each other.
|
||||
2. Global variables pollute the global namespace.
|
||||
3. Test fixtures can be reused via subclassing, which cannot be done easily
|
||||
with global variables. This is useful if many test suites have something in
|
||||
common.
|
||||
|
||||
## What can the statement argument in ASSERT_DEATH() be?
|
||||
|
||||
`ASSERT_DEATH(statement, matcher)` (or any death assertion macro) can be used
|
||||
wherever *`statement`* is valid. So basically *`statement`* can be any C++
|
||||
statement that makes sense in the current context. In particular, it can
|
||||
reference global and/or local variables, and can be:
|
||||
|
||||
* a simple function call (often the case),
|
||||
* a complex expression, or
|
||||
* a compound statement.
|
||||
|
||||
Some examples are shown here:
|
||||
|
||||
```c++
|
||||
// A death test can be a simple function call.
|
||||
TEST(MyDeathTest, FunctionCall) {
|
||||
ASSERT_DEATH(Xyz(5), "Xyz failed");
|
||||
}
|
||||
|
||||
// Or a complex expression that references variables and functions.
|
||||
TEST(MyDeathTest, ComplexExpression) {
|
||||
const bool c = Condition();
|
||||
ASSERT_DEATH((c ? Func1(0) : object2.Method("test")),
|
||||
"(Func1|Method) failed");
|
||||
}
|
||||
|
||||
// Death assertions can be used anywhere in a function. In
|
||||
// particular, they can be inside a loop.
|
||||
TEST(MyDeathTest, InsideLoop) {
|
||||
// Verifies that Foo(0), Foo(1), ..., and Foo(4) all die.
|
||||
for (int i = 0; i < 5; i++) {
|
||||
EXPECT_DEATH_M(Foo(i), "Foo has \\d+ errors",
|
||||
::testing::Message() << "where i is " << i);
|
||||
}
|
||||
}
|
||||
|
||||
// A death assertion can contain a compound statement.
|
||||
TEST(MyDeathTest, CompoundStatement) {
|
||||
// Verifies that at least one of Bar(0), Bar(1), ..., and
|
||||
// Bar(4) dies.
|
||||
ASSERT_DEATH({
|
||||
for (int i = 0; i < 5; i++) {
|
||||
Bar(i);
|
||||
}
|
||||
},
|
||||
"Bar has \\d+ errors");
|
||||
}
|
||||
```
|
||||
|
||||
## I have a fixture class `FooTest`, but `TEST_F(FooTest, Bar)` gives me error ``"no matching function for call to `FooTest::FooTest()'"``. Why?
|
||||
|
||||
GoogleTest needs to be able to create objects of your test fixture class, so it
|
||||
must have a default constructor. Normally the compiler will define one for you.
|
||||
However, there are cases where you have to define your own:
|
||||
|
||||
* If you explicitly declare a non-default constructor for class `FooTest`
|
||||
(`DISALLOW_EVIL_CONSTRUCTORS()` does this), then you need to define a
|
||||
default constructor, even if it would be empty.
|
||||
* If `FooTest` has a const non-static data member, then you have to define the
|
||||
default constructor *and* initialize the const member in the initializer
|
||||
list of the constructor. (Early versions of `gcc` don't force you to
initialize the const member. It's a bug that has been fixed in `gcc 4`.)
|
||||
|
||||
## Why does ASSERT_DEATH complain about previous threads that were already joined?
|
||||
|
||||
With the Linux pthread library, there is no turning back once you cross the line
|
||||
from a single thread to multiple threads. The first time you create a thread, a
|
||||
manager thread is created in addition, so you get 3, not 2, threads. Later when
|
||||
the thread you create joins the main thread, the thread count decrements by 1,
|
||||
but the manager thread will never be killed, so you still have 2 threads, which
|
||||
means you cannot safely run a death test.
|
||||
|
||||
The new NPTL thread library doesn't suffer from this problem, as it doesn't
|
||||
create a manager thread. However, if you don't control which machine your test
|
||||
runs on, you shouldn't depend on this.
|
||||
|
||||
## Why does GoogleTest require the entire test suite, instead of individual tests, to be named *DeathTest when it uses ASSERT_DEATH?
|
||||
|
||||
GoogleTest does not interleave tests from different test suites. That is, it
|
||||
runs all tests in one test suite first, and then runs all tests in the next test
|
||||
suite, and so on. GoogleTest does this because it needs to set up a test suite
|
||||
before the first test in it is run, and tear it down afterwards. Splitting up
|
||||
the test case would require multiple set-up and tear-down processes, which is
|
||||
inefficient and makes the semantics unclean.
|
||||
|
||||
If we were to determine the order of tests based on test name instead of test
|
||||
case name, then we would have a problem with the following situation:
|
||||
|
||||
```c++
|
||||
TEST_F(FooTest, AbcDeathTest) { ... }
|
||||
TEST_F(FooTest, Uvw) { ... }
|
||||
|
||||
TEST_F(BarTest, DefDeathTest) { ... }
|
||||
TEST_F(BarTest, Xyz) { ... }
|
||||
```
|
||||
|
||||
Since `FooTest.AbcDeathTest` needs to run before `BarTest.Xyz`, and we don't
|
||||
interleave tests from different test suites, we need to run all tests in the
|
||||
`FooTest` case before running any test in the `BarTest` case. This contradicts
the requirement to run `BarTest.DefDeathTest` before `FooTest.Uvw`.
|
||||
|
||||
## But I don't like calling my entire test suite \*DeathTest when it contains both death tests and non-death tests. What do I do?
|
||||
|
||||
You don't have to, but if you like, you may split up the test suite into
|
||||
`FooTest` and `FooDeathTest`, where the names make it clear that they are
|
||||
related:
|
||||
|
||||
```c++
|
||||
class FooTest : public ::testing::Test { ... };
|
||||
|
||||
TEST_F(FooTest, Abc) { ... }
|
||||
TEST_F(FooTest, Def) { ... }
|
||||
|
||||
using FooDeathTest = FooTest;
|
||||
|
||||
TEST_F(FooDeathTest, Uvw) { ... EXPECT_DEATH(...) ... }
|
||||
TEST_F(FooDeathTest, Xyz) { ... ASSERT_DEATH(...) ... }
|
||||
```
|
||||
|
||||
## GoogleTest prints the LOG messages in a death test's child process only when the test fails. How can I see the LOG messages when the death test succeeds?
|
||||
|
||||
Printing the LOG messages generated by the statement inside `EXPECT_DEATH()`
|
||||
makes it harder to search for real problems in the parent's log. Therefore,
|
||||
GoogleTest only prints them when the death test has failed.
|
||||
|
||||
If you really need to see such LOG messages, a workaround is to temporarily
|
||||
break the death test (e.g. by changing the regex pattern it is expected to
|
||||
match). Admittedly, this is a hack. We'll consider a more permanent solution
|
||||
after the fork-and-exec-style death tests are implemented.
|
||||
|
||||
## The compiler complains about `no match for 'operator<<'` when I use an assertion. What gives?
|
||||
|
||||
If you use a user-defined type `FooType` in an assertion, you must make sure
|
||||
there is an `std::ostream& operator<<(std::ostream&, const FooType&)` function
|
||||
defined such that we can print a value of `FooType`.
|
||||
|
||||
In addition, if `FooType` is declared in a name space, the `<<` operator also
|
||||
needs to be defined in the *same* name space. See
|
||||
[Tip of the Week #49](http://abseil.io/tips/49) for details.
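
For instance, a small sketch with a hypothetical `bank::Account` type:

```c++
#include <ostream>

namespace bank {

struct Account {
  int id;
  int balance;
};

// Defined in the same namespace as Account, so argument-dependent lookup
// finds it when GoogleTest needs to print an Account in a failure message.
std::ostream& operator<<(std::ostream& os, const Account& a) {
  return os << "Account{id=" << a.id << ", balance=" << a.balance << "}";
}

}  // namespace bank
```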
|
||||
|
||||
## How do I suppress the memory leak messages on Windows?
|
||||
|
||||
Since the statically initialized GoogleTest singleton requires allocations on
|
||||
the heap, the Visual C++ memory leak detector will report memory leaks at the
|
||||
end of the program run. The easiest way to avoid this is to use the
|
||||
`_CrtMemCheckpoint` and `_CrtMemDumpAllObjectsSince` calls to not report any
|
||||
statically initialized heap objects. See MSDN for more details and additional
|
||||
heap check/debug routines.
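
A hedged sketch of that approach, assuming an MSVC debug build (placing the
checkpoint right after `InitGoogleTest` is one reasonable choice, not the
only one):

```c++
#include <crtdbg.h>
#include <gtest/gtest.h>

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);  // singleton allocations happen by now
  _CrtMemState checkpoint;
  _CrtMemCheckpoint(&checkpoint);           // snapshot the heap after static init
  const int result = RUN_ALL_TESTS();
  _CrtMemDumpAllObjectsSince(&checkpoint);  // report only allocations made afterwards
  return result;
}
```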
|
||||
|
||||
## How can my code detect if it is running in a test?
|
||||
|
||||
If you write code that sniffs whether it's running in a test and does different
|
||||
things accordingly, you are leaking test-only logic into production code and
|
||||
there is no easy way to ensure that the test-only code paths aren't run by
|
||||
mistake in production. Such cleverness also leads to
|
||||
[Heisenbugs](https://en.wikipedia.org/wiki/Heisenbug). Therefore we strongly
|
||||
advise against the practice, and GoogleTest doesn't provide a way to do it.
|
||||
|
||||
In general, the recommended way to cause the code to behave differently under
|
||||
test is [Dependency Injection](http://en.wikipedia.org/wiki/Dependency_injection). You can inject
|
||||
different functionality from the test and from the production code. Since your
|
||||
production code doesn't link in the for-test logic at all (the
|
||||
[`testonly`](http://docs.bazel.build/versions/master/be/common-definitions.html#common.testonly) attribute for BUILD targets helps to ensure
|
||||
that), there is no danger in accidentally running it.
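
A minimal dependency-injection sketch (the `Clock` interface and the names
below are hypothetical):

```c++
#include <cstdint>

class Clock {
 public:
  virtual ~Clock() = default;
  virtual int64_t NowMs() = 0;
};

// Production code takes the dependency as a parameter; tests pass a fake,
// and the code never needs to ask "am I running in a test?".
int64_t ElapsedMs(Clock& clock, int64_t start_ms) {
  return clock.NowMs() - start_ms;
}
```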
|
||||
|
||||
However, if you *really*, *really*, *really* have no choice, and if you follow
|
||||
the rule of ending your test program names with `_test`, you can use the
|
||||
*horrible* hack of sniffing your executable name (`argv[0]` in `main()`) to know
|
||||
whether the code is under test.
|
||||
|
||||
## How do I temporarily disable a test?
|
||||
|
||||
If you have a broken test that you cannot fix right away, you can add the
|
||||
`DISABLED_` prefix to its name. This will exclude it from execution. This is
|
||||
better than commenting out the code or using `#if 0`, as disabled tests are
|
||||
still compiled (and thus won't rot).
|
||||
|
||||
To include disabled tests in test execution, just invoke the test program with
|
||||
the `--gtest_also_run_disabled_tests` flag.
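
For example (the test and fixture names are made up):

```c++
// Disabled, but still compiled so it doesn't rot.
TEST(NetworkTest, DISABLED_ReconnectsAfterTimeout) {
  // ...
}

// Prefixing the fixture name disables every TEST_F in the suite.
class DISABLED_SlowSyncTest : public ::testing::Test {};

TEST_F(DISABLED_SlowSyncTest, FullSync) {
  // ...
}
```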
|
||||
|
||||
## Is it OK if I have two separate `TEST(Foo, Bar)` test methods defined in different namespaces?
|
||||
|
||||
Yes.
|
||||
|
||||
The rule is **all test methods in the same test suite must use the same fixture
|
||||
class.** This means that the following is **allowed** because both tests use the
|
||||
same fixture class (`::testing::Test`).
|
||||
|
||||
```c++
|
||||
namespace foo {
|
||||
TEST(CoolTest, DoSomething) {
|
||||
SUCCEED();
|
||||
}
|
||||
} // namespace foo
|
||||
|
||||
namespace bar {
|
||||
TEST(CoolTest, DoSomething) {
|
||||
SUCCEED();
|
||||
}
|
||||
} // namespace bar
|
||||
```
|
||||
|
||||
However, the following code is **not allowed** and will produce a runtime error
|
||||
from GoogleTest because the test methods are using different test fixture
|
||||
classes with the same test suite name.
|
||||
|
||||
```c++
|
||||
namespace foo {
|
||||
class CoolTest : public ::testing::Test {}; // Fixture foo::CoolTest
|
||||
TEST_F(CoolTest, DoSomething) {
|
||||
SUCCEED();
|
||||
}
|
||||
} // namespace foo
|
||||
|
||||
namespace bar {
|
||||
class CoolTest : public ::testing::Test {}; // Fixture: bar::CoolTest
|
||||
TEST_F(CoolTest, DoSomething) {
|
||||
SUCCEED();
|
||||
}
|
||||
} // namespace bar
|
||||
```
|
|
@ -0,0 +1,241 @@
|
|||
# gMock Cheat Sheet
|
||||
|
||||
## Defining a Mock Class
|
||||
|
||||
### Mocking a Normal Class {#MockClass}
|
||||
|
||||
Given
|
||||
|
||||
```cpp
|
||||
class Foo {
|
||||
public:
|
||||
virtual ~Foo();
|
||||
virtual int GetSize() const = 0;
|
||||
virtual string Describe(const char* name) = 0;
|
||||
virtual string Describe(int type) = 0;
|
||||
virtual bool Process(Bar elem, int count) = 0;
|
||||
};
|
||||
```
|
||||
|
||||
(note that `~Foo()` **must** be virtual) we can define its mock as
|
||||
|
||||
```cpp
|
||||
#include <gmock/gmock.h>
|
||||
|
||||
class MockFoo : public Foo {
|
||||
public:
|
||||
MOCK_METHOD(int, GetSize, (), (const, override));
|
||||
MOCK_METHOD(string, Describe, (const char* name), (override));
|
||||
MOCK_METHOD(string, Describe, (int type), (override));
|
||||
MOCK_METHOD(bool, Process, (Bar elem, int count), (override));
|
||||
};
|
||||
```
|
||||
|
||||
To create a "nice" mock, which ignores all uninteresting calls, a "naggy" mock,
|
||||
which warns on all uninteresting calls, or a "strict" mock, which treats them as
|
||||
failures:
|
||||
|
||||
```cpp
|
||||
using ::testing::NiceMock;
|
||||
using ::testing::NaggyMock;
|
||||
using ::testing::StrictMock;
|
||||
|
||||
NiceMock<MockFoo> nice_foo; // The type is a subclass of MockFoo.
|
||||
NaggyMock<MockFoo> naggy_foo; // The type is a subclass of MockFoo.
|
||||
StrictMock<MockFoo> strict_foo; // The type is a subclass of MockFoo.
|
||||
```
|
||||
|
||||
{: .callout .note}
|
||||
**Note:** A mock object is currently naggy by default. We may make it nice by
|
||||
default in the future.
|
||||
|
||||
### Mocking a Class Template {#MockTemplate}
|
||||
|
||||
Class templates can be mocked just like any class.
|
||||
|
||||
To mock
|
||||
|
||||
```cpp
|
||||
template <typename Elem>
|
||||
class StackInterface {
|
||||
public:
|
||||
virtual ~StackInterface();
|
||||
virtual int GetSize() const = 0;
|
||||
virtual void Push(const Elem& x) = 0;
|
||||
};
|
||||
```
|
||||
|
||||
(note that all member functions that are mocked, including `~StackInterface()`
|
||||
**must** be virtual).
|
||||
|
||||
```cpp
|
||||
template <typename Elem>
|
||||
class MockStack : public StackInterface<Elem> {
|
||||
public:
|
||||
MOCK_METHOD(int, GetSize, (), (const, override));
|
||||
MOCK_METHOD(void, Push, (const Elem& x), (override));
|
||||
};
|
||||
```
|
||||
|
||||
### Specifying Calling Conventions for Mock Functions
|
||||
|
||||
If your mock function doesn't use the default calling convention, you can
|
||||
specify it by adding `Calltype(convention)` to `MOCK_METHOD`'s 4th parameter.
|
||||
For example,
|
||||
|
||||
```cpp
|
||||
MOCK_METHOD(bool, Foo, (int n), (Calltype(STDMETHODCALLTYPE)));
|
||||
MOCK_METHOD(int, Bar, (double x, double y),
|
||||
(const, Calltype(STDMETHODCALLTYPE)));
|
||||
```
|
||||
|
||||
where `STDMETHODCALLTYPE` is defined by `<objbase.h>` on Windows.
|
||||
|
||||
## Using Mocks in Tests {#UsingMocks}
|
||||
|
||||
The typical work flow is:
|
||||
|
||||
1. Import the gMock names you need to use. All gMock symbols are in the
|
||||
`testing` namespace unless they are macros or otherwise noted.
|
||||
2. Create the mock objects.
|
||||
3. Optionally, set the default actions of the mock objects.
|
||||
4. Set your expectations on the mock objects (How will they be called? What
|
||||
will they do?).
|
||||
5. Exercise code that uses the mock objects; if necessary, check the result
|
||||
using googletest assertions.
|
||||
6. When a mock object is destructed, gMock automatically verifies that all
|
||||
expectations on it have been satisfied.
|
||||
|
||||
Here's an example:
|
||||
|
||||
```cpp
|
||||
using ::testing::Return; // #1
|
||||
|
||||
TEST(BarTest, DoesThis) {
|
||||
MockFoo foo; // #2
|
||||
|
||||
ON_CALL(foo, GetSize()) // #3
|
||||
.WillByDefault(Return(1));
|
||||
// ... other default actions ...
|
||||
|
||||
EXPECT_CALL(foo, Describe(5)) // #4
|
||||
.Times(3)
|
||||
.WillRepeatedly(Return("Category 5"));
|
||||
// ... other expectations ...
|
||||
|
||||
EXPECT_EQ(MyProductionFunction(&foo), "good"); // #5
|
||||
} // #6
|
||||
```
|
||||
|
||||
## Setting Default Actions {#OnCall}
|
||||
|
||||
gMock has a **built-in default action** for any function that returns `void`,
|
||||
`bool`, a numeric value, or a pointer. In C++11, it will additionally return
the default-constructed value, if one exists for the given type.
|
||||
|
||||
To customize the default action for functions with return type `T`, use
|
||||
[`DefaultValue<T>`](reference/mocking.md#DefaultValue). For example:
|
||||
|
||||
```cpp
|
||||
// Sets the default action for return type std::unique_ptr<Buzz> to
|
||||
// creating a new Buzz every time.
|
||||
DefaultValue<std::unique_ptr<Buzz>>::SetFactory(
|
||||
[] { return std::make_unique<Buzz>(AccessLevel::kInternal); });
|
||||
|
||||
// When this fires, the default action of MakeBuzz() will run, which
|
||||
// will return a new Buzz object.
|
||||
EXPECT_CALL(mock_buzzer_, MakeBuzz("hello")).Times(AnyNumber());
|
||||
|
||||
auto buzz1 = mock_buzzer_.MakeBuzz("hello");
|
||||
auto buzz2 = mock_buzzer_.MakeBuzz("hello");
|
||||
EXPECT_NE(buzz1, nullptr);
|
||||
EXPECT_NE(buzz2, nullptr);
|
||||
EXPECT_NE(buzz1, buzz2);
|
||||
|
||||
// Resets the default action for return type std::unique_ptr<Buzz>,
|
||||
// to avoid interfering with other tests.
|
||||
DefaultValue<std::unique_ptr<Buzz>>::Clear();
|
||||
```
|
||||
|
||||
To customize the default action for a particular method of a specific mock
|
||||
object, use [`ON_CALL`](reference/mocking.md#ON_CALL). `ON_CALL` has a similar
|
||||
syntax to `EXPECT_CALL`, but it is used for setting default behaviors when you
|
||||
do not require that the mock method is called. See
|
||||
[Knowing When to Expect](gmock_cook_book.md#UseOnCall) for a more detailed
|
||||
discussion.
|
||||
|
||||
## Setting Expectations {#ExpectCall}
|
||||
|
||||
See [`EXPECT_CALL`](reference/mocking.md#EXPECT_CALL) in the Mocking Reference.
|
||||
|
||||
## Matchers {#MatcherList}
|
||||
|
||||
See the [Matchers Reference](reference/matchers.md).
|
||||
|
||||
## Actions {#ActionList}
|
||||
|
||||
See the [Actions Reference](reference/actions.md).
|
||||
|
||||
## Cardinalities {#CardinalityList}
|
||||
|
||||
See the [`Times` clause](reference/mocking.md#EXPECT_CALL.Times) of
|
||||
`EXPECT_CALL` in the Mocking Reference.
|
||||
|
||||
## Expectation Order
|
||||
|
||||
By default, expectations can be matched in *any* order. If some or all
|
||||
expectations must be matched in a given order, you can use the
|
||||
[`After` clause](reference/mocking.md#EXPECT_CALL.After) or
|
||||
[`InSequence` clause](reference/mocking.md#EXPECT_CALL.InSequence) of
|
||||
`EXPECT_CALL`, or use an [`InSequence` object](reference/mocking.md#InSequence).
|
||||
|
||||
## Verifying and Resetting a Mock
|
||||
|
||||
gMock will verify the expectations on a mock object when it is destructed, or
|
||||
you can do it earlier:
|
||||
|
||||
```cpp
|
||||
using ::testing::Mock;
|
||||
...
|
||||
// Verifies and removes the expectations on mock_obj;
|
||||
// returns true if and only if successful.
|
||||
Mock::VerifyAndClearExpectations(&mock_obj);
|
||||
...
|
||||
// Verifies and removes the expectations on mock_obj;
|
||||
// also removes the default actions set by ON_CALL();
|
||||
// returns true if and only if successful.
|
||||
Mock::VerifyAndClear(&mock_obj);
|
||||
```
|
||||
|
||||
Do not set new expectations after verifying and clearing a mock after its use.
|
||||
Setting expectations after code that exercises the mock has undefined behavior.
|
||||
See [Using Mocks in Tests](gmock_for_dummies.md#using-mocks-in-tests) for more
|
||||
information.
|
||||
|
||||
You can also tell gMock that a mock object can be leaked and doesn't need to be
|
||||
verified:
|
||||
|
||||
```cpp
|
||||
Mock::AllowLeak(&mock_obj);
|
||||
```
|
||||
|
||||
## Mock Classes
|
||||
|
||||
gMock defines a convenient mock class template
|
||||
|
||||
```cpp
|
||||
class MockFunction<R(A1, ..., An)> {
|
||||
public:
|
||||
MOCK_METHOD(R, Call, (A1, ..., An));
|
||||
};
|
||||
```
|
||||
|
||||
See this [recipe](gmock_cook_book.md#UsingCheckPoints) for one application of
|
||||
it.
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Description |
|
||||
| :----------------------------- | :---------------------------------------- |
|
||||
| `--gmock_catch_leaked_mocks=0` | Don't report leaked mock objects as failures. |
|
||||
| `--gmock_verbose=LEVEL` | Sets the default verbosity level (`info`, `warning`, or `error`) of Google Mock messages. |
|
@ -0,0 +1,390 @@
|
|||
# Legacy gMock FAQ
|
||||
|
||||
### When I call a method on my mock object, the method for the real object is invoked instead. What's the problem?
|
||||
|
||||
In order for a method to be mocked, it must be *virtual*, unless you use the
|
||||
[high-perf dependency injection technique](gmock_cook_book.md#MockingNonVirtualMethods).
|
||||
|
||||
### Can I mock a variadic function?
|
||||
|
||||
You cannot mock a variadic function (i.e. a function taking ellipsis (`...`)
|
||||
arguments) directly in gMock.
|
||||
|
||||
The problem is that in general, there is *no way* for a mock object to know how
|
||||
many arguments are passed to the variadic method, and what the arguments' types
|
||||
are. Only the *author of the base class* knows the protocol, and we cannot look
|
||||
into his or her head.
|
||||
|
||||
Therefore, to mock such a function, the *user* must teach the mock object how to
|
||||
figure out the number of arguments and their types. One way to do it is to
|
||||
provide overloaded versions of the function.
|
||||
|
||||
Ellipsis arguments are inherited from C and not really a C++ feature. They are
|
||||
unsafe to use and don't work with arguments that have constructors or
|
||||
destructors. Therefore we recommend avoiding them in C++ as much as possible.
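
A hedged sketch of the overload approach mentioned above (the `Logger`
interface is hypothetical):

```cpp
#include <string>
#include <gmock/gmock.h>

// Instead of a variadic Log(const char* fmt, ...), expose fixed-arity
// overloads in the interface and mock those.
class Logger {
 public:
  virtual ~Logger() = default;
  virtual void Log(const std::string& msg) = 0;
  virtual void Log(const std::string& msg, int value) = 0;
};

class MockLogger : public Logger {
 public:
  MOCK_METHOD(void, Log, (const std::string& msg), (override));
  MOCK_METHOD(void, Log, (const std::string& msg, int value), (override));
};
```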
|
||||
|
||||
### MSVC gives me warning C4301 or C4373 when I define a mock method with a const parameter. Why?
|
||||
|
||||
If you compile this using Microsoft Visual C++ 2005 SP1:
|
||||
|
||||
```cpp
|
||||
class Foo {
|
||||
...
|
||||
virtual void Bar(const int i) = 0;
|
||||
};
|
||||
|
||||
class MockFoo : public Foo {
|
||||
...
|
||||
MOCK_METHOD(void, Bar, (const int i), (override));
|
||||
};
|
||||
```
|
||||
|
||||
You may get the following warning:
|
||||
|
||||
```shell
|
||||
warning C4301: 'MockFoo::Bar': overriding virtual function only differs from 'Foo::Bar' by const/volatile qualifier
|
||||
```
|
||||
|
||||
This is a MSVC bug. The same code compiles fine with gcc, for example. If you
|
||||
use Visual C++ 2008 SP1, you would get the warning:
|
||||
|
||||
```shell
|
||||
warning C4373: 'MockFoo::Bar': virtual function overrides 'Foo::Bar', previous versions of the compiler did not override when parameters only differed by const/volatile qualifiers
|
||||
```
|
||||
|
||||
In C++, if you *declare* a function with a `const` parameter, the `const`
|
||||
modifier is ignored. Therefore, the `Foo` base class above is equivalent to:
|
||||
|
||||
```cpp
|
||||
class Foo {
|
||||
...
|
||||
virtual void Bar(int i) = 0; // int or const int? Makes no difference.
|
||||
};
|
||||
```
|
||||
|
||||
In fact, you can *declare* `Bar()` with an `int` parameter, and define it with a
|
||||
`const int` parameter. The compiler will still match them up.
|
||||
|
||||
Since making a parameter `const` is meaningless in the method declaration, we
|
||||
recommend removing it in both `Foo` and `MockFoo`. That should work around the
VC bug.
|
||||
|
||||
Note that we are talking about the *top-level* `const` modifier here. If the
|
||||
function parameter is passed by pointer or reference, declaring the pointee or
|
||||
referee as `const` is still meaningful. For example, the following two
|
||||
declarations are *not* equivalent:
|
||||
|
||||
```cpp
|
||||
void Bar(int* p); // Neither p nor *p is const.
|
||||
void Bar(const int* p); // p is not const, but *p is.
|
||||
```
|
||||
|
||||
### I can't figure out why gMock thinks my expectations are not satisfied. What should I do?
|
||||
|
||||
You might want to run your test with `--gmock_verbose=info`. This flag lets
|
||||
gMock print a trace of every mock function call it receives. By studying the
|
||||
trace, you'll gain insights on why the expectations you set are not met.
|
||||
|
||||
If you see the message "The mock function has no default action set, and its
|
||||
return type has no default value set.", then try
|
||||
[adding a default action](gmock_cheat_sheet.md#OnCall). Due to a known issue,
|
||||
unexpected calls on mocks without default actions don't print out a detailed
|
||||
comparison between the actual arguments and the expected arguments.
|
||||
|
||||
### My program crashed and `ScopedMockLog` spit out tons of messages. Is it a gMock bug?
|
||||
|
||||
gMock and `ScopedMockLog` are likely doing the right thing here.
|
||||
|
||||
When a test crashes, the failure signal handler will try to log a lot of
|
||||
information (the stack trace, and the address map, for example). The messages
|
||||
are compounded if you have many threads with deep stacks. When `ScopedMockLog`
|
||||
intercepts these messages and finds that they don't match any expectations, it
|
||||
prints an error for each of them.
|
||||
|
||||
You can learn to ignore the errors, or you can rewrite your expectations to make
|
||||
your test more robust, for example, by adding something like:
|
||||
|
||||
```cpp
|
||||
using ::testing::AnyNumber;
|
||||
using ::testing::Not;
|
||||
...
|
||||
// Ignores any log not done by us.
|
||||
EXPECT_CALL(log, Log(_, Not(EndsWith("/my_file.cc")), _))
|
||||
.Times(AnyNumber());
|
||||
```
|
||||
|
||||
### How can I assert that a function is NEVER called?
|
||||
|
||||
```cpp
|
||||
using ::testing::_;
|
||||
...
|
||||
EXPECT_CALL(foo, Bar(_))
|
||||
.Times(0);
|
||||
```
|
||||
|
||||
### I have a failed test where gMock tells me TWICE that a particular expectation is not satisfied. Isn't this redundant?
|
||||
|
||||
When gMock detects a failure, it prints relevant information (the mock function
|
||||
arguments, the state of relevant expectations, etc.) to help the user debug.
|
||||
If another failure is detected, gMock will do the same, including printing the
|
||||
state of relevant expectations.
|
||||
|
||||
Sometimes an expectation's state didn't change between two failures, and you'll
|
||||
see the same description of the state twice. They are however *not* redundant,
|
||||
as they refer to *different points in time*. The fact they are the same *is*
|
||||
interesting information.
|
||||
|
||||
### I get a heapcheck failure when using a mock object, but using a real object is fine. What can be wrong?
|
||||
|
||||
Does the class (hopefully a pure interface) you are mocking have a virtual
|
||||
destructor?
|
||||
|
||||
Whenever you derive from a base class, make sure its destructor is virtual.
|
||||
Otherwise Bad Things will happen. Consider the following code:
|
||||
|
||||
```cpp
|
||||
class Base {
|
||||
public:
|
||||
// Not virtual, but should be.
|
||||
~Base() { ... }
|
||||
...
|
||||
};
|
||||
|
||||
class Derived : public Base {
|
||||
public:
|
||||
...
|
||||
private:
|
||||
std::string value_;
|
||||
};
|
||||
|
||||
...
|
||||
Base* p = new Derived;
|
||||
...
|
||||
delete p; // Surprise! ~Base() will be called, but ~Derived() will not
|
||||
// - value_ is leaked.
|
||||
```
|
||||
|
||||
By changing `~Base()` to virtual, `~Derived()` will be correctly called when
|
||||
`delete p` is executed, and the heap checker will be happy.

### The "newer expectations override older ones" rule makes writing expectations awkward. Why does gMock do that?

When people complain about this, often they are referring to code like:

```cpp
using ::testing::Return;
...
  // foo.Bar() should be called twice, return 1 the first time, and return
  // 2 the second time. However, I have to write the expectations in the
  // reverse order. This sucks big time!!!
  EXPECT_CALL(foo, Bar())
      .WillOnce(Return(2))
      .RetiresOnSaturation();
  EXPECT_CALL(foo, Bar())
      .WillOnce(Return(1))
      .RetiresOnSaturation();
```

The problem is that they didn't pick the **best** way to express the test's
intent.

By default, expectations don't have to be matched in *any* particular order. If
you want them to match in a certain order, you need to be explicit. This is
gMock's (and jMock's) fundamental philosophy: it's easy to accidentally
over-specify your tests, and we want to make it harder to do so.

There are two better ways to write the test spec. You could either put the
expectations in a sequence:

```cpp
using ::testing::InSequence;
using ::testing::Return;
...
  // foo.Bar() should be called twice, return 1 the first time, and return
  // 2 the second time. Using a sequence, we can write the expectations
  // in their natural order.
  {
    InSequence s;
    EXPECT_CALL(foo, Bar())
        .WillOnce(Return(1))
        .RetiresOnSaturation();
    EXPECT_CALL(foo, Bar())
        .WillOnce(Return(2))
        .RetiresOnSaturation();
  }
```

or you can put the sequence of actions in the same expectation:

```cpp
using ::testing::Return;
...
  // foo.Bar() should be called twice, return 1 the first time, and return
  // 2 the second time.
  EXPECT_CALL(foo, Bar())
      .WillOnce(Return(1))
      .WillOnce(Return(2))
      .RetiresOnSaturation();
```

Back to the original question: why does gMock search the expectations (and
`ON_CALL`s) from back to front? Because this allows a user to set up a mock's
behavior for the common case early (e.g. in the mock's constructor or the test
fixture's set-up phase) and customize it with more specific rules later. If
gMock searched from front to back, this very useful pattern wouldn't be
possible.

### gMock prints a warning when a function without EXPECT_CALL is called, even if I have set its behavior using ON_CALL. Would it be reasonable not to show the warning in this case?

When choosing between being neat and being safe, we lean toward the latter. So
the answer is that we think it's better to show the warning.

Often people write `ON_CALL`s in the mock object's constructor or `SetUp()`, as
the default behavior rarely changes from test to test. Then in the test body
they set the expectations, which are often different for each test. Having an
`ON_CALL` in the set-up part of a test doesn't mean that the calls are expected.
If there's no `EXPECT_CALL` and the method is called, it's possibly an error. If
we quietly let the call go through without notifying the user, bugs may creep in
unnoticed.

If, however, you are sure that the calls are OK, you can write

```cpp
using ::testing::_;
...
  EXPECT_CALL(foo, Bar(_))
      .WillRepeatedly(...);
```

instead of

```cpp
using ::testing::_;
...
  ON_CALL(foo, Bar(_))
      .WillByDefault(...);
```

This tells gMock that you do expect the calls and no warning should be printed.

Also, you can control the verbosity by specifying `--gmock_verbose=error`. Other
values are `info` and `warning`. If you find the output too noisy when
debugging, just choose a less verbose level.

### How can I delete the mock function's argument in an action?

If your mock function takes a pointer argument and you want to delete that
argument, you can use `testing::DeleteArg<N>()` to delete the N'th (zero-indexed)
argument:

```cpp
using ::testing::_;
...
  MOCK_METHOD(void, Bar, (X* x, const Y& y));
  ...
  EXPECT_CALL(mock_foo_, Bar(_, _))
      .WillOnce(testing::DeleteArg<0>());
```

### How can I perform an arbitrary action on a mock function's argument?

If you find yourself needing to perform some action that's not supported by
gMock directly, remember that you can define your own actions using
[`MakeAction()`](#NewMonoActions) or
[`MakePolymorphicAction()`](#NewPolyActions), or you can write a stub function
and invoke it using [`Invoke()`](#FunctionsAsActions).

```cpp
using ::testing::_;
using ::testing::Invoke;
...
  MOCK_METHOD(void, Bar, (X* p));
  ...
  EXPECT_CALL(mock_foo_, Bar(_))
      .WillOnce(Invoke(MyAction(...)));
```

### My code calls a static/global function. Can I mock it?

You can, but you need to make some changes.

In general, if you find yourself needing to mock a static function, it's a sign
that your modules are too tightly coupled (and less flexible, less reusable,
less testable, etc). You are probably better off defining a small interface and
calling the function through that interface, which can then be easily mocked.
It's a bit of work initially, but usually pays for itself quickly.

This Google Testing Blog
[post](https://testing.googleblog.com/2008/06/defeat-static-cling.html) says it
excellently. Check it out.
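
For illustration, here is a minimal sketch of that approach. The free function
`::ReadConfig()` and all class names below are hypothetical, used only to show
the shape of the refactoring:

```cpp
#include <string>

#include <gmock/gmock.h>

// The code under test depends on this small interface instead of calling the
// (hypothetical) free function ::ReadConfig() directly.
class ConfigReader {
 public:
  virtual ~ConfigReader() = default;
  virtual std::string ReadConfig(const std::string& path) = 0;
};

// Production implementation: simply forwards to the free function.
class RealConfigReader : public ConfigReader {
 public:
  std::string ReadConfig(const std::string& path) override {
    return ::ReadConfig(path);  // hypothetical global function
  }
};

// Test implementation: a mock that can be given expectations.
class MockConfigReader : public ConfigReader {
 public:
  MOCK_METHOD(std::string, ReadConfig, (const std::string& path), (override));
};
```

Code that previously called the free function now takes a `ConfigReader&`, so
tests can hand it a `MockConfigReader` instead.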

### My mock object needs to do complex stuff. It's a lot of pain to specify the actions. gMock sucks!

I know it's not a question, but you get an answer for free anyway. :-)

With gMock, you can create mocks in C++ easily. And people might be tempted to
use them everywhere. Sometimes they work great, and sometimes you may find them,
well, a pain to use. So, what's wrong in the latter case?

When you write a test without using mocks, you exercise the code and assert that
it returns the correct value or that the system is in an expected state. This is
sometimes called "state-based testing".

Mocks are great for what some call "interaction-based" testing: instead of
checking the system state at the very end, mock objects verify that they are
invoked the right way and report an error as soon as it arises, giving you a
handle on the precise context in which the error was triggered. This is often
more effective and economical to do than state-based testing.

If you are doing state-based testing and using a test double just to simulate
the real object, you are probably better off using a fake. Using a mock in this
case causes pain, as it's not a strong point for mocks to perform complex
actions. If you experience this and think that mocks suck, you are just not
using the right tool for your problem. Or, you might be trying to solve the
wrong problem. :-)

### I got a warning "Uninteresting function call encountered - default action taken.." Should I panic?

By all means, NO! It's just an FYI. :-)

What it means is that you have a mock function, you haven't set any expectations
on it (by gMock's rule this means that you are not interested in calls to this
function and therefore it can be called any number of times), and it is called.
That's OK - you didn't say it's not OK to call the function!

What if you actually meant to disallow this function to be called, but forgot to
write `EXPECT_CALL(foo, Bar()).Times(0)`? While one can argue that it's the
user's fault, gMock tries to be nice and prints you a note.

So, when you see the message and believe that there shouldn't be any
uninteresting calls, you should investigate what's going on. To make your life
easier, gMock dumps the stack trace when an uninteresting call is encountered.
From that you can figure out which mock function it is, and how it is called.

### I want to define a custom action. Should I use Invoke() or implement the ActionInterface interface?

Either way is fine - you want to choose the one that's more convenient for your
circumstance.

Usually, if your action is for a particular function type, defining it using
`Invoke()` should be easier; if your action can be used in functions of
different types (e.g. if you are defining `Return(*value*)`),
`MakePolymorphicAction()` is easiest. Sometimes you want precise control over
what types of functions the action can be used in, and implementing
`ActionInterface` is the way to go here. See the implementation of `Return()`
in `gmock-actions.h` for an example.
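
For the common case, a minimal sketch of the `Invoke()` route (the mock
`mock_calc_`, its method `Transform`, and the stub `DoubleIt` are hypothetical
names used only for illustration):

```cpp
using ::testing::_;
using ::testing::Invoke;

// A stub function with the same signature as the mocked method.
int DoubleIt(int x) { return 2 * x; }
...
  EXPECT_CALL(mock_calc_, Transform(_))
      .WillOnce(Invoke(DoubleIt));  // the call's argument is forwarded to DoubleIt
```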

### I use SetArgPointee() in WillOnce(), but gcc complains about "conflicting return type specified". What does it mean?

You got this error as gMock has no idea what value it should return when the
mock method is called. `SetArgPointee()` says what the side effect is, but
doesn't say what the return value should be. You need `DoAll()` to chain a
`SetArgPointee()` with a `Return()` that provides a value appropriate to the API
being mocked.

See this [recipe](gmock_cook_book.md#mocking-side-effects) for more details and
an example.
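
As a quick sketch of the fix (the mocked method `GetValue(int*)` here is a
hypothetical `bool`-returning method, used only for illustration):

```cpp
using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
...
  // Sets *arg0 to 42 as the side effect, and returns true as the call's value.
  EXPECT_CALL(mock_foo_, GetValue(_))
      .WillOnce(DoAll(SetArgPointee<0>(42), Return(true)));
```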

### I have a huge mock class, and Microsoft Visual C++ runs out of memory when compiling it. What can I do?

We've noticed that when the `/clr` compiler flag is used, Visual C++ uses 5 to 6
times as much memory when compiling a mock class. We suggest avoiding `/clr`
when compiling native C++ mocks.

# gMock for Dummies

## What Is gMock?

When you write a prototype or test, often it's not feasible or wise to rely on
real objects entirely. A **mock object** implements the same interface as a real
object (so it can be used as one), but lets you specify at run time how it will
be used and what it should do (which methods will be called? in which order? how
many times? with what arguments? what will they return? etc).

It is easy to confuse the term *fake objects* with mock objects. Fakes and mocks
actually mean very different things in the Test-Driven Development (TDD)
community:

*   **Fake** objects have working implementations, but usually take some
    shortcut (perhaps to make the operations less expensive), which makes them
    not suitable for production. An in-memory file system would be an example of
    a fake.
*   **Mocks** are objects pre-programmed with *expectations*, which form a
    specification of the calls they are expected to receive.

If all this seems too abstract for you, don't worry - the most important thing
to remember is that a mock allows you to check the *interaction* between itself
and code that uses it. The difference between fakes and mocks will become much
clearer once you start to use mocks.

**gMock** is a library (sometimes we also call it a "framework" to make it sound
cool) for creating mock classes and using them. It does to C++ what
jMock/EasyMock does to Java (well, more or less).

When using gMock,

1.  first, you use some simple macros to describe the interface you want to
    mock, and they will expand to the implementation of your mock class;
2.  next, you create some mock objects and specify their expectations and
    behavior using an intuitive syntax;
3.  then you exercise code that uses the mock objects. gMock will catch any
    violation of the expectations as soon as it arises.

## Why gMock?

While mock objects help you remove unnecessary dependencies in tests and make
them fast and reliable, using mocks manually in C++ is *hard*:

*   Someone has to implement the mocks. The job is usually tedious and
    error-prone. No wonder people go to great lengths to avoid it.
*   The quality of those manually written mocks is a bit, uh, unpredictable. You
    may see some really polished ones, but you may also see some that were
    hacked up in a hurry and have all sorts of ad hoc restrictions.
*   The knowledge you gained from using one mock doesn't transfer to the next
    one.

In contrast, Java and Python programmers have some fine mock frameworks (jMock,
EasyMock, etc), which automate the creation of mocks. As a result, mocking is a
proven effective technique and widely adopted practice in those communities.
Having the right tool absolutely makes the difference.

gMock was built to help C++ programmers. It was inspired by jMock and EasyMock,
but designed with C++'s specifics in mind. It is your friend if any of the
following problems is bothering you:

*   You are stuck with a sub-optimal design and wish you had done more
    prototyping before it was too late, but prototyping in C++ is by no means
    "rapid".
*   Your tests are slow as they depend on too many libraries or use expensive
    resources (e.g. a database).
*   Your tests are brittle as some resources they use are unreliable (e.g. the
    network).
*   You want to test how your code handles a failure (e.g. a file checksum
    error), but it's not easy to cause one.
*   You need to make sure that your module interacts with other modules in the
    right way, but it's hard to observe the interaction; therefore you resort to
    observing the side effects at the end of the action, but it's awkward at
    best.
*   You want to "mock out" your dependencies, except that they don't have mock
    implementations yet; and, frankly, you aren't thrilled by some of those
    hand-written mocks.

We encourage you to use gMock as

*   a *design* tool, for it lets you experiment with your interface design early
    and often. More iterations lead to better designs!
*   a *testing* tool to cut your tests' outbound dependencies and probe the
    interaction between your module and its collaborators.

## Getting Started

gMock is bundled with googletest.

## A Case for Mock Turtles

Let's look at an example. Suppose you are developing a graphics program that
relies on a [LOGO](http://en.wikipedia.org/wiki/Logo_programming_language)-like
API for drawing. How would you test that it does the right thing? Well, you can
run it and compare the screen with a golden screen snapshot, but let's admit it:
tests like this are expensive to run and fragile (What if you just upgraded to a
shiny new graphics card that has better anti-aliasing? Suddenly you have to
update all your golden images.). It would be too painful if all your tests are
like this. Fortunately, you learned about
[Dependency Injection](http://en.wikipedia.org/wiki/Dependency_injection) and know the right thing
to do: instead of having your application talk to the system API directly, wrap
the API in an interface (say, `Turtle`) and code to that interface:

```cpp
class Turtle {
  ...
  virtual ~Turtle() {}
  virtual void PenUp() = 0;
  virtual void PenDown() = 0;
  virtual void Forward(int distance) = 0;
  virtual void Turn(int degrees) = 0;
  virtual void GoTo(int x, int y) = 0;
  virtual int GetX() const = 0;
  virtual int GetY() const = 0;
};
```

(Note that the destructor of `Turtle` **must** be virtual, as is the case for
**all** classes you intend to inherit from - otherwise the destructor of the
derived class will not be called when you delete an object through a base
pointer, and you'll get corrupted program states like memory leaks.)

You can control whether the turtle's movement will leave a trace using `PenUp()`
and `PenDown()`, and control its movement using `Forward()`, `Turn()`, and
`GoTo()`. Finally, `GetX()` and `GetY()` tell you the current position of the
turtle.

Your program will normally use a real implementation of this interface. In
tests, you can use a mock implementation instead. This allows you to easily
check what drawing primitives your program is calling, with what arguments, and
in which order. Tests written this way are much more robust (they won't break
because your new machine does anti-aliasing differently), easier to read and
maintain (the intent of a test is expressed in the code, not in some binary
images), and run *much, much faster*.

## Writing the Mock Class

If you are lucky, the mocks you need to use have already been implemented by
some nice people. If, however, you find yourself in the position to write a mock
class, relax - gMock turns this task into a fun game! (Well, almost.)

### How to Define It

Using the `Turtle` interface as an example, here are the simple steps you need
to follow:

*   Derive a class `MockTurtle` from `Turtle`.
*   Take a *virtual* function of `Turtle` (while it's possible to
    [mock non-virtual methods using templates](gmock_cook_book.md#MockingNonVirtualMethods),
    it's much more involved).
*   In the `public:` section of the child class, write `MOCK_METHOD();`
*   Now comes the fun part: you take the function signature, cut-and-paste it
    into the macro, and add two commas - one between the return type and the
    name, another between the name and the argument list.
*   If you're mocking a const method, add a 4th parameter containing `(const)`
    (the parentheses are required).
*   Since you're overriding a virtual method, we suggest adding the `override`
    keyword. For const methods the 4th parameter becomes `(const, override)`,
    for non-const methods just `(override)`. This isn't mandatory.
*   Repeat until all virtual functions you want to mock are done. (It goes
    without saying that *all* pure virtual methods in your abstract class must
    be either mocked or overridden.)

After the process, you should have something like:

```cpp
#include <gmock/gmock.h>  // Brings in gMock.

class MockTurtle : public Turtle {
 public:
  ...
  MOCK_METHOD(void, PenUp, (), (override));
  MOCK_METHOD(void, PenDown, (), (override));
  MOCK_METHOD(void, Forward, (int distance), (override));
  MOCK_METHOD(void, Turn, (int degrees), (override));
  MOCK_METHOD(void, GoTo, (int x, int y), (override));
  MOCK_METHOD(int, GetX, (), (const, override));
  MOCK_METHOD(int, GetY, (), (const, override));
};
```

You don't need to define these mock methods somewhere else - the `MOCK_METHOD`
macro will generate the definitions for you. It's that simple!

### Where to Put It

When you define a mock class, you need to decide where to put its definition.
Some people put it in a `_test.cc`. This is fine when the interface being mocked
(say, `Foo`) is owned by the same person or team. Otherwise, when the owner of
`Foo` changes it, your test could break. (You can't really expect `Foo`'s
maintainer to fix every test that uses `Foo`, can you?)

Generally, you should not mock classes you don't own. If you must mock such a
class owned by others, define the mock class in `Foo`'s Bazel package (usually
the same directory or a `testing` sub-directory), and put it in a `.h` and a
`cc_library` with `testonly=True`. Then everyone can reference them from their
tests. If `Foo` ever changes, there is only one copy of `MockFoo` to change, and
only tests that depend on the changed methods need to be fixed.

Another way to do it: you can introduce a thin layer `FooAdaptor` on top of
`Foo` and code to this new interface. Since you own `FooAdaptor`, you can absorb
changes in `Foo` much more easily. While this is more work initially, carefully
choosing the adaptor interface can make your code easier to write and more
readable (a net win in the long run), as you can choose `FooAdaptor` to fit your
specific domain much better than `Foo` does.
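
A minimal sketch of such an adaptor, assuming (purely for illustration) that
`Foo` exposes a `LookUp(id)` call whose result has a `name` field:

```cpp
#include <string>

// The interface we own and can mock freely.
class FooAdaptor {
 public:
  virtual ~FooAdaptor() = default;
  virtual std::string FetchName(int id) = 0;
};

// Production adaptor: translates our narrow interface to Foo's richer API.
class FooAdaptorImpl : public FooAdaptor {
 public:
  explicit FooAdaptorImpl(Foo* foo) : foo_(foo) {}
  std::string FetchName(int id) override {
    return foo_->LookUp(id).name;  // hypothetical Foo API
  }

 private:
  Foo* foo_;  // not owned
};
```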

## Using Mocks in Tests

Once you have a mock class, using it is easy. The typical workflow is:

1.  Import the gMock names from the `testing` namespace such that you can use
    them unqualified (You only have to do it once per file). Remember that
    namespaces are a good idea.
2.  Create some mock objects.
3.  Specify your expectations on them (How many times will a method be called?
    With what arguments? What should it do? etc.).
4.  Exercise some code that uses the mocks; optionally, check the result using
    googletest assertions. If a mock method is called more than expected or with
    wrong arguments, you'll get an error immediately.
5.  When a mock is destructed, gMock will automatically check whether all
    expectations on it have been satisfied.

Here's an example:

```cpp
#include "path/to/mock-turtle.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::AtLeast;                         // #1

TEST(PainterTest, CanDrawSomething) {
  MockTurtle turtle;                              // #2
  EXPECT_CALL(turtle, PenDown())                  // #3
      .Times(AtLeast(1));

  Painter painter(&turtle);                       // #4

  EXPECT_TRUE(painter.DrawCircle(0, 0, 10));      // #5
}
```

As you might have guessed, this test checks that `PenDown()` is called at least
once. If the `painter` object didn't call this method, your test will fail with
a message like this:

```text
path/to/my_test.cc:119: Failure
Actual function call count doesn't match this expectation:
  Actually: never called;
  Expected: called at least once.
Stack trace:
...
```

**Tip 1:** If you run the test from an Emacs buffer, you can hit `<Enter>` on
the line number to jump right to the failed expectation.

**Tip 2:** If your mock objects are never deleted, the final verification won't
happen. Therefore it's a good idea to turn on the heap checker in your tests
when you allocate mocks on the heap. You get that automatically if you use the
`gtest_main` library already.

**Important note:** gMock requires expectations to be set **before** the mock
functions are called, otherwise the behavior is **undefined**. Do not alternate
between calls to `EXPECT_CALL()` and calls to the mock functions, and do not set
any expectations on a mock after passing the mock to an API.

This means `EXPECT_CALL()` should be read as expecting that a call will occur
*in the future*, not that a call has occurred. Why does gMock work like that?
Well, specifying the expectation beforehand allows gMock to report a violation
as soon as it arises, when the context (stack trace, etc) is still available.
This makes debugging much easier.

Admittedly, this test is contrived and doesn't do much. You can easily achieve
the same effect without using gMock. However, as we shall reveal soon, gMock
allows you to do *so much more* with the mocks.

## Setting Expectations

The key to using a mock object successfully is to set the *right expectations*
on it. If you set the expectations too strictly, your test will fail as the
result of unrelated changes. If you set them too loosely, bugs can slip through.
You want to do it just right such that your test can catch exactly the kind of
bugs you intend it to catch. gMock provides the necessary means for you to do it
"just right."

### General Syntax

In gMock we use the `EXPECT_CALL()` macro to set an expectation on a mock
method. The general syntax is:

```cpp
EXPECT_CALL(mock_object, method(matchers))
    .Times(cardinality)
    .WillOnce(action)
    .WillRepeatedly(action);
```

The macro has two arguments: first the mock object, and then the method and its
arguments. Note that the two are separated by a comma (`,`), not a period (`.`).
(Why use a comma? The answer is that it was necessary for technical reasons.) If
the method is not overloaded, the macro can also be called without matchers:

```cpp
EXPECT_CALL(mock_object, non-overloaded-method)
    .Times(cardinality)
    .WillOnce(action)
    .WillRepeatedly(action);
```

This syntax allows the test writer to specify "called with any arguments"
without explicitly specifying the number or types of arguments. To avoid
unintended ambiguity, this syntax may only be used for methods that are not
overloaded.

Either form of the macro can be followed by some optional *clauses* that provide
more information about the expectation. We'll discuss how each clause works in
the coming sections.

This syntax is designed to make an expectation read like English. For example,
you can probably guess that

```cpp
using ::testing::Return;
...
EXPECT_CALL(turtle, GetX())
    .Times(5)
    .WillOnce(Return(100))
    .WillOnce(Return(150))
    .WillRepeatedly(Return(200));
```

says that the `turtle` object's `GetX()` method will be called five times, it
will return 100 the first time, 150 the second time, and then 200 every time.
Some people like to call this style of syntax a Domain-Specific Language (DSL).

{: .callout .note}
**Note:** Why do we use a macro to do this? Well, it serves two purposes: first,
it makes expectations easily identifiable (either by `grep` or by a human
reader), and second, it allows gMock to include the source file location of a
failed expectation in messages, making debugging easier.

### Matchers: What Arguments Do We Expect?

When a mock function takes arguments, we may specify what arguments we are
expecting, for example:

```cpp
// Expects the turtle to move forward by 100 units.
EXPECT_CALL(turtle, Forward(100));
```

Oftentimes you do not want to be too specific. Remember that talk about tests
being too rigid? Over-specification leads to brittle tests and obscures the
intent of tests. Therefore we encourage you to specify only what's necessary—no
more, no less. If you aren't interested in the value of an argument, write `_`
as the argument, which means "anything goes":

```cpp
using ::testing::_;
...
// Expects that the turtle jumps to somewhere on the x=50 line.
EXPECT_CALL(turtle, GoTo(50, _));
```

`_` is an instance of what we call **matchers**. A matcher is like a predicate
and can test whether an argument is what we'd expect. You can use a matcher
inside `EXPECT_CALL()` wherever a function argument is expected. `_` is a
convenient way of saying "any value".

In the above examples, `100` and `50` are also matchers; implicitly, they are
the same as `Eq(100)` and `Eq(50)`, which specify that the argument must be
equal (using `operator==`) to the matcher argument. There are many
[built-in matchers](reference/matchers.md) for common types (as well as
[custom matchers](gmock_cook_book.md#NewMatchers)); for example:

```cpp
using ::testing::Ge;
...
// Expects the turtle to move forward by at least 100.
EXPECT_CALL(turtle, Forward(Ge(100)));
```

If you don't care about *any* arguments, rather than specify `_` for each of
them you may instead omit the parameter list:

```cpp
// Expects the turtle to move forward.
EXPECT_CALL(turtle, Forward);
// Expects the turtle to jump somewhere.
EXPECT_CALL(turtle, GoTo);
```

This works for all non-overloaded methods; if a method is overloaded, you need
to help gMock resolve which overload is expected by specifying the number of
arguments and possibly also the
[types of the arguments](gmock_cook_book.md#SelectOverload).

### Cardinalities: How Many Times Will It Be Called?

The first clause we can specify following an `EXPECT_CALL()` is `Times()`. We
call its argument a **cardinality** as it tells *how many times* the call should
occur. It allows us to repeat an expectation many times without actually writing
it as many times. More importantly, a cardinality can be "fuzzy", just like a
matcher can be. This allows a user to express the intent of a test exactly.

An interesting special case is when we say `Times(0)`. You may have guessed - it
means that the function shouldn't be called with the given arguments at all, and
gMock will report a googletest failure whenever the function is (wrongfully)
called.
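
For example, a test could state that this turtle must never be asked to turn:

```cpp
using ::testing::_;
...
// Any call to Turn(), with any argument, is a test failure.
EXPECT_CALL(turtle, Turn(_))
    .Times(0);
```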

We've seen `AtLeast(n)` as an example of fuzzy cardinalities earlier. For the
list of built-in cardinalities you can use, see
[here](gmock_cheat_sheet.md#CardinalityList).

The `Times()` clause can be omitted. **If you omit `Times()`, gMock will infer
the cardinality for you.** The rules are easy to remember (a short sketch
illustrating them follows the list):

*   If **neither** `WillOnce()` **nor** `WillRepeatedly()` is in the
    `EXPECT_CALL()`, the inferred cardinality is `Times(1)`.
*   If there are *n* `WillOnce()`'s but **no** `WillRepeatedly()`, where *n* >=
    1, the cardinality is `Times(n)`.
*   If there are *n* `WillOnce()`'s and **one** `WillRepeatedly()`, where *n* >=
    0, the cardinality is `Times(AtLeast(n))`.
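
Applying those rules (the `Return` values below are arbitrary):

```cpp
using ::testing::Return;
...
// No WillOnce() or WillRepeatedly(): inferred as Times(1).
EXPECT_CALL(turtle, PenDown());

// Two WillOnce()'s, no WillRepeatedly(): inferred as Times(2).
EXPECT_CALL(turtle, GetX())
    .WillOnce(Return(100))
    .WillOnce(Return(200));

// One WillOnce() plus a WillRepeatedly(): inferred as Times(AtLeast(1)).
EXPECT_CALL(turtle, GetY())
    .WillOnce(Return(100))
    .WillRepeatedly(Return(200));
```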

**Quick quiz:** what do you think will happen if a function is expected to be
called twice but actually called four times?

### Actions: What Should It Do?

Remember that a mock object doesn't really have a working implementation? We as
users have to tell it what to do when a method is invoked. This is easy in
gMock.

First, if the return type of a mock function is a built-in type or a pointer,
the function has a **default action** (a `void` function will just return, a
`bool` function will return `false`, and other functions will return 0). In
addition, in C++11 and above, a mock function whose return type is
default-constructible (i.e. has a default constructor) has a default action of
returning a default-constructed value. If you don't say anything, this behavior
will be used.
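
For instance, under those rules a `MockTurtle` with no actions specified would
behave roughly like this (a sketch; the call would also be reported as an
uninteresting call, as discussed later):

```cpp
MockTurtle turtle;
...
int x = turtle.GetX();  // default action for an int-returning function: x is 0
```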

Second, if a mock function doesn't have a default action, or the default action
doesn't suit you, you can specify the action to be taken each time the
expectation matches using a series of `WillOnce()` clauses followed by an
optional `WillRepeatedly()`. For example,

```cpp
using ::testing::Return;
...
EXPECT_CALL(turtle, GetX())
    .WillOnce(Return(100))
    .WillOnce(Return(200))
    .WillOnce(Return(300));
```

says that `turtle.GetX()` will be called *exactly three times* (gMock inferred
this from how many `WillOnce()` clauses we've written, since we didn't
explicitly write `Times()`), and will return 100, 200, and 300 respectively.

```cpp
using ::testing::Return;
...
EXPECT_CALL(turtle, GetY())
    .WillOnce(Return(100))
    .WillOnce(Return(200))
    .WillRepeatedly(Return(300));
```

says that `turtle.GetY()` will be called *at least twice* (gMock knows this as
we've written two `WillOnce()` clauses and a `WillRepeatedly()` while having no
explicit `Times()`), will return 100 and 200 respectively the first two times,
and 300 from the third time on.

Of course, if you explicitly write a `Times()`, gMock will not try to infer the
cardinality itself. What if the number you specified is larger than the number
of `WillOnce()` clauses? Well, after all `WillOnce()`s are used up, gMock will
do the *default* action for the function every time (unless, of course, you
have a `WillRepeatedly()`).

What can we do inside `WillOnce()` besides `Return()`? You can return a
reference using `ReturnRef(`*`variable`*`)`, or invoke a pre-defined function,
among [others](gmock_cook_book.md#using-actions).

**Important note:** The `EXPECT_CALL()` statement evaluates the action clause
only once, even though the action may be performed many times. Therefore you
must be careful about side effects. The following may not do what you want:

```cpp
using ::testing::Return;
...
int n = 100;
EXPECT_CALL(turtle, GetX())
    .Times(4)
    .WillRepeatedly(Return(n++));
```

Instead of returning 100, 101, 102, ..., consecutively, this mock function will
always return 100 as `n++` is only evaluated once. Similarly, `Return(new Foo)`
will create a new `Foo` object when the `EXPECT_CALL()` is executed, and will
return the same pointer every time. If you want the side effect to happen every
time, you need to define a custom action, which we'll teach in the
[cook book](gmock_cook_book.md).

Time for another quiz! What do you think the following means?

```cpp
using ::testing::Return;
...
EXPECT_CALL(turtle, GetY())
    .Times(4)
    .WillOnce(Return(100));
```

Obviously `turtle.GetY()` is expected to be called four times. But if you think
it will return 100 every time, think twice! Remember that one `WillOnce()`
clause will be consumed each time the function is invoked and the default action
will be taken afterwards. So the right answer is that `turtle.GetY()` will
return 100 the first time, but **return 0 from the second time on**, as
returning 0 is the default action for `int` functions.

### Using Multiple Expectations {#MultiExpectations}

So far we've only shown examples where you have a single expectation. More
realistically, you'll specify expectations on multiple mock methods which may be
from multiple mock objects.

By default, when a mock method is invoked, gMock will search the expectations in
the **reverse order** they are defined, and stop when an active expectation that
matches the arguments is found (you can think of it as "newer rules override
older ones."). If the matching expectation cannot take any more calls, you will
get an upper-bound-violated failure. Here's an example:

```cpp
using ::testing::_;
...
EXPECT_CALL(turtle, Forward(_));  // #1
EXPECT_CALL(turtle, Forward(10))  // #2
    .Times(2);
```

If `Forward(10)` is called three times in a row, the third time it will be an
error, as the last matching expectation (#2) has been saturated. If, however,
the third `Forward(10)` call is replaced by `Forward(20)`, then it would be OK,
as now #1 will be the matching expectation.

{: .callout .note}
**Note:** Why does gMock search for a match in the *reverse* order of the
expectations? The reason is that this allows a user to set up the default
expectations in a mock object's constructor or the test fixture's set-up phase
and then customize the mock by writing more specific expectations in the test
body. So, if you have two expectations on the same method, you want to put the
one with more specific matchers **after** the other, or the more specific rule
would be shadowed by the more general one that comes after it.

{: .callout .tip}
**Tip:** It is very common to start with a catch-all expectation for a method
and `Times(AnyNumber())` (omitting arguments, or with `_` for all arguments, if
overloaded). This makes any calls to the method expected. This is not necessary
for methods that are not mentioned at all (these are "uninteresting"), but is
useful for methods that have some expectations, but for which other calls are
ok. See
[Understanding Uninteresting vs Unexpected Calls](gmock_cook_book.md#uninteresting-vs-unexpected).
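
Such a catch-all might look like this (a sketch of the pattern described in the
tip above):

```cpp
using ::testing::AnyNumber;
...
// Any number of calls to GetY() are fine in this test.
EXPECT_CALL(turtle, GetY())
    .Times(AnyNumber());
```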

### Ordered vs Unordered Calls {#OrderedCalls}

By default, an expectation can match a call even though an earlier expectation
hasn't been satisfied. In other words, the calls don't have to occur in the
order the expectations are specified.

Sometimes, you may want all the expected calls to occur in a strict order. To
say this in gMock is easy:

```cpp
using ::testing::InSequence;
...
TEST(FooTest, DrawsLineSegment) {
  ...
  {
    InSequence seq;

    EXPECT_CALL(turtle, PenDown());
    EXPECT_CALL(turtle, Forward(100));
    EXPECT_CALL(turtle, PenUp());
  }
  Foo();
}
```

By creating an object of type `InSequence`, all expectations in its scope are
put into a *sequence* and have to occur *sequentially*. Since we are just
relying on the constructor and destructor of this object to do the actual work,
its name is really irrelevant.

In this example, we test that `Foo()` calls the three expected functions in the
order as written. If a call is made out of order, it will be an error.

(What if you care about the relative order of some of the calls, but not all of
them? Can you specify an arbitrary partial order? The answer is ... yes! The
details can be found [here](gmock_cook_book.md#OrderedCalls).)

### All Expectations Are Sticky (Unless Said Otherwise) {#StickyExpectations}

Now let's do a quick quiz to see how well you can use this mock stuff already.
How would you test that the turtle is asked to go to the origin *exactly twice*
(you want to ignore any other instructions it receives)?

After you've come up with your answer, take a look at ours and compare notes
(solve it yourself first - don't cheat!):

```cpp
using ::testing::_;
using ::testing::AnyNumber;
...
EXPECT_CALL(turtle, GoTo(_, _))  // #1
    .Times(AnyNumber());
EXPECT_CALL(turtle, GoTo(0, 0))  // #2
    .Times(2);
```

Suppose `turtle.GoTo(0, 0)` is called three times. In the third time, gMock will
see that the arguments match expectation #2 (remember that we always pick the
last matching expectation). Now, since we said that there should be only two
such calls, gMock will report an error immediately. This is basically what we've
told you in the [Using Multiple Expectations](#MultiExpectations) section above.

This example shows that **expectations in gMock are "sticky" by default**, in
the sense that they remain active even after we have reached their invocation
upper bounds. This is an important rule to remember, as it affects the meaning
of the spec, and is **different** from how it's done in many other mocking
frameworks (Why'd we do that? Because we think our rule makes the common cases
easier to express and understand.).

Simple? Let's see if you've really understood it: what does the following code
say?

```cpp
using ::testing::Return;
...
for (int i = n; i > 0; i--) {
  EXPECT_CALL(turtle, GetX())
      .WillOnce(Return(10*i));
}
```

If you think it says that `turtle.GetX()` will be called `n` times and will
return 10, 20, 30, ..., consecutively, think twice! The problem is that, as we
said, expectations are sticky. So, the second time `turtle.GetX()` is called,
the last (latest) `EXPECT_CALL()` statement will match, and will immediately
lead to an "upper bound violated" error - this piece of code is not very useful!

One correct way of saying that `turtle.GetX()` will return 10, 20, 30, ..., is
to explicitly say that the expectations are *not* sticky. In other words, they
should *retire* as soon as they are saturated:

```cpp
using ::testing::Return;
...
for (int i = n; i > 0; i--) {
  EXPECT_CALL(turtle, GetX())
      .WillOnce(Return(10*i))
      .RetiresOnSaturation();
}
```

And, there's a better way to do it: in this case, we expect the calls to occur
in a specific order, and we line up the actions to match the order. Since the
order is important here, we should make it explicit using a sequence:

```cpp
using ::testing::InSequence;
using ::testing::Return;
...
{
  InSequence s;

  for (int i = 1; i <= n; i++) {
    EXPECT_CALL(turtle, GetX())
        .WillOnce(Return(10*i))
        .RetiresOnSaturation();
  }
}
```

By the way, the other situation where an expectation may *not* be sticky is when
it's in a sequence - as soon as another expectation that comes after it in the
sequence has been used, it automatically retires (and will never be used to
match any call).

### Uninteresting Calls

A mock object may have many methods, and not all of them are that interesting.
For example, in some tests we may not care about how many times `GetX()` and
`GetY()` get called.

In gMock, if you are not interested in a method, just don't say anything about
it. If a call to this method occurs, you'll see a warning in the test output,
but it won't be a failure. This is called "naggy" behavior; to change, see
[The Nice, the Strict, and the Naggy](gmock_cook_book.md#NiceStrictNaggy).
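
For a quick taste of what that cookbook section covers, wrapping the mock type
in `NiceMock` or `StrictMock` changes how uninteresting calls are treated (a
sketch using the `MockTurtle` from above):

```cpp
using ::testing::NiceMock;
using ::testing::StrictMock;
...
NiceMock<MockTurtle> nice_turtle;      // uninteresting calls are silently allowed
StrictMock<MockTurtle> strict_turtle;  // uninteresting calls fail the test
```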