Merge branch 'master' into gltf_export_id_issue_3978
commit d0885af634
@@ -9,6 +9,7 @@ find_package(poly2tri CONFIG REQUIRED)
find_package(polyclipping CONFIG REQUIRED)
find_package(zip CONFIG REQUIRED)
find_package(pugixml CONFIG REQUIRED)
find_package(stb CONFIG REQUIRED)

if(@ASSIMP_BUILD_DRACO@)
find_package(draco CONFIG REQUIRED)

@@ -170,10 +170,10 @@ ColladaParser::ColladaParser(IOSystem *pIOHandler, const std::string &pFile) :
// ------------------------------------------------------------------------------------------------
// Destructor, private as well
ColladaParser::~ColladaParser() {
for (auto & it : mNodeLibrary) {
for (auto &it : mNodeLibrary) {
delete it.second;
}
for (auto & it : mMeshLibrary) {
for (auto &it : mMeshLibrary) {
delete it.second;
}
}

@@ -396,7 +396,7 @@ void ColladaParser::ReadAnimationClipLibrary(XmlNode &node) {

std::string animName;
if (!XmlParser::getStdStrAttribute(node, "name", animName)) {
if (!XmlParser::getStdStrAttribute( node, "id", animName )) {
if (!XmlParser::getStdStrAttribute(node, "id", animName)) {
animName = std::string("animation_") + ai_to_string(mAnimationClipLibrary.size());
}
}

@@ -420,7 +420,7 @@ void ColladaParser::ReadAnimationClipLibrary(XmlNode &node) {

void ColladaParser::PostProcessControllers() {
std::string meshId;
for (auto & it : mControllerLibrary) {
for (auto &it : mControllerLibrary) {
meshId = it.second.mMeshId;
if (meshId.empty()) {
continue;

@@ -445,7 +445,7 @@ void ColladaParser::PostProcessRootAnimations() {
}

Animation temp;
for (auto & it : mAnimationClipLibrary) {
for (auto &it : mAnimationClipLibrary) {
std::string clipName = it.first;

Animation *clip = new Animation();

@@ -552,7 +552,7 @@ void ColladaParser::ReadAnimation(XmlNode &node, Collada::Animation *pParent) {
pParent->mSubAnims.push_back(anim);
}

for (const auto & channel : channels) {
for (const auto &channel : channels) {
anim->mChannels.push_back(channel.second);
}

@@ -626,8 +626,6 @@ void ColladaParser::ReadController(XmlNode &node, Collada::Controller &controlle
XmlNodeIterator xmlIt(node, XmlNodeIterator::PreOrderMode);
XmlNode currentNode;
while (xmlIt.getNext(currentNode)) {

//for (XmlNode &currentNode : node.children()) {
const std::string &currentName = currentNode.name();
if (currentName == "morph") {
controller.mType = Morph;

@@ -644,7 +642,7 @@ void ColladaParser::ReadController(XmlNode &node, Collada::Controller &controlle
} else if (currentName == "skin") {
std::string id;
if (XmlParser::getStdStrAttribute(currentNode, "source", id)) {
controller.mMeshId = id.substr(1, id.size()-1);
controller.mMeshId = id.substr(1, id.size() - 1);
}
} else if (currentName == "bind_shape_matrix") {
std::string v;

@@ -698,7 +696,7 @@ void ColladaParser::ReadControllerJoints(XmlNode &node, Collada::Controller &pCo
} else if (strcmp(attrSemantic, "INV_BIND_MATRIX") == 0) {
pController.mJointOffsetMatrixSource = attrSource;
} else {
throw DeadlyImportError("Unknown semantic \"" , attrSemantic , "\" in <joints> data <input> element");
throw DeadlyImportError("Unknown semantic \"", attrSemantic, "\" in <joints> data <input> element");
}
}
}

@@ -708,7 +706,7 @@ void ColladaParser::ReadControllerJoints(XmlNode &node, Collada::Controller &pCo
// Reads the joint weights for the given controller
void ColladaParser::ReadControllerWeights(XmlNode &node, Collada::Controller &pController) {
// Read vertex count from attributes and resize the array accordingly
int vertexCount=0;
int vertexCount = 0;
XmlParser::getIntAttribute(node, "count", vertexCount);
pController.mWeightCounts.resize(vertexCount);

@@ -723,7 +721,7 @@ void ColladaParser::ReadControllerWeights(XmlNode &node, Collada::Controller &pC

// local URLS always start with a '#'. We don't support global URLs
if (attrSource[0] != '#') {
throw DeadlyImportError( "Unsupported URL format in \"", attrSource, "\" in source attribute of <vertex_weights> data <input> element");
throw DeadlyImportError("Unsupported URL format in \"", attrSource, "\" in source attribute of <vertex_weights> data <input> element");
}
channel.mAccessor = attrSource + 1;

@@ -777,7 +775,7 @@ void ColladaParser::ReadImageLibrary(XmlNode &node) {
const std::string &currentName = currentNode.name();
if (currentName == "image") {
std::string id;
if (XmlParser::getStdStrAttribute( currentNode, "id", id )) {
if (XmlParser::getStdStrAttribute(currentNode, "id", id)) {
mImageLibrary[id] = Image();
// read on from there
ReadImage(currentNode, mImageLibrary[id]);

@@ -1361,8 +1359,8 @@ void ColladaParser::ReadMesh(XmlNode &node, Mesh &pMesh) {
} else if (currentName == "vertices") {
ReadVertexData(currentNode, pMesh);
} else if (currentName == "triangles" || currentName == "lines" || currentName == "linestrips" ||
currentName == "polygons" || currentName == "polylist" || currentName == "trifans" ||
currentName == "tristrips") {
currentName == "polygons" || currentName == "polylist" || currentName == "trifans" ||
currentName == "tristrips") {
ReadIndexData(currentNode, pMesh);
}
}

@@ -1439,9 +1437,8 @@ void ColladaParser::ReadDataArray(XmlNode &node) {
throw DeadlyImportError("Expected more values while reading float_array contents.");
}

ai_real value;
// read a number
//SkipSpacesAndLineEnd(&content);
ai_real value;
content = fast_atoreal_move<ai_real>(content, value);
data.mValues.push_back(value);
// skip whitespace after it

@@ -1489,11 +1486,10 @@ void ColladaParser::ReadAccessor(XmlNode &node, const std::string &pID) {
std::string name;
if (XmlParser::hasAttribute(currentNode, "name")) {
XmlParser::getStdStrAttribute(currentNode, "name", name);
//name = mReader->getAttributeValue(attrName);

// analyse for common type components and store it's sub-offset in the corresponding field

/* Cartesian coordinates */
// Cartesian coordinates
if (name == "X")
acc.mSubOffset[0] = acc.mParams.size();
else if (name == "Y")

@@ -1674,12 +1670,9 @@ void ColladaParser::ReadInputChannel(XmlNode &node, std::vector<InputChannel> &p

// read set if texture coordinates
if (channel.mType == IT_Texcoord || channel.mType == IT_Color) {
int attrSet = -1;
if (XmlParser::hasAttribute(node, "set")) {
XmlParser::getIntAttribute(node, "set", attrSet);
}

channel.mIndex = attrSet;
unsigned int attrSet = 0;
if (XmlParser::getUIntAttribute(node, "set", attrSet))
channel.mIndex = attrSet;
}

// store, if valid type

@@ -1704,20 +1697,20 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
// determine the expected number of indices
size_t expectedPointCount = 0;
switch (pPrimType) {
case Prim_Polylist: {
for (size_t i : pVCount)
expectedPointCount += i;
break;
}
case Prim_Lines:
expectedPointCount = 2 * pNumPrimitives;
break;
case Prim_Triangles:
expectedPointCount = 3 * pNumPrimitives;
break;
default:
// other primitive types don't state the index count upfront... we need to guess
break;
case Prim_Polylist: {
for (size_t i : pVCount)
expectedPointCount += i;
break;
}
case Prim_Lines:
expectedPointCount = 2 * pNumPrimitives;
break;
case Prim_Triangles:
expectedPointCount = 3 * pNumPrimitives;
break;
default:
// other primitive types don't state the index count upfront... we need to guess
break;
}

// and read all indices into a temporary array

@@ -1727,7 +1720,7 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
}

// It is possible to not contain any indices
if (pNumPrimitives > 0) {
if (pNumPrimitives > 0) {
std::string v;
XmlParser::getValueAsString(node, v);
const char *content = v.c_str();

@@ -1925,87 +1918,87 @@ void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, siz

// now we reinterpret it according to the type we're reading here
switch (pInput.mType) {
case IT_Position: // ignore all position streams except 0 - there can be only one position
if (pInput.mIndex == 0) {
pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported");
}
break;
case IT_Normal:
case IT_Position: // ignore all position streams except 0 - there can be only one position
if (pInput.mIndex == 0) {
pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported");
}
break;
case IT_Normal:
// pad to current vertex count if necessary
if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1)
pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0));

// ignore all normal streams except 0 - there can be only one normal
if (pInput.mIndex == 0) {
pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported");
}
break;
case IT_Tangent:
// pad to current vertex count if necessary
if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1)
pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0));

// ignore all tangent streams except 0 - there can be only one tangent
if (pInput.mIndex == 0) {
pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported");
}
break;
case IT_Bitangent:
// pad to current vertex count if necessary
if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) {
pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1));
}

// ignore all bitangent streams except 0 - there can be only one bitangent
if (pInput.mIndex == 0) {
pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported");
}
break;
case IT_Texcoord:
// up to 4 texture coord sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) {
// pad to current vertex count if necessary
if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1)
pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0));
if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0));

// ignore all normal streams except 0 - there can be only one normal
if (pInput.mIndex == 0) {
pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported");
pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2]));
if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) {
pMesh.mNumUVComponents[pInput.mIndex] = 3;
}
break;
case IT_Tangent:
} else {
ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping.");
}
break;
case IT_Color:
// up to 4 color sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) {
// pad to current vertex count if necessary
if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1)
pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0));
if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1));

// ignore all tangent streams except 0 - there can be only one tangent
if (pInput.mIndex == 0) {
pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported");
}
break;
case IT_Bitangent:
// pad to current vertex count if necessary
if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) {
pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1));
aiColor4D result(0, 0, 0, 1);
for (size_t i = 0; i < pInput.mResolved->mSize; ++i) {
result[static_cast<unsigned int>(i)] = obj[pInput.mResolved->mSubOffset[i]];
}
pMesh.mColors[pInput.mIndex].push_back(result);
} else {
ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping.");
}

// ignore all bitangent streams except 0 - there can be only one bitangent
if (pInput.mIndex == 0) {
pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported");
}
break;
case IT_Texcoord:
// up to 4 texture coord sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) {
// pad to current vertex count if necessary
if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0));

pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2]));
if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) {
pMesh.mNumUVComponents[pInput.mIndex] = 3;
}
} else {
ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping.");
}
break;
case IT_Color:
// up to 4 color sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) {
// pad to current vertex count if necessary
if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1));

aiColor4D result(0, 0, 0, 1);
for (size_t i = 0; i < pInput.mResolved->mSize; ++i) {
result[static_cast<unsigned int>(i)] = obj[pInput.mResolved->mSubOffset[i]];
}
pMesh.mColors[pInput.mIndex].push_back(result);
} else {
ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping.");
}

break;
default:
// IT_Invalid and IT_Vertex
ai_assert(false && "shouldn't ever get here");
break;
default:
// IT_Invalid and IT_Vertex
ai_assert(false && "shouldn't ever get here");
}
}

@@ -2170,10 +2163,10 @@ void ColladaParser::ReadNodeTransformation(XmlNode &node, Node *pNode, Transform

// read as many parameters and store in the transformation
for (unsigned int a = 0; a < sNumParameters[pType]; a++) {
// skip whitespace before the number
SkipSpacesAndLineEnd(&content);
// read a number
content = fast_atoreal_move<ai_real>(content, tf.f[a]);
// skip whitespace after it
SkipSpacesAndLineEnd(&content);
}

// place the transformation at the queue of the node

@@ -32,7 +32,7 @@ PROJECT_NAME = Assimp
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = "v4.1. (December 2018)"
PROJECT_NUMBER = "v5.0.1. (December 2020)"

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer

@@ -197,7 +197,7 @@ SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.

TAB_SIZE = 8
TAB_SIZE = 4

# This tag can be used to specify a number of aliases that acts
# as commands in the documentation. An alias has the form "name=value".

@@ -663,11 +663,7 @@ WARN_LOGFILE =
# with spaces.

INPUT = @doxy_main_page@ \
@PROJECT_SOURCE_DIR@ \
@PROJECT_BINARY_DIR@ \
@PROJECT_SOURCE_DIR@/include/ \
@PROJECT_SOURCE_DIR@/doc/dox.h \
@PROJECT_SOURCE_DIR@/code/BaseImporter.h
@PROJECT_SOURCE_DIR@/include/

# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is

@@ -690,28 +686,9 @@ FILE_PATTERNS = *.c \
*.cxx \
*.cpp \
*.c++ \
*.d \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.idl \
*.odl \
*.cs \
*.php \
*.php3 \
*.inc \
*.m \
*.mm \
*.dox \
*.py
*.dox

# The RECURSIVE tag can be used to turn specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.

@@ -725,7 +702,7 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.

EXCLUDE = irrXML.h
EXCLUDE = contrib/*

# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded

@@ -739,8 +716,7 @@ EXCLUDE_SYMLINKS = NO
# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*

EXCLUDE_PATTERNS = */.svn/* \
*/.svn
EXCLUDE_PATTERNS = */.git/*

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the

@@ -34,7 +34,8 @@ if os.name=='posix':
additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item])

# check if running from anaconda.
if "conda" or "continuum" in sys.version.lower():
anaconda_keywords = ("conda", "continuum")
if any(k in sys.version.lower() for k in anaconda_keywords):
cur_path = get_python_lib()
pattern = re.compile('.*\/lib\/')
conda_lib = pattern.match(cur_path).group()
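
A note on the change above, illustrative only and using a made-up interpreter version string: the old condition is parsed as ("conda") or ("continuum" in sys.version.lower()), and a non-empty string literal is always truthy, so the anaconda branch ran for every interpreter. The any(...) form only matches when one of the keywords actually appears:

    version = "3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0]"  # hypothetical non-conda build string

    old_check = "conda" or "continuum" in version.lower()  # parsed as ("conda") or (...), always truthy
    new_check = any(k in version.lower() for k in ("conda", "continuum"))

    print(bool(old_check))  # True  - the old condition can never be False
    print(new_check)        # False - correctly reports a non-conda interpreter
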
Binary file not shown.
@@ -382,3 +382,25 @@ public:
TEST_F(utColladaZaeImportExport, importBlenFromFileTest) {
EXPECT_TRUE(importerTest());
}

TEST_F(utColladaZaeImportExport, importMakeHumanTest) {
Assimp::Importer importer;
const aiScene *scene = importer.ReadFile(ASSIMP_TEST_MODELS_DIR "/Collada/human.zae", aiProcess_ValidateDataStructure);
ASSERT_NE(nullptr, scene);

// Expected number of items
EXPECT_EQ(scene->mNumMeshes, 2u);
EXPECT_EQ(scene->mNumMaterials, 2u);
EXPECT_EQ(scene->mNumAnimations, 0u);
EXPECT_EQ(scene->mNumTextures, 2u);
EXPECT_EQ(scene->mNumLights, 0u);
EXPECT_EQ(scene->mNumCameras, 0u);

// Expected common metadata
aiString value;
EXPECT_TRUE(scene->mMetaData->Get(AI_METADATA_SOURCE_FORMAT, value)) << "No importer format metadata";
EXPECT_STREQ("Collada Importer", value.C_Str());

EXPECT_TRUE(scene->mMetaData->Get(AI_METADATA_SOURCE_FORMAT_VERSION, value)) << "No format version metadata";
EXPECT_STREQ("1.4.1", value.C_Str());
}