Merge pull request #1 from assimp/master

Merge From assimp/assimp master
pull/3661/head
xiaohunqupo 2021-01-25 14:12:28 +08:00 committed by GitHub
commit fbc45fecec
6 changed files with 125 additions and 127 deletions

.gitignore

@@ -18,6 +18,9 @@ build
*.VC.db-wal
*.VC.opendb
*.ipch
.vs/
out/
CMakeSettings.json
# Output
bin/

code/AssetLib/3DS/3DSLoader.cpp

@@ -266,6 +266,7 @@ void Discreet3DSImporter::ParseMainChunk() {
case Discreet3DS::CHUNK_PRJ:
bIsPrj = true;
break;
case Discreet3DS::CHUNK_MAIN:
ParseEditorChunk();
break;
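
For context on the chunk dispatch above: a 3DS file is a stream of chunks, each framed by a 16-bit id and a 32-bit length that includes the header itself, and the parser either descends into a chunk's children or skips it wholesale. A minimal, self-contained sketch of that walk, using a hypothetical ChunkHeader/readChunk pair rather than assimp's StreamReader:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical chunk header: a 16-bit id followed by a 32-bit length that
// counts the 6 header bytes as well (this is how 3DS frames its data).
struct ChunkHeader {
    uint16_t id;
    uint32_t length;
};

// Read one little-endian header starting at `pos`; false when the buffer ends.
static bool readChunk(const std::vector<uint8_t> &buf, size_t pos, ChunkHeader &out) {
    if (pos + 6 > buf.size()) {
        return false;
    }
    std::memcpy(&out.id, &buf[pos], 2);         // assumes a little-endian host
    std::memcpy(&out.length, &buf[pos + 2], 4);
    return true;
}

// Walk the top-level chunks: descend into the main chunk, skip everything else.
static void parseTopLevel(const std::vector<uint8_t> &buf) {
    const uint16_t CHUNK_MAIN = 0x4D4D;            // well-known 3DS main chunk id
    size_t pos = 0;
    ChunkHeader c;
    while (readChunk(buf, pos, c)) {
        if (c.id == CHUNK_MAIN) {
            pos += 6;                              // children start right after the header
        } else {
            pos += (c.length < 6) ? 6 : c.length;  // skip the payload, guard corrupt lengths
        }
    }
}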

code/AssetLib/COB/COBLoader.cpp

@@ -44,6 +44,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ASSIMP_BUILD_NO_COB_IMPORTER
#include "AssetLib/COB/COBLoader.h"
#include "AssetLib/COB/COBScene.h"
#include "PostProcessing/ConvertToLHProcess.h"
@@ -90,11 +91,15 @@ static const aiImporterDesc desc = {
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
COBImporter::COBImporter() {}
COBImporter::COBImporter() {
// empty
}
// ------------------------------------------------------------------------------------------------
// Destructor, private as well
COBImporter::~COBImporter() {}
COBImporter::~COBImporter() {
// empty
}
// ------------------------------------------------------------------------------------------------
// Returns whether the class can handle the format of the given file.
@@ -466,8 +471,9 @@ void COBImporter::UnsupportedChunk_Ascii(LineSplitter &splitter, const ChunkInfo
// missing the next line.
splitter.get_stream().IncPtr(nfo.size);
splitter.swallow_next_increment();
} else
} else {
ThrowException(error);
}
}
// ------------------------------------------------------------------------------------------------
@@ -790,25 +796,12 @@ void COBImporter::ReadBitM_Ascii(Scene & /*out*/, LineSplitter &splitter, const
if (nfo.version > 1) {
return UnsupportedChunk_Ascii(splitter, nfo, "BitM");
}
/*
"\nThumbNailHdrSize %ld"
"\nThumbHeader: %02hx 02hx %02hx "
"\nColorBufSize %ld"
"\nColorBufZipSize %ld"
"\nZippedThumbnail: %02hx 02hx %02hx "
*/
const unsigned int head = strtoul10((++splitter)[1]);
if (head != sizeof(Bitmap::BitmapHeader)) {
ASSIMP_LOG_WARN("Unexpected ThumbNailHdrSize, skipping this chunk");
return;
}
/*union {
Bitmap::BitmapHeader data;
char opaq[sizeof Bitmap::BitmapHeader()];
};*/
// ReadHexOctets(opaq,head,(++splitter)[1]);
}
// ------------------------------------------------------------------------------------------------
@@ -884,7 +877,10 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) {
while (1) {
std::string type;
type += reader->GetI1(), type += reader->GetI1(), type += reader->GetI1(), type += reader->GetI1();
type += reader->GetI1();
type += reader->GetI1();
type += reader->GetI1();
type += reader->GetI1();
ChunkInfo nfo;
nfo.version = reader->GetI2() * 10;
@@ -906,14 +902,7 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) {
ReadCame_Binary(out, *reader, nfo);
} else if (type == "Mat1") {
ReadMat1_Binary(out, *reader, nfo);
}
/* else if (type == "Bone") {
ReadBone_Binary(out,*reader,nfo);
}
else if (type == "Chan") {
ReadChan_Binary(out,*reader,nfo);
}*/
else if (type == "Unit") {
} else if (type == "Unit") {
ReadUnit_Binary(out, *reader, nfo);
} else if (type == "OLay") {
// ignore layer index silently.
@@ -923,8 +912,9 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) {
return UnsupportedChunk_Binary(*reader, nfo, type.c_str());
} else if (type == "END ") {
return;
} else
} else {
UnsupportedChunk_Binary(*reader, nfo, type.c_str());
}
}
}
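
The binary reader changed above identifies every chunk by a four-character tag ("PolH", "Mat1", "Unit", "END ", ...) followed by a version word that the loader scales by ten. A compact sketch of that framing, with a hypothetical ByteReader standing in for assimp's StreamReaderLE:

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical little-endian byte reader; the loader itself uses StreamReaderLE.
class ByteReader {
public:
    explicit ByteReader(std::vector<uint8_t> data) : mData(std::move(data)) {}
    uint8_t getU1() { return mData.at(mPos++); }
    uint16_t getU2() {
        const uint16_t lo = getU1();
        const uint16_t hi = getU1();
        return static_cast<uint16_t>(lo | (hi << 8));
    }
    bool atEnd() const { return mPos >= mData.size(); }
private:
    std::vector<uint8_t> mData;
    size_t mPos = 0;
};

// Pull chunks off the stream until the "END " marker is reached.
static void readChunks(ByteReader &reader) {
    while (!reader.atEnd()) {
        // the tag is four raw characters, read byte by byte
        std::string type;
        type += static_cast<char>(reader.getU1());
        type += static_cast<char>(reader.getU1());
        type += static_cast<char>(reader.getU1());
        type += static_cast<char>(reader.getU1());

        const unsigned version = reader.getU2() * 10u; // scaled by 10, as in the loader above

        if (type == "END ") {
            return;                  // explicit end-of-stream marker
        } else if (type == "PolH") {
            // a real reader would parse the polygon chunk payload here
            (void)version;
        } else {
            // unknown chunk: the loader reports it and skips the payload
        }
    }
}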

code/AssetLib/Collada/ColladaParser.cpp

@@ -334,7 +334,7 @@ void ColladaParser::ReadAssetInfo(XmlNode &node) {
const std::string &currentName = currentNode.name();
if (currentName == "unit") {
mUnitSize = 1.f;
XmlParser::getFloatAttribute(node, "meter", mUnitSize);
XmlParser::getFloatAttribute(currentNode, "meter", mUnitSize);
} else if (currentName == "up_axis") {
std::string v;
if (!XmlParser::getValueAsString(currentNode, v)) {
@@ -459,7 +459,6 @@ void ColladaParser::PostProcessRootAnimations() {
if (animation != mAnimationLibrary.end()) {
Animation *pSourceAnimation = animation->second;
pSourceAnimation->CollectChannelsRecursively(clip->mChannels);
}
}
@@ -1738,14 +1737,16 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
// and read all indices into a temporary array
std::vector<size_t> indices;
if (expectedPointCount > 0)
if (expectedPointCount > 0) {
indices.reserve(expectedPointCount * numOffsets);
}
if (pNumPrimitives > 0) // It is possible to not contain any indices
{
// It is possible to not contain any indices
if (pNumPrimitives > 0) {
std::string v;
XmlParser::getValueAsString(node, v);
const char *content = v.c_str();
SkipSpacesAndLineEnd(&content);
while (*content != 0) {
// read a value.
// Hack: (thom) Some exporters put negative indices sometimes. We just try to carry on anyways.
@@ -1772,21 +1773,24 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
// find the data for all sources
for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) {
InputChannel &input = *it;
if (input.mResolved)
if (input.mResolved) {
continue;
}
// find accessor
input.mResolved = &ResolveLibraryReference(mAccessorLibrary, input.mAccessor);
// resolve accessor's data pointer as well, if necessary
const Accessor *acc = input.mResolved;
if (!acc->mData)
if (!acc->mData) {
acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource);
}
}
// and the same for the per-index channels
for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) {
InputChannel &input = *it;
if (input.mResolved)
if (input.mResolved) {
continue;
}
// ignore vertex pointer, it doesn't refer to an accessor
if (input.mType == IT_Vertex) {
@@ -1801,8 +1805,9 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
input.mResolved = &ResolveLibraryReference(mAccessorLibrary, input.mAccessor);
// resolve accessor's data pointer as well, if necessary
const Accessor *acc = input.mResolved;
if (!acc->mData)
if (!acc->mData) {
acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource);
}
}
// For continued primitives, the given count does not come all in one <p>, but only one primitive per <p>
@@ -1884,11 +1889,13 @@ void ColladaParser::CopyVertex(size_t currentVertex, size_t numOffsets, size_t n
ai_assert((baseOffset + numOffsets - 1) < indices.size());
// extract per-vertex channels using the global per-vertex offset
for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it)
for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) {
ExtractDataObjectFromChannel(*it, indices[baseOffset + perVertexOffset], pMesh);
}
// and extract per-index channels using there specified offset
for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it)
for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) {
ExtractDataObjectFromChannel(*it, indices[baseOffset + it->mOffset], pMesh);
}
// store the vertex-data index for later assignment of bone vertex weights
pMesh.mFacePosIndices.push_back(indices[baseOffset + perVertexOffset]);
@@ -1912,8 +1919,9 @@ void ColladaParser::ReadPrimTriStrips(size_t numOffsets, size_t perVertexOffset,
// Extracts a single object from an input channel and stores it in the appropriate mesh data array
void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, size_t pLocalIndex, Mesh &pMesh) {
// ignore vertex referrer - we handle them that separate
if (pInput.mType == IT_Vertex)
if (pInput.mType == IT_Vertex) {
return;
}
const Accessor &acc = *pInput.mResolved;
if (pLocalIndex >= acc.mCount) {
@@ -1926,86 +1934,93 @@ void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, siz
// assemble according to the accessors component sub-offset list. We don't care, yet,
// what kind of object exactly we're extracting here
ai_real obj[4];
for (size_t c = 0; c < 4; ++c)
for (size_t c = 0; c < 4; ++c) {
obj[c] = dataObject[acc.mSubOffset[c]];
}
// now we reinterpret it according to the type we're reading here
switch (pInput.mType) {
case IT_Position: // ignore all position streams except 0 - there can be only one position
if (pInput.mIndex == 0)
pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2]));
else
ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported");
break;
case IT_Normal:
// pad to current vertex count if necessary
if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1)
pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0));
// ignore all normal streams except 0 - there can be only one normal
if (pInput.mIndex == 0)
pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2]));
else
ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported");
break;
case IT_Tangent:
// pad to current vertex count if necessary
if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1)
pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0));
// ignore all tangent streams except 0 - there can be only one tangent
if (pInput.mIndex == 0)
pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
else
ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported");
break;
case IT_Bitangent:
// pad to current vertex count if necessary
if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1)
pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1));
// ignore all bitangent streams except 0 - there can be only one bitangent
if (pInput.mIndex == 0)
pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
else
ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported");
break;
case IT_Texcoord:
// up to 4 texture coord sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) {
// pad to current vertex count if necessary
if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0));
pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2]));
if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) /* hack ... consider cleaner solution */
pMesh.mNumUVComponents[pInput.mIndex] = 3;
} else {
ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping.");
}
break;
case IT_Color:
// up to 4 color sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) {
// pad to current vertex count if necessary
if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1));
aiColor4D result(0, 0, 0, 1);
for (size_t i = 0; i < pInput.mResolved->mSize; ++i) {
result[static_cast<unsigned int>(i)] = obj[pInput.mResolved->mSubOffset[i]];
case IT_Position: // ignore all position streams except 0 - there can be only one position
if (pInput.mIndex == 0) {
pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported");
}
pMesh.mColors[pInput.mIndex].push_back(result);
} else {
ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping.");
}
break;
case IT_Normal:
// pad to current vertex count if necessary
if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1)
pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0));
break;
default:
// IT_Invalid and IT_Vertex
ai_assert(false && "shouldn't ever get here");
// ignore all normal streams except 0 - there can be only one normal
if (pInput.mIndex == 0) {
pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported");
}
break;
case IT_Tangent:
// pad to current vertex count if necessary
if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1)
pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0));
// ignore all tangent streams except 0 - there can be only one tangent
if (pInput.mIndex == 0) {
pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported");
}
break;
case IT_Bitangent:
// pad to current vertex count if necessary
if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) {
pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1));
}
// ignore all bitangent streams except 0 - there can be only one bitangent
if (pInput.mIndex == 0) {
pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
} else {
ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported");
}
break;
case IT_Texcoord:
// up to 4 texture coord sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) {
// pad to current vertex count if necessary
if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0));
pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2]));
if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) {
pMesh.mNumUVComponents[pInput.mIndex] = 3;
}
} else {
ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping.");
}
break;
case IT_Color:
// up to 4 color sets are fine, ignore the others
if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) {
// pad to current vertex count if necessary
if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(),
pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1));
aiColor4D result(0, 0, 0, 1);
for (size_t i = 0; i < pInput.mResolved->mSize; ++i) {
result[static_cast<unsigned int>(i)] = obj[pInput.mResolved->mSubOffset[i]];
}
pMesh.mColors[pInput.mIndex].push_back(result);
} else {
ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping.");
}
break;
default:
// IT_Invalid and IT_Vertex
ai_assert(false && "shouldn't ever get here");
}
}
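
All of the extraction above funnels through the same two steps: an accessor maps an index into a flat float array and reorders the components via its sub-offsets, and every secondary stream (normals, tangents, texcoords, colors) is padded with a default value so it stays aligned with the position stream. A stripped-down sketch of both steps, using hypothetical Accessor/Vec3 types rather than the ColladaParser ones:

#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };

// Minimal stand-in for the COLLADA accessor: where the entries start, how
// wide each entry is, and in which order its components are stored.
struct Accessor {
    std::size_t offset = 0;               // floats to skip before the first entry
    std::size_t stride = 3;               // floats per entry
    std::size_t subOffset[3] = {0, 1, 2}; // component order inside one entry
};

// Fetch entry `index` from the flat source array, reordered via subOffset.
static Vec3 extract(const std::vector<float> &data, const Accessor &acc, std::size_t index) {
    const float *entry = data.data() + acc.offset + index * acc.stride;
    return { entry[acc.subOffset[0]], entry[acc.subOffset[1]], entry[acc.subOffset[2]] };
}

// Keep a secondary stream (normals, texcoords, ...) aligned with the position
// stream: pad with a default up to positionCount - 1, then append the value,
// mirroring the "pad to current vertex count" logic above.
static void pushAligned(std::vector<Vec3> &stream, std::size_t positionCount,
                        const Vec3 &value, const Vec3 &pad) {
    if (stream.size() + 1 < positionCount) {
        stream.insert(stream.end(), positionCount - stream.size() - 1, pad);
    }
    stream.push_back(value);
}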

code/AssetLib/IFC/IFCGeometry.cpp

@@ -656,7 +656,7 @@ void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const Te
}
}
if( openings && ((sides_with_openings == 1 && sides_with_openings) || (sides_with_v_openings == 2 && sides_with_v_openings)) ) {
if( openings && (sides_with_openings == 1 || sides_with_v_openings == 2 ) ) {
IFCImporter::LogWarn("failed to resolve all openings, presumably their topology is not supported by Assimp");
}
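
The simplified warning condition above is a pure boolean reduction: for an integer n, "n == 1 && n" can only hold when n is 1, and 1 is already non-zero, so the trailing "&& n" never changes the result (likewise for "n == 2 && n"). A minimal check of that equivalence:

#include <cassert>

int main() {
    for (int n = -3; n <= 3; ++n) {
        // "n == 1 && n" is true only for n == 1, where n is already non-zero,
        // so dropping the trailing "&& n" preserves the outcome; same for n == 2.
        assert(((n == 1) && n) == (n == 1));
        assert(((n == 2) && n) == (n == 2));
    }
    return 0;
}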

code/AssetLib/IFC/IFCOpenings.cpp

@@ -1189,20 +1189,9 @@ bool GenerateOpenings(std::vector<TempOpening>& openings,
TempMesh* profile_data = opening.profileMesh.get();
bool is_2d_source = false;
if (opening.profileMesh2D && norm_extrusion_dir.SquareLength() > 0) {
if(std::fabs(norm_extrusion_dir * wall_extrusion_axis_norm) < 0.1) {
// horizontal extrusion
if (std::fabs(norm_extrusion_dir * nor) > 0.9) {
profile_data = opening.profileMesh2D.get();
is_2d_source = true;
}
}
else {
// vertical extrusion
if (std::fabs(norm_extrusion_dir * nor) > 0.9) {
profile_data = opening.profileMesh2D.get();
is_2d_source = true;
}
if (std::fabs(norm_extrusion_dir * nor) > 0.9) {
profile_data = opening.profileMesh2D.get();
is_2d_source = true;
}
}
std::vector<IfcVector3> profile_verts = profile_data->mVerts;
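
What remains above is a single parallelism test: for unit vectors the dot product is the cosine of the enclosed angle, so fabs(norm_extrusion_dir * nor) > 0.9 accepts extrusion directions within roughly 25 degrees of the wall normal, and only then is the flat 2D profile mesh preferred. A self-contained sketch of that test with a hypothetical Vec3 (the real code uses IfcVector3):

#include <cmath>

struct Vec3 { double x, y, z; };

static double dot(const Vec3 &a, const Vec3 &b) {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

static Vec3 normalized(const Vec3 &v) {
    const double len = std::sqrt(dot(v, v));
    return { v.x / len, v.y / len, v.z / len };
}

// |cos| > 0.9 keeps directions within roughly 25 degrees of parallel,
// in either orientation; only then is the 2D profile considered reliable.
static bool nearlyParallel(const Vec3 &a, const Vec3 &b) {
    return std::fabs(dot(normalized(a), normalized(b))) > 0.9;
}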