Merge branch 'master' into issue_1513
commit 07a99e0843
.travis.sh | 15
@@ -43,9 +43,24 @@ if [ $ANDROID ]; then
    ant -v -Dmy.dir=${TRAVIS_BUILD_DIR} -f ${TRAVIS_BUILD_DIR}/port/jassimp/build.xml ndk-jni
fi
if [ "$TRAVIS_OS_NAME" = "linux" ]; then
    if [ $ANALYZE = "ON" ] ; then
        if [ "$CC" = "clang" ]; then
            scan-build cmake -G "Unix Makefiles" -DBUILD_SHARED_LIBS=OFF -DASSIMP_BUILD_TESTS=OFF
            scan-build --status-bugs make -j2
        else
            cppcheck --version
            generate \
            && cppcheck --error-exitcode=1 -j2 -Iinclude -Icode code 2> cppcheck.txt
            if [ -s cppcheck.txt ]; then
                cat cppcheck.txt
                exit 1
            fi
        fi
    else
        generate \
        && make -j4 \
        && sudo make install \
        && sudo ldconfig \
        && (cd test/unit; ../../bin/unit)
    fi
fi
.travis.yml | 18
@@ -4,7 +4,7 @@ language: cpp
cache: ccache

before_install:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get update -qq && sudo apt-get install cmake && sudo apt-get install cmake python3 && sudo apt-get install -qq freeglut3-dev libxmu-dev libxi-dev ; echo -n | openssl s_client -connect scan.coverity.com:443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | sudo tee -a /etc/ssl/certs/ca- ; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then sudo apt-get update -qq && sudo apt-get install cmake cppcheck && sudo apt-get install cmake python3 && sudo apt-get install -qq freeglut3-dev libxmu-dev libxi-dev ; echo -n | openssl s_client -connect scan.coverity.com:443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | sudo tee -a /etc/ssl/certs/ca- ; fi
- 'if [ "$TRAVIS_OS_NAME" = "osx" ]; then
    if brew ls --versions cmake > /dev/null; then
      echo cmake already installed.;

@@ -34,11 +34,8 @@ env:
matrix:
  include:
  - os: linux
    compiler: gcc
    env: DISABLE_EXPORTERS=YES ENABLE_COVERALLS=ON
  - os: linux
    compiler: gcc
    env: SHARED_BUILD=ON
    compiler: clang
    env: ANALYZE=ON
  - os: linux
    compiler: clang
    env: ASAN=ON

@@ -48,6 +45,15 @@ matrix:
  - os: linux
    compiler: clang
    env: SHARED_BUILD=ON
  - os: linux
    compiler: gcc
    env: ANALYZE=ON
  - os: linux
    compiler: gcc
    env: DISABLE_EXPORTERS=YES ENABLE_COVERALLS=ON
  - os: linux
    compiler: gcc
    env: SHARED_BUILD=ON

install:
- if [ $ANDROID ]; then wget -c http://dl.google.com/android/ndk/android-ndk-${PV}-${PLATF}.tar.bz2 && tar xf android-ndk-${PV}-${PLATF}.tar.bz2 ; fi
@@ -281,8 +281,11 @@ size_t AMFImporter::PostprocessHelper_GetTextureID_Or_Create(const std::string&
{
    if(!pID.empty())
    {
        for(size_t idx_target = pOffset, idx_src = 0; idx_target < tex_size; idx_target += pStep, idx_src++)
            converted_texture.Data[idx_target] = src_texture[pSrcTexNum]->Data.at(idx_src);
        for(size_t idx_target = pOffset, idx_src = 0; idx_target < tex_size; idx_target += pStep, idx_src++) {
            CAMFImporter_NodeElement_Texture* tex = src_texture[pSrcTexNum];
            ai_assert(tex);
            converted_texture.Data[idx_target] = tex->Data.at(idx_src);
        }
    }
};// auto CopyTextureData = [&](const size_t pOffset, const size_t pStep, const uint8_t pSrcTexNum) -> void
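Note on the AMFImporter hunk above: the repeated src_texture[pSrcTexNum] dereference inside the copy loop is replaced by a named local that is asserted once before use, the usual way to satisfy a static analyzer's null-dereference warning while documenting the invariant. A minimal sketch of the same pattern, with stand-in types rather than the importer's real classes:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Texture { std::vector<uint8_t> Data; };

    // Copy one source texture into an interleaved destination buffer.
    void CopyTextureData(std::vector<uint8_t>& dst, const std::vector<Texture*>& src,
                         std::size_t offset, std::size_t step, std::size_t srcTexNum) {
        Texture* tex = src[srcTexNum];  // fetch the pointer once instead of per iteration
        assert(tex != nullptr);         // fail loudly in debug builds before dereferencing
        for (std::size_t dstIdx = offset, srcIdx = 0; dstIdx < dst.size(); dstIdx += step, ++srcIdx) {
            dst[dstIdx] = tex->Data.at(srcIdx);
        }
    }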
@@ -1021,6 +1021,7 @@ void ASEImporter::ConvertMeshes(ASE::Mesh& mesh, std::vector<aiMesh*>& avOutMesh

    // convert bones, if existing
    if (!mesh.mBones.empty()) {
        ai_assert(avOutputBones);
        // check whether there is a vertex weight for this vertex index
        if (iIndex2 < mesh.mBoneVertices.size()) {
@@ -171,6 +171,7 @@ inline size_t Write<aiQuaternion>(IOStream * stream, const aiQuaternion& v)
    t += Write<float>(stream,v.x);
    t += Write<float>(stream,v.y);
    t += Write<float>(stream,v.z);
    ai_assert(t == 16);
    return 16;
}
@@ -102,7 +102,7 @@ namespace Assimp {
    offset += Copy(&data[offset], header.size);
    offset += Copy(&data[offset], header.reserved1);
    offset += Copy(&data[offset], header.reserved2);
    offset += Copy(&data[offset], header.offset);
    Copy(&data[offset], header.offset);

    file->Write(data, Header::header_size, 1);
}

@@ -122,7 +122,7 @@ namespace Assimp {
    offset += Copy(&data[offset], dib.x_resolution);
    offset += Copy(&data[offset], dib.y_resolution);
    offset += Copy(&data[offset], dib.nb_colors);
    offset += Copy(&data[offset], dib.nb_important_colors);
    Copy(&data[offset], dib.nb_important_colors);

    file->Write(data, DIB::dib_size, 1);
}
@@ -566,7 +566,6 @@ void Converter::ConvertNodes( uint64_t id, aiNode& parent, const aiMatrix4x4& pa

    if ( !name_carrier ) {
        nodes_chain.push_back( new aiNode( original_name ) );
        name_carrier = nodes_chain.back();
    }

    //setup metadata on newest node
@@ -1499,7 +1499,7 @@ bool TryAddOpenings_Poly2Tri(const std::vector<TempOpening>& openings,const std:

    IfcVector3 wall_extrusion;
    bool do_connections = false, first = true;
    bool first = true;

    try {

@@ -1527,7 +1527,6 @@ bool TryAddOpenings_Poly2Tri(const std::vector<TempOpening>& openings,const std:
    if (first) {
        first = false;
        if (dot > 0.f) {
            do_connections = true;
            wall_extrusion = t.extrusionDir;
            if (is_extruded_side) {
                wall_extrusion = - wall_extrusion;

@@ -1607,44 +1606,6 @@ bool TryAddOpenings_Poly2Tri(const std::vector<TempOpening>& openings,const std:
    old_verts.swap(curmesh.verts);
    old_vertcnt.swap(curmesh.vertcnt);

    // add connection geometry to close the adjacent 'holes' for the openings
    // this should only be done from one side of the wall or the polygons
    // would be emitted twice.
    if (false && do_connections) {

        std::vector<IfcVector3> tmpvec;
        for(ClipperLib::Polygon& opening : holes_union) {

            ai_assert(ClipperLib::Orientation(opening));

            tmpvec.clear();

            for(ClipperLib::IntPoint& point : opening) {

                tmpvec.push_back( minv * IfcVector3(
                    vmin.x + from_int64(point.X) * vmax.x,
                    vmin.y + from_int64(point.Y) * vmax.y,
                    coord));
            }

            for(size_t i = 0, size = tmpvec.size(); i < size; ++i) {
                const size_t next = (i+1)%size;

                curmesh.vertcnt.push_back(4);

                const IfcVector3& in_world = tmpvec[i];
                const IfcVector3& next_world = tmpvec[next];

                // Assumptions: no 'partial' openings, wall thickness roughly the same across the wall
                curmesh.verts.push_back(in_world);
                curmesh.verts.push_back(in_world+wall_extrusion);
                curmesh.verts.push_back(next_world+wall_extrusion);
                curmesh.verts.push_back(next_world);
            }
        }
    }

    std::vector< std::vector<p2t::Point*> > contours;
    for(ClipperLib::ExPolygon& clip : clipped) {
@@ -394,7 +394,7 @@ void IRRImporter::ComputeAnimations(Node* root, aiNode* real, std::vector<aiNode
    angles[1] %= 360;
    angles[2] %= 360;

    if ( bool(angles[0]*angles[1]) && bool(angles[1]*angles[2]) )
    if ( (angles[0]*angles[1]) != 0 && (angles[1]*angles[2]) != 0 )
    {
        FindSuitableMultiple(angles[0]);
        FindSuitableMultiple(angles[1]);
@@ -274,10 +274,6 @@ aiReturn Importer::UnregisterLoader(BaseImporter* pImp)

    if (it != pimpl->mImporter.end()) {
        pimpl->mImporter.erase(it);

        std::set<std::string> st;
        pImp->GetExtensionList(st);

        DefaultLogger::get()->info("Unregistering custom importer: ");
        return AI_SUCCESS;
    }
@@ -223,6 +223,7 @@ float ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int mesh
            iMaxRefTris = std::max(iMaxRefTris,*piCur);
        }
    }
    ai_assert(iMaxRefTris > 0);
    unsigned int* piCandidates = new unsigned int[iMaxRefTris*3];
    unsigned int iCacheMisses = 0;
@@ -446,8 +446,6 @@ void AnimResolver::GetKeys(std::vector<aiVectorKey>& out,

    // Iterate through all three arrays at once - it's tricky, but
    // rather interesting to implement.
    double lasttime = std::min(envl_x->keys[0].time,std::min(envl_y->keys[0].time,envl_z->keys[0].time));

    cur_x = envl_x->keys.begin();
    cur_y = envl_y->keys.begin();
    cur_z = envl_z->keys.begin();

@@ -503,7 +501,7 @@ void AnimResolver::GetKeys(std::vector<aiVectorKey>& out,
            InterpolateTrack(out,fill,(end_y ? (*cur_x) : (*cur_y)).time);
        }
    }
    lasttime = fill.mTime;
    double lasttime = fill.mTime;
    out.push_back(fill);

    if (lasttime >= (*cur_x).time) {
@@ -243,8 +243,6 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    if( !file.get())
        throw DeadlyImportError( "Failed to open NFF file " + pFile + ".");

    unsigned int m = (unsigned int)file->FileSize();

    // allocate storage and copy the contents of the file to a memory buffer
    // (terminate it with zero)
    std::vector<char> mBuffer2;

@@ -469,7 +467,7 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    for (unsigned int a = 0; a < numIdx;++a)
    {
        SkipSpaces(sz,&sz);
        m = ::strtoul10(sz,&sz);
        unsigned int m = ::strtoul10(sz,&sz);
        if (m >= (unsigned int)tempPositions.size())
        {
            DefaultLogger::get()->error("NFF2: Vertex index overflow");

@@ -635,7 +633,7 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    for (std::vector<unsigned int>::const_iterator it = tempIdx.begin(), end = tempIdx.end();
        it != end;++it)
    {
        m = *it;
        unsigned int m = *it;

        // copy colors -vertex color specifications override polygon color specifications
        if (hasColor)

@@ -735,7 +733,7 @@ void NFFImporter::InternReadFile( const std::string& pFile,
        sz = &line[1];out = currentMesh;
    }
    SkipSpaces(sz,&sz);
    m = strtoul10(sz);
    unsigned int m = strtoul10(sz);

    // ---- flip the face order
    out->vertices.resize(out->vertices.size()+m);

@@ -1081,7 +1079,9 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    // generate the camera
    if (hasCam)
    {
        aiNode* nd = *ppcChildren = new aiNode();
        ai_assert(ppcChildren);
        aiNode* nd = new aiNode();
        *ppcChildren = nd;
        nd->mName.Set("<NFF_Camera>");
        nd->mParent = root;

@@ -1105,13 +1105,15 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    // generate light sources
    if (!lights.empty())
    {
        ai_assert(ppcChildren);
        pScene->mNumLights = (unsigned int)lights.size();
        pScene->mLights = new aiLight*[pScene->mNumLights];
        for (unsigned int i = 0; i < pScene->mNumLights;++i,++ppcChildren)
        {
            const Light& l = lights[i];

            aiNode* nd = *ppcChildren = new aiNode();
            aiNode* nd = new aiNode();
            *ppcChildren = nd;
            nd->mParent = root;

            nd->mName.length = ::ai_snprintf(nd->mName.data,1024,"<NFF_Light%u>",i);

@@ -1128,7 +1130,8 @@ void NFFImporter::InternReadFile( const std::string& pFile,
    if (!pScene->mNumMeshes)throw DeadlyImportError("NFF: No meshes loaded");
    pScene->mMeshes = new aiMesh*[pScene->mNumMeshes];
    pScene->mMaterials = new aiMaterial*[pScene->mNumMaterials = pScene->mNumMeshes];
    for (it = meshes.begin(), m = 0; it != end;++it)
    unsigned int m = 0;
    for (it = meshes.begin(); it != end;++it)
    {
        if ((*it).faces.empty())continue;
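Note on the NFF hunks above: chained assignments such as aiNode* nd = *ppcChildren = new aiNode(); are split into separate statements guarded by an assert on the output slot, and the reused counter m is re-declared at its point of use so each loop owns a narrowly scoped index. A small sketch of the same idea, with placeholder names rather than the importer's types (Node and AttachChildren are illustrative only):

    #include <cassert>
    #include <cstddef>

    struct Node { Node* parent = nullptr; };

    // Attach freshly allocated child nodes through an output cursor.
    void AttachChildren(Node** cursor, Node* root, std::size_t count) {
        assert(cursor != nullptr);        // validate the slot before writing through it
        for (std::size_t i = 0; i < count; ++i, ++cursor) {
            Node* nd = new Node();        // allocate first ...
            *cursor = nd;                 // ... then publish, one step per statement
            nd->parent = root;
        }
    }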
@@ -652,6 +652,8 @@ static void setMatrix( aiNode *node, DataArrayList *transformData ) {
        i++;
    }

    ai_assert(i == 16);

    node->mTransformation.a1 = m[ 0 ];
    node->mTransformation.a2 = m[ 4 ];
    node->mTransformation.a3 = m[ 8 ];
@@ -233,10 +233,12 @@ void OptimizeGraphProcess::CollectNewChildren(aiNode* nd, std::list<aiNode*>& no

    nd->mNumChildren = static_cast<unsigned int>(child_nodes.size());

    aiNode** tmp = nd->mChildren;
    for (std::list<aiNode*>::iterator it = child_nodes.begin(); it != child_nodes.end(); ++it) {
        aiNode* node = *tmp++ = *it;
        node->mParent = nd;
    if (nd->mChildren) {
        aiNode** tmp = nd->mChildren;
        for (std::list<aiNode*>::iterator it = child_nodes.begin(); it != child_nodes.end(); ++it) {
            aiNode* node = *tmp++ = *it;
            node->mParent = nd;
        }
    }

    nodes_out += static_cast<unsigned int>(child_nodes.size());
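Note on the OptimizeGraphProcess hunk above: the copy loop is now wrapped in if (nd->mChildren), so the child array is written to only when it was actually allocated. A guarded-copy sketch of the same pattern, using a simplified node type instead of aiNode:

    #include <list>

    struct SimpleNode {
        unsigned int numChildren = 0;
        SimpleNode** children = nullptr;   // may legitimately be null for an empty node
        SimpleNode* parent = nullptr;
    };

    // Copy collected children into the node's array, but only if the array exists.
    void AssignChildren(SimpleNode* nd, std::list<SimpleNode*>& childNodes) {
        nd->numChildren = static_cast<unsigned int>(childNodes.size());
        if (nd->children) {                // guard the raw-pointer writes
            SimpleNode** tmp = nd->children;
            for (SimpleNode* child : childNodes) {
                *tmp++ = child;
                child->parent = nd;
            }
        }
    }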
@@ -671,7 +671,6 @@ bool PLY::ElementInstanceList::ParseInstanceList(
    PLYImporter* loader)
{
    ai_assert(NULL != pcElement);
    const char* pCur = (const char*)&buffer[0];

    // parse all elements
    if (EEST_INVALID == pcElement->eSemantic || pcElement->alProperties.empty())

@@ -683,11 +682,11 @@ bool PLY::ElementInstanceList::ParseInstanceList(
            PLY::DOM::SkipComments(buffer);
            PLY::DOM::SkipLine(buffer);
            streamBuffer.getNextLine(buffer);
            pCur = (buffer.empty()) ? NULL : (const char*)&buffer[0];
        }
    }
    else
    {
        const char* pCur = (const char*)&buffer[0];
        // be sure to have enough storage
        for (unsigned int i = 0; i < pcElement->NumOccur; ++i)
        {
@@ -260,7 +260,7 @@ void RAWImporter::InternReadFile( const std::string& pFile,
            node = *cc = new aiNode();
            node->mParent = pScene->mRootNode;
        }
        else node = *cc;++cc;
        else node = *cc;
        node->mName.Set(outGroup.name);

        // add all meshes
@@ -145,6 +145,7 @@ void RemoveRedundantMatsProcess::Execute( aiScene* pScene)
            if (!abReferenced[i]) {
                ++unreferencedRemoved;
                delete pScene->mMaterials[i];
                pScene->mMaterials[i] = nullptr;
                continue;
            }

@@ -158,6 +159,7 @@ void RemoveRedundantMatsProcess::Execute( aiScene* pScene)
                    me = 0;
                    aiMappingTable[i] = aiMappingTable[a];
                    delete pScene->mMaterials[i];
                    pScene->mMaterials[i] = nullptr;
                    break;
                }
            }

@@ -169,6 +171,7 @@ void RemoveRedundantMatsProcess::Execute( aiScene* pScene)
        // If the new material count differs from the original,
        // we need to rebuild the material list and remap mesh material indexes.
        if (iNewNum != pScene->mNumMaterials) {
            ai_assert(iNewNum > 0);
            aiMaterial** ppcMaterials = new aiMaterial*[iNewNum];
            ::memset(ppcMaterials,0,sizeof(void*)*iNewNum);
            for (unsigned int p = 0; p < pScene->mNumMaterials;++p)
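Note on the RemoveRedundantMatsProcess hunks above: each delete of a material is now followed by setting the slot to nullptr, so the stale pointer cannot be read or freed a second time while the array is being compacted (deleting a null pointer is a no-op). The delete-then-null idiom in isolation, with a stand-in Material type rather than the aiScene layout:

    #include <cstddef>

    struct Material { /* payload omitted */ };

    // Drop one entry from an owning raw-pointer array without leaving a dangling pointer.
    void ReleaseMaterial(Material** materials, std::size_t i) {
        delete materials[i];
        materials[i] = nullptr;   // later passes can safely test and skip this slot
    }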
@@ -902,6 +902,7 @@ void SIBImporter::InternReadFile(const std::string& pFile,
    // Add nodes for each object.
    for (size_t n=0;n<sib.objs.size();n++)
    {
        ai_assert(root->mChildren);
        SIBObject& obj = sib.objs[n];
        aiNode* node = new aiNode;
        root->mChildren[childIdx++] = node;

@@ -926,6 +927,7 @@ void SIBImporter::InternReadFile(const std::string& pFile,
    // (no transformation as the light is already in world space)
    for (size_t n=0;n<sib.lights.size();n++)
    {
        ai_assert(root->mChildren);
        aiLight* light = sib.lights[n];
        if ( nullptr != light ) {
            aiNode* node = new aiNode;
@@ -141,9 +141,6 @@ void TerragenImporter::InternReadFile( const std::string& pFile,
        throw DeadlyImportError( "TER: Magic string \'TERRAIN\' not found" );

    unsigned int x = 0,y = 0,mode = 0;
    float rad = 6370.f;
    (void)rad;

    aiNode* root = pScene->mRootNode = new aiNode();
    root->mName.Set("<TERRAGEN.TERRAIN>");

@@ -187,7 +184,7 @@ void TerragenImporter::InternReadFile( const std::string& pFile,
        // mapping == 1: earth radius
        else if (!::strncmp(head,AI_TERR_CHUNK_CRAD,4))
        {
            rad = reader.GetF4();
            reader.GetF4();
        }
        // mapping mode
        else if (!::strncmp(head,AI_TERR_CHUNK_CRVM,4))
@@ -48,6 +48,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "X3DImporter.hpp"

// Header files, Assimp.
#include <assimp/ai_assert.h>
#include "StandardShapes.h"
#include "StringUtils.h"

@@ -357,6 +358,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, tnemesh.CoordIndex, tnemesh.ColorIndex, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, tnemesh.ColorPerVertex);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)

@@ -389,6 +391,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, tnemesh.CoordIndex, tnemesh.ColorIndex, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, tnemesh.ColorPerVertex);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)

@@ -446,6 +449,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, true);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)

@@ -475,6 +479,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, true);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)

@@ -550,6 +555,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, tnemesh.ColorPerVertex);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)

@@ -584,6 +590,7 @@ void X3DImporter::Postprocess_BuildMesh(const CX3DImporter_NodeElement& pNodeEle
    // copy additional information from children
    for(std::list<CX3DImporter_NodeElement*>::iterator ch_it = tnemesh.Child.begin(); ch_it != tnemesh.Child.end(); ch_it++)
    {
        ai_assert(*pMesh);
        if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_Color)
            MeshGeometry_AddColor(**pMesh, ((CX3DImporter_NodeElement_Color*)*ch_it)->Value, tnemesh.ColorPerVertex);
        else if((*ch_it)->Type == CX3DImporter_NodeElement::ENET_ColorRGBA)
@@ -360,7 +360,7 @@ inline const char* fast_atoreal_move(const char* c, Real& out, bool check_comma
// The same but more human.
inline ai_real fast_atof(const char* c)
{
    ai_real ret;
    ai_real ret(0.0);
    fast_atoreal_move<ai_real>(c, ret);
    return ret;
}

@@ -368,7 +368,7 @@ inline ai_real fast_atof(const char* c)

inline ai_real fast_atof( const char* c, const char** cout)
{
    ai_real ret;
    ai_real ret(0.0);
    *cout = fast_atoreal_move<ai_real>(c, ret);

    return ret;

@@ -376,7 +376,7 @@ inline ai_real fast_atof( const char* c, const char** cout)

inline ai_real fast_atof( const char** inout)
{
    ai_real ret;
    ai_real ret(0.0);
    *inout = fast_atoreal_move<ai_real>(*inout, ret);

    return ret;
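Note on the fast_atof hunks above: ai_real ret; becomes ai_real ret(0.0);. fast_atoreal_move writes the result in normal operation, but value-initializing the local silences uninitialized-variable warnings and guarantees a defined return value even if parsing stops immediately. A sketch of the same defensive pattern, where parse_real is a stand-in for fast_atoreal_move rather than the real function:

    #include <cstdlib>

    // Stand-in parser: writes the parsed value to 'out' and returns the advanced pointer.
    inline const char* parse_real(const char* c, double& out) {
        char* end = nullptr;
        out = std::strtod(c, &end);
        return end;
    }

    inline double to_real(const char* c) {
        double ret(0.0);    // defined value even for empty or malformed input
        parse_real(c, ret);
        return ret;
    }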
@@ -204,7 +204,7 @@ unsigned int LazyDict<T>::Remove(const char* id)
        throw DeadlyExportError("GLTF: Object with id \"" + std::string(id) + "\" is not found");
    }

    const int index = it->second;
    const unsigned int index = it->second;

    mAsset.mUsedIds[id] = false;
    mObjsById.erase(id);
@@ -820,6 +820,7 @@ namespace o3dgc
            for (unsigned n = 0; n < data_symbols; n++)
                total_count += (symbol_count[n] = (symbol_count[n] + 1) >> 1);
        }
        assert(total_count > 0);
        // compute cumulative distribution, decoder table
        unsigned k, sum = 0, s = 0;
        unsigned scale = 0x80000000U / total_count;

@@ -830,6 +831,7 @@ namespace o3dgc
                sum += symbol_count[k];
            }
            else {
                assert(decoder_table);
                for (k = 0; k < data_symbols; k++) {
                    distribution[k] = (scale * sum) >> (31 - DM__LengthShift);
                    sum += symbol_count[k];
@@ -43,6 +43,7 @@
#include <vector>
#include <algorithm>
#include <stdexcept>
#include <cassert>
#include <cstring>
#include <cstdlib>
#include <ostream>

@@ -2365,6 +2366,7 @@ void Clipper::ProcessHorizontal(TEdge *horzEdge)
        //ok, so far it looks like we're still in range of the horizontal edge
        if ( e->xcurr == horzEdge->xtop && !eMaxPair )
        {
            assert(horzEdge->nextInLML);
            if (SlopesEqual(*e, *horzEdge->nextInLML, m_UseFullRange))
            {
                //if output polygons share an edge, they'll need joining later ...

@@ -2429,6 +2431,7 @@ void Clipper::ProcessHorizontal(TEdge *horzEdge)
    if ( horzEdge->outIdx >= 0 )
        IntersectEdges( horzEdge, eMaxPair,
            IntPoint(horzEdge->xtop, horzEdge->ycurr), ipBoth);
    assert(eMaxPair);
    if (eMaxPair->outIdx >= 0) throw clipperException("ProcessHorizontal error");
    DeleteFromAEL(eMaxPair);
    DeleteFromAEL(horzEdge);
@@ -135,10 +135,9 @@ bool OpenDDLExport::writeToStream( const std::string &statement ) {
}

bool OpenDDLExport::writeNode( DDLNode *node, std::string &statement ) {
    bool success( true );
    writeNodeHeader( node, statement );
    if (node->hasProperties()) {
        success |= writeProperties( node, statement );
        writeProperties( node, statement );
    }
    writeLineEnd( statement );

@@ -360,11 +359,10 @@ bool OpenDDLExport::writeValueArray( DataArrayList *al, std::string &statement )
    }

    DataArrayList *nextDataArrayList = al ;
    Value *nextValue( nextDataArrayList->m_dataList );
    while (ddl_nullptr != nextDataArrayList) {
    if (ddl_nullptr != nextDataArrayList) {
        statement += "{ ";
        nextValue = nextDataArrayList->m_dataList;
        Value *nextValue( nextDataArrayList->m_dataList );
        size_t idx( 0 );
        while (ddl_nullptr != nextValue) {
            if (idx > 0) {
@@ -27,6 +27,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <iostream>
#include <sstream>
#include <algorithm>
#include <memory>
#include <math.h>

#ifdef _WIN32

@@ -275,22 +276,24 @@ char *OpenDDLParser::parseHeader( char *in, char *end ) {
    }
    delete id;

    Name *name(ddl_nullptr);
    in = OpenDDLParser::parseName(in, end, &name);
    Name *name_(ddl_nullptr);
    in = OpenDDLParser::parseName(in, end, &name_);
    std::unique_ptr<Name> name(name_);
    if( ddl_nullptr != name && ddl_nullptr != node ) {
        const std::string nodeName( name->m_id->m_buffer );
        node->setName( nodeName );
        delete name;
    }

    Property *first(ddl_nullptr);
    std::unique_ptr<Property> first;
    in = lookForNextToken(in, end);
    if (*in == Grammar::OpenPropertyToken[0]) {
        in++;
        Property *prop(ddl_nullptr), *prev(ddl_nullptr);
        std::unique_ptr<Property> prop, prev;
        while (*in != Grammar::ClosePropertyToken[0] && in != end) {
            in = OpenDDLParser::parseProperty(in, end, &prop);
            Property *prop_(ddl_nullptr);
            in = OpenDDLParser::parseProperty(in, end, &prop_);
            prop.reset(prop_);
            in = lookForNextToken(in, end);

            if (*in != Grammar::CommaSeparator[0] && *in != Grammar::ClosePropertyToken[0]) {

@@ -300,20 +303,20 @@ char *OpenDDLParser::parseHeader( char *in, char *end ) {

            if (ddl_nullptr != prop && *in != Grammar::CommaSeparator[0]) {
                if (ddl_nullptr == first) {
                    first = prop;
                    first = std::move(prop);
                }
                if (ddl_nullptr != prev) {
                    prev->m_next = prop;
                    prev->m_next = prop.release();
                }
                prev = prop;
                prev = std::move(prop);
            }
        }
        ++in;
    }

    // set the properties
    if (ddl_nullptr != first && ddl_nullptr != node) {
        node->setProperties(first);
    if (first && ddl_nullptr != node) {
        node->setProperties(first.release());
    }
}
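Note on the parseHeader hunks above: the heap-allocated Name and Property objects are placed into std::unique_ptr as soon as they are produced, and ownership is handed back to the raw-pointer node API with release(). That way the most recently parsed object is freed automatically on early returns and error paths without changing the parser's interfaces. A condensed sketch of the ownership pattern, with placeholder types and functions (Item, parse_item, Sink are not the parser's real names):

    #include <memory>

    struct Item { Item* next = nullptr; };

    // Placeholder producer: the real parser fills *out from its input stream.
    void parse_item(Item** out) { *out = new Item(); }

    struct Sink {
        Item* head = nullptr;
        void adopt(Item* list) { head = list; }  // takes ownership of the raw list
    };

    void build_list(Sink& sink, int count) {
        std::unique_ptr<Item> first;
        Item* prev = nullptr;
        for (int i = 0; i < count; ++i) {
            Item* raw = nullptr;
            parse_item(&raw);
            std::unique_ptr<Item> item(raw); // owned immediately; freed if we bail out early
            if (!first) {
                prev = item.get();
                first = std::move(item);     // keep ownership of the list head
            } else {
                prev->next = item.release(); // link and transfer ownership into the list
                prev = prev->next;
            }
        }
        sink.adopt(first.release());         // hand the whole list to the raw-pointer API
    }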
@@ -339,7 +342,6 @@ char *OpenDDLParser::parseStructure( char *in, char *end ) {
    } else {
        ++in;
        logInvalidTokenError( in, std::string( Grammar::OpenBracketToken ), m_logCallback );
        error = true;
        return ddl_nullptr;
    }
    in = lookForNextToken( in, end );
@@ -204,7 +204,7 @@ local int unzlocal_getShort (pzlib_filefunc_def,filestream,pX)
    uLong *pX;
{
    uLong x ;
    int i;
    int i = 0;
    int err;

    err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i);

@@ -232,7 +232,7 @@ local int unzlocal_getLong (pzlib_filefunc_def,filestream,pX)
    uLong *pX;
{
    uLong x ;
    int i;
    int i = 0;
    int err;

    err = unzlocal_getByte(pzlib_filefunc_def,filestream,&i);

@@ -725,19 +725,15 @@ local int unzlocal_GetCurrentFileInfoInternal (file,

    if (lSeek!=0)
    {
        if (ZSEEK(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0)
            lSeek=0;
        else
        if (ZSEEK(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)!=0)
            err=UNZ_ERRNO;
    }
    if ((file_info.size_file_comment>0) && (commentBufferSize>0))
        if (ZREAD(s->z_filefunc, s->filestream,szComment,uSizeRead)!=uSizeRead)
            err=UNZ_ERRNO;
        lSeek+=file_info.size_file_comment - uSizeRead;
    }
    else
    {
        lSeek+=file_info.size_file_comment;
    }

    if ((err==UNZ_OK) && (pfile_info!=NULL))

@@ -1129,7 +1125,7 @@ extern int ZEXPORT unzOpenCurrentFile3 (file, method, level, raw, password)

    if ((s->cur_file_info.compression_method!=0) &&
        (s->cur_file_info.compression_method!=Z_DEFLATED))
        err=UNZ_BADZIPFILE;
        return UNZ_BADZIPFILE;

    pfile_in_zip_read_info->crc32_wait=s->cur_file_info.crc;
    pfile_in_zip_read_info->crc32=0;
@@ -198,6 +198,7 @@ inline uint32_t Write<aiQuaternion>(const aiQuaternion& v)
    t += Write<float>(v.x);
    t += Write<float>(v.y);
    t += Write<float>(v.z);
    ai_assert(t == 16);
    return 16;
}