pull/536/head
Kim Kulling 2015-04-14 12:04:47 +02:00
commit 56e8dc5a43
14 changed files with 17727 additions and 8097 deletions

View File

@@ -15,12 +15,12 @@ env:
matrix:
- LINUX=1 TRAVIS_NO_EXPORT=YES
- LINUX=1 TRAVIS_NO_EXPORT=NO
- LINUX=1 TRAVIS_STATIC_BUILD=ON
- LINUX=1 TRAVIS_STATIC_BUILD=OFF
- LINUX=1 SHARED_BUILD=ON
- LINUX=1 SHARED_BUILD=OFF
- WINDOWS=1 TRAVIS_NO_EXPORT=YES
- WINDOWS=1 TRAVIS_NO_EXPORT=NO
- WINDOWS=1 TRAVIS_STATIC_BUILD=ON
- WINDOWS=1 TRAVIS_STATIC_BUILD=OFF
- WINDOWS=1 SHARED_BUILD=ON
- WINDOWS=1 SHARED_BUILD=OFF
- ANDROID=1
language: cpp
@@ -35,10 +35,18 @@ install:
script:
- if [ $ANDROID ]; then
ant -v -Dmy.dir=${TRAVIS_BUILD_DIR} -f ${TRAVIS_BUILD_DIR}/port/jassimp/build.xml ndk-jni ;
elif [ $WINDOWS -a $CC = "gcc" ]; then
sudo sh -c "wget http://source.winehq.org/git/wine.git/commitdiff_plain/86781a6a524fa336f893ffd0a87373ffd306913c?hp=076edfe9d4b6cd39b6cf41b9f1d3e18688cc8673 -O - | patch -p 1 -d /usr/x86_64-w64-mingw32" ;
sudo sh -c "wget https://www.winehq.org/pipermail/wine-patches/2012-February/111438.html -O - | patch -p 1 -d /usr/x86_64-w64-mingw32" ;
cmake -G "Unix Makefiles" -DASSIMP_NO_EXPORT=$TRAVIS_NO_EXPORT -DBUILD_SHARED_LIBS=$SHARED_BUILD -DCMAKE_TOOLCHAIN_FILE=cmake-modules/MinGW_x86_64.cmake ;
cmake --build . ;
make install ;
elif [ $WINDOWS ]; then
echo "Skip compile with non-gcc setting." ;
elif [ $RESERVED ]; then
echo "Reserved condition" ;
else
cmake -G "Unix Makefiles" -DASSIMP_ENABLE_BOOST_WORKAROUND=YES -DASSIMP_NO_EXPORT=$TRAVIS_NO_EXPORT -STATIC_BUILD=$TRAVIS_STATIC_BUILD ;
cmake -G "Unix Makefiles" -DASSIMP_ENABLE_BOOST_WORKAROUND=YES -DASSIMP_NO_EXPORT=$TRAVIS_NO_EXPORT -DBUILD_SHARED_LIBS=$SHARED_BUILD ;
make ;
sudo make install ;
sudo ldconfig ;

View File

@@ -1,6 +1,12 @@
cmake_minimum_required( VERSION 2.8 )
PROJECT( Assimp )
option(BUILD_SHARED_LIBS "Build package with shared libraries." ON)
if(NOT BUILD_SHARED_LIBS)
#set(CMAKE_EXE_LINKER_FLAGS "-static")
set(LINK_SEARCH_START_STATIC TRUE)
endif(NOT BUILD_SHARED_LIBS)
# Define here the needed parameters
set (ASSIMP_VERSION_MAJOR 3)
set (ASSIMP_VERSION_MINOR 1)
@@ -88,9 +94,6 @@ SET( ASSIMP_BIN_INSTALL_DIR "bin" CACHE PATH
SET(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Debug Postfix for lib, samples and tools")
# Allow the user to build a shared or static library
option ( BUILD_SHARED_LIBS "Build a shared version of the library" ON )
# Only generate this target if no higher-level project already has
IF (NOT TARGET uninstall)
# add make uninstall capability

View File

@@ -0,0 +1,16 @@
# this one sets CMake internals to cross-compile (in theory)
SET(CMAKE_SYSTEM_NAME Windows)
# the minimalistic settings
SET(CMAKE_C_COMPILER "/usr/bin/x86_64-w64-mingw32-gcc")
SET(CMAKE_CXX_COMPILER "/usr/bin/x86_64-w64-mingw32-g++")
SET(CMAKE_RC_COMPILER "/usr/bin/x86_64-w64-mingw32-windres")
# where is the target (so called staging) environment
SET(CMAKE_FIND_ROOT_PATH /usr/x86_64-w64-mingw32)
# search for programs in the build host directories (default BOTH)
#SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

View File

@@ -148,13 +148,17 @@ void COBImporter::InternReadFile( const std::string& pFile,
}
DefaultLogger::get()->info("File format tag: "+std::string(head+9,6));
void (COBImporter::* load)(Scene&,StreamReaderLE*)= head[15]=='A'?&COBImporter::ReadAsciiFile:&COBImporter::ReadBinaryFile;
if (head[16]!='L') {
ThrowException("File is big-endian, which is not supported");
}
// load data into intermediate structures
(this->*load)(scene,stream.get());
if (head[15]=='A') {
ReadAsciiFile(scene, stream.get());
}
else {
ReadBinaryFile(scene, stream.get());
}
if(scene.nodes.empty()) {
ThrowException("No nodes loaded");
}
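The line removed above used C++'s pointer-to-member-function syntax to pick a loader at runtime. For readers unfamiliar with that idiom, a minimal self-contained sketch (hypothetical Loader type, not Assimp's actual importer):

    #include <iostream>

    // Hypothetical stand-in for COBImporter, illustrating only the
    // pointer-to-member dispatch that the diff replaces with if/else.
    class Loader {
    public:
        void Run(char tag) {
            // Select a member function at runtime, then call it through 'this'.
            void (Loader::*load)() = (tag == 'A') ? &Loader::ReadAscii
                                                  : &Loader::ReadBinary;
            (this->*load)();
        }
    private:
        void ReadAscii()  { std::cout << "ascii\n"; }
        void ReadBinary() { std::cout << "binary\n"; }
    };

    int main() {
        Loader l;
        l.Run('A'); // prints "ascii"
        l.Run('B'); // prints "binary"
    }

The plain if/else that replaces it expresses the same dispatch while reading more clearly.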

View File

@@ -522,43 +522,23 @@ IfcMatrix3 DerivePlaneCoordinateSpace(const TempMesh& curmesh, bool& ok, IfcVect
return m;
}
// ------------------------------------------------------------------------------------------------
void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& result,
ConversionData& conv, bool collect_openings)
// Extrudes the given polygon along the direction, converts it into an opening or applies all openings as necessary.
void ProcessExtrudedArea(const IfcExtrudedAreaSolid& solid, const TempMesh& curve,
const IfcVector3& extrusionDir, TempMesh& result, ConversionData &conv, bool collect_openings)
{
TempMesh meshout;
// First read the profile description
if(!ProcessProfile(*solid.SweptArea,meshout,conv) || meshout.verts.size()<=1) {
return;
}
IfcVector3 dir;
ConvertDirection(dir,solid.ExtrudedDirection);
dir *= solid.Depth; /*
if(conv.collect_openings && !conv.apply_openings) {
dir *= 1000.0;
} */
// Outline: assuming that `meshout.verts` is now a list of vertex points forming
// the underlying profile, extrude along the given axis, forming new
// triangles.
std::vector<IfcVector3>& in = meshout.verts;
const size_t size=in.size();
const bool has_area = solid.SweptArea->ProfileType == "AREA" && size>2;
if(solid.Depth < 1e-6) {
if(has_area) {
result = meshout;
// Outline: 'curve' is now a list of vertex points forming the underlying profile, extrude along the given axis,
// forming new triangles.
const bool has_area = solid.SweptArea->ProfileType == "AREA" && curve.verts.size() > 2;
if( solid.Depth < 1e-6 ) {
if( has_area ) {
result.Append(curve);
}
return;
}
result.verts.reserve(size*(has_area?4:2));
result.vertcnt.reserve(meshout.vertcnt.size()+2);
result.verts.reserve(curve.verts.size()*(has_area ? 4 : 2));
result.vertcnt.reserve(curve.verts.size() + 2);
std::vector<IfcVector3> in = curve.verts;
// First step: transform all vertices into the target coordinate space
IfcMatrix4 trafo;
@@ -566,7 +546,7 @@ void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& resul
IfcVector3 vmin, vmax;
MinMaxChooser<IfcVector3>()(vmin, vmax);
BOOST_FOREACH(IfcVector3& v,in) {
BOOST_FOREACH(IfcVector3& v, in) {
v *= trafo;
vmin = std::min(vmin, v);
@@ -575,93 +555,91 @@ void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& resul
vmax -= vmin;
const IfcFloat diag = vmax.Length();
IfcVector3 min = in[0];
dir *= IfcMatrix3(trafo);
IfcVector3 dir = IfcMatrix3(trafo) * extrusionDir;
// reverse profile polygon if it's winded in the wrong direction in relation to the extrusion direction
IfcVector3 profileNormal = TempMesh::ComputePolygonNormal( in.data(), in.size());
IfcVector3 profileNormal = TempMesh::ComputePolygonNormal(in.data(), in.size());
if( profileNormal * dir < 0.0 )
std::reverse( in.begin(), in.end());
std::reverse(in.begin(), in.end());
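The check above reverses the profile if its normal opposes the extrusion direction. A minimal sketch of that test, using Newell's method for the polygon normal (simplified Vec3 type assumed here; Assimp's real code uses TempMesh::ComputePolygonNormal and IfcVector3):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    struct Vec3 { double x, y, z; };

    static double Dot(const Vec3& a, const Vec3& b) {
        return a.x*b.x + a.y*b.y + a.z*b.z;
    }

    // Newell's method: a robust normal for arbitrary planar polygons.
    static Vec3 PolygonNormal(const std::vector<Vec3>& poly) {
        Vec3 n{0.0, 0.0, 0.0};
        for (size_t i = 0; i < poly.size(); ++i) {
            const Vec3& a = poly[i];
            const Vec3& b = poly[(i + 1) % poly.size()];
            n.x += (a.y - b.y) * (a.z + b.z);
            n.y += (a.z - b.z) * (a.x + b.x);
            n.z += (a.x - b.x) * (a.y + b.y);
        }
        const double len = std::sqrt(Dot(n, n));
        if (len > 0.0) { n.x /= len; n.y /= len; n.z /= len; }
        return n;
    }

    // Mirrors the 'profileNormal * dir < 0.0' test above.
    static void FixWinding(std::vector<Vec3>& profile, const Vec3& dir) {
        if (Dot(PolygonNormal(profile), dir) < 0.0)
            std::reverse(profile.begin(), profile.end());
    }

    int main() {
        std::vector<Vec3> square = {{0,0,0}, {1,0,0}, {1,1,0}, {0,1,0}}; // CCW, normal +z
        FixWinding(square, Vec3{0, 0, -1}); // extruding along -z: profile gets reversed
    }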
std::vector<IfcVector3> nors;
const bool openings = !!conv.apply_openings && conv.apply_openings->size();
// Compute the normal vectors for all opening polygons as a prerequisite
// to TryAddOpenings_Poly2Tri()
// XXX this belongs into the aforementioned function
if (openings) {
if( openings ) {
if (!conv.settings.useCustomTriangulation) {
if( !conv.settings.useCustomTriangulation ) {
// it is essential to apply the openings in the correct spatial order. The direction
// doesn't matter, but we would screw up if we started with e.g. a door in between
// two windows.
std::sort(conv.apply_openings->begin(),conv.apply_openings->end(),
TempOpening::DistanceSorter(min));
std::sort(conv.apply_openings->begin(), conv.apply_openings->end(), TempOpening::DistanceSorter(in[0]));
}
nors.reserve(conv.apply_openings->size());
BOOST_FOREACH(TempOpening& t,*conv.apply_openings) {
BOOST_FOREACH(TempOpening& t, *conv.apply_openings) {
TempMesh& bounds = *t.profileMesh.get();
if (bounds.verts.size() <= 2) {
if( bounds.verts.size() <= 2 ) {
nors.push_back(IfcVector3());
continue;
}
nors.push_back(((bounds.verts[2]-bounds.verts[0])^(bounds.verts[1]-bounds.verts[0]) ).Normalize());
nors.push_back(((bounds.verts[2] - bounds.verts[0]) ^ (bounds.verts[1] - bounds.verts[0])).Normalize());
}
}
TempMesh temp;
TempMesh& curmesh = openings ? temp : result;
std::vector<IfcVector3>& out = curmesh.verts;
size_t sides_with_openings = 0;
for(size_t i = 0; i < size; ++i) {
const size_t next = (i+1)%size;
for( size_t i = 0; i < in.size(); ++i ) {
const size_t next = (i + 1) % in.size();
curmesh.vertcnt.push_back(4);
out.push_back(in[i]);
out.push_back(in[next]);
out.push_back(in[next]+dir);
out.push_back(in[i]+dir);
out.push_back(in[next] + dir);
out.push_back(in[i] + dir);
if(openings) {
if((in[i]-in[next]).Length() > diag * 0.1 && GenerateOpenings(*conv.apply_openings,nors,temp,true, true, dir)) {
if( openings ) {
if( (in[i] - in[next]).Length() > diag * 0.1 && GenerateOpenings(*conv.apply_openings, nors, temp, true, true, dir) ) {
++sides_with_openings;
}
result.Append(temp);
temp.Clear();
}
}
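Stripped of the opening handling, the loop above amounts to this sketch: one quad per profile edge, joining the base profile to its copy shifted by the extrusion vector (simplified Vec3 type assumed):

    #include <vector>

    struct Vec3 { double x, y, z; };

    static Vec3 Add(const Vec3& a, const Vec3& b) {
        return Vec3{a.x + b.x, a.y + b.y, a.z + b.z};
    }

    // Emit one quad per edge of the closed profile 'in', extruded along 'dir'.
    static void ExtrudeSideWalls(const std::vector<Vec3>& in, const Vec3& dir,
                                 std::vector<Vec3>& out,
                                 std::vector<unsigned>& vertcnt) {
        for (size_t i = 0; i < in.size(); ++i) {
            const size_t next = (i + 1) % in.size();
            vertcnt.push_back(4);            // each side face is a quad
            out.push_back(in[i]);
            out.push_back(in[next]);
            out.push_back(Add(in[next], dir));
            out.push_back(Add(in[i], dir));
        }
    }

    int main() {
        std::vector<Vec3> profile = {{0,0,0}, {1,0,0}, {1,1,0}, {0,1,0}};
        std::vector<Vec3> out;
        std::vector<unsigned> vertcnt;
        ExtrudeSideWalls(profile, Vec3{0, 0, 2}, out, vertcnt); // 4 quads, 16 verts
    }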
if(openings) {
if( openings ) {
BOOST_FOREACH(TempOpening& opening, *conv.apply_openings) {
if (!opening.wallPoints.empty()) {
if( !opening.wallPoints.empty() ) {
IFCImporter::LogError("failed to generate all window caps");
}
opening.wallPoints.clear();
}
}
size_t sides_with_v_openings = 0;
if(has_area) {
for(size_t n = 0; n < 2; ++n) {
size_t sides_with_v_openings = 0;
if( has_area ) {
for( size_t n = 0; n < 2; ++n ) {
if( n > 0 ) {
for(size_t i = 0; i < size; ++i )
out.push_back(in[i]+dir);
} else {
for(size_t i = size; i--; )
for( size_t i = 0; i < in.size(); ++i )
out.push_back(in[i] + dir);
}
else {
for( size_t i = in.size(); i--; )
out.push_back(in[i]);
}
curmesh.vertcnt.push_back(size);
if(openings && size > 2) {
if(GenerateOpenings(*conv.apply_openings,nors,temp,true, true, dir)) {
curmesh.vertcnt.push_back(in.size());
if( openings && in.size() > 2 ) {
if( GenerateOpenings(*conv.apply_openings, nors, temp, true, true, dir) ) {
++sides_with_v_openings;
}
@@ -671,7 +649,7 @@ void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& resul
}
}
if(openings && ((sides_with_openings == 1 && sides_with_openings) || (sides_with_v_openings == 2 && sides_with_v_openings))) {
if( openings && ((sides_with_openings == 1 && sides_with_openings) || (sides_with_v_openings == 2 && sides_with_v_openings)) ) {
IFCImporter::LogWarn("failed to resolve all openings, presumably their topology is not supported by Assimp");
}
@@ -679,17 +657,58 @@ void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& resul
// If this is an opening element, store both the extruded mesh and the 2D profile mesh
// it was created from. Return an empty mesh to the caller.
if(collect_openings && !result.IsEmpty()) {
if( collect_openings && !result.IsEmpty() ) {
ai_assert(conv.collect_openings);
boost::shared_ptr<TempMesh> profile = boost::shared_ptr<TempMesh>(new TempMesh());
profile->Swap(result);
boost::shared_ptr<TempMesh> profile2D = boost::shared_ptr<TempMesh>(new TempMesh());
profile2D->Swap(meshout);
conv.collect_openings->push_back(TempOpening(&solid,dir,profile, profile2D));
profile2D->verts.insert(profile2D->verts.end(), in.begin(), in.end());
profile2D->vertcnt.push_back(in.size());
conv.collect_openings->push_back(TempOpening(&solid, dir, profile, profile2D));
ai_assert(result.IsEmpty());
}
}
}
// ------------------------------------------------------------------------------------------------
void ProcessExtrudedAreaSolid(const IfcExtrudedAreaSolid& solid, TempMesh& result,
ConversionData& conv, bool collect_openings)
{
TempMesh meshout;
// First read the profile description.
if(!ProcessProfile(*solid.SweptArea,meshout,conv) || meshout.verts.size()<=1) {
return;
}
IfcVector3 dir;
ConvertDirection(dir,solid.ExtrudedDirection);
dir *= solid.Depth;
// Some profiles bring their own holes, for which we need to provide a container. This all is somewhat backwards,
// and there are still many corner cases uncovered - we really need a generic solution to all of this hole carving.
std::vector<TempOpening> fisherPriceMyFirstOpenings;
std::vector<TempOpening>* oldApplyOpenings = conv.apply_openings;
if( const IfcArbitraryProfileDefWithVoids* const cprofile = solid.SweptArea->ToPtr<IfcArbitraryProfileDefWithVoids>() ) {
if( !cprofile->InnerCurves.empty() ) {
// read all inner curves and extrude them to form proper openings.
std::vector<TempOpening>* oldCollectOpenings = conv.collect_openings;
conv.collect_openings = &fisherPriceMyFirstOpenings;
BOOST_FOREACH(const IfcCurve* curve, cprofile->InnerCurves) {
TempMesh curveMesh, tempMesh;
ProcessCurve(*curve, curveMesh, conv);
ProcessExtrudedArea(solid, curveMesh, dir, tempMesh, conv, true);
}
// and then apply those to the geometry we're about to generate
conv.apply_openings = conv.collect_openings;
conv.collect_openings = oldCollectOpenings;
}
}
ProcessExtrudedArea(solid, meshout, dir, result, conv, collect_openings);
conv.apply_openings = oldApplyOpenings;
}
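The wrapper above temporarily redirects the conversion context's opening collectors while the inner voids are extruded, then restores them. The underlying save-redirect-restore pattern, as a minimal sketch with hypothetical Context/Opening types (not Assimp's ConversionData):

    #include <vector>

    struct Opening {};
    struct Context {
        std::vector<Opening>* collect_openings = nullptr;
        std::vector<Opening>* apply_openings = nullptr;
    };

    void ExtrudeWithVoids(Context& conv) {
        std::vector<Opening> innerOpenings;
        std::vector<Opening>* oldCollect = conv.collect_openings;
        std::vector<Opening>* oldApply = conv.apply_openings;
        // Redirect: nested extrusions of the inner curves deposit their
        // results into our local container instead of the caller's.
        conv.collect_openings = &innerOpenings;
        // ... extrude each inner curve here ...
        // Apply the collected voids to the outer profile and restore the
        // caller's collector.
        conv.apply_openings = &innerOpenings;
        conv.collect_openings = oldCollect;
        // ... extrude the outer profile here ...
        conv.apply_openings = oldApply; // restore on the way out, as above
    }

    int main() {
        Context conv;
        ExtrudeWithVoids(conv);
    }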
// ------------------------------------------------------------------------------------------------
@@ -784,7 +803,7 @@ bool ProcessGeometricItem(const IfcRepresentationItem& geo, unsigned int matid,
meshtmp->RemoveDegenerates();
if(fix_orientation) {
meshtmp->FixupFaceOrientation();
// meshtmp->FixupFaceOrientation();
}
aiMesh* const mesh = meshtmp->ToMesh();

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -271,6 +271,7 @@ IfcFloat ConvertSIPrefix(const std::string& prefix);
// IFCProfile.cpp
bool ProcessProfile(const IfcProfileDef& prof, TempMesh& meshout, ConversionData& conv);
bool ProcessCurve(const IfcCurve& curve, TempMesh& meshout, ConversionData& conv);
// IFCMaterial.cpp
unsigned int ProcessMaterials(uint64_t id, unsigned int prevMatId, ConversionData& conv, bool forceDefaultMat);

View File

@@ -1023,7 +1023,9 @@ void SceneCombiner::CopyScene(aiScene** _dest,const aiScene* src,bool allocate)
dest->mFlags = src->mFlags;
// source private data might be NULL if the scene is user-allocated (i.e. for use with the export API)
ScenePriv(dest)->mPPStepsApplied = ScenePriv(src) ? ScenePriv(src)->mPPStepsApplied : 0;
if (dest->mPrivate != NULL) {
ScenePriv(dest)->mPPStepsApplied = ScenePriv(src) ? ScenePriv(src)->mPPStepsApplied : 0;
}
}
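The added guard matters because user-allocated scenes (e.g. those built for the export API) carry no private data block. A minimal sketch of the pattern with hypothetical stand-in types (the real code goes through ScenePriv()):

    #include <cstddef>

    struct ScenePrivateData { unsigned int mPPStepsApplied; };
    struct Scene { ScenePrivateData* mPrivate; };

    // Copy the post-processing step mask only when both sides can carry it.
    void CopyStepsApplied(Scene& dest, const Scene& src) {
        if (dest.mPrivate != NULL) {
            dest.mPrivate->mPPStepsApplied =
                src.mPrivate ? src.mPrivate->mPPStepsApplied : 0;
        }
    }

    int main() {
        ScenePrivateData priv = { 0 };
        Scene src = { &priv }, dest = { NULL };
        CopyStepsApplied(dest, src); // safe: dest has no private data
    }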
// ------------------------------------------------------------------------------------------------

View File

@@ -44,6 +44,7 @@
entities and data types contained"""
import sys, os, re
from collections import OrderedDict
re_match_entity = re.compile(r"""
ENTITY\s+(\w+)\s* # 'ENTITY foo'
@@ -68,8 +69,8 @@ re_match_field = re.compile(r"""
class Schema:
    def __init__(self):
        self.entities = {}
        self.types = {}
        self.entities = OrderedDict()
        self.types = OrderedDict()
class Entity:
    def __init__(self,name,parent,members):

View File

@@ -1,16 +1,17 @@
# ==============================================================================
# ==============================================================================
# List of IFC structures needed by Assimp
# ==============================================================================
# ==============================================================================
# use genentitylist.sh to update this list
# This machine-generated list is not complete, it lacks many intermediate
# This machine-generated list is not complete, it lacks many intermediate
# classes in the inheritance hierarchy. Those are magically augmented by the
# code generator. Also, the names of all used entities need to be present
# code generator. Also, the names of all used entities need to be present
# in the source code for this to work.
IfcAnnotation
IfcArbitraryClosedProfileDef
IfcArbitraryOpenProfileDef
IfcArbitraryProfileDefWithVoids
IfcAxis1Placement
IfcAxis2Placement
IfcAxis2Placement2D

File diff suppressed because it is too large

View File

@@ -3,83 +3,81 @@ Assimp Regression Test Suite
1) How does it work?
---------------------------------------------------------------------------------
run.py checks all models in the <root>/test/models folder and compares the result
against a regression database provided by us (db.zip). If the test passes
successfully, Assimp definitely WORKS perfectly on your system. A few failures
are totally fine as well (see sections 7+). You need to worry if a huge
majority of all files in a particular format or post-processing configuration
fails - this might be a sign of a recent regression in assimp's codebase.
run.py checks all models in the <root>/test/models* folders and compares the result
against a regression database provided with assimp (db.zip). A few failures
are totally fine (see sections 7+). You need to worry if a huge
majority of all files in a particular format (or post-processing configuration)
fails, as this might be a sign of a recent regression in assimp's codebase or
gross incompatibility with your system or compiler.
2) What do I need?
---------------------------------------------------------------------------------
- you need Python installed - 3.x !!
- you need to build tools/assimp_cmd as described in the INSTALL file
(make && make install on unixes, release-dll target with msvc).
- You need Python installed (2.7+, 3.x). On Windows, run the scripts using "py".
- You need to build the assimp command line tool (ASSIMP_BUILD_ASSIMP_TOOLS
CMake build flag). Both run.py and gen_db.py take the full path to the binary
as first command line parameter.
3) How to add more test files?
---------------------------------------------------------------------------------
Use the following procedure:
- verify the correctness of your assimp build - run the regression suite.
DO NOT continue if one or more tests fail.
- add your additional test files to <root>/test/models/<fileformat>, where
<fileformat> is the file type (typically the file extension)
- rebuild the regression database using gen_db.py
- run the regression suite again - all tests should pass, including
those for the new files.
- Verify the correctness of your assimp build - run the regression suite.
DO NOT continue if more tests fail than usual.
- Add your additional test files to <root>/test/models/<fileformat>, where
<fileformat> is the file type (typically the file extension).
- If your test file does not meet the BSD license requirements, add it to
<root>/test/models-nonbsd/<fileformat> so people know to be careful with it.
- Rebuild the regression database:
"gen_db.py <binary> -ixyz" where .xyz is the file extension of the new file.
- Run the regression suite again. There should be no new failures and the new
file should not be among the failures.
- Include the db.zip file with your Pull Request. Travis CI enforces a passing
regression suite (with offenders whitelisted as a last resort).
- contributors: commit the db.zip plus your additional test files to
the SVN repository.
4) I made a change/fix/.. to a loader, how to update the database?
4) I made a change/fix/patch to a loader, how to update the database?
---------------------------------------------------------------------------------
- rebuild the regression database using gen_db.py
- run the suite - all tests should pass now. If they do not, don't continue
- contributors: commit the db.zip to the SVN repository
- Rebuild the regression database using "gen_db.py <binary> -ixyz"
where .xyz is the file extension for which the loader was patched.
- Run the regression suite again. There should be no new failures and the new
file should not be among the failures.
- Include the db.zip file with your Pull Request. Travis CI enforces a passing
regression suite (with offenders whitelisted as a last resort).
5) How to add my whole model repository to the suite?
5) How to add my whole model repository to the database?
---------------------------------------------------------------------------------
Edit the reg_settings.py file and add the path to your repository to
<<model_directories>>. Then, rebuild the suite.
<<model_directories>>. Then, rebuild the database.
6) So what is actually verified?
6) So what is actually tested?
---------------------------------------------------------------------------------
The regression database includes mini dumps of the aiScene data structure -
The regression database includes mini dumps of the aiScene data structure, i.e.
the scene hierarchy plus the sizes of all data arrays MUST match. Floating-point
data buffers, such as vertex positions, are handled less strictly: min,max and
average values are stored with lower precision. This takes hardware- or
data buffers, such as vertex positions, are handled less strictly: min, max and
average values are stored with low precision. This takes hardware- or
compiler-specific differences in floating-point computations into account.
Generally, almost all significant regressions will be detected, while the
number of false positives is relatively low.
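As an illustration of such reduced-precision summaries, a float buffer could be condensed and compared roughly like this (illustrative sketch only; the actual dump format is defined by assimp_cmd):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    struct BufferSummary { float min, max, average; };

    // Condense a float buffer to three scalars; comparing summaries with an
    // epsilon tolerates hardware/compiler-specific rounding differences.
    BufferSummary Summarize(const std::vector<float>& buf) {
        BufferSummary s = { 0.0f, 0.0f, 0.0f };
        if (buf.empty()) return s;
        s.min = *std::min_element(buf.begin(), buf.end());
        s.max = *std::max_element(buf.begin(), buf.end());
        double sum = 0.0;
        for (size_t i = 0; i < buf.size(); ++i) sum += buf[i];
        s.average = static_cast<float>(sum / buf.size());
        return s;
    }

    bool RoughlyEqual(const BufferSummary& a, const BufferSummary& b, float eps) {
        return std::fabs(a.min - b.min) <= eps &&
               std::fabs(a.max - b.max) <= eps &&
               std::fabs(a.average - b.average) <= eps;
    }

    int main() {
        std::vector<float> expected = { 0.0f, 1.0f, 2.0f };
        std::vector<float> actual   = { 0.0f, 1.0000001f, 2.0f };
        return RoughlyEqual(Summarize(expected), Summarize(actual), 1e-4f) ? 0 : 1;
    }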
7) The test suite fails, what to do?
---------------------------------------------------------------------------------
Get back to ../results and check out regression_suite_failures.txt
It contains a list of all files which failed the test ... they're copied to
../results/tmp. Both an EXPECTED and an ACTUAL file is produced per test.
The output of `assimp cmpdump` is written to regressions_suite_output.txt.
To quickly find all reports pertaining to tests which failed, I'd
recommend grepping for 'but' because it's a constituent of all error messages
produced by assimp_cmd :) Error reports contain detailed information
regarding the point of failure inside the data structure, the values of
the two corresponding fields that were found to be different from EXPECTED
and ACTUAL, respectively, ... this should offer you enough information to start
debugging.
Get back to <root>/test/results and look at regression_suite_failures.txt.
It contains a list of all files which failed the test. Failing dumps are copied to
<root>/test/results/tmp. Both an EXPECTED and an ACTUAL file are produced per test.
The output of "assimp cmpdump" is written to regressions_suite_output.txt. Grep
for the file name in question and locate the log for the failed comparison. It
contains a full trace of which scene elements have been compared before, which
makes it reasonably easy to locate the offending field.
8) fp:fast vs fp:precise fails the test suite (same for gcc equivalents)
---------------------------------------------------------------------------------
As mentioned above, floating-point inaccuracies between differently optimized
builds are not considered regressions and all float comparisons done by the test
suite involve an epsilon. Changing floating-point optimizations can, however,
lead to *real* changes in the output data structure, such as different number
of vertices or faces, ... this violates one of our primary targets, that is
produce reliable and portable output. We're working hard on removing these
issues, but at the moment you have to live with a few of them.
Currently, the regression database is built on Windows using MSVC8 with
fp:precise. This leads to a small number of failures with fp:fast and
virtually every build with gcc. Be patient, it will be fixed.
suite involve an epsilon to accommodate. However, compiler settings that allow
the compiler to perform non-IEEE754 compliant optimizations can cause arbitrary
failures in the test suite. Even if the compiler is configured to be IEEE754
conformant, there is a lot of code in assimp that leaves the compiler a choice,
and different compilers make different choices (for example, the precision of
float intermediates is implementation-defined).
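A concrete instance of that latitude: float addition is not associative, so a compiler permitted to reassociate (e.g. under fast-math flags) or to keep intermediates in wider registers can legitimately change results. A small demonstration:

    #include <cfloat>
    #include <cstdio>

    int main() {
        float a = 1e8f, b = -1e8f, c = 1.0f;
        // Strict single precision: (a + b) + c == 1.0f, but a + (b + c) == 0.0f
        // because b + c rounds back to b (1.0f is below half an ulp at 1e8f).
        // A reassociating optimizer may turn one form into the other.
        float left  = (a + b) + c;
        float right = a + (b + c);
        std::printf("FLT_EVAL_METHOD=%d left=%g right=%g\n",
                    FLT_EVAL_METHOD, left, right);
        return 0;
    }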

View File

@@ -69,6 +69,9 @@ configs for an IDE, make sure to build the assimp_cmd project.
-i,--include: List of file extensions to update dumps for. If omitted,
all file extensions are updated except those in `exclude`.
Example: -ixyz,abc
-i.xyz,.abc
--include=xyz,abc
-e,--exclude: Merged with settings.exclude_extensions to produce a
list of all file extensions to ignore. If dumps exist,
@@ -78,8 +81,6 @@ configs for an IDE, make sure to build the assimp_cmd project.
Don't change anything.
-n,--nozip: Don't pack to ZIP archive. Keep all dumps in individual files.
(lists of file extensions are comma delimited, i.e. `3ds,lwo,x`)
"""
# -------------------------------------------------------------------------------
@@ -172,30 +173,32 @@ def gen_db(ext_list,outfile):
# -------------------------------------------------------------------------------
if __name__ == "__main__":
    assimp_bin_path = sys.argv[1] if len(sys.argv) > 1 else 'assimp'
    def clean(f):
        f = f.strip("* \'")
        return "."+f if f[:1] != '.' else f
    if len(sys.argv)>1 and (sys.argv[1] == "--help" or sys.argv[1] == "-h"):
    if len(sys.argv) <= 1 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
        print(usage)
        sys.exit(0)
    assimp_bin_path = sys.argv[1]
    ext_list, preview, nozip = None, False, False
    for m in sys.argv[1:]:
    for m in sys.argv[2:]:
        if m[:10]=="--exclude=":
            settings.exclude_extensions += map(clean, m[10:].split(","))
        elif m[:3]=="-e=":
            settings.exclude_extensions += map(clean, m[3:].split(","))
        elif m[:2]=="-e":
            settings.exclude_extensions += map(clean, m[2:].split(","))
        elif m[:10]=="--include=":
            ext_list = m[10:].split(",")
        elif m[:3]=="-i=":
            ext_list = m[3:].split(",")
        elif m[:2]=="-i":
            ext_list = m[2:].split(",")
        elif m=="-p" or m == "--preview":
            preview = True
        elif m=="-n" or m == "--nozip":
            nozip = True
        else:
            print("Unrecognized parameter: " + m)
            sys.exit(-1)
    outfile = open(os.path.join("..", "results", "gen_regression_db_output.txt"), "w")
    if ext_list is None:
@@ -206,9 +209,8 @@ if __name__ == "__main__":
    # todo: Fix for multi dot extensions like .skeleton.xml
    ext_list = list(filter(lambda f: not f in settings.exclude_extensions,
                    map(clean, ext_list)))
    if preview:
        print(','.join(ext_list))
    print('File extensions processed: ' + ', '.join(ext_list))
    if preview:
        sys.exit(1)
    extract_zip()