Change to the IFC parsing: strip spaces before processing the string (a short sketch of the idea follows the commit metadata).
parent e6c5095e5b
commit 8267d93537
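
The change itself is small: blank characters are stripped from each STEP data line before it is tokenized, so that entity definitions written with stray spaces (for example "# 10 = IFCWALL(...)") still parse. Below is a minimal, self-contained sketch of the erase/remove idiom, the same call that shows up in the STEP::ReadFile hunk further down; the sample line is made up for illustration.

#include <algorithm>
#include <iostream>
#include <string>

int main() {
    // hypothetical STEP line with stray spaces around the entity id
    std::string s = "# 10 = IFCWALL('2O2Fr$t4X7Zf8NOew3FLOH',#1,$,$,$,#5,#20,$);";

    // erase/remove idiom: std::remove shifts every non-space character to the
    // front, erase() then chops off the leftover tail -- the same call used
    // before the line is split into id, type and arguments
    s.erase(std::remove(s.begin(), s.end(), ' '), s.end());

    std::cout << s << '\n';  // prints: #10=IFCWALL('2O2Fr$t4X7Zf8NOew3FLOH',#1,$,$,$,#5,#20,$);
    return 0;
}

Note that this removes every ' ' in the line, including any inside quoted attribute strings; the sketch only illustrates the mechanics of the call.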

@@ -192,24 +192,17 @@ void IFCImporter::InternReadFile( const std::string& pFile,
    }

    // search file (same name as the IFCZIP except for the file extension) and place file pointer there

    if(UNZ_OK == unzGoToFirstFile(zip)) {
        do {
            //

            // get file size, etc.
            unz_file_info fileInfo;
            char filename[256];
            unzGetCurrentFileInfo( zip , &fileInfo, filename, sizeof(filename), 0, 0, 0, 0 );

            if (GetExtension(filename) != "ifc") {
                continue;
            }

            uint8_t* buff = new uint8_t[fileInfo.uncompressed_size];

            LogInfo("Decompressing IFCZIP file");

            unzOpenCurrentFile( zip );
            const int ret = unzReadCurrentFile( zip, buff, fileInfo.uncompressed_size);
            size_t filesize = fileInfo.uncompressed_size;

@@ -271,7 +264,6 @@ void IFCImporter::InternReadFile( const std::string& pFile,

    // feed the IFC schema into the reader and pre-parse all lines
    STEP::ReadFile(*db, schema, types_to_track, inverse_indices_to_track);

    const STEP::LazyObject* proj = db->GetObject("ifcproject");
    if (!proj) {
        ThrowException("missing IfcProject entity");

@@ -565,21 +557,16 @@ void ProcessProductRepresentation(const IfcProduct& el, aiNode* nd, std::vector<
    if(!el.Representation) {
        return;
    }


    std::vector<unsigned int> meshes;

    // we want only one representation type, so bring them in a suitable order (i.e try those
    // that look as if we could read them quickly at first). This way of reading
    // representation is relatively generic and allows the concrete implementations
    // for the different representation types to make some sensible choices what
    // to load and what not to load.
    const STEP::ListOf< STEP::Lazy< IfcRepresentation >, 1, 0 >& src = el.Representation.Get()->Representations;

    std::vector<const IfcRepresentation*> repr_ordered(src.size());
    std::copy(src.begin(),src.end(),repr_ordered.begin());
    std::sort(repr_ordered.begin(),repr_ordered.end(),RateRepresentationPredicate());

    BOOST_FOREACH(const IfcRepresentation* repr, repr_ordered) {
        bool res = false;
        BOOST_FOREACH(const IfcRepresentationItem& item, repr->Items) {

@@ -595,7 +582,6 @@ void ProcessProductRepresentation(const IfcProduct& el, aiNode* nd, std::vector<
            break;
        }
    }

    AssignAddedMeshes(meshes,nd,conv);
}
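
The ordering comment in the hunk above leans on RateRepresentationPredicate, whose definition is not part of this diff. What follows is only a hypothetical comparator of the same shape, to illustrate how such a predicate plugs into std::sort; the stand-in Representation struct, the rating table and the identifier values are assumptions, not the actual Assimp implementation.

#include <algorithm>
#include <string>
#include <vector>

// stand-in for IfcRepresentation: only the field this sketch needs
struct Representation {
    std::string Identifier;   // e.g. "SweptSolid", "Brep", ...
};

// hypothetical rating: lower value = presumably cheaper to read, so tried first
static int Rate(const Representation* r) {
    if (r->Identifier == "SweptSolid") { return 0; }
    if (r->Identifier == "Brep")       { return 1; }
    return 2;                          // everything else last
}

// comparator object usable as the third argument of std::sort
struct RateRepresentationPredicate {
    bool operator()(const Representation* a, const Representation* b) const {
        return Rate(a) < Rate(b);      // strict weak ordering over the ratings
    }
};

int main() {
    Representation brep  = { "Brep" };
    Representation swept = { "SweptSolid" };
    Representation other = { "MappedRepresentation" };

    std::vector<const Representation*> reprs;
    reprs.push_back(&other);
    reprs.push_back(&brep);
    reprs.push_back(&swept);

    std::sort(reprs.begin(), reprs.end(), RateRepresentationPredicate());
    // reprs is now { &swept, &brep, &other } -- cheapest representation first
    return 0;
}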

@@ -103,13 +103,10 @@ public:
            swallow = false;
            return *this;
        }

        if (!*this) {
            throw std::logic_error("End of file, no more lines to be retrieved.");
        }

        char s;

        cur.clear();
        while(stream.GetRemainingSize() && (s = stream.GetI1(),1)) {
            if (s == '\n' || s == '\r') {

@@ -124,7 +121,6 @@ public:
                    if (stream.GetRemainingSize() && (s == '\r' && stream.GetI1() != '\n')) {
                        stream.IncPtr(-1);
                    }

                    if (trim) {
                        while (stream.GetRemainingSize() && ((s = stream.GetI1()) == ' ' || s == '\t'));
                        if (stream.GetRemainingSize()) {

@@ -132,12 +128,10 @@ public:
                        }
                    }
                }

                break;
            }
            cur += s;
        }

        ++idx;
        return *this;
    }

@@ -174,7 +168,9 @@ public:
        SkipSpaces(&s);
        for(size_t i = 0; i < N; ++i) {
            if(IsLineEnd(*s)) {

                throw std::range_error("Token count out of range, EOL reached");

            }
            tokens[i] = s;

@@ -175,7 +175,7 @@ bool IsEntityDef(const std::string& snext)
            if (*it == '=') {
                return true;
            }
-           if (*it < '0' || *it > '9') {
+           if ((*it < '0' || *it > '9') && *it != ' ') {
                break;
            }
        }
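
This one-line relaxation is the behavioral core of the commit on the continuation-line path: a candidate line may now contain blanks between the '#', the numeric id and the '='. Below is a self-contained restatement of the loop, for illustration only; the function name, the surrounding body and the leading '#' guard are assumptions based on context, since only the loop interior is visible in the hunk.

#include <iostream>
#include <string>

// Illustration of the relaxed check in the hunk above: an entity definition
// may now contain spaces between the '#', the numeric id and the '='.
static bool IsEntityDefLike(const std::string& snext) {
    if (snext.empty() || snext[0] != '#') {   // leading-'#' guard assumed from context
        return false;
    }
    for (std::string::const_iterator it = snext.begin() + 1; it != snext.end(); ++it) {
        if (*it == '=') {
            return true;                      // id terminated by '=' -> looks like "#123 ="
        }
        if ((*it < '0' || *it > '9') && *it != ' ') {
            break;                            // anything but digits or blanks -> not an entity def
        }
    }
    return false;
}

int main() {
    std::cout << IsEntityDefLike("#10=IFCWALL($);") << '\n';    // 1
    std::cout << IsEntityDefLike("# 10 = IFCWALL($);") << '\n'; // 1 with the new check, 0 with the old one
    std::cout << IsEntityDefLike("IFCWALL($);") << '\n';        // 0
    return 0;
}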

@@ -197,16 +197,17 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,

    const DB::ObjectMap& map = db.GetObjects();
    LineSplitter& splitter = db.GetSplitter();

    while (splitter) {
        bool has_next = false;
        std::string s = *splitter;
        if (s == "ENDSEC;") {
            break;
        }
+       s.erase(std::remove(s.begin(), s.end(), ' '), s.end());

        // want one-based line numbers for human readers, so +1
        const uint64_t line = splitter.get_index()+1;

        // LineSplitter already ignores empty lines
        ai_assert(s.length());
        if (s[0] != '#') {

@@ -214,12 +215,10 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,
            ++splitter;
            continue;
        }

        // ---
        // extract id, entity class name and argument string,
        // but don't create the actual object yet.
        // ---

        const std::string::size_type n0 = s.find_first_of('=');
        if (n0 == std::string::npos) {
            DefaultLogger::get()->warn(AddLineNumber("expected token \'=\'",line));

@@ -233,13 +232,10 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,
            ++splitter;
            continue;
        }

        std::string::size_type n1 = s.find_first_of('(',n0);
        if (n1 == std::string::npos) {

            has_next = true;
            bool ok = false;

            for( ++splitter; splitter; ++splitter) {
                const std::string& snext = *splitter;
                if (snext.empty()) {

@@ -269,13 +265,11 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,

            has_next = true;
            bool ok = false;

            for( ++splitter; splitter; ++splitter) {
                const std::string& snext = *splitter;
                if (snext.empty()) {
                    continue;
                }

                // the next line doesn't start an entity, so maybe it is
                // just a continuation for this line, keep going
                if (!IsEntityDef(snext)) {

@@ -287,7 +281,6 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,
                    break;
                }
            }

            if(!ok) {
                DefaultLogger::get()->warn(AddLineNumber("expected token \')\'",line));
                continue;

@@ -300,24 +293,18 @@ void STEP::ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme,

        std::string::size_type ns = n0;
        do ++ns; while( IsSpace(s.at(ns)));

        std::string::size_type ne = n1;
        do --ne; while( IsSpace(s.at(ne)));

        std::string type = s.substr(ns,ne-ns+1);
        std::transform( type.begin(), type.end(), type.begin(), &Assimp::ToLower<char> );

        const char* sz = scheme.GetStaticStringForToken(type);
        if(sz) {

            const std::string::size_type len = n2-n1+1;
            char* const copysz = new char[len+1];
            std::copy(s.c_str()+n1,s.c_str()+n2+1,copysz);
            copysz[len] = '\0';

            db.InternInsert(new LazyObject(db,id,line,sz,copysz));
        }

        if(!has_next) {
            ++splitter;
        }

@@ -338,7 +325,6 @@ boost::shared_ptr<const EXPRESS::DataType> EXPRESS::DataType::Parse(const char*&
{
    const char* cur = inout;
    SkipSpaces(&cur);

    if (*cur == ',' || IsSpaceOrNewLine(*cur)) {
        throw STEP::SyntaxError("unexpected token, expected parameter",line);
    }

@@ -47,12 +47,10 @@ namespace Assimp {
namespace STEP {

    // ### Parsing a STEP file is a twofold procedure ###

    // --------------------------------------------------------------------------
    // 1) read file header and return to caller, who checks if the
    //    file is of a supported schema ..
    DB* ReadFileHeader(boost::shared_ptr<IOStream> stream);

    // --------------------------------------------------------------------------
    // 2) read the actual file contents using a user-supplied set of
    //    conversion functions to interpret the data.

@@ -60,8 +58,6 @@ namespace STEP {
    template <size_t N, size_t N2> inline void ReadFile(DB& db,const EXPRESS::ConversionSchema& scheme, const char* const (&arr)[N], const char* const (&arr2)[N2]) {
        return ReadFile(db,scheme,arr,N,arr2,N2);
    }


} // ! STEP
} // ! Assimp