Merge branch 'master' into jakras-progress

pull/5190/head
Kim Kulling 2023-08-30 20:51:23 +02:00 committed by GitHub
commit bfc1207e78
22 changed files with 207 additions and 140 deletions

View File

@ -95,6 +95,7 @@ ColladaLoader::ColladaLoader() :
noSkeletonMesh(false),
removeEmptyBones(false),
ignoreUpDirection(false),
ignoreUnitSize(false),
useColladaName(false),
mNodeNameCounter(0) {
// empty
@ -122,6 +123,7 @@ void ColladaLoader::SetupProperties(const Importer *pImp) {
noSkeletonMesh = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_NO_SKELETON_MESHES, 0) != 0;
removeEmptyBones = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, true) != 0;
ignoreUpDirection = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION, 0) != 0;
ignoreUnitSize = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_IGNORE_UNIT_SIZE, 0) != 0;
useColladaName = pImp->GetPropertyInteger(AI_CONFIG_IMPORT_COLLADA_USE_COLLADA_NAMES, 0) != 0;
}
@ -170,12 +172,15 @@ void ColladaLoader::InternReadFile(const std::string &pFile, aiScene *pScene, IO
// ... then fill the materials with the now adjusted settings
FillMaterials(parser, pScene);
if (!ignoreUnitSize) {
// Apply unit-size scale calculation
pScene->mRootNode->mTransformation *= aiMatrix4x4(parser.mUnitSize, 0, 0, 0,
pScene->mRootNode->mTransformation *= aiMatrix4x4(
parser.mUnitSize, 0, 0, 0,
0, parser.mUnitSize, 0, 0,
0, 0, parser.mUnitSize, 0,
0, 0, 0, 1);
}
if (!ignoreUpDirection) {
// Convert to Y_UP, if different orientation
if (parser.mUpDirection == ColladaParser::UP_X) {

View File

@ -239,6 +239,7 @@ protected:
bool noSkeletonMesh;
bool removeEmptyBones;
bool ignoreUpDirection;
bool ignoreUnitSize;
bool useColladaName;
/** Used by FindNameForNode() to generate unique node names */

View File

@ -478,10 +478,10 @@ void FBXConverter::ConvertCamera(const Camera &cam, const std::string &orig_name
float focal_length_mm = cam.FocalLength();
ASSIMP_LOG_VERBOSE_DEBUG("FBX FOV unspecified. Computing from FilmWidth (", film_width_inches, "inches) and FocalLength (", focal_length_mm, "mm).");
double half_fov_rad = std::atan2(film_width_inches * 25.4 * 0.5, focal_length_mm);
out_camera->mHorizontalFOV = half_fov_rad;
out_camera->mHorizontalFOV = static_cast<float>(half_fov_rad);
} else {
// FBX fov is full-view degrees. We want half-view radians.
out_camera->mHorizontalFOV = AI_DEG_TO_RAD(fov_deg) * 0.5;
out_camera->mHorizontalFOV = AI_DEG_TO_RAD(fov_deg) * 0.5f;
}
out_camera->mClipPlaneNear = cam.NearPlane();
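For reference, the conversion above is the standard pinhole relation: the half horizontal FOV is atan(half film width / focal length), with the film back width converted from inches to millimetres. A minimal standalone sketch (hypothetical helper, not part of the converter):

```cpp
#include <cmath>

// Half horizontal field of view in radians from film back width (inches)
// and focal length (mm); 25.4 converts inches to millimetres.
inline float HalfHorizontalFovRad(double filmWidthInches, double focalLengthMm) {
    const double filmWidthMm = filmWidthInches * 25.4;
    return static_cast<float>(std::atan2(filmWidthMm * 0.5, focalLengthMm));
}
// Example: 1.417 in (~36 mm) film width at 50 mm focal length gives ~0.346 rad,
// i.e. a full horizontal FOV of roughly 39.6 degrees.
```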

View File

@ -211,7 +211,7 @@ Scope::Scope(Parser& parser,bool topLevel)
elements.insert(ElementMap::value_type(str, element));
return;
}
delete element;
delete_Element(element);
ParseError("unexpected end of file",parser.LastToken());
} else {
elements.insert(ElementMap::value_type(str, element));

View File

@ -271,10 +271,16 @@ void MDLImporter::InternReadFile(const std::string &pFile,
}
}
// ------------------------------------------------------------------------------------------------
// Check whether we're still inside the valid file range
bool MDLImporter::IsPosValid(const void *szPos) const {
return szPos && (const unsigned char *)szPos <= this->mBuffer + this->iFileSize && szPos >= this->mBuffer;
}
// ------------------------------------------------------------------------------------------------
// Check whether we're still inside the valid file range
void MDLImporter::SizeCheck(const void *szPos) {
if (!szPos || (const unsigned char *)szPos > this->mBuffer + this->iFileSize || szPos < this->mBuffer) {
if (!IsPosValid(szPos)) {
throw DeadlyImportError("Invalid MDL file. The file is too small "
"or contains invalid data.");
}
@ -284,7 +290,7 @@ void MDLImporter::SizeCheck(const void *szPos) {
// Just for debugging purposes
void MDLImporter::SizeCheck(const void *szPos, const char *szFile, unsigned int iLine) {
ai_assert(nullptr != szFile);
if (!szPos || (const unsigned char *)szPos > mBuffer + iFileSize) {
if (!IsPosValid(szPos)) {
// remove a directory if there is one
const char *szFilePtr = ::strrchr(szFile, '\\');
if (!szFilePtr) {

View File

@ -150,6 +150,7 @@ protected:
*/
void SizeCheck(const void* szPos);
void SizeCheck(const void* szPos, const char* szFile, unsigned int iLine);
bool IsPosValid(const void* szPos) const;
// -------------------------------------------------------------------
/** Validate the header data structure of a game studio MDL7 file

View File

@ -52,6 +52,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <assimp/importerdesc.h>
#include <assimp/StreamReader.h>
#include <map>
#include <limits>
using namespace Assimp;
@ -160,6 +161,9 @@ void NDOImporter::InternReadFile( const std::string& pFile,
temp = file_format >= 12 ? reader.GetU4() : reader.GetU2();
head = (const char*)reader.GetPtr();
if (std::numeric_limits<unsigned int>::max() - 76 < temp) {
throw DeadlyImportError("Invalid name length");
}
reader.IncPtr(temp + 76); /* skip unknown stuff */
obj.name = std::string(head, temp);
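The added check is the usual overflow-safe way of validating `temp + 76` against the unsigned range before advancing the read pointer; a minimal sketch of the pattern (hypothetical helper):

```cpp
#include <limits>
#include <stdexcept>

// Add two unsigned values, throwing instead of silently wrapping around.
inline unsigned int CheckedAdd(unsigned int a, unsigned int b) {
    if (std::numeric_limits<unsigned int>::max() - b < a) {
        throw std::runtime_error("unsigned integer overflow");
    }
    return a + b;
}
```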

View File

@ -263,7 +263,7 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
for (short idx = 0; bufferData_ptr < bufferData_end; idx += 1, bufferData_ptr += numCompsIn) {
bool bNonZero = false;
//for the data, check any component Non Zero
// for the data, check any component Non Zero
for (unsigned int j = 0; j < numCompsOut; j++) {
double valueData = bufferData_ptr[j];
double valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
@ -273,11 +273,11 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
}
}
//all zeros, continue
// all zeros, continue
if (!bNonZero)
continue;
//non zero, store the data
// non zero, store the data
for (unsigned int j = 0; j < numCompsOut; j++) {
T valueData = bufferData_ptr[j];
T valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
@ -286,14 +286,14 @@ size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn,
vNZIdx.push_back(idx);
}
//avoid all-0, put 1 item
// avoid all-0, put 1 item
if (vNZDiff.size() == 0) {
for (unsigned int j = 0; j < numCompsOut; j++)
vNZDiff.push_back(0);
vNZIdx.push_back(0);
}
//process data
// process data
outputNZDiff = new T[vNZDiff.size()];
memcpy(outputNZDiff, vNZDiff.data(), vNZDiff.size() * sizeof(T));
@ -361,7 +361,7 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
acc->sparse.reset(new Accessor::Sparse);
acc->sparse->count = nzCount;
//indices
// indices
unsigned int bytesPerIdx = sizeof(unsigned short);
size_t indices_offset = buffer->byteLength;
size_t indices_padding = indices_offset % bytesPerIdx;
@ -379,7 +379,7 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
acc->sparse->indicesByteOffset = 0;
acc->WriteSparseIndices(nzCount, nzIdx, 1 * bytesPerIdx);
//values
// values
size_t values_offset = buffer->byteLength;
size_t values_padding = values_offset % bytesPerComp;
values_offset += values_padding;
@ -395,9 +395,9 @@ inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffe
acc->sparse->valuesByteOffset = 0;
acc->WriteSparseValues(nzCount, nzDiff, numCompsIn * bytesPerComp);
//clear
delete[](char *) nzDiff;
delete[](char *) nzIdx;
// clear
delete[] (char *)nzDiff;
delete[] (char *)nzIdx;
}
return acc;
}
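For context, a glTF sparse accessor stores only the indices and values that differ from an (implicitly zero) base array, which is exactly what the non-zero-diff scan above produces. A simplified sketch of how a consumer reconstructs the dense data from that representation (assumed single-component floats):

```cpp
#include <cstddef>
#include <vector>

// Rebuild a dense array from sparse (index, value) pairs over a zero base.
std::vector<float> DenseFromSparse(std::size_t count,
                                   const std::vector<unsigned short> &indices,
                                   const std::vector<float> &values) {
    std::vector<float> dense(count, 0.0f);   // base is all zeros
    for (std::size_t i = 0; i < indices.size(); ++i) {
        dense[indices[i]] = values[i];       // overwrite only the stored entries
    }
    return dense;
}
```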
@ -599,7 +599,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
if (curTex != nullptr) { // embedded
texture->source->name = curTex->mFilename.C_Str();
//basisu: embedded ktx2, bu
// basisu: embedded ktx2, bu
if (curTex->achFormatHint[0]) {
std::string mimeType = "image/";
if (memcmp(curTex->achFormatHint, "jpg", 3) == 0)
@ -619,7 +619,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
}
// The asset has its own buffer, see Image::SetData
//basisu: "image/ktx2", "image/basis" as is
// basisu: "image/ktx2", "image/basis" as is
texture->source->SetData(reinterpret_cast<uint8_t *>(curTex->pcData), curTex->mWidth, *mAsset);
} else {
texture->source->uri = path;
@ -629,7 +629,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, Ref<Texture> &texture, unsi
}
}
//basisu
// basisu
if (useBasisUniversal) {
mAsset->extensionsUsed.KHR_texture_basisu = true;
mAsset->extensionsRequired.KHR_texture_basisu = true;
@ -652,7 +652,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, NormalTextureInfo &prop, ai
GetMatTex(mat, texture, prop.texCoord, tt, slot);
if (texture) {
//GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
// GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
GetMatTexProp(mat, prop.scale, "scale", tt, slot);
}
}
@ -663,7 +663,7 @@ void glTF2Exporter::GetMatTex(const aiMaterial &mat, OcclusionTextureInfo &prop,
GetMatTex(mat, texture, prop.texCoord, tt, slot);
if (texture) {
//GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
// GetMatTexProp(mat, prop.texCoord, "texCoord", tt, slot);
GetMatTexProp(mat, prop.strength, "strength", tt, slot);
}
}
@ -832,20 +832,30 @@ void glTF2Exporter::ExportMaterials() {
GetMatTex(mat, m->pbrMetallicRoughness.baseColorTexture, aiTextureType_BASE_COLOR);
if (!m->pbrMetallicRoughness.baseColorTexture.texture) {
//if there wasn't a baseColorTexture defined in the source, fallback to any diffuse texture
// if there wasn't a baseColorTexture defined in the source, fallback to any diffuse texture
GetMatTex(mat, m->pbrMetallicRoughness.baseColorTexture, aiTextureType_DIFFUSE);
}
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, aiTextureType_DIFFUSE_ROUGHNESS);
if (!m->pbrMetallicRoughness.metallicRoughnessTexture.texture) {
// if there wasn't a aiTextureType_DIFFUSE_ROUGHNESS defined in the source, fallback to aiTextureType_METALNESS
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, aiTextureType_METALNESS);
}
if (!m->pbrMetallicRoughness.metallicRoughnessTexture.texture) {
// if there still wasn't a aiTextureType_METALNESS defined in the source, fallback to AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE
GetMatTex(mat, m->pbrMetallicRoughness.metallicRoughnessTexture, AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE);
}
if (GetMatColor(mat, m->pbrMetallicRoughness.baseColorFactor, AI_MATKEY_BASE_COLOR) != AI_SUCCESS) {
// if baseColorFactor wasn't defined, then the source is likely not a metallic roughness material.
//a fallback to any diffuse color should be used instead
// a fallback to any diffuse color should be used instead
GetMatColor(mat, m->pbrMetallicRoughness.baseColorFactor, AI_MATKEY_COLOR_DIFFUSE);
}
if (mat.Get(AI_MATKEY_METALLIC_FACTOR, m->pbrMetallicRoughness.metallicFactor) != AI_SUCCESS) {
//if metallicFactor wasn't defined, then the source is likely not a PBR file, and the metallicFactor should be 0
// if metallicFactor wasn't defined, then the source is likely not a PBR file, and the metallicFactor should be 0
m->pbrMetallicRoughness.metallicFactor = 0;
}
@ -858,10 +868,10 @@ void glTF2Exporter::ExportMaterials() {
if (mat.Get(AI_MATKEY_COLOR_SPECULAR, specularColor) == AI_SUCCESS && mat.Get(AI_MATKEY_SHININESS, shininess) == AI_SUCCESS) {
// convert specular color to luminance
float specularIntensity = specularColor[0] * 0.2125f + specularColor[1] * 0.7154f + specularColor[2] * 0.0721f;
//normalize shininess (assuming max is 1000) with an inverse exponentional curve
// normalize shininess (assuming max is 1000) with an inverse exponential curve
float normalizedShininess = std::sqrt(shininess / 1000);
//clamp the shininess value between 0 and 1
// clamp the shininess value between 0 and 1
normalizedShininess = std::min(std::max(normalizedShininess, 0.0f), 1.0f);
// low specular intensity values should produce a rough material even if shininess is high.
normalizedShininess = normalizedShininess * specularIntensity;
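Put together, the exporter derives a pseudo-glossiness from legacy Phong inputs: the specular colour is reduced to a luminance, shininess is normalized with a square root (assuming a maximum of 1000), clamped, and scaled by that luminance. A compact restatement under those assumptions (hypothetical helper; the final roughness is presumably one minus this value, which is not shown in the hunk):

```cpp
#include <algorithm>
#include <cmath>

// Legacy specular/shininess -> normalized "glossiness" as in the code above.
float NormalizedShininess(const float specular[3], float shininess) {
    const float intensity = specular[0] * 0.2125f + specular[1] * 0.7154f + specular[2] * 0.0721f;
    float s = std::sqrt(shininess / 1000.0f);   // assume shininess maxes out at 1000
    s = std::min(std::max(s, 0.0f), 1.0f);      // clamp to [0, 1]
    return s * intensity;                       // weak speculars stay rough
}
```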
@ -1059,7 +1069,7 @@ void ExportSkin(Asset &mAsset, const aiMesh *aimesh, Ref<Mesh> &meshRef, Ref<Buf
if (boneIndexFitted != -1) {
vertexJointData[vertexId][boneIndexFitted] = static_cast<float>(jointNamesIndex);
}
}else {
} else {
vertexJointData[vertexId][jointsPerVertex[vertexId]] = static_cast<float>(jointNamesIndex);
vertexWeightData[vertexId][jointsPerVertex[vertexId]] = vertWeight;
@ -1362,24 +1372,24 @@ void glTF2Exporter::MergeMeshes() {
unsigned int nMeshes = static_cast<unsigned int>(node->meshes.size());
//skip if it's 1 or less meshes per node
// skip if it's 1 or less meshes per node
if (nMeshes > 1) {
Ref<Mesh> firstMesh = node->meshes.at(0);
//loop backwards to allow easy removal of a mesh from a node once it's merged
// loop backwards to allow easy removal of a mesh from a node once it's merged
for (unsigned int m = nMeshes - 1; m >= 1; --m) {
Ref<Mesh> mesh = node->meshes.at(m);
//append this mesh's primitives to the first mesh's primitives
// append this mesh's primitives to the first mesh's primitives
firstMesh->primitives.insert(
firstMesh->primitives.end(),
mesh->primitives.begin(),
mesh->primitives.end());
//remove the mesh from the list of meshes
// remove the mesh from the list of meshes
unsigned int removedIndex = mAsset->meshes.Remove(mesh->id.c_str());
//find the presence of the removed mesh in other nodes
// find the presence of the removed mesh in other nodes
for (unsigned int nn = 0; nn < mAsset->nodes.Size(); ++nn) {
Ref<Node> curNode = mAsset->nodes.Get(nn);
@ -1398,7 +1408,7 @@ void glTF2Exporter::MergeMeshes() {
}
}
//since we were looping backwards, reverse the order of merged primitives to their original order
// since we were looping backwards, reverse the order of merged primitives to their original order
std::reverse(firstMesh->primitives.begin() + 1, firstMesh->primitives.end());
}
}
@ -1430,7 +1440,7 @@ unsigned int glTF2Exporter::ExportNodeHierarchy(const aiNode *n) {
return node.GetIndex();
}
/*
/*
* Export node and recursively calls ExportNode for all children.
* Since these nodes are not the root node, we also export the parent Ref<Node>
*/
@ -1525,9 +1535,9 @@ inline void ExtractTranslationSampler(Asset &asset, std::string &animId, Ref<Buf
const aiVectorKey &key = nodeChannel->mPositionKeys[i];
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
values[(i * 3) + 0] = (ai_real) key.mValue.x;
values[(i * 3) + 1] = (ai_real) key.mValue.y;
values[(i * 3) + 2] = (ai_real) key.mValue.z;
values[(i * 3) + 0] = (ai_real)key.mValue.x;
values[(i * 3) + 1] = (ai_real)key.mValue.y;
values[(i * 3) + 2] = (ai_real)key.mValue.z;
}
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);
@ -1544,9 +1554,9 @@ inline void ExtractScaleSampler(Asset &asset, std::string &animId, Ref<Buffer> &
const aiVectorKey &key = nodeChannel->mScalingKeys[i];
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
values[(i * 3) + 0] = (ai_real) key.mValue.x;
values[(i * 3) + 1] = (ai_real) key.mValue.y;
values[(i * 3) + 2] = (ai_real) key.mValue.z;
values[(i * 3) + 0] = (ai_real)key.mValue.x;
values[(i * 3) + 1] = (ai_real)key.mValue.y;
values[(i * 3) + 2] = (ai_real)key.mValue.z;
}
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);
@ -1563,10 +1573,10 @@ inline void ExtractRotationSampler(Asset &asset, std::string &animId, Ref<Buffer
const aiQuatKey &key = nodeChannel->mRotationKeys[i];
// mTime is measured in ticks, but GLTF time is measured in seconds, so convert.
times[i] = static_cast<float>(key.mTime / ticksPerSecond);
values[(i * 4) + 0] = (ai_real) key.mValue.x;
values[(i * 4) + 1] = (ai_real) key.mValue.y;
values[(i * 4) + 2] = (ai_real) key.mValue.z;
values[(i * 4) + 3] = (ai_real) key.mValue.w;
values[(i * 4) + 0] = (ai_real)key.mValue.x;
values[(i * 4) + 1] = (ai_real)key.mValue.y;
values[(i * 4) + 2] = (ai_real)key.mValue.z;
values[(i * 4) + 3] = (ai_real)key.mValue.w;
}
sampler.input = GetSamplerInputRef(asset, animId, buffer, times);

View File

@ -234,7 +234,8 @@ inline void SetMaterialTextureProperty(std::vector<int> &embeddedTexIdxs, Asset
SetMaterialTextureProperty(embeddedTexIdxs, r, (glTF2::TextureInfo)prop, mat, texType, texSlot);
if (prop.texture && prop.texture->source) {
mat->AddProperty(&prop.strength, 1, AI_MATKEY_GLTF_TEXTURE_STRENGTH(texType, texSlot));
std::string textureStrengthKey = std::string(_AI_MATKEY_TEXTURE_BASE) + "." + "strength";
mat->AddProperty(&prop.strength, 1, textureStrengthKey.c_str(), texType, texSlot);
}
}

View File

@ -1420,9 +1420,8 @@ if(MSVC AND ASSIMP_INSTALL_PDB)
COMPILE_PDB_NAME assimp${LIBRARY_SUFFIX}
COMPILE_PDB_NAME_DEBUG assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}
)
ENDIF()
IF(CMAKE_GENERATOR MATCHES "^Visual Studio")
IF(GENERATOR_IS_MULTI_CONFIG)
install(FILES ${Assimp_BINARY_DIR}/code/Debug/assimp${LIBRARY_SUFFIX}${CMAKE_DEBUG_POSTFIX}.pdb
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
CONFIGURATIONS Debug
@ -1441,6 +1440,11 @@ if(MSVC AND ASSIMP_INSTALL_PDB)
CONFIGURATIONS RelWithDebInfo
)
ENDIF()
ELSE()
install(FILES $<TARGET_PDB_FILE:assimp>
DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
)
ENDIF()
ENDIF ()
if (ASSIMP_COVERALLS)

View File

@ -312,12 +312,7 @@ std::string BaseImporter::GetExtension(const std::string &pFile) {
if (!pIOHandler) {
return false;
}
union {
const char *magic;
const uint16_t *magic_u16;
const uint32_t *magic_u32;
};
magic = reinterpret_cast<const char *>(_magic);
const char *magic = reinterpret_cast<const char *>(_magic);
std::unique_ptr<IOStream> pStream(pIOHandler->Open(pFile));
if (pStream) {
@ -339,15 +334,15 @@ std::string BaseImporter::GetExtension(const std::string &pFile) {
// that's just for convenience, the chance that we cause conflicts
// is quite low and it can save some lines and prevent nasty bugs
if (2 == size) {
uint16_t rev = *magic_u16;
ByteSwap::Swap(&rev);
if (data_u16[0] == *magic_u16 || data_u16[0] == rev) {
uint16_t magic_u16;
memcpy(&magic_u16, magic, 2);
if (data_u16[0] == magic_u16 || data_u16[0] == ByteSwap::Swapped(magic_u16)) {
return true;
}
} else if (4 == size) {
uint32_t rev = *magic_u32;
ByteSwap::Swap(&rev);
if (data_u32[0] == *magic_u32 || data_u32[0] == rev) {
uint32_t magic_u32;
memcpy(&magic_u32, magic, 4);
if (data_u32[0] == magic_u32 || data_u32[0] == ByteSwap::Swapped(magic_u32)) {
return true;
}
} else {
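Replacing the type-punning union with `memcpy` keeps the magic-token comparison free of strict-aliasing undefined behaviour; a minimal sketch of the safe pattern (hypothetical helper):

```cpp
#include <cstdint>
#include <cstring>

// Read the first four bytes of a buffer as a 32-bit token; memcpy is the
// well-defined way to reinterpret raw bytes, unlike a union or pointer cast.
inline std::uint32_t ReadMagicU32(const char *bytes) {
    std::uint32_t value = 0;
    std::memcpy(&value, bytes, sizeof(value));
    return value;
}
```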

View File

@ -15,8 +15,10 @@ option( DDL_STATIC_LIBRARY "Deprecated, use BUILD_SHARED_LIBS instead."
# for backwards compatibility use DDL_STATIC_LIBRARY as initial value for cmake variable BUILD_SHARED_LIBS
# https://cmake.org/cmake/help/latest/variable/BUILD_SHARED_LIBS.html
if ( DDL_STATIC_LIBRARY )
message("Building shared lib.")
set ( build_shared_libs_default OFF )
else()
message("Building static lib.")
set ( build_shared_libs_default ON )
endif()
option( DDL_BUILD_SHARED_LIBS "Set to ON to build shared libary of OpenDDL Parser." ${build_shared_libs_default} )
@ -36,6 +38,7 @@ endif()
add_definitions( -D_VARIADIC_MAX=10 )
add_definitions( -DGTEST_HAS_PTHREAD=0 )
if ( DDL_DEBUG_OUTPUT )
message("Enable debug output.")
add_definitions( -DDDL_DEBUG_HEADER_NAME)
endif()
@ -62,10 +65,12 @@ if (COVERALLS)
include(Coveralls)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
message("Enable coveralls.")
endif()
# Include the doc component.
if(DDL_DOCUMENTATION)
message("Generate doxygen documentation.")
find_package(Doxygen REQUIRED)
CONFIGURE_FILE( doc/openddlparser_doc.in doc/doxygenfile @ONLY )
add_custom_target(doc ALL

View File

@ -5,13 +5,15 @@ The OpenDDL-Parser is a small and easy to use library for OpenDDL-file-format-pa
Build status
============
Linux build status: [![Build Status](https://travis-ci.org/kimkulling/openddl-parser.png)](https://travis-ci.org/kimkulling/openddl-parser)
Linux build status: [![Build Status](https://travis-ci.com/kimkulling/openddl-parser.svg?branch=master)](https://travis-ci.com/kimkulling/openddl-parser)
Current coverity check status:
<a href="https://scan.coverity.com/projects/5606">
<img alt="Coverity Scan Build Status"
src="https://scan.coverity.com/projects/5606/badge.svg"/>
</a>
Current test coverage:[![Coverage Status](https://coveralls.io/repos/github/kimkulling/openddl-parser/badge.svg?branch=master)](https://coveralls.io/github/kimkulling/openddl-parser?branch=cpp_coveralls)
Get the source code
===================
You can get the code from our git repository, which is located at GitHub. You can clone the repository with the following command:
@ -57,11 +59,11 @@ USE_ODDLPARSER_NS;
int main( int argc, char *argv[] ) {
if( argc < 3 ) {
return 1;
return Error;
}
char *filename( nullptr );
if( 0 == strncmp( FileOption, argv[ 1 ], strlen( FileOption ) ) ) {
if( 0 == strncmp( FileOption, argv[1], strlen( FileOption ) ) ) {
filename = argv[ 2 ];
}
std::cout << "file to import: " << filename << std::endl;
@ -73,24 +75,27 @@ int main( int argc, char *argv[] ) {
FILE *fileStream = fopen( filename, "r+" );
if( NULL == fileStream ) {
std::cerr << "Cannot open file " << filename << std::endl;
return 1;
return Error;
}
// obtain file size:
fseek( fileStream, 0, SEEK_END );
const size_t size( ftell( fileStream ) );
const size_t size = ftell( fileStream );
rewind( fileStream );
if( size > 0 ) {
char *buffer = new char[ size ];
const size_t readSize( fread( buffer, sizeof( char ), size, fileStream ) );
const size_t readSize = fread( buffer, sizeof( char ), size, fileStream );
assert( readSize == size );
// Set the memory buffer
OpenDDLParser theParser;
theParser.setBuffer( buffer, size );
const bool result( theParser.parse() );
if( !result ) {
if( !theParser.parse() ) {
std::cerr << "Error while parsing file " << filename << "." << std::endl;
return Error;
}
}
return 0;
}
@ -106,9 +111,9 @@ theParser.setBuffer( buffer, size );
const bool result( theParser.parse() );
if ( result ) {
DDLNode *root = theParser.getRoot();
DDLNode::DllNodeList childs = root->getChildNodeList();
for ( size_t i=0; i<childs.size(); i++ ) {
DDLNode *child = childs[ i ];
DDLNode::DllNodeList children = root->getChildNodeList();
for ( size_t i=0; i<children.size(); i++ ) {
DDLNode *child = children[ i ];
Property *prop = child->getProperty(); // to get properties
std::string type = child->getType(); // to get the node type
Value *values = child->getValue(); // to get the data;

View File

@ -134,9 +134,10 @@ bool OpenDDLExport::writeToStream(const std::string &statement) {
}
bool OpenDDLExport::writeNode(DDLNode *node, std::string &statement) {
bool success(true);
writeNodeHeader(node, statement);
if (node->hasProperties()) {
writeProperties(node, statement);
success = writeProperties(node, statement);
}
writeLineEnd(statement);
@ -160,7 +161,7 @@ bool OpenDDLExport::writeNode(DDLNode *node, std::string &statement) {
writeToStream(statement);
return true;
return success;
}
bool OpenDDLExport::writeNodeHeader(DDLNode *node, std::string &statement) {

View File

@ -30,7 +30,10 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <sstream>
#ifdef _WIN32
#include <windows.h>
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
#endif // _WIN32
BEGIN_ODDLPARSER_NS
@ -71,7 +74,7 @@ const char *getTypeToken(Value::ValueType type) {
return Grammar::PrimitiveTypeToken[(size_t)type];
}
static void logInvalidTokenError(char *in, const std::string &exp, OpenDDLParser::logCallback callback) {
static void logInvalidTokenError(const char *in, const std::string &exp, OpenDDLParser::logCallback callback) {
if (callback) {
std::string full(in);
std::string part(full.substr(0, 50));
@ -338,6 +341,7 @@ char *OpenDDLParser::parseStructure(char *in, char *end) {
bool error(false);
in = lookForNextToken(in, end);
if (in != end) {
if (*in == *Grammar::OpenBracketToken) {
// loop over all children ( data and nodes )
do {
@ -345,14 +349,18 @@ char *OpenDDLParser::parseStructure(char *in, char *end) {
if (in == nullptr) {
return nullptr;
}
} while (*in != *Grammar::CloseBracketToken);
} while (in != end &&
*in != *Grammar::CloseBracketToken);
if (in != end) {
++in;
}
} else {
++in;
logInvalidTokenError(in, std::string(Grammar::OpenBracketToken), m_logCallback);
error = true;
return nullptr;
}
}
in = lookForNextToken(in, end);
// pop node from stack after successful parsing
@ -418,8 +426,8 @@ char *OpenDDLParser::parseStructureBody(char *in, char *end, bool &error) {
}
in = lookForNextToken(in, end);
if (*in != '}') {
logInvalidTokenError(in, std::string(Grammar::CloseBracketToken), m_logCallback);
if (in == end || *in != '}') {
logInvalidTokenError(in == end ? "" : in, std::string(Grammar::CloseBracketToken), m_logCallback);
return nullptr;
} else {
//in++;
@ -455,7 +463,7 @@ DDLNode *OpenDDLParser::top() {
return nullptr;
}
DDLNode *top(m_stack.back());
DDLNode *top = m_stack.back();
return top;
}
@ -647,12 +655,15 @@ char *OpenDDLParser::parseBooleanLiteral(char *in, char *end, Value **boolean) {
in = lookForNextToken(in, end);
char *start(in);
size_t len(0);
while (!isSeparator(*in) && in != end) {
++in;
++len;
}
int res = ::strncmp(Grammar::BoolTrue, start, strlen(Grammar::BoolTrue));
int res = ::strncmp(Grammar::BoolTrue, start, len);
if (0 != res) {
res = ::strncmp(Grammar::BoolFalse, start, strlen(Grammar::BoolFalse));
res = ::strncmp(Grammar::BoolFalse, start, len);
if (0 != res) {
*boolean = nullptr;
return in;
@ -733,7 +744,7 @@ char *OpenDDLParser::parseFloatingLiteral(char *in, char *end, Value **floating,
in = lookForNextToken(in, end);
char *start(in);
while (!isSeparator(*in) && in != end) {
while (in != end && !isSeparator(*in)) {
++in;
}
@ -838,6 +849,13 @@ char *OpenDDLParser::parseHexaLiteral(char *in, char *end, Value **data) {
int value(0);
while (pos > 0) {
int v = hex2Decimal(*start);
if (v < 0) {
while (isEndofLine(*in)) {
++in;
}
return in;
}
--pos;
value = (value << 4) | v;
++start;
@ -901,10 +919,10 @@ char *OpenDDLParser::parseDataList(char *in, char *end, Value::ValueType type, V
}
in = lookForNextToken(in, end);
if (*in == '{') {
if (in != end && *in == '{') {
++in;
Value *current(nullptr), *prev(nullptr);
while ('}' != *in) {
while (in != end && '}' != *in) {
current = nullptr;
in = lookForNextToken(in, end);
if (Value::ValueType::ddl_ref == type) {
@ -962,10 +980,11 @@ char *OpenDDLParser::parseDataList(char *in, char *end, Value::ValueType type, V
}
in = getNextSeparator(in, end);
if (',' != *in && Grammar::CloseBracketToken[0] != *in && !isSpace(*in)) {
if (in == end || (',' != *in && Grammar::CloseBracketToken[0] != *in && !isSpace(*in))) {
break;
}
}
if (in != end)
++in;
}
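The common thread in these parser hunks is testing `in != end` before every dereference, so a truncated buffer can no longer be read past its end; a minimal sketch of the guarded-scan pattern (hypothetical helper, not the parser's own API):

```cpp
// Advance a cursor while a predicate holds, never reading past 'end'.
template <class T, class Pred>
T *ScanWhile(T *in, T *end, Pred keepGoing) {
    while (in != end && keepGoing(*in)) {   // the bound check must come first
        ++in;
    }
    return in;                              // may equal 'end'; callers must re-check
}
```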

View File

@ -26,8 +26,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include <string>
#include <vector>
#include <stdio.h>
#include <string.h>
#include <cstdio>
#include <cstring>
#ifndef _WIN32
#include <inttypes.h>
#endif

View File

@ -40,15 +40,6 @@ struct Identifier;
struct Reference;
struct Property;
template <class T>
inline bool isEmbeddedCommentOpenTag(T *in, T *end) {
if (in == '/' && in + 1 == '*') {
return true;
}
return false;
}
/// @brief Utility function to search for the next token or the end of the buffer.
/// @param in [in] The start position in the buffer.
/// @param end [in] The end position in the buffer.

View File

@ -54,7 +54,9 @@ inline bool isSeparator(T in) {
return false;
}
static const unsigned char chartype_table[256] = {
const size_t CharTableSize = 256;
static const unsigned char chartype_table[CharTableSize] = {
0,
0,
0,
@ -318,6 +320,10 @@ static const unsigned char chartype_table[256] = {
template <class T>
inline bool isNumeric(const T in) {
if (static_cast<size_t>(in) >= CharTableSize) {
return '\0';
}
size_t idx = static_cast<size_t>(in);
return idx < sizeof(chartype_table) && (chartype_table[idx] == 1);
}
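The new guard keeps signed or wide character values from indexing outside the 256-entry classification table; the same lookup written with an explicit boolean result (assumed simplification):

```cpp
#include <cstddef>

constexpr std::size_t kCharTableSize = 256;

// Bounds-checked table lookup: any code point outside the table is non-numeric.
template <class T>
inline bool IsNumericChecked(const T in, const unsigned char (&table)[kCharTableSize]) {
    const std::size_t idx = static_cast<std::size_t>(in);
    return idx < kCharTableSize && table[idx] == 1;
}
```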
@ -433,7 +439,7 @@ inline bool isEndofLine(const T in) {
template <class T>
inline static T *getNextSeparator(T *in, T *end) {
while (!isSeparator(*in) || in == end) {
while (in != end && !isSeparator(*in)) {
++in;
}
return in;

View File

@ -302,7 +302,9 @@ bool TXmlParser<TNodeType>::parse(IOStream *stream) {
stream->Read(&mData[0], 1, len);
mDoc = new pugi::xml_document();
pugi::xml_parse_result parse_result = mDoc->load_string(&mData[0], pugi::parse_full);
// load_string assumes native encoding (aka always utf-8 per build options)
//pugi::xml_parse_result parse_result = mDoc->load_string(&mData[0], pugi::parse_full);
pugi::xml_parse_result parse_result = mDoc->load_buffer(&mData[0], mData.size(), pugi::parse_full);
if (parse_result.status == pugi::status_ok) {
return true;
}
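`load_buffer` takes an explicit size and auto-detects the document encoding, whereas `load_string` assumes the native `char_t` encoding; a minimal usage sketch (assumed byte source):

```cpp
#include <pugixml.hpp>
#include <vector>

// Parse raw XML bytes of unknown encoding; load_buffer detects BOM/encoding,
// while load_string would assume the build's native character encoding.
bool ParseXmlBytes(const std::vector<char> &data, pugi::xml_document &doc) {
    const pugi::xml_parse_result result = doc.load_buffer(data.data(), data.size(), pugi::parse_full);
    return result.status == pugi::status_ok;
}
```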

View File

@ -1035,6 +1035,15 @@ enum aiComponent
*/
#define AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION "IMPORT_COLLADA_IGNORE_UP_DIRECTION"
// ---------------------------------------------------------------------------
/** @brief Specifies whether the Collada loader will ignore the provided unit size.
*
* If this property is set to true, the unit size provided in the file header will
* be ignored and the file will be loaded without scaling the assets.
* Property type: Bool. Default value: false.
*/
#define AI_CONFIG_IMPORT_COLLADA_IGNORE_UNIT_SIZE "IMPORT_COLLADA_IGNORE_UNIT_SIZE"
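A minimal usage sketch for the new property (hypothetical file name), assuming the regular `Importer::SetPropertyBool` API:

```cpp
#include <assimp/Importer.hpp>
#include <assimp/config.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

// Load a Collada file without applying the <unit> scale from its header.
const aiScene *LoadWithoutUnitScale(Assimp::Importer &importer, const char *path) {
    importer.SetPropertyBool(AI_CONFIG_IMPORT_COLLADA_IGNORE_UNIT_SIZE, true);
    return importer.ReadFile(path, aiProcess_Triangulate);
}
```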
// ---------------------------------------------------------------------------
/** @brief Specifies whether the Collada loader should use Collada names.
*

View File

@ -211,6 +211,7 @@ AI_FORCE_INLINE aiReturn aiMaterial::Get(const char* pKey,unsigned int type,
unsigned int idx,aiColor3D& pOut) const {
aiColor4D c;
const aiReturn ret = aiGetMaterialColor(this,pKey,type,idx,&c);
if (ret == aiReturn_SUCCESS)
pOut = aiColor3D(c.r,c.g,c.b);
return ret;
}

View File

@ -94,6 +94,7 @@ enum aiPostProcessSteps
* indexed geometry, this step is compulsory or you'll just waste rendering
* time. <b>If this flag is not specified</b>, no vertices are referenced by
* more than one face and <b>no index buffer is required</b> for rendering.
* The exception is when the importer (such as the PLY importer) had to split vertices, in which case an index buffer is still required.
*/
aiProcess_JoinIdenticalVertices = 0x2,
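A minimal usage sketch (hypothetical path): the flag is combined with other post-process steps in the `ReadFile` call, and the resulting index buffer should always be used, since formats such as PLY may still force split vertices.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

// Request indexed geometry; even then, always render through the index buffer.
const aiScene *LoadIndexed(Assimp::Importer &importer, const char *path) {
    return importer.ReadFile(path, aiProcess_JoinIdenticalVertices | aiProcess_Triangulate);
}
```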