Fixes spelling in comments
parent a3a1c82380
commit 0c7aac02eb
@@ -182,7 +182,7 @@ void Discreet3DSImporter::InternReadFile( const std::string& pFile,
 ParseMainChunk();

 // Process all meshes in the file. First check whether all
-// face indices haev valid values. The generate our
+// face indices have valid values. The generate our
 // internal verbose representation. Finally compute normal
 // vectors from the smoothing groups we read from the
 // file.

@@ -679,7 +679,7 @@ void Discreet3DSImporter::ParseHierarchyChunk(uint16_t parent)

 if ( pcNode)
 {
-// if the source is not a CHUNK_TRACKINFO block it wont be an object instance
+// if the source is not a CHUNK_TRACKINFO block it won't be an object instance
 if (parent != Discreet3DS::CHUNK_TRACKINFO)
 {
 mCurrentNode = pcNode;

@@ -590,7 +590,7 @@ std::list<unsigned int> mesh_idx;
 do
 {
 vert_idx_from = VertexIndex_GetMinimal(face_list_cur, &vert_idx_to);
-if(vert_idx_from == vert_idx_to) break;// all indices are transfered,
+if(vert_idx_from == vert_idx_to) break;// all indices are transferred,

 vert_arr.push_back(pVertexCoordinateArray.at(vert_idx_from));
 col_arr.push_back(Vertex_CalculateColor(vert_idx_from));

@@ -743,7 +743,7 @@ std::list<aiNode*> ch_node;
 // \_ aiNode for transformation (<instance> -> <delta...>, <r...>) - aiNode for pointing to object ("objectid")
 con_node = new aiNode;
 con_node->mName = pConstellation.ID;
-// Walk thru children and search for instances of another objects, constellations.
+// Walk through children and search for instances of another objects, constellations.
 for(const CAMFImporter_NodeElement* ne: pConstellation.Child)
 {
 aiMatrix4x4 tmat;

@@ -815,7 +815,7 @@ std::list<CAMFImporter_NodeElement_Metadata*> meta_list;
 // Check if root element are found.
 if(root_el == nullptr) throw DeadlyImportError("Root(<amf>) element not found.");

-// after that walk thru children of root and collect data. Five types of nodes can be placed at top level - in <amf>: <object>, <material>, <texture>,
+// after that walk through children of root and collect data. Five types of nodes can be placed at top level - in <amf>: <object>, <material>, <texture>,
 // <constellation> and <metadata>. But at first we must read <material> and <texture> because they will be used in <object>. <metadata> can be read
 // at any moment.
 //

@@ -870,7 +870,7 @@ nl_clean_loop:

 if(node_list.size() > 1)
 {
-// walk thru all nodes
+// walk through all nodes
 for(std::list<aiNode*>::iterator nl_it = node_list.begin(); nl_it != node_list.end(); nl_it++)
 {
 // and try to find them in another top nodes.

@@ -959,7 +959,7 @@ nl_clean_loop:
 idx++;
 }
 }// if(pScene->mNumTextures > 0)
-}// END: after that walk thru children of root and collect data
+}// END: after that walk through children of root and collect data

 }// namespace Assimp

@@ -654,7 +654,7 @@ void ColladaParser::ReadController( Collada::Controller& pController)
 // two types of controllers: "skin" and "morph". Only the first one is relevant, we skip the other
 if( IsElement( "morph"))
 {
-// should skip everything inside, so there's no danger of catching elements inbetween
+// should skip everything inside, so there's no danger of catching elements in between
 SkipElement();
 }
 else if( IsElement( "skin"))

@@ -2827,7 +2827,7 @@ void ColladaParser::ReadNodeGeometry( Node* pNode)

 if( !mReader->isEmptyElement())
 {
-// read material associations. Ignore additional elements inbetween
+// read material associations. Ignore additional elements in between
 while( mReader->read())
 {
 if( mReader->getNodeType() == irr::io::EXN_ELEMENT)

@@ -781,7 +781,7 @@ void LWOImporter::LoadLWO2Polygons(unsigned int length)
 // Determine the type of the polygons
 switch (type)
 {
-// read unsupported stuff too (although we wont process it)
+// read unsupported stuff too (although we won't process it)
 case AI_LWO_MBAL:
 DefaultLogger::get()->warn("LWO2: Encountered unsupported primitive chunk (METABALL)");
 break;

@@ -498,7 +498,7 @@ void OgreBinarySerializer::NormalizeBoneWeights(VertexData *vertexData) const
 }

 /** Normalize bone weights.
-Some exporters wont care if the sum of all bone weights
+Some exporters won't care if the sum of all bone weights
 for a single vertex equals 1 or not, so validate here. */
 const float epsilon = 0.05f;
 for (const uint32_t vertexIndex : influencedVertices)

@@ -654,7 +654,7 @@ void OgreXmlSerializer::ReadBoneAssignments(VertexDataXml *dest)
 }

 /** Normalize bone weights.
-Some exporters wont care if the sum of all bone weights
+Some exporters won't care if the sum of all bone weights
 for a single vertex equals 1 or not, so validate here. */
 const float epsilon = 0.05f;
 for (const uint32_t vertexIndex : influencedVertices)

@@ -820,7 +820,7 @@ bool PLY::PropertyInstance::ParseValue(

 case EDT_Float:
 // technically this should cast to float, but people tend to use float descriptors for double data
-// this is the best way to not risk loosing precision on import and it doesn't hurt to do this
+// this is the best way to not risk losing precision on import and it doesn't hurt to do this
 ai_real f;
 pCur = fast_atoreal_move<ai_real>(pCur,f);
 out->fFloat = (ai_real)f;

@@ -57,7 +57,7 @@ using namespace Assimp::Formatter;
 // Constructor
 SplitByBoneCountProcess::SplitByBoneCountProcess()
 {
-// set default, might be overriden by importer config
+// set default, might be overridden by importer config
 mMaxBoneCount = AI_SBBC_DEFAULT_MAX_BONES;
 }

@@ -148,7 +148,7 @@ bool X3DImporter::FindNodeElement(const std::string& pID, const CX3DImporter_Nod
 CX3DImporter_NodeElement* tnd = NodeElement_Cur;// temporary pointer to node.
 bool static_search = false;// flag: true if searching in static node.

-// At first check if we have deal with static node. Go up thru parent nodes and check flag.
+// At first check if we have deal with static node. Go up through parent nodes and check flag.
 while(tnd != nullptr)
 {
 if(tnd->Type == CX3DImporter_NodeElement::ENET_Group)

@@ -134,7 +134,7 @@ void X3DImporter::ParseNode_Geometry3D_Cone()
 }
 else
 {
-const unsigned int tess = 30;///TODO: IME tesselation factor thru ai_property
+const unsigned int tess = 30;///TODO: IME tesselation factor through ai_property

 std::vector<aiVector3D> tvec;// temp array for vertices.

@@ -207,7 +207,7 @@ void X3DImporter::ParseNode_Geometry3D_Cylinder()
 }
 else
 {
-const unsigned int tess = 30;///TODO: IME tesselation factor thru ai_property
+const unsigned int tess = 30;///TODO: IME tesselation factor through ai_property

 std::vector<aiVector3D> tside;// temp array for vertices of side.
 std::vector<aiVector3D> tcir;// temp array for vertices of circle.

@@ -516,7 +516,7 @@ static aiVector3D GeometryHelper_Extrusion_GetNextZ(const size_t pSpine_PointIdx

 // As said: "If the Z-axis of the first point is undefined (because the spine is not closed and the first two spine segments are collinear)
 // then the Z-axis for the first spine point with a defined Z-axis is used."
-// Walk thru spine and find Z.
+// Walk through spine and find Z.
 for(size_t next_point = 2; (next_point <= spine_idx_last) && !found; next_point++)
 {
 // (pSpine[2] - pSpine[1]) ^ (pSpine[0] - pSpine[1])

@@ -679,7 +679,7 @@ void X3DImporter::ParseNode_Geometry3D_Extrusion()
 // How we done it at all?
 // 1. At first we will calculate array of basises for every point in spine(look SCP in ISO-dic). Also "orientation" vector
 // are applied vor every basis.
-// 2. After that we can create array of point sets: which are scaled, transfered to basis of relative basis and at final translated to real position
+// 2. After that we can create array of point sets: which are scaled, transferred to basis of relative basis and at final translated to real position
 // using relative spine point.
 // 3. Next step is creating CoordIdx array(do not forget "-1" delimiter). While creating CoordIdx also created faces for begin and end caps, if
 // needed. While createing CootdIdx is taking in account CCW flag.

@@ -749,7 +749,7 @@ void X3DImporter::ParseNode_Geometry3D_Extrusion()
 tcross[cri] = tvecX + tvecY + tvecZ + spine[spi];
 }// for(size_t cri = 0, cri_e = crossSection.size(); cri < cri_e; i++)

-pointset_arr[spi] = tcross;// store transfered point set
+pointset_arr[spi] = tcross;// store transferred point set
 }// for(size_t spi = 0, spi_e = spine.size(); spi < spi_e; i++)
 }// END: 2. Create array of point sets.

@@ -965,7 +965,7 @@ void X3DImporter::ParseNode_Geometry3D_Sphere()
 }
 else
 {
-const unsigned int tess = 3;///TODO: IME tesselation factor thru ai_property
+const unsigned int tess = 3;///TODO: IME tesselation factor through ai_property

 std::vector<aiVector3D> tlist;

@@ -85,7 +85,7 @@ aiMatrix4x4 X3DImporter::PostprocessHelper_Matrix_GlobalToCurrent() const

 void X3DImporter::PostprocessHelper_CollectMetadata(const CX3DImporter_NodeElement& pNodeElement, std::list<CX3DImporter_NodeElement*>& pList) const
 {
-// walk thru childs and find for metadata.
+// walk through childs and find for metadata.
 for(std::list<CX3DImporter_NodeElement*>::const_iterator el_it = pNodeElement.Child.begin(); el_it != pNodeElement.Child.end(); el_it++)
 {
 if(((*el_it)->Type == CX3DImporter_NodeElement::ENET_MetaBoolean) || ((*el_it)->Type == CX3DImporter_NodeElement::ENET_MetaDouble) ||

@@ -190,7 +190,7 @@ void X3DImporter::Postprocess_BuildMaterial(const CX3DImporter_NodeElement& pNod
 *pMaterial = new aiMaterial;
 aiMaterial& taimat = **pMaterial;// creating alias for convenience.

-// at this point pNodeElement point to <Appearance> node. Walk thru childs and add all stored data.
+// at this point pNodeElement point to <Appearance> node. Walk through childs and add all stored data.
 for(std::list<CX3DImporter_NodeElement*>::const_iterator el_it = pNodeElement.Child.begin(); el_it != pNodeElement.Child.end(); el_it++)
 {
 if((*el_it)->Type == CX3DImporter_NodeElement::ENET_Material)

@@ -627,10 +627,10 @@ void X3DImporter::Postprocess_BuildNode(const CX3DImporter_NodeElement& pNodeEle
 }
 else
 {
-for(size_t i = 0; i < (size_t)tne_group.Choice; i++) chit_begin++;// forward iterator to choosed node.
+for(size_t i = 0; i < (size_t)tne_group.Choice; i++) chit_begin++;// forward iterator to choosen node.

 chit_end = chit_begin;
-chit_end++;// point end iterator to next element after choosed.
+chit_end++;// point end iterator to next element after choosen.
 }
 }// if(tne_group.UseChoice)
 }// if(pNodeElement.Type == CX3DImporter_NodeElement::ENET_Group)

@@ -764,7 +764,7 @@ void X3DImporter::Postprocess_CollectMetadata(const CX3DImporter_NodeElement& pN
 throw DeadlyImportError( "Postprocess. MetaData member in node are not nullptr. Something went wrong." );
 }

 // copy collected metadata to output node.
 pSceneNode.mMetaData = aiMetadata::Alloc( static_cast<unsigned int>(meta_list.size()) );
 meta_idx = 0;
 for(std::list<CX3DImporter_NodeElement*>::const_iterator it = meta_list.begin(); it != meta_list.end(); it++, meta_idx++)

@@ -730,7 +730,7 @@ namespace glTF
 enum EType
 {
 #ifdef ASSIMP_IMPORTER_GLTF_USE_OPEN3DGC
-Compression_Open3DGC,///< Compression of mesh data using Open3DGC algorythm.
+Compression_Open3DGC,///< Compression of mesh data using Open3DGC algorithm.
 #endif

 Unknown

@@ -752,7 +752,7 @@ namespace glTF

 #ifdef ASSIMP_IMPORTER_GLTF_USE_OPEN3DGC
 /// \struct SCompression_Open3DGC
-/// Compression of mesh data using Open3DGC algorythm.
+/// Compression of mesh data using Open3DGC algorithm.
 struct SCompression_Open3DGC : public SExtension
 {
 using SExtension::Type;

@@ -270,7 +270,7 @@ void glTFImporter::ImportMeshes(glTF::Asset& r)
 // Limitations for meshes when using Open3DGC-compression.
 // It's a current limitation of sp... Specification have not this part still - about mesh compression. Why only one primitive?
 // Because glTF is very flexibly. But in fact it ugly flexible. Every primitive can has own set of accessors and accessors can
-// point to a-a-a-a-any part of buffer (thru bufferview ofcourse) and even to another buffer. We know that "Open3DGC-compression"
+// point to a-a-a-a-any part of buffer (through bufferview of course) and even to another buffer. We know that "Open3DGC-compression"
 // is applicable only to part of buffer. As we can't guaranty continuity of the data for decoder, we will limit quantity of primitives.
 // Yes indices, coordinates etc. still can br stored in different buffers, but with current specification it's a exporter problem.
 // Also primitive can has only one of "POSITION", "NORMAL" and less then "AI_MAX_NUMBER_OF_TEXTURECOORDS" of "TEXCOORD". All accessor