fixed empty buffer stream write, removed some cast warnings.
parent 3390185270
commit 9d813a48b0
@@ -253,7 +253,7 @@ private:
 MatIdArray = it->second;
 }
 }
-MatIdArray.push_back( newMatIdx );
+MatIdArray.push_back( (unsigned int)newMatIdx );
 mMatId2MatArray[ mActiveMatGroup ] = MatIdArray;
 }
@@ -76,7 +76,7 @@ bool DefaultIOSystem::Exists( const char* pFile) const
 #ifdef _WIN32
 wchar_t fileName16[PATHLIMIT];

-bool isUnicode = IsTextUnicode(pFile, strlen(pFile), NULL);
+bool isUnicode = IsTextUnicode(pFile, int(strlen(pFile)), NULL);
 if (isUnicode) {

 MultiByteToWideChar(CP_UTF8, MB_PRECOMPOSED, pFile, -1, fileName16, PATHLIMIT);
@@ -110,7 +110,7 @@ IOStream* DefaultIOSystem::Open( const char* strFile, const char* strMode)
 FILE* file;
 #ifdef _WIN32
 wchar_t fileName16[PATHLIMIT];
-bool isUnicode = IsTextUnicode(strFile, strlen(strFile), NULL );
+bool isUnicode = IsTextUnicode(strFile, int(strlen(strFile)), NULL );
 if (isUnicode) {
 MultiByteToWideChar(CP_UTF8, MB_PRECOMPOSED, strFile, -1, fileName16, PATHLIMIT);
 std::string mode8(strMode);
@@ -158,7 +158,7 @@ inline static void MakeAbsolutePath (const char* in, char* _out)
 {
 ai_assert(in && _out);
 #if defined( _MSC_VER ) || defined( __MINGW32__ )
-bool isUnicode = IsTextUnicode(in, strlen(in), NULL);
+bool isUnicode = IsTextUnicode(in, int(strlen(in)), NULL);
 if (isUnicode) {
 wchar_t out16[PATHLIMIT];
 wchar_t in16[PATHLIMIT];
@@ -99,22 +99,22 @@ void EmbedTexturesProcess::Execute(aiScene* pScene) {
 }

 bool EmbedTexturesProcess::addTexture(aiScene* pScene, std::string path) const {
-uint32_t imageSize = 0;
-std::string imagePath = path;
+std::streampos imageSize = 0;
+std::string imagePath = path;

 // Test path directly
 std::ifstream file(imagePath, std::ios::binary | std::ios::ate);
-if ((imageSize = file.tellg()) == -1u) {
+if ((imageSize = file.tellg()) == std::streampos(-1)) {
 DefaultLogger::get()->warn("EmbedTexturesProcess: Cannot find image: " + imagePath + ". Will try to find it in root folder.");

 // Test path in root path
 imagePath = mRootPath + path;
 file.open(imagePath, std::ios::binary | std::ios::ate);
-if ((imageSize = file.tellg()) == -1u) {
+if ((imageSize = file.tellg()) == std::streampos(-1)) {
 // Test path basename in root path
 imagePath = mRootPath + path.substr(path.find_last_of("\\/") + 1u);
 file.open(imagePath, std::ios::binary | std::ios::ate);
-if ((imageSize = file.tellg()) == -1u) {
+if ((imageSize = file.tellg()) == std::streampos(-1)) {
 DefaultLogger::get()->error("EmbedTexturesProcess: Unable to embed texture: " + path + ".");
 return false;
 }
@@ -134,7 +134,7 @@ bool EmbedTexturesProcess::addTexture(aiScene* pScene, std::string path) const {
 // Add the new texture
 auto pTexture = new aiTexture();
 pTexture->mHeight = 0; // Means that this is still compressed
-pTexture->mWidth = imageSize;
+pTexture->mWidth = uint32_t(imageSize);
 pTexture->pcData = imageContent;

 auto extension = path.substr(path.find_last_of('.') + 1u);
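A note on the EmbedTexturesProcess hunks above: std::ifstream::tellg() returns std::streampos and signals failure as std::streampos(-1), so keeping imageSize as a uint32_t forced an implicit narrowing conversion and a comparison against -1u that only worked through unsigned wrap-around. A minimal standalone sketch of the pattern the fix follows (the file name and variable names here are illustrative, not taken from the repository):

    #include <cstdint>
    #include <fstream>
    #include <iostream>

    int main() {
        // Open at the end so tellg() immediately yields the file size.
        std::ifstream file("texture.png", std::ios::binary | std::ios::ate);

        // Keep the stream's own position type; failure is streampos(-1).
        std::streampos imageSize = file.tellg();
        if (imageSize == std::streampos(-1)) {
            std::cerr << "cannot determine image size\n";
            return 1;
        }

        // Narrow explicitly only where a 32-bit field is required,
        // as the diff does when filling aiTexture::mWidth.
        uint32_t byteCount = uint32_t(imageSize);
        std::cout << byteCount << " bytes\n";
        return 0;
    }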
@@ -704,7 +704,7 @@ void Converter::GenerateTransformationNodeChain( const Model& model, std::vector
 aiMatrix4x4::Scaling( GeometricScaling, chain[ TransformationComp_GeometricScaling ] );
 aiVector3D GeometricScalingInverse = GeometricScaling;
 bool canscale = true;
-for (size_t i = 0; i < 3; ++i) {
+for (unsigned int i = 0; i < 3; ++i) {
 if ( std::fabs( GeometricScalingInverse[i] ) > zero_epsilon ) {
 GeometricScalingInverse[i] = 1.0f / GeometricScaling[i];
 } else {
@@ -1888,11 +1888,11 @@ void Converter::SetShadingPropertiesCommon( aiMaterial* out_mat, const PropertyT

 // TransparentColor / TransparencyFactor... gee thanks FBX :rolleyes:
 const aiColor3D& Transparent = GetColorPropertyFactored( props, "TransparentColor", "TransparencyFactor", ok );
-float CalculatedOpacity = 1.0;
+float CalculatedOpacity = 1.0f;
 if ( ok ) {
 out_mat->AddProperty( &Transparent, 1, AI_MATKEY_COLOR_TRANSPARENT );
 // as calculated by FBX SDK 2017:
-CalculatedOpacity = 1.0 - ((Transparent.r + Transparent.g + Transparent.b) / 3.0);
+CalculatedOpacity = 1.0f - ((Transparent.r + Transparent.g + Transparent.b) / 3.0f);
 }

 // use of TransparencyFactor is inconsistent.
@@ -182,7 +182,7 @@ void FBX::Node::Begin(Assimp::StreamWriterLE &s)
 s.PutU4(0); // total property section length

 // node name
-s.PutU1(name.size()); // length of node name
+s.PutU1(uint8_t(name.size())); // length of node name
 s.PutString(name); // node name as raw bytes

 // property data comes after here
@@ -217,8 +217,8 @@ void FBX::Node::EndProperties(
 ai_assert(pos > property_start);
 size_t property_section_size = pos - property_start;
 s.Seek(start_pos + 4);
-s.PutU4(num_properties);
-s.PutU4(property_section_size);
+s.PutU4(uint32_t(num_properties));
+s.PutU4(uint32_t(property_section_size));
 s.Seek(pos);
 }
@@ -232,7 +232,7 @@ void FBX::Node::End(
 // now go back and write initial pos
 this->end_pos = s.Tell();
 s.Seek(start_pos);
-s.PutU4(end_pos);
+s.PutU4(uint32_t(end_pos));
 s.Seek(end_pos);
 }
@@ -251,9 +251,9 @@ void FBX::Node::WritePropertyNode(
 Node node(name);
 node.Begin(s);
 s.PutU1('d');
-s.PutU4(v.size()); // number of elements
+s.PutU4(uint32_t(v.size())); // number of elements
 s.PutU4(0); // no encoding (1 would be zip-compressed)
-s.PutU4(v.size() * 8); // data size
+s.PutU4(uint32_t(v.size()) * 8); // data size
 for (auto it = v.begin(); it != v.end(); ++it) { s.PutF8(*it); }
 node.EndProperties(s, 1);
 node.End(s, false);
@@ -271,9 +271,9 @@ void FBX::Node::WritePropertyNode(
 Node node(name);
 node.Begin(s);
 s.PutU1('i');
-s.PutU4(v.size()); // number of elements
+s.PutU4(uint32_t(v.size())); // number of elements
 s.PutU4(0); // no encoding (1 would be zip-compressed)
-s.PutU4(v.size() * 4); // data size
+s.PutU4(uint32_t(v.size()) * 4); // data size
 for (auto it = v.begin(); it != v.end(); ++it) { s.PutI4(*it); }
 node.EndProperties(s, 1);
 node.End(s, false);
@@ -127,8 +127,8 @@ FBX::Property::Property(const aiMatrix4x4& vm)
 : type('d'), data(8*16)
 {
 double* d = reinterpret_cast<double*>(data.data());
-for (size_t c = 0; c < 4; ++c) {
-for (size_t r = 0; r < 4; ++r) {
+for (unsigned int c = 0; c < 4; ++c) {
+for (unsigned int r = 0; r < 4; ++r) {
 d[4*c+r] = vm[r][c];
 }
 }
@@ -164,15 +164,15 @@ void FBX::Property::Dump(Assimp::StreamWriterLE &s)
 case 'L': s.PutI8(*(reinterpret_cast<int64_t*>(data.data()))); return;
 case 'S':
 case 'R':
-s.PutU4(data.size());
+s.PutU4(uint32_t(data.size()));
 for (size_t i = 0; i < data.size(); ++i) { s.PutU1(data[i]); }
 return;
 case 'i':
 N = data.size() / 4;
-s.PutU4(N); // number of elements
+s.PutU4(uint32_t(N)); // number of elements
 s.PutU4(0); // no encoding (1 would be zip-compressed)
 // TODO: compress if large?
-s.PutU4(data.size()); // data size
+s.PutU4(uint32_t(data.size())); // data size
 d = data.data();
 for (size_t i = 0; i < N; ++i) {
 s.PutI4((reinterpret_cast<int32_t*>(d))[i]);
@@ -180,10 +180,10 @@ void FBX::Property::Dump(Assimp::StreamWriterLE &s)
 return;
 case 'd':
 N = data.size() / 8;
-s.PutU4(N); // number of elements
+s.PutU4(uint32_t(N)); // number of elements
 s.PutU4(0); // no encoding (1 would be zip-compressed)
 // TODO: compress if large?
-s.PutU4(data.size()); // data size
+s.PutU4(uint32_t(data.size())); // data size
 d = data.data();
 for (size_t i = 0; i < N; ++i) {
 s.PutF8((reinterpret_cast<double*>(d))[i]);
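The repeated PutU4(uint32_t(...)) changes in the FBX node and property writers above follow from the binary FBX layout: array lengths and byte counts are stored as 32-bit unsigned fields, while std::vector::size() returns size_t, so the explicit narrowing cast silences the compiler's conversion warnings. A hedged sketch of that idea using a hypothetical checked helper (to_u32 is not part of the exporter, just an illustration):

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <stdexcept>
    #include <vector>

    // Hypothetical helper: narrow a size_t to the 32-bit count the FBX
    // binary format expects, failing loudly instead of silently truncating.
    static uint32_t to_u32(std::size_t n) {
        if (n > std::numeric_limits<uint32_t>::max()) {
            throw std::overflow_error("value does not fit a 32-bit FBX length field");
        }
        return static_cast<uint32_t>(n);
    }

    int main() {
        std::vector<double> v(16, 0.0);
        const uint32_t count = to_u32(v.size());      // number of elements
        const uint32_t bytes = to_u32(v.size() * 8);  // data size in bytes
        return (count == 16 && bytes == 128) ? 0 : 1;
    }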
@@ -460,7 +460,7 @@ size_t count_images(const aiScene* scene) {
 ){
 const aiTextureType textype = static_cast<aiTextureType>(tt);
 const size_t texcount = mat->GetTextureCount(textype);
-for (size_t j = 0; j < texcount; ++j) {
+for (unsigned int j = 0; j < texcount; ++j) {
 mat->GetTexture(textype, j, &texpath);
 images.insert(std::string(texpath.C_Str()));
 }
@@ -593,7 +593,7 @@ void FBXExporter::WriteDefinitions ()

 // Model / FbxNode
 // <~~ node heirarchy
-count = count_nodes(mScene->mRootNode) - 1; // (not counting root node)
+count = int32_t(count_nodes(mScene->mRootNode)) - 1; // (not counting root node)
 if (count) {
 n = FBX::Node("ObjectType", Property("Model"));
 n.AddChild("Count", count);
@@ -763,7 +763,7 @@ void FBXExporter::WriteDefinitions ()

 // Video / FbxVideo
 // one for each image file.
-count = count_images(mScene);
+count = int32_t(count_images(mScene));
 if (count) {
 n = FBX::Node("ObjectType", Property("Video"));
 n.AddChild("Count", count);
@@ -792,7 +792,7 @@ void FBXExporter::WriteDefinitions ()

 // Texture / FbxFileTexture
 // <~~ aiTexture
-count = count_textures(mScene);
+count = int32_t(count_textures(mScene));
 if (count) {
 n = FBX::Node("ObjectType", Property("Texture"));
 n.AddChild("Count", count);
@@ -848,7 +848,7 @@ void FBXExporter::WriteDefinitions ()
 }

 // Deformer
-count = count_deformers(mScene);
+count = int32_t(count_deformers(mScene));
 if (count) {
 n = FBX::Node("ObjectType", Property("Deformer"));
 n.AddChild("Count", count);
@@ -943,7 +943,7 @@ void FBXExporter::WriteObjects ()
 std::vector<int32_t> vertex_indices;
 // map of vertex value to its index in the data vector
 std::map<aiVector3D,size_t> index_by_vertex_value;
-size_t index = 0;
+int32_t index = 0;
 for (size_t vi = 0; vi < m->mNumVertices; ++vi) {
 aiVector3D vtx = m->mVertices[vi];
 auto elem = index_by_vertex_value.find(vtx);
@@ -955,7 +955,7 @@ void FBXExporter::WriteObjects ()
 flattened_vertices.push_back(vtx[2]);
 ++index;
 } else {
-vertex_indices.push_back(elem->second);
+vertex_indices.push_back(int32_t(elem->second));
 }
 }
 FBX::Node::WritePropertyNode(
@@ -1052,7 +1052,7 @@ void FBXExporter::WriteObjects ()
 std::vector<double> uv_data;
 std::vector<int32_t> uv_indices;
 std::map<aiVector3D,int32_t> index_by_uv;
-size_t index = 0;
+int32_t index = 0;
 for (size_t fi = 0; fi < m->mNumFaces; ++fi) {
 const aiFace &f = m->mFaces[fi];
 for (size_t pvi = 0; pvi < f.mNumIndices; ++pvi) {
@@ -1062,7 +1062,7 @@ void FBXExporter::WriteObjects ()
 if (elem == index_by_uv.end()) {
 index_by_uv[uv] = index;
 uv_indices.push_back(index);
-for (size_t x = 0; x < m->mNumUVComponents[uvi]; ++x) {
+for (unsigned int x = 0; x < m->mNumUVComponents[uvi]; ++x) {
 uv_data.push_back(uv[x]);
 }
 ++index;
@@ -1208,13 +1208,13 @@ void FBXExporter::WriteObjects ()
 // and usualy are completely ignored when loading.
 // One notable exception is the "Opacity" property,
 // which Blender uses as (1.0 - alpha).
-c.r = 0; c.g = 0; c.b = 0;
+c.r = 0.0f; c.g = 0.0f; c.b = 0.0f;
 m->Get(AI_MATKEY_COLOR_EMISSIVE, c);
 p.AddP70vector("Emissive", c.r, c.g, c.b);
-c.r = 0.2; c.g = 0.2; c.b = 0.2;
+c.r = 0.2f; c.g = 0.2f; c.b = 0.2f;
 m->Get(AI_MATKEY_COLOR_AMBIENT, c);
 p.AddP70vector("Ambient", c.r, c.g, c.b);
-c.r = 0.8; c.g = 0.8; c.b = 0.8;
+c.r = 0.8f; c.g = 0.8f; c.b = 0.8f;
 m->Get(AI_MATKEY_COLOR_DIFFUSE, c);
 p.AddP70vector("Diffuse", c.r, c.g, c.b);
 // The FBX SDK determines "Opacity" from transparency colour (RGB)
@@ -1223,29 +1223,29 @@ void FBXExporter::WriteObjects ()
 // so we should take it from AI_MATKEY_OPACITY if possible.
 // It might make more sense to use TransparencyFactor,
 // but Blender actually loads "Opacity" correctly, so let's use it.
-f = 1.0;
+f = 1.0f;
 if (m->Get(AI_MATKEY_COLOR_TRANSPARENT, c) == aiReturn_SUCCESS) {
-f = 1.0 - ((c.r + c.g + c.b) / 3);
+f = 1.0f - ((c.r + c.g + c.b) / 3.0f);
 }
 m->Get(AI_MATKEY_OPACITY, f);
 p.AddP70double("Opacity", f);
 if (phong) {
 // specular color is multiplied by shininess_strength
-c.r = 0.2; c.g = 0.2; c.b = 0.2;
+c.r = 0.2f; c.g = 0.2f; c.b = 0.2f;
 m->Get(AI_MATKEY_COLOR_SPECULAR, c);
-f = 1.0;
+f = 1.0f;
 m->Get(AI_MATKEY_SHININESS_STRENGTH, f);
 p.AddP70vector("Specular", f*c.r, f*c.g, f*c.b);
-f = 20.0;
+f = 20.0f;
 m->Get(AI_MATKEY_SHININESS, f);
 p.AddP70double("Shininess", f);
 // Legacy "Reflectivity" is F*F*((R+G+B)/3),
 // where F is the proportion of light reflected (AKA reflectivity),
 // and RGB is the reflective colour of the material.
 // No idea why, but we might as well set it the same way.
-f = 0.0;
+f = 0.0f;
 m->Get(AI_MATKEY_REFLECTIVITY, f);
-c.r = 1.0, c.g = 1.0, c.b = 1.0;
+c.r = 1.0f, c.g = 1.0f, c.b = 1.0f;
 m->Get(AI_MATKEY_COLOR_REFLECTIVE, c);
 p.AddP70double("Reflectivity", f*f*((c.r+c.g+c.b)/3.0));
 }
@@ -1269,7 +1269,7 @@ void FBXExporter::WriteObjects ()
 const aiTextureType textype = static_cast<aiTextureType>(tt);
 const size_t texcount = mat->GetTextureCount(textype);
 for (size_t j = 0; j < texcount; ++j) {
-mat->GetTexture(textype, j, &texpath);
+mat->GetTexture(textype, (unsigned int)j, &texpath);
 const std::string texstring = texpath.C_Str();
 auto elem = uid_by_image.find(texstring);
 if (elem == uid_by_image.end()) {
@@ -1591,7 +1591,7 @@ void FBXExporter::WriteObjects ()
 std::vector<int32_t> vertex_indices;
 // map of vertex value to its index in the data vector
 std::map<aiVector3D,size_t> index_by_vertex_value;
-size_t index = 0;
+int32_t index = 0;
 for (size_t vi = 0; vi < m->mNumVertices; ++vi) {
 aiVector3D vtx = m->mVertices[vi];
 auto elem = index_by_vertex_value.find(vtx);
@@ -1600,7 +1600,7 @@ void FBXExporter::WriteObjects ()
 index_by_vertex_value[vtx] = index;
 ++index;
 } else {
-vertex_indices.push_back(elem->second);
+vertex_indices.push_back(int32_t(elem->second));
 }
 }
@@ -1616,7 +1616,7 @@ void FBXExporter::WriteObjects ()
 // as it can be instanced to many nodes.
 // All we can do is assume no instancing,
 // and take the first node we find that contains the mesh.
-aiNode* mesh_node = get_node_for_mesh(mi, mScene->mRootNode);
+aiNode* mesh_node = get_node_for_mesh((unsigned int)mi, mScene->mRootNode);
 aiMatrix4x4 mesh_xform = get_world_transform(mesh_node, mScene);

 // now make a subdeformer for each bone in the skeleton
@@ -1682,7 +1682,7 @@ void FBXExporter::WriteObjects ()

 // this should be the same as the bone's mOffsetMatrix.
 // if it's not the same, the skeleton isn't in the bind pose.
-const float epsilon = 1e-5; // some error is to be expected
+const float epsilon = 1e-5f; // some error is to be expected
 bool bone_xform_okay = true;
 if (b && ! tr.Equal(b->mOffsetMatrix, epsilon)) {
 not_in_bind_pose.insert(b);
@@ -2002,7 +2002,7 @@ void FBXExporter::WriteModelNodes(
 transform_chain.emplace_back(elem->first, t);
 break;
 case 'r': // rotation
-r *= DEG;
+r *= float(DEG);
 transform_chain.emplace_back(elem->first, r);
 break;
 case 's': // scale
@@ -1076,8 +1076,8 @@ std::string XFileParser::GetNextToken() {
 return s;
 }
 len = ReadBinDWord();
-const int bounds( mEnd - mP );
-const int iLen( len );
+const int bounds = int( mEnd - mP );
+const int iLen = int( len );
 if ( iLen < 0 ) {
 return s;
 }
@@ -104,7 +104,7 @@ public:

 // ---------------------------------------------------------------------
 ~StreamWriter() {
-stream->Write(&buffer[0], 1, buffer.size());
+stream->Write(buffer.data(), 1, buffer.size());
 stream->Flush();
 }
@@ -114,7 +114,7 @@ public:
 /** Flush the contents of the internal buffer, and the output IOStream */
 void Flush()
 {
-stream->Write(&buffer[0], 1, buffer.size());
+stream->Write(buffer.data(), 1, buffer.size());
 stream->Flush();
 buffer.clear();
 cursor = 0;
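The StreamWriter hunks are the "empty buffer stream write" fix named in the commit title: when the buffer is empty, &buffer[0] indexes element zero of an empty std::vector, which is undefined behavior (and trips the debug-iterator assertion on MSVC), while buffer.data() may always be called as long as the result is not dereferenced when size() is zero. A minimal sketch of the difference, independent of the Assimp classes (write_bytes is a stand-in, not a real API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in for an IOStream-style sink; only the call pattern matters.
    static void write_bytes(const char* src, std::size_t count) {
        if (count) { std::fwrite(src, 1, count, stdout); }
    }

    int main() {
        std::vector<char> buffer;                   // nothing buffered yet

        // write_bytes(&buffer[0], buffer.size()); // UB when the vector is empty
        write_bytes(buffer.data(), buffer.size()); // well-defined, count == 0

        buffer = {'o', 'k', '\n'};
        write_bytes(buffer.data(), buffer.size()); // non-empty case is unchanged
        return 0;
    }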