Merge pull request #3227 from thomasbiang/gltf2_sparseAccessor_export
glTF2 Sparse Accessor Export (blend shape export using sparse accessors)
commit 231447c8d6
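This PR teaches the glTF2 exporter to write blend shape (morph target) deltas as glTF 2.0 sparse accessors: instead of a dense per-vertex array, an accessor stores only the non-zero deltas plus the indices of the vertices they apply to. As an orientation aid for the hunks below, here is an illustrative mock of the per-accessor sparse state the patch threads through glTF2::Accessor; the field names mirror the ones used in the diff, but the declaration itself is not assimp's (the real Accessor::Sparse uses Ref<BufferView> handles and lives in glTF2Asset.h).

// Illustrative mock of the sparse state referenced throughout the diff
// (assumption: assimp's actual Accessor::Sparse declaration differs in detail).
#include <cstddef>

struct MockBufferView;                     // stands in for glTF2::BufferView

struct MockSparse {
    size_t count = 0;                      // number of overridden elements
    MockBufferView *indices = nullptr;     // view holding the affected vertex indices
    int indicesType = 5123;                // UNSIGNED_SHORT, as written by the exporter
    size_t indicesByteOffset = 0;          // offset of the indices inside their view
    MockBufferView *values = nullptr;      // view holding the non-zero deltas
    size_t valuesByteOffset = 0;           // offset of the values inside their view
};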
@@ -406,6 +406,8 @@ struct Accessor : public Object {
    void ExtractData(T *&outData);

    void WriteData(size_t count, const void *src_buffer, size_t src_stride);
    void WriteSparseValues(size_t count, const void *src_data, size_t src_dataStride);
    void WriteSparseIndices(size_t count, const void *src_idx, size_t src_idxStride);

    //! Helper class to iterate the data
    class Indexer {
@@ -757,6 +757,33 @@ inline void Accessor::WriteData(size_t _count, const void *src_buffer, size_t src_stride) {
    CopyData(_count, src, src_stride, dst, dst_stride);
}

inline void Accessor::WriteSparseValues(size_t _count, const void *src_data, size_t src_dataStride) {
    if (!sparse)
        return;

    // values
    uint8_t *value_buffer_ptr = sparse->values->buffer->GetPointer();
    size_t value_offset = sparse->valuesByteOffset + sparse->values->byteOffset;
    size_t value_dst_stride = GetNumComponents() * GetBytesPerComponent();
    const uint8_t *value_src = reinterpret_cast<const uint8_t *>(src_data);
    uint8_t *value_dst = reinterpret_cast<uint8_t *>(value_buffer_ptr + value_offset);
    ai_assert(value_dst + _count * value_dst_stride <= value_buffer_ptr + sparse->values->buffer->byteLength);
    CopyData(_count, value_src, src_dataStride, value_dst, value_dst_stride);
}

inline void Accessor::WriteSparseIndices(size_t _count, const void *src_idx, size_t src_idxStride) {
    if (!sparse)
        return;

    // indices
    uint8_t *indices_buffer_ptr = sparse->indices->buffer->GetPointer();
    size_t indices_offset = sparse->indicesByteOffset + sparse->indices->byteOffset;
    size_t indices_dst_stride = 1 * sizeof(unsigned short);
    const uint8_t *indices_src = reinterpret_cast<const uint8_t *>(src_idx);
    uint8_t *indices_dst = reinterpret_cast<uint8_t *>(indices_buffer_ptr + indices_offset);
    ai_assert(indices_dst + _count * indices_dst_stride <= indices_buffer_ptr + sparse->indices->buffer->byteLength);
    CopyData(_count, indices_src, src_idxStride, indices_dst, indices_dst_stride);
}

inline Accessor::Indexer::Indexer(Accessor &acc) :
        accessor(acc),
        data(acc.GetPointer()),
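Both new writers end in CopyData, a helper that is not part of this diff. For reference, here is a minimal sketch of the strided element copy it is assumed to perform; the real helper in glTF2Asset.inl may differ in detail (for example, it may zero-fill when the destination stride is wider than the source stride).

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of the assumed CopyData semantics: copy 'count' elements, advancing
// the source pointer by src_stride bytes and the destination by dst_stride
// bytes per element.
inline void CopyDataSketch(size_t count, const uint8_t *src, size_t src_stride,
                           uint8_t *dst, size_t dst_stride) {
    if (src_stride == dst_stride) {
        std::memcpy(dst, src, count * src_stride); // contiguous fast path
        return;
    }
    const size_t bytes = std::min(src_stride, dst_stride);
    for (size_t i = 0; i < count; ++i) {
        std::memcpy(dst, src, bytes);
        src += src_stride;
        dst += dst_stride;
    }
}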
@@ -1287,6 +1314,8 @@ inline void Node::Read(Value &obj, Asset &r) {
        }
    }

    // Do not retrieve a skin here, just take a reference, to avoid infinite recursion
    // Skins will be properly loaded later
    Value *curSkin = FindUInt(obj, "skin");
    if (nullptr != curSkin) {
        this->skin = r.skins.Get(curSkin->GetUint());
@@ -1584,7 +1613,6 @@ inline void Asset::Load(const std::string &pFile, bool isBinary) {
        }
    }

    // Read skins after nodes have been loaded to avoid infinite recursion
    if (Value *skinsArray = FindArray(doc, "skins")) {
        for (unsigned int i = 0; i < skinsArray->Size(); ++i) {
            skins.Retrieve(i);
@@ -107,21 +107,47 @@ namespace glTF2 {

    inline void Write(Value& obj, Accessor& a, AssetWriter& w)
    {
        obj.AddMember("bufferView", a.bufferView->index, w.mAl);
        obj.AddMember("byteOffset", (unsigned int)a.byteOffset, w.mAl);
        if (a.bufferView) {
            obj.AddMember("bufferView", a.bufferView->index, w.mAl);
            obj.AddMember("byteOffset", (unsigned int)a.byteOffset, w.mAl);
            Value vTmpMax, vTmpMin;
            if (a.componentType == ComponentType_FLOAT) {
                obj.AddMember("max", MakeValue(vTmpMax, a.max, w.mAl), w.mAl);
                obj.AddMember("min", MakeValue(vTmpMin, a.min, w.mAl), w.mAl);
            } else {
                obj.AddMember("max", MakeValueCast<int64_t>(vTmpMax, a.max, w.mAl), w.mAl);
                obj.AddMember("min", MakeValueCast<int64_t>(vTmpMin, a.min, w.mAl), w.mAl);
            }
        }

        obj.AddMember("componentType", int(a.componentType), w.mAl);
        obj.AddMember("count", (unsigned int)a.count, w.mAl);
        obj.AddMember("type", StringRef(AttribType::ToString(a.type)), w.mAl);

        Value vTmpMax, vTmpMin;
        if (a.componentType == ComponentType_FLOAT) {
            obj.AddMember("max", MakeValue(vTmpMax, a.max, w.mAl), w.mAl);
            obj.AddMember("min", MakeValue(vTmpMin, a.min, w.mAl), w.mAl);
        } else {
            obj.AddMember("max", MakeValueCast<int64_t>(vTmpMax, a.max, w.mAl), w.mAl);
            obj.AddMember("min", MakeValueCast<int64_t>(vTmpMin, a.min, w.mAl), w.mAl);
        }
        if (a.sparse) {
            Value sparseValue;
            sparseValue.SetObject();

            // count
            sparseValue.AddMember("count", (unsigned int)a.sparse->count, w.mAl);

            // indices
            Value indices;
            indices.SetObject();
            indices.AddMember("bufferView", a.sparse->indices->index, w.mAl);
            indices.AddMember("byteOffset", (unsigned int)a.sparse->indicesByteOffset, w.mAl);
            indices.AddMember("componentType", int(a.sparse->indicesType), w.mAl);
            sparseValue.AddMember("indices", indices, w.mAl);

            // values
            Value values;
            values.SetObject();
            values.AddMember("bufferView", a.sparse->values->index, w.mAl);
            values.AddMember("byteOffset", (unsigned int)a.sparse->valuesByteOffset, w.mAl);
            sparseValue.AddMember("values", values, w.mAl);

            obj.AddMember("sparse", sparseValue, w.mAl);
        }
    }

    inline void Write(Value& obj, Animation& a, AssetWriter& w)
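For context on why storing only the non-zero deltas round-trips: per the glTF 2.0 specification, a consumer rebuilds the dense array by starting from the accessor's bufferView contents (or zeros when, as with the morph-target accessors written here, there is no bufferView) and replacing the elements named by sparse.indices with sparse.values. A minimal decode-side sketch for the FLOAT/VEC3 case, illustrative only; assimp's own importer handles this generically:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: apply a glTF sparse accessor (indices + values) on top of a dense
// base array of float VEC3 elements. An empty 'base' means "all zeros".
std::vector<float> ApplySparse(const std::vector<float> &base, size_t elementCount,
                               const std::vector<uint16_t> &indices,
                               const std::vector<float> &values) {
    const size_t comps = 3; // VEC3
    std::vector<float> dense = base.empty()
            ? std::vector<float>(elementCount * comps, 0.0f)
            : base;
    for (size_t i = 0; i < indices.size(); ++i) {
        for (size_t c = 0; c < comps; ++c) {
            dense[indices[i] * comps + c] = values[i * comps + c];
        }
    }
    return dense;
}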
@@ -221,6 +221,158 @@ inline void SetAccessorRange(ComponentType compType, Ref<Accessor> acc, void* data
    }
}

// compute the per-element deltas (data - dataBase) and keep only the non-zero ones
template <typename T>
size_t NZDiff(void *data, void *dataBase, size_t count, unsigned int numCompsIn, unsigned int numCompsOut, void *&outputNZDiff, void *&outputNZIdx) {
    std::vector<T> vNZDiff;
    std::vector<unsigned short> vNZIdx;
    size_t totalComps = count * numCompsIn;
    T *bufferData_ptr = static_cast<T *>(data);
    T *bufferData_end = bufferData_ptr + totalComps;
    T *bufferBase_ptr = static_cast<T *>(dataBase);

    // Walk the elements, collecting the non-zero deltas and the indices they belong to.
    for (short idx = 0; bufferData_ptr < bufferData_end; idx += 1, bufferData_ptr += numCompsIn) {
        bool bNonZero = false;

        // check whether any component of this element is non-zero
        for (unsigned int j = 0; j < numCompsOut; j++) {
            double valueData = bufferData_ptr[j];
            double valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
            if ((valueData - valueBase) != 0) {
                bNonZero = true;
                break;
            }
        }

        // all components are zero; skip this element
        if (!bNonZero)
            continue;

        // non-zero: store the delta for every output component
        for (unsigned int j = 0; j < numCompsOut; j++) {
            T valueData = bufferData_ptr[j];
            T valueBase = bufferBase_ptr ? bufferBase_ptr[j] : 0;
            vNZDiff.push_back(valueData - valueBase);
        }
        vNZIdx.push_back(idx);
    }

    // avoid an empty sparse accessor: emit at least one (zero) item
    if (vNZDiff.size() == 0) {
        for (unsigned int j = 0; j < numCompsOut; j++)
            vNZDiff.push_back(0);
        vNZIdx.push_back(0);
    }

    // copy the results into freshly allocated output buffers (freed by the caller)
    outputNZDiff = new T[vNZDiff.size()];
    memcpy(outputNZDiff, vNZDiff.data(), vNZDiff.size() * sizeof(T));

    outputNZIdx = new unsigned short[vNZIdx.size()];
    memcpy(outputNZIdx, vNZIdx.data(), vNZIdx.size() * sizeof(unsigned short));
    return vNZIdx.size();
}

inline size_t NZDiff(ComponentType compType, void *data, void *dataBase, size_t count, unsigned int numCompsIn, unsigned int numCompsOut, void *&nzDiff, void *&nzIdx) {
    switch (compType) {
    case ComponentType_SHORT:
        return NZDiff<short>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    case ComponentType_UNSIGNED_SHORT:
        return NZDiff<unsigned short>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    case ComponentType_UNSIGNED_INT:
        return NZDiff<unsigned int>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    case ComponentType_FLOAT:
        return NZDiff<float>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    case ComponentType_BYTE:
        return NZDiff<int8_t>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    case ComponentType_UNSIGNED_BYTE:
        return NZDiff<uint8_t>(data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
    }
    return 0;
}

inline Ref<Accessor> ExportDataSparse(Asset &a, std::string &meshName, Ref<Buffer> &buffer,
        size_t count, void *data, AttribType::Value typeIn, AttribType::Value typeOut, ComponentType compType, BufferViewTarget target = BufferViewTarget_NONE, void *dataBase = 0) {
    if (!count || !data) {
        return Ref<Accessor>();
    }

    unsigned int numCompsIn = AttribType::GetNumComponents(typeIn);
    unsigned int numCompsOut = AttribType::GetNumComponents(typeOut);
    unsigned int bytesPerComp = ComponentTypeSize(compType);

    // accessor
    Ref<Accessor> acc = a.accessors.Create(a.FindUniqueID(meshName, "accessor"));

    // if a base data vector is provided, write it into its own buffer view
    if (dataBase) {
        size_t base_offset = buffer->byteLength;
        size_t base_padding = base_offset % bytesPerComp;
        base_offset += base_padding;
        size_t base_length = count * numCompsOut * bytesPerComp;
        buffer->Grow(base_length + base_padding);

        Ref<BufferView> bv = a.bufferViews.Create(a.FindUniqueID(meshName, "view"));
        bv->buffer = buffer;
        bv->byteOffset = base_offset;
        bv->byteLength = base_length;
        bv->byteStride = 0;
        bv->target = target; //! The target that the WebGL buffer should be bound to.
        acc->bufferView = bv;
        acc->WriteData(count, dataBase, numCompsIn * bytesPerComp);
    }
    acc->byteOffset = 0;
    acc->componentType = compType;
    acc->count = count;
    acc->type = typeOut;

    if (data) {
        void *nzDiff = 0, *nzIdx = 0;
        size_t nzCount = NZDiff(compType, data, dataBase, count, numCompsIn, numCompsOut, nzDiff, nzIdx);
        acc->sparse.reset(new Accessor::Sparse);
        acc->sparse->count = nzCount;

        // indices
        unsigned int bytesPerIdx = sizeof(unsigned short);
        size_t indices_offset = buffer->byteLength;
        size_t indices_padding = indices_offset % bytesPerIdx;
        indices_offset += indices_padding;
        size_t indices_length = nzCount * 1 * bytesPerIdx;
        buffer->Grow(indices_length + indices_padding);

        Ref<BufferView> indicesBV = a.bufferViews.Create(a.FindUniqueID(meshName, "view"));
        indicesBV->buffer = buffer;
        indicesBV->byteOffset = indices_offset;
        indicesBV->byteLength = indices_length;
        indicesBV->byteStride = 0;
        acc->sparse->indices = indicesBV;
        acc->sparse->indicesType = ComponentType_UNSIGNED_SHORT;
        acc->sparse->indicesByteOffset = 0;
        acc->WriteSparseIndices(nzCount, nzIdx, 1 * bytesPerIdx);

        // values
        size_t values_offset = buffer->byteLength;
        size_t values_padding = values_offset % bytesPerComp;
        values_offset += values_padding;
        size_t values_length = nzCount * numCompsOut * bytesPerComp;
        buffer->Grow(values_length + values_padding);

        Ref<BufferView> valuesBV = a.bufferViews.Create(a.FindUniqueID(meshName, "view"));
        valuesBV->buffer = buffer;
        valuesBV->byteOffset = values_offset;
        valuesBV->byteLength = values_length;
        valuesBV->byteStride = 0;
        acc->sparse->values = valuesBV;
        acc->sparse->valuesByteOffset = 0;
        acc->WriteSparseValues(nzCount, nzDiff, numCompsIn * bytesPerComp);

        // free the temporary buffers allocated by NZDiff
        delete[] (char *)nzDiff;
        delete[] (char *)nzIdx;
    }
    return acc;
}

inline Ref<Accessor> ExportData(Asset& a, std::string& meshName, Ref<Buffer>& buffer,
        size_t count, void* data, AttribType::Value typeIn, AttribType::Value typeOut, ComponentType compType, BufferViewTarget target = BufferViewTarget_NONE)
{
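To make the compaction above concrete, here is a small self-contained example in the spirit of NZDiff: given per-vertex VEC3 morph deltas that are mostly zero, it collects only the non-zero elements plus the vertex indices they belong to, which is exactly what ExportDataSparse writes into the sparse indices and values buffer views. The helper and the data in main are illustrative, not part of the patch.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Vec3 { float x, y, z; };

// Collect the indices and values of the non-zero morph deltas
// (encode side of a sparse accessor, FLOAT/VEC3 case).
static void CollectNonZero(const std::vector<Vec3> &delta,
                           std::vector<unsigned short> &outIdx,
                           std::vector<Vec3> &outVal) {
    for (size_t i = 0; i < delta.size(); ++i) {
        const Vec3 &d = delta[i];
        if (d.x != 0.0f || d.y != 0.0f || d.z != 0.0f) {
            outIdx.push_back(static_cast<unsigned short>(i));
            outVal.push_back(d);
        }
    }
}

int main() {
    // 6-vertex mesh where only vertices 2 and 5 move in this blend shape.
    std::vector<Vec3> delta = { {0,0,0}, {0,0,0}, {0.1f,0,0}, {0,0,0}, {0,0,0}, {0,0,0.5f} };
    std::vector<unsigned short> idx;
    std::vector<Vec3> val;
    CollectNonZero(delta, idx, val);
    // Dense storage would need 6 * 12 bytes; sparse needs 2 indices + 2 VEC3 values.
    std::printf("sparse count = %zu\n", idx.size()); // prints 2
    return 0;
}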
@@ -831,6 +983,10 @@ void glTF2Exporter::ExportMeshes()

        /*************** Targets for blendshapes ****************/
        if (aim->mNumAnimMeshes > 0) {
            bool bUseSparse = this->mProperties->HasPropertyBool("GLTF2_SPARSE_ACCESSOR_EXP") &&
                              this->mProperties->GetPropertyBool("GLTF2_SPARSE_ACCESSOR_EXP");
            bool bIncludeNormal = this->mProperties->HasPropertyBool("GLTF2_TARGET_NORMAL_EXP") &&
                                  this->mProperties->GetPropertyBool("GLTF2_TARGET_NORMAL_EXP");
            bool bExportTargetNames = this->mProperties->HasPropertyBool("GLTF2_TARGETNAMES_EXP") &&
                                      this->mProperties->GetPropertyBool("GLTF2_TARGETNAMES_EXP");
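The three switches read above are ordinary export properties, so applications opt in from the C++ API. A minimal sketch of enabling sparse morph-target export follows; the property keys are taken from the hunk above, while the scene pointer and output path are placeholders.

#include <assimp/Exporter.hpp>
#include <assimp/scene.h>

// Export 'scene' as glTF 2.0 with blend-shape deltas written as sparse
// accessors, including per-target normal deltas and target names.
aiReturn ExportWithSparseTargets(const aiScene *scene, const char *outPath) {
    Assimp::Exporter exporter;
    Assimp::ExportProperties props;
    props.SetPropertyBool("GLTF2_SPARSE_ACCESSOR_EXP", true); // use ExportDataSparse for targets
    props.SetPropertyBool("GLTF2_TARGET_NORMAL_EXP", true);   // also export normal deltas
    props.SetPropertyBool("GLTF2_TARGETNAMES_EXP", true);     // write the mesh "targetNames"
    return exporter.Export(scene, "gltf2", outPath, 0u, &props);
}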
@@ -839,7 +995,6 @@ void glTF2Exporter::ExportMeshes()
            aiAnimMesh *pAnimMesh = aim->mAnimMeshes[am];
            if (bExportTargetNames)
                m->targetNames.push_back(pAnimMesh->mName.data);

            // position
            if (pAnimMesh->HasPositions()) {
                // NOTE: in glTF it is the diff that gets stored

@@ -847,9 +1002,16 @@ void glTF2Exporter::ExportMeshes()
                for (unsigned int vt = 0; vt < pAnimMesh->mNumVertices; ++vt) {
                    pPositionDiff[vt] = pAnimMesh->mVertices[vt] - aim->mVertices[vt];
                }
                Ref<Accessor> vec = ExportData(*mAsset, meshId, b,
                        pAnimMesh->mNumVertices, pPositionDiff,
                        AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                Ref<Accessor> vec;
                if (bUseSparse) {
                    vec = ExportDataSparse(*mAsset, meshId, b,
                            pAnimMesh->mNumVertices, pPositionDiff,
                            AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                } else {
                    vec = ExportData(*mAsset, meshId, b,
                            pAnimMesh->mNumVertices, pPositionDiff,
                            AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                }
                if (vec) {
                    p.targets[am].position.push_back(vec);
                }

@@ -857,14 +1019,21 @@ void glTF2Exporter::ExportMeshes()
            }

            // normal
            if (pAnimMesh->HasNormals()) {
            if (pAnimMesh->HasNormals() && bIncludeNormal) {
                aiVector3D *pNormalDiff = new aiVector3D[pAnimMesh->mNumVertices];
                for (unsigned int vt = 0; vt < pAnimMesh->mNumVertices; ++vt) {
                    pNormalDiff[vt] = pAnimMesh->mNormals[vt] - aim->mNormals[vt];
                }
                Ref<Accessor> vec = ExportData(*mAsset, meshId, b,
                        pAnimMesh->mNumVertices, pNormalDiff,
                        AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                Ref<Accessor> vec;
                if (bUseSparse) {
                    vec = ExportDataSparse(*mAsset, meshId, b,
                            pAnimMesh->mNumVertices, pNormalDiff,
                            AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                } else {
                    vec = ExportData(*mAsset, meshId, b,
                            pAnimMesh->mNumVertices, pNormalDiff,
                            AttribType::VEC3, AttribType::VEC3, ComponentType_FLOAT);
                }
                if (vec) {
                    p.targets[am].normal.push_back(vec);
                }