Merge branch 'master' into master

pull/3598/head
thomasbiang 2021-01-26 13:55:40 -08:00 committed by GitHub
commit 4ed9f3c2f4
8 changed files with 107 additions and 357 deletions

.github/FUNDING.yml

@@ -1,2 +1,2 @@
 patreon: assimp
-custom: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4JRJVPXC4QJM4
+open_collective: assimp

.gitignore

@@ -18,6 +18,9 @@ build
 *.VC.db-wal
 *.VC.opendb
 *.ipch
+.vs/
+out/
+CMakeSettings.json
 # Output
 bin/

Readme.md

@@ -105,12 +105,6 @@ Become a financial contributor and help us sustain our community. [[Contribute](
 Monthly donations via Patreon:
 <br>[![Patreon](https://cloud.githubusercontent.com/assets/8225057/5990484/70413560-a9ab-11e4-8942-1a63607c0b00.png)](http://www.patreon.com/assimp)
 <br>
-One-off donations via PayPal:
-<br>[![PayPal](https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4JRJVPXC4QJM4)
-<br>
 #### Organizations

.appveyor.yml

@@ -1,81 +0,0 @@
# AppVeyor file
# http://www.appveyor.com/docs/appveyor-yml

# clone directory
clone_folder: c:\projects\assimp

clone_depth: 1

# branches to build
branches:
  # whitelist
  only:
    - master

matrix:
  fast_finish: true

image:
  - Visual Studio 2013
  #- Visual Studio 2015
  #- Visual Studio 2017
  - Visual Studio 2019
  #- MinGW

platform:
  - Win32
  - x64

configuration: Release

install:
  - set PATH=C:\Ruby24-x64\bin;%PATH%
  - set CMAKE_DEFINES=-DASSIMP_WERROR=ON
  - if [%COMPILER%]==[MinGW] set PATH=C:\MinGW\bin;%PATH%
  - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2013" set CMAKE_GENERATOR_NAME=Visual Studio 12 2013
  - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2015" set CMAKE_GENERATOR_NAME=Visual Studio 14 2015
  - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" set CMAKE_GENERATOR_NAME=Visual Studio 15 2017
  - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" set CMAKE_GENERATOR_NAME=Visual Studio 16 2019
  - cmake %CMAKE_DEFINES% -G "%CMAKE_GENERATOR_NAME%" -A %platform% .
  # Rename sh.exe as sh.exe in PATH interferes with MinGW
  - rename "C:\Program Files\Git\usr\bin\sh.exe" "sh2.exe"
  - set PATH=%PATH%;"C:\\Program Files (x86)\\Inno Setup 5"
  - ps: Invoke-WebRequest -Uri https://download.microsoft.com/download/5/7/b/57b2947c-7221-4f33-b35e-2fc78cb10df4/vc_redist.x64.exe -OutFile .\packaging\windows-innosetup\vc_redist.x64.exe
  - ps: Invoke-WebRequest -Uri https://download.microsoft.com/download/1/d/8/1d8137db-b5bb-4925-8c5d-927424a2e4de/vc_redist.x86.exe -OutFile .\packaging\windows-innosetup\vc_redist.x86.exe

cache:
  - code\assimp.dir\%CONFIGURATION%
  - contrib\zlib\zlibstatic.dir\%CONFIGURATION%
  - contrib\zlib\zlib.dir\%CONFIGURATION%
  - tools\assimp_cmd\assimp_cmd.dir\%CONFIGURATION%
  - tools\assimp_view\assimp_viewer.dir\%CONFIGURATION%
  - test\unit.dir\%CONFIGURATION%
  - bin\.mtime_cache

before_build:
  - echo NUMBER_OF_PROCESSORS=%NUMBER_OF_PROCESSORS%
  - ruby scripts\AppVeyor\mtime_cache -g scripts\AppVeyor\cacheglobs.txt -c bin\.mtime_cache\cache.json

build_script:
  - cmake --build . --config Release -- /maxcpucount:2

after_build:
  - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" (
      if "%platform%"=="x64" (
        iscc packaging\windows-innosetup\script_x64.iss
      ) else (
        iscc packaging\windows-innosetup\script_x86.iss
      )
    )
  - 7z a assimp.7z bin\%CONFIGURATION%\* lib\%CONFIGURATION%\*

test_script:
  - cmd: bin\%CONFIGURATION%\unit.exe --gtest_output=xml:testout.xml

on_finish:
  - ps: (new-object net.webclient).UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\testout.xml))

artifacts:
  - path: assimp.7z
    name: assimp_lib

ColladaParser.cpp

@@ -334,7 +334,7 @@ void ColladaParser::ReadAssetInfo(XmlNode &node) {
         const std::string &currentName = currentNode.name();
         if (currentName == "unit") {
             mUnitSize = 1.f;
-            XmlParser::getFloatAttribute(node, "meter", mUnitSize);
+            XmlParser::getFloatAttribute(currentNode, "meter", mUnitSize);
         } else if (currentName == "up_axis") {
             std::string v;
             if (!XmlParser::getValueAsString(currentNode, v)) {
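The one-line fix above reads the meter attribute from the unit element itself (currentNode) rather than from its parent asset element (node), where the lookup finds nothing and the default unit size survives. A minimal standalone sketch of the difference, written against pugixml directly (assimp's XmlParser wraps pugixml); the XML snippet is illustrative, not from the test suite:

```cpp
#include <iostream>
#include <pugixml.hpp>

int main() {
    pugi::xml_document doc;
    doc.load_string(R"(<asset><unit name="meter" meter="0.01"/><up_axis>Y_UP</up_axis></asset>)");

    pugi::xml_node asset = doc.child("asset"); // the parent, like `node` above
    pugi::xml_node unit = asset.child("unit"); // the child, like `currentNode`

    // Querying the parent misses the attribute and falls back to the default,
    // which is exactly the pre-fix behavior of mUnitSize staying at 1.f:
    std::cout << asset.attribute("meter").as_float(1.0f) << "\n"; // prints 1
    std::cout << unit.attribute("meter").as_float(1.0f) << "\n";  // prints 0.01
}
```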
@@ -459,7 +459,6 @@ void ColladaParser::PostProcessRootAnimations() {
         if (animation != mAnimationLibrary.end()) {
             Animation *pSourceAnimation = animation->second;
             pSourceAnimation->CollectChannelsRecursively(clip->mChannels);
-
         }
     }
@@ -1738,14 +1737,16 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
     // and read all indices into a temporary array
     std::vector<size_t> indices;
-    if (expectedPointCount > 0)
+    if (expectedPointCount > 0) {
         indices.reserve(expectedPointCount * numOffsets);
+    }

-    if (pNumPrimitives > 0) // It is possible to not contain any indices
-    {
+    // It is possible to not contain any indices
+    if (pNumPrimitives > 0) {
         std::string v;
         XmlParser::getValueAsString(node, v);
         const char *content = v.c_str();
+        SkipSpacesAndLineEnd(&content);
         while (*content != 0) {
             // read a value.
             // Hack: (thom) Some exporters put negative indices sometimes. We just try to carry on anyways.
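The added SkipSpacesAndLineEnd call matters because the text content of a p element typically begins with layout whitespace. A standalone sketch (not the assimp loop itself) of how a leading newline injects a spurious zero index when the reader expects to start on a digit:

```cpp
#include <cctype>
#include <iostream>
#include <vector>

static bool isWs(char c) { return c == ' ' || c == '\t' || c == '\n' || c == '\r'; }

int main() {
    // XML text content frequently starts with indentation/newlines.
    const char *content = "\n  0 1 2 0 2 3";

    // The fix: skip leading spaces and line ends before entering the loop.
    // Comment this out and a bogus value of 0 is read from the leading '\n'.
    while (isWs(*content)) ++content;

    std::vector<size_t> indices;
    while (*content != 0) {
        size_t value = 0;
        while (std::isdigit(static_cast<unsigned char>(*content))) {
            value = value * 10 + static_cast<size_t>(*content - '0');
            ++content;
        }
        indices.push_back(value);
        while (isWs(*content)) ++content; // separators between values
    }
    std::cout << indices.size() << " indices read\n"; // 6 with the fix, 7 without
}
```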
@@ -1772,21 +1773,24 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
     // find the data for all sources
     for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) {
         InputChannel &input = *it;
-        if (input.mResolved)
+        if (input.mResolved) {
             continue;
+        }

         // find accessor
         input.mResolved = &ResolveLibraryReference(mAccessorLibrary, input.mAccessor);
         // resolve accessor's data pointer as well, if necessary
         const Accessor *acc = input.mResolved;
-        if (!acc->mData)
+        if (!acc->mData) {
             acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource);
+        }
     }

     // and the same for the per-index channels
     for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) {
         InputChannel &input = *it;
-        if (input.mResolved)
+        if (input.mResolved) {
             continue;
+        }

         // ignore vertex pointer, it doesn't refer to an accessor
         if (input.mType == IT_Vertex) {
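Both loops above follow a resolve-once pattern: a null mResolved or mData pointer means "not looked up yet", and since ReadPrimitives runs once per p element, channels resolved by an earlier primitive are skipped on later calls. A reduced, self-contained sketch of the pattern with hypothetical types (the real InputChannel and Accessor carry far more state):

```cpp
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Reduced stand-ins for the parser's types: nullptr means "unresolved".
struct Accessor {
    std::string source;                       // id of the backing data array
    const std::vector<float> *data = nullptr;
};
struct InputChannel {
    std::string accessor;                     // id of the accessor to use
    Accessor *resolved = nullptr;
};

// Mirrors ResolveLibraryReference: look an id up in a library or fail loudly.
template <typename T>
T &ResolveLibraryReference(std::map<std::string, T> &lib, const std::string &id) {
    auto it = lib.find(id);
    if (it == lib.end()) throw std::runtime_error("unresolved reference: " + id);
    return it->second;
}

int main() {
    std::map<std::string, std::vector<float>> dataLibrary{
        {"positions-array", {0.f, 0.f, 0.f}}};
    std::map<std::string, Accessor> accessorLibrary{
        {"positions-acc", {"positions-array"}}};
    std::vector<InputChannel> channels{{"positions-acc"}};

    for (InputChannel &input : channels) {
        if (input.resolved) {
            continue; // already resolved by an earlier <p>
        }
        input.resolved = &ResolveLibraryReference(accessorLibrary, input.accessor);
        Accessor *acc = input.resolved;
        if (!acc->data) { // resolve the accessor's backing array once as well
            acc->data = &ResolveLibraryReference(dataLibrary, acc->source);
        }
    }
    std::cout << "resolved " << channels.size() << " channel(s)\n";
}
```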
@@ -1801,8 +1805,9 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector<Inp
         input.mResolved = &ResolveLibraryReference(mAccessorLibrary, input.mAccessor);
         // resolve accessor's data pointer as well, if necessary
         const Accessor *acc = input.mResolved;
-        if (!acc->mData)
+        if (!acc->mData) {
             acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource);
+        }
     }

     // For continued primitives, the given count does not come all in one <p>, but only one primitive per <p>
@@ -1884,11 +1889,13 @@ void ColladaParser::CopyVertex(size_t currentVertex, size_t numOffsets, size_t n
     ai_assert((baseOffset + numOffsets - 1) < indices.size());

     // extract per-vertex channels using the global per-vertex offset
-    for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it)
+    for (std::vector<InputChannel>::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) {
         ExtractDataObjectFromChannel(*it, indices[baseOffset + perVertexOffset], pMesh);
+    }

     // and extract per-index channels using their specified offset
-    for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it)
+    for (std::vector<InputChannel>::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) {
         ExtractDataObjectFromChannel(*it, indices[baseOffset + it->mOffset], pMesh);
+    }

     // store the vertex-data index for later assignment of bone vertex weights
     pMesh.mFacePosIndices.push_back(indices[baseOffset + perVertexOffset]);
@@ -1912,8 +1919,9 @@ void ColladaParser::ReadPrimTriStrips(size_t numOffsets, size_t perVertexOffset,
 // Extracts a single object from an input channel and stores it in the appropriate mesh data array
 void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, size_t pLocalIndex, Mesh &pMesh) {
     // ignore vertex referrer - we handle them separately
-    if (pInput.mType == IT_Vertex)
+    if (pInput.mType == IT_Vertex) {
         return;
+    }

     const Accessor &acc = *pInput.mResolved;
     if (pLocalIndex >= acc.mCount) {
@@ -1926,86 +1934,93 @@ void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, siz
     // assemble according to the accessor's component sub-offset list. We don't care, yet,
     // what kind of object exactly we're extracting here
     ai_real obj[4];
-    for (size_t c = 0; c < 4; ++c)
+    for (size_t c = 0; c < 4; ++c) {
         obj[c] = dataObject[acc.mSubOffset[c]];
+    }

     // now we reinterpret it according to the type we're reading here
     switch (pInput.mType) {
     case IT_Position: // ignore all position streams except 0 - there can be only one position
-        if (pInput.mIndex == 0)
+        if (pInput.mIndex == 0) {
             pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2]));
-        else
+        } else {
             ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported");
+        }
         break;
     case IT_Normal:
         // pad to current vertex count if necessary
         if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1)
             pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0));

         // ignore all normal streams except 0 - there can be only one normal
-        if (pInput.mIndex == 0)
+        if (pInput.mIndex == 0) {
             pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2]));
-        else
+        } else {
             ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported");
+        }
         break;
     case IT_Tangent:
         // pad to current vertex count if necessary
         if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1)
             pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0));

         // ignore all tangent streams except 0 - there can be only one tangent
-        if (pInput.mIndex == 0)
+        if (pInput.mIndex == 0) {
             pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
-        else
+        } else {
             ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported");
+        }
         break;
     case IT_Bitangent:
         // pad to current vertex count if necessary
-        if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1)
+        if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) {
             pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1));
+        }

         // ignore all bitangent streams except 0 - there can be only one bitangent
-        if (pInput.mIndex == 0)
+        if (pInput.mIndex == 0) {
             pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2]));
-        else
+        } else {
             ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported");
+        }
         break;
     case IT_Texcoord:
         // up to 4 texture coord sets are fine, ignore the others
         if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) {
             // pad to current vertex count if necessary
             if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
                 pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(),
                         pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0));

             pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2]));
-            if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) /* hack ... consider cleaner solution */
+            if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) {
                 pMesh.mNumUVComponents[pInput.mIndex] = 3;
+            }
         } else {
             ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping.");
         }
         break;
     case IT_Color:
         // up to 4 color sets are fine, ignore the others
         if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) {
             // pad to current vertex count if necessary
             if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1)
                 pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(),
                         pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1));

             aiColor4D result(0, 0, 0, 1);
             for (size_t i = 0; i < pInput.mResolved->mSize; ++i) {
                 result[static_cast<unsigned int>(i)] = obj[pInput.mResolved->mSubOffset[i]];
             }
             pMesh.mColors[pInput.mIndex].push_back(result);
         } else {
             ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping.");
         }
         break;
     default:
         // IT_Invalid and IT_Vertex
         ai_assert(false && "shouldn't ever get here");
     }
 }
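Note the recurring pad-then-push idiom in the Normal, Tangent, Bitangent, Texcoord, and Color cases above: before a value is appended, the stream is padded with defaults until it is exactly one element short of mPositions, so the same index addresses the same vertex in every stream. A self-contained sketch of that invariant with hypothetical names:

```cpp
#include <cassert>
#include <vector>

struct Vec3 {
    float x, y, z;
};

// Pad `stream` with `fill` until it trails `positions` by exactly one slot,
// then append the real value - the pattern each case above repeats.
void PushAligned(std::vector<Vec3> &stream, const std::vector<Vec3> &positions,
                 const Vec3 &fill, const Vec3 &value) {
    if (stream.size() < positions.size() - 1) {
        stream.insert(stream.end(), positions.size() - stream.size() - 1, fill);
    }
    stream.push_back(value);
    assert(stream.size() == positions.size()); // streams stay index-aligned
}

int main() {
    std::vector<Vec3> positions(5, Vec3{0, 0, 0}); // 5 vertices seen so far
    std::vector<Vec3> normals;                     // no normals recorded yet
    // First normal arrives late: four defaults are inserted, then the value.
    PushAligned(normals, positions, Vec3{0, 1, 0}, Vec3{0, 0, 1});
    return 0;
}
```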

include/assimp/defs.h

@@ -247,14 +247,14 @@ typedef double ai_real;
 typedef signed long long int ai_int;
 typedef unsigned long long int ai_uint;
 #ifndef ASSIMP_AI_REAL_TEXT_PRECISION
-#define ASSIMP_AI_REAL_TEXT_PRECISION 16
+#define ASSIMP_AI_REAL_TEXT_PRECISION 17
 #endif // ASSIMP_AI_REAL_TEXT_PRECISION
 #else // ASSIMP_DOUBLE_PRECISION
 typedef float ai_real;
 typedef signed int ai_int;
 typedef unsigned int ai_uint;
 #ifndef ASSIMP_AI_REAL_TEXT_PRECISION
-#define ASSIMP_AI_REAL_TEXT_PRECISION 8
+#define ASSIMP_AI_REAL_TEXT_PRECISION 9
 #endif // ASSIMP_AI_REAL_TEXT_PRECISION
 #endif // ASSIMP_DOUBLE_PRECISION
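The new defaults equal std::numeric_limits<float>::max_digits10 (9) and std::numeric_limits<double>::max_digits10 (17): the smallest number of significant decimal digits that guarantees text serialization round-trips to the exact same binary value. At 8 and 16 digits, a fraction of values lose their last bit on the way through an exporter and back. A standalone sketch demonstrating the effect (sample count arbitrary, not assimp code):

```cpp
#include <iostream>
#include <random>
#include <sstream>

// Serialize with `precision` significant digits, parse back, compare exactly.
template <typename T>
bool RoundTrips(T value, int precision) {
    std::stringstream ss;
    ss.precision(precision);
    ss << value;
    T parsed{};
    ss >> parsed;
    return parsed == value;
}

template <typename T>
int CountFailures(int precision, int samples) {
    std::mt19937 rng(42); // fixed seed for reproducibility
    std::uniform_real_distribution<T> dist(T(0), T(1));
    int failures = 0;
    for (int i = 0; i < samples; ++i) {
        if (!RoundTrips(dist(rng), precision)) {
            ++failures;
        }
    }
    return failures;
}

int main() {
    // Typically nonzero at the old precisions, always zero at the new ones.
    std::cout << "float,  8 digits:  " << CountFailures<float>(8, 100000) << " failures\n";
    std::cout << "float,  9 digits:  " << CountFailures<float>(9, 100000) << " failures\n";
    std::cout << "double, 16 digits: " << CountFailures<double>(16, 100000) << " failures\n";
    std::cout << "double, 17 digits: " << CountFailures<double>(17, 100000) << " failures\n";
}
```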

scripts/AppVeyor/cacheglobs.txt

@@ -1,4 +0,0 @@
code/*.{%{cpp}}
contrib/**/*.{%{cpp}}
include/**/*.{%{cpp}}
test/**/*.{%{cpp}}

scripts/AppVeyor/mtime_cache

@@ -1,177 +0,0 @@
#!/usr/bin/env ruby
#
# mtime_cache
# Copyright (c) 2016 Borislav Stanimirov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

require 'digest/md5'
require 'json'
require 'fileutils'

VERSION = "1.0.2"
VERSION_TEXT = "mtime_cache v#{VERSION}"

USAGE = <<ENDUSAGE
Usage:
  mtime_cache [<globs>] [-g globfile] [-d] [-q|V] [-c cache]
ENDUSAGE

HELP = <<ENDHELP
Traverse through globbed files, making a json cache based on their mtime.
If a cache exists, changes the mtime of existing unchanged (based on MD5
hash) files to the one in the cache.

Options:
  globs            Ruby-compatible glob strings (ex some/path/**/*.java)
                   An extension pattern is allowed in the form %{pattern}
                   (ex some/path/*.{%{pattern1},%{pattern2}})
                   The globs support the following patterns:
                       %{cpp} - common C++ extensions
  -g, --globfile   A file with a list of globs to perform (one per line)
  -?, -h, --help   Show this help message.
  -v, --version    Show the version number (#{VERSION})
  -q, --quiet      Don't log anything to stdout
  -V, --verbose    Show extra logging
  -d, --dryrun     Don't change any files on the filesystem
  -c, --cache      Specify the cache file for input and output.
                   [Default is .mtime_cache.json]
ENDHELP

param_arg = nil
ARGS = { :cache => '.mtime_cache.json', :globs => [] }

ARGV.each do |arg|
  case arg
  when '-g', '--globfile' then param_arg = :globfile
  when '-h', '-?', '--help' then ARGS[:help] = true
  when '-v', '--version' then ARGS[:ver] = true
  when '-q', '--quiet' then ARGS[:quiet] = true
  when '-V', '--verbose' then ARGS[:verbose] = true
  when '-d', '--dryrun' then ARGS[:dry] = true
  when '-c', '--cache' then param_arg = :cache
  else
    if param_arg
      ARGS[param_arg] = arg
      param_arg = nil
    else
      ARGS[:globs] << arg
    end
  end
end

def log(text, level = 0)
  return if ARGS[:quiet]
  return if level > 0 && !ARGS[:verbose]
  puts text
end

if ARGS[:ver] || ARGS[:help]
  log VERSION_TEXT
  exit if ARGS[:ver]
  log USAGE
  log HELP
  exit
end

if ARGS[:globs].empty? && !ARGS[:globfile]
  log 'Error: Missing globs'
  log USAGE
  exit 1
end

EXTENSION_PATTERNS = {
  :cpp => "c,cc,cpp,cxx,h,hpp,hxx,inl,ipp,inc,ixx"
}

cache_file = ARGS[:cache]
cache = {}

if File.file?(cache_file)
  log "Found #{cache_file}"
  cache = JSON.parse(File.read(cache_file))
  log "Read #{cache.length} entries"
else
  log "#{cache_file} not found. A new one will be created"
end

globs = ARGS[:globs].map { |g| g % EXTENSION_PATTERNS }

globfile = ARGS[:globfile]
if globfile
  File.open(globfile, 'r').each_line do |line|
    line.strip!
    next if line.empty?
    globs << line % EXTENSION_PATTERNS
  end
end

if globs.empty?
  log 'Error: No globs in globfile'
  log USAGE
  exit 1
end

files = {}
num_changed = 0

globs.each do |glob|
  Dir[glob].each do |file|
    next if !File.file?(file)

    mtime = File.mtime(file).to_i
    hash = Digest::MD5.hexdigest(File.read(file))
    cached = cache[file]

    if cached && cached['hash'] == hash && cached['mtime'] < mtime
      mtime = cached['mtime']
      log "mtime_cache: changing mtime of #{file} to #{mtime}", 1
      File.utime(File.atime(file), Time.at(mtime), file) if !ARGS[:dry]
      num_changed += 1
    else
      log "mtime_cache: NOT changing mtime of #{file}", 1
    end

    files[file] = { 'mtime' => mtime, 'hash' => hash }
  end
end

log "Changed mtime of #{num_changed} of #{files.length} files"
log "Writing #{cache_file}"

if !ARGS[:dry]
  dirname = File.dirname(cache_file)
  unless File.directory?(dirname)
    FileUtils.mkdir_p(dirname)
  end
  File.open(cache_file, 'w').write(JSON.pretty_generate(files))
end