diff --git a/port/PyAssimp/README.md b/port/PyAssimp/README.md
new file mode 100644
index 000000000..5bec88711
--- /dev/null
+++ b/port/PyAssimp/README.md
@@ -0,0 +1,74 @@
+PyAssimp Readme
+===============
+
+-- a simple Python wrapper for Assimp using ctypes to access
+the library. Requires Python >= 2.6.
+
+Python 3 support is mostly here, but not well tested.
+
+Note that pyassimp is not complete. Many ASSIMP features are missing. In
+particular, only loading of models is currently supported (no export).
+
+USAGE
+-----
+
+To get started with pyAssimp, examine the sample.py script in scripts/, which
+illustrates the basic usage. All Assimp data structures are wrapped using
+ctypes. All the data+length fields in Assimp's data structures (such as
+'aiMesh::mNumVertices','aiMesh::mVertices') are replaced by simple python
+lists, so you can call len() on them to get their respective size and access
+members using [].
+
+For example, to load a file named 'hello.3ds' and print the first
+vertex of the first mesh, you would do (proper error handling
+substituted by assertions ...):
+
+```python
+
+from pyassimp.core import *
+scene = load('hello.3ds')
+
+assert len(scene.meshes)
+mesh = scene.meshes[0]
+
+assert len(mesh.vertices)
+print(mesh.vertices[0])
+
+# don't forget this one, or you will leak!
+release(scene)
+
+```
+
+Another example to list the 'top nodes' in a
+scene:
+
+```python
+
+from pyassimp.core import *
+scene = load('hello.3ds')
+
+for c in scene.rootnode.children:
+ print(str(c))
+
+release(scene)
+
+```
+
+INSTALL
+-------
+
+Install pyassimp by running:
+
+> python setup.py install
+
+PyAssimp requires an assimp dynamic library (DLL on windows,
+.so on linux) in order to work. The default search directories
+are:
+
+- the current directory
+- on linux additionally: /usr/lib and /usr/local/lib
+
+To build that library, refer to the Assimp master INSTALL
+instructions. To look in more places, edit ./pyassimp/helper.py.
+There's an 'additional_dirs' list waiting for your entries.
+
diff --git a/port/PyAssimp/pyassimp/core.py b/port/PyAssimp/pyassimp/core.py
new file mode 100644
index 000000000..731dcd5a3
--- /dev/null
+++ b/port/PyAssimp/pyassimp/core.py
@@ -0,0 +1,415 @@
+#-*- coding: UTF-8 -*-
+
+"""
+PyAssimp
+
+This is the main-module of PyAssimp.
+"""
+
+import sys
+if sys.version_info < (2,6):
+ raise 'pyassimp: need python 2.6 or newer'
+
+
+import ctypes
+import os
+import numpy
+
+import logging; logger = logging.getLogger("pyassimp")
+
+# Attach a default, null handler, to the logger.
+# applications can easily get log messages from pyassimp
+# by calling for instance
+# >>> logging.basicConfig(level=logging.DEBUG)
+# before importing pyassimp
+class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+h = NullHandler()
+logger.addHandler(h)
+
+from . import structs
+from .errors import AssimpError
+from . import helper
+
+
+assimp_structs_as_tuple = (
+ structs.Matrix4x4,
+ structs.Matrix3x3,
+ structs.Vector2D,
+ structs.Vector3D,
+ structs.Color3D,
+ structs.Color4D,
+ structs.Quaternion,
+ structs.Plane,
+ structs.Texel)
+
+def make_tuple(ai_obj, type = None):
+ res = None
+
+ if isinstance(ai_obj, structs.Matrix4x4):
+ res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4))
+ #import pdb;pdb.set_trace()
+ elif isinstance(ai_obj, structs.Matrix3x3):
+ res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3))
+ else:
+ res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_])
+
+ return res
+
+def call_init(obj, caller = None):
+ # init children
+ if hasattr(obj, '_init'):
+ obj._init(parent = caller)
+
+ # pointers
+ elif hasattr(obj, 'contents'):
+ if hasattr(obj.contents, '_init'):
+ obj.contents._init(target = obj, parent = caller)
+
+
+
+def _init(self, target = None, parent = None):
+ """
+    Custom initialize() for C structs, adds safely accessible member functionality.
+
+ :param target: set the object which receive the added methods. Useful when manipulating
+    pointers, to skip the intermediate 'contents' dereferencing.
+ """
+ if hasattr(self, '_is_init'):
+ return self
+ self._is_init = True
+
+ if not target:
+ target = self
+
+ for m in dir(self):
+
+ name = m[1:].lower()
+
+ if m.startswith("_"):
+ continue
+
+ obj = getattr(self, m)
+
+ if m.startswith('mNum'):
+ if 'm' + m[4:] in dir(self):
+ continue # will be processed later on
+ else:
+ setattr(target, name, obj)
+
+
+
+ # Create tuples
+ if isinstance(obj, assimp_structs_as_tuple):
+ setattr(target, name, make_tuple(obj))
+ logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower())
+ continue
+
+
+ if isinstance(obj, structs.String):
+ setattr(target, 'name', str(obj.data))
+ setattr(target.__class__, '__repr__', lambda x: str(x.__class__) + "(" + x.name + ")")
+ setattr(target.__class__, '__str__', lambda x: x.name)
+ continue
+
+
+ if m.startswith('m'):
+
+ if name == "parent":
+ setattr(target, name, parent)
+ logger.debug("Added a parent as self." + name)
+ continue
+
+ if hasattr(self, 'mNum' + m[1:]):
+
+ length = getattr(self, 'mNum' + m[1:])
+
+ # -> special case: properties are
+ # stored as a dict.
+ if m == 'mProperties':
+ setattr(target, name, _get_properties(obj, length))
+ continue
+
+
+ if not length: # empty!
+ setattr(target, name, [])
+ logger.debug(str(self) + ": " + name + " is an empty list.")
+ continue
+
+
+ try:
+ if obj._type_ in assimp_structs_as_tuple:
+ setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32))
+
+ logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name)
+
+ else:
+ setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array?
+
+ logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
+
+ # initialize array elements
+ for e in getattr(target, name):
+ call_init(e, caller = target)
+
+
+ except IndexError:
+ logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.")
+ sys.exit(1)
+
+ except ValueError as e:
+
+ logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.")
+ if "setting an array element with a sequence" in str(e):
+ logger.error("Note that pyassimp does not currently "
+ "support meshes with mixed triangles "
+ "and quads. Try to load your mesh with"
+ " a post-processing to triangulate your"
+ " faces.")
+ sys.exit(1)
+
+
+ else: # starts with 'm' but not iterable
+
+ setattr(target, name, obj)
+ logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
+
+ call_init(obj, caller = target)
+
+
+
+
+ if isinstance(self, structs.Mesh):
+ _finalize_mesh(self, target)
+
+ if isinstance(self, structs.Texture):
+ _finalize_texture(self, target)
+
+
+ return self
+
+
+"""
+Python magic to add the _init() function to all C struct classes.
+"""
+for struct in dir(structs):
+ if not (struct.startswith('_') or struct.startswith('c_') or struct == "Structure" or struct == "POINTER") and not isinstance(getattr(structs, struct),int):
+
+ setattr(getattr(structs, struct), '_init', _init)
+
+
+class AssimpLib(object):
+ """
+ Assimp-Singleton
+ """
+ load, release, dll = helper.search_library()
+
+#the loader as singleton
+_assimp_lib = AssimpLib()
+
+def pythonize_assimp(type, obj, scene):
+ """ This method modify the Assimp data structures
+ to make them easier to work with in Python.
+
+ Supported operations:
+ - MESH: replace a list of mesh IDs by reference to these meshes
+ - ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node.
+
+ :param type: the type of modification to operate (cf above)
+ :param obj: the input object to modify
+ :param scene: a reference to the whole scene
+ """
+
+ if type == "MESH":
+ meshes = []
+ for i in obj:
+ meshes.append(scene.meshes[i])
+ return meshes
+
+ if type == "ADDTRANSFORMATION":
+
+ def getnode(node, name):
+ if node.name == name: return node
+ for child in node.children:
+ n = getnode(child, name)
+ if n: return n
+
+ node = getnode(scene.rootnode, obj.name)
+ if not node:
+ raise AssimpError("Object " + str(obj) + " has no associated node!")
+ setattr(obj, "transformation", node.transformation)
+
+
+def recur_pythonize(node, scene):
+ """ Recursively call pythonize_assimp on
+ nodes tree to apply several post-processing to
+ pythonize the assimp datastructures.
+ """
+
+ node.meshes = pythonize_assimp("MESH", node.meshes, scene)
+
+ for mesh in node.meshes:
+ mesh.material = scene.materials[mesh.materialindex]
+
+ for cam in scene.cameras:
+ pythonize_assimp("ADDTRANSFORMATION", cam, scene)
+
+ #for light in scene.lights:
+ # pythonize_assimp("ADDTRANSFORMATION", light, scene)
+
+ for c in node.children:
+ recur_pythonize(c, scene)
+
+
+def load(filename, processing=0):
+ """
+ Loads the model with some specific processing parameters.
+
+ filename - file to load model from
+ processing - processing parameters
+
+ result Scene-object with model-data
+
+ throws AssimpError - could not open file
+ """
+ #read pure data
+ #from ctypes import c_char_p, c_uint
+ #model = _assimp_lib.load(c_char_p(filename), c_uint(processing))
+ model = _assimp_lib.load(filename.encode("ascii"), processing)
+ if not model:
+ #Uhhh, something went wrong!
+ raise AssimpError("could not import file: %s" % filename)
+
+ scene = model.contents._init()
+
+ recur_pythonize(scene.rootnode, scene)
+
+ return scene
+
+def release(scene):
+ from ctypes import pointer
+ _assimp_lib.release(pointer(scene))
+
+def _finalize_texture(tex, target):
+ setattr(target, "achformathint", tex.achFormatHint)
+ data = numpy.array([make_tuple(getattr(tex, pcData)[i]) for i in range(tex.mWidth * tex.mHeight)])
+ setattr(target, "data", data)
+
+
+
+def _finalize_mesh(mesh, target):
+ """ Building of meshes is a bit specific.
+
+ We override here the various datasets that can
+    not be processed as regular fields.
+
+ For instance, the length of the normals array is
+ mNumVertices (no mNumNormals is available)
+ """
+ nb_vertices = getattr(mesh, "mNumVertices")
+
+ def fill(name):
+ mAttr = getattr(mesh, name)
+ if mAttr:
+ data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32)
+ setattr(target, name[1:].lower(), data)
+ else:
+ setattr(target, name[1:].lower(), [])
+
+ def fillarray(name):
+ mAttr = getattr(mesh, name)
+
+ data = []
+ for index, mSubAttr in enumerate(mAttr):
+ if mSubAttr:
+ data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)])
+
+ setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32))
+
+ fill("mNormals")
+ fill("mTangents")
+ fill("mBitangents")
+
+ fillarray("mColors")
+ fillarray("mTextureCoords")
+
+ # prepare faces
+ faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32)
+ setattr(target, 'faces', faces)
+
+def _get_properties(properties, length):
+ """
+ Convenience Function to get the material properties as a dict
+ and values in a python format.
+ """
+ result = {}
+ #read all properties
+ for p in [properties[i] for i in range(length)]:
+ #the name
+ p = p.contents
+ key = str(p.mKey.data)
+
+ #the data
+ from ctypes import POINTER, cast, c_int, c_float, sizeof
+ if p.mType == 1:
+ arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents
+ value = numpy.array([x for x in arr])
+ elif p.mType == 3: #string can't be an array
+ value = cast(p.mData, POINTER(structs.String)).contents.data
+ elif p.mType == 4:
+ arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents
+ value = numpy.array([x for x in arr])
+ else:
+ value = p.mData[:p.mDataLength]
+
+ result[key] = value
+
+ return result
+
+def aiGetMaterialFloatArray(material, key):
+ AI_SUCCESS = 0
+ from ctypes import byref, pointer, cast, c_float, POINTER, sizeof, c_uint
+ out = structs.Color4D()
+ max = c_uint(sizeof(structs.Color4D))
+ r=_assimp_lib.dll.aiGetMaterialFloatArray(pointer(material),
+ key[0],
+ key[1],
+ key[2],
+ byref(out),
+ byref(max))
+
+ if (r != AI_SUCCESS):
+ raise AssimpError("aiGetMaterialFloatArray failed!")
+
+ out._init()
+ return [out[i] for i in range(max.value)]
+
+def aiGetMaterialString(material, key):
+ AI_SUCCESS = 0
+ from ctypes import byref, pointer, cast, c_float, POINTER, sizeof, c_uint
+ out = structs.String()
+ r=_assimp_lib.dll.aiGetMaterialString(pointer(material),
+ key[0],
+ key[1],
+ key[2],
+ byref(out))
+
+ if (r != AI_SUCCESS):
+ raise AssimpError("aiGetMaterialString failed!")
+
+ return str(out.data)
+
+
+
+def decompose_matrix(matrix):
+ if not isinstance(matrix, structs.Matrix4x4):
+ raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!")
+
+ scaling = structs.Vector3D()
+ rotation = structs.Quaternion()
+ position = structs.Vector3D()
+
+ from ctypes import byref, pointer
+ _assimp_lib.dll.aiDecomposeMatrix(pointer(matrix), byref(scaling), byref(rotation), byref(position))
+ return scaling._init(), rotation._init(), position._init()
diff --git a/port/PyAssimp/pyassimp/postprocess.py b/port/PyAssimp/pyassimp/postprocess.py
new file mode 100644
index 000000000..932c7c660
--- /dev/null
+++ b/port/PyAssimp/pyassimp/postprocess.py
@@ -0,0 +1,529 @@
+#
+# Calculates the tangents and bitangents for the imported meshes.
+#
+# Does nothing if a mesh does not have normals. You might want this post
+# processing step to be executed if you plan to use tangent space calculations
+# such as normal mapping applied to the meshes. There's a config setting,
+# #AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE, which allows you to specify
+# a maximum smoothing angle for the algorithm. However, usually you'll
+# want to leave it at the default value.
+#
+aiProcess_CalcTangentSpace = 0x1
+
+##
+# Identifies and joins identical vertex data sets within all
+# imported meshes.
+#
+# After this step is run, each mesh contains unique vertices,
+# so a vertex may be used by multiple faces. You usually want
+# to use this post processing step. If your application deals with
+# indexed geometry, this step is compulsory or you'll just waste rendering
+# time. If this flag is not specified, no vertices are referenced by
+# more than one face and no index buffer is required for rendering.
+#
+aiProcess_JoinIdenticalVertices = 0x2
+
+##
+# Converts all the imported data to a left-handed coordinate space.
+#
+# By default the data is returned in a right-handed coordinate space (which
+# OpenGL prefers). In this space, +X points to the right,
+# +Z points towards the viewer, and +Y points upwards. In the DirectX
+# coordinate space +X points to the right, +Y points upwards, and +Z points
+# away from the viewer.
+#
+# You'll probably want to consider this flag if you use Direct3D for
+# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
+# setting and bundles all conversions typically required for D3D-based
+# applications.
+#
+aiProcess_MakeLeftHanded = 0x4
+
+##
+# Triangulates all faces of all meshes.
+#
+# By default the imported mesh data might contain faces with more than 3
+# indices. For rendering you'll usually want all faces to be triangles.
+# This post processing step splits up faces with more than 3 indices into
+# triangles. Line and point primitives are #not# modified! If you want
+# 'triangles only' with no other kinds of primitives, try the following
+# solution:
+#
+# - Specify both #aiProcess_Triangulate and #aiProcess_SortByPType
+#
+#
+# - Ignore all point and line meshes when you process assimp's output
+#
+#
+#
+aiProcess_Triangulate = 0x8
+
+##
+# Removes some parts of the data structure (animations, materials,
+# light sources, cameras, textures, vertex components).
+#
+# The components to be removed are specified in a separate
+# configuration option, #AI_CONFIG_PP_RVC_FLAGS. This is quite useful
+# if you don't need all parts of the output structure. Vertex colors
+# are rarely used today for example... Calling this step to remove unneeded
+# data from the pipeline as early as possible results in increased
+# performance and a more optimized output data structure.
+# This step is also useful if you want to force Assimp to recompute
+# normals or tangents. The corresponding steps don't recompute them if
+# they're already there (loaded from the source asset). By using this
+# step you can make sure they are NOT there.
+#
+# This flag is a poor one, mainly because its purpose is usually
+# misunderstood. Consider the following case: a 3D model has been exported
+# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
+# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
+# optimize the data because of these nasty little vertex colors.
+# Most apps don't even process them, so it's all for nothing. By using
+# this step, unneeded components are excluded as early as possible
+# thus opening more room for internal optimizations.
+#
+aiProcess_RemoveComponent = 0x10
+
+##
+# Generates normals for all faces of all meshes.
+#
+# This is ignored if normals are already there at the time this flag
+# is evaluated. Model importers try to load them from the source file, so
+# they're usually already there. Face normals are shared between all points
+# of a single face, so a single point can have multiple normals, which
+# forces the library to duplicate vertices in some cases.
+# #aiProcess_JoinIdenticalVertices is #senseless# then.
+#
+# This flag may not be specified together with #aiProcess_GenSmoothNormals.
+#
+aiProcess_GenNormals = 0x20
+
+##
+# Generates smooth normals for all vertices in the mesh.
+#
+# This is ignored if normals are already there at the time this flag
+# is evaluated. Model importers try to load them from the source file, so
+# they're usually already there.
+#
+# This flag may not be specified together with
+# #aiProcess_GenNormals. There's a configuration option,
+# #AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE which allows you to specify
+# an angle maximum for the normal smoothing algorithm. Normals exceeding
+# this limit are not smoothed, resulting in a 'hard' seam between two faces.
+# Using a decent angle here (e.g. 80 degrees) results in very good visual
+# appearance.
+#
+aiProcess_GenSmoothNormals = 0x40
+
+##
+# Splits large meshes into smaller sub-meshes.
+#
+# This is quite useful for real-time rendering, where the number of triangles
+# which can be maximally processed in a single draw-call is limited
+# by the video driver/hardware. The maximum vertex buffer is usually limited
+# too. Both requirements can be met with this step: you may specify both a
+# triangle and vertex limit for a single mesh.
+#
+# The split limits can (and should!) be set through the
+# #AI_CONFIG_PP_SLM_VERTEX_LIMIT and #AI_CONFIG_PP_SLM_TRIANGLE_LIMIT
+# settings. The default values are #AI_SLM_DEFAULT_MAX_VERTICES and
+# #AI_SLM_DEFAULT_MAX_TRIANGLES.
+#
+# Note that splitting is generally a time-consuming task, but only if there's
+# something to split. The use of this step is recommended for most users.
+#
+aiProcess_SplitLargeMeshes = 0x80
+
+##
+# Removes the node graph and pre-transforms all vertices with
+# the local transformation matrices of their nodes.
+#
+# The output scene still contains nodes, however there is only a
+# root node with children, each one referencing only one mesh,
+# and each mesh referencing one material. For rendering, you can
+# simply render all meshes in order - you don't need to pay
+# attention to local transformations and the node hierarchy.
+# Animations are removed during this step.
+# This step is intended for applications without a scenegraph.
+# The step CAN cause some problems: if e.g. a mesh of the asset
+# contains normals and another, using the same material index, does not,
+# they will be brought together, but the first meshes's part of
+# the normal list is zeroed. However, these artifacts are rare.
+# @note The #AI_CONFIG_PP_PTV_NORMALIZE configuration property
+# can be set to normalize the scene's spatial dimension to the -1...1
+# range.
+#
+aiProcess_PreTransformVertices = 0x100
+
+##
+# Limits the number of bones simultaneously affecting a single vertex
+# to a maximum value.
+#
+# If any vertex is affected by more than the maximum number of bones, the least
+# important vertex weights are removed and the remaining vertex weights are
+# renormalized so that the weights still sum up to 1.
+# The default bone weight limit is 4 (defined as #AI_LMW_MAX_WEIGHTS in
+# config.h), but you can use the #AI_CONFIG_PP_LBW_MAX_WEIGHTS setting to
+# supply your own limit to the post processing step.
+#
+# If you intend to perform the skinning in hardware, this post processing
+# step might be of interest to you.
+#
+aiProcess_LimitBoneWeights = 0x200
+
+##
+# Validates the imported scene data structure.
+# This makes sure that all indices are valid, all animations and
+# bones are linked correctly, all material references are correct .. etc.
+#
+# It is recommended that you capture Assimp's log output if you use this flag,
+# so you can easily find out what's wrong if a file fails the
+# validation. The validator is quite strict and will find #all#
+# inconsistencies in the data structure... It is recommended that plugin
+# developers use it to debug their loaders. There are two types of
+# validation failures:
+#
+# - Error: There's something wrong with the imported data. Further
+# postprocessing is not possible and the data is not usable at all.
+# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
+# carry the error message around.
+#
+#
+# - Warning: There are some minor issues (e.g. 1000000 animation
+# keyframes with the same time), but further postprocessing and use
+# of the data structure is still safe. Warning details are written
+# to the log file, #AI_SCENE_FLAGS_VALIDATION_WARNING is set
+# in #aiScene::mFlags
+#
+#
+#
+# This post-processing step is not time-consuming. Its use is not
+# compulsory, but recommended.
+#
+aiProcess_ValidateDataStructure = 0x400
+
+##
+# Reorders triangles for better vertex cache locality.
+#
+# The step tries to improve the ACMR (average post-transform vertex cache
+# miss ratio) for all meshes. The implementation runs in O(n) and is
+# roughly based on the 'tipsify' algorithm (see this
+# paper).
+#
+# If you intend to render huge models in hardware, this step might
+# be of interest to you. The #AI_CONFIG_PP_ICL_PTCACHE_SIZE config
+# setting can be used to fine-tune the cache optimization.
+#
+aiProcess_ImproveCacheLocality = 0x800
+
+##
+# Searches for redundant/unreferenced materials and removes them.
+#
+# This is especially useful in combination with the
+# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
+# Both join small meshes with equal characteristics, but they can't do
+# their work if two meshes have different materials. Because several
+# material settings are lost during Assimp's import filters,
+# (and because many exporters don't check for redundant materials), huge
+# models often have materials which are defined several times with
+# exactly the same settings.
+#
+# Several material settings not contributing to the final appearance of
+# a surface are ignored in all comparisons (e.g. the material name).
+# So, if you're passing additional information through the
+# content pipeline (probably using #magic# material names), don't
+# specify this flag. Alternatively take a look at the
+# #AI_CONFIG_PP_RRM_EXCLUDE_LIST setting.
+#
+aiProcess_RemoveRedundantMaterials = 0x1000
+
+##
+# This step tries to determine which meshes have normal vectors
+# that are facing inwards and inverts them.
+#
+# The algorithm is simple but effective:
+# the bounding box of all vertices + their normals is compared against
+# the volume of the bounding box of all vertices without their normals.
+# This works well for most objects, problems might occur with planar
+# surfaces. However, the step tries to filter such cases.
+# The step inverts all in-facing normals. Generally it is recommended
+# to enable this step, although the result is not always correct.
+#
+aiProcess_FixInfacingNormals = 0x2000
+
+##
+# This step splits meshes with more than one primitive type in
+# homogeneous sub-meshes.
+#
+# The step is executed after the triangulation step. After the step
+# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
+# especially useful for real-time rendering where point and line
+# primitives are often ignored or rendered separately.
+# You can use the #AI_CONFIG_PP_SBP_REMOVE option to specify which
+# primitive types you need. This can be used to easily exclude
+# lines and points, which are rarely used, from the import.
+#
+aiProcess_SortByPType = 0x8000
+
+##
+# This step searches all meshes for degenerate primitives and
+# converts them to proper lines or points.
+#
+# A face is 'degenerate' if one or more of its points are identical.
+# To have the degenerate stuff not only detected and collapsed but
+# removed, try one of the following procedures:
+#
+# 1. (if you support lines and points for rendering but don't
+# want the degenerates)
+#
+# - Specify the #aiProcess_FindDegenerates flag.
+#
+#
+#
+# - Set the AI_CONFIG_PP_FD_REMOVE option to 1. This will
+# cause the step to remove degenerate triangles from the import
+# as soon as they're detected. They won't pass any further
+# pipeline steps.
+#
+#
+#
+#
+# 2. (if you don't support lines and points at all)
+#
+# - Specify the #aiProcess_FindDegenerates flag.
+#
+#
+#
+# - Specify the #aiProcess_SortByPType flag. This moves line and
+# point primitives to separate meshes.
+#
+#
+#
+# - Set the AI_CONFIG_PP_SBP_REMOVE option to
+# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
+# @endcode to cause SortByPType to reject point
+# and line meshes from the scene.
+#
+#
+#
+# @note Degenerate polygons are not necessarily evil and that's why
+# they're not removed by default. There are several file formats which
+# don't support lines or points, and some exporters bypass the
+# format specification and write them as degenerate triangles instead.
+#
+aiProcess_FindDegenerates = 0x10000
+
+##
+# This step searches all meshes for invalid data, such as zeroed
+# normal vectors or invalid UV coords and removes/fixes them. This is
+# intended to get rid of some common exporter errors.
+#
+# This is especially useful for normals. If they are invalid, and
+# the step recognizes this, they will be removed and can later
+# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.
+# The step will also remove meshes that are infinitely small and reduce
+# animation tracks consisting of hundreds of redundant keys to a single
+# key. The AI_CONFIG_PP_FID_ANIM_ACCURACY config property decides
+# the accuracy of the check for duplicate animation tracks.
+#
+aiProcess_FindInvalidData = 0x20000
+
+##
+# This step converts non-UV mappings (such as spherical or
+# cylindrical mapping) to proper texture coordinate channels.
+#
+# Most applications will support UV mapping only, so you will
+# probably want to specify this step in every case. Note that Assimp is not
+# always able to match the original mapping implementation of the
+# 3D app which produced a model perfectly. It's always better to let the
+# modelling app compute the UV channels - 3ds max, Maya, Blender,
+# LightWave, and Modo do this for example.
+#
+# @note If this step is not requested, you'll need to process the
+# #AI_MATKEY_MAPPING material property in order to display all assets
+# properly.
+#
+aiProcess_GenUVCoords = 0x40000
+
+##
+# This step applies per-texture UV transformations and bakes
+# them into stand-alone texture coordinate channels.
+#
+# UV transformations are specified per-texture - see the
+# #AI_MATKEY_UVTRANSFORM material key for more information.
+# This step processes all textures with
+# transformed input UV coordinates and generates a new (pre-transformed) UV channel
+# which replaces the old channel. Most applications won't support UV
+# transformations, so you will probably want to specify this step.
+#
+# @note UV transformations are usually implemented in real-time apps by
+# transforming texture coordinates at vertex shader stage with a 3x3
+# (homogenous) transformation matrix.
+#
+aiProcess_TransformUVCoords = 0x80000
+
+##
+# This step searches for duplicate meshes and replaces them
+# with references to the first mesh.
+#
+# This step takes a while, so don't use it if speed is a concern.
+# Its main purpose is to workaround the fact that many export
+# file formats don't support instanced meshes, so exporters need to
+# duplicate meshes. This step removes the duplicates again. Please
+# note that Assimp does not currently support per-node material
+# assignment to meshes, which means that identical meshes with
+# different materials are currently #not# joined, although this is
+# planned for future versions.
+#
+aiProcess_FindInstances = 0x100000
+
+##
+# A postprocessing step to reduce the number of meshes.
+#
+# This will, in fact, reduce the number of draw calls.
+#
+# This is a very effective optimization and is recommended to be used
+# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
+# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
+#
+aiProcess_OptimizeMeshes = 0x200000
+
+
+##
+# A postprocessing step to optimize the scene hierarchy.
+#
+# Nodes without animations, bones, lights or cameras assigned are
+# collapsed and joined.
+#
+# Node names can be lost during this step. If you use special 'tag nodes'
+# to pass additional information through your content pipeline, use the
+# #AI_CONFIG_PP_OG_EXCLUDE_LIST setting to specify a list of node
+# names you want to be kept. Nodes matching one of the names in this list won't
+# be touched or modified.
+#
+# Use this flag with caution. Most simple files will be collapsed to a
+# single node, so complex hierarchies are usually completely lost. This is not
+# useful for editor environments, but probably a very effective
+# optimization if you just want to get the model data, convert it to your
+# own format, and render it as fast as possible.
+#
+# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
+# results.
+#
+# @note 'Crappy' scenes with thousands of extremely small meshes packed
+# in deeply nested nodes exist for almost all file formats.
+# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
+# usually fixes them all and makes them renderable.
+#
+aiProcess_OptimizeGraph = 0x400000
+
+##
+# This step flips all UV coordinates along the y-axis and adjusts
+# material settings and bitangents accordingly.
+#
+# Output UV coordinate system:
+# @code
+# 0y|0y ---------- 1x|0y
+# | |
+# | |
+# | |
+# 0x|1y ---------- 1x|1y
+# @endcode
+#
+# You'll probably want to consider this flag if you use Direct3D for
+# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
+# setting and bundles all conversions typically required for D3D-based
+# applications.
+#
+aiProcess_FlipUVs = 0x800000
+
+##
+# This step adjusts the output face winding order to be CW.
+#
+# The default face winding order is counter clockwise (CCW).
+#
+# Output face order:
+# @code
+# x2
+#
+# x0
+# x1
+# @endcode
+#
+aiProcess_FlipWindingOrder = 0x1000000
+
+##
+# This step splits meshes with many bones into sub-meshes so that each
+# sub-mesh has fewer or as many bones as a given limit.
+#
+aiProcess_SplitByBoneCount = 0x2000000
+
+##
+# This step removes bones losslessly or according to some threshold.
+#
+# In some cases (i.e. formats that require it) exporters are forced to
+# assign dummy bone weights to otherwise static meshes assigned to
+# animated meshes. Full, weight-based skinning is expensive while
+# animating nodes is extremely cheap, so this step is offered to clean up
+# the data in that regard.
+#
+# Use #AI_CONFIG_PP_DB_THRESHOLD to control this.
+# Use #AI_CONFIG_PP_DB_ALL_OR_NONE if you want bones removed if and
+# only if all bones within the scene qualify for removal.
+#
+aiProcess_Debone = 0x4000000
+
+aiProcess_GenEntityMeshes = 0x100000
+aiProcess_OptimizeAnimations = 0x200000
+aiProcess_FixTexturePaths = 0x200000
+
+## @def aiProcess_ConvertToLeftHanded
+ # @brief Shortcut flag for Direct3D-based applications.
+ #
+ # Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
+ # #aiProcess_FlipWindingOrder flags.
+ # The output data matches Direct3D's conventions: left-handed geometry, upper-left
+ # origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
+ #
+ # @deprecated
+ #
+aiProcess_ConvertToLeftHanded = ( \
+ aiProcess_MakeLeftHanded | \
+ aiProcess_FlipUVs | \
+ aiProcess_FlipWindingOrder | \
+ 0 )
+
+
+## @def aiProcessPreset_TargetRealtimeUse_Fast
+ # @brief Default postprocess configuration optimizing the data for real-time rendering.
+ #
+ # Applications would want to use this preset to load models on end-user PCs,
+ # maybe for direct use in game.
+ #
+ # If you're using DirectX, don't forget to combine this value with
+ # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
+ # in your application apply the #aiProcess_TransformUVCoords step, too.
+ # @note Please take the time to read the docs for the steps enabled by this preset.
+ # Some of them offer further configurable properties, while some of them might not be of
+ # use for you so it might be better to not specify them.
+ #
+aiProcessPreset_TargetRealtime_Fast = ( \
+ aiProcess_CalcTangentSpace | \
+ aiProcess_GenNormals | \
+ aiProcess_JoinIdenticalVertices | \
+ aiProcess_Triangulate | \
+ aiProcess_GenUVCoords | \
+ aiProcess_SortByPType | \
+ 0 )
+
+ ## @def aiProcessPreset_TargetRealtime_Quality
+ # @brief Default postprocess configuration optimizing the data for real-time rendering.
+ #
+ # Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
+ # performs some extra optimizations to improve rendering speed and
+ # to minimize memory usage. It could be a good choice for a level editor
+ # environment where import speed is not so important.
+ #
+ # If you're using DirectX, don't forget to combine this value with
+ # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
+ # in your application apply the #aiProcess_TransformUVCoords step, too.
+ # @note Please take the time to read the docs for the steps enabled by this preset.
+ # Some of them offer further configurable properties, while some of them might not be
+ # of use for you so it might be better to not specify them.
+ #
+aiProcessPreset_TargetRealtime_Quality = ( \
+ aiProcess_CalcTangentSpace | \
+ aiProcess_GenSmoothNormals | \
+ aiProcess_JoinIdenticalVertices | \
+ aiProcess_ImproveCacheLocality | \
+ aiProcess_LimitBoneWeights | \
+ aiProcess_RemoveRedundantMaterials | \
+ aiProcess_SplitLargeMeshes | \
+ aiProcess_Triangulate | \
+ aiProcess_GenUVCoords | \
+ aiProcess_SortByPType | \
+ aiProcess_FindDegenerates | \
+ aiProcess_FindInvalidData | \
+ 0 )
+
+ ## @def aiProcessPreset_TargetRealtime_MaxQuality
+ # @brief Default postprocess configuration optimizing the data for real-time rendering.
+ #
+ # This preset enables almost every optimization step to achieve perfectly
+ # optimized data. It's your choice for level editor environments where import speed
+ # is not important.
+ #
+ # If you're using DirectX, don't forget to combine this value with
+ # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
+ # in your application, apply the #aiProcess_TransformUVCoords step, too.
+ # @note Please take the time to read the docs for the steps enabled by this preset.
+ # Some of them offer further configurable properties, while some of them might not be
+ # of use for you so it might be better to not specify them.
+ #
+aiProcessPreset_TargetRealtime_MaxQuality = ( \
+ aiProcessPreset_TargetRealtime_Quality | \
+ aiProcess_FindInstances | \
+ aiProcess_ValidateDataStructure | \
+ aiProcess_OptimizeMeshes | \
+ 0 )
+
+
diff --git a/port/PyAssimp/scripts/opengl_viewer.py b/port/PyAssimp/scripts/opengl_viewer.py
new file mode 100755
index 000000000..2cfc4748c
--- /dev/null
+++ b/port/PyAssimp/scripts/opengl_viewer.py
@@ -0,0 +1,421 @@
+#!/usr/bin/env python
+#-*- coding: UTF-8 -*-
+
+""" This program demonstrates the use of pyassimp to render
+objects in OpenGL.
+
+It loads a 3D model with ASSIMP and display it.
+
+Materials are supported but textures are currently ignored.
+
+Half-working keyboard + mouse navigation is supported.
+
+This sample is based on several sources, including:
+ - http://www.lighthouse3d.com/tutorials
+ - http://www.songho.ca/opengl/gl_transform.html
+ - http://code.activestate.com/recipes/325391/
+ - ASSIMP's C++ SimpleOpenGL viewer
+"""
+
+import os, sys
+from OpenGL.GLUT import *
+from OpenGL.GLU import *
+from OpenGL.GL import *
+from OpenGL.arrays import ArrayDatatype
+
+import logging;logger = logging.getLogger("assimp_opengl")
+logging.basicConfig(level=logging.INFO)
+
+import math
+import numpy
+
+from pyassimp import core as pyassimp
+from pyassimp.postprocess import *
+from pyassimp.helper import *
+
+
+name = 'pyassimp OpenGL viewer'
+height = 600
+width = 900
+
+class GLRenderer():
+ def __init__(self):
+ self.scene = None
+
+ self.drot = 0.0
+ self.dp = 0.0
+
+ self.angle = 0.0
+ self.x = 1.0
+ self.z = 3.0
+ self.lx = 0.0
+ self.lz = 0.0
+ self.using_fixed_cam = False
+ self.current_cam_index = 0
+
+ self.x_origin = -1 # x position of the mouse when pressing left btn
+
+ # for FPS calculation
+ self.prev_time = 0
+ self.prev_fps_time = 0
+ self.frames = 0
+
+ def prepare_gl_buffers(self, mesh):
+
+ mesh.gl = {}
+
+ # Fill the buffer for vertex positions
+ mesh.gl["vertices"] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["vertices"])
+ glBufferData(GL_ARRAY_BUFFER,
+ mesh.vertices,
+ GL_STATIC_DRAW)
+
+ # Fill the buffer for normals
+ mesh.gl["normals"] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["normals"])
+ glBufferData(GL_ARRAY_BUFFER,
+ mesh.normals,
+ GL_STATIC_DRAW)
+
+
+ # Fill the buffer for vertex positions
+ mesh.gl["triangles"] = glGenBuffers(1)
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["triangles"])
+ glBufferData(GL_ELEMENT_ARRAY_BUFFER,
+ mesh.faces,
+ GL_STATIC_DRAW)
+
+ # Unbind buffers
+ glBindBuffer(GL_ARRAY_BUFFER,0)
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0)
+
+
+ def load_dae(self, path, postprocess = None):
+ logger.info("Loading model:" + path + "...")
+
+ if postprocess:
+ self.scene = pyassimp.load(path, postprocess)
+ else:
+ self.scene = pyassimp.load(path)
+ logger.info("Done.")
+
+ scene = self.scene
+ #log some statistics
+ logger.info(" meshes: %d" % len(scene.meshes))
+ logger.info(" total faces: %d" % sum([len(mesh.faces) for mesh in scene.meshes]))
+ logger.info(" materials: %d" % len(scene.materials))
+ self.bb_min, self.bb_max = get_bounding_box(self.scene)
+ logger.info(" bounding box:" + str(self.bb_min) + " - " + str(self.bb_max))
+
+ self.scene_center = [(a + b) / 2. for a, b in zip(self.bb_min, self.bb_max)]
+
+ for index, mesh in enumerate(scene.meshes):
+ self.prepare_gl_buffers(mesh)
+
+ # Finally release the model
+ pyassimp.release(scene)
+
+ def cycle_cameras(self):
+        # advances self.current_cam_index through the scene's cameras
+ if not self.scene.cameras:
+ return None
+ self.current_cam_index = (self.current_cam_index + 1) % len(self.scene.cameras)
+ cam = self.scene.cameras[self.current_cam_index]
+ logger.info("Switched to camera " + str(cam))
+ return cam
+
+ def set_default_camera(self):
+
+ if not self.using_fixed_cam:
+ glLoadIdentity()
+ gluLookAt(self.x ,1., self.z, # pos
+ self.x + self.lx - 1.0, 1., self.z + self.lz - 3.0, # look at
+ 0.,1.,0.) # up vector
+
+
+ def set_camera(self, camera):
+
+ if not camera:
+ return
+
+ self.using_fixed_cam = True
+
+ znear = camera.clipplanenear
+ zfar = camera.clipplanefar
+ aspect = camera.aspect
+ fov = camera.horizontalfov
+
+ glMatrixMode(GL_PROJECTION)
+ glLoadIdentity()
+
+        # Compute gl frustum
+ tangent = math.tan(fov/2.)
+ h = znear * tangent
+ w = h * aspect
+
+ # params: left, right, bottom, top, near, far
+ glFrustum(-w, w, -h, h, znear, zfar)
+ # equivalent to:
+ #gluPerspective(fov * 180/math.pi, aspect, znear, zfar)
+
+ glMatrixMode(GL_MODELVIEW)
+ glLoadIdentity()
+
+ cam = transform(camera.position, camera.transformation)
+ at = transform(camera.lookat, camera.transformation)
+ gluLookAt(cam[0], cam[2], -cam[1],
+ at[0], at[2], -at[1],
+ 0, 1, 0)
+
+ def fit_scene(self, restore = False):
+ """ Compute a scale factor and a translation to fit and center
+ the whole geometry on the screen.
+ """
+
+ x_max = self.bb_max[0] - self.bb_min[0]
+ y_max = self.bb_max[1] - self.bb_min[1]
+ tmp = max(x_max, y_max)
+ z_max = self.bb_max[2] - self.bb_min[2]
+ tmp = max(z_max, tmp)
+
+ if not restore:
+ tmp = 1. / tmp
+
+ logger.info("Scaling the scene by %.03f" % tmp)
+ glScalef(tmp, tmp, tmp)
+
+ # center the model
+ direction = -1 if not restore else 1
+ glTranslatef( direction * self.scene_center[0],
+ direction * self.scene_center[1],
+ direction * self.scene_center[2] )
+
+ return x_max, y_max, z_max
+
+ def apply_material(self, mat):
+        """ Apply an OpenGL material, using one OpenGL list per material to cache
+ the operation.
+ """
+
+ if not hasattr(mat, "gl_mat"): # evaluate once the mat properties, and cache the values in a glDisplayList.
+
+ diffuse = mat.properties.get("$clr.diffuse", numpy.array([0.8, 0.8, 0.8, 1.0]))
+ specular = mat.properties.get("$clr.specular", numpy.array([0., 0., 0., 1.0]))
+ ambient = mat.properties.get("$clr.ambient", numpy.array([0.2, 0.2, 0.2, 1.0]))
+ emissive = mat.properties.get("$clr.emissive", numpy.array([0., 0., 0., 1.0]))
+ shininess = min(mat.properties.get("$mat.shininess", 1.0), 128)
+ wireframe = mat.properties.get("$mat.wireframe", 0)
+ twosided = mat.properties.get("$mat.twosided", 1)
+
+ from OpenGL.raw import GL
+ setattr(mat, "gl_mat", GL.GLuint(0))
+ mat.gl_mat = glGenLists(1)
+ glNewList(mat.gl_mat, GL_COMPILE)
+
+ glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, diffuse)
+ glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, specular)
+ glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, ambient)
+ glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, emissive)
+ glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, shininess)
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE if wireframe else GL_FILL)
+ glDisable(GL_CULL_FACE) if twosided else glEnable(GL_CULL_FACE)
+
+ glEndList()
+
+ glCallList(mat.gl_mat)
+
+
+
+ def do_motion(self):
+
+ gl_time = glutGet(GLUT_ELAPSED_TIME)
+
+ # Compute the new position of the camera and set it
+ self.x += self.dp * self.lx * 0.01 * (gl_time-self.prev_time)
+ self.z += self.dp * self.lz * 0.01 * (gl_time-self.prev_time)
+ self.angle += self.drot * 0.1 * (gl_time-self.prev_time)
+ self.lx = math.sin(self.angle)
+ self.lz = -math.cos(self.angle)
+ self.set_default_camera()
+
+ self.prev_time = gl_time
+
+ # Compute FPS
+ self.frames += 1
+ if gl_time - self.prev_fps_time >= 1000:
+ current_fps = self.frames * 1000 / (gl_time - self.prev_fps_time)
+ logger.info('%.0f fps' % current_fps)
+ self.frames = 0
+ self.prev_fps_time = gl_time
+
+ glutPostRedisplay()
+
+ def recursive_render(self, node):
+ """ Main recursive rendering method.
+ """
+
+ # save model matrix and apply node transformation
+ glPushMatrix()
+ m = node.transformation.transpose() # OpenGL row major
+ glMultMatrixf(m)
+
+ for mesh in node.meshes:
+ self.apply_material(mesh.material)
+
+ glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["vertices"])
+ glEnableClientState(GL_VERTEX_ARRAY)
+ glVertexPointer(3, GL_FLOAT, 0, None)
+
+ glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["normals"])
+ glEnableClientState(GL_NORMAL_ARRAY)
+ glNormalPointer(GL_FLOAT, 0, None)
+
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["triangles"])
+ glDrawElements(GL_TRIANGLES,len(mesh.faces) * 3, GL_UNSIGNED_INT, None)
+
+ glDisableClientState(GL_VERTEX_ARRAY)
+ glDisableClientState(GL_NORMAL_ARRAY)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
+
+ for child in node.children:
+ self.recursive_render(child)
+
+ glPopMatrix()
+
+
+ def display(self):
+ """ GLUT callback to redraw OpenGL surface
+ """
+ glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
+
+ self.recursive_render(self.scene.rootnode)
+
+ glutSwapBuffers()
+ self.do_motion()
+ return
+
+ ####################################################################
+ ## GLUT keyboard and mouse callbacks ##
+ ####################################################################
+ def onkeypress(self, key, x, y):
+ if key == 'c':
+ self.fit_scene(restore = True)
+ self.set_camera(self.cycle_cameras())
+ if key == 'q':
+ sys.exit(0)
+
+ def onspecialkeypress(self, key, x, y):
+
+ fraction = 0.05
+
+ if key == GLUT_KEY_UP:
+ self.dp = 0.5
+ if key == GLUT_KEY_DOWN:
+ self.dp = -0.5
+ if key == GLUT_KEY_LEFT:
+ self.drot = -0.01
+ if key == GLUT_KEY_RIGHT:
+ self.drot = 0.01
+
+ def onspecialkeyrelease(self, key, x, y):
+
+ if key == GLUT_KEY_UP:
+ self.dp = 0.
+ if key == GLUT_KEY_DOWN:
+ self.dp = 0.
+ if key == GLUT_KEY_LEFT:
+ self.drot = 0.0
+ if key == GLUT_KEY_RIGHT:
+ self.drot = 0.0
+
+ def onclick(self, button, state, x, y):
+ if button == GLUT_LEFT_BUTTON:
+ if state == GLUT_UP:
+ self.drot = 0
+ self.x_origin = -1
+ else: # GLUT_DOWN
+ self.x_origin = x
+
+ def onmousemove(self, x, y):
+ if self.x_origin >= 0:
+ self.drot = (x - self.x_origin) * 0.001
+
+ def render(self, filename=None, fullscreen = False, autofit = True, postprocess = None):
+ """
+
+ :param autofit: if true, scale the scene to fit the whole geometry
+ in the viewport.
+ """
+
+ # First initialize the openGL context
+ glutInit(sys.argv)
+ glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
+ if not fullscreen:
+ glutInitWindowSize(width, height)
+ glutCreateWindow(name)
+ else:
+ glutGameModeString("1024x768")
+ if glutGameModeGet(GLUT_GAME_MODE_POSSIBLE):
+ glutEnterGameMode()
+ else:
+ print("Fullscreen mode not available!")
+ sys.exit(1)
+
+ self.load_dae(filename, postprocess = postprocess)
+
+ glClearColor(0.1,0.1,0.1,1.)
+ #glShadeModel(GL_SMOOTH)
+
+ glEnable(GL_LIGHTING)
+
+ glEnable(GL_CULL_FACE)
+ glEnable(GL_DEPTH_TEST)
+
+ #lightZeroPosition = [10.,4.,10.,1.]
+ #lightZeroColor = [0.8,1.0,0.8,1.0] #green tinged
+ #glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
+ #glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
+ #glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
+ #glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
+ glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)
+ glEnable(GL_NORMALIZE)
+ glEnable(GL_LIGHT0)
+
+ glutDisplayFunc(self.display)
+
+
+ glMatrixMode(GL_PROJECTION)
+ glLoadIdentity()
+ gluPerspective(35.0, width/float(height) , 0.10, 100.0)
+ glMatrixMode(GL_MODELVIEW)
+ self.set_default_camera()
+
+ if autofit:
+            # scale the whole asset to fit into our view frustum
+ self.fit_scene()
+
+ glPushMatrix()
+
+ # Register GLUT callbacks for keyboard and mouse
+ glutKeyboardFunc(self.onkeypress)
+ glutSpecialFunc(self.onspecialkeypress)
+ glutIgnoreKeyRepeat(1)
+ glutSpecialUpFunc(self.onspecialkeyrelease)
+
+ glutMouseFunc(self.onclick)
+ glutMotionFunc(self.onmousemove)
+
+ glutMainLoop()
+
+
+if __name__ == '__main__':
+ if not len(sys.argv) > 1:
+        print("Usage: " + __file__ + " <model>")
+ sys.exit(0)
+
+ glrender = GLRenderer()
+ glrender.render(sys.argv[1], fullscreen = False, postprocess = aiProcessPreset_TargetRealtime_MaxQuality)
+
diff --git a/port/PyAssimp/scripts/quicktest.py b/port/PyAssimp/scripts/quicktest.py
new file mode 100755
index 000000000..e01a49fa5
--- /dev/null
+++ b/port/PyAssimp/scripts/quicktest.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+#-*- coding: UTF-8 -*-
+
+"""
+This module uses the sample.py script to load all test models it finds.
+
+Note: this is not an exhaustive test suite, it does not check the
+data structures in detail. It just verifies whether basic
+loading and querying of 3d models using pyassimp works.
+"""
+
+
+import sys,os
+import sample
+from pyassimp import errors
+
+# paths to be walked recursively
+basepaths = [os.path.join('..','..','test','models'), os.path.join('..','..','test','models-nonbsd')]
+
+# file extensions to be considered
+extensions = ['.3ds','.x','.lwo','.obj','.md5mesh','.dxf','.ply','.stl','.dae','.md5anim','.lws','.irrmesh','.nff','.off','.blend']
+
+def run_tests():
+ ok,err = 0,0
+ for path in basepaths:
+ for root, dirs, files in os.walk(path):
+ for afile in files:
+ base,ext = os.path.splitext(afile)
+ if ext in extensions:
+ try:
+ sample.main(os.path.join(root,afile))
+ ok += 1
+ except errors.AssimpError as error:
+ # assimp error is fine, this is a controlled case
+                    print(error)
+ err += 1
+    print('** Loaded %s models, got controlled errors for %s files' % (ok,err))
+
+
+if __name__ == '__main__':
+ run_tests()
+
+
+
+
diff --git a/port/PyAssimp/scripts/sample.py b/port/PyAssimp/scripts/sample.py
new file mode 100755
index 000000000..b843b82ba
--- /dev/null
+++ b/port/PyAssimp/scripts/sample.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+#-*- coding: UTF-8 -*-
+
+"""
+This module demonstrates the functionality of PyAssimp.
+"""
+
+import os, sys
+import logging
+logging.basicConfig(level=logging.DEBUG)
+
+from pyassimp import core as pyassimp
+
+def recur_node(node,level = 0):
+ print(" " + "\t" * level + "- " + str(node))
+ for child in node.children:
+ recur_node(child, level + 1)
+
+
+def main(filename=None):
+
+ scene = pyassimp.load(filename)
+
+ #the model we load
+ print("MODEL:" + filename)
+ print
+
+ #write some statistics
+ print("SCENE:")
+ print(" meshes:" + str(len(scene.meshes)))
+ print(" materials:" + str(len(scene.materials)))
+ print(" textures:" + str(len(scene.textures)))
+ print
+
+ print("NODES:")
+ recur_node(scene.rootnode)
+
+ print
+ print("MESHES:")
+ for index, mesh in enumerate(scene.meshes):
+ print(" MESH" + str(index+1))
+ print(" material id:" + str(mesh.materialindex+1))
+ print(" vertices:" + str(len(mesh.vertices)))
+ print(" first 3 verts:\n" + str(mesh.vertices[:3]))
+ if mesh.normals.any():
+ print(" first 3 normals:\n" + str(mesh.normals[:3]))
+ else:
+ print(" no normals")
+ print(" colors:" + str(len(mesh.colors)))
+ tcs = mesh.texturecoords
+ if tcs:
+ for index, tc in enumerate(tcs):
+ print(" texture-coords "+ str(index) + ":" + str(len(tcs[index])) + "first3:" + str(tcs[index][:3]))
+
+ else:
+ print(" no texture coordinates")
+ print(" uv-component-count:" + str(len(mesh.numuvcomponents)))
+ print(" faces:" + str(len(mesh.faces)) + " -> first:\n" + str(mesh.faces[:3]))
+ print(" bones:" + str(len(mesh.bones)) + " -> first:" + str([str(b) for b in mesh.bones[:3]]))
+ print
+
+ print("MATERIALS:")
+ for index, material in enumerate(scene.materials):
+ print(" MATERIAL (id:" + str(index+1) + ")")
+ for key, value in material.properties.items():
+ print(" %s: %s" % (key, value))
+ print
+
+ print("TEXTURES:")
+ for index, texture in enumerate(scene.textures):
+ print(" TEXTURE" + str(index+1))
+ print(" width:" + str(texture.width))
+ print(" height:" + str(texture.height))
+ print(" hint:" + str(texture.achformathint))
+ print(" data (size):" + str(len(texture.data)))
+
+ # Finally release the model
+ pyassimp.release(scene)
+
+if __name__ == "__main__":
+ main(sys.argv[1] if len(sys.argv)>1 else None)
diff --git a/port/PyAssimp/setup.py b/port/PyAssimp/setup.py
new file mode 100644
index 000000000..1d2242fbb
--- /dev/null
+++ b/port/PyAssimp/setup.py
@@ -0,0 +1,13 @@
+ # -*- coding: utf-8 -*-
+import os
+from distutils.core import setup
+
+setup(name='pyassimp',
+ version='0.1',
+ license='ISC',
+ description='Python bindings for the Open Asset Import Library (ASSIMP)',
+ url='http://assimp.sourceforge.net/',
+ packages=['pyassimp'],
+ data_files=[('share/pyassimp', ['README.md']),
+ ('share/examples/pyassimp', ['scripts/' + f for f in os.listdir('scripts/')])]
+ )