gltfpack: Fix translation track optimization to take scale into account
When analyzing track errors, gltfpack currently doesn't take the
hierarchy into account. This is known to be problematic in some cases -
ideally, all errors would be analyzed at the leaf level, taking the
entire hierarchy into account, to make sure that each constant folding
doesn't affect the final result too much.

For now, this fixes one egregious and potentially common case: when a
scene root has a large scale (which is common when scenes go through a
unit conversion), the visual error gets magnified by that scale, which
can result in visible degradation of animation quality.

Fixes zeux#278.
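
To make the magnification concrete, here is a standalone sketch of the issue (not part of the commit; the tolerance and scale values are invented): a jitter that passes a purely local-space tolerance check gets folded to a constant, even though it is clearly visible once the root scale is applied, and dividing the tolerance by the world scale prevents that.

// Standalone sketch (not gltfpack code): why a scaled root breaks a purely
// local-space tolerance check for translation tracks. Numbers are made up.
#include <cmath>
#include <cstdio>

int main()
{
	const float tolerance = 0.001f;   // hypothetical local-space translation tolerance
	const float worldScale = 100.f;   // e.g. a root scaled by 100 after unit conversion
	const float localDelta = 0.0005f; // per-frame translation jitter in local units

	// Naive check: the jitter is under tolerance, so the track would be
	// folded to a constant...
	bool foldedNaive = std::fabs(localDelta) < tolerance;

	// ...but after the root scale is applied, the discarded motion is
	// localDelta * worldScale = 0.05 units in world space, which is visible.
	float worldError = localDelta * worldScale;

	// Dividing the tolerance by the accumulated world scale means the same
	// jitter no longer passes the constant-folding test.
	bool foldedFixed = std::fabs(localDelta) < tolerance / worldScale;

	std::printf("naive fold: %d, world-space error: %f, fixed fold: %d\n",
	            foldedNaive, worldError, foldedFixed);
	return 0;
}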
zeux committed May 22, 2021
1 parent 0d2ba6e commit 1d10b9d
Showing 1 changed file with 23 additions and 5 deletions.
gltf/animation.cpp — 23 additions, 5 deletions

@@ -207,12 +207,10 @@ static void resampleKeyframes(std::vector<Attr>& data, const std::vector<float>&
 	}
 }
 
-static bool isTrackEqual(const std::vector<Attr>& data, cgltf_animation_path_type type, int frames, const Attr* value, size_t components)
+static bool isTrackEqual(const std::vector<Attr>& data, cgltf_animation_path_type type, int frames, const Attr* value, size_t components, float tolerance)
 {
 	assert(data.size() == frames * components);
 
-	float tolerance = getDeltaTolerance(type);
-
 	for (int i = 0; i < frames; ++i)
 	{
 		for (size_t j = 0; j < components; ++j)
@@ -261,6 +259,20 @@ static void getBaseTransform(Attr* result, size_t components, cgltf_animation_pa
 	}
 }
 
+static float getWorldScale(cgltf_node* node)
+{
+	float transform[16];
+	cgltf_node_transform_world(node, transform);
+
+	// 3x3 determinant computes scale^3
+	float a0 = transform[5] * transform[10] - transform[6] * transform[9];
+	float a1 = transform[4] * transform[10] - transform[6] * transform[8];
+	float a2 = transform[4] * transform[9] - transform[5] * transform[8];
+	float det = transform[0] * a0 - transform[1] * a1 + transform[2] * a2;
+
+	return powf(fabsf(det), 1.f / 3.f);
+}
+
 void processAnimation(Animation& animation, const Settings& settings)
 {
 	float mint = FLT_MAX, maxt = 0;
@@ -296,7 +308,13 @@ void processAnimation(Animation& animation, const Settings& settings)
 		track.time.clear();
 		track.data.swap(result);
 
-		if (isTrackEqual(track.data, track.path, frames, &track.data[0], track.components))
+		float tolerance = getDeltaTolerance(track.path);
+
+		// translation tracks use world space tolerance; in the future, we should compute all errors as linear using hierarchy
+		if (track.node && track.path == cgltf_animation_path_type_translation)
+			tolerance /= getWorldScale(track.node);
+
+		if (isTrackEqual(track.data, track.path, frames, &track.data[0], track.components, tolerance))
 		{
 			// track is constant (equal to first keyframe), we only need the first keyframe
 			track.constant = true;
@@ -306,7 +324,7 @@ void processAnimation(Animation& animation, const Settings& settings)
 			base.resize(track.components);
 			getBaseTransform(&base[0], track.components, track.path, track.node);
 
-			track.dummy = isTrackEqual(track.data, track.path, 1, &base[0], track.components);
+			track.dummy = isTrackEqual(track.data, track.path, 1, &base[0], track.components, tolerance);
		}
	}
}
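
For context on the new getWorldScale helper: it recovers a uniform scale from the node's world matrix via the determinant of the upper 3x3, since for a matrix of the form rotation times uniform scale s the determinant is ±s^3, so cbrt(|det|) gives back s. Below is a minimal standalone check of that identity; the rotation angle and scale are invented, and the column-major 3x3 layout is only an assumption made to mirror the 4x4 indexing used in the commit.

// Minimal check of the identity getWorldScale relies on: for an upper 3x3 of
// the form R * s (rotation times uniform scale s), det = s^3 up to sign, so
// cbrt(|det|) recovers s. Illustrative only; not gltfpack code.
#include <cmath>
#include <cstdio>

int main()
{
	const float s = 100.f; // uniform scale applied at the scene root
	const float c = std::cos(0.3f), n = std::sin(0.3f);

	// Column-major 3x3: rotation around Z, multiplied by the uniform scale s
	float m[9] = {
	    c * s, n * s, 0,
	    -n * s, c * s, 0,
	    0, 0, s};

	// Cofactor expansion along the first column
	float a0 = m[4] * m[8] - m[5] * m[7];
	float a1 = m[3] * m[8] - m[5] * m[6];
	float a2 = m[3] * m[7] - m[4] * m[6];
	float det = m[0] * a0 - m[1] * a1 + m[2] * a2;

	// Prints det = 1000000 and recovered scale = 100
	std::printf("det = %f, recovered scale = %f\n", det, std::cbrt(std::fabs(det)));
	return 0;
}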
