Index: trunk/src/gfx/skeleton_instance.cc
===================================================================
--- trunk/src/gfx/skeleton_instance.cc	(revision 483)
+++ trunk/src/gfx/skeleton_instance.cc	(revision 484)
@@ -52,5 +52,8 @@
 	for ( uint32 n = 0; n < skeleton.size(); ++n )
 	{
-		m_matrix[n] = transforms[n].extract() * bones.m_offsets[n];
+		transform tr( bones.m_offsets[n] );
+		tr.set_orientation( normalize( tr.get_orientation() ) );
+		m_matrix[n] = ( transforms[n] * tr ).extract();
+	//	was: m_matrix[n] = transforms[n].extract() * bones.m_offsets[n];
 	}
 }
@@ -74,5 +77,5 @@
 }
 
-void nv::skeleton_transforms::interpolate( const skeleton_transforms& a, const skeleton_transforms& b, float t )
+void nv::skeleton_transforms::interpolate_linear( const skeleton_transforms& a, const skeleton_transforms& b, float t )
 {
 	NV_ASSERT( a.size() == b.size(), "!!!" );
@@ -81,6 +84,117 @@
 	for ( uint32 n = 0; n < a.size(); ++n )
 	{
+		m_transforms[n] = transform(
+			math::mix( a.m_transforms[n].get_position(), b.m_transforms[n].get_position(), t ),
+			math::lerp( a.m_transforms[n].get_orientation(), b.m_transforms[n].get_orientation(), t )
+			);
+	}
+
+	if ( m_transforms.size() > 0 )
+		m_transforms[0] = nv::interpolate( a.m_transforms[0], b.m_transforms[0], t );
+}
+
+void nv::skeleton_transforms::interpolate_nlerp( const skeleton_transforms& a, const skeleton_transforms& b, float t )
+{
+	NV_ASSERT( a.size() == b.size(), "!!!" );
+	if ( m_transforms.size() != a.size() )
+		m_transforms.resize( a.size() );
+
+	for ( uint32 n = 0; n < a.size(); ++n )
+	{
+		m_transforms[n] = transform(
+			math::mix( a.m_transforms[n].get_position(), b.m_transforms[n].get_position(), t ),
+			math::nlerp( a.m_transforms[n].get_orientation(), b.m_transforms[n].get_orientation(), t )
+			);
+	}
+
+	if ( m_transforms.size() > 0 )
+		m_transforms[0] = nv::interpolate( a.m_transforms[0], b.m_transforms[0], t );
+}
+
+
+void nv::skeleton_transforms::interpolate_slerp( const skeleton_transforms& a, const skeleton_transforms& b, float t )
+{
+	NV_ASSERT( a.size() == b.size(), "!!!" );
+	if ( m_transforms.size() != a.size() )
+		m_transforms.resize( a.size() );
+	for ( uint32 n = 0; n < a.size(); ++n )
+	{
 		m_transforms[n] = nv::interpolate( a.m_transforms[n], b.m_transforms[n], t );
 	}
+}
+
+void nv::skeleton_transforms::interpolate4( const skeleton_transforms& s1, const skeleton_transforms& v1, const skeleton_transforms& v2, const skeleton_transforms& s2, float t )
+{
+	NV_ASSERT( s1.size() == s2.size(), "!!!" );
+	NV_ASSERT( v1.size() == v2.size(), "!!!" );
+	NV_ASSERT( s1.size() == v1.size(), "!!!" );
+	if ( m_transforms.size() != s1.size() )
+		m_transforms.resize( s1.size() );
+	float interp_squared = t*t;
+	float interp_cubed = interp_squared*t;
+	float weights[4];
+	weights[0] = 0.5f * ( -interp_cubed + 2.0f * interp_squared - t );
+	weights[1] = 0.5f * ( 3.0f * interp_cubed - 5.0f * interp_squared + 2.0f );
+	weights[2] = 0.5f * ( -3.0f * interp_cubed + 4.0f * interp_squared + t );
+	weights[3] = 0.5f * ( interp_cubed - interp_squared );
+
+	for ( uint32 n = 0; n < s1.size(); ++n )
+	{
+		quat qs1 = s1.m_transforms[n].get_orientation();
+		quat qs2 = s2.m_transforms[n].get_orientation();
+		quat qv1 = v1.m_transforms[n].get_orientation();
+		quat qv2 = v2.m_transforms[n].get_orientation();
+
+		float a = dot( qv1, qv2 ) > 0.0f ? 1.0f : -1.0f;
+
+		quat qr = weights[0] * qs1
+				+ weights[1] * ( a * qv1 )
+				+ weights[2] * qv2
+				+ weights[3] * qs2;
+
+		qr = normalize( qr );
+
+		if ( n == 0 )
+			qr = nv::math::slerp( v1.m_transforms[n].get_orientation(), v2.m_transforms[n].get_orientation(), t );
+
+		m_transforms[n] = transform(
+			weights[0] * s1.m_transforms[n].get_position() +
+			weights[1] * v1.m_transforms[n].get_position() +
+			weights[2] * v2.m_transforms[n].get_position() +
+			weights[3] * s2.m_transforms[n].get_position(),
+			qr
+		);
+	}
+}
+
+
+void nv::skeleton_transforms::interpolate_squad( const skeleton_transforms& s1, const skeleton_transforms& v1, const skeleton_transforms& v2, const skeleton_transforms& s2, float t )
+{
+	NV_ASSERT( s1.size() == s2.size(), "!!!" );
+	NV_ASSERT( v1.size() == v2.size(), "!!!" );
+	NV_ASSERT( s1.size() == v1.size(), "!!!" );
+	if ( m_transforms.size() != s1.size() )
+		m_transforms.resize( s1.size() );
+
+	for ( uint32 n = 0; n < s1.size(); ++n )
+	{
+		nv::quat ss1 = s1.m_transforms[n].get_orientation();
+		nv::quat ss2 = s2.m_transforms[n].get_orientation();
+		nv::quat q = normalize( nv::math::squad(
+			v1.m_transforms[n].get_orientation(),
+			v2.m_transforms[n].get_orientation(),
+			nv::math::intermediate( ss1, v1.m_transforms[n].get_orientation(), v2.m_transforms[n].get_orientation() ),
+			nv::math::intermediate( v1.m_transforms[n].get_orientation(), v2.m_transforms[n].get_orientation(), ss2 ),
+			t ) );
+		if ( n == 0 ) q = nv::math::slerp(
+			v1.m_transforms[n].get_orientation(),
+			v2.m_transforms[n].get_orientation(), t );
+
+		m_transforms[n] = transform(
+			mix( v1.m_transforms[n].get_position(), v2.m_transforms[n].get_position(), t ),
+			q
+			);
+	}
+
 }
 
@@ -101,5 +215,7 @@
 		{
 			if ( node->size() > 0 )
+			{
 				m_transforms[bone_id] = raw_channel_interpolator( node, binding.m_key ).get< transform >( frame );
+			}
 			int confirm_that_not_needed;
 // 			else
@@ -189,10 +305,7 @@
 void nv::bone_transforms::prepare( const data_node_list& bone_data )
 {
-	if ( m_offsets.empty() )
-	{
-		m_offsets.resize( bone_data.size() );
-
-		for ( nv::uint16 bi = 0; bi < bone_data.size(); ++bi )
-			m_offsets[bi] = bone_data[bi].transform;
-	}
-}
+	m_offsets.resize( bone_data.size() );
+
+	for ( nv::uint16 bi = 0; bi < bone_data.size(); ++bi )
+		m_offsets[bi] = bone_data[bi].transform;
+}
