Blender V2.61 - r43446

LOD_NdQuadric.cpp

/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): none yet.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

#include "LOD_NdQuadric.h"

#include <iostream>

// for lower-upper (LU) decomposition code.
#include "TNT/lu.h"

// Soon to be a template parameter.
#define MAT_SIZE 3
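
// Note: MAT_SIZE is the number of extra per-vertex properties tracked by this
// quadric, so the full error metric lives in a (3 + MAT_SIZE)-dimensional
// space: three geometric coordinates plus MAT_SIZE property coordinates.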

LOD_NdQuadric::
LOD_NdQuadric(
    const MT_Vector3 & vec,
    const MT_Scalar & offset
):
    m_q(vec,offset),
    m_prop_grad1(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad2(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad3(int(MAT_SIZE),MT_Scalar(0)),
    m_bbottom(int(MAT_SIZE),MT_Scalar(0)),
    m_diag (int(MAT_SIZE),MT_Scalar(0))
{
    // nothing to do
}
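
// The constructor above builds a purely geometric quadric: the property
// gradients, the bottom vector and the diagonal all stay zero.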
// Initialize a quadric from a linear functional describing
// the property gradient. 'pos' is the position where this
// functional is placed in the array.
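// The functional is presumably of the form p(v) = vec . v + offset for the
// property at index 'pos'. Its negated gradient and offset end up in
// row/column (3 + pos) of the larger quadric, and the corresponding
// diagonal entry is set to 1.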

LOD_NdQuadric::
LOD_NdQuadric(
    const MT_Vector3 & vec,
    const MT_Scalar & offset,
    int pos
):
    m_q(vec,offset),
    m_prop_grad1(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad2(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad3(int(MAT_SIZE),MT_Scalar(0)),
    m_bbottom(int(MAT_SIZE),MT_Scalar(0)),
    m_diag (int(MAT_SIZE),MT_Scalar(0))
{
    m_prop_grad1[pos] = -vec[0];
    m_prop_grad2[pos] = -vec[1];
    m_prop_grad3[pos] = -vec[2];

    m_bbottom[pos] = -offset;
    m_diag[pos] = MT_Scalar(1);
}


LOD_NdQuadric::
LOD_NdQuadric(
):
    m_q(),
    m_prop_grad1(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad2(int(MAT_SIZE),MT_Scalar(0)),
    m_prop_grad3(int(MAT_SIZE),MT_Scalar(0)),
    m_bbottom(int(MAT_SIZE),MT_Scalar(0)),
    m_diag (int(MAT_SIZE),MT_Scalar(0))
{
}



// This class also has the default copy constructors
// available.

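// The full (3 + MAT_SIZE) x (3 + MAT_SIZE) tensor assembled below has the
// block form
//
//     [ A     G ]
//     [ G^T   D ]
//
// where A is the 3x3 geometric quadric tensor, the columns of G are built
// from m_prop_grad1..m_prop_grad3, and D is the diagonal matrix stored in
// m_diag.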
    void
LOD_NdQuadric::
Tensor(
    TNT::Matrix<MT_Scalar> &tensor
) const {

    // Set the matrix to zero
    tensor = 0;

    // Fill in the matrix values from the member variables.

    MT_Matrix3x3 tensor3x3 = m_q.Tensor();
    int i,j;

    for (j=0; j<=2; j++) {
        for (i=0; i<=2; i++) {
            tensor[j][i] = tensor3x3[j][i];
        }
    }

    // now the diagonal entries.

    for (i=3; i < 3+MAT_SIZE; ++i) {
        tensor[i][i] = m_diag[i-3];
    }

    // now the property gradient rows and symmetric columns.

    for (i=3; i < 3+MAT_SIZE; i++) {
        tensor[0][i] = m_prop_grad1[i-3];
        tensor[i][0] = m_prop_grad1[i-3];

        tensor[1][i] = m_prop_grad2[i-3];
        tensor[i][1] = m_prop_grad2[i-3];

        tensor[2][i] = m_prop_grad3[i-3];
        tensor[i][2] = m_prop_grad3[i-3];
    }
}

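// The matching (3 + MAT_SIZE) vector is the geometric quadric vector
// followed by the stored property offsets in m_bbottom.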
    void
LOD_NdQuadric::
Vector(
    TNT::Vector<MT_Scalar> &vector
) const {
    int i;

    MT_Vector3 q_vec = m_q.Vector();

    vector[0] = q_vec[0];
    vector[1] = q_vec[1];
    vector[2] = q_vec[2];

    for (i = 3; i < 3 + MAT_SIZE; ++i) {
        vector[i] = m_bbottom[i-3];
    }
}

    void
LOD_NdQuadric::
Clear(
    MT_Scalar val
) {
    m_q.Clear(val);
    m_prop_grad1 = val;
    m_prop_grad2 = val;
    m_prop_grad3 = val;
    m_bbottom = val;
    m_diag = val;
}

    LOD_NdQuadric &
LOD_NdQuadric::
operator=(
    const LOD_NdQuadric& Q
){

    m_q = Q.m_q;
    m_prop_grad1 = Q.m_prop_grad1;
    m_prop_grad2 = Q.m_prop_grad2;
    m_prop_grad3 = Q.m_prop_grad3;
    m_bbottom = Q.m_bbottom;
    m_diag = Q.m_diag;

    return (*this);
}

    LOD_NdQuadric&
LOD_NdQuadric::
operator+=(
    const LOD_NdQuadric& Q
){
    m_q += Q.m_q;
    TNT::vectoradd(m_prop_grad1,Q.m_prop_grad1);
    TNT::vectoradd(m_prop_grad2,Q.m_prop_grad2);
    TNT::vectoradd(m_prop_grad3,Q.m_prop_grad3);
    TNT::vectoradd(m_bbottom,Q.m_bbottom);
    TNT::vectoradd(m_diag,Q.m_diag);

    return (*this);
}

    LOD_NdQuadric&
LOD_NdQuadric::
operator*=(
    const MT_Scalar & s
){
    // multiply everything by s

    m_q *= s;
    TNT::vectorscale(m_prop_grad1,s);
    TNT::vectorscale(m_prop_grad2,s);
    TNT::vectorscale(m_prop_grad3,s);
    TNT::vectorscale(m_bbottom,s);
    TNT::vectorscale(m_diag,s);

    return (*this);
}

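// The cost of placing a (3 + MAT_SIZE)-dimensional vertex v is the usual
// quadric form
//
//     Q(v) = v^T A v + 2 b^T v + c
//
// with A from Tensor(), b from Vector() and c the scalar part of the
// geometric quadric, which is exactly what Evaluate() returns.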
    MT_Scalar
LOD_NdQuadric::
Evaluate(
    const TNT::Vector<MT_Scalar> & vec
) const {
    // compute the quadric error cost for the vector.

    // FIXME: very inefficient memory usage here.
    // Way too many computations; these matrices are symmetric!

    TNT::Matrix<MT_Scalar>  Atensor(3+MAT_SIZE,3+MAT_SIZE,MT_Scalar(0));
    Tensor(Atensor);

    TNT::Vector<MT_Scalar>  Bvector(3+MAT_SIZE);
    Vector(Bvector);

    TNT::Vector<MT_Scalar> temp1(3+MAT_SIZE);

    TNT::matmult(temp1,Atensor,vec);

    return TNT::dot_prod(vec,temp1) + 2*TNT::dot_prod(Bvector,vec) + m_q.Scalar();
}


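// Minimising Q(v) = v^T A v + 2 b^T v + c gives the optimal placement
// v = -A^-1 b, provided A is invertible. Optimize() therefore solves
// A v = b with an LU decomposition and negates the result. The TNT LU
// routines appear to return a nonzero value on failure (e.g. a singular
// matrix), which is why a nonzero result leads to an early return of false.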
    bool
LOD_NdQuadric::
Optimize(
    TNT::Vector<MT_Scalar> & vec
) const {

    // Return the solution to -Tensor().inverse() * Vector().
    // Use lower-upper (LU) decomposition; again we're creating
    // heap memory for this function.

    TNT::Vector<TNT::Subscript> pivots(3+MAT_SIZE,0);
    TNT::Matrix<MT_Scalar> Atensor(3+MAT_SIZE,3+MAT_SIZE,MT_Scalar(0));

    Tensor(Atensor);
    Vector(vec);

    // Find the LU decomposition of the matrix. If none is available,
    // the matrix is singular, so return false.

    if (TNT::LU_factor(Atensor,pivots)) {
        return false;
    }

    if (TNT::LU_solve(Atensor,pivots,vec)) {
        return false;
    }

    // Now we should return -Ainv(B)!

    TNT::Vector<MT_Scalar>::iterator start = vec.begin();
    TNT::Vector<MT_Scalar>::const_iterator end = vec.end();

    while (start != end) {
        *start = -(*start);
        ++start;
    }

#if 0
    std::cout << vec << "\n";
#endif
    return true;
}


#undef MAT_SIZE