Blender V2.61 - r43446
/* -----------------------------------------------------------------------------
   This source file is part of VideoTexture library

   Copyright (c) 2007 The Zdeno Ash Miklas

   This program is free software; you can redistribute it and/or modify it under
   the terms of the GNU Lesser General Public License as published by the Free Software
   Foundation; either version 2 of the License, or (at your option) any later
   version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License along with
   this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   Place - Suite 330, Boston, MA 02111-1307, USA, or go to
   http://www.gnu.org/copyleft/lesser.txt.
   -----------------------------------------------------------------------------
*/

// implementation

#include <PyObjectPlus.h>
#include <structmember.h>
#include <float.h>
#include <math.h>


#include "GL/glew.h"

#include "KX_PythonInit.h"
#include "DNA_scene_types.h"
#include "RAS_CameraData.h"
#include "RAS_MeshObject.h"
#include "BLI_math.h"

#include "ImageRender.h"
#include "ImageBase.h"
#include "BlendType.h"
#include "Exception.h"
#include "Texture.h"

ExceptionID SceneInvalid, CameraInvalid, ObserverInvalid;
ExceptionID MirrorInvalid, MirrorSizeInvalid, MirrorNormalInvalid, MirrorHorizontal, MirrorTooSmall;
ExpDesc SceneInvalidDesc (SceneInvalid, "Scene object is invalid");
ExpDesc CameraInvalidDesc (CameraInvalid, "Camera object is invalid");
ExpDesc ObserverInvalidDesc (ObserverInvalid, "Observer object is invalid");
ExpDesc MirrorInvalidDesc (MirrorInvalid, "Mirror object is invalid");
ExpDesc MirrorSizeInvalidDesc (MirrorSizeInvalid, "Mirror has no vertex or no size");
ExpDesc MirrorNormalInvalidDesc (MirrorNormalInvalid, "Cannot determine mirror plane");
ExpDesc MirrorHorizontalDesc (MirrorHorizontal, "Mirror is horizontal in local space");
ExpDesc MirrorTooSmallDesc (MirrorTooSmall, "Mirror is too small");

// constructor
ImageRender::ImageRender (KX_Scene * scene, KX_Camera * camera) :
    ImageViewport(),
    m_render(true),
    m_scene(scene),
    m_camera(camera),
    m_owncamera(false),
    m_observer(NULL),
    m_mirror(NULL),
    m_clip(100.f)
{
    // initialize background color
    setBackground(0, 0, 255, 255);
    // retrieve rendering objects
    m_engine = KX_GetActiveEngine();
    m_rasterizer = m_engine->GetRasterizer();
    m_canvas = m_engine->GetCanvas();
    m_rendertools = m_engine->GetRenderTools();
}

// destructor
ImageRender::~ImageRender (void)
{
    if (m_owncamera)
        m_camera->Release();
}

// set background color
void ImageRender::setBackground (int red, int green, int blue, int alpha)
{
    m_background[0] = (red < 0) ? 0.f : (red > 255) ? 1.f : float(red)/255.f;
    m_background[1] = (green < 0) ? 0.f : (green > 255) ? 1.f : float(green)/255.f;
    m_background[2] = (blue < 0) ? 0.f : (blue > 255) ? 1.f : float(blue)/255.f;
    m_background[3] = (alpha < 0) ? 0.f : (alpha > 255) ? 1.f : float(alpha)/255.f;
}


// capture image from viewport
void ImageRender::calcImage (unsigned int texId, double ts)
{
    if (m_rasterizer->GetDrawingMode() != RAS_IRasterizer::KX_TEXTURED ||   // no need for texture
        m_camera->GetViewport() ||                                          // camera must be inactive
        m_camera == m_scene->GetActiveCamera())
    {
        // no need to compute the texture in non-textured rendering
        m_avail = false;
        return;
    }
    // render the scene from the camera
    Render();
    // get image from viewport
    ImageViewport::calcImage(texId, ts);
    // restore OpenGL state
    m_canvas->EndFrame();
}

void ImageRender::Render()
{
    RAS_FrameFrustum frustrum;

    if (!m_render)
        return;

    if (m_mirror)
    {
        // mirror mode: compute camera frustum, position and orientation
        // convert mirror position and normal to world space
        const MT_Matrix3x3 & mirrorObjWorldOri = m_mirror->GetSGNode()->GetWorldOrientation();
        const MT_Point3 & mirrorObjWorldPos = m_mirror->GetSGNode()->GetWorldPosition();
        const MT_Vector3 & mirrorObjWorldScale = m_mirror->GetSGNode()->GetWorldScaling();
        MT_Point3 mirrorWorldPos =
            mirrorObjWorldPos + mirrorObjWorldScale * (mirrorObjWorldOri * m_mirrorPos);
        MT_Vector3 mirrorWorldZ = mirrorObjWorldOri * m_mirrorZ;
        // get observer world position
        const MT_Point3 & observerWorldPos = m_observer->GetSGNode()->GetWorldPosition();
        // get plane D term = mirrorPos . normal
        MT_Scalar mirrorPlaneDTerm = mirrorWorldPos.dot(mirrorWorldZ);
        // compute distance of observer to mirror = D - observerPos . normal
        MT_Scalar observerDistance = mirrorPlaneDTerm - observerWorldPos.dot(mirrorWorldZ);
        // if distance < 0.01 => observer is on the wrong side of the mirror, don't render
        if (observerDistance < 0.01f)
            return;
        // set camera world position = observerPos + normal * 2 * distance
        MT_Point3 cameraWorldPos = observerWorldPos + (MT_Scalar(2.0)*observerDistance)*mirrorWorldZ;
        m_camera->GetSGNode()->SetLocalPosition(cameraWorldPos);
        // set camera orientation: z=normal, y=mirror_up in world space, x = y x z
        MT_Vector3 mirrorWorldY = mirrorObjWorldOri * m_mirrorY;
        MT_Vector3 mirrorWorldX = mirrorObjWorldOri * m_mirrorX;
        MT_Matrix3x3 cameraWorldOri(
            mirrorWorldX[0], mirrorWorldY[0], mirrorWorldZ[0],
            mirrorWorldX[1], mirrorWorldY[1], mirrorWorldZ[1],
            mirrorWorldX[2], mirrorWorldY[2], mirrorWorldZ[2]);
        m_camera->GetSGNode()->SetLocalOrientation(cameraWorldOri);
        m_camera->GetSGNode()->UpdateWorldData(0.0);
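        // Note (editorial comment, not in the original source): the block above reflects the
        // observer through the mirror plane (camera = observer + 2*distance*normal) and aligns
        // the virtual camera with the mirror axes; the block below then derives an off-axis
        // (asymmetric) frustum whose near window is the mirror rectangle as seen from that
        // virtual camera, so only the view "through" the mirror gets rendered.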
        // compute camera frustum:
        //   get position of mirror relative to camera: offset = mirrorPos - cameraPos
        MT_Vector3 mirrorOffset = mirrorWorldPos - cameraWorldPos;
        // convert to camera orientation
        mirrorOffset = mirrorOffset * cameraWorldOri;
        // scale mirror size to world scale:
        //   get closest local axis for mirror Y and X axis and scale height and width by local axis scale
        MT_Scalar x, y;
        x = fabs(m_mirrorY[0]);
        y = fabs(m_mirrorY[1]);
        float height = (x > y) ?
            ((x > fabs(m_mirrorY[2])) ? mirrorObjWorldScale[0] : mirrorObjWorldScale[2]):
            ((y > fabs(m_mirrorY[2])) ? mirrorObjWorldScale[1] : mirrorObjWorldScale[2]);
        x = fabs(m_mirrorX[0]);
        y = fabs(m_mirrorX[1]);
        float width = (x > y) ?
            ((x > fabs(m_mirrorX[2])) ? mirrorObjWorldScale[0] : mirrorObjWorldScale[2]):
            ((y > fabs(m_mirrorX[2])) ? mirrorObjWorldScale[1] : mirrorObjWorldScale[2]);
        width *= m_mirrorHalfWidth;
        height *= m_mirrorHalfHeight;
        // left   = offsetx - width
        // right  = offsetx + width
        // top    = offsety + height
        // bottom = offsety - height
        // near   = -offsetz
        // far    = near + clip distance (m_clip, default 100)
        frustrum.x1 = mirrorOffset[0]-width;
        frustrum.x2 = mirrorOffset[0]+width;
        frustrum.y1 = mirrorOffset[1]-height;
        frustrum.y2 = mirrorOffset[1]+height;
        frustrum.camnear = -mirrorOffset[2];
        frustrum.camfar = -mirrorOffset[2]+m_clip;
    }
    // Store settings to be restored later
    const RAS_IRasterizer::StereoMode stereomode = m_rasterizer->GetStereoMode();
    RAS_Rect area = m_canvas->GetWindowArea();

    // The screen area that ImageViewport will copy is also the rendering zone
    m_canvas->SetViewPort(m_position[0], m_position[1], m_position[0]+m_capSize[0]-1, m_position[1]+m_capSize[1]-1);
    m_canvas->ClearColor(m_background[0], m_background[1], m_background[2], m_background[3]);
    m_canvas->ClearBuffer(RAS_ICanvas::COLOR_BUFFER|RAS_ICanvas::DEPTH_BUFFER);
    m_rasterizer->BeginFrame(RAS_IRasterizer::KX_TEXTURED, m_engine->GetClockTime());
    m_rendertools->BeginFrame(m_rasterizer);
    m_engine->SetWorldSettings(m_scene->GetWorldInfo());
    m_rendertools->SetAuxilaryClientInfo(m_scene);
    m_rasterizer->DisplayFog();
    // matrix calculation, don't apply any stereo mode
    m_rasterizer->SetStereoMode(RAS_IRasterizer::RAS_STEREO_NOSTEREO);
    if (m_mirror)
    {
        // the frustum was computed above:
        // get the frustum matrix and set the projection matrix
        MT_Matrix4x4 projmat = m_rasterizer->GetFrustumMatrix(
            frustrum.x1, frustrum.x2, frustrum.y1, frustrum.y2, frustrum.camnear, frustrum.camfar);

        m_camera->SetProjectionMatrix(projmat);
    }
    else if (m_camera->hasValidProjectionMatrix())
    {
        m_rasterizer->SetProjectionMatrix(m_camera->GetProjectionMatrix());
    }
    else
    {
        float lens = m_camera->GetLens();
        float sensor_x = m_camera->GetSensorWidth();
        float sensor_y = m_camera->GetSensorHeight();
        bool orthographic = !m_camera->GetCameraData()->m_perspective;
        float nearfrust = m_camera->GetCameraNear();
        float farfrust = m_camera->GetCameraFar();
        float aspect_ratio = 1.0f;
        Scene *blenderScene = m_scene->GetBlenderScene();
        MT_Matrix4x4 projmat;

        // compute the aspect ratio from the Blender scene frame settings so that render-to-texture
        // works the same in Blender and in the Blender player
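        // Worked example (editorial, illustrative values not from this file): a 1920x1080 frame
        // with square pixels (xasp = yasp = 1) gives
        //   aspect_ratio = (1920*1) / (1080*1) = 1.777...
        // i.e. the usual 16:9 ratio, matching what the regular viewport render would use.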
        if (blenderScene->r.ysch != 0)
            aspect_ratio = float(blenderScene->r.xsch*blenderScene->r.xasp) / float(blenderScene->r.ysch*blenderScene->r.yasp);

        if (orthographic) {

            RAS_FramingManager::ComputeDefaultOrtho(
                nearfrust,
                farfrust,
                m_camera->GetScale(),
                aspect_ratio,
                m_camera->GetSensorFit(),
                frustrum
            );

            projmat = m_rasterizer->GetOrthoMatrix(
                frustrum.x1, frustrum.x2, frustrum.y1, frustrum.y2, frustrum.camnear, frustrum.camfar);
        }
        else
        {
            RAS_FramingManager::ComputeDefaultFrustum(
                nearfrust,
                farfrust,
                lens,
                sensor_x,
                sensor_y,
                RAS_SENSORFIT_AUTO,
                aspect_ratio,
                frustrum);

            projmat = m_rasterizer->GetFrustumMatrix(
                frustrum.x1, frustrum.x2, frustrum.y1, frustrum.y2, frustrum.camnear, frustrum.camfar);
        }
        m_camera->SetProjectionMatrix(projmat);
    }

    MT_Transform camtrans(m_camera->GetWorldToCamera());
    MT_Matrix4x4 viewmat(camtrans);

    m_rasterizer->SetViewMatrix(viewmat, m_camera->NodeGetWorldOrientation(), m_camera->NodeGetWorldPosition(), m_camera->GetCameraData()->m_perspective);
    m_camera->SetModelviewMatrix(viewmat);
    // restore the stereo mode now that the matrix is computed
    m_rasterizer->SetStereoMode(stereomode);

    m_scene->CalculateVisibleMeshes(m_rasterizer, m_camera);

    m_scene->RenderBuckets(camtrans, m_rasterizer, m_rendertools);

    // restore the canvas area now that the render is completed
    m_canvas->GetWindowArea() = area;
}


// cast Image pointer to ImageRender
inline ImageRender * getImageRender (PyImage * self)
{ return static_cast<ImageRender*>(self->m_image); }


// python methods

// Blender Scene type
BlendType<KX_Scene> sceneType ("KX_Scene");
// Blender Camera type
BlendType<KX_Camera> cameraType ("KX_Camera");


// object initialization
static int ImageRender_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    // parameters - scene object
    PyObject * scene;
    // camera object
    PyObject * camera;
    // parameter keywords
    static const char *kwlist[] = {"sceneObj", "cameraObj", NULL};
    // get parameters
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO",
        const_cast<char**>(kwlist), &scene, &camera))
        return -1;
    try
    {
        // get scene pointer
        KX_Scene * scenePtr (NULL);
        if (scene != NULL) scenePtr = sceneType.checkType(scene);
        // throw exception if scene is not available
        if (scenePtr == NULL) THRWEXCP(SceneInvalid, S_OK);

        // get camera pointer
        KX_Camera * cameraPtr (NULL);
        if (camera != NULL) cameraPtr = cameraType.checkType(camera);
        // throw exception if camera is not available
        if (cameraPtr == NULL) THRWEXCP(CameraInvalid, S_OK);

        // get pointer to image structure
        PyImage * self = reinterpret_cast<PyImage*>(pySelf);
        // create source object
        if (self->m_image != NULL) delete self->m_image;
        self->m_image = new ImageRender(scenePtr, cameraPtr);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}
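
// A minimal usage sketch (editorial comment, not part of this file): from the game engine's
// Python API the type defined below is typically driven along these lines, assuming a texture
// owner object "obj", its scene "scene" and a spare camera "cam":
//
//   import VideoTexture
//   tex = VideoTexture.Texture(obj)
//   tex.source = VideoTexture.ImageRender(scene, cam)
//   tex.source.background = [0, 0, 255, 255]
//   # later, once per frame:
//   tex.refresh(True)
//
// Only the ImageRender(sceneObj, cameraObj) signature and the "background" attribute come from
// this file; where the Texture is stored and when refresh() is called depends on the game logic.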

// get background color
PyObject * getBackground (PyImage * self, void * closure)
{
    return Py_BuildValue("[BBBB]",
        getImageRender(self)->getBackground(0),
        getImageRender(self)->getBackground(1),
        getImageRender(self)->getBackground(2),
        getImageRender(self)->getBackground(3));
}

// set background color
static int setBackground (PyImage * self, PyObject * value, void * closure)
{
    // check validity of parameter
    if (value == NULL || !PySequence_Check(value) || PySequence_Size(value) != 4
        || !PyLong_Check(PySequence_Fast_GET_ITEM(value, 0))
        || !PyLong_Check(PySequence_Fast_GET_ITEM(value, 1))
        || !PyLong_Check(PySequence_Fast_GET_ITEM(value, 2))
        || !PyLong_Check(PySequence_Fast_GET_ITEM(value, 3)))
    {
        PyErr_SetString(PyExc_TypeError, "The value must be a sequence of 4 integers between 0 and 255");
        return -1;
    }
    // set background color
    getImageRender(self)->setBackground((unsigned char)(PyLong_AsSsize_t(PySequence_Fast_GET_ITEM(value, 0))),
                                        (unsigned char)(PyLong_AsSsize_t(PySequence_Fast_GET_ITEM(value, 1))),
                                        (unsigned char)(PyLong_AsSsize_t(PySequence_Fast_GET_ITEM(value, 2))),
                                        (unsigned char)(PyLong_AsSsize_t(PySequence_Fast_GET_ITEM(value, 3))));
    // success
    return 0;
}


// methods structure
static PyMethodDef imageRenderMethods[] =
{ // methods from ImageBase class
    {"refresh", (PyCFunction)Image_refresh, METH_NOARGS, "Refresh image - invalidate its current content"},
    {NULL}
};
// attributes structure
static PyGetSetDef imageRenderGetSets[] =
{
    {(char*)"background", (getter)getBackground, (setter)setBackground, (char*)"background color", NULL},
    // attributes from ImageViewport
    {(char*)"capsize", (getter)ImageViewport_getCaptureSize, (setter)ImageViewport_setCaptureSize, (char*)"size of render area", NULL},
    {(char*)"alpha", (getter)ImageViewport_getAlpha, (setter)ImageViewport_setAlpha, (char*)"use alpha in texture", NULL},
    {(char*)"whole", (getter)ImageViewport_getWhole, (setter)ImageViewport_setWhole, (char*)"use whole viewport to render", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {NULL}
};


// define python type
PyTypeObject ImageRenderType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.ImageRender",   /*tp_name*/
    sizeof(PyImage),              /*tp_basicsize*/
    0,                            /*tp_itemsize*/
    (destructor)Image_dealloc,    /*tp_dealloc*/
    0,                            /*tp_print*/
    0,                            /*tp_getattr*/
    0,                            /*tp_setattr*/
    0,                            /*tp_compare*/
    0,                            /*tp_repr*/
    0,                            /*tp_as_number*/
    0,                            /*tp_as_sequence*/
    0,                            /*tp_as_mapping*/
    0,                            /*tp_hash */
    0,                            /*tp_call*/
    0,                            /*tp_str*/
    0,                            /*tp_getattro*/
    0,                            /*tp_setattro*/
    &imageBufferProcs,            /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,           /*tp_flags*/
    "Image source from render",   /* tp_doc */
    0,                            /* tp_traverse */
    0,                            /* tp_clear */
    0,                            /* tp_richcompare */
    0,                            /* tp_weaklistoffset */
    0,                            /* tp_iter */
    0,                            /* tp_iternext */
    imageRenderMethods,           /* tp_methods */
    0,                            /* tp_members */
    imageRenderGetSets,           /* tp_getset */
    0,                            /* tp_base */
    0,                            /* tp_dict */
    0,                            /* tp_descr_get */
    0,                            /* tp_descr_set */
    0,                            /* tp_dictoffset */
    (initproc)ImageRender_init,   /* tp_init */
    0,                            /* tp_alloc */
    Image_allocNew,               /* tp_new */
};

// object initialization
static int ImageMirror_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    // parameters - scene object
    PyObject * scene;
    // reference object for the mirror
    PyObject * observer;
    // object holding the mirror
    PyObject * mirror;
    // material of the mirror
    short materialID = 0;
    // parameter keywords
    static const char *kwlist[] = {"scene", "observer", "mirror", "material", NULL};
    // get parameters
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOO|h",
        const_cast<char**>(kwlist), &scene, &observer, &mirror, &materialID))
        return -1;
    try
    {
        // get scene pointer
        KX_Scene * scenePtr (NULL);
        if (scene != NULL && PyObject_TypeCheck(scene, &KX_Scene::Type))
            scenePtr = static_cast<KX_Scene*>BGE_PROXY_REF(scene);
        else
            THRWEXCP(SceneInvalid, S_OK);

        if (scenePtr == NULL)      /* in case the python proxy reference is invalid */
            THRWEXCP(SceneInvalid, S_OK);

        // get observer pointer
        KX_GameObject * observerPtr (NULL);
        if (observer != NULL && PyObject_TypeCheck(observer, &KX_GameObject::Type))
            observerPtr = static_cast<KX_GameObject*>BGE_PROXY_REF(observer);
        else if (observer != NULL && PyObject_TypeCheck(observer, &KX_Camera::Type))
            observerPtr = static_cast<KX_Camera*>BGE_PROXY_REF(observer);
        else
            THRWEXCP(ObserverInvalid, S_OK);

        if (observerPtr == NULL)   /* in case the python proxy reference is invalid */
            THRWEXCP(ObserverInvalid, S_OK);

        // get mirror pointer
        KX_GameObject * mirrorPtr (NULL);
        if (mirror != NULL && PyObject_TypeCheck(mirror, &KX_GameObject::Type))
            mirrorPtr = static_cast<KX_GameObject*>BGE_PROXY_REF(mirror);
        else
            THRWEXCP(MirrorInvalid, S_OK);

        if (mirrorPtr == NULL)     /* in case the python proxy reference is invalid */
            THRWEXCP(MirrorInvalid, S_OK);

        // locate the material in the mirror
        RAS_IPolyMaterial * material = getMaterial(mirror, materialID);
        if (material == NULL)
            THRWEXCP(MaterialNotAvail, S_OK);

        // get pointer to image structure
        PyImage * self = reinterpret_cast<PyImage*>(pySelf);

        // create source object
        if (self->m_image != NULL)
        {
            delete self->m_image;
            self->m_image = NULL;
        }
        self->m_image = new ImageRender(scenePtr, observerPtr, mirrorPtr, material);
    }
    catch (Exception & exp)
    {
        exp.report();
        return -1;
    }
    // initialization succeeded
    return 0;
}

// get clipping distance
PyObject * getClip (PyImage * self, void * closure)
{
    return PyFloat_FromDouble(getImageRender(self)->getClip());
}

// set clipping distance
static int setClip (PyImage * self, PyObject * value, void * closure)
{
    // check validity of parameter
    double clip;
    if (value == NULL || !PyFloat_Check(value) || (clip = PyFloat_AsDouble(value)) < 0.01 || clip > 5000.0)
    {
        PyErr_SetString(PyExc_TypeError, "The value must be a float between 0.01 and 5000");
        return -1;
    }
    // set clipping distance
    getImageRender(self)->setClip(float(clip));
    // success
    return 0;
}

// attributes structure
static PyGetSetDef imageMirrorGetSets[] =
{
    {(char*)"clip", (getter)getClip, (setter)setClip, (char*)"clipping distance", NULL},
    // attribute from ImageRender
    {(char*)"background", (getter)getBackground, (setter)setBackground, (char*)"background color", NULL},
    // attributes from ImageViewport
    {(char*)"capsize", (getter)ImageViewport_getCaptureSize, (setter)ImageViewport_setCaptureSize, (char*)"size of render area", NULL},
    {(char*)"alpha", (getter)ImageViewport_getAlpha, (setter)ImageViewport_setAlpha, (char*)"use alpha in texture", NULL},
    {(char*)"whole", (getter)ImageViewport_getWhole, (setter)ImageViewport_setWhole, (char*)"use whole viewport to render", NULL},
    // attributes from ImageBase class
    {(char*)"valid", (getter)Image_valid, NULL, (char*)"bool to tell if an image is available", NULL},
    {(char*)"image", (getter)Image_getImage, NULL, (char*)"image data", NULL},
    {(char*)"size", (getter)Image_getSize, NULL, (char*)"image size", NULL},
    {(char*)"scale", (getter)Image_getScale, (setter)Image_setScale, (char*)"fast scale of image (near neighbour)", NULL},
    {(char*)"flip", (getter)Image_getFlip, (setter)Image_setFlip, (char*)"flip image vertically", NULL},
    {(char*)"filter", (getter)Image_getFilter, (setter)Image_setFilter, (char*)"pixel filter", NULL},
    {NULL}
};


// constructor (planar mirror mode)
ImageRender::ImageRender (KX_Scene * scene, KX_GameObject * observer, KX_GameObject * mirror, RAS_IPolyMaterial * mat) :
    ImageViewport(),
    m_render(false),
    m_scene(scene),
    m_observer(observer),
    m_mirror(mirror),
    m_clip(100.f)
{
    // this constructor is used for automatic planar mirrors
    // create a camera, take all data by default; in any case we will recompute the frustum on each frame
    RAS_CameraData camdata;
    vector<RAS_TexVert*> mirrorVerts;
    vector<RAS_TexVert*>::iterator it;
    float mirrorArea = 0.f;
    float mirrorNormal[3] = {0.f, 0.f, 0.f};
    float mirrorUp[3];
    float dist, vec[3], axis[3];
    float zaxis[3] = {0.f, 0.f, 1.f};
    float yaxis[3] = {0.f, 1.f, 0.f};
    float mirrorMat[3][3];
    float left, right, top, bottom, back;
    // make sure this camera will delete its node
    m_camera = new KX_Camera(scene, KX_Scene::m_callbacks, camdata, true, true);
    m_camera->SetName("__mirror__cam__");
    // don't add the camera to the scene object list, it doesn't need to be accessible
    m_owncamera = true;
    // retrieve rendering objects
    m_engine = KX_GetActiveEngine();
    m_rasterizer = m_engine->GetRasterizer();
    m_canvas = m_engine->GetCanvas();
    m_rendertools = m_engine->GetRenderTools();
    // locate the vertices assigned to mat and do the following calculation in mesh coordinates
    for (int meshIndex = 0; meshIndex < mirror->GetMeshCount(); meshIndex++)
    {
        RAS_MeshObject* mesh = mirror->GetMesh(meshIndex);
        int numPolygons = mesh->NumPolygons();
        for (int polygonIndex = 0; polygonIndex < numPolygons; polygonIndex++)
        {
            RAS_Polygon* polygon = mesh->GetPolygon(polygonIndex);
            if (polygon->GetMaterial()->GetPolyMaterial() == mat)
            {
                RAS_TexVert *v1, *v2, *v3, *v4;
                float normal[3];
                float area;
                // this polygon is part of the mirror
                v1 = polygon->GetVertex(0);
                v2 = polygon->GetVertex(1);
                v3 = polygon->GetVertex(2);
                mirrorVerts.push_back(v1);
                mirrorVerts.push_back(v2);
                mirrorVerts.push_back(v3);
                if (polygon->VertexCount() == 4)
                {
                    v4 = polygon->GetVertex(3);
                    mirrorVerts.push_back(v4);
                    area = normal_quad_v3(normal, (float*)v1->getXYZ(), (float*)v2->getXYZ(), (float*)v3->getXYZ(), (float*)v4->getXYZ());
                }
                else
                {
                    area = normal_tri_v3(normal, (float*)v1->getXYZ(), (float*)v2->getXYZ(), (float*)v3->getXYZ());
                }
                area = fabs(area);
                mirrorArea += area;
                mul_v3_fl(normal, area);
                add_v3_v3v3(mirrorNormal, mirrorNormal, normal);
            }
        }
    }
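    // Note on the next steps (editorial comment, not in the original source): the loop above
    // accumulated an area-weighted sum of the face normals, so dividing by the total area and
    // normalizing yields the average mirror normal n. The "up" direction used for the virtual
    // camera is then the local Z axis (or Y axis for a mostly horizontal mirror) projected onto
    // the mirror plane:  up = axis - (n . axis) * n, normalized.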
    if (mirrorVerts.size() == 0 || mirrorArea < FLT_EPSILON)
    {
        // no vertex or zero size mirror
        THRWEXCP(MirrorSizeInvalid, S_OK);
    }
    // compute average normal of mirror faces
    mul_v3_fl(mirrorNormal, 1.0f/mirrorArea);
    if (normalize_v3(mirrorNormal) == 0.f)
    {
        // no normal
        THRWEXCP(MirrorNormalInvalid, S_OK);
    }
    // the mirror plane has an equation of the type ax+by+cz = d where (a,b,c) is the normal vector;
    // if the mirror is more vertical than horizontal, the Z axis is the up direction,
    // otherwise the Y axis is the up direction.
    // If the mirror is not perfectly vertical (horizontal), the projection of the Z (Y) axis
    // onto the mirror plane along the normal will be the up direction.
    if (fabs(mirrorNormal[2]) > fabs(mirrorNormal[1]) &&
        fabs(mirrorNormal[2]) > fabs(mirrorNormal[0]))
    {
        // the mirror is more horizontal than vertical
        copy_v3_v3(axis, yaxis);
    }
    else
    {
        // the mirror is more vertical than horizontal
        copy_v3_v3(axis, zaxis);
    }
    dist = dot_v3v3(mirrorNormal, axis);
    if (fabs(dist) < FLT_EPSILON)
    {
        // the mirror is already fully aligned with the up axis
        copy_v3_v3(mirrorUp, axis);
    }
    else
    {
        // projection of axis onto mirror plane through normal
        copy_v3_v3(vec, mirrorNormal);
        mul_v3_fl(vec, dist);
        sub_v3_v3v3(mirrorUp, axis, vec);
        if (normalize_v3(mirrorUp) == 0.f)
        {
            // should not happen
            THRWEXCP(MirrorHorizontal, S_OK);
            return;
        }
    }
    // compute rotation matrix between local coord and mirror coord;
    // to match camera orientation, we select mirror z = -normal, y = up, x = y x z
    negate_v3_v3(mirrorMat[2], mirrorNormal);
    copy_v3_v3(mirrorMat[1], mirrorUp);
    cross_v3_v3v3(mirrorMat[0], mirrorMat[1], mirrorMat[2]);
    // transpose to make it an orientation matrix from local space to mirror space
    transpose_m3(mirrorMat);
    // transform all vertices to plane coordinates and determine mirror position
    left = FLT_MAX;
    right = -FLT_MAX;
    bottom = FLT_MAX;
    top = -FLT_MAX;
    back = -FLT_MAX;   // most backward vertex (= highest Z coord in mirror space)
    for (it = mirrorVerts.begin(); it != mirrorVerts.end(); it++)
    {
        copy_v3_v3(vec, (float*)(*it)->getXYZ());
        mul_m3_v3(mirrorMat, vec);
        if (vec[0] < left)
            left = vec[0];
        if (vec[0] > right)
            right = vec[0];
        if (vec[1] < bottom)
            bottom = vec[1];
        if (vec[1] > top)
            top = vec[1];
        if (vec[2] > back)
            back = vec[2];
    }
    // now store this information in the object for later rendering
    m_mirrorHalfWidth = (right-left)*0.5f;
    m_mirrorHalfHeight = (top-bottom)*0.5f;
    if (m_mirrorHalfWidth < 0.01f || m_mirrorHalfHeight < 0.01f)
    {
        // mirror too small
        THRWEXCP(MirrorTooSmall, S_OK);
    }
    // mirror position in mirror coord
    vec[0] = (left+right)*0.5f;
    vec[1] = (top+bottom)*0.5f;
    vec[2] = back;
    // convert it to local space: transpose the matrix again to get the mirror-to-local transform
    transpose_m3(mirrorMat);
    mul_m3_v3(mirrorMat, vec);
    // mirror position in local space
    m_mirrorPos.setValue(vec[0], vec[1], vec[2]);
    // mirror normal vector (pointing towards the back of the mirror) in local space
    m_mirrorZ.setValue(-mirrorNormal[0], -mirrorNormal[1], -mirrorNormal[2]);
    m_mirrorY.setValue(mirrorUp[0], mirrorUp[1], mirrorUp[2]);
    m_mirrorX = m_mirrorY.cross(m_mirrorZ);
    m_render = true;

    setBackground(0, 0, 255, 255);
}




// define python type
PyTypeObject ImageMirrorType =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    "VideoTexture.ImageMirror",   /*tp_name*/
    sizeof(PyImage),              /*tp_basicsize*/
    0,                            /*tp_itemsize*/
    (destructor)Image_dealloc,    /*tp_dealloc*/
    0,                            /*tp_print*/
    0,                            /*tp_getattr*/
    0,                            /*tp_setattr*/
    0,                            /*tp_compare*/
    0,                            /*tp_repr*/
    0,                            /*tp_as_number*/
    0,                            /*tp_as_sequence*/
    0,                            /*tp_as_mapping*/
    0,                            /*tp_hash */
    0,                            /*tp_call*/
    0,                            /*tp_str*/
    0,                            /*tp_getattro*/
    0,                            /*tp_setattro*/
    &imageBufferProcs,            /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,           /*tp_flags*/
    "Image source from mirror",   /* tp_doc */
    0,                            /* tp_traverse */
    0,                            /* tp_clear */
    0,                            /* tp_richcompare */
    0,                            /* tp_weaklistoffset */
    0,                            /* tp_iter */
    0,                            /* tp_iternext */
    imageRenderMethods,           /* tp_methods */
    0,                            /* tp_members */
    imageMirrorGetSets,           /* tp_getset */
    0,                            /* tp_base */
    0,                            /* tp_dict */
    0,                            /* tp_descr_get */
    0,                            /* tp_descr_set */
    0,                            /* tp_dictoffset */
    (initproc)ImageMirror_init,   /* tp_init */
    0,                            /* tp_alloc */
    Image_allocNew,               /* tp_new */
};
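
// A minimal usage sketch for ImageMirror (editorial comment, not part of this file), assuming a
// game object "mirrorObj" whose material carries the texture to replace, an observer object "obs"
// (usually the active camera) and the current scene "scene":
//
//   import VideoTexture
//   matID = VideoTexture.materialID(mirrorObj, 'IMmirror.png')   # texture name is a placeholder
//   tex = VideoTexture.Texture(mirrorObj, matID)
//   tex.source = VideoTexture.ImageMirror(scene, obs, mirrorObj, matID)
//   tex.source.clip = 100.0
//   # per frame:
//   tex.refresh(True)
//
// Only the ImageMirror(scene, observer, mirror, material) signature and the "clip" attribute come
// from this file; the object and texture names above are illustrative placeholders.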