Blender V2.61 - r43446

/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2005 Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): Brecht Van Lommel.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

#include <limits.h>
#include <stddef.h>
#include <string.h>

#include "GL/glew.h"

#include "MEM_guardedalloc.h"

#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_ghash.h"
#include "BLI_threads.h"

#include "DNA_meshdata_types.h"

#include "BKE_DerivedMesh.h"

#include "DNA_userdef_types.h"

#include "GPU_buffers.h"

typedef enum {
	GPU_BUFFER_VERTEX_STATE = 1,
	GPU_BUFFER_NORMAL_STATE = 2,
	GPU_BUFFER_TEXCOORD_STATE = 4,
	GPU_BUFFER_COLOR_STATE = 8,
	GPU_BUFFER_ELEMENT_STATE = 16,
} GPUBufferState;

#define MAX_GPU_ATTRIB_DATA 32

/* the material number is stored in a 16-bit signed short and is assumed
   to be non-negative here */
#define MAX_MATERIALS 16384

/* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
static int useVBOs = -1;
static GPUBufferState GLStates = 0;
static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };

/* stores recently-deleted buffers so that new buffers won't have to
   be recreated as often

   only one instance of this pool is created, stored in
   gpu_buffer_pool

   note that the number of buffers in the pool is usually limited to
   MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
   when a GPUBuffer is released outside the main thread; due to OpenGL
   restrictions it cannot be immediately released
*/
typedef struct GPUBufferPool {
	/* number of allocated buffers stored */
	int totbuf;
	/* actual allocated length of the array */
	int maxsize;
	GPUBuffer **buffers;
} GPUBufferPool;
#define MAX_FREE_GPU_BUFFERS 8

/* create a new GPUBufferPool */
static GPUBufferPool *gpu_buffer_pool_new(void)
{
	GPUBufferPool *pool;

	/* enable VBOs if supported */
	if(useVBOs == -1)
		useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);

	pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer");

	pool->maxsize = MAX_FREE_GPU_BUFFERS;
	pool->buffers = MEM_callocN(sizeof(GPUBuffer*)*pool->maxsize,
				    "GPUBuffer.buffers");

	return pool;
}

/* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
{
	int i;

	if(!pool || index < 0 || index >= pool->totbuf)
		return;

	/* shift entries down, overwriting the buffer at `index' */
	for(i = index; i < pool->totbuf - 1; i++)
		pool->buffers[i] = pool->buffers[i+1];

	/* clear the last entry */
	if(pool->totbuf > 0)
		pool->buffers[pool->totbuf - 1] = NULL;

	pool->totbuf--;
}

/* delete the last entry in the pool */
static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
{
	GPUBuffer *last;

	if(pool->totbuf <= 0)
		return;

	/* get the last entry */
	if(!(last = pool->buffers[pool->totbuf - 1]))
		return;

	/* delete the buffer's data */
	if(useVBOs)
		glDeleteBuffersARB(1, &last->id);
	else
		MEM_freeN(last->pointer);

	/* delete the buffer and remove from pool */
	MEM_freeN(last);
	pool->totbuf--;
	pool->buffers[pool->totbuf] = NULL;
}

/* free a GPUBufferPool; also frees the data in the pool's
   GPUBuffers */
static void gpu_buffer_pool_free(GPUBufferPool *pool)
{
	if(!pool)
		return;

	while(pool->totbuf)
		gpu_buffer_pool_delete_last(pool);

	MEM_freeN(pool->buffers);
	MEM_freeN(pool);
}

static GPUBufferPool *gpu_buffer_pool = NULL;
static GPUBufferPool *gpu_get_global_buffer_pool(void)
{
	/* initialize the pool */
	if(!gpu_buffer_pool)
		gpu_buffer_pool = gpu_buffer_pool_new();

	return gpu_buffer_pool;
}

void GPU_global_buffer_pool_free(void)
{
	gpu_buffer_pool_free(gpu_buffer_pool);
	gpu_buffer_pool = NULL;
}

/* get a GPUBuffer of at least `size' bytes; uses one from the buffer
   pool if possible, otherwise creates a new one */
GPUBuffer *GPU_buffer_alloc(int size)
{
	GPUBufferPool *pool;
	GPUBuffer *buf;
	int i, bufsize, bestfit = -1;

	pool = gpu_get_global_buffer_pool();

	/* not sure if this buffer pool code has been profiled much,
	   seems to me that the graphics driver and system memory
	   management might do this stuff anyway. --nicholas
	*/

	/* check the global buffer pool for a recently-deleted buffer
	   that is at least as big as the request, but not more than
	   twice as big */
	for(i = 0; i < pool->totbuf; i++) {
		bufsize = pool->buffers[i]->size;

		/* check for an exact size match */
		if(bufsize == size) {
			bestfit = i;
			break;
		}
		/* smaller buffers won't fit the data, and buffers at least
		   twice as big are a waste of memory */
		else if(bufsize > size && size > (bufsize / 2)) {
			/* is it closer to the required size than the last
			   acceptable buffer found? prefer it, to save memory */
			if(bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
				bestfit = i;
			}
		}
	}

	/* if an acceptable buffer was found in the pool, remove it
	   from the pool and return it */
	if(bestfit != -1) {
		buf = pool->buffers[bestfit];
		gpu_buffer_pool_remove_index(pool, bestfit);
		return buf;
	}

	/* no acceptable buffer found in the pool, create a new one */
	buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
	buf->size = size;

	if(useVBOs == 1) {
		/* create a new VBO and initialize it to the requested
		   size */
		glGenBuffersARB(1, &buf->id);
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}
	else {
		buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");

		/* purpose of this seems to be dealing with
		   out-of-memory errors? looks a bit iffy to me
		   though, at least on Linux I expect malloc() would
		   just overcommit. --nicholas */
		while(!buf->pointer && pool->totbuf > 0) {
			gpu_buffer_pool_delete_last(pool);
			buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
		}
		if(!buf->pointer)
			return NULL;
	}

	return buf;
}

/* release a GPUBuffer; does not free the actual buffer or its data,
   but rather moves it to the pool of recently-freed buffers for
   possible re-use */
void GPU_buffer_free(GPUBuffer *buffer)
{
	GPUBufferPool *pool;
	int i;

	if(!buffer)
		return;

	pool = gpu_get_global_buffer_pool();

	/* free the last used buffer in the queue if no more space, but only
	   if we are in the main thread. for e.g. rendering or baking it can
	   happen that we are in another thread and can't call OpenGL; in that
	   case cleanup will be done by GPU_buffer_pool_free_unused */
	if(BLI_thread_is_main()) {
		/* in main thread, safe to decrease size of pool back
		   down to MAX_FREE_GPU_BUFFERS */
		while(pool->totbuf >= MAX_FREE_GPU_BUFFERS)
			gpu_buffer_pool_delete_last(pool);
	}
	else {
		/* outside of main thread, can't safely delete the
		   buffer, so increase pool size */
		if(pool->maxsize == pool->totbuf) {
			pool->maxsize += MAX_FREE_GPU_BUFFERS;
			pool->buffers = MEM_reallocN(pool->buffers,
						     sizeof(GPUBuffer*) * pool->maxsize);
		}
	}

	/* shift pool entries up by one */
	for(i = pool->totbuf; i > 0; i--)
		pool->buffers[i] = pool->buffers[i-1];

	/* insert the buffer into the beginning of the pool */
	pool->buffers[0] = buffer;
	pool->totbuf++;
}
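
/* Illustrative sketch (disabled): a typical caller pairs GPU_buffer_alloc()
 * with GPU_buffer_free(). The freed buffer is not destroyed immediately but
 * parked in the global pool, so a later allocation of a similar size can
 * re-use it. Assumes a valid OpenGL context is current on this thread; the
 * size below is arbitrary. */
#if 0
static void example_buffer_reuse(void)
{
	/* request 1 MB of buffer storage; comes from the pool when possible */
	GPUBuffer *buf = GPU_buffer_alloc(1024 * 1024);

	if(buf) {
		/* ... fill and draw with the buffer here ... */

		/* return it to the pool instead of deleting it outright */
		GPU_buffer_free(buf);
	}

	/* at shutdown the pooled buffers are released for real */
	GPU_global_buffer_pool_free();
}
#endif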

typedef struct GPUVertPointLink {
	struct GPUVertPointLink *next;
	/* -1 means uninitialized */
	int point_index;
} GPUVertPointLink;

/* add a new point to the list of points related to a particular
   vertex */
static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
{
	GPUVertPointLink *lnk;

	lnk = &gdo->vert_points[vert_index];

	/* if first link is in use, add a new link at the end */
	if(lnk->point_index != -1) {
		/* get last link */
		for(; lnk->next; lnk = lnk->next);

		/* add a new link from the pool */
		lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
		gdo->vert_points_usage++;
	}

	lnk->point_index = point_index;
}

/* update the vert_points and triangle_to_mface fields with a new
   triangle */
static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
					int base_point_index,
					int face_index,
					int v1, int v2, int v3)
{
	int i, v[3] = {v1, v2, v3};
	for(i = 0; i < 3; i++)
		gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
	gdo->triangle_to_mface[base_point_index / 3] = face_index;
}

/* for each vertex, build a list of points related to it; these lists
   are stored in an array sized to the number of vertices */
static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
{
	GPUBufferMaterial *mat;
	int i, mat_orig_to_new[MAX_MATERIALS];

	/* allocate the array and space for links */
	gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
				       "GPUDrawObject.vert_points");
	gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
					   "GPUDrawObject.vert_points_mem");
	gdo->vert_points_usage = 0;

	/* build a map from the original material indices to the new
	   GPUBufferMaterial indices */
	for(i = 0; i < gdo->totmaterial; i++)
		mat_orig_to_new[gdo->materials[i].mat_nr] = i;

	/* -1 indicates the link is not yet used */
	for(i = 0; i < gdo->totvert; i++)
		gdo->vert_points[i].point_index = -1;

	for(i = 0; i < totface; i++, f++) {
		mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];

		/* add triangle */
		gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
					    i, f->v1, f->v2, f->v3);
		mat->totpoint += 3;

		/* add second triangle for quads */
		if(f->v4) {
			gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
						    i, f->v3, f->v4, f->v1);
			mat->totpoint += 3;
		}
	}

	/* map any unused vertices to loose points */
	for(i = 0; i < gdo->totvert; i++) {
		if(gdo->vert_points[i].point_index == -1) {
			gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
			gdo->tot_loose_point++;
		}
	}
}

/* see GPUDrawObject's structure definition for a description of the
   data being initialized here */
GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
{
	GPUDrawObject *gdo;
	MFace *mface;
	int points_per_mat[MAX_MATERIALS];
	int i, curmat, curpoint, totface;

	mface = dm->getFaceArray(dm);
	totface = dm->getNumFaces(dm);

	/* get the number of points used by each material, treating
	   each quad as two triangles */
	memset(points_per_mat, 0, sizeof(int)*MAX_MATERIALS);
	for(i = 0; i < totface; i++)
		points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;

	/* create the GPUDrawObject */
	gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
	gdo->totvert = dm->getNumVerts(dm);
	gdo->totedge = dm->getNumEdges(dm);

	/* count the number of materials used by this DerivedMesh */
	for(i = 0; i < MAX_MATERIALS; i++) {
		if(points_per_mat[i] > 0)
			gdo->totmaterial++;
	}

	/* allocate an array of materials used by this DerivedMesh */
	gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
				     "GPUDrawObject.materials");

	/* initialize the materials array */
	for(i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
		if(points_per_mat[i] > 0) {
			gdo->materials[curmat].start = curpoint;
			gdo->materials[curmat].totpoint = 0;
			gdo->materials[curmat].mat_nr = i;

			curpoint += points_per_mat[i];
			curmat++;
		}
	}

	/* store total number of points used for triangles */
	gdo->tot_triangle_point = curpoint;

	gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
					     "GPUDrawObject.triangle_to_mface");

	gpu_drawobject_init_vert_points(gdo, mface, totface);

	return gdo;
}
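
/* Illustrative sketch (disabled): how the point counts above work out for a
 * tiny hypothetical mesh with one quad (face 0) and one triangle (face 1),
 * both using material 0. A quad contributes two triangles (v1,v2,v3 and
 * v3,v4,v1), i.e. 6 points, a triangle contributes 3, so tot_triangle_point
 * ends up 9 and triangle_to_mface gets 9/3 = 3 entries. */
#if 0
static void example_drawobject_counts(DerivedMesh *dm)
{
	dm->drawObject = GPU_drawobject_new(dm);

	/* for the 1-quad + 1-triangle mesh described above we would expect:
	   dm->drawObject->totmaterial        == 1
	   dm->drawObject->materials[0].start == 0
	   dm->drawObject->tot_triangle_point == 9
	   dm->drawObject->triangle_to_mface  == {0, 0, 1} */

	GPU_drawobject_free(dm);
}
#endif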

void GPU_drawobject_free(DerivedMesh *dm)
{
	GPUDrawObject *gdo;

	if(!dm || !(gdo = dm->drawObject))
		return;

	MEM_freeN(gdo->materials);
	MEM_freeN(gdo->triangle_to_mface);
	MEM_freeN(gdo->vert_points);
	MEM_freeN(gdo->vert_points_mem);
	GPU_buffer_free(gdo->points);
	GPU_buffer_free(gdo->normals);
	GPU_buffer_free(gdo->uv);
	GPU_buffer_free(gdo->colors);
	GPU_buffer_free(gdo->edges);
	GPU_buffer_free(gdo->uvedges);

	MEM_freeN(gdo);
	dm->drawObject = NULL;
}

typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
				  int *mat_orig_to_new, void *user_data);

static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
				   int vector_size, int size, GLenum target,
				   void *user, GPUBufferCopyFunc copy_f)
{
	GPUBufferPool *pool;
	GPUBuffer *buffer;
	float *varray;
	int mat_orig_to_new[MAX_MATERIALS];
	int *cur_index_per_mat;
	int i;
	int success;
	GLboolean uploaded;

	pool = gpu_get_global_buffer_pool();

	/* alloc a GPUBuffer; fall back to legacy mode on failure */
	if(!(buffer = GPU_buffer_alloc(size)))
		dm->drawObject->legacy = 1;

	/* nothing to do for legacy mode */
	if(dm->drawObject->legacy)
		return NULL;

	cur_index_per_mat = MEM_mallocN(sizeof(int)*object->totmaterial,
					"GPU_buffer_setup.cur_index_per_mat");
	for(i = 0; i < object->totmaterial; i++) {
		/* for each material, the current index to copy data to */
		cur_index_per_mat[i] = object->materials[i].start * vector_size;

		/* map from original material index to new
		   GPUBufferMaterial index */
		mat_orig_to_new[object->materials[i].mat_nr] = i;
	}

	if(useVBOs) {
		success = 0;

		while(!success) {
			/* bind the buffer and discard previous data,
			   avoids stalling gpu */
			glBindBufferARB(target, buffer->id);
			glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);

			/* attempt to map the buffer */
			if(!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
				/* failed to map the buffer; delete it */
				GPU_buffer_free(buffer);
				gpu_buffer_pool_delete_last(pool);
				buffer = NULL;

				/* try freeing an entry from the pool
				   and reallocating the buffer */
				if(pool->totbuf > 0) {
					gpu_buffer_pool_delete_last(pool);
					buffer = GPU_buffer_alloc(size);
				}

				/* allocation still failed; fall back
				   to legacy mode */
				if(!buffer) {
					dm->drawObject->legacy = 1;
					success = 1;
				}
			}
			else {
				success = 1;
			}
		}

		/* check legacy fallback didn't happen */
		if(dm->drawObject->legacy == 0) {
			uploaded = GL_FALSE;
			/* attempt to upload the data to the VBO */
			while(uploaded == GL_FALSE) {
				(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
				/* glUnmapBuffer returns GL_FALSE if
				 * the data store is corrupted; retry
				 * in that case */
				uploaded = glUnmapBufferARB(target);
			}
		}
		glBindBufferARB(target, 0);
	}
	else {
		/* VBO not supported, use vertex array fallback */
		if(buffer->pointer) {
			varray = buffer->pointer;
			(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
		}
		else {
			dm->drawObject->legacy = 1;
		}
	}

	MEM_freeN(cur_index_per_mat);

	return buffer;
}
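
/* Illustrative sketch (disabled): the copy callback passed to
 * gpu_buffer_setup() is handed the mapped (or malloc'ed) array plus a
 * per-material write index, and is expected to advance that index as it
 * writes. A hypothetical copy function writing one float per triangle corner
 * could look roughly like this. */
#if 0
static void example_copy_func(DerivedMesh *dm, float *varray, int *index,
			      int *mat_orig_to_new, void *UNUSED(user))
{
	MFace *f = dm->getFaceArray(dm);
	int i, totface = dm->getNumFaces(dm);

	for(i = 0; i < totface; i++, f++) {
		/* start of this face's slot within its material's range */
		int start = index[mat_orig_to_new[f->mat_nr]];

		varray[start] = varray[start+1] = varray[start+2] = 1.0f;
		index[mat_orig_to_new[f->mat_nr]] += 3;

		if(f->v4) {	/* second triangle of a quad */
			varray[start+3] = varray[start+4] = varray[start+5] = 1.0f;
			index[mat_orig_to_new[f->mat_nr]] += 3;
		}
	}
}
#endif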

static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	MVert *mvert;
	MFace *f;
	int i, j, start, totface;

	mvert = dm->getVertArray(dm);
	f = dm->getFaceArray(dm);

	totface = dm->getNumFaces(dm);
	for(i = 0; i < totface; i++, f++) {
		start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_v3_v3(&varray[start], mvert[f->v1].co);
		copy_v3_v3(&varray[start+3], mvert[f->v2].co);
		copy_v3_v3(&varray[start+6], mvert[f->v3].co);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		if(f->v4) {
			/* v3 v4 v1 */
			copy_v3_v3(&varray[start+9], mvert[f->v3].co);
			copy_v3_v3(&varray[start+12], mvert[f->v4].co);
			copy_v3_v3(&varray[start+15], mvert[f->v1].co);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}

	/* copy loose points */
	j = dm->drawObject->tot_triangle_point*3;
	for(i = 0; i < dm->drawObject->totvert; i++) {
		if(dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
			copy_v3_v3(&varray[j], mvert[i].co);
			j += 3;
		}
	}
}

static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	int i, totface;
	int start;
	float f_no[3];

	float *nors = dm->getFaceDataArray(dm, CD_NORMAL);
	MVert *mvert = dm->getVertArray(dm);
	MFace *f = dm->getFaceArray(dm);

	totface = dm->getNumFaces(dm);
	for(i = 0; i < totface; i++, f++) {
		const int smoothnormal = (f->flag & ME_SMOOTH);

		start = index[mat_orig_to_new[f->mat_nr]];
		index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;

		if(smoothnormal) {
			/* copy vertex normal */
			normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
			normal_short_to_float_v3(&varray[start+3], mvert[f->v2].no);
			normal_short_to_float_v3(&varray[start+6], mvert[f->v3].no);

			if(f->v4) {
				normal_short_to_float_v3(&varray[start+9], mvert[f->v3].no);
				normal_short_to_float_v3(&varray[start+12], mvert[f->v4].no);
				normal_short_to_float_v3(&varray[start+15], mvert[f->v1].no);
			}
		}
		else if(nors) {
			/* copy cached face normal */
			copy_v3_v3(&varray[start], &nors[i*3]);
			copy_v3_v3(&varray[start+3], &nors[i*3]);
			copy_v3_v3(&varray[start+6], &nors[i*3]);

			if(f->v4) {
				copy_v3_v3(&varray[start+9], &nors[i*3]);
				copy_v3_v3(&varray[start+12], &nors[i*3]);
				copy_v3_v3(&varray[start+15], &nors[i*3]);
			}
		}
		else {
			/* calculate face normal */
			if(f->v4)
				normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
			else
				normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);

			copy_v3_v3(&varray[start], f_no);
			copy_v3_v3(&varray[start+3], f_no);
			copy_v3_v3(&varray[start+6], f_no);

			if(f->v4) {
				copy_v3_v3(&varray[start+9], f_no);
				copy_v3_v3(&varray[start+12], f_no);
				copy_v3_v3(&varray[start+15], f_no);
			}
		}
	}
}

static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	int start;
	int i, totface;

	MTFace *mtface;
	MFace *f;

	if(!(mtface = DM_get_face_data_layer(dm, CD_MTFACE)))
		return;
	f = dm->getFaceArray(dm);

	totface = dm->getNumFaces(dm);
	for(i = 0; i < totface; i++, f++) {
		start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_v2_v2(&varray[start], mtface[i].uv[0]);
		copy_v2_v2(&varray[start+2], mtface[i].uv[1]);
		copy_v2_v2(&varray[start+4], mtface[i].uv[2]);
		index[mat_orig_to_new[f->mat_nr]] += 6;

		if(f->v4) {
			/* v3 v4 v1 */
			copy_v2_v2(&varray[start+6], mtface[i].uv[2]);
			copy_v2_v2(&varray[start+8], mtface[i].uv[3]);
			copy_v2_v2(&varray[start+10], mtface[i].uv[0]);
			index[mat_orig_to_new[f->mat_nr]] += 6;
		}
	}
}
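
/* Illustrative sketch (disabled): the index increments used by the copy
 * functions follow directly from the layout of one triangle corner in each
 * buffer: 3 position floats, 3 normal floats, 2 UV floats, 3 color bytes.
 * A quad emits two triangles, so it advances by twice these amounts. */
#if 0
enum {
	EXAMPLE_FLOATS_PER_TRI_CO = 3 * 3,	/* 9, as used by GPU_buffer_copy_vertex */
	EXAMPLE_FLOATS_PER_TRI_NO = 3 * 3,	/* 9, or 18 for a quad's two triangles */
	EXAMPLE_FLOATS_PER_TRI_UV = 3 * 2,	/* 6, as used by GPU_buffer_copy_uv */
	EXAMPLE_BYTES_PER_TRI_COL = 3 * 3	/* 9, as used by the color copy functions */
};
#endif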

static void GPU_buffer_copy_color3(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
{
	int i, totface;
	char *varray = (char *)varray_;
	char *mcol = (char *)user;
	MFace *f = dm->getFaceArray(dm);

	totface = dm->getNumFaces(dm);
	for(i = 0; i < totface; i++, f++) {
		int start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_v3_v3_char(&varray[start], &mcol[i*12]);
		copy_v3_v3_char(&varray[start+3], &mcol[i*12+3]);
		copy_v3_v3_char(&varray[start+6], &mcol[i*12+6]);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		if(f->v4) {
			/* v3 v4 v1 */
			copy_v3_v3_char(&varray[start+9], &mcol[i*12+6]);
			copy_v3_v3_char(&varray[start+12], &mcol[i*12+9]);
			copy_v3_v3_char(&varray[start+15], &mcol[i*12]);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}
}

static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
{
	v[0] = col[3];
	v[1] = col[2];
	v[2] = col[1];
}

/* treat varray_ as an array of MCol, four MCol's per face */
static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
{
	int i, totface;
	unsigned char *varray = (unsigned char *)varray_;
	unsigned char *mcol = (unsigned char *)user;
	MFace *f = dm->getFaceArray(dm);

	totface = dm->getNumFaces(dm);
	for(i = 0; i < totface; i++, f++) {
		int start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_mcol_uc3(&varray[start], &mcol[i*16]);
		copy_mcol_uc3(&varray[start+3], &mcol[i*16+4]);
		copy_mcol_uc3(&varray[start+6], &mcol[i*16+8]);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		if(f->v4) {
			/* v3 v4 v1 */
			copy_mcol_uc3(&varray[start+9], &mcol[i*16+8]);
			copy_mcol_uc3(&varray[start+12], &mcol[i*16+12]);
			copy_mcol_uc3(&varray[start+15], &mcol[i*16]);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}
}

static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
{
	MEdge *medge;
	unsigned int *varray = (unsigned int *)varray_;
	int i, totedge;

	medge = dm->getEdgeArray(dm);
	totedge = dm->getNumEdges(dm);

	for(i = 0; i < totedge; i++, medge++) {
		varray[i*2] = dm->drawObject->vert_points[medge->v1].point_index;
		varray[i*2+1] = dm->drawObject->vert_points[medge->v2].point_index;
	}
}

static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
{
	MTFace *tf = DM_get_face_data_layer(dm, CD_MTFACE);
	int i, j = 0;

	if(!tf)
		return;

	for(i = 0; i < dm->numFaceData; i++, tf++) {
		MFace mf;
		dm->getFace(dm, i, &mf);

		copy_v2_v2(&varray[j], tf->uv[0]);
		copy_v2_v2(&varray[j+2], tf->uv[1]);

		copy_v2_v2(&varray[j+4], tf->uv[1]);
		copy_v2_v2(&varray[j+6], tf->uv[2]);

		if(!mf.v4) {
			copy_v2_v2(&varray[j+8], tf->uv[2]);
			copy_v2_v2(&varray[j+10], tf->uv[0]);
			j += 12;
		}
		else {
			copy_v2_v2(&varray[j+8], tf->uv[2]);
			copy_v2_v2(&varray[j+10], tf->uv[3]);

			copy_v2_v2(&varray[j+12], tf->uv[3]);
			copy_v2_v2(&varray[j+14], tf->uv[0]);
			j += 16;
		}
	}
}

/* get the DerivedMesh's MCols; choose (in decreasing order of
   preference) from CD_ID_MCOL, CD_WEIGHT_MCOL, or CD_MCOL */
static MCol *gpu_buffer_color_type(DerivedMesh *dm)
{
	MCol *c;
	int type;

	type = CD_ID_MCOL;
	c = DM_get_face_data_layer(dm, type);
	if(!c) {
		type = CD_WEIGHT_MCOL;
		c = DM_get_face_data_layer(dm, type);
		if(!c) {
			type = CD_MCOL;
			c = DM_get_face_data_layer(dm, type);
		}
	}

	dm->drawObject->colType = type;
	return c;
}

typedef enum {
	GPU_BUFFER_VERTEX = 0,
	GPU_BUFFER_NORMAL,
	GPU_BUFFER_COLOR,
	GPU_BUFFER_UV,
	GPU_BUFFER_EDGE,
	GPU_BUFFER_UVEDGE,
} GPUBufferType;

typedef struct {
	GPUBufferCopyFunc copy;
	GLenum gl_buffer_type;
	int vector_size;
} GPUBufferTypeSettings;

const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
	{GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
	{GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
	{GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
};

/* get the GPUDrawObject buffer associated with a type */
static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
{
	switch(type) {
	case GPU_BUFFER_VERTEX:
		return &gdo->points;
	case GPU_BUFFER_NORMAL:
		return &gdo->normals;
	case GPU_BUFFER_COLOR:
		return &gdo->colors;
	case GPU_BUFFER_UV:
		return &gdo->uv;
	case GPU_BUFFER_EDGE:
		return &gdo->edges;
	case GPU_BUFFER_UVEDGE:
		return &gdo->uvedges;
	default:
		return NULL;
	}
}

/* get the amount of space to allocate for a buffer of a particular type */
static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
{
	switch(type) {
	case GPU_BUFFER_VERTEX:
		return sizeof(float)*3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
	case GPU_BUFFER_NORMAL:
		return sizeof(float)*3*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_COLOR:
		return sizeof(char)*3*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_UV:
		return sizeof(float)*2*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_EDGE:
		return sizeof(int)*2*dm->drawObject->totedge;
	case GPU_BUFFER_UVEDGE:
		/* each triangle gets 3 points and 3 edges, and each edge has
		   its own, non-shared coords, so each triangle corner needs a
		   minimum of 4 floats; quads use less, so over-allocate here
		   and assume all faces are triangles */
		return sizeof(float) * dm->drawObject->tot_triangle_point;
	default:
		return -1;
	}
}

/* call gpu_buffer_setup with settings for a particular type of buffer */
static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
{
	const GPUBufferTypeSettings *ts;
	void *user_data = NULL;
	GPUBuffer *buf;

	ts = &gpu_buffer_type_settings[type];

	/* special handling for MCol and UV buffers */
	if(type == GPU_BUFFER_COLOR) {
		if(!(user_data = gpu_buffer_color_type(dm)))
			return NULL;
	}
	else if(type == GPU_BUFFER_UV) {
		if(!DM_get_face_data_layer(dm, CD_MTFACE))
			return NULL;
	}

	buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
			       gpu_buffer_size_from_type(dm, type),
			       ts->gl_buffer_type, user_data, ts->copy);

	return buf;
}

/* get the buffer of `type', initializing the GPUDrawObject and
   buffer if needed */
static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
{
	GPUBuffer **buf;

	if(!dm->drawObject)
		dm->drawObject = GPU_drawobject_new(dm);

	buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
	if(!(*buf))
		*buf = gpu_buffer_setup_type(dm, type);

	return *buf;
}

void GPU_vertex_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
		glVertexPointer(3, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;
}

void GPU_normal_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
		return;

	glEnableClientState(GL_NORMAL_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
		glNormalPointer(GL_FLOAT, 0, 0);
	}
	else {
		glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
	}

	GLStates |= GPU_BUFFER_NORMAL_STATE;
}

void GPU_uv_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
		return;

	glEnableClientState(GL_TEXTURE_COORD_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
		glTexCoordPointer(2, GL_FLOAT, 0, 0);
	}
	else {
		glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
	}

	GLStates |= GPU_BUFFER_TEXCOORD_STATE;
}

void GPU_color_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
		return;

	glEnableClientState(GL_COLOR_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
	}
	else {
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
	}

	GLStates |= GPU_BUFFER_COLOR_STATE;
}

void GPU_edge_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
		return;

	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
		glVertexPointer(3, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;

	if(useVBOs)
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);

	GLStates |= GPU_BUFFER_ELEMENT_STATE;
}

void GPU_uvedge_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
		glVertexPointer(2, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;
}

static int GPU_typesize(int type)
{
	switch(type) {
	case GL_FLOAT:
		return sizeof(float);
	case GL_INT:
		return sizeof(int);
	case GL_UNSIGNED_INT:
		return sizeof(unsigned int);
	case GL_BYTE:
		return sizeof(char);
	case GL_UNSIGNED_BYTE:
		return sizeof(unsigned char);
	default:
		return 0;
	}
}

int GPU_attrib_element_size(GPUAttrib data[], int numdata)
{
	int i, elementsize = 0;

	for(i = 0; i < numdata; i++) {
		int typesize = GPU_typesize(data[i].type);
		if(typesize != 0)
			elementsize += typesize*data[i].size;
	}
	return elementsize;
}

void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
{
	int i;
	int elementsize;
	intptr_t offset = 0;

	for(i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
		if(attribData[i].index != -1) {
			glDisableVertexAttribArrayARB(attribData[i].index);
		}
		else
			break;
	}
	elementsize = GPU_attrib_element_size(data, numdata);

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		for(i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
						 GL_FALSE, elementsize, (void *)offset);
			offset += data[i].size*GPU_typesize(data[i].type);

			attribData[i].index = data[i].index;
			attribData[i].size = data[i].size;
			attribData[i].type = data[i].type;
		}
		attribData[numdata].index = -1;
	}
	else {
		for(i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
						 GL_FALSE, elementsize, (char *)buffer->pointer + offset);
			offset += data[i].size*GPU_typesize(data[i].type);
		}
	}
}
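
/* Illustrative sketch (disabled): filling a GPUAttrib array and using
 * GPU_attrib_element_size()/GPU_interleaved_attrib_setup() to bind an
 * interleaved buffer. The attribute indices (0 and 1 here) are hypothetical;
 * in practice they come from the active GPU material/shader. */
#if 0
static void example_interleaved_attribs(GPUBuffer *buffer)
{
	GPUAttrib attribs[2];
	int elementsize;

	attribs[0].index = 0;		/* e.g. a position-like attribute */
	attribs[0].size = 3;
	attribs[0].type = GL_FLOAT;

	attribs[1].index = 1;		/* e.g. a UV-like attribute */
	attribs[1].size = 2;
	attribs[1].type = GL_FLOAT;

	/* 3 floats + 2 floats = 20 bytes per interleaved element */
	elementsize = GPU_attrib_element_size(attribs, 2);
	(void)elementsize;

	GPU_interleaved_attrib_setup(buffer, attribs, 2);
	/* ... issue draw calls ... */
	GPU_buffer_unbind();	/* also disables the attrib arrays set up above */
}
#endif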

void GPU_buffer_unbind(void)
{
	int i;

	if(GLStates & GPU_BUFFER_VERTEX_STATE)
		glDisableClientState(GL_VERTEX_ARRAY);
	if(GLStates & GPU_BUFFER_NORMAL_STATE)
		glDisableClientState(GL_NORMAL_ARRAY);
	if(GLStates & GPU_BUFFER_TEXCOORD_STATE)
		glDisableClientState(GL_TEXTURE_COORD_ARRAY);
	if(GLStates & GPU_BUFFER_COLOR_STATE)
		glDisableClientState(GL_COLOR_ARRAY);
	if(GLStates & GPU_BUFFER_ELEMENT_STATE) {
		if(useVBOs) {
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		}
	}
	/* clear the listed flags (bitwise NOT, not logical NOT) */
	GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
		      GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
		      GPU_BUFFER_ELEMENT_STATE);

	for(i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
		if(attribData[i].index != -1) {
			glDisableVertexAttribArrayARB(attribData[i].index);
		}
		else
			break;
	}

	if(useVBOs)
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
}
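
/* Illustrative sketch (disabled): the usual pattern in the draw code is to
 * enable the needed arrays with the GPU_*_setup() calls, issue the draw
 * calls, then reset state with GPU_buffer_unbind(). The glDrawArrays() range
 * here is only an example; real callers draw per material using the
 * GPUBufferMaterial start/totpoint ranges. */
#if 0
static void example_draw_derivedmesh(DerivedMesh *dm)
{
	if(GPU_buffer_legacy(dm))
		return;	/* caller should fall back to immediate-mode drawing */

	GPU_vertex_setup(dm);
	GPU_normal_setup(dm);
	GPU_color_setup(dm);

	glShadeModel(GL_SMOOTH);
	glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->tot_triangle_point);

	GPU_buffer_unbind();
}
#endif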

/* confusion: code in cdderivedmesh calls both GPU_color_setup and
   GPU_color3_upload; both of these set the `colors' buffer, so seems
   like it will just needlessly overwrite? --nicholas */
void GPU_color3_upload(DerivedMesh *dm, unsigned char *data)
{
	if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors);

	dm->drawObject->colors = gpu_buffer_setup(dm, dm->drawObject, 3,
						  sizeof(char)*3*dm->drawObject->tot_triangle_point,
						  GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color3);
}

/* this is used only in cdDM_drawFacesColored, which I think is no
   longer used, so can probably remove this --nicholas */
void GPU_color4_upload(DerivedMesh *UNUSED(dm), unsigned char *UNUSED(data))
{
	/*if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors);
	dm->drawObject->colors = gpu_buffer_setup(dm, dm->drawObject, 3,
						  sizeof(char)*3*dm->drawObject->tot_triangle_point,
						  GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color4);*/
}

void GPU_color_switch(int mode)
{
	if(mode) {
		if(!(GLStates & GPU_BUFFER_COLOR_STATE))
			glEnableClientState(GL_COLOR_ARRAY);
		GLStates |= GPU_BUFFER_COLOR_STATE;
	}
	else {
		if(GLStates & GPU_BUFFER_COLOR_STATE)
			glDisableClientState(GL_COLOR_ARRAY);
		/* clear only the color flag (bitwise NOT, not logical NOT) */
		GLStates &= ~GPU_BUFFER_COLOR_STATE;
	}
}

/* return 1 if drawing should be done using old immediate-mode
   code, 0 otherwise */
int GPU_buffer_legacy(DerivedMesh *dm)
{
	int test = (U.gameflags & USER_DISABLE_VBO);
	if(test)
		return 1;

	if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	return dm->drawObject->legacy;
}

void *GPU_buffer_lock(GPUBuffer *buffer)
{
	float *varray;

	if(!buffer)
		return 0;

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

void *GPU_buffer_lock_stream(GPUBuffer *buffer)
{
	float *varray;

	if(!buffer)
		return 0;

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		/* discard previous data, avoid stalling gpu */
		glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
		varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

void GPU_buffer_unlock(GPUBuffer *buffer)
{
	if(useVBOs) {
		if(buffer) {
			/* note: this operation can fail, could return
			   an error code from this function? */
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}
}

/* used for drawing edges */
void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
{
	glDrawElements(mode, count, GL_UNSIGNED_INT,
		       (useVBOs ?
			(void*)(start * sizeof(unsigned int)) :
			((int*)elements->pointer) + start));
}
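
/* Illustrative sketch (disabled): drawing the edge index buffer built above.
 * GPU_edge_setup() binds the vertex positions plus the element array, and
 * GPU_buffer_draw_elements() issues the indexed draw; start/count are in
 * indices, two per edge. */
#if 0
static void example_draw_all_edges(DerivedMesh *dm)
{
	if(GPU_buffer_legacy(dm))
		return;

	GPU_edge_setup(dm);
	GPU_buffer_draw_elements(dm->drawObject->edges, GL_LINES,
				 0, dm->drawObject->totedge * 2);
	GPU_buffer_unbind();
}
#endif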

/* XXX: the rest of the code in this file is used for optimized PBVH
   drawing and doesn't interact at all with the buffer code above */

/* Convenience struct for building the VBO. */
typedef struct {
	float co[3];
	short no[3];
} VertexBufferFormat;

struct GPU_Buffers {
	/* opengl buffer handles */
	GLuint vert_buf, index_buf;
	GLenum index_type;

	/* mesh pointers in case buffer allocation fails */
	MFace *mface;
	MVert *mvert;
	int *face_indices;
	int totface;

	/* grid pointers */
	DMGridData **grids;
	int *grid_indices;
	int totgrid;
	int gridsize;

	unsigned int tot_tri, tot_quad;
};

void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
			     int *vert_indices, int totvert)
{
	VertexBufferFormat *vert_data;
	int i;

	if(buffers->vert_buf) {
		/* Build VBO */
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
				sizeof(VertexBufferFormat) * totvert,
				NULL, GL_STATIC_DRAW_ARB);
		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);

		if(vert_data) {
			for(i = 0; i < totvert; ++i) {
				MVert *v = mvert + vert_indices[i];
				VertexBufferFormat *out = vert_data + i;

				copy_v3_v3(out->co, v->co);
				memcpy(out->no, v->no, sizeof(short) * 3);
			}

			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	buffers->mvert = mvert;
}

GPU_Buffers *GPU_build_mesh_buffers(GHash *map, MVert *mvert, MFace *mface,
				    int *face_indices, int totface,
				    int *vert_indices, int tot_uniq_verts,
				    int totvert)
{
	GPU_Buffers *buffers;
	unsigned short *tri_data;
	int i, j, k, tottri;

	buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
	buffers->index_type = GL_UNSIGNED_SHORT;

	/* Count the number of triangles */
	for(i = 0, tottri = 0; i < totface; ++i)
		tottri += mface[face_indices[i]].v4 ? 2 : 1;

	if(GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO))
		glGenBuffersARB(1, &buffers->index_buf);

	if(buffers->index_buf) {
		/* Generate index buffer object */
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
				sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);

		/* Fill the triangle buffer */
		tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if(tri_data) {
			for(i = 0; i < totface; ++i) {
				MFace *f = mface + face_indices[i];
				int v[3];

				v[0] = f->v1;
				v[1] = f->v2;
				v[2] = f->v3;

				for(j = 0; j < (f->v4 ? 2 : 1); ++j) {
					for(k = 0; k < 3; ++k) {
						void *value, *key = SET_INT_IN_POINTER(v[k]);
						int vbo_index;

						value = BLI_ghash_lookup(map, key);
						vbo_index = GET_INT_FROM_POINTER(value);

						if(vbo_index < 0) {
							vbo_index = -vbo_index +
								tot_uniq_verts - 1;
						}

						*tri_data = vbo_index;
						++tri_data;
					}
					v[0] = f->v4;
					v[1] = f->v1;
					v[2] = f->v3;
				}
			}
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
		}
		else {
			glDeleteBuffersARB(1, &buffers->index_buf);
			buffers->index_buf = 0;
		}

		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
	}

	if(buffers->index_buf)
		glGenBuffersARB(1, &buffers->vert_buf);
	GPU_update_mesh_buffers(buffers, mvert, vert_indices, totvert);

	buffers->tot_tri = tottri;

	buffers->mface = mface;
	buffers->face_indices = face_indices;
	buffers->totface = totface;

	return buffers;
}
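
/* Illustrative sketch (disabled): typical lifecycle of the PBVH mesh buffers
 * above, as driven from sculpt drawing: build once for a node, re-upload the
 * vertices after a stroke deforms them, draw, and free when the node goes
 * away. The map/index arrays are whatever the PBVH node provides. */
#if 0
static void example_pbvh_mesh_node(GHash *map, MVert *mvert, MFace *mface,
				   int *face_indices, int totface,
				   int *vert_indices, int tot_uniq_verts,
				   int totvert)
{
	GPU_Buffers *buffers;

	buffers = GPU_build_mesh_buffers(map, mvert, mface,
					 face_indices, totface,
					 vert_indices, tot_uniq_verts, totvert);

	/* after vertices move, refresh the VBO contents */
	GPU_update_mesh_buffers(buffers, mvert, vert_indices, totvert);

	GPU_draw_buffers(buffers);
	GPU_free_buffers(buffers);
}
#endif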

void GPU_update_grid_buffers(GPU_Buffers *buffers, DMGridData **grids,
			     int *grid_indices, int totgrid, int gridsize, int smooth)
{
	DMGridData *vert_data;
	int i, j, k, totvert;

	totvert = gridsize*gridsize*totgrid;

	/* Build VBO */
	if(buffers->vert_buf) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
				sizeof(DMGridData) * totvert,
				NULL, GL_STATIC_DRAW_ARB);
		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if(vert_data) {
			for(i = 0; i < totgrid; ++i) {
				DMGridData *grid = grids[grid_indices[i]];
				memcpy(vert_data, grid, sizeof(DMGridData)*gridsize*gridsize);

				if(!smooth) {
					/* for flat shading, recalc normals and set the last vertex of
					   each quad in the index buffer to have the flat normal as
					   that is what opengl will use */
					for(j = 0; j < gridsize-1; ++j) {
						for(k = 0; k < gridsize-1; ++k) {
							normal_quad_v3(vert_data[(j+1)*gridsize + (k+1)].no,
								       vert_data[(j+1)*gridsize + k].co,
								       vert_data[(j+1)*gridsize + k+1].co,
								       vert_data[j*gridsize + k+1].co,
								       vert_data[j*gridsize + k].co);
						}
					}
				}

				vert_data += gridsize*gridsize;
			}
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	buffers->grids = grids;
	buffers->grid_indices = grid_indices;
	buffers->totgrid = totgrid;
	buffers->gridsize = gridsize;

	//printf("node updated %p\n", buffers);
}

GPU_Buffers *GPU_build_grid_buffers(DMGridData **UNUSED(grids), int *UNUSED(grid_indices),
				    int totgrid, int gridsize)
{
	GPU_Buffers *buffers;
	int i, j, k, totquad, offset = 0;

	buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");

	/* Count the number of quads */
	totquad = (gridsize-1)*(gridsize-1)*totgrid;

	/* Generate index buffer object */
	if(GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO))
		glGenBuffersARB(1, &buffers->index_buf);

	if(buffers->index_buf) {
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);

		if(totquad < USHRT_MAX) {
			unsigned short *quad_data;

			buffers->index_type = GL_UNSIGNED_SHORT;
			glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
					sizeof(unsigned short) * totquad * 4, NULL, GL_STATIC_DRAW_ARB);

			/* Fill the quad buffer */
			quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
			if(quad_data) {
				for(i = 0; i < totgrid; ++i) {
					for(j = 0; j < gridsize-1; ++j) {
						for(k = 0; k < gridsize-1; ++k) {
							*(quad_data++) = offset + j*gridsize + k+1;
							*(quad_data++) = offset + j*gridsize + k;
							*(quad_data++) = offset + (j+1)*gridsize + k;
							*(quad_data++) = offset + (j+1)*gridsize + k+1;
						}
					}

					offset += gridsize*gridsize;
				}
				glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
			}
			else {
				glDeleteBuffersARB(1, &buffers->index_buf);
				buffers->index_buf = 0;
			}
		}
		else {
			unsigned int *quad_data;

			buffers->index_type = GL_UNSIGNED_INT;
			glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
					sizeof(unsigned int) * totquad * 4, NULL, GL_STATIC_DRAW_ARB);

			/* Fill the quad buffer */
			quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);

			if(quad_data) {
				for(i = 0; i < totgrid; ++i) {
					for(j = 0; j < gridsize-1; ++j) {
						for(k = 0; k < gridsize-1; ++k) {
							*(quad_data++) = offset + j*gridsize + k+1;
							*(quad_data++) = offset + j*gridsize + k;
							*(quad_data++) = offset + (j+1)*gridsize + k;
							*(quad_data++) = offset + (j+1)*gridsize + k+1;
						}
					}

					offset += gridsize*gridsize;
				}
				glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
			}
			else {
				glDeleteBuffersARB(1, &buffers->index_buf);
				buffers->index_buf = 0;
			}
		}

		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
	}

	/* Build VBO */
	if(buffers->index_buf)
		glGenBuffersARB(1, &buffers->vert_buf);

	buffers->tot_quad = totquad;

	return buffers;
}
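
/* Illustrative sketch (disabled): the multires/grid path mirrors the mesh
 * path above: build the index buffer once per node, then re-upload the grid
 * vertex data whenever it changes; `smooth' toggles between the stored
 * per-vertex normals and recomputed flat normals. */
#if 0
static void example_pbvh_grid_node(DMGridData **grids, int *grid_indices,
				   int totgrid, int gridsize, int smooth)
{
	GPU_Buffers *buffers;

	buffers = GPU_build_grid_buffers(grids, grid_indices, totgrid, gridsize);
	GPU_update_grid_buffers(buffers, grids, grid_indices, totgrid, gridsize, smooth);

	GPU_draw_buffers(buffers);
	GPU_free_buffers(buffers);
}
#endif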

void GPU_draw_buffers(GPU_Buffers *buffers)
{
	if(buffers->vert_buf && buffers->index_buf) {
		glEnableClientState(GL_VERTEX_ARRAY);
		glEnableClientState(GL_NORMAL_ARRAY);

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);

		if(buffers->tot_quad) {
			glVertexPointer(3, GL_FLOAT, sizeof(DMGridData), (void*)offsetof(DMGridData, co));
			glNormalPointer(GL_FLOAT, sizeof(DMGridData), (void*)offsetof(DMGridData, no));

			glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
		}
		else {
			glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat), (void*)offsetof(VertexBufferFormat, co));
			glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat), (void*)offsetof(VertexBufferFormat, no));

			glDrawElements(GL_TRIANGLES, buffers->tot_tri * 3, buffers->index_type, 0);
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);

		glDisableClientState(GL_VERTEX_ARRAY);
		glDisableClientState(GL_NORMAL_ARRAY);
	}
	else if(buffers->totface) {
		/* fallback if we are out of memory */
		int i;

		for(i = 0; i < buffers->totface; ++i) {
			MFace *f = buffers->mface + buffers->face_indices[i];

			glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);
			glNormal3sv(buffers->mvert[f->v1].no);
			glVertex3fv(buffers->mvert[f->v1].co);
			glNormal3sv(buffers->mvert[f->v2].no);
			glVertex3fv(buffers->mvert[f->v2].co);
			glNormal3sv(buffers->mvert[f->v3].no);
			glVertex3fv(buffers->mvert[f->v3].co);
			if(f->v4) {
				glNormal3sv(buffers->mvert[f->v4].no);
				glVertex3fv(buffers->mvert[f->v4].co);
			}
			glEnd();
		}
	}
	else if(buffers->totgrid) {
		int i, x, y, gridsize = buffers->gridsize;

		for(i = 0; i < buffers->totgrid; ++i) {
			DMGridData *grid = buffers->grids[buffers->grid_indices[i]];

			for(y = 0; y < gridsize-1; y++) {
				glBegin(GL_QUAD_STRIP);
				for(x = 0; x < gridsize; x++) {
					DMGridData *a = &grid[y*gridsize + x];
					DMGridData *b = &grid[(y+1)*gridsize + x];

					glNormal3fv(a->no);
					glVertex3fv(a->co);
					glNormal3fv(b->no);
					glVertex3fv(b->co);
				}
				glEnd();
			}
		}
	}
}

void GPU_free_buffers(GPU_Buffers *buffers)
{
	if(buffers) {
		if(buffers->vert_buf)
			glDeleteBuffersARB(1, &buffers->vert_buf);
		if(buffers->index_buf)
			glDeleteBuffersARB(1, &buffers->index_buf);

		MEM_freeN(buffers);
	}
}