/* Blender V2.61 - r43446 */
00001 /* 00002 * ***** BEGIN GPL LICENSE BLOCK ***** 00003 * 00004 * This program is free software; you can redistribute it and/or 00005 * modify it under the terms of the GNU General Public License 00006 * as published by the Free Software Foundation; either version 2 00007 * of the License, or (at your option) any later version. 00008 * 00009 * This program is distributed in the hope that it will be useful, 00010 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00011 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00012 * GNU General Public License for more details. 00013 * 00014 * You should have received a copy of the GNU General Public License 00015 * along with this program; if not, write to the Free Software Foundation, 00016 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 00017 * 00018 * The Original Code is Copyright (C) 2006 Blender Foundation. 00019 * All rights reserved. 00020 * 00021 * The Original Code is: all of this file. 00022 * 00023 * Contributor(s): none yet. 
00024 * 00025 * ***** END GPL LICENSE BLOCK ***** 00026 */ 00027 00033 #include "node_composite_util.h" 00034 00035 CompBuf *alloc_compbuf(int sizex, int sizey, int type, int alloc) 00036 { 00037 CompBuf *cbuf= MEM_callocN(sizeof(CompBuf), "compbuf"); 00038 00039 cbuf->x= sizex; 00040 cbuf->y= sizey; 00041 cbuf->xrad= sizex/2; 00042 cbuf->yrad= sizey/2; 00043 00044 cbuf->type= type; 00045 if(alloc) { 00046 if(cbuf->type==CB_RGBA) 00047 cbuf->rect= MEM_mapallocN(4*sizeof(float)*sizex*sizey, "compbuf RGBA rect"); 00048 else if(cbuf->type==CB_VEC3) 00049 cbuf->rect= MEM_mapallocN(3*sizeof(float)*sizex*sizey, "compbuf Vector3 rect"); 00050 else if(cbuf->type==CB_VEC2) 00051 cbuf->rect= MEM_mapallocN(2*sizeof(float)*sizex*sizey, "compbuf Vector2 rect"); 00052 else 00053 cbuf->rect= MEM_mapallocN(sizeof(float)*sizex*sizey, "compbuf Fac rect"); 00054 cbuf->malloc= 1; 00055 } 00056 cbuf->disprect.xmin= 0; 00057 cbuf->disprect.ymin= 0; 00058 cbuf->disprect.xmax= sizex; 00059 cbuf->disprect.ymax= sizey; 00060 00061 return cbuf; 00062 } 00063 00064 CompBuf *dupalloc_compbuf(CompBuf *cbuf) 00065 { 00066 CompBuf *dupbuf= alloc_compbuf(cbuf->x, cbuf->y, cbuf->type, 1); 00067 if(dupbuf) { 00068 memcpy(dupbuf->rect, cbuf->rect, cbuf->type*sizeof(float)*cbuf->x*cbuf->y); 00069 00070 dupbuf->xof= cbuf->xof; 00071 dupbuf->yof= cbuf->yof; 00072 } 00073 return dupbuf; 00074 } 00075 00076 /* instead of reference counting, we create a list */ 00077 CompBuf *pass_on_compbuf(CompBuf *cbuf) 00078 { 00079 CompBuf *dupbuf= (cbuf)? 
alloc_compbuf(cbuf->x, cbuf->y, cbuf->type, 0): NULL; 00080 CompBuf *lastbuf; 00081 00082 if(dupbuf) { 00083 dupbuf->rect= cbuf->rect; 00084 dupbuf->xof= cbuf->xof; 00085 dupbuf->yof= cbuf->yof; 00086 dupbuf->malloc= 0; 00087 00088 /* get last buffer in list, and append dupbuf */ 00089 for(lastbuf= cbuf; lastbuf; lastbuf= lastbuf->next) 00090 if(lastbuf->next==NULL) 00091 break; 00092 lastbuf->next= dupbuf; 00093 dupbuf->prev= lastbuf; 00094 } 00095 return dupbuf; 00096 } 00097 00098 00099 void free_compbuf(CompBuf *cbuf) 00100 { 00101 /* check referencing, then remove from list and set malloc tag */ 00102 if(cbuf->prev || cbuf->next) { 00103 if(cbuf->prev) 00104 cbuf->prev->next= cbuf->next; 00105 if(cbuf->next) 00106 cbuf->next->prev= cbuf->prev; 00107 if(cbuf->malloc) { 00108 if(cbuf->prev) 00109 cbuf->prev->malloc= 1; 00110 else 00111 cbuf->next->malloc= 1; 00112 cbuf->malloc= 0; 00113 } 00114 } 00115 00116 if(cbuf->malloc && cbuf->rect) 00117 MEM_freeN(cbuf->rect); 00118 00119 MEM_freeN(cbuf); 00120 } 00121 00122 void print_compbuf(char *str, CompBuf *cbuf) 00123 { 00124 printf("Compbuf %s %d %d %p\n", str, cbuf->x, cbuf->y, (void *)cbuf->rect); 00125 00126 } 00127 00128 void compbuf_set_node(CompBuf *cbuf, bNode *node) 00129 { 00130 if (cbuf) cbuf->node = node; 00131 } 00132 00133 /* used for disabling node (similar code in node_draw.c for disable line and node_edit for untangling nodes) */ 00134 void node_compo_pass_on(void *UNUSED(data), int UNUSED(thread), struct bNode *node, void *UNUSED(nodedata), 00135 struct bNodeStack **in, struct bNodeStack **out) 00136 { 00137 ListBase links; 00138 LinkInOutsMuteNode *lnk; 00139 int i; 00140 00141 if(node->typeinfo->mutelinksfunc == NULL) 00142 return; 00143 00144 /* Get default muting links (as bNodeStack pointers). 
*/ 00145 links = node->typeinfo->mutelinksfunc(NULL, node, in, out, NULL, NULL); 00146 00147 for(lnk = links.first; lnk; lnk = lnk->next) { 00148 for(i = 0; i < lnk->num_outs; i++) { 00149 if(((bNodeStack*)(lnk->in))->data) 00150 (((bNodeStack*)(lnk->outs))+i)->data = pass_on_compbuf((CompBuf*)((bNodeStack*)(lnk->in))->data); 00151 } 00152 /* If num_outs > 1, lnk->outs was an allocated table of pointers... */ 00153 if(i > 1) 00154 MEM_freeN(lnk->outs); 00155 } 00156 BLI_freelistN(&links); 00157 } 00158 00159 00160 CompBuf *get_cropped_compbuf(rcti *drect, float *rectf, int rectx, int recty, int type) 00161 { 00162 CompBuf *cbuf; 00163 rcti disprect= *drect; 00164 float *outfp; 00165 int dx, y; 00166 00167 if(disprect.xmax>rectx) disprect.xmax= rectx; 00168 if(disprect.ymax>recty) disprect.ymax= recty; 00169 if(disprect.xmin>= disprect.xmax) return NULL; 00170 if(disprect.ymin>= disprect.ymax) return NULL; 00171 00172 cbuf= alloc_compbuf(disprect.xmax-disprect.xmin, disprect.ymax-disprect.ymin, type, 1); 00173 outfp= cbuf->rect; 00174 rectf += type*(disprect.ymin*rectx + disprect.xmin); 00175 dx= type*cbuf->x; 00176 for(y=cbuf->y; y>0; y--, outfp+=dx, rectf+=type*rectx) 00177 memcpy(outfp, rectf, sizeof(float)*dx); 00178 00179 return cbuf; 00180 } 00181 00182 CompBuf *scalefast_compbuf(CompBuf *inbuf, int newx, int newy) 00183 { 00184 CompBuf *outbuf; 00185 float *rectf, *newrectf, *rf; 00186 int x, y, c, pixsize= inbuf->type; 00187 int ofsx, ofsy, stepx, stepy; 00188 00189 if(inbuf->x==newx && inbuf->y==newy) 00190 return dupalloc_compbuf(inbuf); 00191 00192 outbuf= alloc_compbuf(newx, newy, inbuf->type, 1); 00193 newrectf= outbuf->rect; 00194 00195 stepx = (65536.0 * (inbuf->x - 1.0) / (newx - 1.0)) + 0.5; 00196 stepy = (65536.0 * (inbuf->y - 1.0) / (newy - 1.0)) + 0.5; 00197 ofsy = 32768; 00198 00199 for (y = newy; y > 0 ; y--){ 00200 rectf = inbuf->rect; 00201 rectf += pixsize * (ofsy >> 16) * inbuf->x; 00202 00203 ofsy += stepy; 00204 ofsx = 32768; 00205 00206 
for (x = newx ; x>0 ; x--) { 00207 00208 rf= rectf + pixsize*(ofsx >> 16); 00209 for(c=0; c<pixsize; c++) 00210 newrectf[c] = rf[c]; 00211 00212 newrectf+= pixsize; 00213 00214 ofsx += stepx; 00215 } 00216 } 00217 00218 return outbuf; 00219 } 00220 00221 void typecheck_compbuf_color(float *out, float *in, int outtype, int intype) 00222 { 00223 if(intype == outtype) { 00224 memcpy(out, in, sizeof(float)*outtype); 00225 } 00226 else if(outtype==CB_VAL) { 00227 if(intype==CB_VEC2) { 00228 *out= 0.5f*(in[0]+in[1]); 00229 } 00230 else if(intype==CB_VEC3) { 00231 *out= 0.333333f*(in[0]+in[1]+in[2]); 00232 } 00233 else if(intype==CB_RGBA) { 00234 *out= in[0]*0.35f + in[1]*0.45f + in[2]*0.2f; 00235 } 00236 } 00237 else if(outtype==CB_VEC2) { 00238 if(intype==CB_VAL) { 00239 out[0]= in[0]; 00240 out[1]= in[0]; 00241 } 00242 else if(intype==CB_VEC3) { 00243 out[0]= in[0]; 00244 out[1]= in[1]; 00245 } 00246 else if(intype==CB_RGBA) { 00247 out[0]= in[0]; 00248 out[1]= in[1]; 00249 } 00250 } 00251 else if(outtype==CB_VEC3) { 00252 if(intype==CB_VAL) { 00253 out[0]= in[0]; 00254 out[1]= in[0]; 00255 out[2]= in[0]; 00256 } 00257 else if(intype==CB_VEC2) { 00258 out[0]= in[0]; 00259 out[1]= in[1]; 00260 out[2]= 0.0f; 00261 } 00262 else if(intype==CB_RGBA) { 00263 out[0]= in[0]; 00264 out[1]= in[1]; 00265 out[2]= in[2]; 00266 } 00267 } 00268 else if(outtype==CB_RGBA) { 00269 if(intype==CB_VAL) { 00270 out[0]= in[0]; 00271 out[1]= in[0]; 00272 out[2]= in[0]; 00273 out[3]= 1.0f; 00274 } 00275 else if(intype==CB_VEC2) { 00276 out[0]= in[0]; 00277 out[1]= in[1]; 00278 out[2]= 0.0f; 00279 out[3]= 1.0f; 00280 } 00281 else if(intype==CB_VEC3) { 00282 out[0]= in[0]; 00283 out[1]= in[1]; 00284 out[2]= in[2]; 00285 out[3]= 1.0f; 00286 } 00287 } 00288 } 00289 00290 CompBuf *typecheck_compbuf(CompBuf *inbuf, int type) 00291 { 00292 if(inbuf && inbuf->type!=type) { 00293 CompBuf *outbuf; 00294 float *inrf, *outrf; 00295 int x; 00296 00297 outbuf= alloc_compbuf(inbuf->x, inbuf->y, type, 1); 
00298 00299 /* warning note: xof and yof are applied in pixelprocessor, but should be copied otherwise? */ 00300 outbuf->xof= inbuf->xof; 00301 outbuf->yof= inbuf->yof; 00302 00303 if(inbuf->rect_procedural) { 00304 outbuf->rect_procedural= inbuf->rect_procedural; 00305 copy_v3_v3(outbuf->procedural_size, inbuf->procedural_size); 00306 copy_v3_v3(outbuf->procedural_offset, inbuf->procedural_offset); 00307 outbuf->procedural_type= inbuf->procedural_type; 00308 outbuf->node= inbuf->node; 00309 return outbuf; 00310 } 00311 00312 inrf= inbuf->rect; 00313 outrf= outbuf->rect; 00314 x= inbuf->x*inbuf->y; 00315 00316 if(type==CB_VAL) { 00317 if(inbuf->type==CB_VEC2) { 00318 for(; x>0; x--, outrf+= 1, inrf+= 2) 00319 *outrf= 0.5f*(inrf[0]+inrf[1]); 00320 } 00321 else if(inbuf->type==CB_VEC3) { 00322 for(; x>0; x--, outrf+= 1, inrf+= 3) 00323 *outrf= 0.333333f*(inrf[0]+inrf[1]+inrf[2]); 00324 } 00325 else if(inbuf->type==CB_RGBA) { 00326 for(; x>0; x--, outrf+= 1, inrf+= 4) 00327 *outrf= inrf[0]*0.35f + inrf[1]*0.45f + inrf[2]*0.2f; 00328 } 00329 } 00330 else if(type==CB_VEC2) { 00331 if(inbuf->type==CB_VAL) { 00332 for(; x>0; x--, outrf+= 2, inrf+= 1) { 00333 outrf[0]= inrf[0]; 00334 outrf[1]= inrf[0]; 00335 } 00336 } 00337 else if(inbuf->type==CB_VEC3) { 00338 for(; x>0; x--, outrf+= 2, inrf+= 3) { 00339 outrf[0]= inrf[0]; 00340 outrf[1]= inrf[1]; 00341 } 00342 } 00343 else if(inbuf->type==CB_RGBA) { 00344 for(; x>0; x--, outrf+= 2, inrf+= 4) { 00345 outrf[0]= inrf[0]; 00346 outrf[1]= inrf[1]; 00347 } 00348 } 00349 } 00350 else if(type==CB_VEC3) { 00351 if(inbuf->type==CB_VAL) { 00352 for(; x>0; x--, outrf+= 3, inrf+= 1) { 00353 outrf[0]= inrf[0]; 00354 outrf[1]= inrf[0]; 00355 outrf[2]= inrf[0]; 00356 } 00357 } 00358 else if(inbuf->type==CB_VEC2) { 00359 for(; x>0; x--, outrf+= 3, inrf+= 2) { 00360 outrf[0]= inrf[0]; 00361 outrf[1]= inrf[1]; 00362 outrf[2]= 0.0f; 00363 } 00364 } 00365 else if(inbuf->type==CB_RGBA) { 00366 for(; x>0; x--, outrf+= 3, inrf+= 4) { 00367 
outrf[0]= inrf[0]; 00368 outrf[1]= inrf[1]; 00369 outrf[2]= inrf[2]; 00370 } 00371 } 00372 } 00373 else if(type==CB_RGBA) { 00374 if(inbuf->type==CB_VAL) { 00375 for(; x>0; x--, outrf+= 4, inrf+= 1) { 00376 outrf[0]= inrf[0]; 00377 outrf[1]= inrf[0]; 00378 outrf[2]= inrf[0]; 00379 outrf[3]= 1.0f; 00380 } 00381 } 00382 else if(inbuf->type==CB_VEC2) { 00383 for(; x>0; x--, outrf+= 4, inrf+= 2) { 00384 outrf[0]= inrf[0]; 00385 outrf[1]= inrf[1]; 00386 outrf[2]= 0.0f; 00387 outrf[3]= 1.0f; 00388 } 00389 } 00390 else if(inbuf->type==CB_VEC3) { 00391 for(; x>0; x--, outrf+= 4, inrf+= 3) { 00392 outrf[0]= inrf[0]; 00393 outrf[1]= inrf[1]; 00394 outrf[2]= inrf[2]; 00395 outrf[3]= 1.0f; 00396 } 00397 } 00398 } 00399 00400 return outbuf; 00401 } 00402 return inbuf; 00403 } 00404 00405 float *compbuf_get_pixel(CompBuf *cbuf, float *defcol, float *use, int x, int y, int xrad, int yrad) 00406 { 00407 if(cbuf) { 00408 if(cbuf->rect_procedural) { 00409 cbuf->rect_procedural(cbuf, use, (float)x/(float)xrad, (float)y/(float)yrad); 00410 return use; 00411 } 00412 else { 00413 static float col[4]= {0.0f, 0.0f, 0.0f, 0.0f}; 00414 00415 /* map coords */ 00416 x-= cbuf->xof; 00417 y-= cbuf->yof; 00418 00419 if(y<-cbuf->yrad || y>= -cbuf->yrad+cbuf->y) return col; 00420 if(x<-cbuf->xrad || x>= -cbuf->xrad+cbuf->x) return col; 00421 00422 return cbuf->rect + cbuf->type*( (cbuf->yrad+y)*cbuf->x + (cbuf->xrad+x) ); 00423 } 00424 } 00425 else return defcol; 00426 } 00427 00428 /* **************************************************** */ 00429 00430 static CompBuf *composit_check_compbuf(CompBuf *cbuf, int type, CompBuf *outbuf) 00431 { 00432 /* check type */ 00433 CompBuf *dbuf= typecheck_compbuf(cbuf, type); 00434 00435 /* if same as output and translated, duplicate so pixels don't interfere */ 00436 if(dbuf == outbuf && !dbuf->rect_procedural && (dbuf->xof || dbuf->yof)) 00437 dbuf= dupalloc_compbuf(dbuf); 00438 00439 return dbuf; 00440 } 00441 00442 /* Pixel-to-Pixel operation, 1 Image in, 
1 out */ 00443 void composit1_pixel_processor(bNode *node, CompBuf *out, CompBuf *src_buf, float *src_col, 00444 void (*func)(bNode *, float *, float *), 00445 int src_type) 00446 { 00447 CompBuf *src_use; 00448 float *outfp=out->rect, *srcfp; 00449 float color[4]; /* local color if compbuf is procedural */ 00450 int xrad, yrad, x, y; 00451 00452 src_use= composit_check_compbuf(src_buf, src_type, out); 00453 00454 xrad= out->xrad; 00455 yrad= out->yrad; 00456 00457 for(y= -yrad; y<-yrad+out->y; y++) { 00458 for(x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) { 00459 srcfp= compbuf_get_pixel(src_use, src_col, color, x, y, xrad, yrad); 00460 func(node, outfp, srcfp); 00461 } 00462 } 00463 00464 if(src_use!=src_buf) 00465 free_compbuf(src_use); 00466 } 00467 00468 /* Pixel-to-Pixel operation, 2 Images in, 1 out */ 00469 void composit2_pixel_processor(bNode *node, CompBuf *out, CompBuf *src_buf, float *src_col, 00470 CompBuf *fac_buf, float *fac, void (*func)(bNode *, float *, float *, float *), 00471 int src_type, int fac_type) 00472 { 00473 CompBuf *src_use, *fac_use; 00474 float *outfp=out->rect, *srcfp, *facfp; 00475 float color[4]; /* local color if compbuf is procedural */ 00476 int xrad, yrad, x, y; 00477 00478 src_use= composit_check_compbuf(src_buf, src_type, out); 00479 fac_use= composit_check_compbuf(fac_buf, fac_type, out); 00480 00481 xrad= out->xrad; 00482 yrad= out->yrad; 00483 00484 for(y= -yrad; y<-yrad+out->y; y++) { 00485 for(x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) { 00486 srcfp= compbuf_get_pixel(src_use, src_col, color, x, y, xrad, yrad); 00487 facfp= compbuf_get_pixel(fac_use, fac, color, x, y, xrad, yrad); 00488 00489 func(node, outfp, srcfp, facfp); 00490 } 00491 } 00492 if(src_use!=src_buf) 00493 free_compbuf(src_use); 00494 if(fac_use!=fac_buf) 00495 free_compbuf(fac_use); 00496 } 00497 00498 /* Pixel-to-Pixel operation, 3 Images in, 1 out */ 00499 void composit3_pixel_processor(bNode *node, CompBuf *out, CompBuf *src1_buf, float 
*src1_col, CompBuf *src2_buf, float *src2_col, 00500 CompBuf *fac_buf, float *fac, void (*func)(bNode *, float *, float *, float *, float *), 00501 int src1_type, int src2_type, int fac_type) 00502 { 00503 CompBuf *src1_use, *src2_use, *fac_use; 00504 float *outfp=out->rect, *src1fp, *src2fp, *facfp; 00505 float color[4]; /* local color if compbuf is procedural */ 00506 int xrad, yrad, x, y; 00507 00508 src1_use= composit_check_compbuf(src1_buf, src1_type, out); 00509 src2_use= composit_check_compbuf(src2_buf, src2_type, out); 00510 fac_use= composit_check_compbuf(fac_buf, fac_type, out); 00511 00512 xrad= out->xrad; 00513 yrad= out->yrad; 00514 00515 for(y= -yrad; y<-yrad+out->y; y++) { 00516 for(x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) { 00517 src1fp= compbuf_get_pixel(src1_use, src1_col, color, x, y, xrad, yrad); 00518 src2fp= compbuf_get_pixel(src2_use, src2_col, color, x, y, xrad, yrad); 00519 facfp= compbuf_get_pixel(fac_use, fac, color, x, y, xrad, yrad); 00520 00521 func(node, outfp, src1fp, src2fp, facfp); 00522 } 00523 } 00524 00525 if(src1_use!=src1_buf) 00526 free_compbuf(src1_use); 00527 if(src2_use!=src2_buf) 00528 free_compbuf(src2_use); 00529 if(fac_use!=fac_buf) 00530 free_compbuf(fac_use); 00531 } 00532 00533 /* Pixel-to-Pixel operation, 4 Images in, 1 out */ 00534 void composit4_pixel_processor(bNode *node, CompBuf *out, CompBuf *src1_buf, float *src1_col, CompBuf *fac1_buf, float *fac1, 00535 CompBuf *src2_buf, float *src2_col, CompBuf *fac2_buf, float *fac2, 00536 void (*func)(bNode *, float *, float *, float *, float *, float *), 00537 int src1_type, int fac1_type, int src2_type, int fac2_type) 00538 { 00539 CompBuf *src1_use, *src2_use, *fac1_use, *fac2_use; 00540 float *outfp=out->rect, *src1fp, *src2fp, *fac1fp, *fac2fp; 00541 float color[4]; /* local color if compbuf is procedural */ 00542 int xrad, yrad, x, y; 00543 00544 src1_use= composit_check_compbuf(src1_buf, src1_type, out); 00545 src2_use= composit_check_compbuf(src2_buf, 
src2_type, out); 00546 fac1_use= composit_check_compbuf(fac1_buf, fac1_type, out); 00547 fac2_use= composit_check_compbuf(fac2_buf, fac2_type, out); 00548 00549 xrad= out->xrad; 00550 yrad= out->yrad; 00551 00552 for(y= -yrad; y<-yrad+out->y; y++) { 00553 for(x= -xrad; x<-xrad+out->x; x++, outfp+=out->type) { 00554 src1fp= compbuf_get_pixel(src1_use, src1_col, color, x, y, xrad, yrad); 00555 src2fp= compbuf_get_pixel(src2_use, src2_col, color, x, y, xrad, yrad); 00556 fac1fp= compbuf_get_pixel(fac1_use, fac1, color, x, y, xrad, yrad); 00557 fac2fp= compbuf_get_pixel(fac2_use, fac2, color, x, y, xrad, yrad); 00558 00559 func(node, outfp, src1fp, fac1fp, src2fp, fac2fp); 00560 } 00561 } 00562 00563 if(src1_use!=src1_buf) 00564 free_compbuf(src1_use); 00565 if(src2_use!=src2_buf) 00566 free_compbuf(src2_use); 00567 if(fac1_use!=fac1_buf) 00568 free_compbuf(fac1_use); 00569 if(fac2_use!=fac2_buf) 00570 free_compbuf(fac2_use); 00571 } 00572 00573 00574 CompBuf *valbuf_from_rgbabuf(CompBuf *cbuf, int channel) 00575 { 00576 CompBuf *valbuf= alloc_compbuf(cbuf->x, cbuf->y, CB_VAL, 1); 00577 float *valf, *rectf; 00578 int tot; 00579 00580 /* warning note: xof and yof are applied in pixelprocessor, but should be copied otherwise? 
*/ 00581 valbuf->xof= cbuf->xof; 00582 valbuf->yof= cbuf->yof; 00583 00584 valf= valbuf->rect; 00585 00586 /* defaults to returning alpha channel */ 00587 if ((channel < CHAN_R) || (channel > CHAN_A)) channel = CHAN_A; 00588 00589 rectf= cbuf->rect + channel; 00590 00591 for(tot= cbuf->x*cbuf->y; tot>0; tot--, valf++, rectf+=4) 00592 *valf= *rectf; 00593 00594 return valbuf; 00595 } 00596 00597 static CompBuf *generate_procedural_preview(CompBuf *cbuf, int newx, int newy) 00598 { 00599 CompBuf *outbuf; 00600 float *outfp; 00601 int xrad, yrad, x, y; 00602 00603 outbuf= alloc_compbuf(newx, newy, CB_RGBA, 1); 00604 00605 outfp= outbuf->rect; 00606 xrad= outbuf->xrad; 00607 yrad= outbuf->yrad; 00608 00609 for(y= -yrad; y<-yrad+outbuf->y; y++) 00610 for(x= -xrad; x<-xrad+outbuf->x; x++, outfp+=outbuf->type) 00611 cbuf->rect_procedural(cbuf, outfp, (float)x/(float)xrad, (float)y/(float)yrad); 00612 00613 return outbuf; 00614 } 00615 00616 void generate_preview(void *data, bNode *node, CompBuf *stackbuf) 00617 { 00618 RenderData *rd= data; 00619 bNodePreview *preview= node->preview; 00620 int xsize, ysize; 00621 int profile_from= (rd->color_mgt_flag & R_COLOR_MANAGEMENT)? 
IB_PROFILE_LINEAR_RGB: IB_PROFILE_SRGB; 00622 int predivide= (rd->color_mgt_flag & R_COLOR_MANAGEMENT_PREDIVIDE); 00623 int dither= 0; 00624 unsigned char *rect; 00625 00626 if(preview && stackbuf) { 00627 CompBuf *cbuf, *stackbuf_use; 00628 00629 if(stackbuf->rect==NULL && stackbuf->rect_procedural==NULL) return; 00630 00631 stackbuf_use= typecheck_compbuf(stackbuf, CB_RGBA); 00632 00633 if(stackbuf->x > stackbuf->y) { 00634 xsize= 140; 00635 ysize= (140*stackbuf->y)/stackbuf->x; 00636 } 00637 else { 00638 ysize= 140; 00639 xsize= (140*stackbuf->x)/stackbuf->y; 00640 } 00641 00642 if(stackbuf_use->rect_procedural) 00643 cbuf= generate_procedural_preview(stackbuf_use, xsize, ysize); 00644 else 00645 cbuf= scalefast_compbuf(stackbuf_use, xsize, ysize); 00646 00647 /* convert to byte for preview */ 00648 rect= MEM_callocN(sizeof(unsigned char)*4*xsize*ysize, "bNodePreview.rect"); 00649 00650 IMB_buffer_byte_from_float(rect, cbuf->rect, 00651 4, dither, IB_PROFILE_SRGB, profile_from, predivide, 00652 xsize, ysize, xsize, xsize); 00653 00654 free_compbuf(cbuf); 00655 if(stackbuf_use!=stackbuf) 00656 free_compbuf(stackbuf_use); 00657 00658 BLI_lock_thread(LOCK_PREVIEW); 00659 00660 if(preview->rect) 00661 MEM_freeN(preview->rect); 00662 preview->xsize= xsize; 00663 preview->ysize= ysize; 00664 preview->rect= rect; 00665 00666 BLI_unlock_thread(LOCK_PREVIEW); 00667 } 00668 } 00669 00670 void do_rgba_to_yuva(bNode *UNUSED(node), float *out, float *in) 00671 { 00672 rgb_to_yuv(in[0],in[1],in[2], &out[0], &out[1], &out[2]); 00673 out[3]=in[3]; 00674 } 00675 00676 void do_rgba_to_hsva(bNode *UNUSED(node), float *out, float *in) 00677 { 00678 rgb_to_hsv(in[0],in[1],in[2], &out[0], &out[1], &out[2]); 00679 out[3]=in[3]; 00680 } 00681 00682 void do_rgba_to_ycca(bNode *UNUSED(node), float *out, float *in) 00683 { 00684 rgb_to_ycc(in[0],in[1],in[2], &out[0], &out[1], &out[2], BLI_YCC_ITU_BT601); 00685 out[3]=in[3]; 00686 } 00687 00688 void do_yuva_to_rgba(bNode *UNUSED(node), 
float *out, float *in) 00689 { 00690 yuv_to_rgb(in[0],in[1],in[2], &out[0], &out[1], &out[2]); 00691 out[3]=in[3]; 00692 } 00693 00694 void do_hsva_to_rgba(bNode *UNUSED(node), float *out, float *in) 00695 { 00696 hsv_to_rgb(in[0],in[1],in[2], &out[0], &out[1], &out[2]); 00697 out[3]=in[3]; 00698 } 00699 00700 void do_ycca_to_rgba(bNode *UNUSED(node), float *out, float *in) 00701 { 00702 ycc_to_rgb(in[0],in[1],in[2], &out[0], &out[1], &out[2], BLI_YCC_ITU_BT601); 00703 out[3]=in[3]; 00704 } 00705 00706 void do_copy_rgba(bNode *UNUSED(node), float *out, float *in) 00707 { 00708 copy_v4_v4(out, in); 00709 } 00710 00711 void do_copy_rgb(bNode *UNUSED(node), float *out, float *in) 00712 { 00713 copy_v3_v3(out, in); 00714 out[3]= 1.0f; 00715 } 00716 00717 void do_copy_value(bNode *UNUSED(node), float *out, float *in) 00718 { 00719 out[0]= in[0]; 00720 } 00721 00722 void do_copy_a_rgba(bNode *UNUSED(node), float *out, float *in, float *fac) 00723 { 00724 copy_v3_v3(out, in); 00725 out[3]= *fac; 00726 } 00727 00728 /* only accepts RGBA buffers */ 00729 void gamma_correct_compbuf(CompBuf *img, int inversed) 00730 { 00731 float *drect; 00732 int x; 00733 00734 if(img->type!=CB_RGBA) return; 00735 00736 drect= img->rect; 00737 if(inversed) { 00738 for(x=img->x*img->y; x>0; x--, drect+=4) { 00739 if(drect[0]>0.0f) drect[0]= sqrt(drect[0]); else drect[0]= 0.0f; 00740 if(drect[1]>0.0f) drect[1]= sqrt(drect[1]); else drect[1]= 0.0f; 00741 if(drect[2]>0.0f) drect[2]= sqrt(drect[2]); else drect[2]= 0.0f; 00742 } 00743 } 00744 else { 00745 for(x=img->x*img->y; x>0; x--, drect+=4) { 00746 if(drect[0]>0.0f) drect[0]*= drect[0]; else drect[0]= 0.0f; 00747 if(drect[1]>0.0f) drect[1]*= drect[1]; else drect[1]= 0.0f; 00748 if(drect[2]>0.0f) drect[2]*= drect[2]; else drect[2]= 0.0f; 00749 } 00750 } 00751 } 00752 00753 void premul_compbuf(CompBuf *img, int inversed) 00754 { 00755 float *drect; 00756 int x; 00757 00758 if(img->type!=CB_RGBA) return; 00759 00760 drect= img->rect; 00761 
if(inversed) { 00762 for(x=img->x*img->y; x>0; x--, drect+=4) { 00763 if(fabsf(drect[3]) < 1e-5f) { 00764 drect[0]= 0.0f; 00765 drect[1]= 0.0f; 00766 drect[2]= 0.0f; 00767 } 00768 else { 00769 drect[0] /= drect[3]; 00770 drect[1] /= drect[3]; 00771 drect[2] /= drect[3]; 00772 } 00773 } 00774 } 00775 else { 00776 for(x=img->x*img->y; x>0; x--, drect+=4) { 00777 drect[0] *= drect[3]; 00778 drect[1] *= drect[3]; 00779 drect[2] *= drect[3]; 00780 } 00781 } 00782 } 00783 00784 00785 00786 /* 00787 * 2D Fast Hartley Transform, used for convolution 00788 */ 00789 00790 typedef float fREAL; 00791 00792 // returns next highest power of 2 of x, as well it's log2 in L2 00793 static unsigned int nextPow2(unsigned int x, unsigned int* L2) 00794 { 00795 unsigned int pw, x_notpow2 = x & (x-1); 00796 *L2 = 0; 00797 while (x>>=1) ++(*L2); 00798 pw = 1 << (*L2); 00799 if (x_notpow2) { (*L2)++; pw<<=1; } 00800 return pw; 00801 } 00802 00803 //------------------------------------------------------------------------------ 00804 00805 // from FXT library by Joerg Arndt, faster in order bitreversal 00806 // use: r = revbin_upd(r, h) where h = N>>1 00807 static unsigned int revbin_upd(unsigned int r, unsigned int h) 00808 { 00809 while (!((r^=h)&h)) h >>= 1; 00810 return r; 00811 } 00812 //------------------------------------------------------------------------------ 00813 static void FHT(fREAL* data, unsigned int M, unsigned int inverse) 00814 { 00815 double tt, fc, dc, fs, ds, a = M_PI; 00816 fREAL t1, t2; 00817 int n2, bd, bl, istep, k, len = 1 << M, n = 1; 00818 00819 int i, j = 0; 00820 unsigned int Nh = len >> 1; 00821 for (i=1;i<(len-1);++i) { 00822 j = revbin_upd(j, Nh); 00823 if (j>i) { 00824 t1 = data[i]; 00825 data[i] = data[j]; 00826 data[j] = t1; 00827 } 00828 } 00829 00830 do { 00831 fREAL* data_n = &data[n]; 00832 00833 istep = n << 1; 00834 for (k=0; k<len; k+=istep) { 00835 t1 = data_n[k]; 00836 data_n[k] = data[k] - t1; 00837 data[k] += t1; 00838 } 00839 00840 n2 = n >> 
1; 00841 if (n>2) { 00842 fc = dc = cos(a); 00843 fs = ds = sqrt(1.0 - fc*fc); //sin(a); 00844 bd = n-2; 00845 for (bl=1; bl<n2; bl++) { 00846 fREAL* data_nbd = &data_n[bd]; 00847 fREAL* data_bd = &data[bd]; 00848 for (k=bl; k<len; k+=istep) { 00849 t1 = fc*data_n[k] + fs*data_nbd[k]; 00850 t2 = fs*data_n[k] - fc*data_nbd[k]; 00851 data_n[k] = data[k] - t1; 00852 data_nbd[k] = data_bd[k] - t2; 00853 data[k] += t1; 00854 data_bd[k] += t2; 00855 } 00856 tt = fc*dc - fs*ds; 00857 fs = fs*dc + fc*ds; 00858 fc = tt; 00859 bd -= 2; 00860 } 00861 } 00862 00863 if (n>1) { 00864 for (k=n2; k<len; k+=istep) { 00865 t1 = data_n[k]; 00866 data_n[k] = data[k] - t1; 00867 data[k] += t1; 00868 } 00869 } 00870 00871 n = istep; 00872 a *= 0.5; 00873 } while (n<len); 00874 00875 if (inverse) { 00876 fREAL sc = (fREAL)1 / (fREAL)len; 00877 for (k=0; k<len; ++k) 00878 data[k] *= sc; 00879 } 00880 } 00881 //------------------------------------------------------------------------------ 00882 /* 2D Fast Hartley Transform, Mx/My -> log2 of width/height, 00883 nzp -> the row where zero pad data starts, 00884 inverse -> see above */ 00885 static void FHT2D(fREAL *data, unsigned int Mx, unsigned int My, 00886 unsigned int nzp, unsigned int inverse) 00887 { 00888 unsigned int i, j, Nx, Ny, maxy; 00889 fREAL t; 00890 00891 Nx = 1 << Mx; 00892 Ny = 1 << My; 00893 00894 // rows (forward transform skips 0 pad data) 00895 maxy = inverse ? 
Ny : nzp; 00896 for (j=0; j<maxy; ++j) 00897 FHT(&data[Nx*j], Mx, inverse); 00898 00899 // transpose data 00900 if (Nx==Ny) { // square 00901 for (j=0; j<Ny; ++j) 00902 for (i=j+1; i<Nx; ++i) { 00903 unsigned int op = i + (j << Mx), np = j + (i << My); 00904 t=data[op], data[op]=data[np], data[np]=t; 00905 } 00906 } 00907 else { // rectangular 00908 unsigned int k, Nym = Ny-1, stm = 1 << (Mx + My); 00909 for (i=0; stm>0; i++) { 00910 #define pred(k) (((k & Nym) << Mx) + (k >> My)) 00911 for (j=pred(i); j>i; j=pred(j)); 00912 if (j < i) continue; 00913 for (k=i, j=pred(i); j!=i; k=j, j=pred(j), stm--) 00914 { t=data[j], data[j]=data[k], data[k]=t; } 00915 #undef pred 00916 stm--; 00917 } 00918 } 00919 // swap Mx/My & Nx/Ny 00920 i = Nx, Nx = Ny, Ny = i; 00921 i = Mx, Mx = My, My = i; 00922 00923 // now columns == transposed rows 00924 for (j=0; j<Ny; ++j) 00925 FHT(&data[Nx*j], Mx, inverse); 00926 00927 // finalize 00928 for (j=0; j<=(Ny >> 1); j++) { 00929 unsigned int jm = (Ny - j) & (Ny-1); 00930 unsigned int ji = j << Mx; 00931 unsigned int jmi = jm << Mx; 00932 for (i=0; i<=(Nx >> 1); i++) { 00933 unsigned int im = (Nx - i) & (Nx-1); 00934 fREAL A = data[ji + i]; 00935 fREAL B = data[jmi + i]; 00936 fREAL C = data[ji + im]; 00937 fREAL D = data[jmi + im]; 00938 fREAL E = (fREAL)0.5*((A + D) - (B + C)); 00939 data[ji + i] = A - E; 00940 data[jmi + i] = B + E; 00941 data[ji + im] = C + E; 00942 data[jmi + im] = D - E; 00943 } 00944 } 00945 00946 } 00947 00948 //------------------------------------------------------------------------------ 00949 00950 /* 2D convolution calc, d1 *= d2, M/N - > log2 of width/height */ 00951 static void fht_convolve(fREAL* d1, fREAL* d2, unsigned int M, unsigned int N) 00952 { 00953 fREAL a, b; 00954 unsigned int i, j, k, L, mj, mL; 00955 unsigned int m = 1 << M, n = 1 << N; 00956 unsigned int m2 = 1 << (M-1), n2 = 1 << (N-1); 00957 unsigned int mn2 = m << (N-1); 00958 00959 d1[0] *= d2[0]; 00960 d1[mn2] *= d2[mn2]; 00961 d1[m2] *= 
d2[m2]; 00962 d1[m2 + mn2] *= d2[m2 + mn2]; 00963 for (i=1; i<m2; i++) { 00964 k = m - i; 00965 a = d1[i]*d2[i] - d1[k]*d2[k]; 00966 b = d1[k]*d2[i] + d1[i]*d2[k]; 00967 d1[i] = (b + a)*(fREAL)0.5; 00968 d1[k] = (b - a)*(fREAL)0.5; 00969 a = d1[i + mn2]*d2[i + mn2] - d1[k + mn2]*d2[k + mn2]; 00970 b = d1[k + mn2]*d2[i + mn2] + d1[i + mn2]*d2[k + mn2]; 00971 d1[i + mn2] = (b + a)*(fREAL)0.5; 00972 d1[k + mn2] = (b - a)*(fREAL)0.5; 00973 } 00974 for (j=1; j<n2; j++) { 00975 L = n - j; 00976 mj = j << M; 00977 mL = L << M; 00978 a = d1[mj]*d2[mj] - d1[mL]*d2[mL]; 00979 b = d1[mL]*d2[mj] + d1[mj]*d2[mL]; 00980 d1[mj] = (b + a)*(fREAL)0.5; 00981 d1[mL] = (b - a)*(fREAL)0.5; 00982 a = d1[m2 + mj]*d2[m2 + mj] - d1[m2 + mL]*d2[m2 + mL]; 00983 b = d1[m2 + mL]*d2[m2 + mj] + d1[m2 + mj]*d2[m2 + mL]; 00984 d1[m2 + mj] = (b + a)*(fREAL)0.5; 00985 d1[m2 + mL] = (b - a)*(fREAL)0.5; 00986 } 00987 for (i=1; i<m2; i++) { 00988 k = m - i; 00989 for (j=1; j<n2; j++) { 00990 L = n - j; 00991 mj = j << M; 00992 mL = L << M; 00993 a = d1[i + mj]*d2[i + mj] - d1[k + mL]*d2[k + mL]; 00994 b = d1[k + mL]*d2[i + mj] + d1[i + mj]*d2[k + mL]; 00995 d1[i + mj] = (b + a)*(fREAL)0.5; 00996 d1[k + mL] = (b - a)*(fREAL)0.5; 00997 a = d1[i + mL]*d2[i + mL] - d1[k + mj]*d2[k + mj]; 00998 b = d1[k + mj]*d2[i + mL] + d1[i + mL]*d2[k + mj]; 00999 d1[i + mL] = (b + a)*(fREAL)0.5; 01000 d1[k + mj] = (b - a)*(fREAL)0.5; 01001 } 01002 } 01003 } 01004 01005 //------------------------------------------------------------------------------ 01006 01007 void convolve(CompBuf* dst, CompBuf* in1, CompBuf* in2) 01008 { 01009 fREAL *data1, *data2, *fp; 01010 unsigned int w2, h2, hw, hh, log2_w, log2_h; 01011 fRGB wt, *colp; 01012 int x, y, ch; 01013 int xbl, ybl, nxb, nyb, xbsz, ybsz; 01014 int in2done = 0; 01015 01016 CompBuf* rdst = alloc_compbuf(in1->x, in1->y, in1->type, 1); 01017 01018 // convolution result width & height 01019 w2 = 2*in2->x - 1; 01020 h2 = 2*in2->y - 1; 01021 // FFT pow2 required size & log2 
01022 w2 = nextPow2(w2, &log2_w); 01023 h2 = nextPow2(h2, &log2_h); 01024 01025 // alloc space 01026 data1 = (fREAL*)MEM_callocN(3*w2*h2*sizeof(fREAL), "convolve_fast FHT data1"); 01027 data2 = (fREAL*)MEM_callocN(w2*h2*sizeof(fREAL), "convolve_fast FHT data2"); 01028 01029 // normalize convolutor 01030 wt[0] = wt[1] = wt[2] = 0.f; 01031 for (y=0; y<in2->y; y++) { 01032 colp = (fRGB*)&in2->rect[y*in2->x*in2->type]; 01033 for (x=0; x<in2->x; x++) 01034 fRGB_add(wt, colp[x]); 01035 } 01036 if (wt[0] != 0.f) wt[0] = 1.f/wt[0]; 01037 if (wt[1] != 0.f) wt[1] = 1.f/wt[1]; 01038 if (wt[2] != 0.f) wt[2] = 1.f/wt[2]; 01039 for (y=0; y<in2->y; y++) { 01040 colp = (fRGB*)&in2->rect[y*in2->x*in2->type]; 01041 for (x=0; x<in2->x; x++) 01042 fRGB_colormult(colp[x], wt); 01043 } 01044 01045 // copy image data, unpacking interleaved RGBA into separate channels 01046 // only need to calc data1 once 01047 01048 // block add-overlap 01049 hw = in2->x >> 1; 01050 hh = in2->y >> 1; 01051 xbsz = (w2 + 1) - in2->x; 01052 ybsz = (h2 + 1) - in2->y; 01053 nxb = in1->x / xbsz; 01054 if (in1->x % xbsz) nxb++; 01055 nyb = in1->y / ybsz; 01056 if (in1->y % ybsz) nyb++; 01057 for (ybl=0; ybl<nyb; ybl++) { 01058 for (xbl=0; xbl<nxb; xbl++) { 01059 01060 // each channel one by one 01061 for (ch=0; ch<3; ch++) { 01062 fREAL* data1ch = &data1[ch*w2*h2]; 01063 01064 // only need to calc fht data from in2 once, can re-use for every block 01065 if (!in2done) { 01066 // in2, channel ch -> data1 01067 for (y=0; y<in2->y; y++) { 01068 fp = &data1ch[y*w2]; 01069 colp = (fRGB*)&in2->rect[y*in2->x*in2->type]; 01070 for (x=0; x<in2->x; x++) 01071 fp[x] = colp[x][ch]; 01072 } 01073 } 01074 01075 // in1, channel ch -> data2 01076 memset(data2, 0, w2*h2*sizeof(fREAL)); 01077 for (y=0; y<ybsz; y++) { 01078 int yy = ybl*ybsz + y; 01079 if (yy >= in1->y) continue; 01080 fp = &data2[y*w2]; 01081 colp = (fRGB*)&in1->rect[yy*in1->x*in1->type]; 01082 for (x=0; x<xbsz; x++) { 01083 int xx = xbl*xbsz + x; 01084 if (xx >= 
in1->x) continue; 01085 fp[x] = colp[xx][ch]; 01086 } 01087 } 01088 01089 // forward FHT 01090 // zero pad data start is different for each == height+1 01091 if (!in2done) FHT2D(data1ch, log2_w, log2_h, in2->y+1, 0); 01092 FHT2D(data2, log2_w, log2_h, in2->y+1, 0); 01093 01094 // FHT2D transposed data, row/col now swapped 01095 // convolve & inverse FHT 01096 fht_convolve(data2, data1ch, log2_h, log2_w); 01097 FHT2D(data2, log2_h, log2_w, 0, 1); 01098 // data again transposed, so in order again 01099 01100 // overlap-add result 01101 for (y=0; y<(int)h2; y++) { 01102 const int yy = ybl*ybsz + y - hh; 01103 if ((yy < 0) || (yy >= in1->y)) continue; 01104 fp = &data2[y*w2]; 01105 colp = (fRGB*)&rdst->rect[yy*in1->x*in1->type]; 01106 for (x=0; x<(int)w2; x++) { 01107 const int xx = xbl*xbsz + x - hw; 01108 if ((xx < 0) || (xx >= in1->x)) continue; 01109 colp[xx][ch] += fp[x]; 01110 } 01111 } 01112 01113 } 01114 in2done = 1; 01115 } 01116 } 01117 01118 MEM_freeN(data2); 01119 MEM_freeN(data1); 01120 memcpy(dst->rect, rdst->rect, sizeof(float)*dst->x*dst->y*dst->type); 01121 free_compbuf(rdst); 01122 } 01123 01124 01125 /* 01126 * 01127 * Utility functions qd_* should probably be intergrated better with other functions here. 
01128 * 01129 */ 01130 // sets fcol to pixelcolor at (x, y) 01131 void qd_getPixel(CompBuf* src, int x, int y, float* col) 01132 { 01133 if(src->rect_procedural) { 01134 float bc[4]; 01135 src->rect_procedural(src, bc, (float)x/(float)src->xrad, (float)y/(float)src->yrad); 01136 01137 switch(src->type){ 01138 /* these fallthrough to get all the channels */ 01139 case CB_RGBA: col[3]=bc[3]; 01140 case CB_VEC3: col[2]=bc[2]; 01141 case CB_VEC2: col[1]=bc[1]; 01142 case CB_VAL: col[0]=bc[0]; 01143 } 01144 } 01145 else if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) { 01146 float* bc = &src->rect[(x + y*src->x)*src->type]; 01147 switch(src->type){ 01148 /* these fallthrough to get all the channels */ 01149 case CB_RGBA: col[3]=bc[3]; 01150 case CB_VEC3: col[2]=bc[2]; 01151 case CB_VEC2: col[1]=bc[1]; 01152 case CB_VAL: col[0]=bc[0]; 01153 } 01154 } 01155 else { 01156 switch(src->type){ 01157 /* these fallthrough to get all the channels */ 01158 case CB_RGBA: col[3]=0.0; 01159 case CB_VEC3: col[2]=0.0; 01160 case CB_VEC2: col[1]=0.0; 01161 case CB_VAL: col[0]=0.0; 01162 } 01163 } 01164 } 01165 01166 // sets pixel (x, y) to color col 01167 void qd_setPixel(CompBuf* src, int x, int y, float* col) 01168 { 01169 if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) { 01170 float* bc = &src->rect[(x + y*src->x)*src->type]; 01171 switch(src->type){ 01172 /* these fallthrough to get all the channels */ 01173 case CB_RGBA: bc[3]=col[3]; 01174 case CB_VEC3: bc[2]=col[2]; 01175 case CB_VEC2: bc[1]=col[1]; 01176 case CB_VAL: bc[0]=col[0]; 01177 } 01178 } 01179 } 01180 01181 // adds fcol to pixelcolor (x, y) 01182 void qd_addPixel(CompBuf* src, int x, int y, float* col) 01183 { 01184 if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) { 01185 float* bc = &src->rect[(x + y*src->x)*src->type]; 01186 bc[0] += col[0], bc[1] += col[1], bc[2] += col[2]; 01187 } 01188 } 01189 01190 // multiplies pixel by factor value f 01191 void qd_multPixel(CompBuf* src, int x, int 
y, float f) 01192 { 01193 if ((x >= 0) && (x < src->x) && (y >= 0) && (y < src->y)) { 01194 float* bc = &src->rect[(x + y*src->x)*src->type]; 01195 bc[0] *= f, bc[1] *= f, bc[2] *= f; 01196 } 01197 } 01198 01199 // bilinear interpolation with wraparound 01200 void qd_getPixelLerpWrap(CompBuf* src, float u, float v, float* col) 01201 { 01202 const float ufl = floor(u), vfl = floor(v); 01203 const int nx = (int)ufl % src->x, ny = (int)vfl % src->y; 01204 const int x1 = (nx < 0) ? (nx + src->x) : nx; 01205 const int y1 = (ny < 0) ? (ny + src->y) : ny; 01206 const int x2 = (x1 + 1) % src->x, y2 = (y1 + 1) % src->y; 01207 const float* c00 = &src->rect[(x1 + y1*src->x)*src->type]; 01208 const float* c10 = &src->rect[(x2 + y1*src->x)*src->type]; 01209 const float* c01 = &src->rect[(x1 + y2*src->x)*src->type]; 01210 const float* c11 = &src->rect[(x2 + y2*src->x)*src->type]; 01211 const float uf = u - ufl, vf = v - vfl; 01212 const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf; 01213 col[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0]; 01214 if (src->type != CB_VAL) { 01215 col[1] = w00*c00[1] + w10*c10[1] + w01*c01[1] + w11*c11[1]; 01216 col[2] = w00*c00[2] + w10*c10[2] + w01*c01[2] + w11*c11[2]; 01217 col[3] = w00*c00[3] + w10*c10[3] + w01*c01[3] + w11*c11[3]; 01218 } 01219 } 01220 01221 // as above, without wrap around 01222 void qd_getPixelLerp(CompBuf* src, float u, float v, float* col) 01223 { 01224 const float ufl = floor(u), vfl = floor(v); 01225 const int x1 = (int)ufl, y1 = (int)vfl; 01226 const int x2 = (int)ceil(u), y2 = (int)ceil(v); 01227 if ((x2 >= 0) && (y2 >= 0) && (x1 < src->x) && (y1 < src->y)) { 01228 const float B[4] = {0,0,0,0}; 01229 const int ox1 = (x1 < 0), oy1 = (y1 < 0), ox2 = (x2 >= src->x), oy2 = (y2 >= src->y); 01230 const float* c00 = (ox1 || oy1) ? B : &src->rect[(x1 + y1*src->x)*src->type]; 01231 const float* c10 = (ox2 || oy1) ? 
B : &src->rect[(x2 + y1*src->x)*src->type]; 01232 const float* c01 = (ox1 || oy2) ? B : &src->rect[(x1 + y2*src->x)*src->type]; 01233 const float* c11 = (ox2 || oy2) ? B : &src->rect[(x2 + y2*src->x)*src->type]; 01234 const float uf = u - ufl, vf = v - vfl; 01235 const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf; 01236 col[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0]; 01237 if (src->type != CB_VAL) { 01238 col[1] = w00*c00[1] + w10*c10[1] + w01*c01[1] + w11*c11[1]; 01239 col[2] = w00*c00[2] + w10*c10[2] + w01*c01[2] + w11*c11[2]; 01240 col[3] = w00*c00[3] + w10*c10[3] + w01*c01[3] + w11*c11[3]; 01241 } 01242 } 01243 else col[0] = col[1] = col[2] = col[3] = 0.f; 01244 } 01245 01246 // as above, sampling only one channel 01247 void qd_getPixelLerpChan(CompBuf* src, float u, float v, int chan, float* out) 01248 { 01249 const float ufl = floor(u), vfl = floor(v); 01250 const int x1 = (int)ufl, y1 = (int)vfl; 01251 const int x2 = (int)ceil(u), y2 = (int)ceil(v); 01252 if (chan >= src->type) chan = 0; 01253 if ((x2 >= 0) && (y2 >= 0) && (x1 < src->x) && (y1 < src->y)) { 01254 const float B[4] = {0,0,0,0}; 01255 const int ox1 = (x1 < 0), oy1 = (y1 < 0), ox2 = (x2 >= src->x), oy2 = (y2 >= src->y); 01256 const float* c00 = (ox1 || oy1) ? B : &src->rect[(x1 + y1*src->x)*src->type + chan]; 01257 const float* c10 = (ox2 || oy1) ? B : &src->rect[(x2 + y1*src->x)*src->type + chan]; 01258 const float* c01 = (ox1 || oy2) ? B : &src->rect[(x1 + y2*src->x)*src->type + chan]; 01259 const float* c11 = (ox2 || oy2) ? 
B : &src->rect[(x2 + y2*src->x)*src->type + chan]; 01260 const float uf = u - ufl, vf = v - vfl; 01261 const float w00=(1.f-uf)*(1.f-vf), w10=uf*(1.f-vf), w01=(1.f-uf)*vf, w11=uf*vf; 01262 out[0] = w00*c00[0] + w10*c10[0] + w01*c01[0] + w11*c11[0]; 01263 } 01264 else *out = 0.f; 01265 } 01266 01267 01268 CompBuf* qd_downScaledCopy(CompBuf* src, int scale) 01269 { 01270 CompBuf* fbuf; 01271 if (scale <= 1) 01272 fbuf = dupalloc_compbuf(src); 01273 else { 01274 int nw = src->x/scale, nh = src->y/scale; 01275 if ((2*(src->x % scale)) > scale) nw++; 01276 if ((2*(src->y % scale)) > scale) nh++; 01277 fbuf = alloc_compbuf(nw, nh, src->type, 1); 01278 { 01279 int x, y, xx, yy, sx, sy, mx, my; 01280 float colsum[4] = {0.0f, 0.0f, 0.0f, 0.0f}; 01281 float fscale = 1.f/(float)(scale*scale); 01282 for (y=0; y<nh; y++) { 01283 fRGB* fcolp = (fRGB*)&fbuf->rect[y*fbuf->x*fbuf->type]; 01284 yy = y*scale; 01285 my = yy + scale; 01286 if (my > src->y) my = src->y; 01287 for (x=0; x<nw; x++) { 01288 xx = x*scale; 01289 mx = xx + scale; 01290 if (mx > src->x) mx = src->x; 01291 colsum[0] = colsum[1] = colsum[2] = 0.f; 01292 for (sy=yy; sy<my; sy++) { 01293 fRGB* scolp = (fRGB*)&src->rect[sy*src->x*src->type]; 01294 for (sx=xx; sx<mx; sx++) 01295 fRGB_add(colsum, scolp[sx]); 01296 } 01297 fRGB_mult(colsum, fscale); 01298 fRGB_copy(fcolp[x], colsum); 01299 } 01300 } 01301 } 01302 } 01303 return fbuf; 01304 } 01305 01306 // fast g.blur, per channel 01307 // xy var. 
bits 1 & 2 can be used to blur in x or y direction separately

/* In-place recursive (IIR) Gaussian blur of one channel of src.
 *
 * src   - image buffer; pixel stride is src->type floats, channel 'chan' of
 *         every pixel is filtered in place
 * sigma - Gaussian standard deviation; values < 0.5 are rejected (no-op)
 * chan  - channel index within each pixel to blur
 * xy    - direction flags: bit 1 = horizontal pass, bit 2 = vertical pass;
 *         out-of-range values are treated as 3 (both directions)
 *
 * Implements the Young/van Vliet 3rd-order recursive approximation with
 * Triggs/Sdika border corrections; the cost per pixel is independent of
 * sigma, unlike a direct FIR convolution.
 */
void IIR_gauss(CompBuf* src, float sigma, int chan, int xy)
{
	double q, q2, sc, cf[4], tsM[9], tsu[3], tsv[3];
	double *X, *Y, *W;
	int i, x, y, sz;

	// <0.5 not valid, though can have a possibly useful sort of sharpening effect
	if (sigma < 0.5f) return;

	if ((xy < 1) || (xy > 3)) xy = 3;

	// XXX The YVV macro defined below explicitly expects sources of at least 3x3 pixels,
	// so just skipping blur along faulty direction if src's def is below that limit!
	if (src->x < 3) xy &= ~(int) 1;
	if (src->y < 3) xy &= ~(int) 2;
	if (xy < 1) return;

	// see "Recursive Gabor Filtering" by Young/VanVliet
	// all factors here in double.prec. Required, because for single.prec it seems to blow up if sigma > ~200
	// q maps sigma to the filter's pole parameter (two regimes, per the paper)
	if (sigma >= 3.556f)
		q = 0.9804f*(sigma - 3.556f) + 2.5091f;
	else // sigma >= 0.5
		q = (0.0561f*sigma + 0.5784f)*sigma - 0.2568f;
	q2 = q*q;
	sc = (1.1668 + q)*(3.203729649 + (2.21566 + q)*q);
	// no gabor filtering here, so no complex multiplies, just the regular coefs.
	// all negated here, so as not to have to recalc Triggs/Sdika matrix
	cf[1] = q*(5.788961737 + (6.76492 + 3.0*q)*q)/ sc;
	cf[2] = -q2*(3.38246 + 3.0*q)/sc;
	// 0 & 3 unchanged
	cf[3] = q2*q/sc;
	// cf[0] normalizes so the filter has unit DC gain
	cf[0] = 1.0 - cf[1] - cf[2] - cf[3];

	// Triggs/Sdika border corrections,
	// it seems to work, not entirely sure if it is actually totally correct,
	// Besides J.M.Geusebroek's anigauss.c (see http://www.science.uva.nl/~mark),
	// found one other implementation by Cristoph Lampert,
	// but neither seem to be quite the same, result seems to be ok so far anyway.
	// Extra scale factor here to not have to do it in filter,
	// though maybe this had something to with the precision errors
	sc = cf[0]/((1.0 + cf[1] - cf[2] + cf[3])*(1.0 - cf[1] - cf[2] - cf[3])*(1.0 + cf[2] + (cf[1] - cf[3])*cf[3]));
	tsM[0] = sc*(-cf[3]*cf[1] + 1.0 - cf[3]*cf[3] - cf[2]);
	tsM[1] = sc*((cf[3] + cf[1])*(cf[2] + cf[3]*cf[1]));
	tsM[2] = sc*(cf[3]*(cf[1] + cf[3]*cf[2]));
	tsM[3] = sc*(cf[1] + cf[3]*cf[2]);
	tsM[4] = sc*(-(cf[2] - 1.0)*(cf[2] + cf[3]*cf[1]));
	tsM[5] = sc*(-(cf[3]*cf[1] + cf[3]*cf[3] + cf[2] - 1.0)*cf[3]);
	tsM[6] = sc*(cf[3]*cf[1] + cf[2] + cf[1]*cf[1] - cf[2]*cf[2]);
	tsM[7] = sc*(cf[1]*cf[2] + cf[3]*cf[2]*cf[2] - cf[1]*cf[3]*cf[3] - cf[3]*cf[3]*cf[3] - cf[3]*cf[2] + cf[3]);
	tsM[8] = sc*(cf[3]*(cf[1] + cf[3]*cf[2]));

	// One 1D pass over a line of length L: causal sweep X->W (first three
	// samples use X[0] as the out-of-range boundary value), Triggs/Sdika
	// matrix tsM fixes the right border, then anti-causal sweep W->Y.
	// Requires L >= 3 (checked above); uses outer-scope i, X, Y, W, cf, tsM.
#define YVV(L) \
{ \
	W[0] = cf[0]*X[0] + cf[1]*X[0] + cf[2]*X[0] + cf[3]*X[0]; \
	W[1] = cf[0]*X[1] + cf[1]*W[0] + cf[2]*X[0] + cf[3]*X[0]; \
	W[2] = cf[0]*X[2] + cf[1]*W[1] + cf[2]*W[0] + cf[3]*X[0]; \
	for (i=3; i<L; i++) \
		W[i] = cf[0]*X[i] + cf[1]*W[i-1] + cf[2]*W[i-2] + cf[3]*W[i-3]; \
	tsu[0] = W[L-1] - X[L-1]; \
	tsu[1] = W[L-2] - X[L-1]; \
	tsu[2] = W[L-3] - X[L-1]; \
	tsv[0] = tsM[0]*tsu[0] + tsM[1]*tsu[1] + tsM[2]*tsu[2] + X[L-1]; \
	tsv[1] = tsM[3]*tsu[0] + tsM[4]*tsu[1] + tsM[5]*tsu[2] + X[L-1]; \
	tsv[2] = tsM[6]*tsu[0] + tsM[7]*tsu[1] + tsM[8]*tsu[2] + X[L-1]; \
	Y[L-1] = cf[0]*W[L-1] + cf[1]*tsv[0] + cf[2]*tsv[1] + cf[3]*tsv[2]; \
	Y[L-2] = cf[0]*W[L-2] + cf[1]*Y[L-1] + cf[2]*tsv[0] + cf[3]*tsv[1]; \
	Y[L-3] = cf[0]*W[L-3] + cf[1]*Y[L-2] + cf[2]*Y[L-1] + cf[3]*tsv[0]; \
	for (i=L-4; i>=0; i--) \
		Y[i] = cf[0]*W[i] + cf[1]*Y[i+1] + cf[2]*Y[i+2] + cf[3]*Y[i+3]; \
}

	// intermediate buffers, sized for the longer of the two dimensions
	sz = MAX2(src->x, src->y);
	X = MEM_callocN(sz*sizeof(double), "IIR_gauss X buf");
	Y = MEM_callocN(sz*sizeof(double), "IIR_gauss Y buf");
	W = MEM_callocN(sz*sizeof(double), "IIR_gauss W buf");
	if (xy & 1) {	// H: filter each row, gathering/scattering channel 'chan'
		for (y=0; y<src->y; ++y) {
			const int yx = y*src->x;
			for (x=0; x<src->x; ++x)
				X[x] = src->rect[(x + yx)*src->type + chan];
			YVV(src->x);
			for (x=0; x<src->x; ++x)
				src->rect[(x + yx)*src->type + chan] = Y[x];
		}
	}
	if (xy & 2) {	// V: filter each column (sees H result when both bits set)
		for (x=0; x<src->x; ++x) {
			for (y=0; y<src->y; ++y)
				X[y] = src->rect[(x + y*src->x)*src->type + chan];
			YVV(src->y);
			for (y=0; y<src->y; ++y)
				src->rect[(x + y*src->x)*src->type + chan] = Y[y];
		}
	}

	MEM_freeN(X);
	MEM_freeN(W);
	MEM_freeN(Y);
#undef YVV
}