Blender V2.61 - r43446

cache.c

Go to the documentation of this file.
00001 /*
00002  * ***** BEGIN GPL LICENSE BLOCK *****
00003  *
00004  * This program is free software; you can redistribute it and/or
00005  * modify it under the terms of the GNU General Public License
00006  * as published by the Free Software Foundation; either version 2
00007  * of the License, or (at your option) any later version.
00008  *
00009  * This program is distributed in the hope that it will be useful,
00010  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00012  * GNU General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU General Public License
00015  * along with this program; if not, write to the Free Software Foundation,
00016  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
00017  *
00018  * ***** END GPL LICENSE BLOCK *****
00019  */
00020 
00026 #include "MEM_guardedalloc.h"
00027 
00028 #include "BLI_utildefines.h"
00029 #include "BLI_ghash.h"
00030 #include "BLI_listbase.h"
00031 #include "BLI_memarena.h"
00032 #include "BLI_threads.h"
00033 
00034 
00035 
00036 #include "IMB_imbuf.h"
00037 #include "IMB_imbuf_types.h"
00038 #include "IMB_filetype.h"
00039 
00040 #include "imbuf.h"
00041 
00042 /* We use a two level cache here. A per-thread cache with limited number of
00043    tiles. This can be accessed without locking and so is hoped to lead to most
00044    tile access being lock-free. The global cache is shared between all threads
00045    and requires slow locking to access, and contains all tiles.
00046    
00047    The per-thread cache should be big enough that one might hope to not fall
00048    back to the global cache every pixel, but not too big to keep too many tiles
00049    locked and using memory. */
00050 
00051 #define IB_THREAD_CACHE_SIZE    100
00052 
/* Tile entry in the global (all-threads) cache; keyed in the hash by
   (ibuf, tx, ty). Linked into GLOBAL_CACHE.tiles (in use) or .unused. */
typedef struct ImGlobalTile {
    struct ImGlobalTile *next, *prev;

    ImBuf *ibuf;          /* image this tile belongs to */
    int tx, ty;           /* tile coordinates within ibuf */
    int refcount;         /* number of users (thread caches / direct callers) */
    volatile int loading; /* nonzero while a thread loads the tile pixels;
                             other threads busy-wait on this flag */
} ImGlobalTile;
00061 
/* Tile entry in a per-thread cache; a lock-free mirror of one global tile.
   Keyed in the per-thread hash by (ibuf, tx, ty). */
typedef struct ImThreadTile {
    struct ImThreadTile *next, *prev;

    ImBuf *ibuf;          /* image this tile belongs to */
    int tx, ty;           /* tile coordinates within ibuf */

    ImGlobalTile *global; /* global tile this entry holds a refcount on */
} ImThreadTile;
00070 
/* Per-thread cache: accessed without locking by exactly one thread.
   Holds at most IB_THREAD_CACHE_SIZE tiles. */
typedef struct ImThreadTileCache {
    ListBase tiles;   /* in-use tiles, most recently used at the head */
    ListBase unused;  /* pre-allocated, currently unused tile structs */
    GHash *tilehash;  /* (ibuf,tx,ty) -> ImThreadTile lookup */
} ImThreadTileCache;
00076 
/* Global cache shared between all threads; all access to its lists/hash
   is serialized through 'mutex'. */
typedef struct ImGlobalTileCache {
    ListBase tiles;   /* in-use tiles, most recently used at the head */
    ListBase unused;  /* recycled tile structs */
    GHash *tilehash;  /* (ibuf,tx,ty) -> ImGlobalTile lookup */

    MemArena *memarena;       /* backing storage for all tile structs */
    uintptr_t totmem, maxmem; /* current / maximum cache memory, in bytes */

    /* +1: slot 0 serves non-threaded access (see IMB_tile_cache_params) */
    ImThreadTileCache thread_cache[BLENDER_MAX_THREADS+1];
    int totthread;

    ThreadMutex mutex;

    int initialized;  /* set once imb_tile_cache_init has run */
} ImGlobalTileCache;
00092 
/* The single process-wide cache instance; zeroed at init and protected by
   its embedded mutex for all shared-state access. */
static ImGlobalTileCache GLOBAL_CACHE;
00094 
00095 /***************************** Hash Functions ********************************/
00096 
00097 static unsigned int imb_global_tile_hash(const void *gtile_p)
00098 {
00099     const ImGlobalTile *gtile= gtile_p;
00100 
00101     return ((unsigned int)(intptr_t)gtile->ibuf)*769 + gtile->tx*53 + gtile->ty*97;
00102 }
00103 
00104 static int imb_global_tile_cmp(const void *a_p, const void *b_p)
00105 {
00106     const ImGlobalTile *a= a_p;
00107     const ImGlobalTile *b= b_p;
00108 
00109     if(a->ibuf == b->ibuf && a->tx == b->tx && a->ty == b->ty) return 0;
00110     else if(a->ibuf < b->ibuf || a->tx < b->tx || a->ty < b->ty) return -1;
00111     else return 1;
00112 }
00113 
00114 static unsigned int imb_thread_tile_hash(const void *ttile_p)
00115 {
00116     const ImThreadTile *ttile= ttile_p;
00117 
00118     return ((unsigned int)(intptr_t)ttile->ibuf)*769 + ttile->tx*53 + ttile->ty*97;
00119 }
00120 
00121 static int imb_thread_tile_cmp(const void *a_p, const void *b_p)
00122 {
00123     const ImThreadTile *a= a_p;
00124     const ImThreadTile *b= b_p;
00125 
00126     if(a->ibuf == b->ibuf && a->tx == b->tx && a->ty == b->ty) return 0;
00127     else if(a->ibuf < b->ibuf || a->tx < b->tx || a->ty < b->ty) return -1;
00128     else return 1;
00129 }
00130 
00131 /******************************** Load/Unload ********************************/
00132 
00133 static void imb_global_cache_tile_load(ImGlobalTile *gtile)
00134 {
00135     ImBuf *ibuf= gtile->ibuf;
00136     int toffs= ibuf->xtiles*gtile->ty + gtile->tx;
00137     unsigned int *rect;
00138 
00139     rect = MEM_callocN(sizeof(unsigned int)*ibuf->tilex*ibuf->tiley, "imb_tile");
00140     imb_loadtile(ibuf, gtile->tx, gtile->ty, rect);
00141     ibuf->tiles[toffs]= rect;
00142 }
00143 
00144 static void imb_global_cache_tile_unload(ImGlobalTile *gtile)
00145 {
00146     ImBuf *ibuf= gtile->ibuf;
00147     int toffs= ibuf->xtiles*gtile->ty + gtile->tx;
00148 
00149     MEM_freeN(ibuf->tiles[toffs]);
00150     ibuf->tiles[toffs]= NULL;
00151 
00152     GLOBAL_CACHE.totmem -= sizeof(unsigned int)*ibuf->tilex*ibuf->tiley;
00153 }
00154 
/* External free: evict the global cache entry for (ibuf, tx, ty), e.g.
 * when a tile is being invalidated from outside this file. Note this only
 * recycles the ImGlobalTile struct; it does not free the pixel rect here. */
void imb_tile_cache_tile_free(ImBuf *ibuf, int tx, int ty)
{
    ImGlobalTile *gtile, lookuptile;

    BLI_mutex_lock(&GLOBAL_CACHE.mutex);

    /* stack key for the hash lookup */
    lookuptile.ibuf = ibuf;
    lookuptile.tx = tx;
    lookuptile.ty = ty;
    gtile= BLI_ghash_lookup(GLOBAL_CACHE.tilehash, &lookuptile);

    if(gtile) {
        /* in case another thread is loading this: busy-wait until its
           volatile 'loading' flag clears before unhooking the entry */
        while(gtile->loading)
            ;

        /* unhook from hash and list; the struct lives in the memarena and
           is recycled through the unused list.
           NOTE(review): gtile->ibuf/refcount are left as-is here -- the
           entry is unreachable through the hash afterwards, presumably
           making the stale fields harmless; confirm against callers. */
        BLI_ghash_remove(GLOBAL_CACHE.tilehash, gtile, NULL, NULL);
        BLI_remlink(&GLOBAL_CACHE.tiles, gtile);
        BLI_addtail(&GLOBAL_CACHE.unused, gtile);
    }

    BLI_mutex_unlock(&GLOBAL_CACHE.mutex);
}
00179 
00180 /******************************* Init/Exit ***********************************/
00181 
00182 static void imb_thread_cache_init(ImThreadTileCache *cache)
00183 {
00184     ImThreadTile *ttile;
00185     int a;
00186 
00187     memset(cache, 0, sizeof(ImThreadTileCache));
00188 
00189     cache->tilehash= BLI_ghash_new(imb_thread_tile_hash, imb_thread_tile_cmp, "imb_thread_cache_init gh");
00190 
00191     /* pre-allocate all thread local tiles in unused list */
00192     for(a=0; a<IB_THREAD_CACHE_SIZE; a++) {
00193         ttile= BLI_memarena_alloc(GLOBAL_CACHE.memarena, sizeof(ImThreadTile));
00194         BLI_addtail(&cache->unused, ttile);
00195     }
00196 }
00197 
00198 static void imb_thread_cache_exit(ImThreadTileCache *cache)
00199 {
00200     BLI_ghash_free(cache->tilehash, NULL, NULL);
00201 }
00202 
/* One-time startup initialization of the global tile cache. Must run before
 * any thread touches the cache. */
void imb_tile_cache_init(void)
{
    memset(&GLOBAL_CACHE, 0, sizeof(ImGlobalTileCache));

    BLI_mutex_init(&GLOBAL_CACHE.mutex);

    /* initialize for one thread, for places that access textures
       outside of rendering (displace modifier, painting, ..) */
    /* NOTE(review): IMB_tile_cache_params() memsets GLOBAL_CACHE again and
       re-inits the mutex at its end, so the mutex_init above appears
       redundant on this path -- confirm before relying on it. */
    IMB_tile_cache_params(0, 0);

    GLOBAL_CACHE.initialized = 1;
}
00215 
/* Tear down the global cache: free all tile pixel rects, per-thread hashes,
 * the memarena (which owns every tile struct) and the global hash, then end
 * the mutex and zero the whole structure. Safe to call when uninitialized. */
void imb_tile_cache_exit(void)
{
    ImGlobalTile *gtile;
    int a;

    if(GLOBAL_CACHE.initialized) {
        /* free pixel rects first; the structs themselves live in the
           memarena freed below */
        for(gtile=GLOBAL_CACHE.tiles.first; gtile; gtile=gtile->next)
            imb_global_cache_tile_unload(gtile);

        for(a=0; a<GLOBAL_CACHE.totthread; a++)
            imb_thread_cache_exit(&GLOBAL_CACHE.thread_cache[a]);

        if(GLOBAL_CACHE.memarena)
            BLI_memarena_free(GLOBAL_CACHE.memarena);

        if(GLOBAL_CACHE.tilehash)
            BLI_ghash_free(GLOBAL_CACHE.tilehash, NULL, NULL);

        BLI_mutex_end(&GLOBAL_CACHE.mutex);

        /* leave everything zeroed so a later init starts from clean state */
        memset(&GLOBAL_CACHE, 0, sizeof(ImGlobalTileCache));
    }
}
00239 
00240 /* presumed to be called when no threads are running */
00241 void IMB_tile_cache_params(int totthread, int maxmem)
00242 {
00243     int a;
00244 
00245     /* always one cache for non-threaded access */
00246     totthread++;
00247 
00248     /* lazy initialize cache */
00249     if(GLOBAL_CACHE.totthread == totthread && GLOBAL_CACHE.maxmem == maxmem)
00250         return;
00251 
00252     imb_tile_cache_exit();
00253 
00254     memset(&GLOBAL_CACHE, 0, sizeof(ImGlobalTileCache));
00255 
00256     GLOBAL_CACHE.tilehash= BLI_ghash_new(imb_global_tile_hash, imb_global_tile_cmp, "tile_cache_params gh");
00257 
00258     GLOBAL_CACHE.memarena= BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "ImTileCache arena");
00259     BLI_memarena_use_calloc(GLOBAL_CACHE.memarena);
00260 
00261     GLOBAL_CACHE.maxmem= maxmem*1024*1024;
00262 
00263     GLOBAL_CACHE.totthread= totthread;
00264     for(a=0; a<totthread; a++)
00265         imb_thread_cache_init(&GLOBAL_CACHE.thread_cache[a]);
00266 
00267     BLI_mutex_init(&GLOBAL_CACHE.mutex);
00268 }
00269 
00270 /***************************** Global Cache **********************************/
00271 
/* Fetch (loading from disk if needed) the global tile for (ibuf, tx, ty),
 * incrementing its refcount. 'replacetile' is a tile the caller no longer
 * references; its refcount is dropped under the same lock. Returns with the
 * mutex released; the returned tile's pixels are guaranteed loaded. */
static ImGlobalTile *imb_global_cache_get_tile(ImBuf *ibuf, int tx, int ty, ImGlobalTile *replacetile)
{
    ImGlobalTile *gtile, lookuptile;

    BLI_mutex_lock(&GLOBAL_CACHE.mutex);

    /* caller stops referencing replacetile; done under the lock so the
       refcount is consistent for the eviction scan below */
    if(replacetile)
        replacetile->refcount--;

    /* find tile in global cache */
    lookuptile.ibuf = ibuf;
    lookuptile.tx = tx;
    lookuptile.ty = ty;
    gtile= BLI_ghash_lookup(GLOBAL_CACHE.tilehash, &lookuptile);
    
    if(gtile) {
        /* found tile. however it may be in the process of being loaded
           by another thread, in that case we do stupid busy loop waiting
           for the other thread to load the tile */
        gtile->refcount++;

        BLI_mutex_unlock(&GLOBAL_CACHE.mutex);

        /* 'loading' is volatile, cleared by the loading thread */
        while(gtile->loading)
            ;
    }
    else {
        /* not found, let's load it from disk */

        /* first check if we hit the memory limit; gtile is NULL here, and
           stays NULL when no evictable (unreferenced, fully loaded) tile
           exists -- scan from the list tail, i.e. least recently used */
        if(GLOBAL_CACHE.maxmem && GLOBAL_CACHE.totmem > GLOBAL_CACHE.maxmem) {
            /* find an existing tile to unload */
            for(gtile=GLOBAL_CACHE.tiles.last; gtile; gtile=gtile->prev)
                if(gtile->refcount == 0 && gtile->loading == 0)
                    break;
        }

        if(gtile) {
            /* found a tile to unload; reuse its struct for the new tile */
            imb_global_cache_tile_unload(gtile);
            BLI_ghash_remove(GLOBAL_CACHE.tilehash, gtile, NULL, NULL);
            BLI_remlink(&GLOBAL_CACHE.tiles, gtile);
        }
        else {
            /* allocate a new tile or reuse unused */
            if(GLOBAL_CACHE.unused.first) {
                gtile= GLOBAL_CACHE.unused.first;
                BLI_remlink(&GLOBAL_CACHE.unused, gtile);
            }
            else
                gtile= BLI_memarena_alloc(GLOBAL_CACHE.memarena, sizeof(ImGlobalTile));
        }

        /* setup new tile; refcount 1 for the caller, loading set before
           the hash insert so other threads that find it will wait */
        gtile->ibuf= ibuf;
        gtile->tx= tx;
        gtile->ty= ty;
        gtile->refcount= 1;
        gtile->loading= 1;

        BLI_ghash_insert(GLOBAL_CACHE.tilehash, gtile, gtile);
        BLI_addhead(&GLOBAL_CACHE.tiles, gtile);

        /* mark as being loaded and unlock to allow other threads to load too */
        GLOBAL_CACHE.totmem += sizeof(unsigned int)*ibuf->tilex*ibuf->tiley;

        BLI_mutex_unlock(&GLOBAL_CACHE.mutex);

        /* load from disk, outside the lock so other tiles can load in
           parallel */
        imb_global_cache_tile_load(gtile);

        /* mark as done loading */
        gtile->loading= 0;
    }

    return gtile;
}
00349 
00350 /***************************** Per-Thread Cache ******************************/
00351 
00352 static unsigned int *imb_thread_cache_get_tile(ImThreadTileCache *cache, ImBuf *ibuf, int tx, int ty)
00353 {
00354     ImThreadTile *ttile, lookuptile;
00355     ImGlobalTile *gtile, *replacetile;
00356     int toffs= ibuf->xtiles*ty + tx;
00357 
00358     /* test if it is already in our thread local cache */
00359     if((ttile=cache->tiles.first)) {
00360         /* check last used tile before going to hash */
00361         if(ttile->ibuf == ibuf && ttile->tx == tx && ttile->ty == ty)
00362             return ibuf->tiles[toffs];
00363 
00364         /* find tile in hash */
00365         lookuptile.ibuf = ibuf;
00366         lookuptile.tx = tx;
00367         lookuptile.ty = ty;
00368 
00369         if((ttile=BLI_ghash_lookup(cache->tilehash, &lookuptile))) {
00370             BLI_remlink(&cache->tiles, ttile);
00371             BLI_addhead(&cache->tiles, ttile);
00372 
00373             return ibuf->tiles[toffs];
00374         }
00375     }
00376 
00377     /* not found, have to do slow lookup in global cache */
00378     if(cache->unused.first == NULL) {
00379         ttile= cache->tiles.last;
00380         replacetile= ttile->global;
00381         BLI_remlink(&cache->tiles, ttile);
00382         BLI_ghash_remove(cache->tilehash, ttile, NULL, NULL);
00383     }
00384     else {
00385         ttile= cache->unused.first;
00386         replacetile= NULL;
00387         BLI_remlink(&cache->unused, ttile);
00388     }
00389 
00390     BLI_addhead(&cache->tiles, ttile);
00391     BLI_ghash_insert(cache->tilehash, ttile, ttile);
00392 
00393     gtile= imb_global_cache_get_tile(ibuf, tx, ty, replacetile);
00394 
00395     ttile->ibuf= gtile->ibuf;
00396     ttile->tx= gtile->tx;
00397     ttile->ty= gtile->ty;
00398     ttile->global= gtile;
00399 
00400     return ibuf->tiles[toffs];
00401 }
00402 
00403 unsigned int *IMB_gettile(ImBuf *ibuf, int tx, int ty, int thread)
00404 {
00405     return imb_thread_cache_get_tile(&GLOBAL_CACHE.thread_cache[thread+1], ibuf, tx, ty);
00406 }
00407 
00408 void IMB_tiles_to_rect(ImBuf *ibuf)
00409 {
00410     ImBuf *mipbuf;
00411     ImGlobalTile *gtile;
00412     unsigned int *to, *from;
00413     int a, tx, ty, y, w, h;
00414 
00415     for(a=0; a<ibuf->miptot; a++) {
00416         mipbuf= IMB_getmipmap(ibuf, a);
00417 
00418         /* don't call imb_addrectImBuf, it frees all mipmaps */
00419         if(!mipbuf->rect) {
00420             if((mipbuf->rect = MEM_mapallocN(ibuf->x*ibuf->y*sizeof(unsigned int), "imb_addrectImBuf"))) {
00421                 mipbuf->mall |= IB_rect;
00422                 mipbuf->flags |= IB_rect;
00423             }
00424             else
00425                 break;
00426         }
00427 
00428         for(ty=0; ty<mipbuf->ytiles; ty++) {
00429             for(tx=0; tx<mipbuf->xtiles; tx++) {
00430                 /* acquire tile through cache, this assumes cache is initialized,
00431                    which it is always now but it's a weak assumption ... */
00432                 gtile= imb_global_cache_get_tile(mipbuf, tx, ty, NULL);
00433 
00434                 /* setup pointers */
00435                 from= mipbuf->tiles[mipbuf->xtiles*ty + tx];
00436                 to= mipbuf->rect + mipbuf->x*ty*mipbuf->tiley + tx*mipbuf->tilex;
00437 
00438                 /* exception in tile width/height for tiles at end of image */
00439                 w= (tx == mipbuf->xtiles-1)? mipbuf->x - tx*mipbuf->tilex: mipbuf->tilex;
00440                 h= (ty == mipbuf->ytiles-1)? mipbuf->y - ty*mipbuf->tiley: mipbuf->tiley;
00441 
00442                 for(y=0; y<h; y++) {
00443                     memcpy(to, from, sizeof(unsigned int)*w);
00444                     from += mipbuf->tilex;
00445                     to += mipbuf->x;
00446                 }
00447 
00448                 /* decrease refcount for tile again */
00449                 BLI_mutex_lock(&GLOBAL_CACHE.mutex);
00450                 gtile->refcount--;
00451                 BLI_mutex_unlock(&GLOBAL_CACHE.mutex);
00452             }
00453         }
00454     }
00455 }
00456