Blender V2.61 - r43446
/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "util_cuda.h"
#include "util_debug.h"
#include "util_foreach.h"
#include "util_math.h"
#include "util_opencl.h"
#include "util_opengl.h"
#include "util_types.h"
#include "util_vector.h"

CCL_NAMESPACE_BEGIN

/* Device Task */

DeviceTask::DeviceTask(Type type_)
: type(type_), x(0), y(0), w(0), h(0), rng_state(0), rgba(0), buffer(0),
  sample(0), resolution(0),
  shader_input(0), shader_output(0),
  shader_eval_type(0), shader_x(0), shader_w(0)
{
}

void DeviceTask::split_max_size(list<DeviceTask>& tasks, int max_size)
{
	int num;

	if(type == SHADER) {
		num = (shader_w + max_size - 1)/max_size;
	}
	else {
		max_size = max(1, max_size/w);
		num = (h + max_size - 1)/max_size;
	}

	split(tasks, num);
}

void DeviceTask::split(ThreadQueue<DeviceTask>& queue, int num)
{
	list<DeviceTask> tasks;
	split(tasks, num);

	foreach(DeviceTask& task, tasks)
		queue.push(task);
}

void DeviceTask::split(list<DeviceTask>& tasks, int num)
{
	if(type == SHADER) {
		num = min(shader_w, num);

		for(int i = 0; i < num; i++) {
			int tx = shader_x + (shader_w/num)*i;
			int tw = (i == num-1)? shader_w - i*(shader_w/num): shader_w/num;

			DeviceTask task = *this;

			task.shader_x = tx;
			task.shader_w = tw;

			tasks.push_back(task);
		}
	}
	else {
		num = min(h, num);

		for(int i = 0; i < num; i++) {
			int ty = y + (h/num)*i;
			int th = (i == num-1)? h - i*(h/num): h/num;

			DeviceTask task = *this;

			task.y = ty;
			task.h = th;

			tasks.push_back(task);
		}
	}
}

/* Device */

void Device::pixels_alloc(device_memory& mem)
{
	mem_alloc(mem, MEM_READ_WRITE);
}

void Device::pixels_copy_from(device_memory& mem, int y, int w, int h)
{
	mem_copy_from(mem, y, w, h, sizeof(uint8_t)*4);
}

void Device::pixels_free(device_memory& mem)
{
	mem_free(mem);
}

void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int width, int height, bool transparent)
{
	pixels_copy_from(rgba, y, w, h);

	if(transparent) {
		glEnable(GL_BLEND);
		glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
	}

	glPixelZoom((float)width/(float)w, (float)height/(float)h);
	glRasterPos2f(0, dy);

	uint8_t *pixels = (uint8_t*)rgba.data_pointer;

	/* for multi devices, this assumes the inefficient method that we allocate
	 * all pixels on the device even though we only render to a subset */
	pixels += 4*y*w;

	glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

	glRasterPos2f(0.0f, 0.0f);
	glPixelZoom(1.0f, 1.0f);

	if(transparent)
		glDisable(GL_BLEND);
}

Device *Device::create(DeviceInfo& info, bool background, int threads)
{
	Device *device;

	switch(info.type) {
		case DEVICE_CPU:
			device = device_cpu_create(info, threads);
			break;
#ifdef WITH_CUDA
		case DEVICE_CUDA:
			if(cuLibraryInit())
				device = device_cuda_create(info, background);
			else
				device = NULL;
			break;
#endif
#ifdef WITH_MULTI
		case DEVICE_MULTI:
			device = device_multi_create(info, background);
			break;
#endif
#ifdef WITH_NETWORK
		case DEVICE_NETWORK:
			device = device_network_create(info, "127.0.0.1");
			break;
#endif
#ifdef WITH_OPENCL
		case DEVICE_OPENCL:
			if(clLibraryInit())
				device = device_opencl_create(info, background);
			else
				device = NULL;
			break;
#endif
		default:
			return NULL;
	}

	return device;
}

DeviceType Device::type_from_string(const char *name)
{
	if(strcmp(name, "cpu") == 0)
		return DEVICE_CPU;
	else if(strcmp(name, "cuda") == 0)
		return DEVICE_CUDA;
	else if(strcmp(name, "opencl") == 0)
		return DEVICE_OPENCL;
	else if(strcmp(name, "network") == 0)
		return DEVICE_NETWORK;
	else if(strcmp(name, "multi") == 0)
		return DEVICE_MULTI;

	return DEVICE_NONE;
}

string Device::string_from_type(DeviceType type)
{
	if(type == DEVICE_CPU)
		return "cpu";
	else if(type == DEVICE_CUDA)
		return "cuda";
	else if(type == DEVICE_OPENCL)
		return "opencl";
	else if(type == DEVICE_NETWORK)
		return "network";
	else if(type == DEVICE_MULTI)
		return "multi";

	return "";
}

vector<DeviceType>& Device::available_types()
{
	static vector<DeviceType> types;
	static bool types_init = false;

	if(!types_init) {
		types.push_back(DEVICE_CPU);

#ifdef WITH_CUDA
		if(cuLibraryInit())
			types.push_back(DEVICE_CUDA);
#endif

#ifdef WITH_OPENCL
		if(clLibraryInit())
			types.push_back(DEVICE_OPENCL);
#endif

#ifdef WITH_NETWORK
		types.push_back(DEVICE_NETWORK);
#endif
#ifdef WITH_MULTI
		types.push_back(DEVICE_MULTI);
#endif

		types_init = true;
	}

	return types;
}

vector<DeviceInfo>& Device::available_devices()
{
	static vector<DeviceInfo> devices;
	static bool devices_init = false;

	if(!devices_init) {
#ifdef WITH_CUDA
		if(cuLibraryInit())
			device_cuda_info(devices);
#endif

#ifdef WITH_OPENCL
		if(clLibraryInit())
			device_opencl_info(devices);
#endif

#ifdef WITH_MULTI
		device_multi_info(devices);
#endif

		device_cpu_info(devices);

#ifdef WITH_NETWORK
		device_network_info(devices);
#endif

		devices_init = true;
	}

	return devices;
}

CCL_NAMESPACE_END
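
Usage sketch (not part of the listed file): the snippet below only illustrates how the entry points defined above fit together, namely Device::available_devices(), Device::create() and DeviceTask::split_max_size(). It assumes the declarations in device.h as of this revision, including the PATH_TRACE task type; the helper names create_first_available_device and split_render_task_example, and the numeric values, are purely illustrative.

#include "device.h"

CCL_NAMESPACE_BEGIN

/* Sketch: pick the first enumerated device and instantiate it. */
static Device *create_first_available_device()
{
	/* available_devices() lazily probes the compiled-in backends
	 * (CPU, and CUDA/OpenCL/multi/network when enabled) and caches
	 * the result in a static vector. */
	vector<DeviceInfo>& devices = Device::available_devices();

	if(devices.empty())
		return NULL;

	/* background = true: no interactive OpenGL drawing needed;
	 * threads = 0: illustrative, lets the CPU device choose. */
	return Device::create(devices[0], true, 0);
}

/* Sketch: split one render task into per-strip sub-tasks. */
static void split_render_task_example(list<DeviceTask>& tasks)
{
	/* One PATH_TRACE task covering a 1920x1080 buffer, split so that
	 * no sub-task covers more than roughly 64k pixels; for non-SHADER
	 * tasks split_max_size() divides the height into strips. */
	DeviceTask task(DeviceTask::PATH_TRACE);

	task.x = 0;
	task.y = 0;
	task.w = 1920;
	task.h = 1080;

	task.split_max_size(tasks, 64*1024);
}

CCL_NAMESPACE_END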