diff --git a/.hgtags b/.hgtags index f7ca9dfd..8f88e9f9 100644 --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,6 @@ e3c0182ba64b77319ce84c9e2a8581649e68273d v0.2.1 cb6e89af8012fac22cc0f3c5ad247c98c701bdda v0.3.0 ee26517b27207353b0c8a7d357bcb4977b5d93fb v0.4.0 7db07694ea00eb1655f7a1adcc3ae880e8e116f9 v0.4.1 +13a39b11aceee517c19d2e2cec2e6b875546062c v0.4.2 +f1b73d39617071297167cc7ce96f3892f21105fc v0.4.3 +bd6ddacdf7955e51d9b80be639c282d4974e6f56 v0.4.4 diff --git a/CMakeLists.txt b/CMakeLists.txt index 7d38a80a..cbe39073 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,31 +35,26 @@ endif() # Load the required modules include (MitsubaUtil) +include (MtsGetVersionInfo) include (CheckCXXSourceCompiles) include (CMakeDependentOption) -# Read version (MTS_VERSION) from include/mitsuba/core/version.h -file(STRINGS "include/mitsuba/core/version.h" MITSUBA_H REGEX "^#define MTS_VERSION \"[^\"]*\"$") -string(REGEX REPLACE "^.*MTS_VERSION \"([0-9]+).*$" "\\1" MTS_VERSION_MAJOR "${MITSUBA_H}") -string(REGEX REPLACE "^.*MTS_VERSION \"[0-9]+\\.([0-9]+).*$" "\\1" MTS_VERSION_MINOR "${MITSUBA_H}") -string(REGEX REPLACE "^.*MTS_VERSION \"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" MTS_VERSION_PATCH "${MITSUBA_H}") -set(MTS_VERSION "${MTS_VERSION_MAJOR}.${MTS_VERSION_MINOR}.${MTS_VERSION_PATCH}") -set(MITSUBA_H) -if("${MTS_VERSION_MAJOR}" MATCHES "[0-9]+" AND - "${MTS_VERSION_MINOR}" MATCHES "[0-9]+" AND - "${MTS_VERSION_PATCH}" MATCHES "[0-9]+") - message(STATUS "mitsuba ${MTS_VERSION}") +# Read the version information +MTS_GET_VERSION_INFO() +if (MTS_HAS_VALID_REV) + message(STATUS "mitsuba ${MTS_VERSION}-hg${MTS_REV_ID} (${MTS_DATE})") else() - message(FATAL_ERROR "The mitsuba version could not be determined!") + message(STATUS "mitsuba ${MTS_VERSION} (${MTS_DATE})") endif() +# Setup the build options +include (MitsubaBuildOptions) # Find the external libraries and setup the paths include (MitsubaExternal) -# Setup the build options, include paths and compile definitions -include (MitsubaBuildOptions) - +# Main mitsuba include directory +include_directories("include") # ===== Prerequisite resources ===== @@ -92,8 +87,13 @@ endif() # Additional files to add to main executables if(APPLE) set(MTS_DARWIN_STUB "${CMAKE_CURRENT_SOURCE_DIR}/src/mitsuba/darwin_stub.mm") + set(MTS_WINDOWS_STUB "") +elseif(WIN32) + set(MTS_DARWIN_STUB "") + set(MTS_WINDOWS_STUB "${CMAKE_CURRENT_SOURCE_DIR}/data/windows/wmain_stub.cpp") else() set(MTS_DARWIN_STUB "") + set(MTS_WINDOWS_STUB "") endif() diff --git a/SConstruct b/SConstruct index 44f5ef65..2538ea12 100644 --- a/SConstruct +++ b/SConstruct @@ -6,9 +6,10 @@ import os resources = [] plugins = [] stubs = [] +winstubs = [] Export('SCons', 'sys', 'os', 'glob', 'resources', - 'plugins', 'stubs') + 'plugins', 'stubs', 'winstubs') # Configure the build framework env = SConscript('build/SConscript.configure') @@ -18,6 +19,9 @@ Export('env') if sys.platform == 'win32': # Set an application icon on Windows resources += [ env.RES('data/windows/mitsuba_res.rc') ] + # Convert the command line args from UTF-8 to UTF-16 + winstubs += [ env.SharedObject('#data/windows/wmain_stub.cpp') ] + Export('winstubs') def build(scriptFile, exports = [], duplicate = 0): dirname = '/'.join(os.path.dirname(scriptFile).split('/')[1:]) diff --git a/build/SConscript.configure b/build/SConscript.configure index 3a65f817..73d917be 100644 --- a/build/SConscript.configure +++ b/build/SConscript.configure @@ -285,7 +285,7 @@ if needsBuildDependencies: print '\nThe dependency directory and your Mitsuba codebase have 
different version' print 'numbers! Your copy of Mitsuba has version %s, whereas the dependencies ' % MTS_VERSION print 'have version %s. Please bring them into sync, either by running\n' % depVersion - print '$ hg update -r v%s\n' % depVersion + print '$ hg update -r v%s\n' % depVersion print 'in the Mitsuba directory, or by running\n' print '$ cd dependencies' print '$ hg pull' @@ -353,7 +353,7 @@ def configure_for_objective_cpp(env): env.RemoveFlags(['-fstrict-aliasing', '-ftree-vectorize', '-std=c\+\+0x']) # Remove Intel compiler-specific optimization flags - env.RemoveFlags(['-x.*', '-ax.*', '-ipo', '-no-prec-div', + env.RemoveFlags(['-x.*', '-ax.*', '-ipo', '-no-prec-div', '-fp-model', 'fast=.*', '-wd.*', '-openmp']) env['CCFLAGS'] += ['-fno-strict-aliasing'] # Enforce GCC usage (Intel compiler doesn't handle Objective C/C++) @@ -374,7 +374,7 @@ env.__class__.ConfigureForObjectiveCPP = configure_for_objective_cpp env.__class__.RelaxCompilerSettings = relax_compiler_settings if hasCollada: - env.Append(CPPDEFINES = [['MTS_HAS_COLLADA', 1]] ) + env.Append(CPPDEFINES = [['MTS_HAS_COLLADA', 1]]) env.SConsignFile() diff --git a/build/config-linux-gcc-debug.py b/build/config-linux-gcc-debug.py index 55f32973..109f06d8 100644 --- a/build/config-linux-gcc-debug.py +++ b/build/config-linux-gcc-debug.py @@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11'] GLFLAGS = ['-DGLEW_MX'] BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread'] COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4'] -COLLADALIB = ['collada14dom'] +COLLADALIB = ['collada14dom', 'xml2'] # The following assumes that the Mitsuba bindings should be built for the # "default" Python version. It is also possible to build bindings for multiple -# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE, +# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE, # PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '') diff --git a/build/config-linux-gcc.py b/build/config-linux-gcc.py index 6173d921..d81b3408 100644 --- a/build/config-linux-gcc.py +++ b/build/config-linux-gcc.py @@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11'] GLFLAGS = ['-DGLEW_MX'] BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread'] COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4'] -COLLADALIB = ['collada14dom'] +COLLADALIB = ['collada14dom', 'xml2'] # The following assumes that the Mitsuba bindings should be built for the # "default" Python version. It is also possible to build bindings for multiple -# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE, +# versions at the same time by explicitly specifying e.g. 
PYTHON27INCLUDE, # PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '') diff --git a/build/config-linux-icl.py b/build/config-linux-icl.py index f376ac60..10baef50 100644 --- a/build/config-linux-icl.py +++ b/build/config-linux-icl.py @@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11'] GLFLAGS = ['-DGLEW_MX'] BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread'] COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4'] -COLLADALIB = ['collada14dom'] +COLLADALIB = ['collada14dom', 'xml2'] # The following assumes that the Mitsuba bindings should be built for the # "default" Python version. It is also possible to build bindings for multiple -# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE, +# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE, # PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '') diff --git a/build/config-macos10.7-gcc-x86_64.py b/build/config-macos10.7-gcc-x86_64.py index b4112976..5714db52 100644 --- a/build/config-macos10.7-gcc-x86_64.py +++ b/build/config-macos10.7-gcc-x86_64.py @@ -6,7 +6,7 @@ CCFLAGS = ['-arch', 'x86_64', '-mmacosx-version-min=10.7', '-march=nocona LINKFLAGS = ['-framework', 'OpenGL', '-framework', 'Cocoa', '-arch', 'x86_64', '-mmacosx-version-min=10.7', '-Wl,-syslibroot,/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk', '-Wl,-headerpad,128'] BASEINCLUDE = ['#include', '#dependencies/include'] BASELIBDIR = ['#dependencies/lib'] -BASELIB = ['m', 'pthread', 'gomp', 'Half'] +BASELIB = ['m', 'pthread', 'Half'] OEXRINCLUDE = ['#dependencies/include/OpenEXR'] OEXRLIB = ['IlmImf', 'Imath', 'Iex', 'z'] PNGLIB = ['png'] diff --git a/build/mitsuba-msvc2010.vcxproj b/build/mitsuba-msvc2010.vcxproj index 1ee777d5..fb1a61ec 100644 --- a/build/mitsuba-msvc2010.vcxproj +++ b/build/mitsuba-msvc2010.vcxproj @@ -125,13 +125,15 @@ - + - + - + - + + + @@ -203,6 +205,8 @@ + + @@ -287,14 +291,14 @@ + + - - @@ -385,6 +389,8 @@ + + @@ -409,6 +415,8 @@ + + @@ -431,6 +439,8 @@ + + @@ -455,8 +465,6 @@ - - @@ -511,6 +519,8 @@ + + @@ -629,8 +639,6 @@ - - @@ -757,6 +765,8 @@ + + @@ -805,6 +815,10 @@ + + + + @@ -937,6 +951,8 @@ + + @@ -1001,12 +1017,8 @@ - - - - @@ -1021,6 +1033,8 @@ + + @@ -1033,9 +1047,9 @@ - + - + diff --git a/build/mitsuba-msvc2010.vcxproj.filters b/build/mitsuba-msvc2010.vcxproj.filters index 2a0fca14..353385b3 100644 --- a/build/mitsuba-msvc2010.vcxproj.filters +++ b/build/mitsuba-msvc2010.vcxproj.filters @@ -9,136 +9,136 @@ cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - {b54fb131-7e86-4e08-bba2-828939a038ff} + {81dd9e3e-7bca-44a1-a313-76fb5af5ab0c} - {62e70ba9-1509-4ecb-9c06-8de3ef48cd47} + {7112f301-f6d5-4351-ae06-4a29910a3766} - {81da027f-f282-46cb-a88d-abc84969dbf8} + {15522c16-64b9-487e-b9fe-dea0094cb5c1} - {8364b08a-50e5-471b-bfaa-4e45915df101} + {4df10a81-6f13-45ae-9a39-ce009db3a659} - {bb52270c-4a49-423b-ad02-4a7c042295c2} + {0c722d61-5189-477d-b242-2fa2ad26c83c} - {c4330c75-0353-41ab-a809-1471f359aec6} + {d849b48a-b61d-4540-81ab-f5228fd6fa93} + + + {32a38ee8-b255-4e23-8efc-9491fc27c645} - {0f1ec875-44fb-46b2-9116-26d659410d97} + {9f6ef52b-2504-4722-a200-7aaa9603088a} - {ab910bc2-a0a5-4f02-8232-396937071149} + 
{6c49af9f-8cac-477f-825b-ccb368e313c6} - {dff3f942-f016-4bad-aa2c-63c444401a94} + {182aef81-5fdf-4eb1-a7d3-5b1d7d5de2b5} - {855b0594-d28c-486a-b081-72991219654e} + {81241aff-695b-4556-bf46-e9d3ac4aa7fb} - {08449c4a-89f1-4b5b-a756-2bd2f03bf566} - - - {8437086b-afc9-48b6-a358-53d4df044974} + {f4218b48-d878-435b-915a-8da9727666b1} - {d9802d62-8614-401a-afa6-941271c63386} + {e6e1a06f-b795-4a12-9b0c-0c730e0b2ac6} - {65e0e20a-2809-4352-8c81-69a0a3824fa9} + {231a82e9-6b4b-4a95-995c-8abd0b6149b3} - {8ea19f58-a2c0-4a30-bc79-817da2f1a1ed} + {4c70e19a-a935-4b0f-88e3-10683b03f66c} - {753c9d80-ceb0-4932-8097-5c7cac92e091} + {4a433329-84ca-4a8b-938e-4d06c21ec6ff} - {b4290489-566b-4576-847b-392b4c45f1f1} + {4414bb60-269f-4934-9dc9-58bbed35bc1d} - {ad07dceb-214e-42e1-933d-c8ceeecd1389} + {4bc23857-f9a2-453d-bc64-8c9ebc5aff42} - {3ee94c12-30fb-4f65-8781-f7b5e8a95db2} + {a9318f22-caaa-40af-b8fc-9b8051ec0859} - {813aba46-5b9d-4290-9a31-18e77952929e} + {fba5da81-e3f2-4fbe-98d7-807aaa2c2cf3} - {55bcc3cd-7f6b-4bb6-b22e-b731ed0451fb} + {abd6883d-375b-4d85-bb93-12952995332d} - {6ebccae2-fbfd-4a3f-9bfc-7e9dbd2b6d74} + {4c9fdf84-d6a6-47f8-8d58-c01ed8351fa0} - {a339ab19-2bed-4d03-afdc-d73223648b72} + {366f9975-2268-4124-81b9-87e7c2c03b1e} - {87b01999-0b73-481b-b8fa-b66f594f65be} + {8d2d02ea-1957-4256-8046-f525d13e7ba5} - {5870cf94-fb1b-44c0-90f2-32c3c8cbf6b7} + {8438e074-8733-4689-95ba-86145dd9fd9d} - {90263b28-e65b-4d41-92cb-a0d1dac22564} + {9d35d85d-280c-49ef-869a-ffaa0602f9f5} - {453d13c1-1847-4a18-8f2e-028913b4ec43} + {6df94c72-e89d-451e-ad6b-160ea0313077} - {e4baad7e-ee3c-4085-961f-2851c0ed2b8f} + {7bec8429-3bb9-4dfd-8008-a10c675a9818} - {a7ab51c2-5596-458f-b5f0-1129dc3dbbf5} + {747a1613-4c01-46ef-ba17-92d2286890ad} - {a7456898-2253-48d8-abec-9c86311cca64} + {516a0fc3-a824-4486-b50c-f3165d1d8361} - {cb9f48b8-6f25-4c9d-9d2c-b1a67cc80427} + {bddbc784-474a-4cd3-bbea-3123048dbfaa} - {b44ee927-e827-4c46-bbce-c7a1b705b63b} + {7bba2a7a-8cd3-47bd-9ab2-e33939d4eba6} - {40540c0e-02d6-4296-bbdf-87dfd9f7f213} + {32cdfdd7-c3bb-41f2-b433-10165963f9ed} - {430d27a2-66db-4169-bb48-aeb0b65e4d1a} + {49edc93d-4305-45a8-a6d9-ae283837d46a} - {a247e523-dbaf-4fb8-b284-6c8c5cbc09a5} + {d24cf1e6-a301-42e8-9fc1-26fe0ddc3f04} - {91d6db52-180b-4dde-9339-807917e42f4e} + {3df649be-39d5-4416-8776-427a1696c016} - {8ae0e3a3-87db-4e93-bb20-9d198040ae86} + {2e68088f-97e2-4dee-91f5-c85e3e36dd74} - {0049f7d1-4211-4010-b53c-3ee99244046e} + {f7674a86-b0a5-484c-9db3-a20500b9bbf9} - {8bddb89b-fa54-448a-a4b3-294f18d45ded} + {f41987e3-e0b9-431d-b37b-5931db380b09} - {082f589c-eb3f-47a4-b2f6-fbd6d89dec07} + {88ed597b-50ba-4d6f-bf50-ae8f5fa17789} - {a7f59027-bc87-4d28-9b90-eb5be186d8a7} + {5499a8e9-ef31-4ab9-a58f-d3a36a784004} - {f5410d0f-0a22-4579-afbf-8e13db8fa3d5} + {b74f25a9-ff20-4888-8dfc-57dd968b0b8d} - {ebe2b998-26de-4e92-8618-c739cb43a5f2} + {18246123-74da-4fee-8581-bd2860451c40} - {4f4d9d4b-3f44-4d56-9a18-16b94e753706} + {3fdeab5a-5ccf-41c6-bf05-d9f7f510c741} @@ -247,9 +247,6 @@ Source Files\librender - - Source Files\librender - Source Files\librender @@ -439,6 +436,9 @@ Source Files\libhw + + Source Files\libhw + Source Files\libhw @@ -511,6 +511,12 @@ Source Files\libcore + + Source Files\libcore + + + Source Files\libcore + Source Files\libcore @@ -709,6 +715,9 @@ Source Files\mtsgui + + Source Files\mtsgui + Source Files\mtsgui @@ -805,15 +814,9 @@ Source Files\utils - - Source Files\utils - Source Files\utils - - Source Files\utils - Source Files\utils @@ -835,6 +838,9 @@ Source Files\shapes\ply + + Source Files\shapes + Source Files\shapes 
@@ -853,10 +859,10 @@ Source Files\shapes - + Source Files\shapes - + Source Files\shapes @@ -1035,17 +1041,20 @@ Source Files\samplers - - Source Files\libhw + + Source Files\libhw\data - - Source Files\libhw + + Source Files\libhw\data - - Source Files\libhw + + Source Files\libhw\data - - Source Files\libhw + + Source Files\libhw\data + + + Source Files\libhw\data Source Files\medium @@ -1152,6 +1161,9 @@ Source Files\films + + Source Files\films + Source Files\shapes @@ -1278,6 +1290,9 @@ Header Files\mitsuba\render + + Header Files\mitsuba\render + Header Files\mitsuba\render @@ -1287,9 +1302,6 @@ Header Files\mitsuba\render - - Header Files\mitsuba\render - Header Files\mitsuba\render @@ -1425,6 +1437,9 @@ Header Files\mitsuba\core + + Header Files\mitsuba\core + Header Files\mitsuba\core @@ -1461,6 +1476,9 @@ Header Files\mitsuba\core + + Header Files\mitsuba\core + Header Files\mitsuba\core @@ -1494,6 +1512,9 @@ Header Files\mitsuba\core + + Header Files\mitsuba\core + Header Files\mitsuba\core @@ -1530,9 +1551,6 @@ Header Files\mitsuba\core - - Header Files\mitsuba\core - Header Files\mitsuba\core @@ -1614,6 +1632,9 @@ Header Files\mitsuba\hw + + Header Files\mitsuba\hw + Header Files\mitsuba\hw diff --git a/data/cmake/MitsubaBuildOptions.cmake b/data/cmake/MitsubaBuildOptions.cmake index 6a36aabb..7aae4210 100644 --- a/data/cmake/MitsubaBuildOptions.cmake +++ b/data/cmake/MitsubaBuildOptions.cmake @@ -6,15 +6,17 @@ if (NOT DEFINED MTS_VERSION) message(FATAL_ERROR "This file has to be included from the main build file.") endif() -# Image format definitions -if (PNG_FOUND) - add_definitions(-DMTS_HAS_LIBPNG=1) -endif() -if (JPEG_FOUND) - add_definitions(-DMTS_HAS_LIBJPEG=1) -endif() -if (OPENEXR_FOUND) - add_definitions(-DMTS_HAS_OPENEXR=1) +# Default initial compiler flags which may be modified by advanced users +if (MTS_CMAKE_INIT) + set(MTS_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") + set(MTS_CXX_FLAGS "-fvisibility=hidden -pipe -march=nocona -mfpmath=sse -ffast-math -Wall -Winvalid-pch") + endif() + if (MTS_CXX_FLAGS) + set(CMAKE_CXX_FLAGS "${MTS_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" CACHE + STRING "Flags used by the compiler during all build types." FORCE) + set(MTS_CXX_FLAGS) + endif() endif() # Top level configuration definitions @@ -107,15 +109,7 @@ endif() if (WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8) add_definitions(-DWIN64) endif() - - -# Main mitsuba include directory -include_directories("include") - -# Includes for the common libraries -include_directories(${Boost_INCLUDE_DIRS} ${Eigen_INCLUDE_DIR}) - -# If we are using the system OpenEXR, add its headers which half.h requires -if (OPENEXR_FOUND) - include_directories(${ILMBASE_INCLUDE_DIRS}) +if (MSVC AND MTS_SSE AND NOT CMAKE_CL_64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:SSE2") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2") endif() diff --git a/data/cmake/MitsubaExternal.cmake b/data/cmake/MitsubaExternal.cmake index 621e317a..acfbf98c 100644 --- a/data/cmake/MitsubaExternal.cmake +++ b/data/cmake/MitsubaExternal.cmake @@ -244,3 +244,23 @@ CMAKE_DEPENDENT_OPTION(BUILD_PYTHON "Build the Python bindings." 
ON if (PYTHONLIBS_FOUND AND mts_boost_PYTHON_FOUND) set (PYTHON_FOUND TRUE) endif () + + +# Includes for the common libraries +include_directories(${Boost_INCLUDE_DIRS} ${Eigen_INCLUDE_DIR}) + +# If we are using the system OpenEXR, add its headers which half.h requires +if (OPENEXR_FOUND) + include_directories(${ILMBASE_INCLUDE_DIRS}) +endif() + +# Image format definitions +if (PNG_FOUND) + add_definitions(-DMTS_HAS_LIBPNG=1) +endif() +if (JPEG_FOUND) + add_definitions(-DMTS_HAS_LIBJPEG=1) +endif() +if (OPENEXR_FOUND) + add_definitions(-DMTS_HAS_OPENEXR=1) +endif() diff --git a/data/cmake/MitsubaUtil.cmake b/data/cmake/MitsubaUtil.cmake index e465e535..473be190 100644 --- a/data/cmake/MitsubaUtil.cmake +++ b/data/cmake/MitsubaUtil.cmake @@ -141,14 +141,16 @@ function(mts_win_resource target_filename name ext description) endif() set(RC_DESCRIPTION "${description}") - #TODO Add the hg revision number to the version, e.g. 0.0.0-hg000000000000 - set(RC_VERSION "${MTS_VERSION}") - set(RC_VERSION_COMMA "${MTS_VERSION}.0") - string(REPLACE "." "," RC_VERSION_COMMA ${RC_VERSION_COMMA}) + if (MTS_HAS_VALID_REV) + set(RC_VERSION "${MTS_VERSION}-${MTS_VERSION_BUILD}hg${MTS_REV_ID}") + else() + set(RC_VERSION "${MTS_VERSION}") + endif() + set(RC_VERSION_COMMA "${MTS_VERSION_MAJOR},${MTS_VERSION_MINOR},${MTS_VERSION_PATCH},0") set(RC_FILENAME "${name}${ext}") set(RC_NAME "${name}") - #TODO Set the year programmatically - set(RC_YEAR "2012") + # MTS_DATE has the format YYYY.MM.DD + string(SUBSTRING "${MTS_DATE}" 0 4 RC_YEAR) configure_file("${RC_FILE}" "${target_filename}" ESCAPE_QUOTES @ONLY) endfunction() @@ -308,15 +310,15 @@ macro (add_mts_plugin _plugin_name) add_library (${_plugin_name} MODULE ${_plugin_srcs}) endif () - set(core_libraries "mitsuba-core" "mitsuba-render") + set(_plugin_core_libraries "mitsuba-core" "mitsuba-render") if (_plugin_MTS_HW) - list(APPEND core_libraries "mitsuba-hw") + list(APPEND _plugin_core_libraries "mitsuba-hw") endif() if (_plugin_MTS_BIDIR) - list(APPEND core_libraries "mitsuba-bidir") + list(APPEND _plugin_core_libraries "mitsuba-bidir") endif() target_link_libraries (${_plugin_name} - ${core_libraries} ${_plugin_LINK_LIBRARIES}) + ${_plugin_core_libraries} ${_plugin_LINK_LIBRARIES}) set_target_properties (${_plugin_name} PROPERTIES PREFIX "") if (APPLE) @@ -367,6 +369,7 @@ endif() # [RES_ICON filename] # [RES_DESCRIPTION "Description string"] # [NO_INSTALL] +# [MTS_HW] [MTS_BIDIR] # [NO_MTS_PCH | PCH pch_header] ) # # The executable name is taken from the first argument. The target gets @@ -375,6 +378,11 @@ endif() # (for example, libpng) may be specified after the optionl LINK_LIBRARIES # keyword. # +# By default the executables are linked against mitsuba-core and mitsuba-render. +# When MTS_HW is set, the executable will be linked against with mitsuba-hw. +# When MTS_BIDIR is specified, the executable will also be linked against +# mitsuba-bidir. +# # The optional keyword WIN32, if presents, gets passed to add_executable(...) # to produce a Windows executable using winmain, thus it won't have a # console. The NO_INSTALL keyword causes the target not to be installed. @@ -388,7 +396,7 @@ endif() # builds; other platforms simply ignore this value as with RES_ICON. 
# macro (add_mts_exe _exe_name) - CMAKE_PARSE_ARGUMENTS(_exe "WIN32;NO_INSTALL;NO_MTS_PCH" + CMAKE_PARSE_ARGUMENTS(_exe "WIN32;NO_INSTALL;MTS_HW;MTS_BIDIR;NO_MTS_PCH" "PCH;RES_ICON;RES_DESCRIPTION" "LINK_LIBRARIES" ${ARGN}) set (_exe_srcs ${_exe_UNPARSED_ARGUMENTS}) if (_exe_WIN32) @@ -425,8 +433,15 @@ macro (add_mts_exe _exe_name) else () add_executable (${_exe_name} ${_exe_TYPE} ${_exe_srcs}) endif () - target_link_libraries (${_exe_name} - ${MTS_CORELIBS} ${_exe_LINK_LIBRARIES}) + + set(_exe_core_libraries "mitsuba-core" "mitsuba-render") + if (_exe_MTS_HW) + list(APPEND _exe_core_libraries "mitsuba-hw") + endif() + if (_exe_MTS_BIDIR) + list(APPEND _exe_core_libraries "mitsuba-bidir") + endif() + target_link_libraries (${_exe_name} ${_exe_core_libraries} ${_exe_LINK_LIBRARIES}) if (WIN32) set_target_properties (${_exe_name} PROPERTIES VERSION "${MTS_VERSION}") endif() diff --git a/data/cmake/MtsGetVersionInfo.cmake b/data/cmake/MtsGetVersionInfo.cmake new file mode 100644 index 00000000..19092f44 --- /dev/null +++ b/data/cmake/MtsGetVersionInfo.cmake @@ -0,0 +1,160 @@ +# ============================================================================ +# HDRITools - High Dynamic Range Image Tools +# Copyright 2008-2011 Program of Computer Graphics, Cornell University +# +# Distributed under the OSI-approved MIT License (the "License"); +# see accompanying file LICENSE for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +# --------------------------------------------------------------------------- +# Primary author: +# Edgar Velazquez-Armendariz +# ============================================================================ + +# - Sets up the version info variables +# This module provides a function intended to be called ONLY from the root dir: +# MTS_GET_VERSION_INFO() +# This function will read the "include/mitsuba/core/version.h" file and execute +# "hg", setting the following variables: +# MTS_VERSION - Full version string: .. +# MTS_VERSION_MAJOR +# MTS_VERSION_MINOR +# MTS_VERSION_PATCH +# MTS_VERSION_BUILD - Simple build number based on MTS_DATE, +# encoded as YYYYMMDD +# MTS_HAS_VALID_REV - Flag to indicate whether MTS_REV_ID is set +# MTS_REV_ID - First 12 digits of the mercurial revision ID +# MTS_DATE - Represents the code date as YYYY.MM.DD +# MTS_MACLS_VERSION - A version for Mac Launch Services from the version and +# code date, in the format nnnnn.nn.nn[hgXXXXXXXXXXXX] + +function(MTS_GET_VERSION_INFO) + + # Simple, internal macro for zero padding values. Assumes that the number of + # digits is enough. Note that this method overwrites the variable! 
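  # For instance (illustrative values only), padding the value 7 to four digits:
  #   set(_build 7)
  #   ZERO_PAD(_build 4)    # _build now holds the string "0007"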
+ macro(ZERO_PAD NUMBER_VAR NUM_DIGITS) + set(_val ${${NUMBER_VAR}}) + set(${NUMBER_VAR} "") + foreach(dummy_var RANGE 1 ${NUM_DIGITS}) + math(EXPR _digit "${_val} % 10") + set(${NUMBER_VAR} "${_digit}${${NUMBER_VAR}}") + math(EXPR _val "${_val} / 10") + endforeach() + unset(_val) + unset(_digit) + endmacro() + + + # Uses hg to get the version string and the date of such revision + # Based on info from: + # http://mercurial.selenic.com/wiki/VersioningWithMake (January 2011) + + # Try to directly get the information assuming the source is within a repo + find_program(HG_CMD hg DOC "Mercurial command line executable") + mark_as_advanced(HG_CMD) + if (HG_CMD) + execute_process( + COMMAND "${HG_CMD}" -R "${PROJECT_SOURCE_DIR}" + parents --template "{node|short},{date|shortdate}" + OUTPUT_VARIABLE HG_INFO + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if (HG_INFO) + # Extract the revision ID and the date + string(REGEX REPLACE "(.+),.+" "\\1" MTS_REV_ID "${HG_INFO}") + string(REGEX REPLACE ".+,(.+)-(.+)-(.+)" "\\1.\\2.\\3" + MTS_DATE "${HG_INFO}") + set(MTS_REV_ID ${MTS_REV_ID} PARENT_SCOPE) + set(MTS_DATE ${MTS_DATE} PARENT_SCOPE) + endif() + endif() + + # If that failed, try grabbing the id from .hg_archival.txt, in case a tarball + # made by "hg archive" is being used + if (NOT MTS_REV_ID) + set(HG_ARCHIVAL_FILENAME "${CMAKE_CURRENT_SOURCE_DIR}/.hg_archival.txt") + # Try to read from the file generated by "hg archive" + if (EXISTS "${HG_ARCHIVAL_FILENAME}") + file(READ "${HG_ARCHIVAL_FILENAME}" HG_ARCHIVAL_TXT) + # Extract just the first 12 characters of the node + string(REGEX REPLACE ".*node:[ \\t]+(............).*" "\\1" + MTS_REV_ID "${HG_ARCHIVAL_TXT}") + set(MTS_REV_ID ${MTS_REV_ID} PARENT_SCOPE) + endif() + endif() + + if (NOT MTS_DATE) + # The Windows "date" command output depends on the regional settings + if (WIN32) + set(GETDATE_CMD "${PROJECT_SOURCE_DIR}/data/windows/getdate.exe") + set(GETDATE_ARGS "") + else() + set(GETDATE_CMD "date") + set(GETDATE_ARGS "+'%Y.%m.%d'") + endif() + execute_process(COMMAND "${GETDATE_CMD}" ${GETDATE_ARGS} + OUTPUT_VARIABLE MTS_DATE + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if (NOT MTS_DATE) + message(FATAL_ERROR "Unable to get a build date!") + endif() + set(MTS_DATE ${MTS_DATE} PARENT_SCOPE) + endif() + + if (MTS_REV_ID) + set (MTS_HAS_VALID_REV 1) + else() + message(WARNING "Unable to find the mercurial revision id.") + set (MTS_HAS_VALID_REV 0) + endif() + set(MTS_HAS_VALID_REV ${MTS_HAS_VALID_REV} PARENT_SCOPE) + + + # Read version (MTS_VERSION) from include/mitsuba/core/version.h + file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/mitsuba/core/version.h" MITSUBA_H REGEX "^#define MTS_VERSION \"[^\"]*\"$") + if (MITSUBA_H MATCHES "^.*MTS_VERSION \"([0-9]+)\\.([0-9]+)\\.([0-9]+).*$") + set(MTS_VERSION_MAJOR ${CMAKE_MATCH_1}) + set(MTS_VERSION_MINOR ${CMAKE_MATCH_2}) + set(MTS_VERSION_PATCH ${CMAKE_MATCH_3}) + set(MTS_VERSION "${MTS_VERSION_MAJOR}.${MTS_VERSION_MINOR}.${MTS_VERSION_PATCH}" PARENT_SCOPE) + set(MTS_VERSION_MAJOR ${MTS_VERSION_MAJOR} PARENT_SCOPE) + set(MTS_VERSION_MINOR ${MTS_VERSION_MINOR} PARENT_SCOPE) + set(MTS_VERSION_PATCH ${MTS_VERSION_PATCH} PARENT_SCOPE) + else() + message(FATAL_ERROR "The mitsuba version could not be determined!") + endif() + + # Make a super simple build number from the date + if (MTS_DATE MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)") + set(MTS_VERSION_BUILD + "${CMAKE_MATCH_1}${CMAKE_MATCH_2}${CMAKE_MATCH_3}" PARENT_SCOPE) + + # Now make a Mac Launch Services version number based on version and date. 
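    # As a worked example (using version 0.4.4 and its release date 2013.02.28
    # from the changelog), the expressions below give
    #   _MACLS_MAJOR = (0+1)*256 + (4+1)*16 + (4+1)      = 341 -> "0341"
    #   _MACLS_MINOR = (2013-2008)*4 + ((2-1)*32+28)/100 = 20  -> "20"
    #   _MACLS_BUILD = ((2-1)*32+28) % 100               = 60  -> "60"
    # so MTS_MACLS_VERSION becomes "0341.20.60", with an "hg<rev-id>" suffix
    # appended when the Mercurial revision is available.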
+ # Based on specs from: + # http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html (Feb 2011) + if (MTS_VERSION_MAJOR GREATER 30 OR + MTS_VERSION_MINOR GREATER 14 OR + MTS_VERSION_PATCH GREATER 14 OR + ${CMAKE_MATCH_1} GREATER 2032) + message(AUTHOR_WARNING "Mitsuba version violates the Mac LS assumptions") + endif() + math(EXPR _MACLS_MAJOR "(${MTS_VERSION_MAJOR}+1)*256 + (${MTS_VERSION_MINOR}+1)*16 + ${MTS_VERSION_PATCH}+1") + math(EXPR _MACLS_MINOR "((${CMAKE_MATCH_1}-2008)*4) + ((${CMAKE_MATCH_2}-1)*32 + ${CMAKE_MATCH_3})/100") + math(EXPR _MACLS_BUILD "((${CMAKE_MATCH_2}-1)*32 + ${CMAKE_MATCH_3})%100") + ZERO_PAD(_MACLS_MAJOR 4) + ZERO_PAD(_MACLS_MINOR 2) + ZERO_PAD(_MACLS_BUILD 2) + set(MTS_MACLS_VERSION "${_MACLS_MAJOR}.${_MACLS_MINOR}.${_MACLS_BUILD}") + if(MTS_HAS_VALID_REV) + set(MTS_MACLS_VERSION "${MTS_MACLS_VERSION}hg${MTS_REV_ID}") + endif() + set(MTS_MACLS_VERSION ${MTS_MACLS_VERSION} PARENT_SCOPE) + else() + message(FATAL_ERROR + "Mitsuba date has an unexpected format: ${MTS_DATE}") + endif() + +endfunction() diff --git a/data/darwin/Info.plist b/data/darwin/Info.plist index 05e42854..94eb2192 100644 --- a/data/darwin/Info.plist +++ b/data/darwin/Info.plist @@ -90,5 +90,7 @@ YES BreakpadVendor the Mitsuba authors + NSHighResolutionCapable + diff --git a/data/darwin/Info.plist.in b/data/darwin/Info.plist.in index 409395a9..12dc0e7b 100644 --- a/data/darwin/Info.plist.in +++ b/data/darwin/Info.plist.in @@ -70,5 +70,7 @@ @MTS_VERSION@ CSResourcesFileMapped + NSHighResolutionCapable + diff --git a/data/darwin/add-icl-libraries.sh b/data/darwin/add-icl-libraries.sh index e35e925d..9db846ad 100755 --- a/data/darwin/add-icl-libraries.sh +++ b/data/darwin/add-icl-libraries.sh @@ -1,5 +1,6 @@ #!/bin/bash cp /opt/intel/composer_xe_*/compiler/lib/libiomp5.dylib Mitsuba.app/Contents/Frameworks +install_name_tool -id @rpath/libiomp5.dylib Mitsuba.app/Contents/Frameworks/libiomp5.dylib find Mitsuba.app/Contents/MacOS/ Mitsuba.app/plugins -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib find Mitsuba.app/Contents/Frameworks/libmitsuba-* -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib -find Mitsuba.app/Contents/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib +find Mitsuba.app/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib diff --git a/data/linux/debian/changelog b/data/linux/debian/changelog index d42b3978..f05344b5 100644 --- a/data/linux/debian/changelog +++ b/data/linux/debian/changelog @@ -1,3 +1,65 @@ +mitsuba (0.4.4-1) unstable; urgency=low + * Improved Python support for rendering animations and motion blur + * Photon mapper logic rewrite to account for certain missing specular paths + * Robustness improvements for specular+diffuse materials such as 'plastic' + * Fixed a remaining issue in the instancing frame computation code + * The thindielectric plugin formerly computed incorrect transmittance values + * The cube shape is now centered at the origin by default + * The TLS cleanup logic has been fixed to avoid a potential crash in mtssrv + * Other minor improvements, which are listed in the repository log + -- Wenzel Jakob Thu, 28 Feb 2013 00:00:00 -0400 + +mitsuba (0.4.3-1) unstable; urgency=low + * Motion blur: Support for arbitrary linear camera, object, and sensor motion + to produce motion blur in renderings. 
+ * Render-time annotations: added the ability to tag image files with additional + information by means of metadata or text labels. + * Hide directly visible emitters: convenient feature for removing an environment + light source so that an image can be composited onto documents having a + different color. + * Improved instancing: more robust instancing code with support for + non-rigid transformations. + * Threading on Windows: fixed various threading-related issues on Windows that + previously caused crashes and deadlocks. + * Caching: Caching mechanism to further accelerate the loading of + .serialized files. + * File dialogs: Native File Open/Save dialogs are now used on Windows. + * Python: Improved python bindings; easier usage on MacOS X. + * Blender interaction: Fixed a issue where GUI tabs containing scenes created + in Blender could not be cloned. + * Non-uniform scales: All triangle mesh-based shapes now permit + non-uniform scales. + * NaNs and friends: Increased resilience against various numerical corner cases. + * Index-matched participating media: Fixed an unfortunate regression in volpath + regarding index-matched media that was accidentally introduced in 0.4.2. + * roughdiffuse: Fixed texturing support in the roughdiffuse plugin. + * Photon mapping: Fixed some inaccuracies involving participating media when + rendered by the photon mapper and the Beam Radiance Estimate. + * Conductors: Switched Fresnel reflectance computations for conductors to the + exact expressions predicted by geometric optics (an approximation was + previously used). + * New cube shape: Added a cube shape plugin for convenience. This does + exactly what one would expect. + * The rest: As usual, a large number of smaller bugfixes and improvements + were below the threshold and are thus not listed individually. The + repository log has more details. + -- Wenzel Jakob Tue, 29 Jan 2013 00:00:00 -0400 + +mitsuba (0.4.2-1) unstable; urgency=low + * Volumetric path tracers: improved sampling when dealing with index-matched medium transitions. This is essentially a re-implementation of an optimization that Mitsuba 0.3.1 already had, but which got lost in the bidirectional rewrite. + * Batch tonemapper: due to an unfortunate bug, the batch tonemapper in the last release produced invalid results for images containing an alpha channel. This is now fixed. + * Shapes: corrected some differential geometry issues in the "cylinder" and "rectangle" shapes. + * MLT: fixed 2-stage MLT, which was producing incorrect results. + * MEPT: fixed the handling of directional light sources. + * Robustness: got rid of various corner-cases that could produce NaNs. + * Filenames: to facilitate loading scenes created on Windows/OSX, the Linux version now resolves files case-insensitively if they could not be found after a case-sensitive search. + * Python: added Python bindings for shapes and triangle meshes. The Python plugin should now be easier to load (previously, this was unfortunately rather difficult on several platforms). The documentation was also given an overhaul. + * Particle tracing: I've decided to disable the adjoint BSDF for shading normals in the particle tracer, since it causes an unacceptable amount of variance in scenes containing poorly tesselated geometry. This affects the plugins ptracer, ppm, sppm and photonmapper. + * Subsurface scattering: fixed parallel network renderings involving the dipole model. + * Homogeneous medium & dipole: added many more material presets by Narasimhan et al. 
+ * OBJ loader: further robustness improvements to the OBJ loader and the associated MTL material translator. + -- Wenzel Jakob Wed, 31 Oct 2012 00:00:00 -0400 + mitsuba (0.4.1-1) unstable; urgency=low * negative pixel values in textures and environment maps are handled more gracefully. * minor robustness improvements to the OBJ and COLLADA importers. @@ -35,15 +97,15 @@ mitsuba (0.4.0-1) unstable; urgency=low mitsuba (0.3.1-1) unstable; urgency=low - * Photon mapper: The photon mapper had some serious issues in the + * Photon mapper: The photon mapper had some serious issues in the last release. These are now fixed, and it should run faster too. - * On Linux/x86_64, the performance of the single precision exp() and log() + * On Linux/x86_64, the performance of the single precision exp() and log() math library functions is extremely poor. Mitsuba now uses the double prevision versions of these functions by default. * Primitive clipping: Fixed numerical issues that occurred when using primitive clipping in a double precision build. * The adaptive integrator now better interacts with certain sub-integrators. - * Instanced analytic shapes (e.g. spheres, cylinders, ..) are now supported, + * Instanced analytic shapes (e.g. spheres, cylinders, ..) are now supported, and an error involving network rendering with instanced geometry is fixed. * Fixed a serious issue that could destroy a scene file when saving from a cloned tab! * Fixed some bad GUI behavior in multi-screen setups @@ -57,30 +119,30 @@ mitsuba (0.3.0-1) unstable; urgency=low * Added Python bindings that can be used to instantiate plugins and control rendering processes. - * Spectral rendering: most of the code pertaining to spectral - rendering has seen a significant overhaul. It is now faster and + * Spectral rendering: most of the code pertaining to spectral + rendering has seen a significant overhaul. It is now faster and in certain cases more accurate. - * Flexible material classes: this release introduces a robust and - very general suite of eight physically-based smooth and rough + * Flexible material classes: this release introduces a robust and + very general suite of eight physically-based smooth and rough (microfacet-based) material classes. * Material modifiers: two new material modifiers (bump & coating) can be applied to BSDFs to create new materials. - * Material verification: the sampling methods of all material - models in Mitsuba are now automatically verified with the help + * Material verification: the sampling methods of all material + models in Mitsuba are now automatically verified with the help of statistical hypothesis tests (using Chi^2-tests). - * Generated documentation: there is now a javadoc-like system, - which extracts documentation directly from the plugin source code + * Generated documentation: there is now a javadoc-like system, + which extracts documentation directly from the plugin source code and stitches it into a LaTeX reference document. - * lookAt: Mitsuba inherited a bug from PBRT, where the - tag changed the handedness of the coordinate system. This is now + * lookAt: Mitsuba inherited a bug from PBRT, where the + tag changed the handedness of the coordinate system. This is now fixed--also, the syntax of this tag has changed to make it easier to read. * Scene portability: A new conversion tool ensures that old and incompatible - scenes can be translated into the scene description format of the + scenes can be translated into the scene description format of the most recent version. 
- * Contributed plugins: Tom Kazimiers and Papas have contributed + * Contributed plugins: Tom Kazimiers and Papas have contributed implementations of the Preetham Sun & Sky model and the Hanrahan-Krueger scattering model. - * Photon mapping: The Photon map integrator has been rewritten for + * Photon mapping: The Photon map integrator has been rewritten for improved accuracy and better performance. Furthermore, the underlying data structure has been replaced with a ~50% faster implementation. @@ -125,8 +187,8 @@ mitsuba (0.2.0-1) unstable; urgency=low mitsuba (0.1.3-1) unstable; urgency=low - This is mainly a bugfix release to address a serious regression in the - material system. Other notable changes are: + This is mainly a bugfix release to address a serious regression in the + material system. Other notable changes are: * Imported scenes now store relative paths * OBJ importing works on Windows @@ -134,7 +196,7 @@ mitsuba (0.1.3-1) unstable; urgency=low * The anisotropic Ward BRDF is now supported in the preview * Faster texture loading * The renderer now has a testcase framework similar to JUnit - + -- Wenzel Jakob Wed, 8 Sep 2010 09:59:00 -0400 mitsuba (0.1.2-1) unstable; urgency=low @@ -150,8 +212,8 @@ mitsuba (0.1.2-1) unstable; urgency=low is lacking some required OpenGL features. * Create default cameras/lightsources if none are specified in a scene * Support for drag & drop in the user interface - * The Mitsuba user interface now also doubles as an EXR viewer / tonemapper. - Drag an EXR file onto the UI or open it using the File menu, and the image + * The Mitsuba user interface now also doubles as an EXR viewer / tonemapper. + Drag an EXR file onto the UI or open it using the File menu, and the image opens in a new tab. Afterwards, it is possible to export the image as a tonemapped 8-bit PNG image. 
* The realtime preview now has a 'force diffuse' feature to improve @@ -165,6 +227,6 @@ mitsuba (0.1.2-1) unstable; urgency=low mitsuba (0.1.1-1) unstable; urgency=low - * Initial release + * Initial release -- Wenzel Jakob Sat, 17 Jul 2010 23:56:03 -0400 diff --git a/data/linux/debian/control b/data/linux/debian/control index 8bf000ec..ea681978 100644 --- a/data/linux/debian/control +++ b/data/linux/debian/control @@ -4,10 +4,10 @@ Priority: optional Maintainer: Wenzel Jakob Build-Depends: debhelper (>= 7), build-essential, scons, qt4-dev-tools, libpng12-dev, libjpeg-dev, libilmbase-dev, libopenexr-dev, - libxerces-c-dev, libboost-dev, libglewmx1.5-dev, libxxf86vm-dev, + libxerces-c-dev, libboost-dev, libglewmx-dev, libxxf86vm-dev, collada-dom-dev, libboost-system-dev, libboost-filesystem-dev, libboost-python-dev, libboost-thread-dev, libgl1-mesa-dev, - libglu1-mesa-dev, pkg-config, libeigen3-dev + libglu1-mesa-dev, pkg-config, libeigen3-dev, libxml2-dev Standards-Version: 3.8.3 Homepage: http://www.mitsuba-renderer.org @@ -24,7 +24,7 @@ Description: Mitsuba renderer Package: mitsuba-dev Architecture: any Depends: qt4-dev-tools, libpng12-dev, libjpeg-dev, libilmbase-dev, - libopenexr-dev, libxerces-c-dev, libboost-dev, libglewmx1.5-dev, + libopenexr-dev, libxerces-c-dev, libboost-dev, libglewmx-dev, libxxf86vm-dev, collada-dom-dev, libboost-system-dev, libboost-filesystem-dev, libboost-python-dev, libboost-thread-dev, libeigen3-dev, mitsuba diff --git a/data/linux/fedora/mitsuba.spec b/data/linux/fedora/mitsuba.spec index e326f57a..3f076370 100644 --- a/data/linux/fedora/mitsuba.spec +++ b/data/linux/fedora/mitsuba.spec @@ -1,7 +1,7 @@ Name: mitsuba -Version: 0.4.1 +Version: 0.4.4 Release: 1%{?dist} -Summary: Mitsuba renderer +Summary: Mitsuba renderer Group: Applications/Graphics License: GPL-3 URL: http://www.mitsuba-renderer.org @@ -10,11 +10,11 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) BuildRequires: gcc-c++ scons boost-devel qt4-devel OpenEXR-devel xerces-c-devel python-devel glew-devel collada-dom-devel eigen3-devel Requires: boost qt4 OpenEXR-libs xerces-c python libGLEWmx collada-dom %description -Mitsuba is an extensible rendering framework written in portable C++. It implements unbiased as well as biased techniques and contains heavy optimizations targeted towards current CPU architectures. +Mitsuba is an extensible rendering framework written in portable C++. It implements unbiased as well as biased techniques and contains heavy optimizations targeted towards current CPU architectures. The program currently runs on Linux, MacOS X and Microsoft Windows and makes use of SSE2 optimizations on x86 and x86_64 platforms. So far, its main use has been as a testbed for algorithm development in computer graphics, but there are many other interesting applications. -Mitsuba comes with a command-line interface as well as a graphical frontend to interactively explore scenes. While navigating, a rough preview is shown that becomes increasingly accurate as soon as all movements are stopped. Once a viewpoint has been chosen, a wide range of rendering techniques can be used to generate images, and their parameters can be tuned from within the program. +Mitsuba comes with a command-line interface as well as a graphical frontend to interactively explore scenes. While navigating, a rough preview is shown that becomes increasingly accurate as soon as all movements are stopped. 
Once a viewpoint has been chosen, a wide range of rendering techniques can be used to generate images, and their parameters can be tuned from within the program. %package devel Summary: Mitsuba development files Requires: boost-devel qt4-devel OpenEXR-devel xerces-c-devel python-devel glew-devel collada-dom-devel @@ -35,13 +35,14 @@ mkdir -p $RPM_BUILD_ROOT/usr/share/mitsuba/plugins mkdir -p $RPM_BUILD_ROOT/usr/share/pixmaps mkdir -p $RPM_BUILD_ROOT/usr/share/applications mkdir -p $RPM_BUILD_ROOT/usr/include -strip dist/lib* dist/mtsgui dist/mitsuba dist/mtssrv dist/mtsutil +strip dist/lib* dist/mtsgui dist/mitsuba dist/mtssrv dist/mtsutil dist/mtsimport strip dist/plugins/* dist/python/*/* cp dist/libmitsuba-*.so $RPM_BUILD_ROOT%{_libdir} cp dist/mtsgui $RPM_BUILD_ROOT%{_bindir} cp dist/mitsuba $RPM_BUILD_ROOT%{_bindir} cp dist/mtssrv $RPM_BUILD_ROOT%{_bindir} cp dist/mtsutil $RPM_BUILD_ROOT%{_bindir} +cp dist/mtsimport $RPM_BUILD_ROOT%{_bindir} cp dist/python/2.7/mitsuba.so $RPM_BUILD_ROOT%{_libdir}/python2.7/lib-dynload cp dist/plugins/* $RPM_BUILD_ROOT/usr/share/mitsuba/plugins cp -Rdp dist/data $RPM_BUILD_ROOT/usr/share/mitsuba/data @@ -62,6 +63,15 @@ rm -rf $RPM_BUILD_ROOT /usr/include/* %changelog +* Thu Feb 28 2013 Wenzel Jakob 0.4.4%{?dist} +- Upgrade to version 0.4.4 + +* Tue Jan 29 2013 Wenzel Jakob 0.4.3%{?dist} +- Upgrade to version 0.4.3 + +* Wed Oct 31 2012 Wenzel Jakob 0.4.2%{?dist} +- Upgrade to version 0.4.2 + * Wed Oct 10 2012 Wenzel Jakob 0.4.1%{?dist} - Upgrade to version 0.4.1 diff --git a/data/linux/mitsuba.desktop b/data/linux/mitsuba.desktop index 80b80205..ac7d81a1 100644 --- a/data/linux/mitsuba.desktop +++ b/data/linux/mitsuba.desktop @@ -10,5 +10,5 @@ Exec=mtsgui %U TryExec=mtsgui Terminal=false StartupNotify=true -MimeType=application/xml -Icon=mitsuba48.png +MimeType=application/xml;image/x-exr;image/x-hdr; +Icon=mitsuba48 diff --git a/data/pch/mitsuba_precompiled.hpp b/data/pch/mitsuba_precompiled.hpp index cbe1e530..97b8f846 100644 --- a/data/pch/mitsuba_precompiled.hpp +++ b/data/pch/mitsuba_precompiled.hpp @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include diff --git a/data/schema/scene.xsd b/data/schema/scene.xsd index 1a073b82..18de916b 100644 --- a/data/schema/scene.xsd +++ b/data/schema/scene.xsd @@ -26,7 +26,7 @@ - + @@ -43,6 +43,7 @@ + @@ -50,14 +51,14 @@ - + - + @@ -140,7 +141,7 @@ - + @@ -297,7 +298,7 @@ - + @@ -314,6 +315,23 @@ + + + + + + + + + + + + + + + + + diff --git a/data/windows/getdate.c b/data/windows/getdate.c new file mode 100644 index 00000000..54ce6852 --- /dev/null +++ b/data/windows/getdate.c @@ -0,0 +1,52 @@ +/*============================================================================ + HDRITools - High Dynamic Range Image Tools + Copyright 2008-2011 Program of Computer Graphics, Cornell University + + Distributed under the OSI-approved MIT License (the "License"); + see accompanying file LICENSE for details. + + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the License for more information. 
+ ----------------------------------------------------------------------------- + Primary author: + Edgar Velazquez-Armendariz +============================================================================*/ + +#include +#include + +int main(int argc, char **argv) +{ + time_t ltime; + struct tm *today; + FILE *of; +#if _MSC_VER >= 1400 + struct tm timebuf; +#endif + + if (argc != 2) { + of = stdout; + } else { +#if _MSC_VER >= 1400 + if (fopen_s(&of, argv[1], "w") != 0) return 3; +#else + of = fopen(argv[1], "w"); + if (!of) return 3; +#endif + } + + time(<ime); +#if _MSC_VER >= 1400 + if (localtime_s(&timebuf, <ime) != 0) return 1; + today = &timebuf; +#else + today = localtime(<ime); + if (!today) return 1; +#endif + + fprintf(of, "%d.%02d.%02d", (today->tm_year + 1900), + (today->tm_mon + 1), today->tm_mday); + if (of != stdout) fclose(of); + return 0; +} diff --git a/data/windows/getdate.exe b/data/windows/getdate.exe new file mode 100644 index 00000000..9cd66244 Binary files /dev/null and b/data/windows/getdate.exe differ diff --git a/data/windows/wmain_stub.cpp b/data/windows/wmain_stub.cpp new file mode 100644 index 00000000..3580bbad --- /dev/null +++ b/data/windows/wmain_stub.cpp @@ -0,0 +1,110 @@ +/* + This file is part of Mitsuba, a physically based rendering system. + + Copyright (c) 2007-2012 by Wenzel Jakob and others. + + Mitsuba is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License Version 3 + as published by the Free Software Foundation. + + Mitsuba is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
+*/ + +#include +#if defined(__WINDOWS__) + +// Stub for generating UTF-8 command line arguments from wmain (UTF-16) +#include + +extern int mts_main(int argc, char **argv); + + +namespace { + +class ArgsUTF8 { +public: + ArgsUTF8(int argc, wchar_t *wargv[]) : + m_argc(-1), m_argv(NULL), m_data(NULL) + { + if (argc > 0) + m_argc = argc; + else + return; + + m_argv = new char*[argc]; + int total = 0; + + // Pass 1: get the lengths of each converted string an allocate data + for (int i = 0; i < argc; ++i) { + const int lenUtf8 = WideCharToMultiByte(CP_UTF8, 0, + wargv[i], -1, NULL, 0, NULL, NULL); + if (lenUtf8 != 0) { + total += lenUtf8; + m_argv[i] = reinterpret_cast(lenUtf8); + } else { + m_argc = i; + break; + } + } + + if (m_argc < 1) + return; + + m_data = new char[total]; + int currOffset = 0; + + // Pass 2: perform the conversion + for (int i = 0; i < m_argc; ++i) { + int lenUtf8 = reinterpret_cast(m_argv[i]); + m_argv[i] = m_data + currOffset; + lenUtf8 = WideCharToMultiByte(CP_UTF8, 0, + wargv[i], -1, m_argv[i], lenUtf8, NULL, NULL); + if (lenUtf8 != 0) { + currOffset += lenUtf8; + } else { + m_argc = i; + return; + } + } + } + + ~ArgsUTF8() { + if (m_argv != NULL) { + delete [] m_argv; + } + if (m_data != NULL) { + delete [] m_data; + } + } + + inline int argc() const { + return m_argc; + } + + inline char** argv() const { + return m_argv; + } + +private: + int m_argc; + char** m_argv; + char* m_data; +}; + +} // namespace + + +// MSDN Documentation: +// http://msdn.microsoft.com/en-US/library/fzc2cy7w%28v=vs.110%29.aspx +int wmain(int argc, wchar_t *wargv[], wchar_t *envp[]) { + ArgsUTF8 argsUTF8(argc, wargv); + return mts_main(argsUTF8.argc(), argsUTF8.argv()); +} + +#endif // __WINDOWS__ diff --git a/doc/Makefile b/doc/Makefile index c64ad654..41c41ab1 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,2 +1,7 @@ all: ./gendoc.py + +clean: + $(RM) main.pdf + $(RM) plugins_generated.tex + $(RM) *.aux main.bbl main.blg main.log main.out main.toc diff --git a/doc/acknowledgements.tex b/doc/acknowledgements.tex index 2828ff2e..7ebe70a3 100644 --- a/doc/acknowledgements.tex +++ b/doc/acknowledgements.tex @@ -1,11 +1,11 @@ \section{Acknowledgments} I am indebted to my advisor Steve Marschner for allowing me to devote -a significant amount of my research time to this project. His insightful and +a significant amount of my research time to this project. His insightful and encouraging suggestions have helped transform this program into much more than I ever thought it would be. -The architecture of Mitsuba as well as some individual components are based on -implementations discussed in: \emph{Physically Based Rendering - From Theory +The architecture of Mitsuba as well as some individual components are based on +implementations discussed in: \emph{Physically Based Rendering - From Theory To Implementation} by Matt Pharr and Greg Humphreys. Some of the GUI icons were taken from the Humanity icon set by Canonical Ltd. @@ -13,10 +13,10 @@ The material test scene was created by Jonas Pilo, and the environment map it uses is courtesy of Bernhard Vogl. The included index of refraction data files for conductors are copied from -PBRT. They are originally from the Luxpop database (\url{www.luxpop.com}) +PBRT. They are originally from the Luxpop database (\url{www.luxpop.com}) and are based on data by Palik et al. 
\cite{Palik1998Handbook} and measurements of atomic scattering factors made by the Center For -X-Ray Optics (CXRO) at Berkeley and the Lawrence Livermore National +X-Ray Optics (CXRO) at Berkeley and the Lawrence Livermore National Laboratory (LLNL). The following people have kindly contributed code or bugfixes: @@ -28,7 +28,7 @@ The following people have kindly contributed code or bugfixes: \item Leonhard Gr\"unschlo\ss \end{itemize} -Mitsuba makes heavy use of the following amazing libraries and tools: +Mitsuba makes heavy use of the following amazing libraries and tools: \begin{itemize} \item Qt 4 by Digia \item OpenEXR by Industrial Light \& Magic diff --git a/doc/basics.tex b/doc/basics.tex index b344b23f..fccf5bcf 100644 --- a/doc/basics.tex +++ b/doc/basics.tex @@ -4,18 +4,18 @@ The rendering functionality of Mitsuba can be accessed through a command line interface and an interactive Qt-based frontend. This section provides some basic instructions on how to use them. \subsection{Interactive frontend} -To launch the interactive frontend, run \code{Mitsuba.app} on MacOS, +To launch the interactive frontend, run \code{Mitsuba.app} on MacOS, \code{mtsgui.exe} on Windows, and \code{mtsgui} on Linux (after sourcing \code{setpath.sh}). You can also drag and drop scene files onto the application icon or the running program to open them. A quick video tutorial on using the GUI can be found here: \url{http://vimeo.com/13480342}. \subsection{Command line interface} \label{sec:mitsuba} -The \texttt{mitsuba} binary is an alternative non-interactive rendering +The \texttt{mitsuba} binary is an alternative non-interactive rendering frontend for command-line usage and batch job operation. To get a listing of the parameters it supports, run the executable without parameters: \begin{shell} -$\texttt{\$}$ mitsuba +$\texttt{\$}$ mitsuba \end{shell} \begin{console}[label=lst:mitsuba-cli,caption=Command line options of the \texttt{mitsuba} binary] Mitsuba version $\texttt{\MitsubaVersion}$, Copyright (c) $\texttt{\MitsubaYear}$ Wenzel Jakob @@ -75,18 +75,18 @@ mode of operation is to render a single scene, which is provided as a parameter, $\texttt{\$}$ mitsuba path-to/my-scene.xml \end{shell} It is also possible to connect to network render nodes, which essentially lets Mitsuba parallelize -over additional cores. To do this, pass a semicolon-separated list of machines to -the \code{-c} parameter. +over additional cores. To do this, pass a semicolon-separated list of machines to +the \code{-c} parameter. \begin{shell} $\texttt{\$}$ mitsuba -c machine1;machine2;... path-to/my-scene.xml \end{shell} There are two different ways in which you can access render nodes: \begin{itemize} \item\textbf{Direct}: Here, you create a direct connection to a running \code{mtssrv} instance on -another machine (\code{mtssrv} is the Mitsuba server process). From the the performance -standpoint, this approach should always be preferred over the SSH method described below when there is +another machine (\code{mtssrv} is the Mitsuba server process). From the the performance +standpoint, this approach should always be preferred over the SSH method described below when there is a choice between them. There are some disadvantages though: first, you need to manually start -\code{mtssrv} on every machine you want to use. +\code{mtssrv} on every machine you want to use. And perhaps more importantly: the direct communication protocol makes no provisions for a malicious user on the remote side. 
It is too costly @@ -98,11 +98,11 @@ For direct connections, you can specify the remote port as follows: \begin{shell} $\texttt{\$}$ mitsuba -c machine:1234 path-to/my-scene.xml \end{shell} -When no port is explicitly specified, Mitsuba uses default value of 7554. -\item \textbf{SSH}: - This approach works as follows: The renderer creates a SSH connection +When no port is explicitly specified, Mitsuba uses default value of 7554. +\item \textbf{SSH}: + This approach works as follows: The renderer creates a SSH connection to the remote side, where it launches a Mitsuba worker instance. - All subsequent communication then passes through the encrypted link. + All subsequent communication then passes through the encrypted link. This is completely secure but slower due to the encryption overhead. If you are rendering a complex scene, there is a good chance that it won't matter much since most time is spent doing computations rather than @@ -119,18 +119,18 @@ $\texttt{\$}$ mitsuba -c username@machine path-to/my-scene.xml \begin{shell} $\texttt{\$}$ mitsuba -c username@machine:/opt/mitsuba path-to/my-scene.xml \end{shell} - For the SSH connection approach to work, you \emph{must} enable passwordless + For the SSH connection approach to work, you \emph{must} enable passwordless authentication. - Try opening a terminal window and running the command \code{ssh username@machine} + Try opening a terminal window and running the command \code{ssh username@machine} (replace with the details of your remote connection). - If you are asked for a password, something is not set up correctly --- please see + If you are asked for a password, something is not set up correctly --- please see \url{http://www.debian-administration.org/articles/152} for instructions. On Windows, the situation is a bit more difficult since there is no suitable SSH client by default. To get SSH connections to work, Mitsuba requires \code{plink.exe} (from PuTTY) to be on the path. For passwordless authentication with a Linux/OSX-based server, convert your private key to PuTTY's format using \code{puttygen.exe}. - Afterwards, start \code{pageant.exe} to load and authenticate the key. All + Afterwards, start \code{pageant.exe} to load and authenticate the key. All of these binaries are available from the PuTTY website. It is possible to mix the two approaches to access some machines directly and others @@ -152,7 +152,7 @@ machine3.domain.org:7346 Any attribute in the XML-based scene description language can be parameterized from the command line. 
For instance, you can render a scene several times with different reflectance values -on a certain material by changing its description to something like +on a certain material by changing its description to something like \begin{xml} @@ -160,28 +160,28 @@ on a certain material by changing its description to something like \end{xml} and running Mitsuba as follows: \begin{shell} -$\texttt{\$}$ mitsuba -Dreflectance=0.1 -o ref_0.1.exr scene.xml -$\texttt{\$}$ mitsuba -Dreflectance=0.2 -o ref_0.2.exr scene.xml -$\texttt{\$}$ mitsuba -Dreflectance=0.5 -o ref_0.5.exr scene.xml +$\texttt{\$}$ mitsuba -Dreflectance=0.1 -o ref_0.1.exr scene.xml +$\texttt{\$}$ mitsuba -Dreflectance=0.2 -o ref_0.2.exr scene.xml +$\texttt{\$}$ mitsuba -Dreflectance=0.5 -o ref_0.5.exr scene.xml \end{shell} \subsubsection{Writing partial images to disk} -When doing lengthy command line renders on Linux or OSX, it is possible -to send a signal to the process using -\begin{shell} +When doing lengthy command line renders on Linux or OSX, it is possible +to send a signal to the process using +\begin{shell} $\texttt{\$}$ killall -HUP mitsuba \end{shell} -This causes the renderer to write out the partially finished -image, after which it continues rendering. This can sometimes be useful to +This causes the renderer to write out the partially finished +image, after which it continues rendering. This can sometimes be useful to check if everything is working correctly. \subsubsection{Rendering an animation} The command line interface is ideally suited for rendering large amounts of files in batch -operation. You can simply pass in the files using a wildcard in the filename. +operation. You can simply pass in the files using a wildcard in the filename. -If you've already rendered a subset of the frames and you only want to complete the remainder, -add the \texttt{-x} flag, and all files with existing output will be skipped. You can also -let the scheduler work on several scenes at once using the \texttt{-j} parameter --- this is +If you've already rendered a subset of the frames and you only want to complete the remainder, +add the \texttt{-x} flag, and all files with existing output will be skipped. You can also +let the scheduler work on several scenes at once using the \texttt{-j} parameter --- this is especially useful when parallelizing over multiple machines: as some of the participating machines finish rendering the current frame, they can immediately start working on the next one instead of having to wait for all other cores to finish. Altogether, you @@ -189,6 +189,13 @@ might start the with parameters such as the following \begin{shell} $\texttt{\$}$ mitsuba -xj 2 -c machine1;machine2;... animation/frame_*.xml \end{shell} +Note that this requires a shell capable of expanding the asterisk into a list of +filenames. The default Windows shell \code{cmd.exe} does not do this---however, +the PowerShell supports the following syntax: +\begin{shell} +dir frame_*.xml | % $\texttt{\{}$ $\texttt{\$\_}$ $\texttt{\}}$ +\end{shell} + \subsection{Direct connection server} \label{sec:mtssrv} A Mitsuba compute node can be created using the \code{mtssrv} executable. By default, @@ -206,26 +213,26 @@ $\texttt{\$}$ mtssrv -i maxwell.cs.cornell.edu \end{shell} As advised in Section~\ref{sec:mitsuba}, it is advised to run \code{mtssrv} \emph{only} in trusted networks. 
-One nice feature of \code{mtssrv} is that it (like the \code{mitsuba} executable) -also supports the \code{-c} and \code{-s} parameters, which create connections +One nice feature of \code{mtssrv} is that it (like the \code{mitsuba} executable) +also supports the \code{-c} and \code{-s} parameters, which create connections to additional compute servers. Using this feature, one can create hierarchies of compute nodes. For instance, -the root \code{mttsrv} instance of such a hierarchy could share its work with a -number of other machines running \code{mtssrv}, and each of these might also +the root \code{mtssrv} instance of such a hierarchy could share its work with a +number of other machines running \code{mtssrv}, and each of these might also share their work with further machines, and so on... -The parallelization over such hierarchies happens transparently---when -connecting a renderering process to the root node, it sees a machine +The parallelization over such hierarchies happens transparently---when +connecting a rendering process to the root node, it sees a machine with hundreds or thousands of cores, to which it can submit work without -needing to worry about how exactly it is going to be spread out in +needing to worry about how exactly it is going to be spread out in the hierarchy. Such hierarchies are mainly useful to reduce communication bottlenecks when distributing large resources (such as scenes) to remote machines. Imagine the following hypothetical scenario: -you would like to render a 50MB-sized scene while at home, but rendering is too slow. +you would like to render a 50MB-sized scene while at home, but rendering is too slow. You decide to tap into some extra machines available at your workplace, but this usually doesn't make things much faster because of the relatively slow broadband -connection and the need to transmit your scene to every single compute node involved. +connection and the need to transmit your scene to every single compute node involved. Using \code{mtssrv}, you can instead designate a central scheduling node at your workplace, which accepts connections and delegates @@ -233,10 +240,10 @@ rendering tasks to the other machines. In this case, you will only have to trans and the remaining distribution happens over the fast local network at your workplace. \subsection{Utility launcher} \label{sec:mtsutil} -When working on a larger project, one often needs to implement various utility programs that +When working on a larger project, one often needs to implement various utility programs that perform simple tasks, such as applying a filter to an image or processing -a matrix stored in a file. In a framework like Mitsuba, this unfortunately involves -a significant coding overhead in initializing the necessary APIs on all supported platforms. +a matrix stored in a file. In a framework like Mitsuba, this unfortunately involves +a significant coding overhead in initializing the necessary APIs on all supported platforms. To reduce this tedious work on the side of the programmer, Mitsuba comes with a utility launcher called \code{mtsutil}. @@ -250,7 +257,7 @@ For a listing of all supported options and utilities, enter the command without \label{sec:tonemapper} One particularly useful utility that shall be mentioned here is the batch tonemapper, which loads EXR/RGBE images and writes tonemapped 8-bit PNG/JPGs.
This can save much time when one has to -process many high dynamic-range images such as animation frames using the same basic operations, +process many high dynamic-range images such as animation frames using the same basic operations, e.g. gamma correction, changing the overall brightness, resizing, cropping, etc. The available command line options are shown in \lstref{tonemap-cli}. @@ -282,14 +289,14 @@ Options/Arguments: between [0, 1] chooses between low and high-key images and 'burn' (also [0, 1]) controls how much highlights may burn out - -x Temporal coherence mode: activate this flag when tonemapping + -x Temporal coherence mode: activate this flag when tonemapping frames of an animation using the '-p' option to avoid flicker -o file Save the output with a given filename -t Multithreaded: process several files in parallel - The operations are ordered as follows: 1. crop, 2. resize, 3. color-balance, - 4. tonemap, 5. annotate. To simply process a directory full of EXRs in + The operations are ordered as follows: 1. crop, 2. resize, 3. color-balance, + 4. tonemap, 5. annotate. To simply process a directory full of EXRs in parallel, run the following: 'mtsutil tonemap -t path-to-directory/*.exr' \end{console} diff --git a/doc/compiling.tex b/doc/compiling.tex index 353cfc71..6faa5c36 100644 --- a/doc/compiling.tex +++ b/doc/compiling.tex @@ -1,23 +1,23 @@ \section{Compiling the renderer} \label{sec:compiling} -To compile Mitsuba, you will need a recent C++ compiler (e.g. GCC 4.2+ or -Visual Studio 2010) and some additional libraries, which Mitsuba uses internally. +To compile Mitsuba, you will need a recent C++ compiler (e.g. GCC 4.2+ or +Visual Studio 2010) and some additional libraries, which Mitsuba uses internally. Builds on all supported platforms are done using a unified system -based on SCons (\url{http://www.scons.org}), which is a Python-based -software construction tool. The exact process is different depending on +based on SCons (\url{http://www.scons.org}), which is a Python-based +software construction tool. The exact process is different depending on which operating system is used and will be explained in the following subsections. \subsection{Common steps} To get started, you will need to download a recent version of the Mitsuba source code. Before -doing this, ensure that you have read the licensing agreement -(Section~\ref{sec:license}), and that you abide by its contents. Note that, being a ``viral'' +doing this, ensure that you have read the licensing agreement +(Section~\ref{sec:license}), and that you abide by its contents. Note that, being a ``viral'' license, the GPL automatically applies to derivative work. Amongst other things, this means that Mitsuba's source code is \emph{off-limits} to those who develop rendering software not distributed under a compatible license. -Check that the Mercurial (\url{http://mercurial.selenic.com/}) versioning -system\footnote{On Windows, you might want to use the convenient TortoiseHG shell -extension (\url{http://tortoisehg.bitbucket.org/}) to run the subsequent steps directly from the Explorer.} +Check that the Mercurial (\url{http://mercurial.selenic.com/}) versioning +system\footnote{On Windows, you might want to use the convenient TortoiseHG shell +extension (\url{http://tortoisehg.bitbucket.org/}) to run the subsequent steps directly from the Explorer.} is installed, which is required to fetch the most recent source code release. 
Begin by entering the following at the command prompt (or run an equivalent command from a graphical Mercurial frontend): \begin{shell} @@ -38,7 +38,7 @@ will run extremely slowly. Its main use is to track down elusive bugs. \paragraph{Windows:} On Windows, builds can either be performed using the Visual Studio 2010\footnote{No other Visual Studio versions are currently supported.} compiler or Intel XE Composer (on top of Visual Studio 2010). -Note that Visual Studio 2010 Service Pack 1 \emph{must} be installed or the resulting binaries will crash. +Note that Visual Studio 2010 Service Pack 1 \emph{must} be installed or the resulting binaries will crash. \begin{description} \item[\code{build/config-\{win32, win64\}-\{msvc2010, msvc2010-debug\}.py}:] Create 32 or 64 bit binaries using Microsoft Visual C++ version 2010. The configurations with the suffix \code{-debug} will include debug symbols in all binaries, which run very slowly. @@ -66,7 +66,7 @@ $\texttt{\$}$ cp build/config-linux-gcc.py config.py \subsection{Compilation flags} \label{sec:compiling-flags} There are several flags that affect the behavior of Mitsuba and must be specified at compile time. -These usually don't need to be changed, but if you want to compile Mitsuba for spectral rendering, or +These usually don't need to be changed, but if you want to compile Mitsuba for spectral rendering, or to use double precision for internal computations then the following may be useful. Otherwise, you may skip ahead to the subsection that covers your operating system. @@ -77,13 +77,13 @@ The following options are available: enabled by default (even in release builds). \item[\texttt{MTS\_KD\_DEBUG}] Enable additional checks in the kd-tree. This is quite slow and mainly useful to track down bugs when they are suspected. -\item[\texttt{MTS\_KD\_CONSERVE\_MEMORY}] Use a more compact representation +\item[\texttt{MTS\_KD\_CONSERVE\_MEMORY}] Use a more compact representation for triangle geometry (at the cost of speed). This flag causes Mitsuba to use the somewhat slower Moeller-Trumbore triangle intersection method instead of the default Wald intersection test, which has an overhead of 48 bytes per triangle. Off by default. \item[\texttt{MTS\_SSE}]Activate optimized SSE routines. On by default. -\item[\texttt{MTS\_HAS\_COHERENT\_RT}]Include coherent ray tracing support (depends on \texttt{MTS\_SSE}). This flag is activated by default. +\item[\texttt{MTS\_HAS\_COHERENT\_RT}]Include coherent ray tracing support (depends on \texttt{MTS\_SSE}). This flag is activated by default. \item[\texttt{MTS\_DEBUG\_FP}]Generated NaNs and overflows will cause floating point exceptions, which can be caught in a debugger. This is slow and mainly meant as a debugging tool for developers. Off by default. \item[\texttt{SPECTRUM\_SAMPLES=}$\langle ..\rangle$]This setting defines the number of spectral samples (in the 368-830 $nm$ range) that are used to render scenes. The default is 3 samples, in which case the renderer automatically turns into an RGB-based system. For high-quality spectral rendering, this should be set to 30 or higher. Refer also to \secref{colorspaces}. @@ -95,24 +95,24 @@ fallback instead of the hardware-accelerated realtime preview. This is useful when the binary will be executed over a remote link using a protocol such as RDP (which does not provide the requisite OpenGL features). 
\end{description} -All of the default configurations files located in the \texttt{build} directory use the flags +All of the default configurations files located in the \texttt{build} directory use the flags \code{SINGLE\_PRECISION}, \code{SPECTRUM\_SAMPLES=3}, \code{MTS\_DEBUG}, \code{MTS\_SSE}, as well as \code{MTS\_HAS\_COHERENT\_RT}. \subsection{Building on Debian or Ubuntu Linux} \label{sec:compiling-ubuntu} -You'll first need to install a number of dependencies. It is assumed here that you are using a -recent version of Ubuntu Linux (Precise Pangolin / 12.04 LTS or later), hence some of the +You'll first need to install a number of dependencies. It is assumed here that you are using a +recent version of Ubuntu Linux (Precise Pangolin / 12.04 LTS or later), hence some of the package may be named differently if you are using Debian Linux or another Ubuntu version. First, run \begin{shell} -$\text{\$}$ sudo apt-get install build-essential scons mercurial qt4-dev-tools libpng12-dev - libjpeg62-dev libilmbase-dev libxerces-c-dev libboost-all-dev +$\text{\$}$ sudo apt-get install build-essential scons mercurial qt4-dev-tools libpng12-dev + libjpeg62-dev libilmbase-dev libxerces-c-dev libboost-all-dev libopenexr-dev libglewmx1.5-dev libxxf86vm-dev libpcrecpp0 libeigen3-dev \end{shell} -To get COLLADA support, you will also need to install the \texttt{collada-dom} packages -or build them from scratch. Here, we install the \code{x86\_64} binaries and development +To get COLLADA support, you will also need to install the \texttt{collada-dom} packages +or build them from scratch. Here, we install the \code{x86\_64} binaries and development headers that can be found on the Mitsuba website (at \url{http://www.mitsuba-renderer.org/releases/current}) \begin{shell} $\text{\$}$ sudo dpkg --install collada-dom_*.deb @@ -121,7 +121,7 @@ To start a regular build, run \begin{shell} $\text{\$}$ scons \end{shell} -inside the Mitsuba directory. In the case that you have multiple processors, you might want to parallelize the +inside the Mitsuba directory. In the case that you have multiple processors, you might want to parallelize the build by appending \code{-j }\emph{core count} to the \code{scons} command. If all goes well, SCons should finish successfully within a few minutes: \begin{shell} @@ -129,16 +129,15 @@ scons: $\texttt{done}$ building targets. \end{shell} To run the renderer from the command line, you first have to import it into your shell environment: \begin{shell} -$\text{\$}$ . setpath.sh +$\text{\$}$ source setpath.sh \end{shell} -(note the period at the beginning -- this assumes that you are using \code{bash}). Having set up everything, you can now move on to \secref{basics}. \subsubsection{Creating Debian or Ubuntu Linux packages} The preferred way of redistristributing executables on Debian or Ubuntu Linux is to create \code{.deb} package files. To make custom Mitsuba packages, it is strongly recommended -that you work with a pristine installation of the target operating system\footnote{Several commercial graphics -drivers ``pollute'' the OpenGL setup so that the compiled Mitsuba binaries -can only be used on machines using the same drivers. For this reason, it is +that you work with a pristine installation of the target operating system\footnote{Several commercial graphics +drivers ``pollute'' the OpenGL setup so that the compiled Mitsuba binaries +can only be used on machines using the same drivers. For this reason, it is better to work from a clean boostrapped install.}. 
This can be done as follows: first, install \code{debootstrap} and download the most recent operating system release to a subdirectory. The following example is based on Ubuntu 12.04 LTS (``Precise Pangolin''), @@ -169,18 +168,18 @@ $\text{\$}$ dpkg-buildpackage -nc After everything has been built, you should find the created package files in the root directory. \subsubsection{Releasing Ubuntu packages} -To redistribute Ubuntu packages over the Internet or a local network, it is convenient to +To redistribute Ubuntu packages over the Internet or a local network, it is convenient to put them into an \code{apt}-compatible repository. To prepare such a -repository, put the two \code{deb}-files built in the last section, -as well as the \code{collada-dom} \code{deb}-files into a public directory +repository, put the two \code{deb}-files built in the last section, +as well as the \code{collada-dom} \code{deb}-files into a public directory made available by a HTTP server and inside it, run \begin{shell} path-to-htdocs$\text{\$}$ dpkg-scanpackages path/to/deb-directory /dev/null | gzip -9c > path/to/deb-directory/Packages.gz \end{shell} This will create a respository index file named \code{Packages.gz}. Note that you must execute this command in the root directory of the -HTTP server's web directory and provide the relative path to the -package files -- otherwise, the index file will specify the wrong package +HTTP server's web directory and provide the relative path to the +package files -- otherwise, the index file will specify the wrong package paths. Finally, the whole directory can be uploaded to some public location and then referenced by placing a line following the pattern \begin{shell} @@ -190,7 +189,7 @@ into the \code{/etc/apt/sources.list} file. This setup is convenient for distributing a custom Mitsuba build to many Debian or Ubuntu machines running (e.g. to nodes in a rendering cluster). \subsection{Building on Fedora Core} You'll first need to install a number of dependencies. It is assumed here -that you are using FC15, hence some of the package may be named differently if you are +that you are using FC15, hence some of the package may be named differently if you are using another version. First, run @@ -208,9 +207,8 @@ scons: $\texttt{done}$ building targets. \end{shell} To run the renderer from the command line, you first have to import it into your shell environment: \begin{shell} -$\text{\$}$ . setpath.sh +$\text{\$}$ source setpath.sh \end{shell} -(note the period at the beginning -- this assumes that you are using \code{bash}). Having set up everything, you can now move on to \secref{basics}. \subsubsection{Creating Fedora Core packages} To create \code{RPM} packages, you will need to install the \code{RPM} development tools: @@ -232,7 +230,7 @@ $\text{\$}$ rpmbuild -bb mitsuba-$\code{\MitsubaVersion}$/data/linux/fedora/mits \end{shell} After this command finishes, its output can be found in the directory \code{rpmbuild/RPMS}. \subsection{Building on Arch Linux} -You'll first need to install a number of dependencies: +You'll first need to install a number of dependencies: \begin{shell} $\text{\$}$ sudo pacman -S gcc xerces-c glew openexr boost libpng libjpeg qt scons mercurial python \end{shell} @@ -261,9 +259,9 @@ scons: $\texttt{done}$ building targets. \end{shell} To run the renderer from the command line, you first have to import it into your shell environment: \begin{shell} -$\text{\$}$ . 
setpath.sh +$\text{\$}$ source setpath.sh \end{shell} -(note the period at the beginning -- this assumes that you are using \code{bash}). + Having set up everything, you can now move on to \secref{basics}. \subsubsection{Creating Arch Linux packages} Mitsuba ships with a \code{PKGBUILD} file, which automatically builds @@ -292,16 +290,16 @@ There are a few other things that need to be set up: make sure that your installation of Visual Studio is up to date, since Mitsuba binaries created with versions prior to Service Pack 1 will crash. -Next, you will need to install Python 2.6.x -(\url{www.python.org}) and SCons\footnote{Note that on some Windows machines, the SCons -installer generates a warning about not finding Python in the registry. In this case, you -can instead run \code{python setup.py install} within the source release of SCons.} -(\url{http://www.scons.org}, any 2.x version will do) and ensure that they are contained in the \code{\%PATH\%} -environment variable so that entering \code{scons} on the command prompt (\code{cmd.exe}) +Next, you will need to install Python 2.6.x +(\url{www.python.org}) and SCons\footnote{Note that on some Windows machines, the SCons +installer generates a warning about not finding Python in the registry. In this case, you +can instead run \code{python setup.py install} within the source release of SCons.} +(\url{http://www.scons.org}, any 2.x version will do) and ensure that they are contained in the \code{\%PATH\%} +environment variable so that entering \code{scons} on the command prompt (\code{cmd.exe}) launches the build system. -Having installed all dependencies, run the ``Visual Studio \emph{2010} Command -Prompt'' from the Start Menu (\code{x86} for 32-bit or \code{x64} for 64bit), +Having installed all dependencies, run the ``Visual Studio \emph{2010} Command +Prompt'' from the Start Menu (\code{x86} for 32-bit or \code{x64} for 64bit), navigate to the Mitsuba directory, and simply run \begin{shell} C:\mitsuba\>scons @@ -309,17 +307,17 @@ C:\mitsuba\>scons In the case that you have multiple processors, you might want to parallelize the build by appending the option \code{-j }\emph{core count} to the \code{scons} command. If all goes well, the build process will finish successfully after a few -minutes. \emph{Note} that in comparison to the other platforms, you don't have to run the \code{setpath.sh} script at this point. +minutes. \emph{Note} that in comparison to the other platforms, you don't have to run the \code{setpath.sh} script at this point. All binaries are automatically copied into the \code{dist} directory, and they should be executed directly from there. \subsubsection{Integration with the Visual Studio interface} -Basic Visual Studio 2010 integration with support for code completion -exists for those who develop Mitsuba code on Windows. -To use the supplied projects, simply double-click on one of the two files \code{build/mitsuba-msvc2010.sln} +Basic Visual Studio 2010 integration with support for code completion +exists for those who develop Mitsuba code on Windows. +To use the supplied projects, simply double-click on one of the two files \code{build/mitsuba-msvc2010.sln} and \code{build/mitsuba-msvc2010.sln}. These Visual Studio projects still internally -use the SCons-based build system to compile Mitsuba; whatever +use the SCons-based build system to compile Mitsuba; whatever build configuration is selected within Visual Studio will be used to pick a matching -configuration file from the \texttt{build} directory. 
+configuration file from the \texttt{build} directory. \subsection{Building on Mac OS X} \vspace{-5mm} @@ -332,13 +330,13 @@ configuration file from the \texttt{build} directory. Compiling Mitsuba's dependencies on Mac OS is a laborious process; for convenience, there is a repository that provides them in precompiled form. To use this repository, clone it using Mercurial and rename the directory so that it forms the \code{dependencies} subdirectory -inside the main Mitsuba directory, i.e. run something like +inside the main Mitsuba directory, i.e. run something like \begin{shell} $\text{\$}$ cd mitsuba $\text{\$}$ hg clone https://www.mitsuba-renderer.org/hg/dependencies_macos $\text{\$}$ mv dependencies_macos dependencies \end{shell} -You will also need to install SCons (>2.0.0, available at \code{www.scons.org}) and +You will also need to install SCons (>2.0.0, available at \code{www.scons.org}) and a recent release of XCode, including its command-line compilation tools. Next, run \begin{shell} $\text{\$}$ scons @@ -350,7 +348,6 @@ scons: $\texttt{done}$ building targets. \end{shell} To run the renderer from the command line, you first have to import it into your shell environment: \begin{shell} -$\text{\$}$ . setpath.sh +$\text{\$}$ source setpath.sh \end{shell} -(note the period at the beginning -- this assumes that you are using \code{bash}). diff --git a/doc/development.tex b/doc/development.tex index 228576b2..aae78c42 100644 --- a/doc/development.tex +++ b/doc/development.tex @@ -1,7 +1,7 @@ \part{Development guide} This chapter and the subsequent ones will provide an overview -of the the coding conventions and general architecture of Mitsuba. -You should only read them if if you wish to interface with the API +of the coding conventions and general architecture of Mitsuba. +You should only read them if you wish to interface with the API in some way (e.g. by developing your own plugins). The coding style section is only relevant if you plan to submit patches that are meant to become part of the main codebase. @@ -9,15 +9,15 @@ to become part of the main codebase. \section{Code structure} Mitsuba is split into four basic support libraries: \begin{itemize} -\item The core library (\code{libcore}) implements basic functionality such as +\item The core library (\code{libcore}) implements basic functionality such as cross-platform file and bitmap I/O, data structures, scheduling, as well as logging and plugin management. -\item The rendering library (\code{librender}) contains abstractions +\item The rendering library (\code{librender}) contains abstractions needed to load and represent scenes containing light sources, shapes, materials, and participating media. \item The hardware acceleration library (\code{libhw}) implements a cross-platform display library, an object-oriented OpenGL - wrapper, as well as support for rendering interactive previews of scenes. + wrapper, as well as support for rendering interactive previews of scenes. \item Finally, the bidirectional library (\code{libbidir}) - contains a support layer that is used to implement bidirectional rendering algorithms such as + contains a support layer that is used to implement bidirectional rendering algorithms such as Bidirectional Path Tracing and Metropolis Light Transport. \end{itemize} A detailed reference of these APIs is available at @@ -25,11 +25,11 @@ present a few basic examples to get familiar with them.
\section{Coding style} -\paragraph{Indentation:} The Mitsuba codebase uses tabs for indentation, +\paragraph{Indentation:} The Mitsuba codebase uses tabs for indentation, which expand to \emph{four} spaces. Please make sure that you configure your editor this way, otherwise the source code layout will look garbled. -\paragraph{Placement of braces:} Opening braces should be placed on the +\paragraph{Placement of braces:} Opening braces should be placed on the same line to make the best use of vertical space, i.e. \begin{cpp} if (x > y) { @@ -54,9 +54,9 @@ if ( x==y ){ .. \end{cpp} -\paragraph{Name format:} Names are always written in camel-case. +\paragraph{Name format:} Names are always written in camel-case. Classes and structures start with a capital letter, whereas member functions -and attributes start with a lower-case letter. Attributes of classes +and attributes start with a lower-case letter. Attributes of classes have the prefix \code{m\_}. Here is an example: \begin{cpp} class MyClass { @@ -86,14 +86,14 @@ and properly conveys the semantics. as getters and setters. \paragraph{Documentation:} Header files should contain Doxygen-compatible documentation. It is also a good idea to add -comments to a \code{.cpp} file to explain subtleties of an implemented algorithm. +comments to a \code{.cpp} file to explain subtleties of an implemented algorithm. However, anything pertaining to the API should go into the header file. \paragraph{Boost:} Use the boost libraries whenever this helps to save time or write more compact code. \paragraph{Classes vs structures:}In Mitsuba, classes usually go onto the heap, -whereas structures may be allocated both on the stack and the heap. +whereas structures may be allocated both on the stack and the heap. Classes that derive from \code{Object} implement a protected virtual destructor, which explicitly prevents them from being allocated on the stack. @@ -110,6 +110,6 @@ if (..) { \paragraph{Separation of plugins:}Mitsuba encourages that plugins are only used via the generic interface they implement. You will find that almost all plugins (e.g. emitters) don't actually provide a header file, hence they can only be accessed -using the generic \code{Emitter} interface they implement. If any kind of special -interaction between plugins is needed, this is usually an indication that the +using the generic \code{Emitter} interface they implement. If any kind of special +interaction between plugins is needed, this is usually an indication that the generic interface should be extended to accommodate this. diff --git a/doc/format.tex b/doc/format.tex index c9de4401..c7e31189 100644 --- a/doc/format.tex +++ b/doc/format.tex @@ -1,8 +1,8 @@ \section{Scene file format} \label{sec:format} -Mitsuba uses a very simple and general XML-based format to represent scenes. -Since the framework's philosophy is to represent discrete blocks of functionality as plugins, -a scene file can essentially be interpreted as description that determines which +Mitsuba uses a very simple and general XML-based format to represent scenes. +Since the framework's philosophy is to represent discrete blocks of functionality as plugins, +a scene file can essentially be interpreted as a description that determines which plugins should be instantiated and how they should interface with each other. In the following, we'll look at a few examples to get a feeling for the scope of the format.
@@ -18,17 +18,17 @@ something like this: \end{xml} The scene version attribute denotes the release of Mitsuba that was used to -create the scene. This information allows Mitsuba to always correctly process the +create the scene. This information allows Mitsuba to always correctly process the file irregardless of any potential future changes in the scene description language. This example already contains the most important things to know about format: you can have -\emph{objects} (such as the objects instantiated by the \code{scene} or \code{shape} tags), -which are allowed to be nested within each other. Each object optionally accepts \emph{properties} -(such as the \code{string} tag), which further characterize its behavior. All objects except +\emph{objects} (such as the objects instantiated by the \code{scene} or \code{shape} tags), +which are allowed to be nested within each other. Each object optionally accepts \emph{properties} +(such as the \code{string} tag), which further characterize its behavior. All objects except for the root object (the \code{scene}) cause the renderer to search and load a plugin from disk, -hence you must provide the plugin name using \code{type=".."} parameter. +hence you must provide the plugin name using \code{type=".."} parameter. -The object tags also let the renderer know \emph{what kind} of object is to be instantiated: for instance, +The object tags also let the renderer know \emph{what kind} of object is to be instantiated: for instance, any plugin loaded using the \code{shape} tag must conform to the \emph{Shape} interface, which is certainly the case for the plugin named \code{obj} (it contains a WaveFront OBJ loader). Similarly, you could write @@ -40,12 +40,12 @@ Similarly, you could write \end{xml} -This loads a different plugin (\code{sphere}) which is still a \emph{Shape}, but instead represents -a sphere configured with a radius of 10 world-space units. Mitsuba ships with +This loads a different plugin (\code{sphere}) which is still a \emph{Shape}, but instead represents +a sphere configured with a radius of 10 world-space units. Mitsuba ships with a large number of plugins; please refer to the next chapter for a detailed overview of them. -The most common scene setup is to declare an integrator, some geometry, a sensor (e.g. a camera), a film, a sampler +The most common scene setup is to declare an integrator, some geometry, a sensor (e.g. a camera), a film, a sampler and one or more emitters. Here is a more complex example: \begin{xml} @@ -92,20 +92,20 @@ and one or more emitters. Here is a more complex example: - + - + \end{xml} This example introduces several new object types (\code{integrator, sensor, bsdf, sampler, film}, and \code{emitter}) -and property types (\code{integer}, \code{transform}, and \code{rgb}). +and property types (\code{integer}, \code{transform}, and \code{rgb}). As you can see in the example, objects are usually declared at the top level except if there is some -inherent relation that links them to another object. For instance, BSDFs are usually specific to a certain geometric object, so +inherent relation that links them to another object. For instance, BSDFs are usually specific to a certain geometric object, so they appear as a child object of a shape. Similarly, the sampler and film affect the way in which rays are generated from the sensor and how it records the resulting radiance samples, hence they are nested inside it. 
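To summarize the basic structure in a compact form, a minimal scene of the kind discussed at the beginning of this section might look as follows (the version number and the mesh filename are placeholders):
\begin{xml}
<scene version="0.4.4">
    <shape type="obj">
        <string name="filename" value="model.obj"/>
    </shape>
</scene>
\end{xml}
The sphere variant mentioned above could be written analogously, for instance using a \code{float} property for its radius:
\begin{xml}
<shape type="sphere">
    <float name="radius" value="10"/>
</shape>
\end{xml}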
@@ -135,45 +135,45 @@ uses a basic linear RGB representation\footnote{The official releases all use linear RGB---to do spectral renderings, you will have to compile Mitsuba yourself.}. Irrespective of which internal representation is used, Mitsuba supports -several different ways of specifying color information, which is then +several different ways of specifying color information, which is then converted appropriately. -The preferred way of passing color spectra to the renderer is to explicitly +The preferred way of passing color spectra to the renderer is to explicitly denote the associated wavelengths of each value: \begin{xml} \end{xml} -This is a mapping from wavelength in nanometers (before the colon) +This is a mapping from wavelength in nanometers (before the colon) to a reflectance or intensity value (after the colon). Values in between are linearly interpolated from the two closest neighbors. -A useful shortcut to get a completely uniform spectrum, it is to provide +A useful shortcut to get a completely uniform spectrum, it is to provide only a single value: \begin{xml} \end{xml} Another (discouraged) option is to directly provide the spectrum in Mitsuba's -internal representation, avoiding the need for any kind of conversion. +internal representation, avoiding the need for any kind of conversion. However, this is problematic, since the associated scene will likely not work -anymore when Mitsuba is compiled with a different value of -\texttt{SPECTRUM\_SAMPLES}. -For completeness, the possibility is explained nonetheless. Assuming that -the 360-830$nm$ range is discretized into ten 47$nm$-sized blocks -(i.e. \texttt{SPECTRUM\_SAMPLES} is set to 10), their values can be specified +anymore when Mitsuba is compiled with a different value of +\texttt{SPECTRUM\_SAMPLES}. +For completeness, the possibility is explained nonetheless. Assuming that +the 360-830$nm$ range is discretized into ten 47$nm$-sized blocks +(i.e. \texttt{SPECTRUM\_SAMPLES} is set to 10), their values can be specified as follows: \begin{xml} \end{xml} -Another convenient way of providing color spectra is by specifying linear RGB +Another convenient way of providing color spectra is by specifying linear RGB or sRGB values using floating-point triplets or hex values: \begin{xml} \end{xml} -When Mitsuba is compiled with the default settings, it internally uses -linear RGB to represent colors, so these values can directly be used. +When Mitsuba is compiled with the default settings, it internally uses +linear RGB to represent colors, so these values can directly be used. However, when configured for doing spectral rendering, a suitable color spectrum with the requested RGB reflectance must be found. This is a tricky problem, since there is an infinite number of spectra with this property. @@ -190,9 +190,9 @@ The \texttt{reflectance} intent is used by default, so remember to set it to \texttt{illuminant} when defining the brightness of a light source with the \texttt{} tag. -When spectral power or reflectance distributions are obtained from measurements +When spectral power or reflectance distributions are obtained from measurements (e.g. at 10$nm$ intervals), they are usually quite unwiedy and can clutter -the scene description. For this reason, there is yet another way to pass +the scene description. For this reason, there is yet another way to pass a spectrum by loading it from an external file: \begin{xml} @@ -213,23 +213,23 @@ are allowed. 
Here is an example: \renderings{ \fbox{\includegraphics[width=10cm]{images/blackbody}} \hfill\, - \caption{\label{fig:blackbody}A few simulated + \caption{\label{fig:blackbody}A few simulated black body emitters over a range of temperature values} } \label{sec:blackbody} -Finally, it is also possible to specify the spectral distribution of a black body emitter (\figref{blackbody}), +Finally, it is also possible to specify the spectral distribution of a black body emitter (\figref{blackbody}), where the temperature is given in Kelvin. \begin{xml} \end{xml} Note that attaching a black body spectrum to the \texttt{intensity} property -of a emitter introduces physical units into the rendering process of -Mitsuba, which is ordinarily a unitless system\footnote{This means that the -units of pixel values in a rendering are completely dependent on the units of -the user input, including the unit of world-space distance and the units of +of a emitter introduces physical units into the rendering process of +Mitsuba, which is ordinarily a unitless system\footnote{This means that the +units of pixel values in a rendering are completely dependent on the units of +the user input, including the unit of world-space distance and the units of the light source emission profile.}. -Specifically, the black body spectrum has units of power ($W$) per +Specifically, the black body spectrum has units of power ($W$) per unit area ($m^{-2}$) per steradian ($sr^{-1}$) per unit wavelength ($nm^{-1}$). If these units are inconsistent with your scene description, you may use the optional \texttt{scale} attribute to adjust them, e.g.: @@ -244,11 +244,11 @@ Points and vectors can be specified as follows: \end{xml} -It is important that whatever you choose as world-space units (meters, inches, etc.) is +It is important that whatever you choose as world-space units (meters, inches, etc.) is used consistently in all places. \subsubsection{Transformations} Transformations are the only kind of property that require more than a single tag. The idea is that, starting -with the identity, one can build up a transformation using a sequence of commands. For instance, a transformation that +with the identity, one can build up a transformation using a sequence of commands. For instance, a transformation that does a translation followed by a rotation might be written like this: \begin{xml} @@ -276,8 +276,8 @@ choices are available: \begin{xml} \end{xml} -\item lookat transformations --- this is primarily useful for setting up cameras (and spot lights). The \code{origin} coordinates -specify the camera origin, \code{target} is the point that the camera will look at, and the +\item \code{lookat} transformations --- this is primarily useful for setting up cameras (and spot lights). The \code{origin} coordinates +specify the camera origin, \code{target} is the point that the camera will look at, and the (optional) \code{up} parameter determines the ``upward'' direction in the final rendered image. The \code{up} parameter is not needed for spot lights. \begin{xml} @@ -286,7 +286,38 @@ The \code{up} parameter is not needed for spot lights. \end{itemize} Cordinates that are zero (for \code{translate} and \code{rotate}) or one (for \code{scale}) do not explicitly have to be specified. -\subsection{Instancing} +\subsection{Animated transformations} +Most shapes, emitters, and sensors in Mitsuba can accept both normal transformations +and \emph{animated transformations} as parameters. 
The latter is useful to +render scenes involving motion blur (Figure~\ref{fig:animated-transform}). The syntax used to specify these +is slightly different: +\begin{xml} + + + .. chained list of transformations as discussed above .. + + + + .. chained list of transformations as discussed above .. + + + .. additional transformations (optional) .. + +\end{xml} +\renderings{ + \fbox{\includegraphics[width=.6\textwidth]{images/animated_transform}}\hfill\, + \caption{\label{fig:animated-transform}Beware the dragon: a triangle mesh undergoing linear motion with several keyframes (object courtesy of XYZRGB)} +} + +Mitsuba then decomposes each transformation into a scale, translation, and +rotation component and interpolates\footnote{Using linear interpolation +for the scale and translation component and spherical linear quaternion +interpolation for the rotation component.} these for intermediate +time values. +It is important to specify appropriate shutter open/close times +to the sensor so that the motion is visible. +\newpage +\subsection{References} Quite often, you will find yourself using an object (such as a material) in many places. To avoid having to declare it over and over again, which wastes memory, you can make use of references. Here is an example of how this works: @@ -297,7 +328,7 @@ of how this works: - @@ -311,7 +342,7 @@ of how this works: \end{xml} By providing a unique \texttt{id} attribute in the -object declaration, the object is bound to that identifier +object declaration, the object is bound to that identifier upon instantiation. Referencing this identifier at a later point (using the \texttt{} tag) will add the instance to the parent object, with no further memory @@ -326,7 +357,7 @@ it cannot be used to instantiate geometry---if this functionality is needed, take a look at the \pluginref{instance} plugin. \subsection{Including external files} -A scene can be split into multiple pieces for better readability. +A scene can be split into multiple pieces for better readability. to include an external file, please use the following command: \begin{xml} diff --git a/doc/gendoc.py b/doc/gendoc.py index 3d9b4713..d83aed88 100755 --- a/doc/gendoc.py +++ b/doc/gendoc.py @@ -1,6 +1,6 @@ -#! /usr/bin/python -# -# This script walks through all plugin files and +#!/usr/bin/env python +# +# This script walks through all plugin files and # extracts documentation that should go into the # reference manual @@ -52,11 +52,8 @@ def process(path, target): fileList += [fname] fileList = [] - # Wrap the walk function to make this work in python 2 and 3. 
- if pyVer >= 3: - os.walk(path, capture, fileList) - else: - os.path.walk(path, capture, fileList) + for (dirname, subdirs, files) in os.walk(path): + capture(fileList, dirname, files) ordering = [(findOrderID(fname), fname) for fname in fileList] ordering = sorted(ordering, key = lambda entry: entry[0]) @@ -64,34 +61,40 @@ def process(path, target): for entry in ordering: extract(target, entry[1]) -os.chdir(os.path.dirname(__file__)) -f = open('plugins_generated.tex', 'w') -f.write('\input{section_shapes}\n') -process('../src/shapes', f) -f.write('\input{section_bsdf}\n') -process('../src/bsdfs', f) -f.write('\input{section_textures}\n') -process('../src/textures', f) -f.write('\input{section_subsurface}\n') -process('../src/subsurface', f) -f.write('\input{section_media}\n') -process('../src/medium', f) -f.write('\input{section_phase}\n') -process('../src/phase', f) -f.write('\input{section_volumes}\n') -process('../src/volume', f) -f.write('\input{section_emitters}\n') -process('../src/emitters', f) -f.write('\input{section_sensors}\n') -process('../src/sensors', f) -f.write('\input{section_integrators}\n') -process('../src/integrators', f) -f.write('\input{section_samplers}\n') -process('../src/samplers', f) -f.write('\input{section_films}\n') -process('../src/films', f) -f.write('\input{section_rfilters}\n') -f.close() -os.system('bibtex main.aux') -os.system('pdflatex main.tex') -#os.system('pdflatex main.tex | grep -i warning | grep -v "Package \(typearea\|hyperref\)"') +def process_src(target, src_subdir, section=None): + if section is None: + section = "section_" + src_subdir + target.write('\input{{{0}}}\n'.format(section)) + process('../src/{0}'.format(src_subdir), target) + +def texify(texfile): + from subprocess import Popen, PIPE, check_call + version = Popen(["pdflatex", "-version"], stdout=PIPE).communicate()[0] + # Call decode() to convert from bytes to string, required in Python 3 + if re.match('.*MiKTeX.*', version.decode()): + # MiKTeX's "texify" calls latex/bibtex in tandem automatically + print("Running texify on {0}...".format(texfile)) + check_call(['texify', '-pq', texfile]) + else: + check_call(['pdflatex', texfile]) + check_call(['bibtex', texfile.replace('.tex', '.aux')]) + check_call(['pdflatex', texfile]) + check_call(['pdflatex', texfile]) + +os.chdir(os.path.dirname(os.path.abspath(__file__))) +with open('plugins_generated.tex', 'w') as f: + process_src(f, 'shapes') + process_src(f, 'bsdfs', 'section_bsdf') + process_src(f, 'textures') + process_src(f, 'subsurface') + process_src(f, 'medium', 'section_media') + process_src(f, 'phase') + process_src(f, 'volume', 'section_volumes') + process_src(f, 'emitters') + process_src(f, 'sensors') + process_src(f, 'integrators') + process_src(f, 'samplers') + process_src(f, 'films') + process_src(f, 'rfilters') + +texify('main.tex') diff --git a/doc/gpl-v3.tex b/doc/gpl-v3.tex index 54c869a2..d5a64851 100644 --- a/doc/gpl-v3.tex +++ b/doc/gpl-v3.tex @@ -1,7 +1,7 @@ \section{License} \label{sec:license} Mitsuba is licensed under the terms of Version 3 of the GNU General Public License, -which is reproduced here in its entirety. The license itself is copyrighted +which is reproduced here in its entirety. The license itself is copyrighted \copyright\ 2007 by the Free Software Foundation, Inc. \texttt{http://fsf.org/}. 
\subsection{Preamble} diff --git a/doc/images/animated_transform.jpg b/doc/images/animated_transform.jpg new file mode 100644 index 00000000..fd287ab1 Binary files /dev/null and b/doc/images/animated_transform.jpg differ diff --git a/doc/images/annotation_example.jpg b/doc/images/annotation_example.jpg new file mode 100644 index 00000000..0420429f Binary files /dev/null and b/doc/images/annotation_example.jpg differ diff --git a/doc/images/integrator_volpath_hideemitters.jpg b/doc/images/integrator_volpath_hideemitters.jpg new file mode 100644 index 00000000..d7869c0a Binary files /dev/null and b/doc/images/integrator_volpath_hideemitters.jpg differ diff --git a/doc/images/integrator_volpath_normal.jpg b/doc/images/integrator_volpath_normal.jpg new file mode 100644 index 00000000..adf69e63 Binary files /dev/null and b/doc/images/integrator_volpath_normal.jpg differ diff --git a/doc/images/shape_cube_basic.jpg b/doc/images/shape_cube_basic.jpg new file mode 100644 index 00000000..d5f26251 Binary files /dev/null and b/doc/images/shape_cube_basic.jpg differ diff --git a/doc/images/shape_cube_parameterization.jpg b/doc/images/shape_cube_parameterization.jpg new file mode 100644 index 00000000..d9f34781 Binary files /dev/null and b/doc/images/shape_cube_parameterization.jpg differ diff --git a/doc/images/shape_instance_fractal_bot.jpg b/doc/images/shape_instance_fractal_bot.jpg new file mode 100644 index 00000000..c136c9a7 Binary files /dev/null and b/doc/images/shape_instance_fractal_bot.jpg differ diff --git a/doc/images/shape_instance_fractal_top.jpg b/doc/images/shape_instance_fractal_top.jpg new file mode 100644 index 00000000..e827827c Binary files /dev/null and b/doc/images/shape_instance_fractal_top.jpg differ diff --git a/doc/integrator.tex b/doc/integrator.tex index f8bdf85a..e4501830 100644 --- a/doc/integrator.tex +++ b/doc/integrator.tex @@ -3,9 +3,9 @@ Suppose you want to design a custom integrator to render scenes in Mitsuba. There are two general ways you can do this, and which one you should take mostly depends on the characteristics of your particular integrator. -The framework distinguishes between \emph{sampling-based} integrators and -\emph{generic} ones. A sampling-based integrator is able to generate -(usually unbiased) estimates of the incident radiance along a specified rays, and this +The framework distinguishes between \emph{sampling-based} integrators and +\emph{generic} ones. A sampling-based integrator is able to generate +(usually unbiased) estimates of the incident radiance along a specified rays, and this is done a large number of times to render a scene. A generic integrator is more like a black box, where no assumptions are made on how the the image is created. For instance, the VPL renderer uses OpenGL to rasterize the scene @@ -13,44 +13,44 @@ using hardware acceleration, which certainly doesn't fit into the sampling-based For that reason, it must be implemented as a generic integrator. Generally, if you can package up your code to fit into the -\code{SampleIntegrator} interface, you should do it, because you'll get +\code{SamplingIntegrator} interface, you should do it, because you'll get parallelization and network rendering essentially for free. This is done by transparently sending instances of your integrator class to all participating cores and assigning small image blocks for each one to work on. Also, sampling-based integrators can be nested within some other integrators, such as an irradiance cache or an adaptive integrator. 
This cannot be done with generic -integrators due to their black-box nature. Note that it is often still -possible to parallelize generic integrators, but this involves significantly +integrators due to their black-box nature. Note that it is often still +possible to parallelize generic integrators, but this involves significantly more work. -In this section, we'll design a rather contrived sampling-based integrator, -which renders a monochromatic image of your scene, where the intensity -denotes the distance to the camera. But to get a feel for the overall -framework, we'll start with an even simpler one, that just renders a +In this section, we'll design a rather contrived sampling-based integrator, +which renders a monochromatic image of your scene, where the intensity +denotes the distance to the camera. But to get a feel for the overall +framework, we'll start with an even simpler one, that just renders a solid-color image. \subsection{Basic implementation} -In Mitsuba's \code{src/integrators} directory, create a file named -\code{myIntegrator.cpp}. +In Mitsuba's \code{src/integrators} directory, create a file named +\code{myIntegrator.cpp}. \begin{cpp} #include MTS_NAMESPACE_BEGIN -class MyIntegrator : public SampleIntegrator { +class MyIntegrator : public SamplingIntegrator { public: MTS_DECLARE_CLASS() }; -MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SampleIntegrator) +MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SamplingIntegrator) MTS_EXPORT_PLUGIN(MyIntegrator, "A contrived integrator"); MTS_NAMESPACE_END \end{cpp} The \code{scene.h} header file contains all of the dependencies we'll need for now. To avoid conflicts with other libraries, the whole framework is located in -a separate namespace named \code{mitsuba}, and the lines starting with +a separate namespace named \code{mitsuba}, and the lines starting with \code{MTS\_NAMESPACE} ensure that our integrator is placed there as well. @@ -61,17 +61,17 @@ and serialization/unserialization support. Let's take a look at the second of th lines, because it contains several important pieces of information: The suffix \code{S} in \code{MTS\_IMPLEMENT\_CLASS\_S} specifies that this is -a serializable class, which means that it can be sent over the network or +a serializable class, which means that it can be sent over the network or written to disk and later restored. That also implies that certain methods need to be provided by the implementation --- we'll add those in a moment. The three following parameters specify the name of this class (\code{MyIntegrator}), the fact that it is \emph{not} an abstract class (\code{false}), and the name of its -parent class (\code{SampleIntegrator}). +parent class (\code{SamplingIntegrator}). Just below, you can see a line that starts with \code{MTS\_EXPORT\_PLUGIN}. As the name suggests, this line is only necessary -for plugins, and it ensures that the specified class (\code{MyIntegrator}) is +for plugins, and it ensures that the specified class (\code{MyIntegrator}) is what you want to be instantiated when somebody loads this plugin. It is also possible to supply a short descriptive string. 
\vspace{3mm} @@ -80,7 +80,7 @@ Let's add an instance variable and a constructor: \begin{cpp} public: /// Initialize the integrator with the specified properties - MyIntegrator(const Properties &props) : SampleIntegrator(props) { + MyIntegrator(const Properties &props) : SamplingIntegrator(props) { Spectrum defaultColor; defaultColor.fromLinearRGB(0.2f, 0.5f, 0.2f); m_color = props.getSpectrum("color", defaultColor); @@ -106,41 +106,41 @@ Next, we need to add serialization and unserialization support: \begin{cpp} /// Unserialize from a binary data stream MyIntegrator(Stream *stream, InstanceManager *manager) - : SampleIntegrator(stream, manager) { + : SamplingIntegrator(stream, manager) { m_color = Spectrum(stream); } /// Serialize to a binary data stream void serialize(Stream *stream, InstanceManager *manager) const { - SampleIntegrator::serialize(stream, manager); + SamplingIntegrator::serialize(stream, manager); m_color.serialize(stream); } \end{cpp} -This makes use of a \emph{stream} abstraction similar in style to Java. -A stream can represent various things, such as a file, a console session, or a +This makes use of a \emph{stream} abstraction similar in style to Java. +A stream can represent various things, such as a file, a console session, or a network communication link. Especially when dealing with multiple machines, it is important to realize that the machines may use different binary representations related to their respective \emph{endianness}. To prevent issues from arising, -the \code{Stream} interface provides many methods for writing and reading +the \code{Stream} interface provides many methods for writing and reading small chunks of data (e.g. \code{writeShort}, \code{readFloat}, ..), which automatically perform endianness translation. In our case, the \code{Spectrum} class already provides serialization/unserialization support, so we don't really have to do anything. -Note that it is crucial that your code calls the serialization and unserialization +Note that it is crucial that your code calls the serialization and unserialization implementations of the superclass, since it will also read/write some information to the stream. We haven't used the \texttt{manager} parameter yet, so here is a quick overview of what it does: if many cases, we don't just want to serialize a single class, but a whole graph of objects. Some may be referenced many -times from different places, and potentially there are even cycles. If we just -naively called the serialization and unserialization implementation of members -recursively within each class, we'd waste much bandwitdth and potentially +times from different places, and potentially there are even cycles. If we just +naively called the serialization and unserialization implementation of members +recursively within each class, we'd waste much bandwitdth and potentially end up stuck in an infinite recursion. This is where the instance manager comes in. 
Every time you want to serialize -a heap-allocated object (suppose it is of type \code{SomeClass}), +a heap-allocated object (suppose it is of type \code{SomeClass}), instead of calling its serialize method, write \begin{cpp} @@ -155,7 +155,7 @@ ref myObject = static_cast(manager->getInstance(stream)) Behind the scenes, the object manager adds annotations to the data stream, which ensure that you will end up with the exact same reference graph on the -remote side, while only one copy of every object is transmitted and no +remote side, while only one copy of every object is transmitted and no infinite recursion can occur. But we digress -- let's go back to our integrator. \vspace{3mm} @@ -168,10 +168,11 @@ radiance along a ray differential: here, we simply return the stored color } \end{cpp} -Let's try building the plugin: edit the \code{SConstruct} file in the main -directory, and add the following line after the comment ''\code{\# Integrators}'': +Let's try building the plugin: edit the \code{SConscript} file in the +\code{integrator} directory, and add the following line before the +last line containing ''\code{Export('plugins')}'': \begin{cpp} -plugins += env.SharedLibrary('plugins/myIntegrator', ['src/integrators/myIntegrator.cpp']) +plugins += env.SharedLibrary('myIntegrator', ['myIntegrator.cpp']) \end{cpp} After calling, \texttt{scons}, you should be able to use your new integrator in parallel rendering jobs and you'll get something like this: @@ -182,8 +183,8 @@ That is admittedly not very exciting --- so let's do some actual computation. \subsection{Visualizing depth} Add an instance variable \code{Float m\_maxDist;} to the implementation. This will store the maximum distance from the camera to any object, which is needed -to map distances into the $[0,1]$ range. Note the upper-case \code{Float} --- -this means that either a single- or a double-precision variable is +to map distances into the $[0,1]$ range. Note the upper-case \code{Float} --- +this means that either a single- or a double-precision variable is substituted based the compilation flags. This variable constitutes local state, thus it must not be forgotten in the serialization- and unserialization routines: append @@ -202,18 +203,19 @@ To avoid having to do this every time \code{Li()} is called, we can override the \code{preprocess} function: \begin{cpp} /// Preprocess function -- called on the initiating machine - bool preprocess(const Scene *scene, RenderQueue *queue, - const RenderJob *job, int sceneResID, int cameraResID, + bool preprocess(const Scene *scene, RenderQueue *queue, + const RenderJob *job, int sceneResID, int cameraResID, int samplerResID) { - SampleIntegrator::preprocess(scene, queue, job, sceneResID, + SamplingIntegrator::preprocess(scene, queue, job, sceneResID, cameraResID, samplerResID); const AABB &sceneAABB = scene->getAABB(); - Point cameraPosition = scene->getCamera()->getPosition(); + /* Find the camera position at t=0 seconds */ + Point cameraPosition = scene->getSensor()->getWorldTransform()->eval(0).transformAffine(Point(0.0f)); m_maxDist = - std::numeric_limits::infinity(); for (int i=0; i<8; ++i) - m_maxDist = std::max(m_maxDist, + m_maxDist = std::max(m_maxDist, (cameraPosition - sceneAABB.getCorner(i)).length()); return true; @@ -228,13 +230,13 @@ global resource identifiers. When a network render job runs, many associated pieces of information (the scene, the camera, etc.) 
are wrapped into global resource chunks shared amongst all nodes, and these can be referenced using such identifiers. -One important aspect of the \code{preprocess} function is that it is executed -on the initiating node and before any of the parallel rendering begins. +One important aspect of the \code{preprocess} function is that it is executed +on the initiating node and before any of the parallel rendering begins. This can be used to compute certain things only once. Any information updated here (such as \code{m\_maxDist}) will be forwarded to the other nodes before the rendering begins. -Now, replace the body of the \code{Li} method with +Now, replace the body of the \code{Li} method with \begin{cpp} if (rRec.rayIntersect(r)) { Float distance = rRec.its.t; @@ -255,36 +257,36 @@ to intersect a ray against the scene actually works like this: /* Do something with the intersection stored in 'its' */ } \end{cpp} -As you can see, we did something slightly different in the distance +As you can see, we did something slightly different in the distance renderer fragment above (we called \code{RadianceQueryRecord::rayIntersect()} on the supplied parameter \code{rRec}), and the reason for this is \emph{nesting}. \subsection{Nesting} The idea of of nesting is that sampling-based rendering techniques can be -embedded within each other for added flexibility: for instance, one -might concoct a 1-bounce indirect rendering technique complete with -irradiance caching and adaptive integration simply by writing the following +embedded within each other for added flexibility: for instance, one +might concoct a 1-bounce indirect rendering technique complete with +irradiance caching and adaptive integration simply by writing the following into a scene XML file: \begin{xml} - + - + - + \end{xml} -To support this kind of complex interaction, some information needs to be passed between the +To support this kind of complex interaction, some information needs to be passed between the integrators, and the \code{RadianceQueryRecord} parameter of the function -\code{SampleIntegrator::Li} is used for this. +\code{SamplingIntegrator::Li} is used for this. -This brings us back to the odd way of computing an intersection a moment ago: -the reason why we didn't just do this by calling +This brings us back to the odd way of computing an intersection a moment ago: +the reason why we didn't just do this by calling \code{scene->rayIntersect()} is that our technique might actually be nested within a parent technique, which has already computed this intersection. -To avoid wasting resources, the function \code{rRec.rayIntersect} first -determines whether an intersection record has already been provided. -If yes, it does nothing. Otherwise, it takes care of computing one. +To avoid wasting resources, the function \code{rRec.rayIntersect} first +determines whether an intersection record has already been provided. +If yes, it does nothing. Otherwise, it takes care of computing one. The radiance query record also lists the particular \emph{types} of radiance requested by the parent integrator -- your implementation should respect these as much diff --git a/doc/introduction.tex b/doc/introduction.tex index 0fb36cc9..aa9996f2 100644 --- a/doc/introduction.tex +++ b/doc/introduction.tex @@ -1,69 +1,69 @@ \part{Using Mitsuba} \textbf{Disclaimer:} This is manual documents the usage, file format, and -internal design of the Mitsuba rendering system. It is currently a work +internal design of the Mitsuba rendering system. 
It is currently a work in progress, hence some parts may still be incomplete or missing. \section{About Mitsuba} -Mitsuba is a research-oriented rendering system in the style of PBRT +Mitsuba is a research-oriented rendering system in the style of PBRT (\url{www.pbrt.org}), from which it derives much inspiration. -It is written in portable C++, implements unbiased as well -as biased techniques, and contains heavy optimizations targeted -towards current CPU architectures. -Mitsuba is extremely modular: it consists of a small set of core libraries -and over 100 different plugins that implement functionality ranging +It is written in portable C++, implements unbiased as well +as biased techniques, and contains heavy optimizations targeted +towards current CPU architectures. +Mitsuba is extremely modular: it consists of a small set of core libraries +and over 100 different plugins that implement functionality ranging from materials and light sources to complete rendering algorithms. -In comparison to other open source renderers, Mitsuba places a strong -emphasis on experimental rendering techniques, such as path-based +In comparison to other open source renderers, Mitsuba places a strong +emphasis on experimental rendering techniques, such as path-based formulations of Metropolis Light Transport and volumetric modeling approaches. Thus, it may be of genuine interest to those who -would like to experiment with such techniques that haven't yet found -their way into mainstream renderers, and it also provides a solid +would like to experiment with such techniques that haven't yet found +their way into mainstream renderers, and it also provides a solid foundation for research in this domain. Other design considerations are: \parheader{Performance:} -Mitsuba provides optimized implementations of the most commonly +Mitsuba provides optimized implementations of the most commonly used rendering algorithms. By virtue of running on a shared foundation, comparisons between them can -better highlight the merits and limitations of different approaches. This is in contrast to, say, -comparing two completely different rendering products, where technical information on the underlying +better highlight the merits and limitations of different approaches. This is in contrast to, say, +comparing two completely different rendering products, where technical information on the underlying implementation is often intentionally not provided. \parheader{Robustness:} -In many cases, physically-based rendering packages force the user to model scenes with the underlying -algorithm (specifically: its convergence behavior) in mind. For instance, glass windows are routinely -replaced with light portals, photons must be manually guided to the relevant parts of a scene, and -interactions with complex materials are taboo, since they cannot be importance sampled exactly. -One focus of Mitsuba will be to develop path-space light transport algorithms, which handle such +In many cases, physically-based rendering packages force the user to model scenes with the underlying +algorithm (specifically: its convergence behavior) in mind. For instance, glass windows are routinely +replaced with light portals, photons must be manually guided to the relevant parts of a scene, and +interactions with complex materials are taboo, since they cannot be importance sampled exactly. +One focus of Mitsuba will be to develop path-space light transport algorithms, which handle such cases more gracefully. 
-\parheader{Scalability:} Mitsuba instances can be merged into large clusters, which transparently distribute and +\parheader{Scalability:} Mitsuba instances can be merged into large clusters, which transparently distribute and jointly execute tasks assigned to them using only node-to-node communcation. It has successfully scaled to large-scale renderings that involved more than 1000 cores working on a single image. -Most algorithms in Mitsuba are written using a generic parallelization layer, which can tap +Most algorithms in Mitsuba are written using a generic parallelization layer, which can tap into this cluster-wide parallelism. The principle is that if any component of the renderer produces -work that takes longer than a second or so, it at least ought to use all of the processing power +work that takes longer than a second or so, it at least ought to use all of the processing power it can get. -The renderer also tries to be very conservative in its use of memory, which allows it to handle +The renderer also tries to be very conservative in its use of memory, which allows it to handle large scenes (>30 million triangles) and multi-gigabyte heterogeneous volumes on consumer hardware. \parheader{Realism and accuracy:} Mitsuba comes with a large repository of physically-based reflectance models for surfaces and participating media. These implementations are designed so that they can be used to build complex shader networks, while providing enough flexibility to be compatible with a wide range of different -rendering techniques, including path tracing, photon mapping, hardware-accelerated rendering +rendering techniques, including path tracing, photon mapping, hardware-accelerated rendering and bidirectional methods. -The unbiased path tracers in Mitsuba are battle-proven and produce -reference-quality results that can be used for predictive rendering, and to verify -implementations of other rendering methods. +The unbiased path tracers in Mitsuba are battle-proven and produce +reference-quality results that can be used for predictive rendering, and to verify +implementations of other rendering methods. \parheader{Usability:} -Mitsuba comes with a graphical user interface to interactively explore scenes. Once a suitable -viewpoint has been found, it is straightforward to perform renderings using any of the -implemented rendering techniques, while tweaking their parameters to find the most suitable +Mitsuba comes with a graphical user interface to interactively explore scenes. Once a suitable +viewpoint has been found, it is straightforward to perform renderings using any of the +implemented rendering techniques, while tweaking their parameters to find the most suitable settings. Experimental integration into Blender 2.5 is also available. \section{Limitations} @@ -72,22 +72,22 @@ However, there are some inherent limitations of the system that users should be \begin{enumerate}[(i)] \item \textbf{Wave Optics}: Mitsuba is fundamentally based on the geometric optics toolbox, which means that it generally does not simulate phenomena that arise due to -the wave properties of light (diffraction, for instance). +the wave properties of light (diffraction, for instance). \item \textbf{Polarization}: Mitsuba does not account for polarization. In other words, light is always assumed to be randomly polarized. This can be a problem for some predictive rendering applications. 
\item \textbf{Numerical accuracy}: The accuracy of any result produced with this -system is constrained by the underlying floating point computations. +system is constrained by the underlying floating point computations. For instance, an intricate scene that can be rendered without problems, may produce the wrong answer when all objects are translated away from the origin by a large distance, since floating point numbers are spaced less densely at the -new position. To avoid these sorts of pitfalls, it is good to have a basic -understanding of the IEEE-754 standard. +new position. To avoid these sorts of pitfalls, it is good to have a basic +understanding of the IEEE-754 standard. \end{enumerate} \section{License} -Mitsuba is free software and can be redistributed and modified under the terms of the GNU General +Mitsuba is free software and can be redistributed and modified under the terms of the GNU General Public License (Version 3) as provided by the Free Software Foundation. \remarks{ diff --git a/doc/macros.sty b/doc/macros.sty index ea25e5e3..6eae9806 100644 --- a/doc/macros.sty +++ b/doc/macros.sty @@ -92,7 +92,7 @@ }}\hspace*{1cm}}\end{figure} \setlength\fboxrule\fboxrulebackup } - + \newcommand{\renderings}[1]{ \begin{figure}[htb!] \setcounter{subfigure}{0} diff --git a/doc/main.tex b/doc/main.tex index b063a71a..29322217 100644 --- a/doc/main.tex +++ b/doc/main.tex @@ -27,6 +27,7 @@ \usepackage{ifthen} \usepackage{longtable} \usepackage{wrapfig} +\usepackage{footnote} % savenotes environment % Make sure that ligatures remain searchable in the PDF \input glyphtounicode @@ -39,8 +40,8 @@ \setcounter{secnumdepth}{3} \setcounter{tocdepth}{3} -\newcommand{\MitsubaVersion}{0.4.1} -\newcommand{\MitsubaYear}{2012} +\newcommand{\MitsubaVersion}{0.4.4} +\newcommand{\MitsubaYear}{2013} \typearea[current]{last} \raggedbottom @@ -116,7 +117,7 @@ medium,film,sampler,integrator,emitter,sensor, translate,rotate,scale,lookat,point,vector,matrix, include,fscat,volume,alias,rfilter,boolean, - subsurface + subsurface,animation }, } diff --git a/doc/misc.tex b/doc/misc.tex index a25f372f..890915db 100644 --- a/doc/misc.tex +++ b/doc/misc.tex @@ -2,9 +2,9 @@ \label{sec:miscellaneous} \subsection{A word about color spaces} \label{sec:colorspaces} -When using one of the downloadable release builds of Mitsuba, or a version +When using one of the downloadable release builds of Mitsuba, or a version that was compiled with the default settings, the renderer internally -operates in \emph{RGB mode}: all computations are performed using a representation +operates in \emph{RGB mode}: all computations are performed using a representation that is based on the three colors red, green, and blue. More specifically, these are the intensities of the red, green, and blue primaries @@ -14,20 +14,20 @@ rendering. This is an intuitive default which yields fast computations and satisfactory results for most applications. Low dynamic range images exported using the \pluginref{ldrfilm} will be stored in a -sRGB-compatible format that accounts for the custom gamma curves mandated by this +sRGB-compatible format that accounts for the custom gamma curves mandated by this standard. They should display as intended across a wide range of display devices. When saving high dynamic range output (e.g. OpenEXR, RGBE, or PFM), the computed radiance values are exported in a linear form (i.e. without having the sRGB gamma curve applied to it), which is the most common way of storing high dynamic range data. 
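To make the distinction concrete, the sketch below shows the standard sRGB transfer function (IEC 61966-2-1) that relates the linear radiance values stored in high dynamic range output to the display-encoded values written by \pluginref{ldrfilm}. This is generic reference code under the usual sRGB definition, not an excerpt from Mitsuba's own color handling.
\begin{cpp}
#include <cmath>

/// Standard sRGB opto-electronic transfer function: maps a linear value in
/// [0, 1] to its display-encoded (gamma-corrected) counterpart. HDR formats
/// such as OpenEXR store the linear value directly.
float linearToSRGB(float value) {
    if (value <= 0.0031308f)
        return 12.92f * value;
    else
        return 1.055f * std::pow(value, 1.0f / 2.4f) - 0.055f;
}
\end{cpp}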
-It is important to keep in mind that other applications may not support this +It is important to keep in mind that other applications may not support this ``linearized sRGB'' space---in particular, the Mac OS preview currently does not display images with this encoding correctly. -\subsubsection{Spectral mode} +\subsubsection{Spectral rendering} Some predictive rendering applications will require a more realistic space for interreflection computations. In such cases, Mitsuba can be switched to \emph{spectral mode}. -This can be done by compiling it with the \code{SPECTRUM\_SAMPLES=}$n$ parameter +This can be done by compiling it with the \code{SPECTRUM\_SAMPLES=}$n$ parameter (\secref{compiling}), where $n$ is usually between 15 and 30. Now, all input parameters are converted into color spectra with the specified diff --git a/doc/parallelization.tex b/doc/parallelization.tex index 20ace309..224e3dfb 100644 --- a/doc/parallelization.tex +++ b/doc/parallelization.tex @@ -5,23 +5,23 @@ The guiding principle is that if an operation can potentially take longer than a few seconds, it ought to use all the cores it can get. Here, we will go through a basic example, which will hopefully provide sufficient intuition -to realize more complex tasks. +to realize more complex tasks. To obtain good (i.e. close to linear) speedups, the parallelization layer depends on several key assumptions of the task to be parallelized: \begin{itemize} \item The task can easily be split up into a discrete number of \emph{work units}, which requires a negligible amount of computation. -\item Each work unit is small in footprint so that it can easily be transferred over the network or shared memory. +\item Each work unit is small in footprint so that it can easily be transferred over the network or shared memory. \item A work unit constitutes a significant amount of computation, which by far outweighs the cost of transmitting it to another node. \item The \emph{work result} obtained by processing a work unit is again small in footprint, so that it can easily be transferred back. \item Merging all work results to a solution of the whole problem requires a negligible amount of additional computation. \end{itemize} -This essentially corresponds to a parallel version of \emph{Map} (one part of \emph{Map\&Reduce}) and is -ideally suited for most rendering workloads. +This essentially corresponds to a parallel version of \emph{Map} (one part of \emph{Map\&Reduce}) and is +ideally suited for most rendering workloads. -The example we consider here computes a \code{ROT13} ``encryption'' of a string, which +The example we consider here computes a \code{ROT13} ``encryption'' of a string, which most certainly violates the ``significant amount of computation'' assumption. It was chosen due to the inherent parallelism and simplicity of this task. -While of course over-engineered to the extreme, the example hopefully +While of course over-engineered to the extreme, the example hopefully communicates how this framework might be used in more complex scenarios. We will implement this program as a plugin for the utility launcher \code{mtsutil}, which @@ -49,9 +49,9 @@ MTS_NAMESPACE_END \end{cpp} The file must also be added to the build system: insert the line \begin{shell} -plugins += $\texttt{env}$.SharedLibrary('plugins/rot13', ['src/utils/rot13.cpp']) +plugins += $\texttt{env}$.SharedLibrary('rot13', ['rot13.cpp']) \end{shell} -into the SConscript (near the comment ``\code{Build the plugins -- utilities}''). 
After compiling +into the \code{utils/SConscript} file. After compiling using \code{scons}, the \code{mtsutil} binary should automatically pick up your new utility plugin: \begin{shell} $\texttt{\$}$ mtsutil @@ -69,7 +69,7 @@ $\texttt{\$}$ mtsutil rot13 Hello world! \end{shell} -Our approach for implementing distributed ROT13 will be to treat each character as an +Our approach for implementing distributed ROT13 will be to treat each character as an indpendent work unit. Since the ordering is lost when sending out work units, we must also include the position of the character in both the work units and the work results. @@ -78,7 +78,7 @@ For reference, here are the interfaces of \code{WorkUnit} and \code{WorkResult}: \begin{cpp} /** * Abstract work unit. Represents a small amount of information - * that encodes part of a larger processing task. + * that encodes part of a larger processing task. */ class MTS_EXPORT_CORE WorkUnit : public Object { public: @@ -100,7 +100,7 @@ protected: virtual ~WorkUnit() { } }; /** - * Abstract work result. Represents the information that encodes + * Abstract work result. Represents the information that encodes * the result of a processed WorkUnit instance. */ class MTS_EXPORT_CORE WorkResult : public Object { @@ -125,7 +125,7 @@ In our case, the \code{WorkUnit} implementation then looks like this: class ROT13WorkUnit : public WorkUnit { public: void set(const WorkUnit *workUnit) { - const ROT13WorkUnit *wu = + const ROT13WorkUnit *wu = static_cast(workUnit); m_char = wu->m_char; m_pos = wu->m_pos; @@ -135,10 +135,10 @@ public: m_char = stream->readChar(); m_pos = stream->readInt(); } - + void save(Stream *stream) const { stream->writeChar(m_char); - stream->writeInt(m_pos); + stream->writeInt(m_pos); } std::string toString() const { @@ -163,14 +163,14 @@ private: MTS_IMPLEMENT_CLASS(ROT13WorkUnit, false, WorkUnit) \end{cpp} -The \code{ROT13WorkResult} implementation is not reproduced since it is almost identical +The \code{ROT13WorkResult} implementation is not reproduced since it is almost identical (except that it doesn't need the \code{set} method). -The similarity is not true in general: for most algorithms, the work unit and result +The similarity is not true in general: for most algorithms, the work unit and result will look completely different. Next, we need a class, which does the actual work of turning a work unit into a work result (a subclass of \code{WorkProcessor}). Again, we need to implement a range of support -methods to enable the various ways in which work processor instances will be submitted to +methods to enable the various ways in which work processor instances will be submitted to remote worker nodes and replicated amongst local threads. 
\begin{cpp} class ROT13WorkProcessor : public WorkProcessor { @@ -190,7 +190,7 @@ public: return new ROT13WorkUnit(); } - ref createWorkResult() const { + ref createWorkResult() const { return new ROT13WorkResult(); } @@ -202,9 +202,9 @@ public: void prepare() { } /// Do the actual computation - void process(const WorkUnit *workUnit, WorkResult *workResult, + void process(const WorkUnit *workUnit, WorkResult *workResult, const bool &stop) { - const ROT13WorkUnit *wu + const ROT13WorkUnit *wu = static_cast(workUnit); ROT13WorkResult *wr = static_cast(workResult); wr->setPos(wu->getPos()); @@ -252,9 +252,9 @@ public: } void processResult(const WorkResult *result, bool cancelled) { - if (cancelled) // indicates a work unit, which was + if (cancelled) // indicates a work unit, which was return; // cancelled partly through its execution - const ROT13WorkResult *wr = + const ROT13WorkResult *wr = static_cast(result); m_output[wr->getPos()] = wr->getChar(); } @@ -273,7 +273,7 @@ MTS_IMPLEMENT_CLASS(ROT13Process, false, ParallelProcess) \end{cpp} The \code{generateWork} method produces work units until we have moved past the end of the string, after which it returns the status code \code{EFailure}. -Note the method \code{getRequiredPlugins()}: this is necessary to use +Note the method \code{getRequiredPlugins()}: this is necessary to use the utility across machines. When communicating with another node, it ensures that the remote side loads the \code{ROT13*} classes at the right moment. @@ -302,9 +302,9 @@ from the main utility function (the `Hello World' code we wrote earlier). We can } \end{cpp} After compiling everything using \code{scons}, a simple example -involving the utility would be to encode a string (e.g. \code{SECUREBYDESIGN}), while +involving the utility would be to encode a string (e.g. \code{SECUREBYDESIGN}), while forwarding all computation to a network machine. (\code{-p0} disables -all local worker threads). Adding a verbose flag (\code{-v}) shows +all local worker threads). Adding a verbose flag (\code{-v}) shows some additional scheduling information: \begin{shell} $\texttt{\$}$ mtsutil -vc feynman -p0 rot13 SECUREBYDESIGN diff --git a/doc/plugins.tex b/doc/plugins.tex index a86ef0f5..91824168 100644 --- a/doc/plugins.tex +++ b/doc/plugins.tex @@ -1,9 +1,9 @@ \section{Plugin reference} \vspace{-2mm} -The following subsections describe the available Mitsuba plugins, usually along +The following subsections describe the available Mitsuba plugins, usually along with example renderings and a description of what each parameter does. They are separated into subsections covering textures, surface scattering -models, etc. +models, etc. Each subsection begins with a brief general description. The documentation of a plugin always starts on a new page and is preceded @@ -14,7 +14,7 @@ by a table similar to the one below: \default{\code{false}} } \parameter{darkMatter}{\Float}{ - Controls the proportionate amount of dark matter present in the scene. + Controls the proportionate amount of dark matter present in the scene. \default{0.83} } } @@ -27,10 +27,10 @@ this description, it can be instantiated from an XML scene file using a custom c \end{xml} -In some cases\footnote{Note that obvious parameters are generally omitted. -For instance, all shape plugins accept a surface scattering plugin, but this -is left out from the documentation for brevity.}, plugins also indicate that they accept nested plugins -as input arguments. These can either be \emph{named} or \emph{unnamed}. 
If +In some cases\footnote{Note that obvious parameters are generally omitted. +For instance, all shape plugins accept a surface scattering plugin, but this +is left out from the documentation for brevity.}, plugins also indicate that they accept nested plugins +as input arguments. These can either be \emph{named} or \emph{unnamed}. If the \code{amazing} integrator also accepted the following two parameters\vspace{-2mm} \parameters{ \parameter{\Unnamed}{\Integrator}{A nested integrator which does the actual hard work} diff --git a/doc/python.tex b/doc/python.tex index 775c1c3e..43227b7d 100644 --- a/doc/python.tex +++ b/doc/python.tex @@ -1,14 +1,29 @@ \section{Python integration} \label{sec:python} -A recent feature of Mitsuba is a simple Python interface to the renderer API. +A recent feature of Mitsuba is a Python interface to the renderer API. While the interface is still limited at this point, it can already be used for many useful purposes. To access the API, start your Python interpreter and enter \begin{python} import mitsuba \end{python} +\paragraph{Mac OS:} For this to work on MacOS X, you will first have to run the ``\emph{Apple Menu}$\to$\emph{Command-line access}'' menu item from within Mitsuba. +In the unlikely case that you run into shared library loading issues (this is +taken care of by default), you may have to set the \code{LD\_LIBRARY\_PATH} +environment variable before starting Python so that it points to where the +Mitsuba libraries are installed (e.g. the \code{Mitsuba.app/Contents/Frameworks} +directory). + +When Python crashes directly after the \code{import mitsuba} statement, +make sure that Mitsuba is linked against the right Python distribution +(i.e. matching the \code{python} binary you are using). For e.g. Python +2.7, can be done by adjusting the \code{PYTHON27INCLUDE} and +\code{PYTHON27LIBDIR} variables in \code{config.py}. For other versions, +adjust the numbers accordingly. + +\paragraph{Windows and Linux:} On Windows and \emph{non-packaged} Linux builds, you may have to explicitly specify the required extension search path before issuing the \code{import} command, e.g.: \begin{python} @@ -29,6 +44,9 @@ os.environ['PATH'] = 'path-to-mitsuba-directory' + os.pathsep + os.environ['PATH import mitsuba \end{python} +In rare cases when running on Linux, it may also be necessary to set the +\code{LD\_LIBRARY\_PATH} environment variable before starting Python so that it +points to where the Mitsuba core libraries are installed. For an overview of the currently exposed API subset, please refer to the following page: \url{http://www.mitsuba-renderer.org/api/group__libpython.html}. @@ -64,8 +82,8 @@ classes, function, or entire namespaces when running an interactive Python shell ... \end{shell} The docstrings list the currently exported functionality, as well as C++ and Python signatures, but they -don't document what these functions actually do. The web API documentation is the preferred source for -this information. +don't document what these functions actually do. The web API documentation is +the preferred source of this information. \subsection{Basics} Generally, the Python API tries to mimic the C++ API as closely as possible. @@ -337,3 +355,43 @@ logger.setLogLevel(EDebug) Log(EInfo, 'Test message') \end{python} +\subsubsection{Rendering a turntable animation with motion blur} +Rendering a turntable animation is a fairly common task that is +conveniently accomplished via the Python interface. 
In a turntable +video, the camera rotates around a completely static object or scene. +The following snippet does this for the material test ball scene downloadable +on the main website, complete with motion blur. It assumes that the +scene and scheduler have been set up approriately using one of the previous +snippets. +\begin{python} +sensor = scene.getSensor() +sensor.setShutterOpen(0) +sensor.setShutterOpenTime(1) + +stepSize = 5 +for i in range(0,360 / stepSize): + rotationCur = Transform.rotate(Vector(0, 0, 1), i*stepSize); + rotationNext = Transform.rotate(Vector(0, 0, 1), (i+1)*stepSize); + + trafoCur = Transform.lookAt(rotationCur * Point(0,-6,4), + Point(0, 0, .5), rotationCur * Vector(0, 1, 0)) + trafoNext = Transform.lookAt(rotationNext * Point(0,-6,4), + Point(0, 0, .5), rotationNext * Vector(0, 1, 0)) + + atrafo = AnimatedTransform() + atrafo.appendTransform(0, trafoCur) + atrafo.appendTransform(1, trafoNext) + atrafo.sortAndSimplify() + sensor.setWorldTransform(atrafo) + + scene.setDestinationFile('frame_%03i.png' % i) + job = RenderJob('job_%i' % i, scene, queue) + job.start() + + queue.waitLeft(0) + queue.join() +\end{python} +A useful property of this approach is that scene loading and initialization +must only take place once. Performance-wise, this compares favourably with +running many separate rendering jobs, e.g. using the \code{mitsuba} +command-line executable. diff --git a/doc/section_bsdf.tex b/doc/section_bsdf.tex index a03050ec..2e835c94 100644 --- a/doc/section_bsdf.tex +++ b/doc/section_bsdf.tex @@ -13,34 +13,34 @@ \label{sec:bsdfs} Surface scattering models describe the manner in which light interacts -with surfaces in the scene. They conveniently summarize the mesoscopic -scattering processes that take place within the material and +with surfaces in the scene. They conveniently summarize the mesoscopic +scattering processes that take place within the material and cause it to look the way it does. -This represents one central component of the material system in Mitsuba---another -part of the renderer concerns itself with what happens -\emph{in between} surface interactions. For more information on this aspect, +This represents one central component of the material system in Mitsuba---another +part of the renderer concerns itself with what happens +\emph{in between} surface interactions. For more information on this aspect, please refer to Sections~\ref{sec:media} and \ref{sec:subsurface}. -This section presents an overview of all surface scattering models that are +This section presents an overview of all surface scattering models that are supported, along with their parameters. \subsubsection*{BSDFs} -To achieve realistic results, Mitsuba comes with a library of both +To achieve realistic results, Mitsuba comes with a library of both general-purpose surface scattering models (smooth or rough glass, metal, plastic, etc.) and specializations to particular materials (woven cloth, masks, etc.). Some model plugins fit neither category and can best be described -as \emph{modifiers} that are applied on top of one or more scattering models. +as \emph{modifiers} that are applied on top of one or more scattering models. -Throughout the documentation and within the scene description +Throughout the documentation and within the scene description language, the word \emph{BSDF} is used synonymously with the term ``surface -scattering model''. This is an abbreviation for \emph{Bidirectional -Scattering Distribution Function}, a more precise technical -term. 
+scattering model''. This is an abbreviation for \emph{Bidirectional +Scattering Distribution Function}, a more precise technical +term. -In Mitsuba, BSDFs are +In Mitsuba, BSDFs are assigned to \emph{shapes}, which describe the visible surfaces in the scene. In the scene description language, this assignment can -either be performed by nesting BSDFs within shapes, or they can -be named and then later referenced by their name. +either be performed by nesting BSDFs within shapes, or they can +be named and then later referenced by their name. The following fragment shows an example of both kinds of usages: \begin{xml} @@ -75,35 +75,35 @@ memory usage. \label{fig:glass-explanation} Some of the scattering models in Mitsuba need to know the indices of refraction on the exterior and interior-facing - side of a surface. + side of a surface. It is therefore important to decompose the mesh into meaningful separate surfaces corresponding to each index of refraction change. The example here shows such a decomposition for a water-filled Glass. } \end{figure} -A vital consideration when modeling a scene in a physically-based rendering -system is that the used materials do not violate physical properties, and +A vital consideration when modeling a scene in a physically-based rendering +system is that the used materials do not violate physical properties, and that their arrangement is meaningful. For instance, imagine having designed -an architectural interior scene that looks good except for a white desk that -seems a bit too dark. A closer inspection reveals that it uses a Lambertian -material with a diffuse reflectance of $0.9$. +an architectural interior scene that looks good except for a white desk that +seems a bit too dark. A closer inspection reveals that it uses a Lambertian +material with a diffuse reflectance of $0.9$. -In many rendering systems, it would be feasible to increase the -reflectance value above $1.0$ in such a situation. But in Mitsuba, even a -small surface that reflects a little more light than it receives will -likely break the available rendering algorithms, or cause them to produce otherwise +In many rendering systems, it would be feasible to increase the +reflectance value above $1.0$ in such a situation. But in Mitsuba, even a +small surface that reflects a little more light than it receives will +likely break the available rendering algorithms, or cause them to produce otherwise unpredictable results. In fact, the right solution in this case would be to switch to a different the lighting setup that causes more illumination to be received by -the desk and then \emph{reduce} the material's reflectance---after all, it is quite unlikely that +the desk and then \emph{reduce} the material's reflectance---after all, it is quite unlikely that one could find a real-world desk that reflects 90\% of all incident light. As another example of the necessity for a meaningful material description, consider -the glass model illustrated in \figref{glass-explanation}. Here, careful thinking -is needed to decompose the object into boundaries that mark index of +the glass model illustrated in \figref{glass-explanation}. Here, careful thinking +is needed to decompose the object into boundaries that mark index of refraction-changes. If this is done incorrectly and a beam of light can potentially pass through a sequence of incompatible index of refraction changes (e.g. 
$1.00\to 1.33$ followed by $1.50\to1.33$), the output is undefined and will quite likely -even contain inaccuracies in parts of the scene that are far +even contain inaccuracies in parts of the scene that are far away from the glass. diff --git a/doc/section_films.tex b/doc/section_films.tex index 9fdb0b68..325ec17d 100644 --- a/doc/section_films.tex +++ b/doc/section_films.tex @@ -3,7 +3,7 @@ \label{sec:films} A film defines how conducted measurements are stored and converted into the final output file that is written to disk at the end of the rendering process. Mitsuba comes with a few -films that can write to high and low dynamic range image formats (OpenEXR, JPEG or PNG), as well +films that can write to high and low dynamic range image formats (OpenEXR, JPEG or PNG), as well more scientifically oriented data formats (e.g. MATLAB or Mathematica). In the XML scene description language, a normal film configuration might look as follows @@ -20,7 +20,7 @@ In the XML scene description language, a normal film configuration might look as - @@ -28,6 +28,6 @@ In the XML scene description language, a normal film configuration might look as \end{xml} The \code{film} plugin should be instantiated nested inside a \code{sensor} declaration. -Note how the output filename is never specified---it is automatically inferred -from the scene filename and can be manually overridden by passing the configuration parameter +Note how the output filename is never specified---it is automatically inferred +from the scene filename and can be manually overridden by passing the configuration parameter \code{-o} to the \code{mitsuba} executable when rendering from the command line. diff --git a/doc/section_integrators.tex b/doc/section_integrators.tex index 1afce34d..16e6a671 100644 --- a/doc/section_integrators.tex +++ b/doc/section_integrators.tex @@ -1,14 +1,14 @@ \newpage \subsection{Integrators} \label{sec:integrators} -In Mitsuba, the different rendering techniques are collectively referred to as +In Mitsuba, the different rendering techniques are collectively referred to as \emph{integrators}, since they perform integration over a high-dimensional space. Each integrator represents a specific approach for solving the light transport equation---usually favored in certain scenarios, but at the same time affected by its own set of intrinsic limitations. -Therefore, it is important to carefully select an integrator based on -user-specified accuracy requirements and properties of the scene to be -rendered. +Therefore, it is important to carefully select an integrator based on +user-specified accuracy requirements and properties of the scene to be +rendered. In Mitsuba's XML description language, a single integrator is usually instantiated by declaring it at the top level within the @@ -28,15 +28,15 @@ scene, e.g. \end{xml} -This section gives a brief overview of the available choices +This section gives a brief overview of the available choices along with their parameters. \subsubsection*{Choosing an integrator} -Due to the large number of integrators in Mitsuba, the decision of which +Due to the large number of integrators in Mitsuba, the decision of which one is suitable may seem daunting. Assuming that the goal is to solve the full light transport equation without approximations, a few integrators (\pluginref{ao}, \pluginref{direct}, \pluginref{vpl}) -can already be ruled out. The adjoint particle tracer \pluginref{ptracer} is +can already be ruled out. 
The adjoint particle tracer \pluginref{ptracer} is also rarely used. The following ``algorithm'' may help to decide amongst the remaining ones: @@ -46,11 +46,11 @@ The following ``algorithm'' may help to decide amongst the remaining ones: Mitsuba currently comes with three path tracer variations that target different setups: It your scene contains no media and no surfaces with opacity masks, use the plain path tracer (\pluginref{path}). -Otherwise, use one of the volumetric path tracers (\pluginref[volpathsimple]{volpath\_simple} +Otherwise, use one of the volumetric path tracers (\pluginref[volpathsimple]{volpath\_simple} or \pluginref{volpath}). The latter is preferable if the scene contains glossy surface scattering models. -\item If step 1 produced poor (i.e. noisy and slowly converging) results, try +\item If step 1 produced poor (i.e. noisy and slowly converging) results, try the bidirectional path tracer (\pluginref{bdpt}). -\item If steps 1 and 2 failed, the scene contains a relatively difficult lighting setup, potentially +\item If steps 1 and 2 failed, the scene contains a relatively difficult lighting setup, potentially including interaction with complex materials. In many cases, these difficulties can be greatly ameliorated by running a ``metropolized'' version of a path tracer. This is implemented in the Primary Sample Space MLT (\pluginref{pssmlt}) plugin. @@ -69,7 +69,7 @@ method (\pluginref{mlt}, \pluginref{erpt}). \smallrendering{Max. depth = $\infty$}{pathdepth-all} \caption{ \label{fig:pathdepths} - These Cornell box renderings demonstrate the visual + These Cornell box renderings demonstrate the visual effect of a maximum path depth. As the paths are allowed to grow longer, the color saturation increases due to multiple scattering interactions @@ -79,13 +79,13 @@ method (\pluginref{mlt}, \pluginref{erpt}). \end{figure} Almost all integrators use the concept of \emph{path depth}. -Here, a path refers to a chain of scattering events that +Here, a path refers to a chain of scattering events that starts at the light source and ends at the eye or sensor. -It is often useful to limit the path depth (\figref{pathdepths}) -when rendering scenes for preview purposes, since this reduces the amount +It is often useful to limit the path depth (\figref{pathdepths}) +when rendering scenes for preview purposes, since this reduces the amount of computation that is necessary per pixel. Furthermore, such renderings usually converge faster and therefore need fewer samples per pixel. -When reference-quality is desired, one should always leave the path +When reference-quality is desired, one should always leave the path depth unlimited. \begin{figure}[h!] @@ -95,21 +95,42 @@ depth unlimited. \vspace{-5mm} \caption{ \label{fig:path-explanation} - A ray of emitted light is scattered by an object and subsequently + A ray of emitted light is scattered by an object and subsequently reaches the eye/sensor. In Mitsuba, this is a \emph{depth-2} path, since it has two edges. } \end{figure} Mitsuba counts depths starting at $1$, which correspond to -visible light sources (i.e. a path that starts at the light -source and ends at the eye or sensor without any scattering +visible light sources (i.e. a path that starts at the light +source and ends at the eye or sensor without any scattering interaction in between). A depth-$2$ path (also known as ``direct illumination'') includes a single scattering event (\figref{path-explanation}). 
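In a custom integrator such as the one developed in the plugin tutorial, this depth convention can be honored with a simple guard at the top of \code{Li()}. The sketch below assumes a hypothetical \code{m\_maxDepth} member mirroring the usual \code{maxDepth} parameter (with $-1$ meaning ``unlimited''); it is an illustration of the counting convention rather than an excerpt from one of the shipped integrators.
\begin{cpp}
Spectrum Li(const RayDifferential &r, RadianceQueryRecord &rRec) const {
    /* rRec.depth == 1 corresponds to a directly visible emitter and
       rRec.depth == 2 to direct illumination. Stop once the configured
       limit has been reached. */
    if (m_maxDepth != -1 && rRec.depth > m_maxDepth)
        return Spectrum(0.0f);

    /* ... compute the next scattering interaction and recurse ... */
    return Spectrum(0.0f);
}
\end{cpp}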
\subsubsection*{Progressive versus non-progressive} Some of the rendering techniques in Mitsuba are \emph{progressive}. -What this means is that they display a rough preview, which improves over time. +What this means is that they display a rough preview, which improves over time. Leaving them running indefinitely will continually reduce noise (in unbiased algorithms -such as Metropolis Light Transport) or noise and bias (in biased +such as Metropolis Light Transport) or noise and bias (in biased rendering techniques such as Progressive Photon Mapping). +\newpage +\subsubsection*{Hiding directly visible emitters} +\label{sec:hideemitters} +Several rendering algorithms in Mitsuba have a feature to hide directly +visible light sources (e.g. environment maps or area lights). While not +particularly realistic, this feature is often convenient to remove a background +from a rendering so that it can be pasted into a differently-colored document. + +Note that only directly visible emitters can be hidden using this feature---a +reflection on a shiny surface will be unaffected. To perform the kind of +compositing shown in Figure~\ref{fig:hideemitters}, it is also necessary to +enable the alpha channel in the scene's film instance (Section~\ref{sec:films}). + +\renderings{ + \unframedrendering{Daylit smoke rendered with \code{hideEmitters} set to \code{false} + (the default setting)}{integrator_volpath_normal} + \unframedrendering{Rendered with \code{hideEmitters} set to \code{true} and alpha-composited + onto a white background.}{integrator_volpath_hideemitters} + \caption{\label{fig:hideemitters}An example application of the \code{hideEmitters} parameter + together with alpha blending} +} diff --git a/doc/section_media.tex b/doc/section_media.tex index 72ca44b5..3a6a42ed 100644 --- a/doc/section_media.tex +++ b/doc/section_media.tex @@ -11,7 +11,7 @@ \caption{Participating media are not limited to smoke or fog: they are also great for rendering fuzzy materials such as these knitted sweaters (made using the \pluginref{heterogeneous} and \pluginref{microflake} plugins). - Figure courtesy of Yuksel et al. \cite{Yuksel2012Stitch}, models courtesy of + Figure courtesy of Yuksel et al. \cite{Yuksel2012Stitch}, models courtesy of Rune Spaans and Christer Sveen.} } In Mitsuba, participating media are used to simulate materials ranging from diff --git a/doc/section_phase.tex b/doc/section_phase.tex index bed50d2a..6092f9f3 100644 --- a/doc/section_phase.tex +++ b/doc/section_phase.tex @@ -1,17 +1,17 @@ \newpage \subsection{Phase functions} \label{sec:phase} -This section contains a description of all implemented medium scattering models, which -are also known as \emph{phase functions}. These are very similar in principle to surface -scattering models (or \emph{BSDF}s), and essentially describe where light travels after +This section contains a description of all implemented medium scattering models, which +are also known as \emph{phase functions}. These are very similar in principle to surface +scattering models (or \emph{BSDF}s), and essentially describe where light travels after hitting a particle within the medium. The most commonly used models for smoke, fog, and other homogeneous media -are isotropic scattering (\pluginref{isotropic}) and the Henyey-Greenstein +are isotropic scattering (\pluginref{isotropic}) and the Henyey-Greenstein phase function (\pluginref{hg}). 
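The Henyey-Greenstein model has a simple closed form that depends only on the asymmetry parameter $g\in(-1,1)$ and the cosine of the angle between the incident propagation direction and the outgoing direction. The stand-alone sketch below evaluates this textbook expression; it is not the \pluginref{hg} plugin's actual source and ignores the plugin's direction conventions.
\begin{cpp}
#include <cmath>

/// Henyey-Greenstein phase function: g > 0 favors forward scattering,
/// g < 0 backward scattering, and g = 0 reduces to isotropic scattering.
float henyeyGreenstein(float cosTheta, float g) {
    const float invFourPi = 0.0795774715f;       // 1 / (4*pi)
    float denom = 1.0f + g*g - 2.0f*g*cosTheta;
    return invFourPi * (1.0f - g*g) / (denom * std::sqrt(denom));
}
\end{cpp}
As required of any phase function, integrating this expression over the sphere of outgoing directions yields one.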
Mitsuba also supports \emph{anisotropic} -media, where the behavior of the medium changes depending on the direction -of light propagation (e.g. in volumetric representations of fabric). These -are the Kajiya-Kay (\pluginref{kkay}) and Micro-flake (\pluginref{microflake}) +media, where the behavior of the medium changes depending on the direction +of light propagation (e.g. in volumetric representations of fabric). These +are the Kajiya-Kay (\pluginref{kkay}) and Micro-flake (\pluginref{microflake}) models. Finally, there is also a phase function for simulating scattering in diff --git a/doc/section_rfilters.tex b/doc/section_rfilters.tex index 04e587e2..78861546 100644 --- a/doc/section_rfilters.tex +++ b/doc/section_rfilters.tex @@ -2,7 +2,7 @@ \subsection{Reconstruction filters} \label{sec:rfilters} Image reconstruction filters are responsible for converting a series of radiance samples generated -jointly by the \emph{sampler} and \emph{integrator} into the final output image that will be written +jointly by the \emph{sampler} and \emph{integrator} into the final output image that will be written to disk at the end of a rendering process. This section gives a brief overview of the reconstruction filters that are available in Mitsuba. There is no universally superior filter, and the final choice depends on a trade-off between @@ -15,44 +15,44 @@ near discontinuities, such as a light-shadow transiton. \begin{description} \item[Box filter (\code{box}):] -the fastest, but also about the worst possible -reconstruction filter, since it is extremely prone to aliasing. +the fastest, but also about the worst possible +reconstruction filter, since it is extremely prone to aliasing. It is included mainly for completeness, though some rare situations may warrant its use. \item[Tent filter (\code{tent}):] Simple tent, or triangle filter. This reconstruction filter never -suffers from ringing and usually causes less aliasing than a naive +suffers from ringing and usually causes less aliasing than a naive box filter. When rendering scenes with sharp brightness discontinuities, this may be useful; otherwise, negative-lobed filters will be preferable (e.g. Mitchell-Netravali or Lanczos Sinc) \item[Gaussian filter (\code{gaussian}):] this is a windowed Gaussian filter with configurable standard deviation. -It produces pleasing results and never suffers from ringing, but may +It produces pleasing results and never suffers from ringing, but may occasionally introduce too much blurring. When no reconstruction filter is explicitly requested, this is the default choice in Mitsuba. \item[Mitchell-Netravali filter (\code{mitchell}):] Separable cubic spline reconstruction filter by Mitchell and Netravali \cite{Mitchell:1988:Reconstruction} -This is often a good compromise between sharpness and ringing. +This is often a good compromise between sharpness and ringing. -The plugin has two \code{float}-valued parameters named \texttt{B} and \texttt{C} that +The plugin has two \code{float}-valued parameters named \texttt{B} and \texttt{C} that correspond to the two parameters in the original research paper. By default, these are set to the recommended value of $1/3$, but can be tweaked if desired. \item[Catmull-Rom filter (\code{catmullrom}):] -This is a special version of the Mitchell-Netravali filter that has the +This is a special version of the Mitchell-Netravali filter that has the constants \texttt{B} and \texttt{C} adjusted to produce higher sharpness at the cost of increased susceptibility to ringing. 
\item[Lanczos Sinc filter (\code{lanczos}):] This is a windowed version of the theoretically optimal low-pass filter. -It is generally one of the best available filters in terms of producing sharp -high-quality output. Its main disadvantage is that it produces strong ringing around -discontinuities, which can become a serious problem when rendering bright objects -with sharp edges (for instance, a directly visible light source will have black -fringing artifacts around it). +It is generally one of the best available filters in terms of producing sharp +high-quality output. Its main disadvantage is that it produces strong ringing around +discontinuities, which can become a serious problem when rendering bright objects +with sharp edges (for instance, a directly visible light source will have black +fringing artifacts around it). This is also the computationally slowest reconstruction filter. This plugin has an \code{integer}-valued parameter named \code{lobes}, that @@ -60,19 +60,19 @@ sets the desired number of filter side-lobes. The higher, the closer the filter will approximate an optimal low-pass filter, but this also increases the susceptibility to ringing. Values of 2 or 3 are common (3 is the default). \end{description} -The next section contains a series of comparisons between reconstruction filters. In the first -case, a very high-resolution input image (corresponding to a hypothetical radiance field +The next section contains a series of comparisons between reconstruction filters. In the first +case, a very high-resolution input image (corresponding to a hypothetical radiance field incident at the camera) is reconstructed at low resolutions. \newpage \subsubsection{Reconstruction filter comparison 1: frequency attenuation and aliasing} \vspace{-2mm} -Here, a high frequency function is reconstructed at low resolutions. A good filter +Here, a high frequency function is reconstructed at low resolutions. A good filter (e.g. Lanczos Sinc) will capture all oscillations that are representable at the desired resolution and attenuate the remainder to a uniform gray. The filters are ordered by their approximate level of success at this benchmark. \renderings{ - \subfloat[A high resolution input image whose frequency decreases + \subfloat[A high resolution input image whose frequency decreases towards the borders. If you are looking at this on a computer, you may have to zoom in.]{\fbox{\includegraphics[width=0.43\textwidth]{images/rfilter_sines_input}}} \hfill @@ -92,7 +92,7 @@ approximate level of success at this benchmark. } \newpage \subsubsection{Reconstruction filter comparison 2: ringing} -This comparison showcases the ringing artifacts that can occur when the rendered +This comparison showcases the ringing artifacts that can occur when the rendered image contains extreme and discontinuous brightness transitions. The Mitchell-Netravali, Catmull-Rom, and Lanczos Sinc filters are affected by this problem. Note the black fringing around the light source in the cropped Cornell box renderings below. diff --git a/doc/section_samplers.tex b/doc/section_samplers.tex index ffd1e5c0..97762b70 100644 --- a/doc/section_samplers.tex +++ b/doc/section_samplers.tex @@ -1,16 +1,16 @@ \newpage \subsection{Sample generators} \label{sec:samplers} -When rendering an image, Mitsuba has to solve a high-dimensional integration problem that involves the -geometry, materials, lights, and sensors that make up the scene. 
Because of the mathematical complexity +When rendering an image, Mitsuba has to solve a high-dimensional integration problem that involves the +geometry, materials, lights, and sensors that make up the scene. Because of the mathematical complexity of these integrals, it is generally impossible to solve them analytically --- instead, they -are solved \emph{numerically} by evaluating the function to be integrated at a large number of -different positions referred to as \emph{samples}. Sample generators are an essential ingredient to this -process: they produce points in a (hypothetical) infinite dimensional hypercube $[0,1]^{\infty}$ that +are solved \emph{numerically} by evaluating the function to be integrated at a large number of +different positions referred to as \emph{samples}. Sample generators are an essential ingredient to this +process: they produce points in a (hypothetical) infinite dimensional hypercube $[0,1]^{\infty}$ that constitute the canonical representation of these samples. To do its work, a rendering algorithm, or \emph{integrator}, will send many queries to the sample generator. -Generally, it will request subsequent 1D or 2D components of this infinite-dimensional ``point'' and map +Generally, it will request subsequent 1D or 2D components of this infinite-dimensional ``point'' and map them into a more convenient space (for instance, positions on surfaces). This allows it to construct light paths to eventually evaluate the flow of light through the scene. @@ -18,6 +18,6 @@ Since the whole process starts with a large number of points in the abstract spa it is natural to consider different ways of positioning them. Desirable properties of a sampler are that it ``randomly'' covers the whole space evenly with samples, but without placing samples too close to each other. This leads to such notions as \emph{stratified sampling} and \emph{low-discrepancy} -number sequences. The samplers in this section make different guarantees on the quality of generated -samples based on these criteria. To obtain intuition about their behavior, the provided point plots +number sequences. The samplers in this section make different guarantees on the quality of generated +samples based on these criteria. To obtain intuition about their behavior, the provided point plots illustrate the resulting sample placement. diff --git a/doc/section_sensors.tex b/doc/section_sensors.tex index 06dd9168..cb5ed783 100644 --- a/doc/section_sensors.tex +++ b/doc/section_sensors.tex @@ -8,12 +8,12 @@ into a given direction or the irradiance received by a certain surface. The foll section lists the available choices. \subsubsection*{Handedness convention} -Sensors in Mitsuba are \emph{right-handed}. +Sensors in Mitsuba are \emph{right-handed}. Any number of rotations and translations can be applied to them without changing this property. By default they are located at the origin and oriented in such a way that in the rendered image, $+X$ points left, $+Y$ points upwards, and $+Z$ points along the viewing direction. Left-handed sensors are also supported. To switch the handedness, -flip any one of the axes, e.g. by passing a scale transformation like +flip any one of the axes, e.g. by passing a scale transformation like \code{} to the sensor's \code{toWorld} parameter. 
diff --git a/doc/section_shapes.tex b/doc/section_shapes.tex index f9e4673e..ca8582a8 100644 --- a/doc/section_shapes.tex +++ b/doc/section_shapes.tex @@ -5,11 +5,11 @@ This section presents an overview of the shape plugins that are released along w In Mitsuba, shapes define surfaces that mark transitions between different types of materials. For instance, a shape could describe a boundary between air and a solid object, such as a piece of rock. Alternatively, -a shape can mark the beginning of a region of space that isn't solid at all, but rather contains a participating +a shape can mark the beginning of a region of space that isn't solid at all, but rather contains a participating medium, such as smoke or steam. Finally, a shape can be used to create an object that emits light on its own. -Shapes are usually declared along with a surface scattering model (named ``BSDF'', see \secref{bsdfs} for details). -This BSDF characterizes what happens \emph{at the surface}. In the XML scene description language, this might look like +Shapes are usually declared along with a surface scattering model (named ``BSDF'', see \secref{bsdfs} for details). +This BSDF characterizes what happens \emph{at the surface}. In the XML scene description language, this might look like the following: \begin{xml} @@ -20,7 +20,7 @@ the following: ... $\code{bsdf}$ parameters .. - + * + * + * + * + * + * \end{xml} + * + * The \code{value="..."} argument may also include certain keywords that will be + * evaluated and substituted when the rendered image is written to disk. A list all available + * keywords is provided in Table~\ref{tbl:film-keywords}. + * + * Apart from querying the render time, + * memory usage, and other scene-related information, it is also possible + * to `paste' an existing parameter that was provided to another plugin---for instance,the + * the camera transform matrix would be obtained as \code{\$sensor['toWorld']}. The name of + * the active integrator plugin is given by \code{\$integrator['type']}, and so on. + * All of these can be mixed to build larger fragments, as following example demonstrates. + * The result of this annotation is shown in Figure~\ref{fig:annotation-example}. + * \begin{xml}[mathescape=false] + * + * \end{xml} + * \vspace{1cm} + * \renderings{ + * \fbox{\includegraphics[width=.8\textwidth]{images/annotation_example}}\hfill\, + * \caption{\label{fig:annotation-example}A demonstration of the label annotation feature + * given the example string shown above.} + * } + * \vspace{2cm} + * \begin{table}[htb] + * \centering + * \begin{savenotes} + * \begin{tabular}{ll} + * \toprule + * \code{\$scene['renderTime']}& Image render time, use \code{renderTimePrecise} for more digits.\\ + * \code{\$scene['memUsage']}& Mitsuba memory usage\footnote{The definition of this quantity unfortunately + * varies a bit from platform to platform. On Linux and Windows, it denotes the total + * amount of allocated RAM and disk-based memory that is private to the process (i.e. not + * shared or shareable), which most intuitively captures the amount of memory required for + * rendering. On OSX, it denotes the working set size---roughly speaking, this is the + * amount of RAM apportioned to the process (i.e. excluding disk-based memory).}. 
+ * Use \code{memUsagePrecise} for more digits.\\ + * \code{\$scene['coreCount']}& Number of local and remote cores working on the rendering job\\ + * \code{\$scene['blockSize']}& Block size used to parallelize up the rendering workload\\ + * \code{\$scene['sourceFile']}& Source file name\\ + * \code{\$scene['destFile']}& Destination file name\\ + * \code{\$integrator['..']}& Copy a named integrator parameter\\ + * \code{\$sensor['..']}& Copy a named sensor parameter\\ + * \code{\$sampler['..']}& Copy a named sampler parameter\\ + * \code{\$film['..']}& Copy a named film parameter\\ + * \bottomrule + * \end{tabular} + * \end{savenotes} + * \caption{\label{tbl:film-keywords}A list of all special + * keywords supported by the annotation feature} + * \end{table} + * */ + class HDRFilm : public Film { public: HDRFilm(const Properties &props) : Film(props) { @@ -127,7 +211,7 @@ public: std::string fileFormat = boost::to_lower_copy( props.getString("fileFormat", "openexr")); std::string pixelFormat = boost::to_lower_copy( - props.getString("pixelFormat", "rgba")); + props.getString("pixelFormat", "rgb")); std::string componentFormat = boost::to_lower_copy( props.getString("componentFormat", "float16")); @@ -204,6 +288,16 @@ public: } + std::vector keys = props.getPropertyNames(); + for (size_t i=0; i bitmap = m_storage->getBitmap()->convert( @@ -322,26 +416,43 @@ public: } fs::path filename = m_destFile; + std::string properExtension; + if (m_fileFormat == Bitmap::EOpenEXR) + properExtension = ".exr"; + else if (m_fileFormat == Bitmap::ERGBE) + properExtension = ".rgbe"; + else + properExtension = ".pfm"; + std::string extension = boost::to_lower_copy(filename.extension().string()); - std::string properExtension = (m_fileFormat == Bitmap::EOpenEXR) ? ".exr" : ".rgbe"; if (extension != properExtension) filename.replace_extension(properExtension); Log(EInfo, "Writing image to \"%s\" ..", filename.string().c_str()); ref stream = new FileStream(filename, FileStream::ETruncWrite); + annotate(scene, m_properties, bitmap, renderTime, 1.0f); + /* Attach the log file to the image if this is requested */ Logger *logger = Thread::getThread()->getLogger(); std::string log; if (m_attachLog && logger->readLog(log)) { log += "\n\n"; log += Statistics::getInstance()->getStats(); - bitmap->setString("log", log); + bitmap->setMetadataString("log", log); } bitmap->write(m_fileFormat, stream); } + bool hasAlpha() const { + return + m_pixelFormat == Bitmap::ELuminanceAlpha || + m_pixelFormat == Bitmap::ERGBA || + m_pixelFormat == Bitmap::EXYZA || + m_pixelFormat == Bitmap::ESpectrumAlpha; + } + bool destinationExists(const fs::path &baseName) const { fs::path filename = baseName; std::string extension = (m_fileFormat == Bitmap::EOpenEXR) ? ".exr" : ".rgbe"; diff --git a/src/films/ldrfilm.cpp b/src/films/ldrfilm.cpp index b26f2ca1..07eece1f 100644 --- a/src/films/ldrfilm.cpp +++ b/src/films/ldrfilm.cpp @@ -22,6 +22,7 @@ #include #include #include "banner.h" +#include "annotations.h" MTS_NAMESPACE_BEGIN @@ -90,8 +91,10 @@ MTS_NAMESPACE_BEGIN * be used by the film. \default{\code{gaussian}, a windowed Gaussian filter}} * } * This plugin implements a low dynamic range film that can write out 8-bit PNG - * and JPEG images. It also provides basic tonemapping techniques to map recorded - * radiance values into a reasonable displayable range. + * and JPEG images in various configurations. It provides basic tonemapping techniques + * to map recorded radiance values into a reasonable displayable range. 
An alpha (opacity) + * channel can be written if desired. By default, the plugin writes gamma-corrected + * PNG files using the sRGB color space and no alpha channel. * * This film is a good choice when low dynamic range output is desired * and the rendering setup can be configured to capture the relevant portion @@ -113,6 +116,9 @@ MTS_NAMESPACE_BEGIN * The RGB values exported by this plugin correspond to the ITU-R Rec. BT. 709-3 * primaries with a D65 white point. When $\texttt{gamma}$ is set to $\code{-1}$ (the default), * the output is in the sRGB color space and will display as intended on compatible devices. + * + * Note that this plugin supports render-time \emph{annotations}, which + * are described on page~\pageref{sec:film-annotations}. */ class LDRFilm : public Film { public: @@ -128,7 +134,7 @@ public: std::string fileFormat = boost::to_lower_copy( props.getString("fileFormat", "png")); std::string pixelFormat = boost::to_lower_copy( - props.getString("pixelFormat", "rgba")); + props.getString("pixelFormat", "rgb")); std::string tonemapMethod = boost::to_lower_copy( props.getString("tonemapMethod", "gamma")); @@ -176,6 +182,16 @@ public: m_reinhardKey = props.getFloat("key", 0.18f); m_reinhardBurn = props.getFloat("burn", 0.0); + std::vector keys = props.getPropertyNames(); + for (size_t i=0; iwriteBool(m_hasBanner); - stream->writeUInt(m_fileFormat); stream->writeUInt(m_pixelFormat); + stream->writeUInt(m_fileFormat); stream->writeFloat(m_gamma); stream->writeUInt(m_tonemapMethod); stream->writeFloat(m_exposure); @@ -281,7 +297,7 @@ public: m_destFile = destFile; } - void develop() { + void develop(const Scene *scene, Float renderTime) { Log(EDebug, "Developing film .."); ref bitmap = m_storage->getBitmap(); @@ -328,9 +344,17 @@ public: Log(EInfo, "Writing image to \"%s\" ..", filename.string().c_str()); ref stream = new FileStream(filename, FileStream::ETruncWrite); + annotate(scene, m_properties, bitmap, renderTime, m_gamma); + bitmap->write(m_fileFormat, stream); } + bool hasAlpha() const { + return + m_pixelFormat == Bitmap::ELuminanceAlpha || + m_pixelFormat == Bitmap::ERGBA; + } + bool destinationExists(const fs::path &baseName) const { fs::path filename = baseName; std::string extension; diff --git a/src/films/mfilm.cpp b/src/films/mfilm.cpp index 97107f89..3a46e190 100644 --- a/src/films/mfilm.cpp +++ b/src/films/mfilm.cpp @@ -53,7 +53,7 @@ MTS_NAMESPACE_BEGIN * and \code{spectrumAlpha}. 
In the latter two cases, * the number of written channels depends on the value assigned to * \code{SPECTRUM\_SAMPLES} during compilation (see Section~\ref{sec:compiling} - * section for details) \default{\code{rgba}} + * section for details) \default{\code{luminance}} * } * \parameter{highQualityEdges}{\Boolean}{ * If set to \code{true}, regions slightly outside of the film @@ -238,7 +238,7 @@ public: m_destFile = destFile; } - void develop() { + void develop(const Scene *scene, Float renderTime) { Log(EDebug, "Developing film .."); fs::path filename = m_destFile; @@ -323,6 +323,14 @@ public: return fs::exists(filename); } + bool hasAlpha() const { + return + m_pixelFormat == Bitmap::ELuminanceAlpha || + m_pixelFormat == Bitmap::ERGBA || + m_pixelFormat == Bitmap::EXYZA || + m_pixelFormat == Bitmap::ESpectrumAlpha; + } + std::string toString() const { std::ostringstream oss; oss << "MFilm[" << endl diff --git a/src/films/tiledhdrfilm.cpp b/src/films/tiledhdrfilm.cpp index 0f15b59e..5476168c 100644 --- a/src/films/tiledhdrfilm.cpp +++ b/src/films/tiledhdrfilm.cpp @@ -53,7 +53,7 @@ MTS_NAMESPACE_BEGIN * the number of written channels depends on the value assigned to * \code{SPECTRUM\_SAMPLES} during compilation (see Section~\ref{sec:compiling} * section for details) - * \default{\code{rgba}} + * \default{\code{rgb}} * } * \parameter{componentFormat}{\String}{Specifies the desired floating * point component format used for the output. The options are @@ -81,7 +81,7 @@ MTS_NAMESPACE_BEGIN * Based on the provided parameter values, the film will either write a luminance, * luminance/alpha, RGB(A), XYZ(A) tristimulus, or spectrum/spectrum-alpha-based * bitmap having a \code{float16}, \code{float32}, or \code{uint32}-based - * internal representation. The default is RGBA and \code{float16}. + * internal representation. The default is RGB and \code{float16}. * Note that the spectral output options only make sense when using a * custom compiled Mitsuba distribution that has spectral rendering * enabled. This is not the case for the downloadable release builds. @@ -102,7 +102,7 @@ class TiledHDRFilm : public Film { public: TiledHDRFilm(const Properties &props) : Film(props), m_output(NULL), m_frameBuffer(NULL) { std::string pixelFormat = boost::to_lower_copy( - props.getString("pixelFormat", "rgba")); + props.getString("pixelFormat", "rgb")); std::string componentFormat = boost::to_lower_copy( props.getString("componentFormat", "float16")); @@ -156,7 +156,7 @@ public: } virtual ~TiledHDRFilm() { - develop(); + develop(NULL, 0); } void serialize(Stream *stream, InstanceManager *manager) const { @@ -167,7 +167,7 @@ public: void setDestinationFile(const fs::path &destFile, uint32_t blockSize) { if (m_output) - develop(); + develop(NULL, 0); Bitmap::EPixelFormat pixelFormat = m_pixelFormat; #if SPECTRUM_SAMPLES == 3 @@ -432,11 +432,11 @@ public: bool develop(const Point2i &sourceOffset, const Vector2i &size, const Point2i &targetOffset, Bitmap *target) const { - target->fill(targetOffset, size, Spectrum(0.0f)); + target->fillRect(targetOffset, size, Spectrum(0.0f)); return false; /* Not supported by the tiled EXR film! 
*/ } - void develop() { + void develop(const Scene *scene, Float renderTime) { if (m_output) { Log(EInfo, "Closing EXR file (%u tiles in total, peak memory usage: %u tiles)..", m_blocksH * m_blocksV, m_peakUsage); @@ -469,6 +469,14 @@ public: void clear() { /* Do nothing */ } + bool hasAlpha() const { + return + m_pixelFormat == Bitmap::ELuminanceAlpha || + m_pixelFormat == Bitmap::ERGBA || + m_pixelFormat == Bitmap::EXYZA || + m_pixelFormat == Bitmap::ESpectrumAlpha; + } + bool destinationExists(const fs::path &baseName) const { fs::path filename = baseName; if (boost::to_lower_copy(filename.extension().string()) != ".exr") diff --git a/src/integrators/direct/direct.cpp b/src/integrators/direct/direct.cpp index 0d77f61e..c2c7ba27 100644 --- a/src/integrators/direct/direct.cpp +++ b/src/integrators/direct/direct.cpp @@ -25,20 +25,29 @@ MTS_NAMESPACE_BEGIN * \parameters{ * \parameter{shadingSamples}{\Integer}{This convenience parameter can be * used to set both \code{emitterSamples} and \code{bsdfSamples} at - * the same time.} + * the same time. + * } * \parameter{emitterSamples}{\Integer}{Optional more fine-grained * parameter: specifies the number of samples that should be generated * using the direct illumination strategies implemented by the scene's - * emitters\default{set to the value of \code{shadingSamples}}} + * emitters\default{set to the value of \code{shadingSamples}} + * } * \parameter{bsdfSamples}{\Integer}{Optional more fine-grained * parameter: specifies the number of samples that should be generated * using the BSDF sampling strategies implemented by the scene's - * surfaces\default{set to the value of \code{shadingSamples}}} + * surfaces\default{set to the value of \code{shadingSamples}} + * } * \parameter{strictNormals}{\Boolean}{Be strict about potential - * inconsistencies involving shading normals? See \pluginref{path} - * for details.\default{no, i.e. \code{false}}} + * inconsistencies involving shading normals? See + * page~\pageref{sec:strictnormals} for details. + * \default{no, i.e. \code{false}} + * } + * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters? + * See page~\pageref{sec:hideemitters} for details. + * \default{no, i.e. \code{false}} + * } * } - * + * \vspace{-1mm} * \renderings{ * \medrendering{Only BSDF sampling}{integrator_direct_bsdf} * \medrendering{Only emitter sampling}{integrator_direct_lum} @@ -92,6 +101,9 @@ public: m_bsdfSamples = props.getSize("bsdfSamples", shadingSamples); /* Be strict about potential inconsistencies involving shading normals? 
*/ m_strictNormals = props.getBoolean("strictNormals", false); + /* When this flag is set to true, contributions from directly + * visible emitters will not be included in the rendered image */ + m_hideEmitters = props.getBoolean("hideEmitters", false); Assert(m_emitterSamples + m_bsdfSamples > 0); } @@ -101,6 +113,7 @@ public: m_emitterSamples = stream->readSize(); m_bsdfSamples = stream->readSize(); m_strictNormals = stream->readBool(); + m_hideEmitters = stream->readBool(); configure(); } @@ -109,6 +122,7 @@ public: stream->writeSize(m_emitterSamples); stream->writeSize(m_bsdfSamples); stream->writeBool(m_strictNormals); + stream->writeBool(m_hideEmitters); } void configure() { @@ -142,14 +156,14 @@ public: if (!rRec.rayIntersect(ray)) { /* If no intersection could be found, possibly return radiance from a background emitter */ - if (rRec.type & RadianceQueryRecord::EEmittedRadiance) + if (rRec.type & RadianceQueryRecord::EEmittedRadiance && !m_hideEmitters) return scene->evalEnvironment(ray); else return Spectrum(0.0f); } /* Possibly include emitted radiance if requested */ - if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)) + if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters) Li += its.Le(-ray.d); /* Include radiance from a subsurface scattering model if requested */ @@ -271,7 +285,7 @@ public: /* Intersected nothing -- perhaps there is an environment map? */ const Emitter *env = scene->getEnvironmentEmitter(); - if (!env) + if (!env || (m_hideEmitters && bRec.sampledType == BSDF::ENull)) continue; value = env->evalEnvironment(RayDifferential(bsdfRay)); @@ -316,6 +330,7 @@ private: Float m_fracBSDF, m_fracLum; Float m_weightBSDF, m_weightLum; bool m_strictNormals; + bool m_hideEmitters; }; MTS_IMPLEMENT_CLASS_S(MIDirectIntegrator, false, SamplingIntegrator) diff --git a/src/integrators/erpt/erpt_proc.cpp b/src/integrators/erpt/erpt_proc.cpp index 13708297..fe5b7714 100644 --- a/src/integrators/erpt/erpt_proc.cpp +++ b/src/integrators/erpt/erpt_proc.cpp @@ -234,15 +234,17 @@ public: } #endif - #if defined(MTS_BD_DEBUG) - if (Qxy <= 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) { - Log(EDebug, "Source path: %s", current->toString().c_str()); - Log(EDebug, "Proposal path: %s", proposed->toString().c_str()); - Log(EWarn, "Internal error while computing acceptance probabilities: " - "Qxy=%f, Qyx=%f, muRec=%s", Qxy, Qyx, muRec.toString().c_str()); + if (Qxy == 0) { // be tolerant of this (can occasionally happen due to floating point inaccuracies) + a = 0; + } else if (Qxy < 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) { + #if defined(MTS_BD_DEBUG) + Log(EDebug, "Source path: %s", current->toString().c_str()); + Log(EDebug, "Proposal path: %s", proposed->toString().c_str()); + Log(EWarn, "Internal error while computing acceptance probabilities: " + "Qxy=%f, Qyx=%f, muRec=%s", Qxy, Qyx, muRec.toString().c_str()); + #endif a = 0; } - #endif accumulatedWeight += 1-a; diff --git a/src/integrators/misc/adaptive.cpp b/src/integrators/misc/adaptive.cpp index 65bb0119..77b6f868 100644 --- a/src/integrators/misc/adaptive.cpp +++ b/src/integrators/misc/adaptive.cpp @@ -212,7 +212,10 @@ public: Float mean = 0, meanSqr = 0.0f; sampleCount = 0; - while (!stop) { + while (true) { + if (stop) + return; + rRec.newQuery(RadianceQueryRecord::ESensorRay, sensor->getMedium()); rRec.extra = RadianceQueryRecord::EAdaptiveQuery; diff --git a/src/integrators/mlt/mlt.cpp b/src/integrators/mlt/mlt.cpp index a5df2575..0dff0ac4 100644 
--- a/src/integrators/mlt/mlt.cpp +++ b/src/integrators/mlt/mlt.cpp @@ -259,15 +259,21 @@ public: bool nested = m_config.twoStage && m_config.firstStage; - Vector2i cropSize = film->getCropSize();; + Vector2i cropSize = film->getCropSize(); + Assert(cropSize.x > 0 && cropSize.y > 0); Log(EInfo, "Starting %srender job (%ix%i, " SIZE_T_FMT " %s, " SSE_STR ", approx. " SIZE_T_FMT " mutations/pixel) ..", nested ? "nested " : "", cropSize.x, cropSize.y, nCores, nCores == 1 ? "core" : "cores", sampleCount); - if (m_config.workUnits <= 0) - m_config.workUnits = std::max((int) std::ceil((cropSize.x - * cropSize.y * sampleCount) / 200000.0f), 1); + if (m_config.workUnits <= 0) { + const size_t desiredMutationsPerWorkUnit = 200000; + const size_t cropArea = (size_t) cropSize.x * cropSize.y; + const size_t workUnits = ((desiredMutationsPerWorkUnit - 1) + + (cropArea * sampleCount)) / desiredMutationsPerWorkUnit; + Assert(workUnits <= (size_t) std::numeric_limits::max()); + m_config.workUnits = (int) std::max(workUnits, (size_t) 1); + } m_config.nMutations = (cropSize.x * cropSize.y * sampleCount) / m_config.workUnits; @@ -290,12 +296,7 @@ public: m_config, directImage, pathSeeds); m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples, - m_config.workUnits, false, pathSeeds); - - pathSeeds.clear(); - - m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples, - m_config.workUnits, true, pathSeeds); + m_config.workUnits, true, m_config.importanceMap, pathSeeds); if (!nested) m_config.dump(); diff --git a/src/integrators/mlt/mlt_proc.cpp b/src/integrators/mlt/mlt_proc.cpp index 610df074..7d820505 100644 --- a/src/integrators/mlt/mlt_proc.cpp +++ b/src/integrators/mlt/mlt_proc.cpp @@ -122,7 +122,7 @@ public: result->clear(); /// Reconstruct the seed path - m_pathSampler->reconstructPath(wu->getSeed(), *current); + m_pathSampler->reconstructPath(wu->getSeed(), m_config.importanceMap, *current); relWeight = current->getRelativeWeight(); BDAssert(!relWeight.isZero()); @@ -235,7 +235,9 @@ public: } #endif - if (Qxy <= 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) { + if (Qxy == 0) { // be tolerant of this (can occasionally happen due to floating point inaccuracies) + a = 0; + } else if (Qxy < 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) { #if defined(MTS_BD_DEBUG) Log(EDebug, "Source path: %s", current->toString().c_str()); Log(EDebug, "Proposal path: %s", proposed->toString().c_str()); @@ -367,6 +369,7 @@ void MLTProcess::develop() { value += direct[i]; target[i] = value; } + m_film->setBitmap(m_developBuffer); m_refreshTimer->reset(); diff --git a/src/integrators/path/path.cpp b/src/integrators/path/path.cpp index 19ed0466..3fca3f9b 100644 --- a/src/integrators/path/path.cpp +++ b/src/integrators/path/path.cpp @@ -38,7 +38,12 @@ static StatsCounter avgPathLength("Path tracer", "Average path length", EAverage * } * \parameter{strictNormals}{\Boolean}{Be strict about potential * inconsistencies involving shading normals? See the description below - * for details.\default{no, i.e. \code{false}}} + * for details.\default{no, i.e. \code{false}} + * } + * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters? + * See page~\pageref{sec:hideemitters} for details. + * \default{no, i.e. \code{false}} + * } * } * * This integrator implements a basic path tracer and is a \emph{good default choice} @@ -75,7 +80,8 @@ static StatsCounter avgPathLength("Path tracer", "Average path length", EAverage * low-discrepancy sample generators (i.e. 
\pluginref{ldsampler}, * \pluginref{halton}, or \pluginref{sobol}). * - * \paragraph{Strict normals:} Triangle meshes often rely on interpolated shading normals + * \paragraph{Strict normals:}\label{sec:strictnormals} + * Triangle meshes often rely on interpolated shading normals * to suppress the inherently faceted appearance of the underlying geometry. These * ``fake'' normals are not without problems, however. They can lead to paradoxical * situations where a light ray impinges on an object from a direction that is classified as ``outside'' @@ -116,6 +122,7 @@ public: Intersection &its = rRec.its; RayDifferential ray(r); Spectrum Li(0.0f); + bool scattered = false; /* Perform the first ray intersection (or ignore if the intersection has already been provided). */ @@ -129,7 +136,8 @@ public: if (!its.isValid()) { /* If no intersection could be found, potentially return radiance from a environment luminaire if it exists */ - if (rRec.type & RadianceQueryRecord::EEmittedRadiance) + if ((rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) Li += throughput * scene->evalEnvironment(ray); break; } @@ -137,7 +145,8 @@ public: const BSDF *bsdf = its.getBSDF(ray); /* Possibly include emitted radiance if requested */ - if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)) + if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) Li += throughput * its.Le(-ray.d); /* Include radiance from a subsurface scattering model if requested */ @@ -201,6 +210,8 @@ public: if (bsdfWeight.isZero()) break; + scattered |= bRec.sampledType != BSDF::ENull; + /* Prevent light leaks due to the use of shading normals */ const Vector wo = its.toWorld(bRec.wo); Float woDotGeoN = dot(its.geoFrame.n, wo); @@ -224,6 +235,9 @@ public: const Emitter *env = scene->getEnvironmentEmitter(); if (env) { + if (m_hideEmitters && !scattered) + break; + value = env->evalEnvironment(ray); if (!env->fillDirectSamplingRecord(dRec, ray)) break; diff --git a/src/integrators/path/volpath.cpp b/src/integrators/path/volpath.cpp index bc1fa419..788c3c8e 100644 --- a/src/integrators/path/volpath.cpp +++ b/src/integrators/path/volpath.cpp @@ -37,8 +37,14 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length * path termination criterion. \default{\code{5}} * } * \parameter{strictNormals}{\Boolean}{Be strict about potential - * inconsistencies involving shading normals? See \pluginref{path} - * for details.\default{no, i.e. \code{false}}} + * inconsistencies involving shading normals? See + * page~\pageref{sec:strictnormals} for details. + * \default{no, i.e. \code{false}} + * } + * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters? + * See page~\pageref{sec:hideemitters} for details. + * \default{no, i.e. \code{false}} + * } * } * * This plugin provides a volumetric path tracer that can be used to @@ -130,8 +136,7 @@ public: if (phaseVal != 0) { /* Calculate prob. of having sampled that direction using phase function sampling */ - Float phasePdf = (emitter->isOnSurface() && dRec.measure == ESolidAngle - && interactions == 0) + Float phasePdf = (emitter->isOnSurface() && dRec.measure == ESolidAngle) ? 
phase->pdf(pRec) : (Float) 0.0f; /* Weight using the power heuristic */ @@ -152,7 +157,6 @@ public: break; throughput *= phaseVal; - /* Trace a ray in this direction */ ray = Ray(mRec.p, pRec.wo, ray.time); ray.mint = 0; @@ -187,13 +191,20 @@ public: if (!its.isValid()) { /* If no intersection could be found, possibly return attenuated radiance from a background luminaire */ - if (rRec.type & RadianceQueryRecord::EEmittedRadiance) - Li += throughput * scene->evalEnvironment(ray); + if ((rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) { + Spectrum value = throughput * scene->evalEnvironment(ray); + if (rRec.medium) + value *= rRec.medium->evalTransmittance(ray, rRec.sampler); + Li += value; + } + break; } /* Possibly include emitted radiance if requested */ - if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)) + if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) Li += throughput * its.Le(-ray.d); /* Include radiance from a subsurface integrator if requested */ diff --git a/src/integrators/path/volpath_simple.cpp b/src/integrators/path/volpath_simple.cpp index 3733fc5d..e7f3bde9 100644 --- a/src/integrators/path/volpath_simple.cpp +++ b/src/integrators/path/volpath_simple.cpp @@ -37,8 +37,14 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length * path termination criterion. \default{\code{5}} * } * \parameter{strictNormals}{\Boolean}{Be strict about potential - * inconsistencies involving shading normals? See \pluginref{path} - * for details.\default{no, i.e. \code{false}}} + * inconsistencies involving shading normals? See + * page~\pageref{sec:strictnormals} for details. + * \default{no, i.e. \code{false}} + * } + * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters? + * See page~\pageref{sec:hideemitters} for details. + * \default{no, i.e. \code{false}} + * } * } * * This plugin provides a basic volumetric path tracer that can be used to @@ -86,7 +92,7 @@ public: MediumSamplingRecord mRec; RayDifferential ray(r); Spectrum Li(0.0f); - bool nullChain = true; + bool nullChain = true, scattered = false; Float eta = 1.0f; /* Perform the first ray intersection (or ignore if the @@ -153,6 +159,7 @@ public: ray.mint = 0; scene->rayIntersect(ray, its); nullChain = false; + scattered = true; } else { /* Sample tau(x, y) * (Surface integral). 
This happens with probability mRec.pdfFailure @@ -165,13 +172,19 @@ public: if (!its.isValid()) { /* If no intersection could be found, possibly return attenuated radiance from a background luminaire */ - if (rRec.type & RadianceQueryRecord::EEmittedRadiance) - Li += throughput * scene->evalEnvironment(ray); + if ((rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) { + Spectrum value = throughput * scene->evalEnvironment(ray); + if (rRec.medium) + value *= rRec.medium->evalTransmittance(ray); + Li += value; + } break; } /* Possibly include emitted radiance if requested */ - if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)) + if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) + && (!m_hideEmitters || scattered)) Li += throughput * its.Le(-ray.d); /* Include radiance from a subsurface integrator if requested */ @@ -263,6 +276,7 @@ public: /* In the next iteration, trace a ray in this direction */ ray = Ray(its.p, wo, ray.time); scene->rayIntersect(ray, its); + scattered |= bRec.sampledType != BSDF::ENull; } if (rRec.depth++ >= m_rrDepth) { diff --git a/src/integrators/photonmapper/bre.cpp b/src/integrators/photonmapper/bre.cpp index dcfc431b..d92c76e1 100644 --- a/src/integrators/photonmapper/bre.cpp +++ b/src/integrators/photonmapper/bre.cpp @@ -116,21 +116,20 @@ void BeamRadianceEstimator::serialize(Stream *stream, InstanceManager *manager) AABB BeamRadianceEstimator::buildHierarchy(IndexType index) { BRENode &node = m_nodes[index]; + Point center = node.photon.getPosition(); + Float radius = node.radius; + node.aabb = AABB( + center - Vector(radius, radius, radius), + center + Vector(radius, radius, radius) + ); + if (!node.photon.isLeaf()) { IndexType left = node.photon.getLeftIndex(index); IndexType right = node.photon.getRightIndex(index); - node.aabb.reset(); if (left) node.aabb.expandBy(buildHierarchy(left)); if (right) node.aabb.expandBy(buildHierarchy(right)); - } else { - Point center = node.photon.getPosition(); - Float radius = node.radius; - node.aabb = AABB( - center - Vector(radius, radius, radius), - center + Vector(radius, radius, radius) - ); } return node.aabb; @@ -170,7 +169,7 @@ Spectrum BeamRadianceEstimator::query(const Ray &r, const Medium *medium) const Float diskDistance = dot(originToCenter, ray.d), radSqr = node.radius * node.radius; Float distSqr = (ray(diskDistance) - node.photon.getPosition()).lengthSquared(); - if (distSqr < radSqr) { + if (diskDistance > 0 && distSqr < radSqr) { Float weight = K2(distSqr/radSqr)/radSqr; Vector wi = -node.photon.getDirection(); diff --git a/src/integrators/photonmapper/photonmapper.cpp b/src/integrators/photonmapper/photonmapper.cpp index cbda46f9..a1849833 100644 --- a/src/integrators/photonmapper/photonmapper.cpp +++ b/src/integrators/photonmapper/photonmapper.cpp @@ -46,6 +46,10 @@ MTS_NAMESPACE_BEGIN * Granularity of photon tracing work units for the purpose * of parallelization (in \# of shot particles) \default{0, i.e. decide automatically} * } + * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters? + * See page~\pageref{sec:hideemitters} for details. + * \default{no, i.e. \code{false}} + * } * \parameter{rrDepth}{\Integer}{Specifies the minimum path depth, after * which the implementation will start to use the ``russian roulette'' * path termination criterion. 
\default{\code{5}} @@ -83,7 +87,8 @@ MTS_NAMESPACE_BEGIN */ class PhotonMapIntegrator : public SamplingIntegrator { public: - PhotonMapIntegrator(const Properties &props) : SamplingIntegrator(props) { + PhotonMapIntegrator(const Properties &props) : SamplingIntegrator(props), + m_parentIntegrator(NULL) { /* Number of lsamples for direct illumination */ m_directSamples = props.getInteger("directSamples", 16); /* Number of BSDF samples when intersecting a glossy material */ @@ -126,6 +131,9 @@ public: m_gatherLocally = props.getBoolean("gatherLocally", true); /* Indicates if the gathering steps should be canceled if not enough photons are generated. */ m_autoCancelGathering = props.getBoolean("autoCancelGathering", true); + /* When this flag is set to true, contributions from directly + * visible emitters will not be included in the rendered image */ + m_hideEmitters = props.getBoolean("hideEmitters", false); if (m_maxDepth == 0) { Log(EError, "maxDepth must be greater than zero!"); @@ -143,7 +151,7 @@ public: /// Unserialize from a binary data stream PhotonMapIntegrator(Stream *stream, InstanceManager *manager) - : SamplingIntegrator(stream, manager) { + : SamplingIntegrator(stream, manager), m_parentIntegrator(NULL) { m_directSamples = stream->readInt(); m_glossySamples = stream->readInt(); m_maxDepth = stream->readInt(); @@ -159,6 +167,7 @@ public: m_volumeLookupSize = stream->readInt(); m_gatherLocally = stream->readBool(); m_autoCancelGathering = stream->readBool(); + m_hideEmitters = stream->readBool(); m_causticPhotonMapID = m_globalPhotonMapID = m_breID = 0; configure(); } @@ -190,6 +199,7 @@ public: stream->writeInt(m_volumeLookupSize); stream->writeBool(m_gatherLocally); stream->writeBool(m_autoCancelGathering); + stream->writeBool(m_hideEmitters); } /// Configure the sampler for a specified amount of direct illumination samples @@ -201,9 +211,6 @@ public: if (bsdfSamples > 1) sampler->request2DArray(bsdfSamples); - if (scene->getMedia().size() == 0) - m_volumePhotons = 0; - bool hasDelta = false; const ref_vector &shapes = scene->getShapes(); for (size_t i=0; igetReturnStatus() != ParallelProcess::ESuccess) return false; - Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " - SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); - ref globalPhotonMap = proc->getPhotonMap(); if (globalPhotonMap->isFull()) { + Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " + SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); + m_globalPhotonMap = globalPhotonMap; m_globalPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles()); m_globalPhotonMap->build(); @@ -280,37 +287,6 @@ public: /* Generate the caustic photon map */ ref proc = new GatherPhotonProcess( GatherPhotonProcess::ECausticPhotons, m_causticPhotons, - m_granularity, 3, m_rrDepth, m_gatherLocally, - m_autoCancelGathering, job); - - proc->bindResource("scene", sceneResID); - proc->bindResource("sensor", sensorResID); - proc->bindResource("sampler", qmcSamplerID); - - m_proc = proc; - sched->schedule(proc); - sched->wait(proc); - m_proc = NULL; - - if (proc->getReturnStatus() != ParallelProcess::ESuccess) - return false; - - Log(EDebug, "Caustic photon map full. 
Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " - SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); - - ref causticPhotonMap = proc->getPhotonMap(); - if (causticPhotonMap->isFull()) { - m_causticPhotonMap = causticPhotonMap; - m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles()); - m_causticPhotonMap->build(); - m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap); - } - } - - if (m_volumePhotonMap.get() == NULL && m_volumePhotons > 0) { - /* Generate the volume photon map */ - ref proc = new GatherPhotonProcess( - GatherPhotonProcess::EVolumePhotons, m_volumePhotons, m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally, m_autoCancelGathering, job); @@ -326,11 +302,43 @@ public: if (proc->getReturnStatus() != ParallelProcess::ESuccess) return false; - Log(EDebug, "Volume photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " - SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); + ref causticPhotonMap = proc->getPhotonMap(); + if (causticPhotonMap->isFull()) { + Log(EDebug, "Caustic photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " + SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); + + m_causticPhotonMap = causticPhotonMap; + m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles()); + m_causticPhotonMap->build(); + m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap); + } + } + + size_t volumePhotons = scene->getMedia().size() == 0 ? 0 : m_volumePhotons; + if (m_volumePhotonMap.get() == NULL && volumePhotons > 0) { + /* Generate the volume photon map */ + ref proc = new GatherPhotonProcess( + GatherPhotonProcess::EVolumePhotons, volumePhotons, + m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally, + m_autoCancelGathering, job); + + proc->bindResource("scene", sceneResID); + proc->bindResource("sensor", sensorResID); + proc->bindResource("sampler", qmcSamplerID); + + m_proc = proc; + sched->schedule(proc); + sched->wait(proc); + m_proc = NULL; + + if (proc->getReturnStatus() != ParallelProcess::ESuccess) + return false; ref volumePhotonMap = proc->getPhotonMap(); if (volumePhotonMap->isFull()) { + Log(EDebug, "Volume photon map full. 
Shot " SIZE_T_FMT " particles, excess photons due to parallelism: " + SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons()); + volumePhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles()); volumePhotonMap->build(); m_bre = new BeamRadianceEstimator(volumePhotonMap, m_volumeLookupSize); @@ -409,13 +417,13 @@ public: if (!its.isValid()) { /* If no intersection could be found, possibly return attenuated radiance from a background luminaire */ - if (rRec.type & RadianceQueryRecord::EEmittedRadiance) + if ((rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters) LiSurf = scene->evalEnvironment(ray); return LiSurf * transmittance + LiMedium; } /* Possibly include emitted radiance if requested */ - if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)) + if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters) LiSurf += its.Le(-ray.d); /* Include radiance from a subsurface scattering model if requested */ @@ -428,9 +436,17 @@ public: return LiSurf * transmittance + LiMedium; unsigned int bsdfType = bsdf->getType() & BSDF::EAll; - bool isDiffuse = (bsdfType == BSDF::EDiffuseReflection); - if (isDiffuse || cacheQuery) { + /* Irradiance cachq query -> trat as diffuse */ + bool isDiffuse = (bsdfType == BSDF::EDiffuseReflection) || cacheQuery; + + bool hasSpecular = bsdfType & BSDF::EDelta; + + /* Exhaustively recurse into all specular lobes? */ + bool exhaustiveSpecular = rRec.depth < m_maxSpecularDepth && !cacheQuery; + + if (isDiffuse) { + /* 1. Diffuse indirect */ int maxDepth = m_maxDepth == -1 ? INT_MAX : (m_maxDepth-rRec.depth); if (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance && m_globalPhotonMap.get()) LiSurf += m_globalPhotonMap->estimateIrradiance(its.p, @@ -442,50 +458,55 @@ public: m_causticLookupSize) * bsdf->getDiffuseReflectance(its) * INV_PI; } - if ((bsdfType & BSDF::EDelta) && (bsdfType & ~BSDF::EDelta) == 0 && rRec.depth < m_maxSpecularDepth && !cacheQuery) { - if (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) { - int compCount = bsdf->getComponentCount(); - RadianceQueryRecord rRec2; - for (int i=0; isample(bRec, Point2(0.0f)); - if (bsdfVal.isZero()) - continue; + if (hasSpecular && exhaustiveSpecular + && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance)) { + /* 1. 
Specular indirect */ + int compCount = bsdf->getComponentCount(); + RadianceQueryRecord rRec2; + for (int i=0; igetType(i); + if (!(type & BSDF::EDelta)) + continue; + /* Sample the BSDF and recurse */ + BSDFSamplingRecord bRec(its, rRec.sampler, ERadiance); + bRec.component = i; + Spectrum bsdfVal = bsdf->sample(bRec, Point2(0.5f)); + if (bsdfVal.isZero()) + continue; - rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadiance); - RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time); - if (its.isMediumTransition()) - rRec2.medium = its.getTargetMedium(bsdfRay.d); + rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadiance); + RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time); + if (its.isMediumTransition()) + rRec2.medium = its.getTargetMedium(bsdfRay.d); - LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2); - } + LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2); } - } else if (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance) { - /* Estimate the direct illumination if this is requested */ - Point2 *sampleArray; - Point2 sample; - int numEmitterSamples = m_directSamples, - numBSDFSamples; + } - Float weightLum, weightBSDF; + /* Estimate the direct illumination if this is requested */ + int numEmitterSamples = m_directSamples, numBSDFSamples; + Float weightLum, weightBSDF; + Point2 *sampleArray; + Point2 sample; - if (rRec.depth > 1 || cacheQuery || adaptiveQuery) { - /* This integrator is used recursively by another integrator. - Be less accurate as this sample will not directly be observed. */ - numBSDFSamples = numEmitterSamples = 1; - weightLum = weightBSDF = 1.0f; + if (rRec.depth > 1 || cacheQuery || adaptiveQuery) { + /* This integrator is used recursively by another integrator. + Be less accurate as this sample will not directly be observed. 
*/ + numBSDFSamples = numEmitterSamples = 1; + weightLum = weightBSDF = 1.0f; + } else { + if (isDiffuse) { + numBSDFSamples = m_directSamples; + weightBSDF = weightLum = m_invEmitterSamples; } else { - if (isDiffuse) { - numBSDFSamples = m_directSamples; - weightBSDF = weightLum = m_invEmitterSamples; - } else { - numBSDFSamples = m_glossySamples; - weightLum = m_invEmitterSamples; - weightBSDF = m_invGlossySamples; - } + numBSDFSamples = m_glossySamples; + weightLum = m_invEmitterSamples; + weightBSDF = m_invGlossySamples; } + } + + if ((bsdfType & BSDF::ESmooth) && (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance)) { + DirectSamplingRecord dRec(its); if (numEmitterSamples > 1) { sampleArray = rRec.sampler->next2DArray(m_directSamples); @@ -493,45 +514,62 @@ public: sample = rRec.nextSample2D(); sampleArray = &sample; } - DirectSamplingRecord dRec(its); - if (bsdf->getType() & BSDF::ESmooth) { - for (int i=0; isampleAttenuatedEmitterDirect( - dRec, its, rRec.medium, interactions, - sampleArray[i], rRec.sampler); + for (int i=0; isampleAttenuatedEmitterDirect( + dRec, its, rRec.medium, interactions, + sampleArray[i], rRec.sampler); - /* Estimate the direct illumination if this is requested */ - if (!value.isZero()) { - const Emitter *emitter = static_cast(dRec.object); + /* Estimate the direct illumination if this is requested */ + if (!value.isZero()) { + const Emitter *emitter = static_cast(dRec.object); - /* Allocate a record for querying the BSDF */ - BSDFSamplingRecord bRec(its, its.toLocal(dRec.d)); + /* Allocate a record for querying the BSDF */ + BSDFSamplingRecord bRec(its, its.toLocal(dRec.d)); - /* Evaluate BSDF * cos(theta) */ - const Spectrum bsdfVal = bsdf->eval(bRec); + /* Evaluate BSDF * cos(theta) */ + const Spectrum bsdfVal = bsdf->eval(bRec); - if (!bsdfVal.isZero()) { - /* Calculate prob. of having sampled that direction - using BSDF sampling */ - Float bsdfPdf = (emitter->isOnSurface() - && dRec.measure == ESolidAngle - && interactions == 0) - ? bsdf->pdf(bRec) : (Float) 0.0f; + if (!bsdfVal.isZero()) { + /* Calculate prob. of having sampled that direction + using BSDF sampling */ - /* Weight using the power heuristic */ - const Float weight = miWeight(dRec.pdf * numEmitterSamples, - bsdfPdf * numBSDFSamples) * weightLum; - LiSurf += value * bsdfVal * weight; - } + if (!hasSpecular || exhaustiveSpecular) + bRec.typeMask = BSDF::ESmooth; + + Float bsdfPdf = (emitter->isOnSurface() + && dRec.measure == ESolidAngle + && interactions == 0) + ? 
bsdf->pdf(bRec) : (Float) 0.0f; + + /* Weight using the power heuristic */ + const Float weight = miWeight(dRec.pdf * numEmitterSamples, + bsdfPdf * numBSDFSamples) * weightLum; + + LiSurf += value * bsdfVal * weight; } } } + } - /* ==================================================================== */ - /* BSDF sampling */ - /* ==================================================================== */ + /* ==================================================================== */ + /* BSDF sampling */ + /* ==================================================================== */ + /* Sample direct compontent via BSDF sampling if this is generally requested AND + the BSDF is smooth, or there is a delta component that was not handled by the + exhaustive sampling loop above */ + bool bsdfSampleDirect = (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance) && + ((bsdfType & BSDF::ESmooth) || (hasSpecular && !exhaustiveSpecular)); + + /* Sample indirect component via BSDF sampling if this is generally requested AND + the BSDF is non-diffuse (diffuse is handled by the global photon map) + or there is a delta component that was not handled by the exhaustive sampling loop + above. */ + bool bsdfSampleIndirect = (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) && + !isDiffuse && ((bsdfType & BSDF::ESmooth) || (hasSpecular && !exhaustiveSpecular)); + + if (bsdfSampleDirect || bsdfSampleIndirect) { if (numBSDFSamples > 1) { sampleArray = rRec.sampler->next2DArray( std::max(m_directSamples, m_glossySamples)); @@ -542,9 +580,13 @@ public: RadianceQueryRecord rRec2; Intersection &bsdfIts = rRec2.its; + DirectSamplingRecord dRec(its); for (int i=0; isample(bRec, bsdfPdf, sampleArray[i]); if (bsdfVal.isZero()) @@ -557,12 +599,12 @@ public: bool hitEmitter = false; if (scene->rayIntersect(bsdfRay, bsdfIts)) { /* Intersected something - check if it was a luminaire */ - if (bsdfIts.isEmitter()) { + if (bsdfIts.isEmitter() && bsdfSampleDirect) { value = bsdfIts.Le(-bsdfRay.d); dRec.setQuery(bsdfRay, bsdfIts); hitEmitter = true; } - } else { + } else if (bsdfSampleDirect) { /* Intersected nothing -- perhaps there is an environment map? */ const Emitter *env = scene->getEnvironmentEmitter(); @@ -574,8 +616,7 @@ public: } if (hitEmitter) { - const Float emitterPdf = (!(bRec.sampledType & BSDF::EDelta)) ? - scene->pdfEmitterDirect(dRec) : 0; + const Float emitterPdf = scene->pdfEmitterDirect(dRec); Spectrum transmittance = rRec2.medium ? rRec2.medium->evalTransmittance(Ray(bsdfRay, 0, bsdfIts.t)) : Spectrum(1.0f); @@ -587,7 +628,7 @@ public: } /* Recurse */ - if (!isDiffuse && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) && !cacheQuery) { + if (bsdfSampleIndirect) { rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadianceNoEmission); rRec2.type ^= RadianceQueryRecord::EIntersection; @@ -598,37 +639,6 @@ public: LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2) * weightBSDF; } } - if (true) - return LiSurf; - } else if (!isDiffuse && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) && !cacheQuery) { - int numBSDFSamples = (rRec.depth > 1 || adaptiveQuery) ? 
1 : m_glossySamples; - Float weightBSDF; - Point2 *sampleArray; - Point2 sample; - - if (numBSDFSamples > 1) { - sampleArray = rRec.sampler->next2DArray( - std::max(m_directSamples, m_glossySamples)); - weightBSDF = m_invGlossySamples; - } else { - sample = rRec.nextSample2D(); sampleArray = &sample; - weightBSDF = 1.0f; - } - - RadianceQueryRecord rRec2; - for (int i=0; isample(bRec, bsdfPdf, sampleArray[i]); - if (bsdfVal.isZero()) - continue; - rRec2.recursiveQuery(rRec, - RadianceQueryRecord::ERadianceNoEmission); - - RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time); - LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2) * weightBSDF; - } } return LiSurf * transmittance + LiMedium; @@ -677,6 +687,7 @@ private: int m_granularity, m_directSamples, m_glossySamples; int m_rrDepth, m_maxDepth, m_maxSpecularDepth; bool m_gatherLocally, m_autoCancelGathering; + bool m_hideEmitters; }; MTS_IMPLEMENT_CLASS_S(PhotonMapIntegrator, false, SamplingIntegrator) diff --git a/src/integrators/photonmapper/ppm.cpp b/src/integrators/photonmapper/ppm.cpp index f031791d..d4e2963a 100644 --- a/src/integrators/photonmapper/ppm.cpp +++ b/src/integrators/photonmapper/ppm.cpp @@ -43,6 +43,8 @@ MTS_NAMESPACE_BEGIN * which the implementation will start to use the ``russian roulette'' * path termination criterion. \default{\code{5}} * } + * \parameter{maxPasses}{\Integer}{Maximum number of passes to render (where \code{-1} + * corresponds to rendering until stopped manually). \default{\code{-1}}} * } * This plugin implements the progressive photon mapping algorithm by Hachisuka et al. * \cite{Hachisuka2008Progressive}. Progressive photon mapping is a variant of photon @@ -109,9 +111,14 @@ public: m_rrDepth = props.getInteger("rrDepth", 3); /* Indicates if the gathering steps should be canceled if not enough photons are generated. */ m_autoCancelGathering = props.getBoolean("autoCancelGathering", true); + /* Maximum number of passes to render. -1 renders until the process is stopped. 
*/ + m_maxPasses = props.getInteger("maxPasses", -1); + m_mutex = new Mutex(); if (m_maxDepth <= 1 && m_maxDepth != -1) Log(EError, "Maximum depth must either be set to \"-1\" or \"2\" or higher!"); + if (m_maxPasses <= 0 && m_maxPasses != -1) + Log(EError, "Maximum number of Passes must either be set to \"-1\" or \"1\" or higher!"); } virtual ~PPMIntegrator() { @@ -242,9 +249,14 @@ public: } } + #if defined(MTS_OPENMP) + Thread::initializeOpenMP(nCores); + #endif + int it = 0; - while (m_running) + while (m_running && (m_maxPasses == -1 || it < m_maxPasses)) { photonMapPass(++it, queue, job, film, sceneResID, sensorResID, indepSamplerResID); + } #ifdef MTS_DEBUG_FP disableFPExceptions(); @@ -384,7 +396,8 @@ public: << " initialRadius = " << m_initialRadius << "," << endl << " alpha = " << m_alpha << "," << endl << " photonCount = " << m_photonCount << "," << endl - << " granularity = " << m_granularity << endl + << " granularity = " << m_granularity << "," << endl + << " maxPasses = " << m_maxPasses << endl << "]"; return oss.str(); } @@ -400,6 +413,7 @@ private: bool m_running; bool m_autoCancelGathering; ref m_mutex; + int m_maxPasses; }; MTS_IMPLEMENT_CLASS(PPMIntegrator, false, Integrator) diff --git a/src/integrators/photonmapper/sppm.cpp b/src/integrators/photonmapper/sppm.cpp index 58811832..a4625631 100644 --- a/src/integrators/photonmapper/sppm.cpp +++ b/src/integrators/photonmapper/sppm.cpp @@ -48,6 +48,8 @@ MTS_NAMESPACE_BEGIN * which the implementation will start to use the ``russian roulette'' * path termination criterion. \default{\code{5}} * } + * \parameter{maxPasses}{\Integer}{Maximum number of passes to render (where \code{-1} + * corresponds to rendering until stopped manually). \default{\code{-1}}} * } * This plugin implements stochastic progressive photon mapping by Hachisuka et al. * \cite{Hachisuka2009Stochastic}. This algorithm is an extension of progressive photon @@ -101,9 +103,13 @@ public: m_rrDepth = props.getInteger("rrDepth", 3); /* Indicates if the gathering steps should be canceled if not enough photons are generated. */ m_autoCancelGathering = props.getBoolean("autoCancelGathering", true); + /* Maximum number of passes to render. -1 renders until the process is stopped. 
*/ + m_maxPasses = props.getInteger("maxPasses", -1); m_mutex = new Mutex(); if (m_maxDepth <= 1 && m_maxDepth != -1) Log(EError, "Maximum depth must be set to \"2\" or higher!"); + if (m_maxPasses <= 0 && m_maxPasses != -1) + Log(EError, "Maximum number of Passes must either be set to \"-1\" or \"1\" or higher!"); } SPPMIntegrator(Stream *stream, InstanceManager *manager) @@ -187,8 +193,12 @@ public: enableFPExceptions(); #endif - int it=0; - while (m_running) { +#if defined(MTS_OPENMP) + Thread::initializeOpenMP(nCores); +#endif + + int it = 0; + while (m_running && (m_maxPasses == -1 || it < m_maxPasses)) { distributedRTPass(scene, samplers); photonMapPass(++it, queue, job, film, sceneResID, sensorResID, samplerResID); @@ -378,7 +388,8 @@ public: << " initialRadius = " << m_initialRadius << "," << endl << " alpha = " << m_alpha << "," << endl << " photonCount = " << m_photonCount << "," << endl - << " granularity = " << m_granularity << endl + << " granularity = " << m_granularity << "," << endl + << " maxPasses = " << m_maxPasses << endl << "]"; return oss.str(); } @@ -395,6 +406,7 @@ private: size_t m_totalEmitted, m_totalPhotons; bool m_running; bool m_autoCancelGathering; + int m_maxPasses; }; MTS_IMPLEMENT_CLASS_S(SPPMIntegrator, false, Integrator) diff --git a/src/integrators/pssmlt/pssmlt.cpp b/src/integrators/pssmlt/pssmlt.cpp index 2787aeb4..ad20260d 100644 --- a/src/integrators/pssmlt/pssmlt.cpp +++ b/src/integrators/pssmlt/pssmlt.cpp @@ -310,7 +310,8 @@ public: bool nested = m_config.twoStage && m_config.firstStage; - Vector2i cropSize = film->getCropSize();; + Vector2i cropSize = film->getCropSize(); + Assert(cropSize.x > 0 && cropSize.y > 0); Log(EInfo, "Starting %srender job (%ix%i, " SIZE_T_FMT " %s, " SSE_STR ", approx. " SIZE_T_FMT " mutations/pixel) ..", nested ? "nested " : "", cropSize.x, cropSize.y, @@ -319,9 +320,13 @@ public: size_t desiredMutationsPerWorkUnit = m_config.technique == PathSampler::EBidirectional ? 
100000 : 200000; - if (m_config.workUnits <= 0) - m_config.workUnits = std::max((int) std::ceil((cropSize.x - * cropSize.y * sampleCount) / (Float) desiredMutationsPerWorkUnit), 1); + if (m_config.workUnits <= 0) { + const size_t cropArea = (size_t) cropSize.x * cropSize.y; + const size_t workUnits = ((desiredMutationsPerWorkUnit - 1) + + (cropArea * sampleCount)) / desiredMutationsPerWorkUnit; + Assert(workUnits <= (size_t) std::numeric_limits::max()); + m_config.workUnits = (int) std::max(workUnits, (size_t) 1); + } m_config.nMutations = (cropSize.x * cropSize.y * sampleCount) / m_config.workUnits; @@ -344,7 +349,7 @@ public: m_config, directImage, pathSeeds); m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples, - m_config.workUnits, false, pathSeeds); + m_config.workUnits, false, m_config.importanceMap, pathSeeds); if (!nested) m_config.dump(); diff --git a/src/integrators/pssmlt/pssmlt_sampler.cpp b/src/integrators/pssmlt/pssmlt_sampler.cpp index 2b4e8aad..98288e03 100644 --- a/src/integrators/pssmlt/pssmlt_sampler.cpp +++ b/src/integrators/pssmlt/pssmlt_sampler.cpp @@ -50,7 +50,7 @@ void PSSMLTSampler::serialize(Stream *stream, InstanceManager *manager) const { } void PSSMLTSampler::configure() { - m_logRatio = -std::log(m_s2/m_s1); + m_logRatio = -math::fastlog(m_s2/m_s1); m_time = 0; m_largeStepTime = 0; m_largeStep = false; diff --git a/src/integrators/ptracer/ptracer_proc.cpp b/src/integrators/ptracer/ptracer_proc.cpp index b5a2886f..e7c224c2 100644 --- a/src/integrators/ptracer/ptracer_proc.cpp +++ b/src/integrators/ptracer/ptracer_proc.cpp @@ -88,8 +88,13 @@ void CaptureParticleWorker::handleEmission(const PositionSamplingRecord &pRec, DirectSamplingRecord dRec(pRec.p, pRec.time); int maxInteractions = m_maxPathDepth - 1; + /* Create a dummy intersection to ensure that sampleAttenuatedSensorDirect() + treats the light source vertex as being located on a surface */ + Intersection its; + its.p = pRec.p; + Spectrum value = weight * m_scene->sampleAttenuatedSensorDirect( - dRec, medium, maxInteractions, m_sampler->next2D(), m_sampler); + dRec, its, medium, maxInteractions, m_sampler->next2D(), m_sampler); if (value.isZero()) return; @@ -101,7 +106,7 @@ void CaptureParticleWorker::handleEmission(const PositionSamplingRecord &pRec, m_workResult->put(dRec.uv, (Float *) &value[0]); } -void CaptureParticleWorker::handleSurfaceInteraction(int depth, +void CaptureParticleWorker::handleSurfaceInteraction(int depth, int nullInteractions, bool caustic, const Intersection &its, const Medium *medium, const Spectrum &weight) { @@ -159,7 +164,7 @@ void CaptureParticleWorker::handleSurfaceInteraction(int depth, m_workResult->put(dRec.uv, (Float *) &value[0]); } -void CaptureParticleWorker::handleMediumInteraction(int depth, bool caustic, +void CaptureParticleWorker::handleMediumInteraction(int depth, int nullInteractions, bool caustic, const MediumSamplingRecord &mRec, const Medium *medium, const Vector &wi, const Spectrum &weight) { diff --git a/src/integrators/ptracer/ptracer_proc.h b/src/integrators/ptracer/ptracer_proc.h index 15ede4c4..ca12938d 100644 --- a/src/integrators/ptracer/ptracer_proc.h +++ b/src/integrators/ptracer/ptracer_proc.h @@ -89,28 +89,31 @@ public: const bool &stop); /** - * Handles particles emitted by a light source - if a connection to the - * sensor is possible, compute the importance and accumulate in the proper - * pixel of the accumulation buffer. 
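A brief aside on the work-unit changes above (mlt.cpp and pssmlt.cpp): the new code derives the number of work units with an integer ceiling division instead of applying std::ceil to a floating-point quotient, avoiding the detour through floating point. A minimal sketch of the idiom follows; the helper name and the example values are illustrative and not part of the patch.

#include <cstddef>

// How many work units of 'perUnit' mutations are needed to cover 'total'
// mutations. Rounds up; assumes perUnit > 0.
static std::size_t ceilDiv(std::size_t total, std::size_t perUnit) {
    return (total + perUnit - 1) / perUnit;
}

// For example, ceilDiv(1000000, 200000) == 5 and ceilDiv(1000001, 200000) == 6,
// matching the ((perUnit - 1) + total) / perUnit expression used in the patch.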
+ * \brief Handles particles emitted by a light source + * + * If a connection to the sensor is possible, compute the importance + * and accumulate in the proper pixel of the accumulation buffer. */ void handleEmission(const PositionSamplingRecord &pRec, const Medium *medium, const Spectrum &weight); /** - * Handles particles interacting with a surface - if a connection to the - * sensor is possible, compute the importance and accumulate in the proper - * pixel of the accumulation buffer. + * \brief Handles particles interacting with a surface + * + * If a connection to the sensor is possible, compute the importance + * and accumulate in the proper pixel of the accumulation buffer. */ - void handleSurfaceInteraction(int depth, bool caustic, + void handleSurfaceInteraction(int depth, int nullInteractions, bool caustic, const Intersection &its, const Medium *medium, const Spectrum &weight); /** - * Handles particles interacting with a medium - if a connection to the - * sensor is possible, compute the importance and accumulate in the proper - * pixel of the accumulation buffer. + * \brief Handles particles interacting with a medium + * + * If a connection to the sensor is possible, compute the importance + * and accumulate in the proper pixel of the accumulation buffer. */ - void handleMediumInteraction(int depth, bool caustic, + void handleMediumInteraction(int depth, int nullInteractions, bool caustic, const MediumSamplingRecord &mRec, const Medium *medium, const Vector &wi, const Spectrum &weight); diff --git a/src/libbidir/SConscript b/src/libbidir/SConscript index 5a78630c..69161888 100644 --- a/src/libbidir/SConscript +++ b/src/libbidir/SConscript @@ -9,7 +9,7 @@ bidirEnv.Append(CPPDEFINES = [['MTS_BUILD_MODULE', 'MTS_MODULE_BIDIR']]) libbidir = bidirEnv.SharedLibrary('mitsuba-bidir', [ 'common.cpp', 'rsampler.cpp', 'vertex.cpp', 'edge.cpp', 'path.cpp', 'verification.cpp', 'util.cpp', 'pathsampler.cpp', - 'mut_bidir.cpp', 'mut_lens.cpp', 'mut_caustic.cpp', + 'mut_bidir.cpp', 'mut_lens.cpp', 'mut_caustic.cpp', 'mut_mchain.cpp', 'manifold.cpp', 'mut_manifold.cpp' ]) diff --git a/src/libbidir/edge.cpp b/src/libbidir/edge.cpp index 6ad50fb1..4e867899 100644 --- a/src/libbidir/edge.cpp +++ b/src/libbidir/edge.cpp @@ -48,6 +48,9 @@ bool PathEdge::sampleNext(const Scene *scene, Sampler *sampler, return false; } + if (length == 0) + return false; + if (!medium) { weight[ERadiance] = weight[EImportance] = Spectrum(1.0f); pdf[ERadiance] = pdf[EImportance] = 1.0f; @@ -103,6 +106,9 @@ bool PathEdge::perturbDirection(const Scene *scene, } d = ray.d; + if (length == 0) + return false; + if (!medium) { weight[ERadiance] = weight[EImportance] = Spectrum(1.0f); pdf[ERadiance] = pdf[EImportance] = 1.0f; diff --git a/src/libbidir/manifold.cpp b/src/libbidir/manifold.cpp index c5690da0..f6fd5aa4 100644 --- a/src/libbidir/manifold.cpp +++ b/src/libbidir/manifold.cpp @@ -643,7 +643,11 @@ bool SpecularManifold::update(Path &path, int start, int end) { step = -1; mode = ERadiance; } - for (int j=0, i=start; j < (int) m_vertices.size()-2; ++j, i += step) { + int last = (int) m_vertices.size() - 2; + if (m_vertices[0].type == EPinnedDirection) + last = std::max(last, 1); + + for (int j=0, i=start; j < last; ++j, i += step) { const SimpleVertex &v = m_vertices[j], &vn = m_vertices[j+1]; @@ -664,7 +668,8 @@ bool SpecularManifold::update(Path &path, int start, int end) { PathVertex::EMediumInteraction : PathVertex::ESurfaceInteraction; if (v.type == EPinnedDirection) { - /* Create a fake vertex and use it to call 
sampleDirect() */ + /* Create a fake vertex and use it to call sampleDirect(). This is + kind of terrible -- a nicer API is needed to cleanly support this */ PathVertex temp; temp.type = PathVertex::EMediumInteraction; temp.degenerate = false; @@ -681,7 +686,7 @@ bool SpecularManifold::update(Path &path, int start, int end) { return false; } - if (m_vertices.size() > 3) { + if (m_vertices.size() >= 3) { PathVertex *succ2 = path.vertex(i+2*step); PathEdge *succ2Edge = path.edge(predEdgeIdx + 2*step); if (!succ->sampleNext(m_scene, NULL, vertex, succEdge, succ2Edge, succ2, mode)) { @@ -863,37 +868,30 @@ Float SpecularManifold::det(const Path &path, int a, int b, int c) { } Float SpecularManifold::multiG(const Path &path, int a, int b) { - if (a == 0) { + if (a == 0) ++a; - if (!path.vertex(a)->isConnectable()) - ++a; - } else if (a == path.length()) { + else if (a == path.length()) --a; - if (!path.vertex(a)->isConnectable()) - --a; - } - if (b == 0) { + if (b == 0) ++b; - if (!path.vertex(b)->isConnectable()) - ++b; - } else if (b == path.length()) { + else if (b == path.length()) --b; - if (!path.vertex(b)->isConnectable()) - --b; - } + int step = b > a ? 1 : -1; + while (!path.vertex(b)->isConnectable()) + b -= step; + while (!path.vertex(a)->isConnectable()) + a += step; - int step = b > a ? 1 : -1, start = a; Float result = 1; BDAssert(path.vertex(a)->isConnectable() && path.vertex(b)->isConnectable()); - for (int i = a + step; i != b + step; i += step) { + for (int i = a + step, start = a; i != b + step; i += step) { if (path.vertex(i)->isConnectable()) { result *= G(path, start, i); start = i; } } - BDAssert(start == b); return result; } @@ -915,7 +913,7 @@ Float SpecularManifold::G(const Path &path, int a, int b) { SimpleVertex &last = m_vertices[m_vertices.size()-1]; const PathVertex *vb = path.vertex(b); - if (vb->isMediumInteraction()) { + if (!vb->isOnSurface()) { last.n = Vector(path.edge(a < b ? (b-1) : b)->d); } else { last.n = vb->getShadingNormal(); diff --git a/src/libbidir/mut_lens.cpp b/src/libbidir/mut_lens.cpp index 107702e8..e5ce662d 100644 --- a/src/libbidir/mut_lens.cpp +++ b/src/libbidir/mut_lens.cpp @@ -100,7 +100,7 @@ bool LensPerturbation::sampleMutation( return false; Float focusDistance = sensor->getFocusDistance() / - absDot(sensor->getInverseViewTransform(0)(Vector(0,0,1)), ray.d); + absDot(sensor->getWorldTransform(0)(Vector(0,0,1)), ray.d); /* Correct direction based on the current aperture sample. 
This is necessary to support thin lens cameras */ diff --git a/src/libbidir/mut_manifold.cpp b/src/libbidir/mut_manifold.cpp index 52b9e313..5835456f 100644 --- a/src/libbidir/mut_manifold.cpp +++ b/src/libbidir/mut_manifold.cpp @@ -274,6 +274,7 @@ bool ManifoldPerturbation::sampleMutation( proposal.append(m_pool.allocEdge()); } proposal.append(source, m, k+1); + proposal.vertex(a) = proposal.vertex(a)->clone(m_pool); proposal.vertex(c) = proposal.vertex(c)->clone(m_pool); @@ -521,8 +522,8 @@ bool ManifoldPerturbation::sampleMutation( } } - if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) || - (vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) { + if (((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) || + (vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) && b+1 != k && b-1 != 0) { LockGuard guard(m_thetaDiffMutex); if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) || @@ -586,6 +587,7 @@ bool ManifoldPerturbation::sampleMutation( } } } + if (!PathVertex::connect(m_scene, proposal.vertexOrNull(q-1), proposal.edgeOrNull(q-1), @@ -662,6 +664,10 @@ Float ManifoldPerturbation::Q(const Path &source, const Path &proposal, if (prob == 0) return 0.0f; weight /= prob; + + /* Catch very low probabilities which round to +inf in the above division operation */ + if (!std::isfinite(weight.average())) + return 0.0f; } else { Frame frame(source.vertex(a+step)->getGeometricNormal()); diff --git a/src/libbidir/mut_mchain.cpp b/src/libbidir/mut_mchain.cpp index 4e08b2c4..d5d119b3 100644 --- a/src/libbidir/mut_mchain.cpp +++ b/src/libbidir/mut_mchain.cpp @@ -103,7 +103,7 @@ bool MultiChainPerturbation::sampleMutation( return false; Float focusDistance = sensor->getFocusDistance() / - absDot(sensor->getInverseViewTransform(0)(Vector(0,0,1)), ray.d); + absDot(sensor->getWorldTransform(0)(Vector(0,0,1)), ray.d); /* Correct direction based on the current aperture sample. 
This is necessary to support thin lens cameras */ diff --git a/src/libbidir/pathsampler.cpp b/src/libbidir/pathsampler.cpp index 2aef655c..2c177d21 100644 --- a/src/libbidir/pathsampler.cpp +++ b/src/libbidir/pathsampler.cpp @@ -579,12 +579,26 @@ Float PathSampler::computeAverageLuminance(size_t sampleCount) { return mean; } -static void seedCallback(std::vector<PathSeed> &output, int s, int t, Float weight, Path &) { +static void seedCallback(std::vector<PathSeed> &output, const Bitmap *importanceMap, + Float &accum, int s, int t, Float weight, Path &path) { + accum += weight; + + if (importanceMap) { + const Float *luminanceValues = importanceMap->getFloatData(); + Vector2i size = importanceMap->getSize(); + + const Point2 &pos = path.getSamplePosition(); + Point2i intPos( + std::min(std::max(0, (int) pos.x), size.x-1), + std::min(std::max(0, (int) pos.y), size.y-1)); + weight /= luminanceValues[intPos.x + intPos.y * size.x]; + } + output.push_back(PathSeed(0, weight, s, t)); } Float PathSampler::generateSeeds(size_t sampleCount, size_t seedCount, - bool fineGrained, std::vector<PathSeed> &seeds) { + bool fineGrained, const Bitmap *importanceMap, std::vector<PathSeed> &seeds) { Log(EInfo, "Integrating luminance values over the image plane (" SIZE_T_FMT " samples)..", sampleCount); @@ -596,39 +610,40 @@ Float PathSampler::generateSeeds(size_t sampleCount, size_t seedCount, tempSeeds.reserve(sampleCount); SplatList splatList; + Float luminance; PathCallback callback = boost::bind(&seedCallback, - boost::ref(tempSeeds), _1, _2, _3, _4); + boost::ref(tempSeeds), importanceMap, boost::ref(luminance), + _1, _2, _3, _4); Float mean = 0.0f, variance = 0.0f; for (size_t i=0; igetSampleIndex(); - Float lum = 0.0f; + luminance = 0.0f; if (fineGrained) { samplePaths(Point2i(-1), callback); /* Fine seed granularity (e.g. for Veach-MLT). 
Set the correct the sample index value */ - for (size_t j = seedIndex; jgetFloatData(); + Vector2i size = importanceMap->getSize(); + + const Point2 &pos = path.getSamplePosition(); + Point2i intPos( + std::min(std::max(0, (int) pos.x), size.x-1), + std::min(std::max(0, (int) pos.y), size.y-1)); + weight /= luminanceValues[intPos.x + intPos.y * size.x]; + } + if (seed.luminance != weight) SLog(EError, "Internal error in reconstructPath(): luminances " "don't match (%f vs %f)!", weight, seed.luminance); @@ -669,7 +695,7 @@ static void reconstructCallback(const PathSeed &seed, Path &result, MemoryPool & } } -void PathSampler::reconstructPath(const PathSeed &seed, Path &result) { +void PathSampler::reconstructPath(const PathSeed &seed, const Bitmap *importanceMap, Path &result) { ReplayableSampler *rplSampler = static_cast(m_sensorSampler.get()); Assert(result.length() == 0); @@ -678,7 +704,8 @@ void PathSampler::reconstructPath(const PathSeed &seed, Path &result) { rplSampler->setSampleIndex(seed.sampleIndex); PathCallback callback = boost::bind(&reconstructCallback, - boost::cref(seed), boost::ref(result), boost::ref(m_pool), _1, _2, _3, _4); + boost::cref(seed), importanceMap, + boost::ref(result), boost::ref(m_pool), _1, _2, _3, _4); samplePaths(Point2i(-1), callback); diff --git a/src/libbidir/vertex.cpp b/src/libbidir/vertex.cpp index 1729a1fe..7bfc193e 100644 --- a/src/libbidir/vertex.cpp +++ b/src/libbidir/vertex.cpp @@ -245,7 +245,7 @@ bool PathVertex::sampleNext(const Scene *scene, Sampler *sampler, ray.setDirection(pRec.wo); measure = ESolidAngle; - if (!(phase->getType() & BSDF::ENonSymmetric)) { + if (!(phase->getType() & PhaseFunction::ENonSymmetric)) { /* Make use of symmetry -- no need to re-evaluate */ pdf[1-mode] = pdf[mode]; weight[1-mode] = weight[mode]; @@ -636,7 +636,7 @@ bool PathVertex::perturbDirection(const Scene *scene, const PathVertex *pred, measure = ESolidAngle; - if (!(phase->getType() & BSDF::ENonSymmetric)) { + if (!(phase->getType() & PhaseFunction::ENonSymmetric)) { /* Make use of symmetry -- no need to re-evaluate */ pdf[1-mode] = pdf[mode]; weight[1-mode] = weight[mode]; @@ -710,7 +710,13 @@ bool PathVertex::propagatePerturbation(const Scene *scene, const PathVertex *pre bRec.typeMask = BSDF::EAll; Float prob = bsdf->pdf(bRec, EDiscrete); - weight[mode] = bsdf->eval(bRec, EDiscrete)/prob; + if (prob == 0) { + SLog(EWarn, "Unable to recreate specular vertex in perturbation (bsdf=%s)", + bsdf->toString().c_str()); + return false; + } + + weight[mode] = bsdf->eval(bRec, EDiscrete) / prob; pdf[mode] = prob; measure = EDiscrete; componentType = componentType_; @@ -1139,6 +1145,9 @@ bool PathVertex::cast(const Scene *scene, EVertexType desired) { PositionSamplingRecord pRec(its); pRec.object = sensor; pRec.pdf = 0.0f; + + Vector2i size = sensor->getFilm()->getSize(); + pRec.uv.x *= size.x; pRec.uv.y *= size.y; getPositionSamplingRecord() = pRec; degenerate = sensor->getType() & Sensor::EDeltaDirection; diff --git a/src/libcore/CMakeLists.txt b/src/libcore/CMakeLists.txt index c646daf8..090d23dd 100644 --- a/src/libcore/CMakeLists.txt +++ b/src/libcore/CMakeLists.txt @@ -58,17 +58,18 @@ set(HDRS ${INCLUDE_DIR}/sfcurve.h ${INCLUDE_DIR}/shvector.h ${INCLUDE_DIR}/spectrum.h + ${INCLUDE_DIR}/spline.h ${INCLUDE_DIR}/sse.h ${INCLUDE_DIR}/ssemath.h ${INCLUDE_DIR}/ssevector.h ${INCLUDE_DIR}/sshstream.h ${INCLUDE_DIR}/sstream.h ${INCLUDE_DIR}/statistics.h - ${INCLUDE_DIR}/stl.h ${INCLUDE_DIR}/stream.h ${INCLUDE_DIR}/thread.h ${INCLUDE_DIR}/timer.h ${INCLUDE_DIR}/tls.h + 
${INCLUDE_DIR}/track.h ${INCLUDE_DIR}/transform.h ${INCLUDE_DIR}/triangle.h ${INCLUDE_DIR}/util.h @@ -108,6 +109,7 @@ set(SRCS serialization.cpp shvector.cpp spectrum.cpp + spline.cpp ssemath.cpp sshstream.cpp sstream.cpp @@ -116,6 +118,7 @@ set(SRCS thread.cpp timer.cpp tls.cpp + track.cpp transform.cpp triangle.cpp util.cpp @@ -141,7 +144,7 @@ set(LIBS ${ZLIB_LIBRARIES} ${PNG_LIBRARIES} ${JPEG_LIBRARIES} ${ILMBASE_LIBRARIES} ${OPENEXR_LIBRARIES} ${Boost_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) if(WIN32) - list(APPEND LIBS ws2_32) + list(APPEND LIBS ws2_32 psapi) elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") list(APPEND LIBS rt) elseif (APPLE) diff --git a/src/libcore/SConscript b/src/libcore/SConscript index 62ba5036..57e69154 100644 --- a/src/libcore/SConscript +++ b/src/libcore/SConscript @@ -24,17 +24,22 @@ if coreEnv.has_key('JPEGLIB'): coreEnv.Prepend(LIBS=env['JPEGLIB']) coreEnv.Prepend(CPPDEFINES = [['MTS_BUILD_MODULE', 'MTS_MODULE_CORE']]) + + +if sys.platform == 'win32': + coreEnv.Append(LIBS=['psapi']) + libcore_objects = [ 'class.cpp', 'object.cpp', 'statistics.cpp', 'thread.cpp', 'brent.cpp', 'logger.cpp', 'appender.cpp', 'formatter.cpp', 'lock.cpp', 'qmc.cpp', 'random.cpp', 'timer.cpp', 'util.cpp', 'properties.cpp', 'half.cpp', - 'transform.cpp', 'spectrum.cpp', 'aabb.cpp', 'stream.cpp', + 'transform.cpp', 'spectrum.cpp', 'aabb.cpp', 'stream.cpp', 'fstream.cpp', 'plugin.cpp', 'triangle.cpp', 'bitmap.cpp', - 'fmtconv.cpp', 'serialization.cpp', 'sstream.cpp', 'cstream.cpp', - 'mstream.cpp', 'sched.cpp', 'sched_remote.cpp', 'sshstream.cpp', + 'fmtconv.cpp', 'serialization.cpp', 'sstream.cpp', 'cstream.cpp', + 'mstream.cpp', 'sched.cpp', 'sched_remote.cpp', 'sshstream.cpp', 'zstream.cpp', 'shvector.cpp', 'fresolver.cpp', 'rfilter.cpp', 'quad.cpp', 'mmap.cpp', 'chisquare.cpp', 'warp.cpp', 'vmf.cpp', - 'tls.cpp', 'ssemath.cpp' + 'tls.cpp', 'ssemath.cpp', 'spline.cpp', 'track.cpp' ] # Add some platform-specific components diff --git a/src/libcore/bitmap.cpp b/src/libcore/bitmap.cpp index a29cc62d..99b93ae3 100644 --- a/src/libcore/bitmap.cpp +++ b/src/libcore/bitmap.cpp @@ -38,6 +38,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include @@ -233,7 +238,7 @@ extern "C" { p->mgr.free_in_buffer = 0; } - METHODDEF(void) jpeg_error_exit (j_common_ptr cinfo) { + METHODDEF(void) jpeg_error_exit (j_common_ptr cinfo) throw(std::runtime_error) { char msg[JMSG_LENGTH_MAX]; (*cinfo->err->format_message) (cinfo, msg); SLog(EError, "Critcal libjpeg error: %s", msg); @@ -386,19 +391,6 @@ int Bitmap::getBytesPerComponent() const { } } - -void Bitmap::setString(const std::string &key, const std::string &value) { - m_metadata[key] = value; -} - -std::string Bitmap::getString(const std::string &key) const { - std::map::const_iterator it = m_metadata.find(key); - if (it != m_metadata.end()) - return it->second; - else - return ""; -} - Bitmap::~Bitmap() { if (m_data) freeAligned(m_data); @@ -430,47 +422,113 @@ void Bitmap::flipVertically() { } } -void Bitmap::accumulate(const Bitmap *bitmap, const Point2i &offset) { +ref Bitmap::rotateFlip(ERotateFlipType type) const { + /* Based on the GDI+ rotate/flip function in Wine */ + if (m_componentFormat == EBitmask) + Log(EError, "Transformations involving bitmasks are currently not supported!"); + + int width = m_size.x, height = m_size.y; + bool flip_x = (type & 6) == 2 || (type & 6) == 4; + bool flip_y = (type & 3) == 1 || (type & 3) == 2; + bool rotate_90 = type & 1; + + if (rotate_90) 
+ std::swap(width, height); + + ref result = new Bitmap(m_pixelFormat, m_componentFormat, + Vector2i(width, height), m_channelCount); + + ssize_t bypp = getBytesPerPixel(), + src_stride = m_size.x * bypp, + dst_stride = width * bypp; + + uint8_t *dst = result->getUInt8Data(); + uint8_t *dst_row = dst, *src_row = m_data; + + if (flip_x) + src_row += bypp * (m_size.x - 1); + + if (flip_y) + src_row += src_stride * (m_size.y - 1); + + ssize_t src_x_step, src_y_step; + if (rotate_90) { + src_x_step = flip_y ? -src_stride : src_stride; + src_y_step = flip_x ? -bypp : bypp; + } else { + src_x_step = flip_x ? -bypp : bypp; + src_y_step = flip_y ? -src_stride : src_stride; + } + + for (int y=0; ygetPixelFormat() && getComponentFormat() == bitmap->getComponentFormat() && getChannelCount() == bitmap->getChannelCount()); - const int - offsetX = std::max(offset.x, 0), - offsetY = std::max(offset.y, 0), - endX = std::min(offset.x + bitmap->getSize().x, m_size.x), - endY = std::min(offset.y + bitmap->getSize().y, m_size.y); + Vector2i offsetIncrease( + std::max(0, std::max(-sourceOffset.x, -targetOffset.x)), + std::max(0, std::max(-sourceOffset.y, -targetOffset.y)) + ); - if (offsetX >= endX || offsetY >= endY) + sourceOffset += offsetIncrease; + targetOffset += offsetIncrease; + size -= offsetIncrease; + + Vector2i sizeDecrease( + std::max(0, std::max(sourceOffset.x + size.x - bitmap->getWidth(), targetOffset.x + size.x - getWidth())), + std::max(0, std::max(sourceOffset.y + size.y - bitmap->getHeight(), targetOffset.y + size.y - getHeight()))); + + size -= sizeDecrease; + + if (size.x <= 0 || size.y <= 0) return; const size_t - columns = (endX - offsetX) * m_channelCount, + columns = size.x * m_channelCount, pixelStride = getBytesPerPixel(), - sourceStride = bitmap->getSize().x * pixelStride, - targetStride = m_size.x * pixelStride; + sourceStride = bitmap->getWidth() * pixelStride, + targetStride = getWidth() * pixelStride; const uint8_t *source = bitmap->getUInt8Data() + - (offsetX - offset.x + (offsetY - offset.y) * bitmap->getSize().x) * pixelStride; + (sourceOffset.x + sourceOffset.y * (size_t) bitmap->getWidth()) * pixelStride; uint8_t *target = m_data + - (offsetX + offsetY * m_size.x) * pixelStride; + (targetOffset.x + targetOffset.y * (size_t) m_size.x) * pixelStride; - for (int y = offsetY; y < endY; ++y) { + for (int y = 0; y < size.y; ++y) { switch (m_componentFormat) { case EUInt8: for (size_t i = 0; i < columns; ++i) - ((uint8_t *) target)[i] += ((uint8_t *) source)[i]; + ((uint8_t *) target)[i] = (uint8_t) std::min(0xFF, ((uint8_t *) source)[i] + ((uint8_t *) target)[i]); + break; case EUInt16: for (size_t i = 0; i < columns; ++i) - ((uint16_t *) target)[i] += ((uint16_t *) source)[i]; + ((uint16_t *) target)[i] = (uint16_t) std::min(0xFFFF, ((uint16_t *) source)[i] + ((uint16_t *) target)[i]); break; case EUInt32: for (size_t i = 0; i < columns; ++i) - ((uint32_t *) target)[i] += ((uint32_t *) source)[i]; + ((uint32_t *) target)[i] = std::min((uint32_t) 0xFFFFFFFFUL, ((uint32_t *) source)[i] + ((uint32_t *) target)[i]); break; case EFloat16: @@ -554,8 +612,9 @@ void Bitmap::setPixel(const Point2i &pos, const Spectrum &value) { } void Bitmap::drawHLine(int y, int x1, int x2, const Spectrum &value) { - AssertEx( y >= 0 && y < m_size.y && - x1 >= 0 && x2 < m_size.x, "Bitmap::drawVLine(): out of bounds!"); + if (y < 0 || y >= m_size.y) + return; + x1 = std::max(x1, 0); x2 = std::min(x2, m_size.x-1); const FormatConverter *cvt = FormatConverter::getInstance( std::make_pair(EFloat, 
m_componentFormat) @@ -574,8 +633,9 @@ void Bitmap::drawHLine(int y, int x1, int x2, const Spectrum &value) { } void Bitmap::drawVLine(int x, int y1, int y2, const Spectrum &value) { - AssertEx( x >= 0 && x < m_size.x && - y1 >= 0 && y2 < m_size.y, "Bitmap::drawVLine(): out of bounds!"); + if (x < 0 || x >= m_size.x) + return; + y1 = std::max(y1, 0); y2 = std::min(y2, m_size.y-1); const FormatConverter *cvt = FormatConverter::getInstance( std::make_pair(EFloat, m_componentFormat) @@ -601,9 +661,12 @@ void Bitmap::drawRect(const Point2i &offset, const Vector2i &size, const Spectru drawVLine(offset.x + size.x - 1, offset.y, offset.y + size.y - 1, value); } -void Bitmap::fill(const Point2i &offset, const Vector2i &size, const Spectrum &value) { - AssertEx(offset.x >= 0 && offset.x + size.x <= m_size.x && - offset.y >= 0 && offset.y + size.y <= m_size.y, "Bitmap::fill(): out of bounds!"); +void Bitmap::fillRect(Point2i offset, Vector2i size, const Spectrum &value) { + int sx = std::max(0, -offset.x), sy = std::max(0, -offset.y); + size.x -= sx; size.y -= sy; offset.x += sx; offset.y += sy; + + size.x -= std::max(0, offset.x + size.x - m_size.x); + size.y -= std::max(0, offset.y + size.y - m_size.y); const FormatConverter *cvt = FormatConverter::getInstance( std::make_pair(EFloat, m_componentFormat) @@ -897,7 +960,9 @@ ref Bitmap::separateChannel(int channelIndex) { if (channelIndex == 0 && channelCount == 1) return this; - Assert(channelIndex > 0 && channelIndex < channelCount); + if (channelIndex < 0 || channelIndex >= channelCount) + Log(EError, "Bitmap::separateChannel(%i): channel index " + "must be between 0 and %i", channelIndex, channelCount-1); ref result = new Bitmap(ELuminance, m_componentFormat, m_size); result->setMetadata(m_metadata); @@ -995,6 +1060,79 @@ ref Bitmap::crop(const Point2i &offset, const Vector2i &size) const { return result; } +void Bitmap::applyMatrix(Float matrix_[3][3]) { + int stride = 0; + + if (m_pixelFormat == ERGB || m_pixelFormat == EXYZ) + stride = 3; + else if (m_pixelFormat == ERGBA || m_pixelFormat == EXYZA) + stride = 4; + else + Log(EError, "Bitmap::applyMatrix(): unsupported pixel format!"); + + size_t pixels = (size_t) m_size.x * (size_t) m_size.y; + + switch (m_componentFormat) { + case EFloat16: { + float matrix[3][3]; + half *data = getFloat16Data(); + for (int i=0; i<3; ++i) + for (int j=0; j<3; ++j) + matrix[i][j] = (float) matrix_[i][j]; + + for (size_t i=0; i static void resample(const ReconstructionFilter *rfilter, ReconstructionFilter::EBoundaryCondition bch, @@ -1099,12 +1237,17 @@ std::string Bitmap::toString() const { << " type = " << m_pixelFormat << endl << " componentFormat = " << m_componentFormat << endl << " size = " << m_size.toString() << endl; - if (!m_metadata.empty()) { + + std::vector keys = m_metadata.getPropertyNames(); + if (!keys.empty()) { oss << " metadata = {" << endl; - for (std::map::const_iterator it = m_metadata.begin(); - it != m_metadata.end();) { - oss << " \"" << it->first << "\" => \"" << it->second << "\""; - if (++it != m_metadata.end()) + for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ) { + std::string value = m_metadata.getAsString(*it); + if (value.size() > 50) + value = value.substr(0, 50) + ".. 
[truncated]"; + + oss << " \"" << *it << "\" => \"" << value << "\""; + if (++it != keys.end()) oss << ","; oss << endl; } @@ -1191,7 +1334,7 @@ void Bitmap::readPNG(Stream *stream) { png_get_text(png_ptr, info_ptr, &text_ptr, &textIdx); for (int i=0; ikey] = text_ptr->text; + setMetadataString(text_ptr->key, text_ptr->text); int intent; double gamma; if (png_get_sRGB(png_ptr, info_ptr, &intent)) { @@ -1267,20 +1410,23 @@ void Bitmap::writePNG(Stream *stream, int compression) const { png_text *text = NULL; - std::map metadata = m_metadata; - metadata["generated-by"] = "Mitsuba version " MTS_VERSION; + Properties metadata(m_metadata); + metadata.setString("generatedBy", "Mitsuba version " MTS_VERSION); - text = new png_text[metadata.size()]; - memset(text, 0, sizeof(png_text) * metadata.size()); - int textIndex = 0; - for (std::map::iterator it = metadata.begin(); - it != metadata.end(); ++it) { - text[textIndex].key = const_cast(it->first.c_str()); - text[textIndex].text = const_cast(it->second.c_str()); - text[textIndex++].compression = PNG_TEXT_COMPRESSION_NONE; + std::vector keys = metadata.getPropertyNames(); + std::vector values(keys.size()); + + text = new png_text[keys.size()]; + memset(text, 0, sizeof(png_text) * keys.size()); + + for (size_t i = 0; i(keys[i].c_str()); + text[i].text = const_cast(values[i].c_str()); + text[i].compression = PNG_TEXT_COMPRESSION_NONE; } - png_set_text(png_ptr, info_ptr, text, textIndex); + png_set_text(png_ptr, info_ptr, text, (int) keys.size()); if (m_gamma == -1) png_set_sRGB_gAMA_and_cHRM(png_ptr, info_ptr, PNG_sRGB_INTENT_ABSOLUTE); @@ -1575,11 +1721,37 @@ void Bitmap::readOpenEXR(Stream *stream, const std::string &_prefix) { /* Load metadata if present */ for (Imf::Header::ConstIterator it = header.begin(); it != header.end(); ++it) { std::string name = it.name(), typeName = it.attribute().typeName(); - const Imf::StringAttribute *sattr = NULL; + const Imf::StringAttribute *sattr; + const Imf::IntAttribute *iattr; + const Imf::FloatAttribute *fattr; + const Imf::DoubleAttribute *dattr; + const Imf::V3fAttribute *vattr; + const Imf::M44fAttribute *mattr; if (typeName == "string" && (sattr = header.findTypedAttribute(name.c_str()))) - m_metadata[name] = sattr->value(); + m_metadata.setString(name, sattr->value()); + else if (typeName == "int" && + (iattr = header.findTypedAttribute(name.c_str()))) + m_metadata.setInteger(name, iattr->value()); + else if (typeName == "float" && + (fattr = header.findTypedAttribute(name.c_str()))) + m_metadata.setFloat(name, (Float) fattr->value()); + else if (typeName == "double" && + (dattr = header.findTypedAttribute(name.c_str()))) + m_metadata.setFloat(name, (Float) dattr->value()); + else if (typeName == "v3f" && + (vattr = header.findTypedAttribute(name.c_str()))) { + Imath::V3f vec = vattr->value(); + m_metadata.setVector(name, Vector(vec.x, vec.y, vec.z)); + } else if (typeName == "m44f" && + (mattr = header.findTypedAttribute(name.c_str()))) { + Matrix4x4 M; + for (int i=0; i<4; ++i) + for (int j=0; j<4; ++j) + M(i, j) = mattr->value().x[i][j]; + m_metadata.setTransform(name, Transform(M)); + } } updateChannelCount(); @@ -1806,13 +1978,45 @@ void Bitmap::writeOpenEXR(Stream *stream, pixelFormat = ERGBA; #endif - std::map metadata = m_metadata; - metadata["generated-by"] = "Mitsuba version " MTS_VERSION; + Properties metadata(m_metadata); + metadata.setString("generatedBy", "Mitsuba version " MTS_VERSION); + + std::vector keys = metadata.getPropertyNames(); Imf::Header header(m_size.x, m_size.y); - for 
(std::map::const_iterator it = metadata.begin(); - it != metadata.end(); ++it) - header.insert(it->first.c_str(), Imf::StringAttribute(it->second.c_str())); + for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ++it) { + Properties::EPropertyType type = metadata.getType(*it); + + switch (type) { + case Properties::EString: + header.insert(it->c_str(), Imf::StringAttribute(metadata.getString(*it))); + break; + case Properties::EInteger: + header.insert(it->c_str(), Imf::IntAttribute(metadata.getInteger(*it))); + break; + case Properties::EFloat: + header.insert(it->c_str(), Imf::FloatAttribute((float) metadata.getFloat(*it))); + break; + case Properties::EPoint: { + Point val = metadata.getPoint(*it); + header.insert(it->c_str(), Imf::V3fAttribute( + Imath::V3f((float) val.x, (float) val.y, (float) val.z))); + } + break; + case Properties::ETransform: { + Matrix4x4 val = metadata.getTransform(*it).getMatrix(); + header.insert(it->c_str(), Imf::M44fAttribute(Imath::M44f( + (float) val(0, 0), (float) val(0, 1), (float) val(0, 2), (float) val(0, 3), + (float) val(1, 0), (float) val(1, 1), (float) val(1, 2), (float) val(1, 3), + (float) val(2, 0), (float) val(2, 1), (float) val(2, 2), (float) val(2, 3), + (float) val(3, 0), (float) val(3, 1), (float) val(3, 2), (float) val(3, 3)))); + } + break; + default: + header.insert(it->c_str(), Imf::StringAttribute(metadata.getAsString(*it))); + break; + } + } if (pixelFormat == EXYZ || pixelFormat == EXYZA) { Imf::addChromaticities(header, Imf::Chromaticities( @@ -2273,14 +2477,16 @@ void Bitmap::writeRGBE(Stream *stream) const { Log(EError, "writeRGBE(): pixel format must be ERGB or ERGBA!"); stream->writeLine("#?RGBE"); - for (std::map::const_iterator it = m_metadata.begin(); - it != m_metadata.end(); ++it) { - stream->writeLine(formatString("# Metadata [%s]:", it->first.c_str())); - std::istringstream iss(it->second); + + std::vector keys = m_metadata.getPropertyNames(); + for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ) { + stream->writeLine(formatString("# Metadata [%s]:", it->c_str())); + std::istringstream iss(m_metadata.getAsString(*it)); std::string buf; while (std::getline(iss, buf)) stream->writeLine(formatString("# %s", buf.c_str())); } + stream->writeLine("FORMAT=32-bit_rle_rgbe\n"); stream->writeLine(formatString("-Y %i +X %i", m_size.y, m_size.x)); diff --git a/src/libcore/class.cpp b/src/libcore/class.cpp index 954b4f05..5fb696df 100644 --- a/src/libcore/class.cpp +++ b/src/libcore/class.cpp @@ -78,9 +78,9 @@ void Class::initializeOnce(Class *theClass) { } void Class::staticInitialization() { - std::for_each(__classes->begin(), __classes->end(), - compose1(std::ptr_fun(initializeOnce), - select2nd())); + for (ClassMap::iterator it = __classes->begin(); + it != __classes->end(); ++it) + initializeOnce(it->second); m_isInitialized = true; } diff --git a/src/libcore/fmtconv.cpp b/src/libcore/fmtconv.cpp index ae831528..f463de68 100644 --- a/src/libcore/fmtconv.cpp +++ b/src/libcore/fmtconv.cpp @@ -156,7 +156,6 @@ template struct FormatConverterImpl : public FormatConverter { precomp[i] = convertScalar(detail::safe_cast(i), sourceGamma, NULL, multiplier, invDestGamma); } - const DestFormat zero = convertScalar(0.0f); const DestFormat one = convertScalar(1.0f); Spectrum spec; @@ -192,15 +191,20 @@ template struct FormatConverterImpl : public FormatConverter { case Bitmap::EXYZ: for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma); - *dest++ = zero; *dest++ = value; *dest++ = 
zero; + Float value = convertScalar(*source++, sourceGamma); + *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma); } break; case Bitmap::EXYZA: for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma); - *dest++ = zero; *dest++ = value; *dest++ = zero; *dest++ = one; + Float value = convertScalar(*source++, sourceGamma); + *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = one; } break; @@ -270,16 +274,20 @@ template struct FormatConverterImpl : public FormatConverter { case Bitmap::EXYZ: for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma); - *dest++ = zero; *dest++ = value; *dest++ = zero; + Float value = convertScalar(*source++, sourceGamma); + *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma); source++; } break; case Bitmap::EXYZA: for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma); - *dest++ = zero; *dest++ = value; *dest++ = zero; + Float value = convertScalar(*source++, sourceGamma); + *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma); + *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma); *dest++ = convertScalar(*source++); } break; diff --git a/src/libcore/fresolver.cpp b/src/libcore/fresolver.cpp index b32d53d0..6131e5c4 100644 --- a/src/libcore/fresolver.cpp +++ b/src/libcore/fresolver.cpp @@ -1,61 +1,93 @@ #include #include -#if defined(__WINDOWS__) +#if defined(__LINUX__) +# if !defined(_GNU_SOURCE) +# define _GNU_SOURCE +# endif +# include +#elif defined(__OSX__) +# include +#elif defined(__WINDOWS__) # include # include #endif + + MTS_NAMESPACE_BEGIN -FileResolver::FileResolver() { - m_paths.push_back(fs::current_path()); -#if defined(__LINUX__) - char exePathTemp[PATH_MAX]; - memset(exePathTemp, 0, PATH_MAX); - if (readlink("/proc/self/exe", exePathTemp, PATH_MAX) != -1) { - fs::path exePath(exePathTemp); +#if defined(__WINDOWS__) || defined(__LINUX__) + namespace { + void dummySymbol() { } + } +#endif - /* Make sure that we're not running inside a Python interpreter */ - if (exePath.filename().string().find("python") == std::string::npos) { - prependPath(exePath.parent_path()); - // Handle local installs: ~/local/bin/:~/local/share/mitsuba/* - fs::path sharedDir = exePath.parent_path().parent_path() - / fs::path("share") / fs::path("mitsuba"); - if (fs::exists(sharedDir)) - prependPath(sharedDir); +FileResolver::FileResolver() { + /* Try to detect the base path of the Mitsuba installation */ + fs::path basePath; +#if defined(__LINUX__) + Dl_info info; + + dladdr((const void *) &dummySymbol, &info); + if (info.dli_fname) { + /* Try to detect a few default setups */ + if (boost::starts_with(info.dli_fname, "/usr/lib") || + boost::starts_with(info.dli_fname, "/lib")) { + basePath = fs::path("/usr/share/mitsuba"); + } else if (boost::starts_with(info.dli_fname, "/usr/local/lib")) { + basePath = 
fs::path("/usr/local/share/mitsuba"); + } else { + /* This is a locally-compiled repository */ + basePath = fs::path(info.dli_fname).parent_path(); } - } else { - Log(EError, "Could not detect the executable path!"); } #elif defined(__OSX__) MTS_AUTORELEASE_BEGIN() - fs::path path = __mts_bundlepath(); - if (path.filename() != fs::path("Python.app")) - prependPath(path); + uint32_t imageCount = _dyld_image_count(); + for (uint32_t i=0; i lpFilename(MAX_PATH); + // Module handle to this DLL. If the function fails it sets handle to NULL. + // In that case GetModuleFileName will get the name of the executable which + // is acceptable soft-failure behavior. + HMODULE handle; + GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS + | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + reinterpret_cast(&dummySymbol), &handle); + // Try to get the path with the default MAX_PATH length (260 chars) - DWORD nSize = GetModuleFileNameW(NULL, &lpFilename[0], MAX_PATH); + DWORD nSize = GetModuleFileNameW(handle, &lpFilename[0], MAX_PATH); // Adjust the buffer size in case if was too short - while (nSize == lpFilename.size()) { + while (nSize != 0 && nSize == lpFilename.size()) { lpFilename.resize(nSize * 2); - nSize = GetModuleFileNameW(NULL, &lpFilename[0], nSize); + nSize = GetModuleFileNameW(handle, &lpFilename[0], + static_cast(lpFilename.size())); } // There is an error if and only if the function returns 0 - if (nSize != 0) { - fs::path path(lpFilename); - if (boost::to_lower_copy(path.filename().string()).find("python") == std::string::npos) - prependPath(path.parent_path()); - } else { - const std::string msg(lastErrorText()); - Log(EError, "Could not detect the executable path! (%s)", msg.c_str()); - } + if (nSize != 0) + basePath = fs::path(lpFilename).parent_path(); + else + Log(EError, "Could not detect the executable path! 
(%s)", lastErrorText().c_str()); #endif + #if BOOST_VERSION >= 104800 + m_paths.push_back(fs::canonical(basePath)); + #else + m_paths.push_back(fs::absolute(basePath)); + #endif + m_paths.push_back(fs::current_path()); } FileResolver *FileResolver::clone() const { diff --git a/src/libcore/logger.cpp b/src/libcore/logger.cpp index cbab6b8c..179288bb 100644 --- a/src/libcore/logger.cpp +++ b/src/libcore/logger.cpp @@ -111,7 +111,11 @@ void Logger::log(ELogLevel level, const Class *theClass, memset(exePath, 0, PATH_MAX); if (readlink(formatString("/proc/%i/exe", ppid).c_str(), exePath, PATH_MAX) != -1) { if (!strcmp(exePath, "/usr/bin/gdb")) { +#if defined(__i386__) || defined(__x86_64__) __asm__ ("int $3"); +#else + __builtin_trap(); +#endif } } #elif defined(__OSX__) diff --git a/src/libcore/properties.cpp b/src/libcore/properties.cpp index da93cf3c..23ab813e 100644 --- a/src/libcore/properties.cpp +++ b/src/libcore/properties.cpp @@ -18,6 +18,7 @@ #include #include +#include /* Keep the boost::variant includes outside of properties.h, since they noticeably add to the overall compile times */ @@ -26,7 +27,7 @@ MTS_NAMESPACE_BEGIN typedef boost::variant< - bool, int64_t, Float, Point, Vector, Transform, + bool, int64_t, Float, Point, Vector, Transform, AnimatedTransform *, Spectrum, std::string, Properties::Data> ElementData; struct PropertyElement { @@ -45,7 +46,7 @@ struct PropertyElement { Type Properties::get##TypeName(const std::string &name) const { \ std::map::const_iterator it = m_elements->find(name); \ if (it == m_elements->end()) \ - SLog(EError, "Property \"%s\" missing", name.c_str()); \ + SLog(EError, "Property \"%s\" has not been specified!", name.c_str()); \ const BaseType *result = boost::get(&it->second.data); \ if (!result) \ SLog(EError, "The property \"%s\" has the wrong type (expected <" #ReadableName ">). 
The " \ @@ -78,18 +79,127 @@ DEFINE_PROPERTY_ACCESSOR(Spectrum, Spectrum, Spectrum, spectrum) DEFINE_PROPERTY_ACCESSOR(std::string, std::string, String, string) DEFINE_PROPERTY_ACCESSOR(Properties::Data, Properties::Data, Data, data) -class type_visitor : public boost::static_visitor { -public: - Properties::EPropertyType operator()(const bool &) const { return Properties::EBoolean; } - Properties::EPropertyType operator()(const int64_t &) const { return Properties::EInteger; } - Properties::EPropertyType operator()(const Float &) const { return Properties::EFloat; } - Properties::EPropertyType operator()(const Point &) const { return Properties::EPoint; } - Properties::EPropertyType operator()(const Vector &) const { return Properties::EVector; } - Properties::EPropertyType operator()(const Transform &) const { return Properties::ETransform; } - Properties::EPropertyType operator()(const Spectrum &) const { return Properties::ESpectrum; } - Properties::EPropertyType operator()(const std::string &) const { return Properties::EString; } - Properties::EPropertyType operator()(const Properties::Data &) const { return Properties::EData; } -}; +void Properties::setAnimatedTransform(const std::string &name, const AnimatedTransform *value, bool warnDuplicates) { + if (hasProperty(name)) { + AnimatedTransform **old = boost::get(&((*m_elements)[name].data)); + if (old) + (*old)->decRef(); + if (warnDuplicates) + SLog(EWarn, "Property \"%s\" was specified multiple times!", name.c_str()); + } + (*m_elements)[name].data = (AnimatedTransform *) value; + (*m_elements)[name].queried = false; + value->incRef(); +} + +ref Properties::getAnimatedTransform(const std::string &name) const { + std::map::const_iterator it = m_elements->find(name); + if (it == m_elements->end()) + SLog(EError, "Property \"%s\" missing", name.c_str()); + const AnimatedTransform * const * result1 = boost::get(&it->second.data); + const Transform *result2 = boost::get(&it->second.data); + + if (!result1 && !result2) + SLog(EError, "The property \"%s\" has the wrong type (expected or ). The " + "complete property record is :\n%s", name.c_str(), toString().c_str()); + it->second.queried = true; + + if (result1) + return *result1; + else + return new AnimatedTransform(*result2); +} + +ref Properties::getAnimatedTransform(const std::string &name, const AnimatedTransform *defVal) const { + std::map::const_iterator it = m_elements->find(name); + if (it == m_elements->end()) + return defVal; + AnimatedTransform * const * result1 = boost::get(&it->second.data); + const Transform *result2 = boost::get(&it->second.data); + + if (!result1 && !result2) + SLog(EError, "The property \"%s\" has the wrong type (expected or ). The " + "complete property record is :\n%s", name.c_str(), toString().c_str()); + + it->second.queried = true; + + if (result1) + return *result1; + else + return new AnimatedTransform(*result2); +} + +ref Properties::getAnimatedTransform(const std::string &name, const Transform &defVal) const { + std::map::const_iterator it = m_elements->find(name); + if (it == m_elements->end()) + return new AnimatedTransform(defVal); + + AnimatedTransform * const * result1 = boost::get(&it->second.data); + const Transform *result2 = boost::get(&it->second.data); + + if (!result1 && !result2) + SLog(EError, "The property \"%s\" has the wrong type (expected or ). 
The " + "complete property record is :\n%s", name.c_str(), toString().c_str()); + it->second.queried = true; + + if (result1) + return *result1; + else + return new AnimatedTransform(*result2); +} + +namespace { + class TypeVisitor : public boost::static_visitor { + public: + Properties::EPropertyType operator()(const bool &) const { return Properties::EBoolean; } + Properties::EPropertyType operator()(const int64_t &) const { return Properties::EInteger; } + Properties::EPropertyType operator()(const Float &) const { return Properties::EFloat; } + Properties::EPropertyType operator()(const Point &) const { return Properties::EPoint; } + Properties::EPropertyType operator()(const Vector &) const { return Properties::EVector; } + Properties::EPropertyType operator()(const Transform &) const { return Properties::ETransform; } + Properties::EPropertyType operator()(const AnimatedTransform *) const { return Properties::EAnimatedTransform; } + Properties::EPropertyType operator()(const Spectrum &) const { return Properties::ESpectrum; } + Properties::EPropertyType operator()(const std::string &) const { return Properties::EString; } + Properties::EPropertyType operator()(const Properties::Data &) const { return Properties::EData; } + }; + + class EqualityVisitor : public boost::static_visitor { + public: + EqualityVisitor(const ElementData *ref) : ref(ref) { } + + bool operator()(const bool &v) const { const bool *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const int64_t &v) const { const int64_t *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Float &v) const { const Float *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Point &v) const { const Point *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Vector &v) const { const Vector *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Transform &v) const { const Transform *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const AnimatedTransform *v) const { AnimatedTransform * const *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Spectrum &v) const { const Spectrum *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const std::string &v) const { const std::string *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + bool operator()(const Properties::Data &v) const { const Properties::Data *v2 = boost::get(ref); return v2 ? (v == *v2) : false; } + private: + const ElementData *ref; + }; + + class StringVisitor : public boost::static_visitor { + public: + StringVisitor(std::ostringstream &oss, bool quote) : oss(oss), quote(quote) { } + + void operator()(const bool &v) const { oss << (v ? "true" : "false"); } + void operator()(const int64_t &v) const { oss << v; } + void operator()(const Float &v) const { oss << v; } + void operator()(const Point &v) const { oss << v.toString(); } + void operator()(const Vector &v) const { oss << v.toString(); } + void operator()(const Transform &v) const { oss << v.toString(); } + void operator()(const AnimatedTransform *v) const { oss << ((Object *) v)->toString(); } + void operator()(const Spectrum &v) const { oss << v.toString(); } + void operator()(const std::string &v) const { oss << (quote ? "\"" : "") << v << (quote ? 
"\"" : ""); } + void operator()(const Properties::Data &v) const { oss << v.ptr << " (size=" << v.size << ")"; } + private: + std::ostringstream &oss; + bool quote; + }; +} Properties::Properties() : m_id("unnamed") { @@ -104,16 +214,44 @@ Properties::Properties(const std::string &pluginName) Properties::Properties(const Properties &props) : m_pluginName(props.m_pluginName), m_id(props.m_id) { m_elements = new std::map(*props.m_elements); + + for (std::map::iterator it = m_elements->begin(); + it != m_elements->end(); ++it) { + AnimatedTransform **trafo = boost::get(&(*it).second.data); + if (trafo) + (*trafo)->incRef(); + } } Properties::~Properties() { + for (std::map::iterator it = m_elements->begin(); + it != m_elements->end(); ++it) { + AnimatedTransform **trafo = boost::get(&(*it).second.data); + if (trafo) + (*trafo)->decRef(); + } + delete m_elements; } void Properties::operator=(const Properties &props) { + for (std::map::iterator it = m_elements->begin(); + it != m_elements->end(); ++it) { + AnimatedTransform **trafo = boost::get(&(*it).second.data); + if (trafo) + (*trafo)->decRef(); + } + m_pluginName = props.m_pluginName; m_id = props.m_id; *m_elements = *props.m_elements; + + for (std::map::iterator it = m_elements->begin(); + it != m_elements->end(); ++it) { + AnimatedTransform **trafo = boost::get(&(*it).second.data); + if (trafo) + (*trafo)->incRef(); + } } bool Properties::hasProperty(const std::string &name) const { @@ -124,6 +262,9 @@ bool Properties::removeProperty(const std::string &name) { std::map::iterator it = m_elements->find(name); if (it == m_elements->end()) return false; + AnimatedTransform **trafo = boost::get(&(*it).second.data); + if (trafo) + (*trafo)->decRef(); m_elements->erase(it); return true; } @@ -145,13 +286,32 @@ Properties::EPropertyType Properties::getType(const std::string &name) const { if (it == m_elements->end()) SLog(EError, "Property \"%s\" has not been specified!", name.c_str()); - type_visitor myVisitor; - return boost::apply_visitor(myVisitor, it->second.data); + return boost::apply_visitor(TypeVisitor(), it->second.data); +} + +std::string Properties::getAsString(const std::string &name, const std::string &defVal) const { + if (m_elements->find(name) == m_elements->end()) + return defVal; + return getAsString(name); +} + +std::string Properties::getAsString(const std::string &name) const { + std::map::const_iterator it = m_elements->find(name); + if (it == m_elements->end()) + SLog(EError, "Property \"%s\" has not been specified!", name.c_str()); + + std::ostringstream oss; + StringVisitor strVisitor(oss, false); + boost::apply_visitor(strVisitor, it->second.data); + it->second.queried = true; + + return oss.str(); } std::string Properties::toString() const { std::map::const_iterator it = m_elements->begin(); std::ostringstream oss; + StringVisitor strVisitor(oss, true); oss << "Properties[" << endl << " pluginName = \"" << m_pluginName << "\"," << endl @@ -160,36 +320,7 @@ std::string Properties::toString() const { while (it != m_elements->end()) { oss << " \"" << (*it).first << "\" -> "; const ElementData &data = (*it).second.data; - EPropertyType type = boost::apply_visitor(type_visitor(), data); - switch (type) { - case EBoolean: - oss << (boost::get(data) ? 
"true" : "false"); - break; - case EInteger: - oss << boost::get(data); - break; - case EFloat: - oss << boost::get(data); - break; - case EPoint: - oss << boost::get(data).toString(); - break; - case ETransform: - oss << indent(boost::get(data).toString()); - break; - case ESpectrum: - oss << boost::get(data).toString(); - break; - case EString: - oss << "\"" << boost::get(data) << "\""; - break; - case EData: - oss << boost::get(data).ptr << " (size=" - << boost::get(data).size << ")"; - break; - default: - oss << ""; - } + boost::apply_visitor(strVisitor, data); if (++it != m_elements->end()) oss << ","; oss << endl; @@ -219,6 +350,29 @@ void Properties::putPropertyNames(std::vector &results) const { results.push_back((*it).first); } +void Properties::copyAttribute(const Properties &properties, + const std::string &sourceName, const std::string &targetName) { + std::map::const_iterator it = properties.m_elements->find(sourceName); + if (it == properties.m_elements->end()) + SLog(EError, "copyAttribute(): Could not find parameter \"%s\"!", sourceName.c_str()); + m_elements->operator[](targetName) = it->second; +} + +bool Properties::operator==(const Properties &p) const { + if (m_pluginName != p.m_pluginName || m_id != p.m_id || m_elements->size() != p.m_elements->size()) + return false; + + std::map::const_iterator it = m_elements->begin(); + for (; it != m_elements->end(); ++it) { + const PropertyElement &first = it->second; + const PropertyElement &second = (*p.m_elements)[it->first]; + + if (!boost::apply_visitor(EqualityVisitor(&first.data), second.data)) + return false; + } + + return true; +} ConfigurableObject::ConfigurableObject(Stream *stream, InstanceManager *manager) : SerializableObject(stream, manager) { diff --git a/src/libcore/quad.cpp b/src/libcore/quad.cpp index ab78a584..f54534d9 100644 --- a/src/libcore/quad.cpp +++ b/src/libcore/quad.cpp @@ -96,6 +96,7 @@ static std::pair legendreQ(int l, double x) { if (l == 1) { return std::make_pair(0.5 * (3*x*x-1) - 1, 3*x); } else { + /* Evaluate the recurrence in double precision */ double Lppred = 1.0, Lpred = x, Lcur = 0.0, Dppred = 0.0, Dpred = 1.0, Dcur = 0.0; @@ -113,6 +114,65 @@ static std::pair legendreQ(int l, double x) { } } +double legendreP(int l, int m, double x) { + double p_mm = 1; + + if (m > 0) { + double somx2 = std::sqrt((1 - x) * (1 + x)); + double fact = 1; + for (int i=1; i<=m; i++) { + p_mm *= (-fact) * somx2; + fact += 2; + } + } + + if (l == m) + return p_mm; + + double p_mmp1 = x * (2*m + 1) * p_mm; + if (l == m+1) + return p_mmp1; + + double p_ll = 0; + for (int ll=m+2; ll <= l; ++ll) { + p_ll = ((2*ll-1)*x*p_mmp1 - (ll+m-1) * p_mm) / (ll-m); + p_mm = p_mmp1; + p_mmp1 = p_ll; + } + + return p_ll; +} + +float legendreP(int l, int m, float x) { + /* Evaluate the recurrence in double precision */ + double p_mm = 1; + + if (m > 0) { + double somx2 = std::sqrt((1 - x) * (1 + x)); + double fact = 1; + for (int i=1; i<=m; i++) { + p_mm *= (-fact) * somx2; + fact += 2; + } + } + + if (l == m) + return (float) p_mm; + + double p_mmp1 = x * (2*m + 1) * p_mm; + if (l == m+1) + return (float) p_mmp1; + + double p_ll = 0; + for (int ll=m+2; ll <= l; ++ll) { + p_ll = ((2*ll-1)*x*p_mmp1 - (ll+m-1) * p_mm) / (ll-m); + p_mm = p_mmp1; + p_mmp1 = p_ll; + } + + return (float) p_ll; +} + void gaussLegendre(int n, Float *nodes, Float *weights) { if (n-- < 1) SLog(EError, "gaussLegendre(): n must be >= 1"); diff --git a/src/libcore/random.cpp b/src/libcore/random.cpp index a91f5849..f85625cb 100644 --- 
a/src/libcore/random.cpp +++ b/src/libcore/random.cpp @@ -477,11 +477,11 @@ Random::Random() : mt(NULL) { seed(); #else #if 0 - uint64_t buf[MT_N]; - memset(buf, 0, MT_N * sizeof(uint64_t)); /* Make GCC happy */ + uint64_t buf[N64]; + memset(buf, 0, N64 * sizeof(uint64_t)); /* Make GCC happy */ ref urandom = new FileStream("/dev/urandom", FileStream::EReadOnly); - urandom->readULongArray(buf, MT_N); - seed(buf, MT_N); + urandom->readULongArray(buf, N64); + seed(buf, N64); #else seed(); #endif diff --git a/src/libcore/sched.cpp b/src/libcore/sched.cpp index a2765adb..21f9a734 100644 --- a/src/libcore/sched.cpp +++ b/src/libcore/sched.cpp @@ -158,10 +158,11 @@ void Scheduler::retainResource(int id) { rec->refCount++; } -void Scheduler::unregisterResource(int id) { +bool Scheduler::unregisterResource(int id) { LockGuard lock(m_mutex); if (m_resources.find(id) == m_resources.end()) { - Log(EError, "unregisterResource(): could not find the resource with ID %i!", id); + Log(EWarn, "unregisterResource(): could not find the resource with ID %i!", id); + return false; } ResourceRecord *rec = m_resources[id]; if (--rec->refCount == 0) { @@ -175,6 +176,7 @@ void Scheduler::unregisterResource(int id) { for (size_t i=0; isignalResourceExpiration(id); } + return true; } SerializableObject *Scheduler::getResource(int id, int coreIndex) { diff --git a/src/libcore/sched_remote.cpp b/src/libcore/sched_remote.cpp index d11e763a..95f737c5 100644 --- a/src/libcore/sched_remote.cpp +++ b/src/libcore/sched_remote.cpp @@ -325,14 +325,7 @@ StreamBackend::StreamBackend(const std::string &thrName, Scheduler *scheduler, m_memStream->setByteOrder(Stream::ENetworkByteOrder); } -StreamBackend::~StreamBackend() { - if (m_stream->getClass()->derivesFrom(MTS_CLASS(SocketStream))) { - SocketStream *sstream = static_cast(m_stream.get()); - Log(EInfo, "Closing connection to %s - received %i KB / sent %i KB", - sstream->getPeer().c_str(), (int) (sstream->getReceivedBytes() / 1024), - (int) (sstream->getSentBytes() / 1024)); - } -} +StreamBackend::~StreamBackend() { } void StreamBackend::run() { if (m_detach) @@ -489,6 +482,13 @@ void StreamBackend::run() { Log(EWarn, "Removing stray resource %i", (*it).first); m_scheduler->unregisterResource((*it).second); } + + if (m_stream->getClass()->derivesFrom(MTS_CLASS(SocketStream))) { + SocketStream *sstream = static_cast(m_stream.get()); + Log(EInfo, "Closing connection to %s - received %i KB / sent %i KB", + sstream->getPeer().c_str(), (int) (sstream->getReceivedBytes() / 1024), + (int) (sstream->getSentBytes() / 1024)); + } } void StreamBackend::sendCancellation(int id, int numLost) { diff --git a/src/libcore/shvector.cpp b/src/libcore/shvector.cpp index d08a1778..3a19bbff 100644 --- a/src/libcore/shvector.cpp +++ b/src/libcore/shvector.cpp @@ -62,12 +62,12 @@ Float SHVector::eval(Float theta, Float phi) const { for (int l=0; l0) { - Float somx2 = std::sqrt(((Float) 1 - x) * ((Float) 1 + x)); - Float fact = 1.0; - for (int i=1; i<=m; i++) { - pmm *= (-fact) * somx2; - fact += (Float) 2; - } - } - - if (l==m) - return pmm; - - Float pmmp1 = x * ((Float) 2 * m + (Float) 1) * pmm; - if (l==m+1) - return pmmp1; - - Float pll = (Float) 0; - for (int ll=m+2; ll<=l; ++ll) { - pll = (((Float) 2 * ll - (Float) 1)*x*pmmp1 - - (ll + m - (Float) 1) * pmm ) / (ll-m); - pmm = pmmp1; - pmmp1 = pll; - } - - return pll; -} - void SHVector::normalize() { Float correction = 1/(2 * (Float) std::sqrt(M_PI)*operator()(0,0)); @@ -559,8 +529,8 @@ Float *SHSampler::legendreIntegrals(Float a, Float b) { for 
(int l=0; l. +*/ + +#include + +MTS_NAMESPACE_BEGIN + +Float evalCubicInterp1D(Float x, const Float *values, size_t size, Float min, Float max, bool extrapolate) { + /* Give up when given an out-of-range or NaN argument */ + if (!(x >= min && x <= max) && !extrapolate) + return 0.0f; + + /* Transform 'x' so that knots lie at integer positions */ + Float t = ((x - min) * (size - 1)) / (max - min); + + /* Find the index of the left knot in the queried subinterval, be + robust to cases where 't' lies exactly on the right endpoint */ + size_t k = std::max((size_t) 0, std::min((size_t) t, size - 2)); + + Float f0 = values[k], + f1 = values[k+1], + d0, d1; + + /* Approximate the derivatives */ + if (k > 0) + d0 = 0.5f * (values[k+1] - values[k-1]); + else + d0 = values[k+1] - values[k]; + + if (k + 2 < size) + d1 = 0.5f * (values[k+2] - values[k]); + else + d1 = values[k+1] - values[k]; + + /* Compute the relative position within the interval */ + t = t - (Float) k; + + Float t2 = t*t, t3 = t2*t; + + return + ( 2*t3 - 3*t2 + 1) * f0 + + (-2*t3 + 3*t2) * f1 + + ( t3 - 2*t2 + t) * d0 + + ( t3 - t2) * d1; +} + +Float evalCubicInterp1DN(Float x, const Float *nodes, const Float *values, size_t size, bool extrapolate) { + /* Give up when given an out-of-range or NaN argument */ + if (!(x >= nodes[0] && x <= nodes[size-1]) && !extrapolate) + return 0.0f; + + size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size - 2, + std::lower_bound(nodes, nodes + size, x) - nodes - 1)); + + Float f0 = values[k], + f1 = values[k+1], + width = nodes[k+1] - nodes[k], + d0, d1; + + /* Approximate the derivatives */ + if (k > 0) + d0 = width * (f1 - values[k-1]) / (nodes[k+1] - nodes[k-1]); + else + d0 = f1 - f0; + + if (k + 2 < size) + d1 = width * (values[k+2] - f0) / (nodes[k+2] - nodes[k]); + else + d1 = f1 - f0; + + Float t = (x - nodes[k]) / width; + Float t2 = t*t, t3 = t2*t; + + return + ( 2*t3 - 3*t2 + 1) * f0 + + (-2*t3 + 3*t2) * f1 + + ( t3 - 2*t2 + t) * d0 + + ( t3 - t2) * d1; +} + +Float integrateCubicInterp1D(size_t idx, const Float *values, size_t size, Float min, Float max) { + Float f0 = values[idx], f1 = values[idx+1], d0, d1; + + /* Approximate the derivatives */ + if (idx > 0) + d0 = 0.5f * (values[idx+1] - values[idx-1]); + else + d0 = values[idx+1] - values[idx]; + + if (idx + 2 < size) + d1 = 0.5f * (values[idx+2] - values[idx]); + else + d1 = values[idx+1] - values[idx]; + + return ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f) * (max-min) / (size - 1); +} + +Float integrateCubicInterp1DN(size_t idx, const Float *nodes, const Float *values, size_t size) { + Float f0 = values[idx], + f1 = values[idx+1], + width = nodes[idx+1] - nodes[idx], + d0, d1; + + /* Approximate the derivatives */ + if (idx > 0) + d0 = width * (f1 - values[idx-1]) / (nodes[idx+1] - nodes[idx-1]); + else + d0 = f1 - f0; + + if (idx + 2 < size) + d1 = width * (values[idx+2] - f0) / (nodes[idx+2] - nodes[idx]); + else + d1 = f1 - f0; + + return ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f) * width; +} + +Float sampleCubicInterp1D(size_t idx, Float *values, size_t size, Float min, + Float max, Float sample, Float *fval) { + Float f0 = values[idx], f1 = values[idx+1], d0, d1; + + /* Approximate the derivatives */ + if (idx > 0) + d0 = 0.5f * (values[idx+1] - values[idx-1]); + else + d0 = values[idx+1] - values[idx]; + + if (idx + 2 < size) + d1 = 0.5f * (values[idx+2] - values[idx]); + else + d1 = values[idx+1] - values[idx]; + + /* Bracketing interval and starting guess */ + Float a = 0, c = 1, b; + + if 
(f0 != f1) /* Importance sample linear interpolant */ + b = (f0-math::safe_sqrt(f0*f0 + sample * (f1*f1-f0*f0))) / (f0-f1); + else + b = sample; + + sample *= ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f); + + /* Invert CDF using Newton-Bisection */ + while (true) { + if (!(b >= a && b <= c)) + b = 0.5f * (a + c); + + /* CDF and PDF in Horner form */ + Float value = b*(f0 + b*(.5f*d0 + b*((Float) (1.0f/3.0f) * (-2*d0-d1) + + f1 - f0 + b*(0.25f*(d0 + d1) + 0.5f * (f0 - f1))))) - sample; + Float deriv = f0 + b*(d0 + b*(-2*d0 - d1 + 3*(f1-f0) + b*(d0 + d1 + 2*(f0 - f1)))); + + if (std::abs(value) < 1e-6f) { + if (fval) + *fval = deriv; + return min + (idx+b) * (max-min) / (size-1); + } + + if (value > 0) + c = b; + else + a = b; + + b -= value / deriv; + } +} + +Float sampleCubicInterp1DN(size_t idx, Float *nodes, Float *values, + size_t size, Float sample, Float *fval) { + Float f0 = values[idx], + f1 = values[idx+1], + width = nodes[idx+1] - nodes[idx], + d0, d1; + + /* Approximate the derivatives */ + if (idx > 0) + d0 = width * (f1 - values[idx-1]) / (nodes[idx+1] - nodes[idx-1]); + else + d0 = f1 - f0; + + if (idx + 2 < size) + d1 = width * (values[idx+2] - f0) / (nodes[idx+2] - nodes[idx]); + else + d1 = f1 - f0; + + /* Bracketing interval and starting guess */ + Float a = 0, c = 1, b; + + if (f0 != f1) /* Importance sample linear interpolant */ + b = (f0-math::safe_sqrt(f0*f0 + sample * (f1*f1-f0*f0))) / (f0-f1); + else + b = sample; + + sample *= ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f); + + /* Invert CDF using Newton-Bisection */ + while (true) { + if (!(b >= a && b <= c)) + b = 0.5f * (a + c); + + /* CDF and PDF in Horner form */ + Float value = b*(f0 + b*(.5f*d0 + b*((Float) (1.0f/3.0f) * (-2*d0-d1) + + f1 - f0 + b*(0.25f*(d0 + d1) + 0.5f * (f0 - f1))))) - sample; + Float deriv = f0 + b*(d0 + b*(-2*d0 - d1 + 3*(f1-f0) + b*(d0 + d1 + 2*(f0 - f1)))); + + if (std::abs(value) < 1e-6f) { + if (fval) + *fval = deriv; + return nodes[idx] + width*b; + } + + if (value > 0) + c = b; + else + a = b; + + b -= value / deriv; + } +} + +Float evalCubicInterp2D(const Point2 &p, const Float *values, const Size2 &size, + const Point2 &min, const Point2 &max, bool extrapolate) { + Float knotWeights[2][4]; + Size2 knot; + + /* Compute interpolation weights separately for each dimension */ + for (int dim=0; dim<2; ++dim) { + Float *weights = knotWeights[dim]; + /* Give up when given an out-of-range or NaN argument */ + if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate) + return 0.0f; + + /* Transform 'p' so that knots lie at integer positions */ + Float t = ((p[dim] - min[dim]) * (size[dim] - 1)) + / (max[dim]-min[dim]); + + /* Find the index of the left knot in the queried subinterval, be + robust to cases where 't' lies exactly on the right endpoint */ + knot[dim] = std::min((size_t) t, size[dim] - 2); + + /* Compute the relative position within the interval */ + t = t - (Float) knot[dim]; + + /* Compute node weights */ + Float t2 = t*t, t3 = t2*t; + weights[0] = 0.0f; + weights[1] = 2*t3 - 3*t2 + 1; + weights[2] = -2*t3 + 3*t2; + weights[3] = 0.0f; + + /* Derivative weights */ + Float d0 = t3 - 2*t2 + t, + d1 = t3 - t2; + + /* Turn derivative weights into node weights using + an appropriate chosen finite differences stencil */ + if (knot[dim] > 0) { + weights[2] += 0.5f * d0; + weights[0] -= 0.5f * d0; + } else { + weights[2] += d0; + weights[1] -= d0; + } + + if (knot[dim] + 2 < size[dim]) { + weights[3] += 0.5f * d1; + weights[1] -= 0.5f * d1; + } else { + weights[2] += 
d1; + weights[1] -= d1; + } + } + + Float result = 0.0f; + for (int y=-1; y<=2; ++y) { + Float wy = knotWeights[1][y+1]; + for (int x=-1; x<=2; ++x) { + Float wxy = knotWeights[0][x+1] * wy; + + if (wxy == 0) + continue; + + size_t pos = (knot[1] + y) * size[0] + knot[0] + x; + + result += values[pos] * wxy; + } + } + return result; +} + +Float evalCubicInterp2DN(const Point2 &p, const Float **nodes_, + const Float *values, const Size2 &size, bool extrapolate) { + Float knotWeights[2][4]; + Size2 knot; + + /* Compute interpolation weights separately for each dimension */ + for (int dim=0; dim<2; ++dim) { + const Float *nodes = nodes_[dim]; + Float *weights = knotWeights[dim]; + + /* Give up when given an out-of-range or NaN argument */ + if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate) + return 0.0f; + + /* Find the index of the left knot in the queried subinterval, be + robust to cases where 't' lies exactly on the right endpoint */ + size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2, + std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1)); + knot[dim] = k; + + Float width = nodes[k+1] - nodes[k]; + + /* Compute the relative position within the interval */ + Float t = (p[dim] - nodes[k]) / width, + t2 = t*t, t3 = t2*t; + + /* Compute node weights */ + weights[0] = 0.0f; + weights[1] = 2*t3 - 3*t2 + 1; + weights[2] = -2*t3 + 3*t2; + weights[3] = 0.0f; + + /* Derivative weights */ + Float d0 = t3 - 2*t2 + t, d1 = t3 - t2; + + /* Turn derivative weights into node weights using + an appropriate chosen finite differences stencil */ + if (k > 0) { + Float factor = width / (nodes[k+1]-nodes[k-1]); + weights[2] += d0 * factor; + weights[0] -= d0 * factor; + } else { + weights[2] += d0; + weights[1] -= d0; + } + + if (k + 2 < size[dim]) { + Float factor = width / (nodes[k+2]-nodes[k]); + weights[3] += d1 * factor; + weights[1] -= d1 * factor; + } else { + weights[2] += d1; + weights[1] -= d1; + } + } + + Float result = 0.0f; + for (int y=-1; y<=2; ++y) { + Float wy = knotWeights[1][y+1]; + for (int x=-1; x<=2; ++x) { + Float wxy = knotWeights[0][x+1] * wy; + + if (wxy == 0) + continue; + + size_t pos = (knot[1] + y) * size[0] + knot[0] + x; + + result += values[pos] * wxy; + } + } + return result; +} + +Float evalCubicInterp3D(const Point3 &p, const Float *values, const Size3 &size, + const Point3 &min, const Point3 &max, bool extrapolate) { + Float knotWeights[3][4]; + Size3 knot; + + /* Compute interpolation weights separately for each dimension */ + for (int dim=0; dim<3; ++dim) { + Float *weights = knotWeights[dim]; + /* Give up when given an out-of-range or NaN argument */ + if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate) + return 0.0f; + + /* Transform 'p' so that knots lie at integer positions */ + Float t = ((p[dim] - min[dim]) * (size[dim] - 1)) + / (max[dim]-min[dim]); + + /* Find the index of the left knot in the queried subinterval, be + robust to cases where 't' lies exactly on the right endpoint */ + knot[dim] = std::min((size_t) t, size[dim] - 2); + + /* Compute the relative position within the interval */ + t = t - (Float) knot[dim]; + + /* Compute node weights */ + Float t2 = t*t, t3 = t2*t; + weights[0] = 0.0f; + weights[1] = 2*t3 - 3*t2 + 1; + weights[2] = -2*t3 + 3*t2; + weights[3] = 0.0f; + + /* Derivative weights */ + Float d0 = t3 - 2*t2 + t, + d1 = t3 - t2; + + /* Turn derivative weights into node weights using + an appropriate chosen finite differences stencil */ + if (knot[dim] > 0) { + 
weights[2] += 0.5f * d0; + weights[0] -= 0.5f * d0; + } else { + weights[2] += d0; + weights[1] -= d0; + } + + if (knot[dim] + 2 < size[dim]) { + weights[3] += 0.5f * d1; + weights[1] -= 0.5f * d1; + } else { + weights[2] += d1; + weights[1] -= d1; + } + } + + Float result = 0.0f; + for (int z=-1; z<=2; ++z) { + Float wz = knotWeights[2][z+1]; + for (int y=-1; y<=2; ++y) { + Float wyz = knotWeights[1][y+1] * wz; + for (int x=-1; x<=2; ++x) { + Float wxyz = knotWeights[0][x+1] * wyz; + + if (wxyz == 0) + continue; + + size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y)) + * size[0] + knot[0] + x; + + result += values[pos] * wxyz; + } + } + } + return result; +} + +Float evalCubicInterp3DN(const Point3 &p, const Float **nodes_, + const Float *values, const Size3 &size, bool extrapolate) { + Float knotWeights[3][4]; + Size3 knot; + + /* Compute interpolation weights separately for each dimension */ + for (int dim=0; dim<3; ++dim) { + const Float *nodes = nodes_[dim]; + Float *weights = knotWeights[dim]; + + /* Give up when given an out-of-range or NaN argument */ + if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate) + return 0.0f; + + /* Find the index of the left knot in the queried subinterval, be + robust to cases where 't' lies exactly on the right endpoint */ + size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2, + std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1)); + knot[dim] = k; + + Float width = nodes[k+1] - nodes[k]; + + /* Compute the relative position within the interval */ + Float t = (p[dim] - nodes[k]) / width, + t2 = t*t, t3 = t2*t; + + /* Compute node weights */ + weights[0] = 0.0f; + weights[1] = 2*t3 - 3*t2 + 1; + weights[2] = -2*t3 + 3*t2; + weights[3] = 0.0f; + + /* Derivative weights */ + Float d0 = t3 - 2*t2 + t, d1 = t3 - t2; + + /* Turn derivative weights into node weights using + an appropriate chosen finite differences stencil */ + if (k > 0) { + Float factor = width / (nodes[k+1]-nodes[k-1]); + weights[2] += d0 * factor; + weights[0] -= d0 * factor; + } else { + weights[2] += d0; + weights[1] -= d0; + } + + if (k + 2 < size[dim]) { + Float factor = width / (nodes[k+2]-nodes[k]); + weights[3] += d1 * factor; + weights[1] -= d1 * factor; + } else { + weights[2] += d1; + weights[1] -= d1; + } + } + + Float result = 0.0f; + for (int z=-1; z<=2; ++z) { + Float wz = knotWeights[2][z+1]; + for (int y=-1; y<=2; ++y) { + Float wyz = knotWeights[1][y+1] * wz; + for (int x=-1; x<=2; ++x) { + Float wxyz = knotWeights[0][x+1] * wyz; + + if (wxyz == 0) + continue; + + size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y)) + * size[0] + knot[0] + x; + + result += values[pos] * wxyz; + } + } + } + return result; +} + +MTS_NAMESPACE_END diff --git a/src/libcore/ssemath.cpp b/src/libcore/ssemath.cpp index 83a11a2b..b1cc466d 100644 --- a/src/libcore/ssemath.cpp +++ b/src/libcore/ssemath.cpp @@ -16,6 +16,12 @@ along with this program. If not, see . 
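For reference, the sampleCubicInterp1D and sampleCubicInterp1DN routines above invert the piecewise-cubic CDF with a Newton iteration that falls back to bisection whenever an iterate leaves the current bracket. A minimal, self-contained C++ sketch of that inversion pattern (not part of the patch; cdf/pdf stand in for the Horner-form polynomials used above, and the 1e-6 tolerance mirrors the code):

    #include <cmath>
    #include <functional>

    /* Invert a monotone CDF on [a, c]: Newton steps with a bisection safeguard. */
    static double invertCDF(const std::function<double(double)> &cdf,
                            const std::function<double(double)> &pdf,
                            double target, double a, double c) {
        double b = 0.5 * (a + c);                 /* midpoint as the starting guess */
        while (true) {
            if (!(b >= a && b <= c))              /* Newton step left the bracket */
                b = 0.5 * (a + c);                /* fall back to bisection */
            double value = cdf(b) - target;
            if (std::abs(value) < 1e-6)
                return b;
            if (value > 0) c = b; else a = b;     /* shrink the bracket around the root */
            b -= value / pdf(b);                  /* Newton step; the PDF is the CDF derivative */
        }
    }

The only difference to the routines above is the starting guess: they seed b by importance sampling the linear interpolant between f0 and f1 rather than using the bracket midpoint.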
*/ +#if defined(__GXX_EXPERIMENTAL_CXX0X__) + /* Needed to prevent a segmentation fault in the Intel C++ + compiler on Linux (as of Nov 2012) */ + #undef __GXX_EXPERIMENTAL_CXX0X__ +#endif + #if MTS_SSE #include #include diff --git a/src/libcore/statistics.cpp b/src/libcore/statistics.cpp index f8b1fb82..7ae41f44 100644 --- a/src/libcore/statistics.cpp +++ b/src/libcore/statistics.cpp @@ -134,7 +134,7 @@ void Statistics::logPlugin(const std::string &name, const std::string &descr) { } void Statistics::printStats() { - SLog(EInfo, "Statistics: \n%s", getStats().c_str()); + SLog(EInfo, "Statistics:\n%s", getStats().c_str()); } std::string Statistics::getStats() { diff --git a/src/libcore/thread.cpp b/src/libcore/thread.cpp index 2811463b..9fd9e7fb 100644 --- a/src/libcore/thread.cpp +++ b/src/libcore/thread.cpp @@ -18,7 +18,7 @@ #include #include -#ifdef MTS_OPENMP +#if defined(MTS_OPENMP) # include #endif @@ -373,9 +373,9 @@ void Thread::yield() { void Thread::exit() { Log(EDebug, "Thread \"%s\" has finished", d->name.c_str()); d->running = false; - decRef(); - ThreadPrivate::self->set(NULL); + Assert(ThreadPrivate::self->get() == this); detail::destroyLocalTLS(); + decRef(); } std::string Thread::toString() const { @@ -396,6 +396,7 @@ static boost::mutex __unmanagedMutex; #if defined(MTS_OPENMP) && defined(__OSX__) static int __omp_threadCount = 0; static pthread_key_t __omp_key; +static bool __omp_key_created; int mts_omp_get_max_threads() { /* This function exists to sidestep an annoying @@ -446,35 +447,44 @@ void Thread::staticShutdown() { __unmanagedThreads[i]->decRef(); __unmanagedThreads.clear(); getThread()->d->running = false; - ThreadPrivate::self->set(NULL); + detail::destroyLocalTLS(); delete ThreadPrivate::self; ThreadPrivate::self = NULL; - detail::destroyLocalTLS(); detail::destroyGlobalTLS(); #if defined(__OSX__) + #if defined(MTS_OPENMP) + if (__omp_key_created) + pthread_key_delete(__omp_key); + #endif __mts_autorelease_shutdown(); #endif } void Thread::initializeOpenMP(size_t threadCount) { -#ifdef MTS_OPENMP +#if defined(MTS_OPENMP) ref logger = Thread::getThread()->getLogger(); ref fResolver = Thread::getThread()->getFileResolver(); #if defined(__OSX__) + if (!__omp_key_created) { + pthread_key_create(&__omp_key, NULL); + __omp_key_created = true; + } __omp_threadCount = threadCount; - pthread_key_create(&__omp_key, NULL); #endif + if (omp_get_dynamic()) + omp_set_dynamic(0); + omp_set_num_threads((int) threadCount); - omp_set_dynamic(false); int counter = 0; #pragma omp parallel { #if defined(__OSX__) - pthread_setspecific(__omp_key, reinterpret_cast(counter)); + if (!pthread_getspecific(__omp_key)) + pthread_setspecific(__omp_key, reinterpret_cast(counter)); #endif detail::initializeLocalTLS(); Thread *thread = Thread::getThread(); @@ -485,14 +495,25 @@ void Thread::initializeOpenMP(size_t threadCount) { formatString("omp%i", counter)); counter++; } + const std::string threadName = "Mitsuba: " + thread->getName(); + + #if defined(__LINUX__) + prctl(PR_SET_NAME, threadName.c_str()); + #elif defined(__OSX__) + pthread_setname_np(threadName.c_str()); + #elif defined(__WINDOWS__) + SetThreadName(threadName.c_str()); + #endif + thread->d->running = false; thread->d->joined = false; thread->d->fresolver = fResolver; thread->d->logger = logger; thread->incRef(); ThreadPrivate::self->set(thread); + #pragma omp critical - __unmanagedThreads.push_back((UnmanagedThread *) thread); + __unmanagedThreads.push_back((UnmanagedThread *) thread); } } #else diff --git 
a/src/libcore/tls.cpp b/src/libcore/tls.cpp index 7237ec54..75f6912b 100644 --- a/src/libcore/tls.cpp +++ b/src/libcore/tls.cpp @@ -17,16 +17,26 @@ */ #include + #include +#include #include -#include -#include +#include + +#include +#include +#include +#include +#include + #if defined(__OSX__) # include #endif MTS_NAMESPACE_BEGIN +namespace mi = boost::multi_index; + /* The native TLS classes on Linux/MacOS/Windows only support a limited number of dynamically allocated entries (usually 1024 or 1088). Furthermore, they do not provide appropriate cleanup semantics when the TLS object or one of @@ -45,16 +55,37 @@ struct TLSEntry { inline TLSEntry() : data(NULL), destructFunctor(NULL) { } }; -/// Per-thread TLS entry map -struct PerThreadData { - typedef boost::unordered_map Map; +/// boost multi-index element to act as replacement of map +template +struct mutable_pair { + mutable_pair(const T1 &f, const T2 &s) : first(f), second(s) { } - Map map; - boost::mutex mutex; + T1 first; + mutable T2 second; }; -/// List of all PerThreadData data structures (one for each thred) -std::set ptdGlobal; +/// Per-thread TLS entry map +struct PerThreadData { + typedef mutable_pair MapData; + typedef mi::member key_member; + struct seq_tag {}; + struct key_tag {}; + + typedef mi::multi_index_container, key_member>, + mi::sequenced > + > + > Map; + typedef mi::index::type::iterator key_iterator; + typedef mi::index::type::reverse_iterator reverse_iterator; + + Map map; + boost::recursive_mutex mutex; +}; + +/// List of all PerThreadData data structures (one for each thread) +boost::unordered_set ptdGlobal; /// Lock to protect ptdGlobal boost::mutex ptdGlobalLock; @@ -79,10 +110,10 @@ struct ThreadLocalBase::ThreadLocalPrivate { and clean up where necessary */ boost::lock_guard guard(ptdGlobalLock); - for (std::set::iterator it = ptdGlobal.begin(); + for (boost::unordered_set::iterator it = ptdGlobal.begin(); it != ptdGlobal.end(); ++it) { PerThreadData *ptd = *it; - boost::unique_lock lock(ptd->mutex); + boost::unique_lock lock(ptd->mutex); PerThreadData::Map::iterator it2 = ptd->map.find(this); TLSEntry entry; @@ -102,25 +133,32 @@ struct ThreadLocalBase::ThreadLocalPrivate { /// Look up a TLS entry. The goal is to make this operation very fast! std::pair get() { bool existed = true; + void *data; #if defined(__OSX__) PerThreadData *ptd = (PerThreadData *) pthread_getspecific(ptdLocal); #else PerThreadData *ptd = ptdLocal; #endif + if (EXPECT_NOT_TAKEN(!ptd)) + throw std::runtime_error("Internal error: call to ThreadLocalPrivate::get() " + " precedes the construction of thread-specific data structures!"); /* This is an uncontended thread-local lock (i.e. 
not to worry) */ - boost::lock_guard guard(ptd->mutex); - TLSEntry &entry = ptd->map[this]; - - if (EXPECT_NOT_TAKEN(!entry.data)) { + boost::lock_guard guard(ptd->mutex); + PerThreadData::key_iterator it = ptd->map.find(this); + if (EXPECT_TAKEN(it != ptd->map.end())) { + data = it->second.data; + } else { /* This is the first access from this thread */ - entry.data = constructFunctor(); + TLSEntry entry; + entry.data = data = constructFunctor(); entry.destructFunctor = destructFunctor; + ptd->map.insert(PerThreadData::MapData(this, entry)); existed = false; } - return std::make_pair(entry.data, existed); + return std::make_pair(data, existed); } }; @@ -191,10 +229,11 @@ void destroyLocalTLS() { PerThreadData *ptd = ptdLocal; #endif - boost::unique_lock lock(ptd->mutex); + boost::unique_lock lock(ptd->mutex); - for (PerThreadData::Map::iterator it = ptd->map.begin(); - it != ptd->map.end(); ++it) { + // Destroy the data in reverse order of creation + for (PerThreadData::reverse_iterator it = mi::get(ptd->map).rbegin(); + it != mi::get(ptd->map).rend(); ++it) { TLSEntry &entry = it->second; entry.destructFunctor(entry.data); } diff --git a/src/libcore/track.cpp b/src/libcore/track.cpp new file mode 100644 index 00000000..dc4ac3ac --- /dev/null +++ b/src/libcore/track.cpp @@ -0,0 +1,393 @@ +#include +#include +#include + +MTS_NAMESPACE_BEGIN + +AnimatedTransform::AnimatedTransform(const AnimatedTransform *trafo) + : m_transform(trafo->m_transform) { + m_tracks.reserve(trafo->getTrackCount()); + for (size_t i=0; igetTrackCount(); ++i) { + AbstractAnimationTrack *track = trafo->getTrack(i)->clone(); + m_tracks.push_back(track); + track->incRef(); + } +} + +AnimatedTransform::AnimatedTransform(Stream *stream) { + size_t nTracks = stream->readSize(); + if (nTracks == 0) { + m_transform = Transform(stream); + } else { + for (size_t i=0; ireadUInt(); + AbstractAnimationTrack *track = NULL; + switch (type) { + case AbstractAnimationTrack::ETranslationX: + case AbstractAnimationTrack::ETranslationY: + case AbstractAnimationTrack::ETranslationZ: + case AbstractAnimationTrack::EScaleX: + case AbstractAnimationTrack::EScaleY: + case AbstractAnimationTrack::EScaleZ: + case AbstractAnimationTrack::ERotationX: + case AbstractAnimationTrack::ERotationY: + case AbstractAnimationTrack::ERotationZ: + track = new FloatTrack(type, stream); + break; + case AbstractAnimationTrack::ETranslationXYZ: + case AbstractAnimationTrack::EScaleXYZ: + track = new VectorTrack(type, stream); + break; + case AbstractAnimationTrack::ERotationQuat: + track = new QuatTrack(type, stream); + break; + default: + Log(EError, "Encountered an unknown animation track type (%i)!", type); + } + + track->incRef(); + m_tracks.push_back(track); + } + } +} + +void AnimatedTransform::addTrack(AbstractAnimationTrack *track) { + track->incRef(); + m_tracks.push_back(track); +} + +AABB1 AnimatedTransform::getTimeBounds() const { + if (m_tracks.size() == 0) + return AABB1(0.0f, 0.0f); + + Float min = std::numeric_limits::infinity(); + Float max = -std::numeric_limits::infinity(); + + for (size_t i=0; igetSize(); + SAssert(size > 0); + min = std::min(min, track->getTime(0)); + max = std::max(max, track->getTime(size-1)); + } + + return AABB1(min, max); +} + +AABB AnimatedTransform::getTranslationBounds() const { + if (m_tracks.size() == 0) { + Point p = m_transform(Point(0.0f)); + return AABB(p, p); + } + + AABB aabb; + + for (size_t i=0; igetType()) { + case AbstractAnimationTrack::ETranslationX: + case AbstractAnimationTrack::ETranslationY: + 
case AbstractAnimationTrack::ETranslationZ: { + int idx = absTrack->getType() - AbstractAnimationTrack::ETranslationX; + const FloatTrack *track = + static_cast(absTrack); + for (size_t j=0; jgetSize(); ++j) { + Float value = track->getValue(j); + aabb.max[idx] = std::max(aabb.max[idx], value); + aabb.min[idx] = std::min(aabb.min[idx], value); + } + } + break; + + case AbstractAnimationTrack::ETranslationXYZ: { + const VectorTrack *track = + static_cast(absTrack); + for (size_t j=0; jgetSize(); ++j) + aabb.expandBy(Point(track->getValue(j))); + } + break; + default: + break; + } + } + for (int i=0; i<3; ++i) { + if (aabb.min[i] > aabb.max[i]) + aabb.min[i] = aabb.max[i] = 0.0f; + } + + return aabb; +} + +AABB AnimatedTransform::getSpatialBounds(const AABB &aabb) const { + AABB result; + + if (m_tracks.size() == 0) { + for (int j=0; j<8; ++j) + result.expandBy(m_transform(aabb.getCorner(j))); + } else { + /* Compute approximate bounds */ + int nSteps = 100; + AABB1 timeBounds = getTimeBounds(); + Float step = timeBounds.getExtents().x / (nSteps-1); + + for (int i=0; idecRef(); +} + +void AnimatedTransform::sortAndSimplify() { + bool isStatic = true; + + for (size_t i=0; igetType()) { + case AbstractAnimationTrack::ETranslationX: + case AbstractAnimationTrack::ETranslationY: + case AbstractAnimationTrack::ETranslationZ: + case AbstractAnimationTrack::ERotationX: + case AbstractAnimationTrack::ERotationY: + case AbstractAnimationTrack::ERotationZ: + case AbstractAnimationTrack::EScaleX: + case AbstractAnimationTrack::EScaleY: + case AbstractAnimationTrack::EScaleZ: + isNeeded = static_cast(track)->sortAndSimplify(); + break; + case AbstractAnimationTrack::ETranslationXYZ: + case AbstractAnimationTrack::EScaleXYZ: + isNeeded = static_cast(track)->sortAndSimplify(); + break; + case AbstractAnimationTrack::ERotationQuat: + isNeeded = static_cast(track)->sortAndSimplify(); + break; + default: + Log(EError, "Encountered an unsupported " + "animation track type: %i!", track->getType()); + } + if (isNeeded) { + isStatic &= track->getSize() == 1; + } else { + m_tracks.erase(m_tracks.begin() + i); + track->decRef(); + --i; + } + } + + if (isStatic) { + Transform temp; + temp = eval(0); + m_transform = temp; + for (size_t i=0; idecRef(); + m_tracks.clear(); + } +} + + +const AbstractAnimationTrack *AnimatedTransform::findTrack(AbstractAnimationTrack::EType type) const { + for (size_t i=0; igetType() == type) + return track; + } + return NULL; +} +AbstractAnimationTrack *AnimatedTransform::findTrack(AbstractAnimationTrack::EType type) { + for (size_t i=0; igetType() == type) + return track; + } + return NULL; +} + +void AnimatedTransform::prependScale(const Vector &scale) { + FloatTrack *trackX = (FloatTrack *) findTrack(AbstractAnimationTrack::EScaleX); + FloatTrack *trackY = (FloatTrack *) findTrack(AbstractAnimationTrack::EScaleY); + FloatTrack *trackZ = (FloatTrack *) findTrack(AbstractAnimationTrack::EScaleZ); + VectorTrack *trackXYZ = (VectorTrack *) findTrack(AbstractAnimationTrack::EScaleXYZ); + + if (m_tracks.empty()) { + m_transform = m_transform * Transform::scale(scale); + } else if (trackXYZ) { + trackXYZ->prependTransformation(scale); + } else if (trackX && trackY && trackZ) { + if (trackX) { + trackX->prependTransformation(scale.x); + } else { + trackX = new FloatTrack(AbstractAnimationTrack::EScaleX); + trackX->append(0.0f, scale.x); addTrack(trackX); + } + + if (trackY) { + trackY->prependTransformation(scale.y); + } else { + trackY = new FloatTrack(AbstractAnimationTrack::EScaleY); + 
trackY->append(0.0f, scale.y); addTrack(trackY); + } + + if (trackZ) { + trackZ->prependTransformation(scale.z); + } else { + trackZ = new FloatTrack(AbstractAnimationTrack::EScaleZ); + trackZ->append(0.0f, scale.z); addTrack(trackZ); + } + } else { + trackXYZ = new VectorTrack(AbstractAnimationTrack::EScaleXYZ); + trackXYZ->append(0.0f, scale); + addTrack(trackXYZ); + } +} + +void AnimatedTransform::collectKeyframes(std::set &result) const { + for (size_t i=0; igetSize(); ++j) + result.insert(track->getTime(j)); + } + + if (result.size() == 0) + result.insert((Float) 0); +} + +void AnimatedTransform::serialize(Stream *stream) const { + stream->writeSize(m_tracks.size()); + if (m_tracks.size() == 0) { + m_transform.serialize(stream); + } else { + for (size_t i=0; iserialize(stream); + } +} + +void AnimatedTransform::TransformFunctor::operator()(const Float &t, Transform &trafo) const { + Vector translation(0.0f); + Vector scale(1.0f); + Quaternion rotation; + + for (size_t i=0; igetType()) { + case AbstractAnimationTrack::ETranslationX: + translation.x = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::ETranslationY: + translation.y = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::ETranslationZ: + translation.z = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::ETranslationXYZ: + translation = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::EScaleX: + scale.x = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::EScaleY: + scale.y = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::EScaleZ: + scale.z = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::EScaleXYZ: + scale = static_cast(track)->eval(t); + break; + case AbstractAnimationTrack::ERotationQuat: + rotation = static_cast(track)->eval(t); + break; + default: + Log(EError, "Encountered an unsupported " + "animation track type: %i!", track->getType()); + } + } + + trafo = Transform::translate(translation); + + if (!rotation.isIdentity()) + trafo = trafo * rotation.toTransform(); + + if (scale != Vector(0.0f)) + trafo = trafo * Transform::scale(scale); +} + +void AnimatedTransform::appendTransform(Float time, const Transform &trafo) { + /* Compute the polar decomposition and insert into the animated transform; + uh oh.. 
we have to get rid of the two separate matrix libraries at some point :) */ + typedef Eigen::Matrix EMatrix; + + if (m_tracks.size() == 0) { + ref translation = new VectorTrack(VectorTrack::ETranslationXYZ); + ref rotation = new QuatTrack(VectorTrack::ERotationQuat); + ref scaling = new VectorTrack(VectorTrack::EScaleXYZ); + translation->reserve(2); + rotation->reserve(2); + scaling->reserve(2); + addTrack(translation); + addTrack(rotation); + addTrack(scaling); + } else if (m_tracks.size() != 3 || + m_tracks[0]->getType() != VectorTrack::ETranslationXYZ || + m_tracks[1]->getType() != VectorTrack::ERotationQuat || + m_tracks[2]->getType() != VectorTrack::EScaleXYZ) { + Log(EError, "AnimatedTransform::appendTransform(): unsupported internal configuration!"); + } + + const Matrix4x4 m = trafo.getMatrix(); + EMatrix A; + + A << m(0, 0), m(0, 1), m(0, 2), + m(1, 0), m(1, 1), m(1, 2), + m(2, 0), m(2, 1), m(2, 2); + + Eigen::JacobiSVD svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV); + EMatrix U = svd.matrixU(), V = svd.matrixV(), S = svd.singularValues().asDiagonal(); + + if (svd.singularValues().prod() < 0) { + S = -S; U = -U; + } + + EMatrix Q = U*V.transpose(); + EMatrix P = V*S*V.transpose(); + + VectorTrack *translation = (VectorTrack *) m_tracks[0]; + QuatTrack *rotation = (QuatTrack *) m_tracks[1]; + VectorTrack *scaling = (VectorTrack *) m_tracks[2]; + + rotation->append(time, Quaternion::fromMatrix( + Matrix4x4( + Q(0, 0), Q(0, 1), Q(0, 2), 0.0f, + Q(1, 0), Q(1, 1), Q(1, 2), 0.0f, + Q(2, 0), Q(2, 1), Q(2, 2), 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f + ) + )); + + scaling->append(time, Vector(P(0, 0), P(1, 1), P(2, 2))); + translation->append(time, Vector(m(0, 3), m(1, 3), m(2, 3))); +} + +std::string AnimatedTransform::toString() const { + if (m_tracks.size() == 0) { + return m_transform.toString(); + } else { + std::ostringstream oss; + oss << "AnimatedTransform[tracks=" << m_tracks.size() << "]"; + return oss.str(); + } +} + +MTS_IMPLEMENT_CLASS(AbstractAnimationTrack, true, Object) +MTS_IMPLEMENT_CLASS(AnimatedTransform, false, Object) +MTS_NAMESPACE_END diff --git a/src/libcore/transform.cpp b/src/libcore/transform.cpp index 71004bec..5f056310 100644 --- a/src/libcore/transform.cpp +++ b/src/libcore/transform.cpp @@ -189,8 +189,8 @@ Transform Transform::glOrthographic(Float clipLeft, Float clipRight, } Transform Transform::lookAt(const Point &p, const Point &t, const Vector &up) { - Vector dir = normalize(t-p); - Vector left = normalize(cross(up, dir)); + Vector dir = normalizeStrict(t-p, "lookAt(): 'origin' and 'target' coincide!"); + Vector left = normalizeStrict(cross(up, dir), "lookAt(): the forward and upward direction must be linearly independent!"); Vector newUp = cross(dir, left); Matrix4x4 result, inverse; diff --git a/src/libcore/triangle.cpp b/src/libcore/triangle.cpp index fb761445..a1965bd2 100644 --- a/src/libcore/triangle.cpp +++ b/src/libcore/triangle.cpp @@ -22,7 +22,7 @@ MTS_NAMESPACE_BEGIN Point Triangle::sample(const Point *positions, const Normal *normals, - Normal &normal, const Point2 &sample) const { + const Point2 *texCoords, Normal &normal, Point2 &uv, const Point2 &sample) const { const Point &p0 = positions[idx[0]]; const Point &p1 = positions[idx[1]]; const Point &p2 = positions[idx[2]]; @@ -44,6 +44,17 @@ Point Triangle::sample(const Point *positions, const Normal *normals, normal = Normal(normalize(cross(sideA, sideB))); } + if (texCoords) { + const Point2 &uv0 = texCoords[idx[0]]; + const Point2 &uv1 = texCoords[idx[1]]; + const Point2 &uv2 = 
texCoords[idx[2]]; + + uv = uv0 * (1.0f - bary.x - bary.y) + + uv1 * bary.x + uv2 * bary.y; + } else { + uv = bary; + } + return p; } diff --git a/src/libcore/util.cpp b/src/libcore/util.cpp index 3f2bf951..199f5eb5 100644 --- a/src/libcore/util.cpp +++ b/src/libcore/util.cpp @@ -28,13 +28,16 @@ #if defined(__OSX__) #include -#elif defined(WIN32) +#include +#elif defined(__WINDOWS__) +#include #include +#include #else #include #endif -#if defined(WIN32) +#if defined(__WINDOWS__) # include # include # include @@ -120,21 +123,8 @@ std::string indent(const std::string &string, int amount) { return oss.str(); } -std::string memString(size_t size) { - Float value = (Float) size; - const char *prefixes[] = { - "B", "KiB", "MiB", "GiB", "TiB", "PiB" - }; - int prefix = 0; - while (prefix < 5 && value > 1024.0f) { - value /= 1024.0f; ++prefix; - } - return formatString(prefix == 0 ? - "%.0f %s" : "%.2f %s", value, prefixes[prefix]); -} - void * __restrict allocAligned(size_t size) { -#if defined(WIN32) +#if defined(__WINDOWS__) return _aligned_malloc(size, L1_CACHE_LINE_SIZE); #elif defined(__OSX__) /* OSX malloc already returns 16-byte aligned data suitable @@ -146,7 +136,7 @@ void * __restrict allocAligned(size_t size) { } void freeAligned(void *ptr) { -#if defined(WIN32) +#if defined(__WINDOWS__) _aligned_free(ptr); #else free(ptr); @@ -154,7 +144,7 @@ void freeAligned(void *ptr) { } int getCoreCount() { -#if defined(WIN32) +#if defined(__WINDOWS__) SYSTEM_INFO sys_info; GetSystemInfo(&sys_info); return sys_info.dwNumberOfProcessors; @@ -169,7 +159,45 @@ int getCoreCount() { #endif } -#if defined(WIN32) +size_t getPrivateMemoryUsage() { +#if defined(__WINDOWS__) + PROCESS_MEMORY_COUNTERS_EX pmc; + GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS *) &pmc, sizeof(pmc)); + return (size_t) pmc.PrivateUsage; /* Process-private memory usage (RAM + swap) */ +#elif defined(__OSX__) + struct task_basic_info_64 t_info; + mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_64_COUNT; + + if (task_info(mach_task_self(), TASK_BASIC_INFO_64, + (task_info_t)&t_info, &t_info_count) != KERN_SUCCESS) + return 0; + + return (size_t) t_info.resident_size; /* Not exactly what we want -- oh well.. 
*/ +#else + FILE* file = fopen("/proc/self/status", "r"); + if (!file) + return 0; + + char buffer[128]; + size_t result = 0; + while (fgets(buffer, sizeof(buffer), file) != NULL) { + if (strncmp(buffer, "VmRSS:", 6) != 0 && /* Non-swapped physical memory specific to this process */ + strncmp(buffer, "VmSwap:", 7) != 0) /* Swapped memory specific to this process */ + continue; + + char *line = buffer; + while (*line < '0' || *line > '9') + ++line; + line[strlen(line)-3] = '\0'; + result += (size_t) atoi(line) * 1024; + } + + fclose(file); + return result; +#endif +} + +#if defined(__WINDOWS__) std::string lastErrorText() { DWORD errCode = GetLastError(); char *errorText = NULL; @@ -192,7 +220,7 @@ std::string lastErrorText() { bool enableFPExceptions() { bool exceptionsWereEnabled = false; -#if defined(WIN32) +#if defined(__WINDOWS__) _clearfp(); uint32_t cw = _controlfp(0, 0); exceptionsWereEnabled = ~cw & (_EM_INVALID | _EM_ZERODIVIDE | _EM_OVERFLOW); @@ -211,7 +239,7 @@ bool enableFPExceptions() { bool disableFPExceptions() { bool exceptionsWereEnabled = false; -#if defined(WIN32) +#if defined(__WINDOWS__) _clearfp(); uint32_t cw = _controlfp(0, 0); exceptionsWereEnabled = ~cw & (_EM_INVALID | _EM_ZERODIVIDE | _EM_OVERFLOW); @@ -230,7 +258,7 @@ bool disableFPExceptions() { void restoreFPExceptions(bool oldState) { bool currentState; -#if defined(WIN32) +#if defined(__WINDOWS__) uint32_t cw = _controlfp(0, 0); currentState = ~cw & (_EM_INVALID | _EM_ZERODIVIDE | _EM_OVERFLOW); #elif defined(__OSX__) @@ -249,7 +277,7 @@ void restoreFPExceptions(bool oldState) { std::string getHostName() { char hostName[128]; if (gethostname(hostName, sizeof(hostName)) != 0) -#if defined(WIN32) +#if defined(__WINDOWS__) SLog(EError, "Could not retrieve the computer's host name: %s!", lastErrorText().c_str()); #else @@ -280,7 +308,7 @@ std::string getFQDN() { fqdn, NI_MAXHOST, NULL, 0, 0); if (retVal != 0) { freeaddrinfo(addrInfo); -#if defined(WIN32) +#if defined(__WINDOWS__) SLog(EWarn, "Could not retrieve the computer's fully " "qualified domain name: error %i!", WSAGetLastError()); #else @@ -304,7 +332,7 @@ std::string formatString(const char *fmt, ...) 
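The getPrivateMemoryUsage() function added to util.cpp above returns the process-private footprint in bytes (private RAM plus swap on Windows, resident size on OS X, VmRSS plus VmSwap on Linux) and 0 when the query fails. A minimal usage sketch (not part of the patch; the umbrella header name is an assumption, and memString() refers to the variant with a 'precise' flag that this patch re-adds further below):

    #include <mitsuba/mitsuba.h>   /* assumed umbrella include for util.h and the logger */

    using namespace mitsuba;

    static void logMemoryFootprint() {
        /* Process-private bytes; 0 indicates that the platform query failed */
        size_t bytes = getPrivateMemoryUsage();
        SLog(EInfo, "Private memory usage: %s", memString(bytes, false).c_str());
    }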
{ char tmp[512]; va_list iterator; -#if defined(WIN32) +#if defined(__WINDOWS__) va_start(iterator, fmt); size_t size = _vscprintf(fmt, iterator) + 1; @@ -458,7 +486,7 @@ bool solveQuadraticDouble(double a, double b, double c, double &x0, double &x1) bool solveLinearSystem2x2(const Float a[2][2], const Float b[2], Float x[2]) { Float det = a[0][0] * a[1][1] - a[0][1] * a[1][0]; - if (det == 0) + if (std::abs(det) <= RCPOVERFLOW) return false; Float inverse = (Float) 1.0f / det; @@ -469,377 +497,6 @@ bool solveLinearSystem2x2(const Float a[2][2], const Float b[2], Float x[2]) { return true; } -Float interpCubic1D(Float x, const Float *data, Float min, Float max, size_t size) { - /* Give up when given an out-of-range or NaN argument */ - if (!(x >= min && x <= max)) - return 0.0f; - - /* Transform 'x' so that knots lie at integer positions */ - Float t = ((x - min) * (size - 1)) / (max - min); - - /* Find the index of the left knot in the queried subinterval, be - robust to cases where 't' lies exactly on the right endpoint */ - size_t k = std::max((size_t) 0, std::min((size_t) t, size - 2)); - - Float f0 = data[k], - f1 = data[k+1], - d0, d1; - - /* Approximate the derivatives */ - if (k > 0) - d0 = 0.5f * (data[k+1] - data[k-1]); - else - d0 = data[k+1] - data[k]; - - if (k + 2 < size) - d1 = 0.5f * (data[k+2] - data[k]); - else - d1 = data[k+1] - data[k]; - - /* Compute the relative position within the interval */ - t = t - (Float) k; - - Float t2 = t*t, t3 = t2*t; - - return - ( 2*t3 - 3*t2 + 1) * f0 + - (-2*t3 + 3*t2) * f1 + - ( t3 - 2*t2 + t) * d0 + - ( t3 - t2) * d1; -} - -Float interpCubic1DIrregular(Float x, const Float *nodes, const Float *data, size_t size) { - /* Give up when given an out-of-range or NaN argument */ - if (!(x >= nodes[0] && x <= nodes[size-1])) - return 0.0f; - - size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size - 2, - std::lower_bound(nodes, nodes + size, x) - nodes - 1)); - - Float f0 = data[k], - f1 = data[k+1], - width = nodes[k+1] - nodes[k], - invWidth = 1.0f / width, - d0, d1; - - /* Approximate the derivatives */ - if (k > 0) - d0 = (f1 - data[k-1]) / (nodes[k+1] - nodes[k-1]); - else - d0 = (f1 - f0) * invWidth; - - if (k + 2 < size) - d1 = (data[k+2] - f0) / (nodes[k+2] - nodes[k]); - else - d1 = (f1 - f0) * invWidth; - - Float t = (x - nodes[k]) * invWidth; - Float t2 = t*t, t3 = t2*t; - - return - ( 2*t3 - 3*t2 + 1) * f0 + - (-2*t3 + 3*t2) * f1 + - (( t3 - 2*t2 + t) * d0 + - ( t3 - t2) * d1) * width; -} - - -Float interpCubic2D(const Point2 &p, const Float *data, - const Point2 &min, const Point2 &max, const Size2 &size) { - Float knotWeights[2][4]; - Size2 knot; - - /* Compute interpolation weights separately for each dimension */ - for (int dim=0; dim<2; ++dim) { - Float *weights = knotWeights[dim]; - /* Give up when given an out-of-range or NaN argument */ - if (!(p[dim] >= min[dim] && p[dim] <= max[dim])) - return 0.0f; - - /* Transform 'p' so that knots lie at integer positions */ - Float t = ((p[dim] - min[dim]) * (size[dim] - 1)) - / (max[dim]-min[dim]); - - /* Find the index of the left knot in the queried subinterval, be - robust to cases where 't' lies exactly on the right endpoint */ - knot[dim] = std::min((size_t) t, size[dim] - 2); - - /* Compute the relative position within the interval */ - t = t - (Float) knot[dim]; - - /* Compute node weights */ - Float t2 = t*t, t3 = t2*t; - weights[0] = 0.0f; - weights[1] = 2*t3 - 3*t2 + 1; - weights[2] = -2*t3 + 3*t2; - weights[3] = 0.0f; - - /* Derivative weights */ - Float d0 
= t3 - 2*t2 + t, - d1 = t3 - t2; - - /* Turn derivative weights into node weights using - an appropriate chosen finite differences stencil */ - if (knot[dim] > 0) { - weights[2] += 0.5f * d0; - weights[0] -= 0.5f * d0; - } else { - weights[2] += d0; - weights[1] -= d0; - } - - if (knot[dim] + 2 < size[dim]) { - weights[3] += 0.5f * d1; - weights[1] -= 0.5f * d1; - } else { - weights[2] += d1; - weights[1] -= d1; - } - } - - Float result = 0.0f; - for (int y=-1; y<=2; ++y) { - Float wy = knotWeights[1][y+1]; - for (int x=-1; x<=2; ++x) { - Float wxy = knotWeights[0][x+1] * wy; - - if (wxy == 0) - continue; - - size_t pos = (knot[1] + y) * size[0] + knot[0] + x; - - result += data[pos] * wxy; - } - } - return result; -} - -Float interpCubic2DIrregular(const Point2 &p, const Float **nodes_, - const Float *data, const Size2 &size) { - Float knotWeights[2][4]; - Size2 knot; - - /* Compute interpolation weights separately for each dimension */ - for (int dim=0; dim<2; ++dim) { - const Float *nodes = nodes_[dim]; - Float *weights = knotWeights[dim]; - - /* Give up when given an out-of-range or NaN argument */ - if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1])) - return 0.0f; - - /* Find the index of the left knot in the queried subinterval, be - robust to cases where 't' lies exactly on the right endpoint */ - size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2, - std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1)); - knot[dim] = k; - - Float width = nodes[k+1] - nodes[k], invWidth = 1 / width; - - /* Compute the relative position within the interval */ - Float t = (p[dim] - nodes[k]) * invWidth, - t2 = t*t, t3 = t2*t; - - /* Compute node weights */ - weights[0] = 0.0f; - weights[1] = 2*t3 - 3*t2 + 1; - weights[2] = -2*t3 + 3*t2; - weights[3] = 0.0f; - - /* Derivative weights */ - Float d0 = (t3 - 2*t2 + t) * width, - d1 = (t3 - t2) * width; - - /* Turn derivative weights into node weights using - an appropriate chosen finite differences stencil */ - if (k > 0) { - Float factor = 1 / (nodes[k+1]-nodes[k-1]); - weights[2] += d0 * factor; - weights[0] -= d0 * factor; - } else { - weights[2] += d0 * invWidth; - weights[1] -= d0 * invWidth; - } - - if (k + 2 < size[dim]) { - Float factor = 1 / (nodes[k+2]-nodes[k]); - weights[3] += d1 * factor; - weights[1] -= d1 * factor; - } else { - weights[2] += d1 * invWidth; - weights[1] -= d1 * invWidth; - } - } - - Float result = 0.0f; - for (int y=-1; y<=2; ++y) { - Float wy = knotWeights[1][y+1]; - for (int x=-1; x<=2; ++x) { - Float wxy = knotWeights[0][x+1] * wy; - - if (wxy == 0) - continue; - - size_t pos = (knot[1] + y) * size[0] + knot[0] + x; - - result += data[pos] * wxy; - } - } - return result; -} - -Float interpCubic3D(const Point3 &p, const Float *data, - const Point3 &min, const Point3 &max, const Size3 &size) { - Float knotWeights[3][4]; - Size3 knot; - - /* Compute interpolation weights separately for each dimension */ - for (int dim=0; dim<3; ++dim) { - Float *weights = knotWeights[dim]; - /* Give up when given an out-of-range or NaN argument */ - if (!(p[dim] >= min[dim] && p[dim] <= max[dim])) - return 0.0f; - - /* Transform 'p' so that knots lie at integer positions */ - Float t = ((p[dim] - min[dim]) * (size[dim] - 1)) - / (max[dim]-min[dim]); - - /* Find the index of the left knot in the queried subinterval, be - robust to cases where 't' lies exactly on the right endpoint */ - knot[dim] = std::min((size_t) t, size[dim] - 2); - - /* Compute the relative position within the interval */ - t 
= t - (Float) knot[dim]; - - /* Compute node weights */ - Float t2 = t*t, t3 = t2*t; - weights[0] = 0.0f; - weights[1] = 2*t3 - 3*t2 + 1; - weights[2] = -2*t3 + 3*t2; - weights[3] = 0.0f; - - /* Derivative weights */ - Float d0 = t3 - 2*t2 + t, - d1 = t3 - t2; - - /* Turn derivative weights into node weights using - an appropriate chosen finite differences stencil */ - if (knot[dim] > 0) { - weights[2] += 0.5f * d0; - weights[0] -= 0.5f * d0; - } else { - weights[2] += d0; - weights[1] -= d0; - } - - if (knot[dim] + 2 < size[dim]) { - weights[3] += 0.5f * d1; - weights[1] -= 0.5f * d1; - } else { - weights[2] += d1; - weights[1] -= d1; - } - } - - Float result = 0.0f; - for (int z=-1; z<=2; ++z) { - Float wz = knotWeights[2][z+1]; - for (int y=-1; y<=2; ++y) { - Float wyz = knotWeights[1][y+1] * wz; - for (int x=-1; x<=2; ++x) { - Float wxyz = knotWeights[0][x+1] * wyz; - - if (wxyz == 0) - continue; - - size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y)) - * size[0] + knot[0] + x; - - result += data[pos] * wxyz; - } - } - } - return result; -} - -Float interpCubic3DIrregular(const Point3 &p, const Float **nodes_, - const Float *data, const Size3 &size) { - Float knotWeights[3][4]; - Size3 knot; - - /* Compute interpolation weights separately for each dimension */ - for (int dim=0; dim<3; ++dim) { - const Float *nodes = nodes_[dim]; - Float *weights = knotWeights[dim]; - - /* Give up when given an out-of-range or NaN argument */ - if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1])) - return 0.0f; - - /* Find the index of the left knot in the queried subinterval, be - robust to cases where 't' lies exactly on the right endpoint */ - size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2, - std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1)); - knot[dim] = k; - - Float width = nodes[k+1] - nodes[k], invWidth = 1 / width; - - /* Compute the relative position within the interval */ - Float t = (p[dim] - nodes[k]) * invWidth, - t2 = t*t, t3 = t2*t; - - /* Compute node weights */ - weights[0] = 0.0f; - weights[1] = 2*t3 - 3*t2 + 1; - weights[2] = -2*t3 + 3*t2; - weights[3] = 0.0f; - - /* Derivative weights */ - Float d0 = (t3 - 2*t2 + t) * width, - d1 = (t3 - t2) * width; - - /* Turn derivative weights into node weights using - an appropriate chosen finite differences stencil */ - if (k > 0) { - Float factor = 1 / (nodes[k+1]-nodes[k-1]); - weights[2] += d0 * factor; - weights[0] -= d0 * factor; - } else { - weights[2] += d0 * invWidth; - weights[1] -= d0 * invWidth; - } - - if (k + 2 < size[dim]) { - Float factor = 1 / (nodes[k+2]-nodes[k]); - weights[3] += d1 * factor; - weights[1] -= d1 * factor; - } else { - weights[2] += d1 * invWidth; - weights[1] -= d1 * invWidth; - } - } - - Float result = 0.0f; - for (int z=-1; z<=2; ++z) { - Float wz = knotWeights[2][z+1]; - for (int y=-1; y<=2; ++y) { - Float wyz = knotWeights[1][y+1] * wz; - for (int x=-1; x<=2; ++x) { - Float wxyz = knotWeights[0][x+1] * wyz; - - if (wxyz == 0) - continue; - - size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y)) - * size[0] + knot[0] + x; - - result += data[pos] * wxyz; - } - } - } - return result; -} - void stratifiedSample1D(Random *random, Float *dest, int count, bool jitter) { Float invCount = 1.0f / count; @@ -957,18 +614,84 @@ Float fresnelDielectricExt(Float cosThetaI_, Float &cosThetaT_, Float eta) { return 0.5f * (Rs * Rs + Rp * Rp); } -Spectrum fresnelConductor(Float cosThetaI, const Spectrum &eta, const Spectrum &k) { - Spectrum tmp = (eta*eta + k*k) * 
(cosThetaI * cosThetaI); +Float fresnelConductorApprox(Float cosThetaI, Float eta, Float k) { + Float cosThetaI2 = cosThetaI*cosThetaI; - Spectrum rParl2 = (tmp - (eta * (2.0f * cosThetaI)) + Spectrum(1.0f)) - / (tmp + (eta * (2.0f * cosThetaI)) + Spectrum(1.0f)); + Float tmp = (eta*eta + k*k) * cosThetaI2; + + Float Rp2 = (tmp - (eta * (2 * cosThetaI)) + 1) + / (tmp + (eta * (2 * cosThetaI)) + 1); + + Float tmpF = eta*eta + k*k; + + Float Rs2 = (tmpF - (eta * (2 * cosThetaI)) + cosThetaI2) / + (tmpF + (eta * (2 * cosThetaI)) + cosThetaI2); + + return 0.5f * (Rp2 + Rs2); +} + +Spectrum fresnelConductorApprox(Float cosThetaI, const Spectrum &eta, const Spectrum &k) { + Float cosThetaI2 = cosThetaI*cosThetaI; + + Spectrum tmp = (eta*eta + k*k) * cosThetaI2; + + Spectrum Rp2 = (tmp - (eta * (2 * cosThetaI)) + Spectrum(1.0f)) + / (tmp + (eta * (2 * cosThetaI)) + Spectrum(1.0f)); Spectrum tmpF = eta*eta + k*k; - Spectrum rPerp2 = (tmpF - (eta * (2.0f * cosThetaI)) + Spectrum(cosThetaI*cosThetaI)) / - (tmpF + (eta * (2.0f * cosThetaI)) + Spectrum(cosThetaI*cosThetaI)); + Spectrum Rs2 = (tmpF - (eta * (2 * cosThetaI)) + Spectrum(cosThetaI2)) / + (tmpF + (eta * (2 * cosThetaI)) + Spectrum(cosThetaI2)); - return (rParl2 + rPerp2) / 2.0f; + return 0.5f * (Rp2 + Rs2); +} + +Float fresnelConductorExact(Float cosThetaI, Float eta, Float k) { + /* Modified from "Optics" by K.D. Moeller, University Science Books, 1988 */ + + Float cosThetaI2 = cosThetaI*cosThetaI, + sinThetaI2 = 1-cosThetaI2, + sinThetaI4 = sinThetaI2*sinThetaI2; + + Float temp1 = eta*eta - k*k - sinThetaI2, + a2pb2 = math::safe_sqrt(temp1*temp1 + 4*k*k*eta*eta), + a = math::safe_sqrt(0.5f * (a2pb2 + temp1)); + + Float term1 = a2pb2 + cosThetaI2, + term2 = 2*a*cosThetaI; + + Float Rs2 = (term1 - term2) / (term1 + term2); + + Float term3 = a2pb2*cosThetaI2 + sinThetaI4, + term4 = term2*sinThetaI2; + + Float Rp2 = Rs2 * (term3 - term4) / (term3 + term4); + + return 0.5f * (Rp2 + Rs2); +} + +Spectrum fresnelConductorExact(Float cosThetaI, const Spectrum &eta, const Spectrum &k) { + /* Modified from "Optics" by K.D. Moeller, University Science Books, 1988 */ + + Float cosThetaI2 = cosThetaI*cosThetaI, + sinThetaI2 = 1-cosThetaI2, + sinThetaI4 = sinThetaI2*sinThetaI2; + + Spectrum temp1 = eta*eta - k*k - Spectrum(sinThetaI2), + a2pb2 = (temp1*temp1 + k*k*eta*eta*4).safe_sqrt(), + a = ((a2pb2 + temp1) * 0.5f).safe_sqrt(); + + Spectrum term1 = a2pb2 + Spectrum(cosThetaI2), + term2 = a*(2*cosThetaI); + + Spectrum Rs2 = (term1 - term2) / (term1 + term2); + + Spectrum term3 = a2pb2*cosThetaI2 + Spectrum(sinThetaI4), + term4 = term2*sinThetaI2; + + Spectrum Rp2 = Rs2 * (term3 - term4) / (term3 + term4); + + return 0.5f * (Rp2 + Rs2); } Vector reflect(const Vector &wi, const Normal &n) { @@ -1073,8 +796,6 @@ Float fresnelDiffuseReflectance(Float eta, bool fast) { } std::string timeString(Float time, bool precise) { - std::ostringstream os; - if (std::isnan(time) || std::isinf(time)) return "inf"; @@ -1089,12 +810,31 @@ std::string timeString(Float time, bool precise) { } } + std::ostringstream os; os << std::setprecision(precise ? 4 : 1) << std::fixed << time << suffix; return os.str(); } +std::string memString(size_t size, bool precise) { + Float value = (Float) size; + const char *suffixes[] = { + "B", "KiB", "MiB", "GiB", "TiB", "PiB" + }; + int suffix = 0; + while (suffix < 5 && value > 1024.0f) { + value /= 1024.0f; ++suffix; + } + + std::ostringstream os; + os << std::setprecision(suffix == 0 ? 0 : (precise ? 
4 : 1)) + << std::fixed << value << suffixes[suffix]; + + return os.str(); +} + + Float hypot2(Float a, Float b) { Float r; if (std::abs(a) > std::abs(b)) { diff --git a/src/libcore/warp.cpp b/src/libcore/warp.cpp index 349c776b..5a5b147b 100644 --- a/src/libcore/warp.cpp +++ b/src/libcore/warp.cpp @@ -49,7 +49,6 @@ Vector Warp::squareToCosineHemisphere(const Point2 &sample) { return Vector(p.x, p.y, z); } - Vector Warp::squareToUniformCone(Float cosCutoff, const Point2 &sample) { Float cosTheta = (1-sample.x) + sample.x * cosCutoff; Float sinTheta = math::safe_sqrt(1.0f - cosTheta * cosTheta); @@ -81,24 +80,23 @@ Point2 Warp::squareToUniformDiskConcentric(const Point2 &sample) { Float r1 = 2.0f*sample.x - 1.0f; Float r2 = 2.0f*sample.y - 1.0f; - Point2 coords; + /* Modified concencric map code with less branching (by Dave Cline), see + http://psgraphics.blogspot.ch/2011/01/improved-code-for-concentric-map.html */ + Float phi, r; if (r1 == 0 && r2 == 0) { - coords = Point2(0, 0); - } else if (r1 > -r2) { /* Regions 1/2 */ - if (r1 > r2) - coords = Point2(r1, (M_PI/4.0f) * r2/r1); - else - coords = Point2(r2, (M_PI/4.0f) * (2.0f - r1/r2)); - } else { /* Regions 3/4 */ - if (r1 r2*r2) { + r = r1; + phi = (M_PI/4.0f) * (r2/r1); + } else { + r = r2; + phi = (M_PI/2.0f) - (r1/r2) * (M_PI/4.0f); } - Point2 result; - math::sincos(coords.y, &result.y, &result.x); - return result*coords.x; + Float cosPhi, sinPhi; + math::sincos(phi, &sinPhi, &cosPhi); + + return Point2(r * cosPhi, r * sinPhi); } Point2 Warp::uniformDiskToSquareConcentric(const Point2 &p) { @@ -137,7 +135,7 @@ Point2 Warp::squareToStdNormal(const Point2 &sample) { } Float Warp::squareToStdNormalPdf(const Point2 &pos) { - return INV_TWOPI * std::exp(-(pos.x*pos.x + pos.y*pos.y)/2.0f); + return INV_TWOPI * math::fastexp(-(pos.x*pos.x + pos.y*pos.y)/2.0f); } static Float intervalToTent(Float sample) { @@ -161,6 +159,18 @@ Point2 Warp::squareToTent(const Point2 &sample) { ); } +Float Warp::intervalToNonuniformTent(Float a, Float b, Float c, Float sample) { + Float factor; + if (sample * (c-a) < b-a) { + factor = a-b; + sample *= (a-c)/(a-b); + } else { + factor = c-b; + sample = (a-c)/(b-c) * (sample - (a-b)/(a-c)); + } + + return b + factor * (1-math::safe_sqrt(sample)); +} MTS_NAMESPACE_END diff --git a/src/libhw/SConscript b/src/libhw/SConscript index c0ca0c82..34ef87af 100644 --- a/src/libhw/SConscript +++ b/src/libhw/SConscript @@ -1,8 +1,8 @@ Import('env', 'sys', 'os') libhw_objects = [ - 'session.cpp', 'device.cpp', 'gputexture.cpp', 'gpugeometry.cpp', - 'gpuprogram.cpp', 'renderer.cpp', 'glrenderer.cpp', 'glprogram.cpp', + 'session.cpp', 'device.cpp', 'gputexture.cpp', 'gpugeometry.cpp', + 'gpuprogram.cpp', 'renderer.cpp', 'glrenderer.cpp', 'glprogram.cpp', 'glgeometry.cpp', 'gltexture.cpp', 'gpusync.cpp', 'glsync.cpp', 'vpl.cpp', 'font.cpp', 'viewer.cpp', 'basicshader.cpp', 'shadow.cpp'] @@ -14,7 +14,7 @@ if sys.platform == 'win32': elif sys.platform == 'linux2': libhw_objects += ['x11session.cpp', 'x11device.cpp', - 'glxdevice.cpp', + 'glxdevice.cpp', 'glxrenderer.cpp'] glEnv = env.Clone() diff --git a/src/libhw/font.cpp b/src/libhw/font.cpp index dc705f3c..5349fd72 100644 --- a/src/libhw/font.cpp +++ b/src/libhw/font.cpp @@ -74,6 +74,70 @@ Font::Font(EFont font) { dscStream->read(m_kerningMatrix, 256*256); } +void Font::convert(Bitmap::EPixelFormat pixelFormat, Bitmap::EComponentFormat componentFormat, Float gamma) { + m_bitmap = m_bitmap->convert(pixelFormat, componentFormat, gamma); +} + +void Font::drawText(Bitmap *dest, 
Point2i pos, const std::string &text) const { + int initial = pos.x; + + for (size_t i=0; igetWidth()), + (int) (glyph.tx.y * m_bitmap->getHeight())); + + dest->accumulate(m_bitmap.get(), sourceOffset, targetOffset, glyph.size); + + pos.x += glyph.horizontalAdvance; + + if (i+1 < text.length()) + pos.x += getKerning(character, text[i+1]); + } +} + +Vector2i Font::getSize(const std::string &text) const { + Vector2i size(0, getMaxVerticalBearing()); + int pos = 0; + + for (size_t i=0; icreateGPUTexture(m_name, m_bitmap); m_texture->setFilterType(GPUTexture::ENearest); diff --git a/src/libhw/glrenderer.cpp b/src/libhw/glrenderer.cpp index 1c8c6083..cbe01eec 100644 --- a/src/libhw/glrenderer.cpp +++ b/src/libhw/glrenderer.cpp @@ -157,7 +157,7 @@ void GLRenderer::init(Device *device, Renderer *other) { Log(m_warnLogLevel, "Capabilities: Vertex buffer objects are NOT supported!"); } - if (glewIsSupported("GL_EXT_geometry_shader4")) { + if (glewIsSupported("GL_EXT_geometry_shader4") && glewIsSupported("GL_EXT_gpu_shader4")) { m_capabilities->setSupported( RendererCapabilities::EGeometryShaders, true); Log(m_logLevel, "Capabilities: Geometry shaders are supported."); diff --git a/src/libhw/vpl.cpp b/src/libhw/vpl.cpp index 79cd24e4..877d7715 100644 --- a/src/libhw/vpl.cpp +++ b/src/libhw/vpl.cpp @@ -146,6 +146,7 @@ void VPLShaderManager::setScene(const Scene *scene) { m_geometry.reserve(shapes.size()); m_opaqueGeometry.clear(); m_opaqueGeometry.reserve(shapes.size()); + m_animatedGeometry.clear(); Matrix4x4 identityTrafo; identityTrafo.setIdentity(); @@ -158,7 +159,8 @@ void VPLShaderManager::setScene(const Scene *scene) { const Instance *instance = static_cast(shape); const std::vector &instantiatedShapes = instance->getShapeGroup()->getKDTree()->getShapes(); - const Matrix4x4 &trafo = instance->getWorldTransform().getMatrix(); + const AnimatedTransform *atrafo = instance->getWorldTransform(); + const Matrix4x4 &trafo = atrafo->eval(0).getMatrix(); for (size_t j=0; jsetShader(shader); + ssize_t geometryIndex = (ssize_t) m_geometry.size(), opaqueGeometryIndex = -1; m_geometry.push_back(std::make_pair(gpuGeo, trafo)); - if (shader && !(shader->getFlags() & Shader::ETransparent)) + if (shader && !(shader->getFlags() & Shader::ETransparent)) { + opaqueGeometryIndex = (ssize_t) m_opaqueGeometry.size(); m_opaqueGeometry.push_back(std::make_pair(gpuGeo, trafo)); + } + + if (!atrafo->isStatic()) { + m_animatedGeometry.push_back(AnimatedGeometryRecord(atrafo, + geometryIndex, opaqueGeometryIndex)); + } } } else { GPUGeometry *gpuGeo = m_renderer->registerGeometry(shape); @@ -233,8 +243,40 @@ void VPLShaderManager::setScene(const Scene *scene) { m_backgroundParam_emitterScale = prog->getParameterID("emitterScale", false); } + std::vector geometryPermutation(m_geometry.size()), + opaqueGeometryPermutation(m_opaqueGeometry.size()); + + for (size_t i=0; i geometryPermutationInv(m_geometry.size()), + opaqueGeometryPermutationInv(m_opaqueGeometry.size()); + + for (size_t i=0; i= 0) + agRec.geometryIndex = geometryPermutationInv[agRec.geometryIndex]; + if (agRec.opaqueGeometryIndex >= 0) + agRec.opaqueGeometryIndex = opaqueGeometryPermutationInv[agRec.opaqueGeometryIndex]; + } + } + + permute_inplace(&m_geometry[0], geometryPermutation); + permute_inplace(&m_opaqueGeometry[0], opaqueGeometryPermutation); } void VPLShaderManager::setVPL(const VPL &vpl) { @@ -244,6 +286,18 @@ void VPLShaderManager::setVPL(const VPL &vpl) { m_nearClip = std::numeric_limits::infinity(); m_farClip = -std::numeric_limits::infinity(); 
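Because setScene() above reorders m_geometry and m_opaqueGeometry after filling them, the indices cached in each AnimatedGeometryRecord are translated through the inverse permutation before the arrays are permuted in place. A small sketch of that remapping step (not from the patch; the convention that perm[i] holds the pre-sort index of the element now stored at slot i is an assumption):

    #include <cstddef>
    #include <vector>

    /* perm[i] = pre-sort index of the element that now lives at position i */
    static std::vector<size_t> invertPermutation(const std::vector<size_t> &perm) {
        std::vector<size_t> inv(perm.size());
        for (size_t i = 0; i < perm.size(); ++i)
            inv[perm[i]] = i;                 /* old position -> new position */
        return inv;
    }

    /* A record that stored oldIndex before sorting is fixed up via
       newIndex = inv[oldIndex], which is what the geometryPermutationInv
       lookups above perform for geometryIndex and opaqueGeometryIndex. */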
+ /* Update animations */ + for (size_t i=0; ieval(vpl.its.time).getMatrix(); + + if (agRec.geometryIndex >= 0) + m_geometry[agRec.geometryIndex].second = matrix; + + if (agRec.opaqueGeometryIndex >= 0) + m_opaqueGeometry[agRec.opaqueGeometryIndex].second = matrix; + } + if (vpl.type != EDirectionalEmitterVPL) { /* Trace a few rays from the VPL to estimate a suitable depth range */ for (size_t i=0; i(); \ Name ##_struct +#define BP_SUBSTRUCT(Name, Base, Init) \ + bp::class_ > Name ##_struct(#Name, Init); \ + bp::register_ptr_to_python(); \ + Name ##_struct + #define BP_CLASS(Name, Base, Init) \ bp::class_, bp::bases, boost::noncopyable> Name ##_class(#Name, Init); \ bp::register_ptr_to_python(); \ @@ -59,6 +64,8 @@ .def(bp::self / Scalar()) \ .def(bp::self /= Scalar()) \ .def("serialize", &Name::serialize) \ + .def("length", &Name::length) \ + .def("lengthSquared", &Name::lengthSquared) \ .def("__repr__", &Name::toString) \ .def("__len__", &FixedSizeSupport::len) \ .def("__getitem__", &FixedSizeSupport::get) \ @@ -150,6 +157,7 @@ public: namespace mitsuba { class SerializableObject; + class ConfigurableObject; }; typedef std::vector StringVector; @@ -158,6 +166,7 @@ typedef std::map StringMap; extern void export_core(); extern void export_render(); +extern bp::object cast(mitsuba::ConfigurableObject *obj); #endif /* __PYTHON_BASE_H */ diff --git a/src/libpython/core.cpp b/src/libpython/core.cpp index fe404641..1ef776ff 100644 --- a/src/libpython/core.cpp +++ b/src/libpython/core.cpp @@ -21,17 +21,6 @@ #include #include -#if defined(__LINUX__) -# if !defined(_GNU_SOURCE) -# define _GNU_SOURCE -# endif -# include -#elif defined(__OSX__) -# include -#elif defined(__WINDOWS__) -# include -#endif - using namespace mitsuba; void initializeFramework() { @@ -48,54 +37,6 @@ void initializeFramework() { Scheduler::staticInitialization(); SHVector::staticInitialization(); SceneHandler::staticInitialization(); - - fs::path basePath; - - /* Try to detect the base path of the Mitsuba installation */ - #if defined(__LINUX__) - Dl_info info; - dladdr((void *) &initializeFramework, &info); - if (info.dli_fname) { - /* Try to detect a few default setups */ - if (boost::starts_with(info.dli_fname, "/usr/lib")) { - basePath = fs::path("/usr/share/mitsuba"); - } else if (boost::starts_with(info.dli_fname, "/usr/local/lib")) { - basePath = fs::path("/usr/local/share/mitsuba"); - } else { - /* This is a locally-compiled repository */ - basePath = fs::path(info.dli_fname).parent_path().parent_path().parent_path(); - } - } - #elif defined(__OSX__) - uint32_t imageCount = _dyld_image_count(); - for (uint32_t i=0; i lpFilename(MAX_PATH); - if (GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | - GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, (LPCSTR) &initializeFramework, &hm)) { - std::vector lpFilename(MAX_PATH); - - // Try to get the path with the default MAX_PATH length (260 chars) - DWORD nSize = GetModuleFileNameW(hm, &lpFilename[0], MAX_PATH); - - // Adjust the buffer size in case if was too short - while (nSize == lpFilename.size()) { - lpFilename.resize(nSize * 2); - nSize = GetModuleFileNameW(hm, &lpFilename[0], nSize); - } - - if (nSize) - basePath = fs::path(lpFilename).parent_path().parent_path().parent_path(); - } - #endif - - if (!basePath.empty()) - Thread::getThread()->getFileResolver()->prependPath(basePath); } void shutdownFramework() { @@ -175,7 +116,7 @@ public: bp::extract extractTransform(value); bp::extract extractSpectrum(value); - if (extractString.check()) { + if 
(extractString.check()){ props.setString(name, extractString()); } else if (extractBoolean.check() && PyObject_IsInstance(value.ptr(), (PyObject *) &PyBool_Type)) { props.setBoolean(name, extractBoolean()); @@ -205,6 +146,12 @@ struct path_to_python_str { }; +struct TSpectrum_to_Spectrum { + static PyObject* convert(const TSpectrum &spectrum) { + return bp::incref(bp::object(Spectrum(spectrum)).ptr()); + } +}; + static void Matrix4x4_setItem(Matrix4x4 *matrix, bp::tuple tuple, Float value) { if (bp::len(tuple) != 2) SLog(EError, "Invalid matrix indexing operation, required a tuple of length 2"); @@ -341,6 +288,12 @@ static bp::tuple spectrum_toXYZ(const Spectrum &s) { return bp::make_tuple(x, y, z); } +static bp::tuple spectrum_toIPT(const Spectrum &s) { + Float I, P, T; + s.toIPT(I, P, T); + return bp::make_tuple(I, P, T); +} + void aabb_expandby_aabb(AABB *aabb, const AABB &aabb2) { aabb->expandBy(aabb2); } void aabb_expandby_point(AABB *aabb, const Point &p) { aabb->expandBy(p); } Float aabb_distanceto_aabb(AABB *aabb, const AABB &aabb2) { return aabb->distanceTo(aabb2); } @@ -374,7 +327,6 @@ Point transform_mul_point(Transform *transform, const Point &point) { return tra Ray transform_mul_ray(Transform *transform, const Ray &ray) { return transform->operator()(ray); } Transform transform_mul_transform(Transform *transform, const Transform &other) { return *transform * other; } - bp::object cast(ConfigurableObject *obj) { const Class *cls = obj->getClass(); #define TryCast(ClassName) if (cls->derivesFrom(MTS_CLASS(ClassName))) \ @@ -388,6 +340,7 @@ bp::object cast(ConfigurableObject *obj) { TryCast(Medium); TryCast(VolumeDataSource); TryCast(Film); + TryCast(ProjectiveCamera); TryCast(Sensor); TryCast(Emitter); TryCast(Sampler); @@ -472,6 +425,43 @@ Vector refract3(const Vector &wi, const Normal &n, Float eta) { return refract(wi, n, eta); } +void bitmap_applyMatrix(Bitmap *bitmap, bp::list list) { + int length = bp::len(list); + if (length != 9) + SLog(EError, "Require a color matrix specified as a list with 9 entries!"); + + Float matrix[3][3]; + + int idx = 0; + for (int i=0; i<3; ++i) + for (int j=0; j<3; ++j) + matrix[i][j] = bp::extract(list[idx++]); + + bitmap->applyMatrix(matrix); +} + +void bitmap_write(Bitmap *bitmap, Bitmap::EFileFormat fmt, Stream *stream) { + bitmap->write(fmt, stream); +} + +ref bitmap_convert_1(Bitmap *bitmap, Bitmap::EPixelFormat pixelFormat, Bitmap::EComponentFormat componentFormat, + Float gamma, Float multiplier, Spectrum::EConversionIntent intent) { + return bitmap->convert(pixelFormat, componentFormat, gamma, multiplier, intent); +} + +ref bitmap_convert_2(Bitmap *bitmap, Bitmap::EPixelFormat pixelFormat, Bitmap::EComponentFormat componentFormat, + Float gamma, Float multiplier) { + return bitmap->convert(pixelFormat, componentFormat, gamma, multiplier); +} + +ref bitmap_convert_3(Bitmap *bitmap, Bitmap::EPixelFormat pixelFormat, Bitmap::EComponentFormat componentFormat, + Float gamma) { + return bitmap->convert(pixelFormat, componentFormat, gamma); +} + +ref bitmap_convert_4(Bitmap *bitmap, Bitmap::EPixelFormat pixelFormat, Bitmap::EComponentFormat componentFormat) { + return bitmap->convert(pixelFormat, componentFormat); +} Transform transform_glOrthographic1(Float clipNear, Float clipFar) { return Transform::glOrthographic(clipNear, clipFar); @@ -483,12 +473,24 @@ Transform transform_glOrthographic2(Float clipLeft, Float clipRight, clipBottom, clipTop, clipNear, clipFar); } - BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fromLinearRGB_overloads, 
fromLinearRGB, 3, 4) BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fromXYZ_overloads, fromXYZ, 3, 4) +BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fromIPT_overloads, fromIPT, 3, 4) + +#define IMPLEMENT_ANIMATION_TRACK(Name) \ + BP_CLASS(Name, AbstractAnimationTrack, (bp::init())) \ + .def(bp::init()) \ + .def("reserve", &Name::reserve) \ + .def("prependTransformation", &Name::prependTransformation) \ + .def("appendTransformation", &Name::appendTransformation) \ + .def("eval", &Name::eval, BP_RETURN_VALUE) \ + .def("setValue", &Name::setValue) \ + .def("getValue", &Name::getValue, BP_RETURN_VALUE) \ + .def("append", &Name::append) void export_core() { bp::to_python_converter(); + bp::to_python_converter, TSpectrum_to_Spectrum>(); bp::implicitly_convertible(); bp::object coreModule( @@ -708,19 +710,28 @@ void export_core() { .def("clear", &InterpolatedSpectrum::clear) .def("zeroExtend", &InterpolatedSpectrum::zeroExtend); + void (Bitmap::*accumulate_1)(const Bitmap *bitmap, Point2i sourceOffset, Point2i targetOffset, Vector2i size) = &Bitmap::accumulate; + void (Bitmap::*accumulate_2)(const Bitmap *bitmap, Point2i targetOffset) = &Bitmap::accumulate; + const Properties &(Bitmap::*get_metadata)() const = &Bitmap::getMetadata; + BP_CLASS(Bitmap, Object, (bp::init())) .def(bp::init()) .def(bp::init()) .def("clone", &Bitmap::clone, BP_RETURN_VALUE) + .def("clear", &Bitmap::clear) .def("separateChannel", &Bitmap::separateChannel, BP_RETURN_VALUE) .def("expand", &Bitmap::expand, BP_RETURN_VALUE) .def("flipVertically", &Bitmap::flipVertically) .def("crop", &Bitmap::crop) - .def("accumulate", &Bitmap::accumulate) - .def("clear", &Bitmap::clear) - .def("write", &Bitmap::write) - .def("setString", &Bitmap::setString) - .def("getString", &Bitmap::getString, BP_RETURN_VALUE) + .def("applyMatrix", &bitmap_applyMatrix) + .def("colorBalance", &Bitmap::colorBalance) + .def("accumulate", accumulate_1) + .def("accumulate", accumulate_2) + .def("write", &bitmap_write) + .def("setMetadataString", &Bitmap::setMetadataString) + .def("getMetadataString", &Bitmap::getMetadataString, BP_RETURN_VALUE) + .def("setMetadata", &Bitmap::setMetadata) + .def("getMetadata", get_metadata, BP_RETURN_VALUE) .def("setGamma", &Bitmap::setGamma) .def("getGamma", &Bitmap::getGamma) .def("getWidth", &Bitmap::getWidth) @@ -732,9 +743,18 @@ void export_core() { .def("getBitsPerComponent", &Bitmap::getBitsPerComponent) .def("getBytesPerComponent", &Bitmap::getBytesPerComponent) .def("getBytesPerPixel", &Bitmap::getBytesPerPixel) + .def("getBufferSize", &Bitmap::getBufferSize) .def("getPixel", &Bitmap::getPixel, BP_RETURN_VALUE) .def("setPixel", &Bitmap::setPixel) - .def("getSize", &Bitmap::getSize, BP_RETURN_VALUE); + .def("drawHLine", &Bitmap::drawHLine) + .def("drawVLine", &Bitmap::drawVLine) + .def("drawRect", &Bitmap::drawRect) + .def("fillRect", &Bitmap::fillRect) + .def("getSize", &Bitmap::getSize, BP_RETURN_VALUE) + .def("convert", &bitmap_convert_1, BP_RETURN_VALUE) + .def("convert", &bitmap_convert_2, BP_RETURN_VALUE) + .def("convert", &bitmap_convert_3, BP_RETURN_VALUE) + .def("convert", &bitmap_convert_4, BP_RETURN_VALUE); BP_SETSCOPE(Bitmap_class); bp::enum_("EPixelFormat") @@ -745,7 +765,8 @@ void export_core() { .value("ESpectrum", Bitmap::ESpectrum) .value("ESpectrumAlpha", Bitmap::ESpectrumAlpha) .value("ESpectrumAlphaWeight", Bitmap::ESpectrumAlphaWeight) - .value("EMultiChannel", Bitmap::EMultiChannel); + .value("EMultiChannel", Bitmap::EMultiChannel) + .export_values(); bp::enum_("EComponentFormat") .value("EBitmask", 
Bitmap::EBitmask) @@ -763,10 +784,13 @@ void export_core() { .value("EPNG", Bitmap::EPNG) .value("EOpenEXR", Bitmap::EOpenEXR) .value("ETGA", Bitmap::ETGA) + .value("EPFM", Bitmap::EPFM) + .value("ERGBE", Bitmap::ERGBE) .value("EBMP", Bitmap::EBMP) .value("EJPEG", Bitmap::EJPEG) .value("EAuto", Bitmap::EAuto) .export_values(); + BP_SETSCOPE(coreModule); BP_CLASS(FileResolver, Object, bp::init<>()) @@ -882,6 +906,56 @@ void export_core() { .def("isBusy", &Scheduler::isBusy) .staticmethod("getInstance"); + BP_CLASS(AbstractAnimationTrack, Object, bp::no_init) + .def("getType", &AbstractAnimationTrack::getType) + .def("setTime", &AbstractAnimationTrack::setTime) + .def("getTime", &AbstractAnimationTrack::getTime) + .def("getSize", &AbstractAnimationTrack::getSize) + .def("clone", &AbstractAnimationTrack::clone, BP_RETURN_VALUE); + + IMPLEMENT_ANIMATION_TRACK(FloatTrack); + IMPLEMENT_ANIMATION_TRACK(VectorTrack); + IMPLEMENT_ANIMATION_TRACK(PointTrack); + IMPLEMENT_ANIMATION_TRACK(QuatTrack); + + BP_SETSCOPE(AbstractAnimationTrack_class); + bp::enum_("EType") + .value("EInvalid", AbstractAnimationTrack::EInvalid) + .value("ETranslationX", AbstractAnimationTrack::ETranslationX) + .value("ETranslationY", AbstractAnimationTrack::ETranslationY) + .value("ETranslationZ", AbstractAnimationTrack::ETranslationZ) + .value("ETranslationXYZ", AbstractAnimationTrack::ETranslationXYZ) + .value("EScaleX", AbstractAnimationTrack::EScaleX) + .value("EScaleY", AbstractAnimationTrack::EScaleY) + .value("EScaleZ", AbstractAnimationTrack::EScaleZ) + .value("EScaleXYZ", AbstractAnimationTrack::EScaleXYZ) + .value("ERotationX", AbstractAnimationTrack::ERotationX) + .value("ERotationY", AbstractAnimationTrack::ERotationY) + .value("ERotationZ", AbstractAnimationTrack::ERotationZ) + .value("ERotationQuat", AbstractAnimationTrack::ERotationQuat) + .export_values(); + + BP_SETSCOPE(coreModule); + + AbstractAnimationTrack *(AnimatedTransform::*animatedTransform_getTrack)(size_t) = &AnimatedTransform::getTrack; + AbstractAnimationTrack *(AnimatedTransform::*animatedTransform_findTrack)(AbstractAnimationTrack::EType) = &AnimatedTransform::findTrack; + + BP_CLASS(AnimatedTransform, Object, (bp::init())) + .def(bp::init<>()) + .def(bp::init()) + .def(bp::init()) + .def("getTrackCount", &AnimatedTransform::getTrackCount) + .def("findTrack", animatedTransform_findTrack, BP_RETURN_VALUE) + .def("getTrack", animatedTransform_getTrack, BP_RETURN_VALUE) + .def("addTrack", &AnimatedTransform::addTrack) + .def("appendTransform", &AnimatedTransform::appendTransform) + .def("isStatic", &AnimatedTransform::isStatic) + .def("sortAndSimplify", &AnimatedTransform::sortAndSimplify) + .def("serialize", &AnimatedTransform::serialize) + .def("getTranslationBounds", &AnimatedTransform::getTranslationBounds, BP_RETURN_VALUE) + .def("getSpatialBounds", &AnimatedTransform::getSpatialBounds, BP_RETURN_VALUE) + .def("eval", &AnimatedTransform::eval, BP_RETURN_VALUE); + BP_STRUCT(Spectrum, bp::init<>()) .def("__init__", bp::make_constructor(spectrum_array_constructor)) .def(bp::init()) @@ -915,6 +989,8 @@ void export_core() { .def("getLuminance", &Spectrum::getLuminance) .def("fromXYZ", &Spectrum::fromXYZ, fromXYZ_overloads()) .def("toXYZ", &spectrum_toXYZ) + .def("fromIPT", &Spectrum::fromIPT, fromIPT_overloads()) + .def("toIPT", &spectrum_toIPT) .def("fromLinearRGB", &Spectrum::fromLinearRGB, fromLinearRGB_overloads()) .def("toLinearRGB", &spectrum_toLinearRGB) .def("fromSRGB", &Spectrum::fromSRGB) @@ -1201,17 +1277,21 @@ void export_core() { 
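
Editorial aside, not part of the patch: the hunks above extend the mitsuba.core.Bitmap Python bindings with conversion, metadata, drawing helpers and the new EPFM/ERGBE file formats. The sketch below shows how the new convert() and write() wrappers might be driven from Python. The constructor signature, the exported enum member names (EFloat32, EUInt8, ETruncReadWrite) and the output file name 'out.png' are assumptions inferred from the bindings registered above, not something this diff guarantees.

    from mitsuba.core import Bitmap, FileStream, Vector2i

    # Create a small RGB float bitmap, zero it, then tone-map it down to 8 bit
    img = Bitmap(Bitmap.ERGB, Bitmap.EFloat32, Vector2i(64, 64))
    img.clear()
    ldr = img.convert(Bitmap.ERGB, Bitmap.EUInt8, 2.2)   # gamma-only overload (bitmap_convert_3)
    ldr.write(Bitmap.EPNG, FileStream('out.png', FileStream.ETruncReadWrite))
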
.def("toWorld", &Frame::toWorld, BP_RETURN_VALUE) .def("__repr__", &Frame::toString) .def("cosTheta", &Frame::cosTheta) + .def("cosTheta2", &Frame::cosTheta2) .def("sinTheta", &Frame::sinTheta) .def("sinTheta2", &Frame::sinTheta2) .def("tanTheta", &Frame::tanTheta) + .def("tanTheta2", &Frame::tanTheta2) .def("sinPhi", &Frame::sinPhi) .def("cosPhi", &Frame::cosPhi) .def("sinPhi2", &Frame::sinPhi2) .def("cosPhi2", &Frame::cosPhi2) .staticmethod("cosTheta") + .staticmethod("cosTheta2") .staticmethod("sinTheta") .staticmethod("sinTheta2") .staticmethod("tanTheta") + .staticmethod("tanTheta2") .staticmethod("sinPhi") .staticmethod("cosPhi") .staticmethod("sinPhi2") @@ -1257,11 +1337,19 @@ void export_core() { .staticmethod("glOrthographic") .staticmethod("fromFrame"); + Float (*fresnelConductorApprox1)(Float, Float, Float) = &fresnelConductorApprox; + Float (*fresnelConductorExact1)(Float, Float, Float) = &fresnelConductorExact; + Spectrum (*fresnelConductorApprox2)(Float, const Spectrum &, const Spectrum &) = &fresnelConductorApprox; + Spectrum (*fresnelConductorExact2)(Float, const Spectrum &, const Spectrum &) = &fresnelConductorExact; + /* Functions from utility.h */ bp::def("fresnelDielectric", &fresnelDielectric); bp::def("fresnelDielectricExt", &fresnelDielectricExt1); bp::def("fresnelDielectricExt", &fresnelDielectricExt2); - bp::def("fresnelConductor", &fresnelConductor, BP_RETURN_VALUE); + bp::def("fresnelConductorApprox", fresnelConductorApprox1, BP_RETURN_VALUE); + bp::def("fresnelConductorApprox", fresnelConductorApprox2, BP_RETURN_VALUE); + bp::def("fresnelConductorExact", fresnelConductorExact1, BP_RETURN_VALUE); + bp::def("fresnelConductorExact", fresnelConductorExact2, BP_RETURN_VALUE); bp::def("fresnelDiffuseReflectance", &fresnelDiffuseReflectance); bp::def("reflect", &reflect); bp::def("refract", &refract1); diff --git a/src/libpython/render.cpp b/src/libpython/render.cpp index e70434b8..9b9f4bcf 100644 --- a/src/libpython/render.cpp +++ b/src/libpython/render.cpp @@ -45,6 +45,24 @@ bp::object shape_rayIntersect(const Shape *shape, const Ray &ray, Float mint, Fl return bp::object(its); } +bp::object scene_rayIntersect(const Scene *scene, const Ray &ray) { + Intersection its; + + if (!scene->rayIntersect(ray, its)) + return bp::object(); + + return bp::object(its); +} + +bp::object scene_rayIntersectAll(const Scene *scene, const Ray &ray) { + Intersection its; + + if (!scene->rayIntersectAll(ray, its)) + return bp::object(); + + return bp::object(its); +} + bp::tuple shape_getCurvature(const Shape *shape, const Intersection &its, bool shadingFrame) { Float H, K; shape->getCurvature(its, H, K, shadingFrame); @@ -57,7 +75,6 @@ bp::tuple shape_getNormalDerivative(const Shape *shape, const Intersection &its, return bp::make_tuple(dpdu, dpdv); } - ref loadScene(const fs::path &filename, const StringMap ¶ms) { SceneHandler::ParameterMap pmap; for (StringMap::const_iterator it = params.begin(); it != params.end(); ++it) @@ -65,6 +82,49 @@ ref loadScene(const fs::path &filename, const StringMap ¶ms) { return SceneHandler::loadScene(filename, pmap); } +bp::list scene_getSensors(Scene *scene) { + bp::list list; + ref_vector &sensors = scene->getSensors(); + for (size_t i=0; igetSensor()); } +bp::object scene_getIntegrator(Scene *scene) { return cast(scene->getIntegrator()); } + +bp::list scene_getMeshes(Scene *scene) { + bp::list list; + std::vector &meshes = scene->getMeshes(); + for (size_t i=0; i &shapes = scene->getShapes(); + for (size_t i=0; i &emitters = scene->getEmitters(); + 
for (size_t i=0; i &media = scene->getMedia(); + for (size_t i=0; i(bp::borrowed(PyImport_AddModule("mitsuba.render")))); @@ -86,6 +146,9 @@ void export_render() { .value("EDiscrete", EDiscrete) .export_values(); + Sampler *(Scene::*scene_getSampler)(void) = &Scene::getSampler; + Film *(Scene::*scene_getFilm)(void) = &Scene::getFilm; + BP_CLASS(Scene, NetworkedObject, bp::init<>()) .def(bp::init()) .def(bp::init()) @@ -96,6 +159,14 @@ void export_render() { .def("postprocess", &Scene::postprocess) .def("flush", &Scene::flush) .def("cancel", &Scene::cancel) + .def("rayIntersect", &scene_rayIntersect) + .def("rayIntersectAll", &scene_rayIntersectAll) + .def("evalTransmittance", &Scene::evalTransmittance) + .def("evalTransmittanceAll", &Scene::evalTransmittanceAll) + .def("sampleEmitterDirect", &Scene::sampleEmitterDirect) + .def("sampleSensorDirect", &Scene::sampleSensorDirect) + .def("pdfEmitterDirect", &Scene::pdfEmitterDirect) + .def("pdfSensorDirect", &Scene::pdfSensorDirect) .def("getAABB", &Scene::getAABB, BP_RETURN_VALUE) .def("getBSphere", &Scene::getBSphere, BP_RETURN_VALUE) .def("getBlockSize", &Scene::getBlockSize) @@ -104,7 +175,26 @@ void export_render() { .def("setSourceFile", &Scene::setSourceFile) .def("getDestinationFile", &Scene::getDestinationFile, BP_RETURN_VALUE) .def("setDestinationFile", &Scene::setDestinationFile) - .def("destinationExists", &Scene::destinationExists); + .def("destinationExists", &Scene::destinationExists) + .def("hasEnvironmentEmitter", &Scene::hasEnvironmentEmitter) + .def("getEnvironmentEmitter", &Scene::getEnvironmentEmitter, BP_RETURN_VALUE) + .def("hasDegenerateSensor", &Scene::hasDegenerateSensor) + .def("hasDegenerateEmitters", &Scene::hasDegenerateEmitters) + .def("hasMedia", &Scene::hasMedia) + .def("addSensor", &Scene::addSensor) + .def("removeSensor", &Scene::removeSensor) + .def("getSensor", &scene_getSensor, BP_RETURN_VALUE) + .def("setSensor", &Scene::setSensor) + .def("getSensors", &scene_getSensors) + .def("getIntegrator", &scene_getIntegrator, BP_RETURN_VALUE) + .def("setIntegrator", &Scene::setIntegrator) + .def("getSampler", scene_getSampler, BP_RETURN_VALUE) + .def("setSampler", &Scene::setSampler) + .def("getFilm", scene_getFilm, BP_RETURN_VALUE) + .def("getShapes", &scene_getShapes) + .def("getMeshes", &scene_getMeshes) + .def("getEmitters", &scene_getEmitters) + .def("getMedia", &scene_getMedia); BP_CLASS(Sampler, ConfigurableObject, bp::no_init) .def("clone", &Sampler::clone, BP_RETURN_VALUE) @@ -175,6 +265,37 @@ void export_render() { .def("LoSub", &Intersection::LoSub) .def("__repr__", &Intersection::toString); + BP_STRUCT(PositionSamplingRecord, bp::init<>()) + .def(bp::init()) + .def(bp::init()) + .def_readwrite("p", &PositionSamplingRecord::p) + .def_readwrite("time", &PositionSamplingRecord::time) + .def_readwrite("n", &PositionSamplingRecord::n) + .def_readwrite("pdf", &PositionSamplingRecord::pdf) + .def_readwrite("measure", &PositionSamplingRecord::measure) + .def_readwrite("uv", &PositionSamplingRecord::uv) + .def_readwrite("object", &PositionSamplingRecord::object) + .def("__repr__", &PositionSamplingRecord::toString); + + BP_STRUCT(DirectionSamplingRecord, bp::init<>()) + .def(bp::init()) + .def(bp::init()) + .def_readwrite("d", &DirectionSamplingRecord::d) + .def_readwrite("pdf", &DirectionSamplingRecord::pdf) + .def_readwrite("measure", &DirectionSamplingRecord::measure) + .def("__repr__", &DirectionSamplingRecord::toString); + + BP_SUBSTRUCT(DirectSamplingRecord, PositionSamplingRecord, bp::init<>()) + 
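
Editorial aside, not part of the patch: the Scene bindings above expose rayIntersect(), the sensor/shape/emitter getters and the new sampling-record structs to Python. A minimal usage sketch follows; the scene file name 'scene.xml' is a placeholder, and the Ray constructor and two-argument loadScene() call are assumptions based on the helpers defined earlier in render.cpp.

    from mitsuba.core import Thread, StringMap, Point, Vector, Ray
    from mitsuba.render import SceneHandler

    resolver = Thread.getThread().getFileResolver()
    scene = SceneHandler.loadScene(resolver.resolve('scene.xml'), StringMap())

    # scene.rayIntersect() now returns an Intersection object, or None on a miss
    its = scene.rayIntersect(Ray(Point(0, 0, -10), Vector(0, 0, 1), 0.0))
    if its is not None:
        print(its.p, its.shFrame.n)

    for shape in scene.getShapes():
        print(shape)
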
.def(bp::init()) + .def(bp::init()) + .def(bp::init()) + .def_readwrite("ref", &DirectSamplingRecord::ref) + .def_readwrite("refN", &DirectSamplingRecord::refN) + .def_readwrite("d", &DirectSamplingRecord::d) + .def_readwrite("dist", &DirectSamplingRecord::dist) + .def("setQuery", &DirectSamplingRecord::setQuery) + .def("__repr__", &DirectSamplingRecord::toString); + Medium *(Shape::*shape_getInteriorMedium)(void) = &Shape::getInteriorMedium; Medium *(Shape::*shape_getExteriorMedium)(void) = &Shape::getExteriorMedium; Sensor *(Shape::*shape_getSensor)(void) = &Shape::getSensor; @@ -230,6 +351,31 @@ void export_render() { .def("serialize", triMesh_serialize2) .def("writeOBJ", &TriMesh::writeOBJ); + BP_CLASS(Sensor, ConfigurableObject, bp::no_init) // incomplete + .def("getShutterOpen", &Sensor::getShutterOpen) + .def("setShutterOpen", &Sensor::setShutterOpen) + .def("getShutterOpenTime", &Sensor::getShutterOpenTime) + .def("setShutterOpenTime", &Sensor::setShutterOpenTime); + + void (ProjectiveCamera::*projectiveCamera_setWorldTransform1)(const Transform &) = &ProjectiveCamera::setWorldTransform; + void (ProjectiveCamera::*projectiveCamera_setWorldTransform2)(AnimatedTransform *) = &ProjectiveCamera::setWorldTransform; + const Transform (ProjectiveCamera::*projectiveCamera_getWorldTransform1)(Float t) const = &ProjectiveCamera::getWorldTransform; + const AnimatedTransform *(ProjectiveCamera::*projectiveCamera_getWorldTransform2)(void) const = &ProjectiveCamera::getWorldTransform; + + BP_CLASS(ProjectiveCamera, Sensor, bp::no_init) + .def("getViewTransform", &ProjectiveCamera::getViewTransform, BP_RETURN_VALUE) + .def("getWorldTransform", projectiveCamera_getWorldTransform1, BP_RETURN_VALUE) + .def("getWorldTransform", projectiveCamera_getWorldTransform2, BP_RETURN_VALUE) + .def("setWorldTransform", projectiveCamera_setWorldTransform1) + .def("setWorldTransform", projectiveCamera_setWorldTransform2) + .def("getProjectionTransform", &ProjectiveCamera::getProjectionTransform, BP_RETURN_VALUE) + .def("getNearClip", &ProjectiveCamera::getNearClip) + .def("getFarClip", &ProjectiveCamera::getFarClip) + .def("getFocusDistance", &ProjectiveCamera::getFocusDistance) + .def("setNearClip", &ProjectiveCamera::setNearClip) + .def("setFarClip", &ProjectiveCamera::setFarClip) + .def("setFocusDistance", &ProjectiveCamera::setFocusDistance); + BP_STRUCT(BSDFSamplingRecord, (bp::init())) .def(bp::init()) .def(bp::init()) diff --git a/src/librender/CMakeLists.txt b/src/librender/CMakeLists.txt index e28a966e..3121bf79 100644 --- a/src/librender/CMakeLists.txt +++ b/src/librender/CMakeLists.txt @@ -42,7 +42,6 @@ set(HDRS ${INCLUDE_DIR}/subsurface.h ${INCLUDE_DIR}/testcase.h ${INCLUDE_DIR}/texture.h - ${INCLUDE_DIR}/track.h ${INCLUDE_DIR}/triaccel.h ${INCLUDE_DIR}/triaccel_sse.h ${INCLUDE_DIR}/trimesh.h @@ -82,7 +81,6 @@ set(SRCS subsurface.cpp testcase.cpp texture.cpp - track.cpp trimesh.cpp util.cpp volume.cpp diff --git a/src/librender/SConscript b/src/librender/SConscript index 3404e1b5..baeb6126 100644 --- a/src/librender/SConscript +++ b/src/librender/SConscript @@ -11,13 +11,13 @@ if renderEnv.has_key('XERCESLIB'): librender = renderEnv.SharedLibrary('mitsuba-render', [ 'bsdf.cpp', 'film.cpp', 'integrator.cpp', 'emitter.cpp', 'sensor.cpp', - 'skdtree.cpp', 'medium.cpp', 'renderjob.cpp', 'imageproc.cpp', + 'skdtree.cpp', 'medium.cpp', 'renderjob.cpp', 'imageproc.cpp', 'rectwu.cpp', 'renderproc.cpp', 'imageblock.cpp', 'particleproc.cpp', - 'renderqueue.cpp', 'scene.cpp', 'subsurface.cpp', 'texture.cpp', - 
'shape.cpp', 'trimesh.cpp', 'sampler.cpp', 'util.cpp', 'irrcache.cpp', - 'testcase.cpp', 'photonmap.cpp', 'gatherproc.cpp', 'volume.cpp', - 'vpl.cpp', 'shader.cpp', 'scenehandler.cpp', 'intersection.cpp', - 'track.cpp', 'common.cpp', 'phase.cpp', 'noise.cpp', 'photon.cpp' + 'renderqueue.cpp', 'scene.cpp', 'subsurface.cpp', 'texture.cpp', + 'shape.cpp', 'trimesh.cpp', 'sampler.cpp', 'util.cpp', 'irrcache.cpp', + 'testcase.cpp', 'photonmap.cpp', 'gatherproc.cpp', 'volume.cpp', + 'vpl.cpp', 'shader.cpp', 'scenehandler.cpp', 'intersection.cpp', + 'common.cpp', 'phase.cpp', 'noise.cpp', 'photon.cpp' ]) if sys.platform == "darwin": diff --git a/src/librender/emitter.cpp b/src/librender/emitter.cpp index bbd39055..a0dfc2a6 100644 --- a/src/librender/emitter.cpp +++ b/src/librender/emitter.cpp @@ -18,15 +18,14 @@ #include #include -#include +#include #include MTS_NAMESPACE_BEGIN AbstractEmitter::AbstractEmitter(const Properties &props) : ConfigurableObject(props), m_shape(NULL), m_type(0) { - m_worldTransform = new AnimatedTransform( - props.getTransform("toWorld", Transform())); + m_worldTransform = props.getAnimatedTransform("toWorld", Transform()); } AbstractEmitter::AbstractEmitter(Stream *stream, InstanceManager *manager) diff --git a/src/librender/gatherproc.cpp b/src/librender/gatherproc.cpp index ea3fe0e0..19ede9f1 100644 --- a/src/librender/gatherproc.cpp +++ b/src/librender/gatherproc.cpp @@ -136,10 +136,10 @@ public: m_workResult->nextParticle(); } - void handleSurfaceInteraction(int depth, bool delta, + void handleSurfaceInteraction(int depth_, int nullInteractions, bool delta, const Intersection &its, const Medium *medium, const Spectrum &weight) { - int bsdfType = its.getBSDF()->getType(); + int bsdfType = its.getBSDF()->getType(), depth = depth_ - nullInteractions; if (!(bsdfType & BSDF::EDiffuseReflection) && !(bsdfType & BSDF::EGlossyReflection)) return; @@ -149,11 +149,12 @@ public: m_workResult->put(Photon(its.p, its.geoFrame.n, -its.toWorld(its.wi), weight, depth)); } - void handleMediumInteraction(int depth, bool delta, + void handleMediumInteraction(int depth, int nullInteractions, bool delta, const MediumSamplingRecord &mRec, const Medium *medium, const Vector &wi, const Spectrum &weight) { if (m_type == GatherPhotonProcess::EVolumePhotons) - m_workResult->put(Photon(mRec.p, Normal(0.0f, 0.0f, 0.0f), -wi, weight, depth)); + m_workResult->put(Photon(mRec.p, Normal(0.0f, 0.0f, 0.0f), + -wi, weight, depth-nullInteractions)); } MTS_DECLARE_CLASS() diff --git a/src/librender/integrator.cpp b/src/librender/integrator.cpp index b7a090f0..c9004969 100644 --- a/src/librender/integrator.cpp +++ b/src/librender/integrator.cpp @@ -154,6 +154,11 @@ void SamplingIntegrator::renderBlock(const Scene *scene, block->clear(); + uint32_t queryType = RadianceQueryRecord::ESensorRay; + + if (!sensor->getFilm()->hasAlpha()) /* Don't compute an alpha channel if we don't have to */ + queryType &= ~RadianceQueryRecord::EOpacity; + for (size_t i = 0; igetOffset()); if (stop) @@ -162,7 +167,7 @@ void SamplingIntegrator::renderBlock(const Scene *scene, sampler->generate(offset); for (size_t j = 0; jgetSampleCount(); j++) { - rRec.newQuery(RadianceQueryRecord::ESensorRay, sensor->getMedium()); + rRec.newQuery(queryType, sensor->getMedium()); Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D())); if (needsApertureSample) @@ -206,6 +211,12 @@ MonteCarloIntegrator::MonteCarloIntegrator(const Properties &props) : SamplingIn */ m_strictNormals = props.getBoolean("strictNormals", false); + /** + * 
When this flag is set to true, contributions from directly + * visible emitters will not be included in the rendered image + */ + m_hideEmitters = props.getBoolean("hideEmitters", false); + if (m_rrDepth <= 0) Log(EError, "'rrDepth' must be set to a value greater than zero!"); @@ -218,6 +229,7 @@ MonteCarloIntegrator::MonteCarloIntegrator(Stream *stream, InstanceManager *mana m_rrDepth = stream->readInt(); m_maxDepth = stream->readInt(); m_strictNormals = stream->readBool(); + m_hideEmitters = stream->readBool(); } void MonteCarloIntegrator::serialize(Stream *stream, InstanceManager *manager) const { @@ -225,6 +237,7 @@ void MonteCarloIntegrator::serialize(Stream *stream, InstanceManager *manager) c stream->writeInt(m_rrDepth); stream->writeInt(m_maxDepth); stream->writeBool(m_strictNormals); + stream->writeBool(m_hideEmitters); } std::string RadianceQueryRecord::toString() const { diff --git a/src/librender/particleproc.cpp b/src/librender/particleproc.cpp index 9f9a30b4..1197f77a 100644 --- a/src/librender/particleproc.cpp +++ b/src/librender/particleproc.cpp @@ -156,7 +156,7 @@ void ParticleTracer::process(const WorkUnit *workUnit, WorkResult *workResult, handleNewParticle(); } - int depth = 1; + int depth = 1, nullInteractions = 0; bool delta = false; Spectrum throughput(1.0f); // unitless path throughput (used for russian roulette) @@ -174,7 +174,7 @@ void ParticleTracer::process(const WorkUnit *workUnit, WorkResult *workResult, throughput *= mRec.sigmaS * mRec.transmittance / mRec.pdfSuccess; /* Forward the medium scattering event to the attached handler */ - handleMediumInteraction(depth, + handleMediumInteraction(depth, nullInteractions, delta, mRec, medium, -ray.d, throughput*power); PhaseFunctionSamplingRecord pRec(mRec, -ray.d, EImportance); @@ -198,7 +198,7 @@ void ParticleTracer::process(const WorkUnit *workUnit, WorkResult *workResult, const BSDF *bsdf = its.getBSDF(); /* Forward the surface scattering event to the attached handler */ - handleSurfaceInteraction(depth, delta, its, medium, throughput*power); + handleSurfaceInteraction(depth, nullInteractions, delta, its, medium, throughput*power); BSDFSamplingRecord bRec(its, m_sampler, EImportance); Spectrum bsdfWeight = bsdf->sample(bRec, m_sampler->next2D()); @@ -219,6 +219,11 @@ void ParticleTracer::process(const WorkUnit *workUnit, WorkResult *workResult, if (its.isMediumTransition()) medium = its.getTargetMedium(woDotGeoN); + if (bRec.sampledType & BSDF::ENull) + ++nullInteractions; + else + delta = bRec.sampledType & BSDF::EDelta; + #if 0 /* This is somewhat unfortunate: for accuracy, we'd really want the correction factor below to match the path tracing interpretation @@ -244,7 +249,6 @@ void ParticleTracer::process(const WorkUnit *workUnit, WorkResult *workResult, ray.setOrigin(its.p); ray.setDirection(wo); ray.mint = Epsilon; - delta = (bRec.sampledType & (BSDF::EDelta & ~BSDF::ENull)); } if (depth++ >= m_rrDepth) { @@ -266,11 +270,12 @@ void ParticleTracer::handleEmission(const PositionSamplingRecord &pRec, void ParticleTracer::handleNewParticle() { } -void ParticleTracer::handleSurfaceInteraction(int depth, bool delta, - const Intersection &its, const Medium *medium, const Spectrum &weight) { } +void ParticleTracer::handleSurfaceInteraction(int depth, int nullInteractions, + bool delta, const Intersection &its, const Medium *medium, + const Spectrum &weight) { } -void ParticleTracer::handleMediumInteraction(int depth, bool delta, - const MediumSamplingRecord &mRec, const Medium *medium, +void 
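
Editorial aside, not part of the patch: the new 'hideEmitters' flag documented above is read by MonteCarloIntegrator from its Properties. Purely as an illustration, it could be toggled when instantiating an integrator through the Python bindings; this assumes the dictionary-style Properties setter exported earlier in core.cpp and the single-argument PluginManager.createObject(Properties) overload.

    from mitsuba.core import PluginManager, Properties

    props = Properties('path')       # standard path tracer plugin
    props['hideEmitters'] = True     # ends up in props.getBoolean("hideEmitters", ...)
    integrator = PluginManager.getInstance().createObject(props)
    integrator.configure()
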
ParticleTracer::handleMediumInteraction(int depth, int nullInteractions, + bool delta, const MediumSamplingRecord &mRec, const Medium *medium, const Vector &wi, const Spectrum &weight) { } MTS_IMPLEMENT_CLASS(RangeWorkUnit, false, WorkUnit) diff --git a/src/librender/renderqueue.cpp b/src/librender/renderqueue.cpp index 31eb2263..23c84a7a 100644 --- a/src/librender/renderqueue.cpp +++ b/src/librender/renderqueue.cpp @@ -54,20 +54,28 @@ void RenderQueue::unregisterListener(RenderListener *listener) { listener->decRef(); } +Float RenderQueue::getRenderTime(const RenderJob *job) const { + LockGuard lock(m_mutex); + std::map::const_iterator it = m_jobs.find(const_cast(job)); + if (it == m_jobs.end()) + Log(EError, "RenderQueue::getRenderJob() - job not found!"); + + unsigned int ms = m_timer->getMilliseconds() - it->second.startTime; + return ms / 1000.0f; +} + void RenderQueue::flush() { LockGuard lock(m_mutex); std::map::iterator it = m_jobs.begin(); - for (; it != m_jobs.end(); ++it) { + for (; it != m_jobs.end(); ++it) (*it).first->flush(); - } } void RenderQueue::removeJob(RenderJob *job, bool cancelled) { LockGuard lock(m_mutex); std::map::iterator it = m_jobs.find(job); - if (it == m_jobs.end()) { + if (it == m_jobs.end()) Log(EError, "RenderQueue::removeRenderJob() - job not found!"); - } JobRecord &rec = (*it).second; unsigned int ms = m_timer->getMilliseconds() - rec.startTime; Log(EInfo, "Render time: %s", timeString(ms/1000.0f, true).c_str()); diff --git a/src/librender/scene.cpp b/src/librender/scene.cpp index 60fb21f8..ac42ce4f 100644 --- a/src/librender/scene.cpp +++ b/src/librender/scene.cpp @@ -405,7 +405,7 @@ void Scene::initializeBidirectional() { aabb.expandBy(emitter->getAABB()); if (!(emitter->getType() & Emitter::EDeltaPosition)) m_degenerateEmitters = false; - } +} m_aabb = aabb; } @@ -419,13 +419,21 @@ bool Scene::preprocess(RenderQueue *queue, const RenderJob *job, sceneResID, sensorResID, samplerResID)) return false; - /* Pre-process step for all sub-surface integrators */ + /* Pre-process step for all sub-surface integrators (each one in independence) */ + for (ref_vector::iterator it = m_ssIntegrators.begin(); + it != m_ssIntegrators.end(); ++it) + (*it)->setActive(false); + for (ref_vector::iterator it = m_ssIntegrators.begin(); it != m_ssIntegrators.end(); ++it) if (!(*it)->preprocess(this, queue, job, sceneResID, sensorResID, samplerResID)) return false; + for (ref_vector::iterator it = m_ssIntegrators.begin(); + it != m_ssIntegrators.end(); ++it) + (*it)->setActive(true); + return true; } @@ -443,8 +451,8 @@ void Scene::cancel() { m_integrator->cancel(); } -void Scene::flush() { - m_sensor->getFilm()->develop(); +void Scene::flush(RenderQueue *queue, const RenderJob *job) { + m_sensor->getFilm()->develop(this, queue->getRenderTime(job)); } void Scene::setDestinationFile(const fs::path &name) { @@ -459,7 +467,7 @@ void Scene::postprocess(RenderQueue *queue, const RenderJob *job, int sceneResID, int sensorResID, int samplerResID) { m_integrator->postprocess(this, queue, job, sceneResID, sensorResID, samplerResID); - m_sensor->getFilm()->develop(); + m_sensor->getFilm()->develop(this, queue->getRenderTime(job)); } void Scene::addChild(const std::string &name, ConfigurableObject *child) { @@ -873,7 +881,7 @@ Spectrum Scene::sampleAttenuatedEmitterDirect(DirectSamplingRecord &dRec, Spectrum value = emitter->sampleDirect(dRec, sample); if (dRec.pdf != 0) { - if (its.isMediumTransition()) + if (its.shape && its.isMediumTransition()) medium = 
its.getTargetMedium(dRec.d); value *= evalTransmittance(its.p, true, dRec.p, emitter->isOnSurface(), dRec.time, medium, interactions, sampler) / emPdf; @@ -923,7 +931,7 @@ Spectrum Scene::sampleAttenuatedSensorDirect(DirectSamplingRecord &dRec, Spectrum value = m_sensor->sampleDirect(dRec, sample); if (dRec.pdf != 0) { - if (its.isMediumTransition()) + if (its.shape && its.isMediumTransition()) medium = its.getTargetMedium(dRec.d); value *= evalTransmittance(its.p, true, dRec.p, m_sensor->isOnSurface(), dRec.time, medium, interactions, sampler); diff --git a/src/librender/scenehandler.cpp b/src/librender/scenehandler.cpp index 30017839..c41a7ae9 100644 --- a/src/librender/scenehandler.cpp +++ b/src/librender/scenehandler.cpp @@ -31,6 +31,7 @@ #include #include #include +#include MTS_NAMESPACE_BEGIN XERCES_CPP_NAMESPACE_USE @@ -46,6 +47,10 @@ XERCES_CPP_NAMESPACE_USE level, NULL, __FILE__, __LINE__, fmt, ## __VA_ARGS__) #endif +typedef void (*CleanupFun) (); +typedef boost::unordered_set CleanupSet; +static PrimitiveThreadLocal __cleanup_tls; + SceneHandler::SceneHandler(const SAXParser *parser, const ParameterMap ¶ms, NamedObjectMap *namedObjects, bool isIncludedFile) : m_parser(parser), m_params(params), @@ -97,6 +102,7 @@ SceneHandler::SceneHandler(const SAXParser *parser, m_tags["blackbody"] = TagEntry(EBlackBody, (Class *) NULL); m_tags["spectrum"] = TagEntry(ESpectrum, (Class *) NULL); m_tags["transform"] = TagEntry(ETransform, (Class *) NULL); + m_tags["animation"] = TagEntry(EAnimation, (Class *) NULL); m_tags["include"] = TagEntry(EInclude, (Class *) NULL); m_tags["alias"] = TagEntry(EAlias, (Class *) NULL); @@ -155,6 +161,13 @@ void SceneHandler::startDocument() { void SceneHandler::endDocument() { SAssert(m_scene != NULL); + + /* Call cleanup handlers */ + CleanupSet &cleanup = __cleanup_tls.get(); + for (CleanupSet::iterator it = cleanup.begin(); + it != cleanup.end(); ++it) + (*it)(); + cleanup.clear(); } void SceneHandler::characters(const XMLCh* const name, @@ -167,11 +180,12 @@ void SceneHandler::characters(const XMLCh* const name, Float SceneHandler::parseFloat(const std::string &name, const std::string &str, Float defVal) const { char *end_ptr = NULL; - if (str == "") { + if (str.empty()) { if (defVal == -1) XMLLog(EError, "Missing floating point value (in <%s>)", name.c_str()); return defVal; } + Float result = (Float) std::strtod(str.c_str(), &end_ptr); if (*end_ptr != '\0') XMLLog(EError, "Invalid floating point value specified (in <%s>)", name.c_str()); @@ -184,7 +198,6 @@ void SceneHandler::startElement(const XMLCh* const xmlName, ParseContext context((name == "scene") ? 
NULL : &m_context.top()); - /* Convert attributes to ISO-8859-1 */ for (size_t i=0; i 0 && attrValue.find('$') != attrValue.npos) { @@ -196,7 +209,7 @@ void SceneHandler::startElement(const XMLCh* const xmlName, ++pos; } } - if (attrValue.find('$') != attrValue.npos) + if (attrValue.find('$') != attrValue.npos && attrValue.find('[') == attrValue.npos) XMLLog(EError, "The scene referenced an undefined parameter: \"%s\"", attrValue.c_str()); } @@ -240,6 +253,10 @@ void SceneHandler::startElement(const XMLCh* const xmlName, case ETransform: m_transform = Transform(); break; + case EAnimation: { + m_animatedTransform = new AnimatedTransform(); + } + break; default: break; } @@ -247,6 +264,10 @@ void SceneHandler::startElement(const XMLCh* const xmlName, m_context.push(context); } +void pushSceneCleanupHandler(void (*cleanup)()) { + __cleanup_tls.get().insert(cleanup); +} + void SceneHandler::endElement(const XMLCh* const xmlName) { std::string name = transcode(xmlName); ParseContext &context = m_context.top(); @@ -255,7 +276,7 @@ void SceneHandler::endElement(const XMLCh* const xmlName) { if (context.attributes.find("id") != context.attributes.end()) context.properties.setID(context.attributes["id"]); - ref object = NULL; + ref object; TagMap::const_iterator it = m_tags.find(name); if (it == m_tags.end()) @@ -575,9 +596,22 @@ void SceneHandler::endElement(const XMLCh* const xmlName) { } break; + case EAnimation: { + m_animatedTransform->sortAndSimplify(); + context.parent->properties.setAnimatedTransform( + context.attributes["name"], m_animatedTransform); + m_animatedTransform = NULL; + } + break; + case ETransform: { - context.parent->properties.setTransform( - context.attributes["name"], m_transform); + if (!m_animatedTransform.get()) { + context.parent->properties.setTransform( + context.attributes["name"], m_transform); + } else { + Float time = parseFloat("time", context.attributes["time"]); + m_animatedTransform->appendTransform(time, m_transform); + } } break; @@ -623,11 +657,61 @@ void SceneHandler::endElement(const XMLCh* const xmlName) { } break; - default: - if (tag.second == NULL) - XMLLog(EError, "Internal error: could not instantiate an object " - "corresponding to the tag '%s'", name.c_str()); - object = m_pluginManager->createObject(tag.second, context.properties); + default: { + if (tag.second == NULL) + XMLLog(EError, "Internal error: could not instantiate an object " + "corresponding to the tag '%s'", name.c_str()); + + Properties &props = context.properties; + + /* Convenience hack: allow passing animated transforms to arbitrary shapes + and then internally rewrite this into a shape group + animated instance */ + if (tag.second == MTS_CLASS(Shape) + && props.hasProperty("toWorld") + && props.getType("toWorld") == Properties::EAnimatedTransform + && (props.getPluginName() != "instance" && props.getPluginName() != "disk")) { + /* (The 'disk' plugin also directly supports animated transformations, so + the instancing trick isn't required for it) */ + + ref trafo = props.getAnimatedTransform("toWorld"); + props.removeProperty("toWorld"); + + if (trafo->isStatic()) + props.setTransform("toWorld", trafo->eval(0)); + + object = m_pluginManager->createObject(tag.second, props); + + if (!trafo->isStatic()) { + object = m_pluginManager->createObject(tag.second, props); + /* If the object has children, append them */ + for (std::vector > + ::iterator it = context.children.begin(); + it != context.children.end(); ++it) { + if (it->second != NULL) { + object->addChild(it->first, 
it->second); + it->second->setParent(object); + it->second->decRef(); + } + } + context.children.clear(); + + object->configure(); + + ref shapeGroup = static_cast ( + m_pluginManager->createObject(MTS_CLASS(Shape), Properties("shapegroup"))); + shapeGroup->addChild(object); + shapeGroup->configure(); + + Properties instanceProps("instance"); + instanceProps.setAnimatedTransform("toWorld", trafo); + object = m_pluginManager->createObject(instanceProps); + object->addChild(shapeGroup); + + } + } else { + object = m_pluginManager->createObject(tag.second, props); + } + } break; } diff --git a/src/librender/sensor.cpp b/src/librender/sensor.cpp index 2d8fcbb1..7f597554 100644 --- a/src/librender/sensor.cpp +++ b/src/librender/sensor.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include @@ -57,6 +57,15 @@ void Sensor::serialize(Stream *stream, InstanceManager *manager) const { stream->writeFloat(m_shutterOpenTime); } +void Sensor::setShutterOpenTime(Float time) { + m_shutterOpenTime = time; + if (m_shutterOpenTime == 0) + m_type |= EDeltaTime; + else + m_type &= ~EDeltaTime; +} + + Spectrum Sensor::eval(const Intersection &its, const Vector &d, Point2 &samplePos) const { Log(EError, "%s::eval(const Intersection &, const Vector &, " @@ -200,11 +209,16 @@ void ProjectiveCamera::setFarClip(Float farClip) { ProjectiveCamera::~ProjectiveCamera() { } -void ProjectiveCamera::setInverseViewTransform(const Transform &trafo) { +void ProjectiveCamera::setWorldTransform(const Transform &trafo) { m_worldTransform = new AnimatedTransform(trafo); m_properties.setTransform("toWorld", trafo, false); } +void ProjectiveCamera::setWorldTransform(AnimatedTransform *trafo) { + m_worldTransform = trafo; + m_properties.setAnimatedTransform("toWorld", trafo, false); +} + PerspectiveCamera::PerspectiveCamera(const Properties &props) : ProjectiveCamera(props), m_xfov(0.0f) { props.markQueried("fov"); @@ -260,6 +274,7 @@ void PerspectiveCamera::configure() { SLog(EError, "Could not parse the focal length (must be of the form " "mm, where is a positive integer)!"); + m_properties.removeProperty("focalLength"); setDiagonalFov(2 * 180/M_PI* std::atan(std::sqrt((Float) (36*36+24*24)) / (2*value))); } } diff --git a/src/librender/skdtree.cpp b/src/librender/skdtree.cpp index 5de789e8..dcc01cfb 100644 --- a/src/librender/skdtree.cpp +++ b/src/librender/skdtree.cpp @@ -114,6 +114,12 @@ bool ShapeKDTree::rayIntersect(const Ray &ray, Intersection &its) const { its.t = std::numeric_limits::infinity(); Float mint, maxt; + #if defined(MTS_FP_DEBUG_STRICT) + Assert( + std::isfinite(ray.o.x) && std::isfinite(ray.o.y) && std::isfinite(ray.o.z) && + std::isfinite(ray.d.x) && std::isfinite(ray.d.y) && std::isfinite(ray.d.z)); + #endif + ++raysTraced; if (m_aabb.rayIntersect(ray, mint, maxt)) { /* Use an adaptive ray epsilon */ diff --git a/src/librender/subsurface.cpp b/src/librender/subsurface.cpp index 982a15e8..1306ccc0 100644 --- a/src/librender/subsurface.cpp +++ b/src/librender/subsurface.cpp @@ -23,7 +23,7 @@ MTS_NAMESPACE_BEGIN Subsurface::Subsurface(const Properties &props) - : NetworkedObject(props) { } + : NetworkedObject(props), m_active(false) { } Subsurface::Subsurface(Stream *stream, InstanceManager *manager) : NetworkedObject(stream, manager) { @@ -32,6 +32,7 @@ Subsurface::Subsurface(Stream *stream, InstanceManager *manager) : Shape *shape = static_cast(manager->getInstance(stream)); m_shapes.push_back(shape); } + m_active = false; } Subsurface::~Subsurface() { } diff --git 
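
Editorial aside, not part of the patch: both the shape-instancing rewrite above and the new ProjectiveCamera::setWorldTransform(AnimatedTransform*) overload consume AnimatedTransform objects. A hedged sketch of how one could be assembled through the bindings added in core.cpp; the keyframe values are arbitrary and Transform.lookAt is assumed to be exported as a static method.

    from mitsuba.core import AnimatedTransform, Transform, Point, Vector

    atrafo = AnimatedTransform()
    atrafo.appendTransform(0.0, Transform.lookAt(Point(0, 0, -5), Point(0, 0, 0), Vector(0, 1, 0)))
    atrafo.appendTransform(1.0, Transform.lookAt(Point(1, 0, -5), Point(0, 0, 0), Vector(0, 1, 0)))
    atrafo.sortAndSimplify()

    # Evaluate the interpolated transform at shutter time t = 0.5, or hand the
    # whole track to a camera via camera.setWorldTransform(atrafo)
    trafo = atrafo.eval(0.5)
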
a/src/librender/texture.cpp b/src/librender/texture.cpp index 66797fb9..76221c71 100644 --- a/src/librender/texture.cpp +++ b/src/librender/texture.cpp @@ -47,6 +47,7 @@ Spectrum Texture::getMinimum() const { NotImplementedError("getMinimum"); } Spectrum Texture::getMaximum() const { NotImplementedError("getMaximum"); } bool Texture::isConstant() const { NotImplementedError("isConstant"); } bool Texture::usesRayDifferentials() const { NotImplementedError("usesRayDifferentials"); } +ref Texture::getBitmap() const { return NULL; } ref Texture::expand() { return this; diff --git a/src/librender/track.cpp b/src/librender/track.cpp deleted file mode 100644 index e438d0f4..00000000 --- a/src/librender/track.cpp +++ /dev/null @@ -1,223 +0,0 @@ -#include -#include - -MTS_NAMESPACE_BEGIN - -AnimatedTransform::AnimatedTransform(Stream *stream) { - size_t nTracks = stream->readSize(); - if (nTracks == 0) { - m_transform = Transform(stream); - } else { - for (size_t i=0; ireadUInt(); - AbstractAnimationTrack *track = NULL; - switch (type) { - case AbstractAnimationTrack::ETranslationX: - case AbstractAnimationTrack::ETranslationY: - case AbstractAnimationTrack::ETranslationZ: - case AbstractAnimationTrack::EScaleX: - case AbstractAnimationTrack::EScaleY: - case AbstractAnimationTrack::EScaleZ: - case AbstractAnimationTrack::ERotationX: - case AbstractAnimationTrack::ERotationY: - case AbstractAnimationTrack::ERotationZ: - track = new FloatTrack(type, stream); - break; - case AbstractAnimationTrack::ETranslationXYZ: - case AbstractAnimationTrack::EScaleXYZ: - track = new VectorTrack(type, stream); - break; - case AbstractAnimationTrack::ERotationQuat: - track = new QuatTrack(type, stream); - break; - default: - Log(EError, "Encountered an unknown animation track type (%i)!", type); - } - - track->incRef(); - m_tracks.push_back(track); - } - } -} - -void AnimatedTransform::addTrack(AbstractAnimationTrack *track) { - track->incRef(); - m_tracks.push_back(track); -} - -AABB1 AnimatedTransform::getTimeBounds() const { - if (m_tracks.size() == 0) -#if !defined(__clang__) - return AABB1(0.0f, 0.0f); -#else - // HACK Workaround for clang - { - AABB1 b; - b.min = b.max = 0.0f; - return b; - } -#endif - - Float min = std::numeric_limits::infinity(); - Float max = -std::numeric_limits::infinity(); - - for (size_t i=0; igetSize(); - SAssert(size > 0); - min = std::min(min, track->getTime(0)); - max = std::max(max, track->getTime(size-1)); - } - -#if !defined(__clang__) - return AABB1(min, max); -#else - // HACK Workaround for clang - AABB1 b; - b.min = min; - b.max = max; - return b; -#endif -} - -AABB AnimatedTransform::getTranslationBounds() const { - if (m_tracks.size() == 0) { - Point p = m_transform(Point(0.0f)); - return AABB(p, p); - } - - AABB aabb; - - for (size_t i=0; igetType()) { - case AbstractAnimationTrack::ETranslationX: - case AbstractAnimationTrack::ETranslationY: - case AbstractAnimationTrack::ETranslationZ: { - int idx = absTrack->getType() - AbstractAnimationTrack::ETranslationX; - const FloatTrack *track = - static_cast(absTrack); - for (size_t j=0; jgetSize(); ++j) { - Float value = track->getValue(j); - aabb.max[idx] = std::max(aabb.max[idx], value); - aabb.min[idx] = std::min(aabb.min[idx], value); - } - } - break; - - case AbstractAnimationTrack::ETranslationXYZ: { - const VectorTrack *track = - static_cast(absTrack); - for (size_t j=0; jgetSize(); ++j) - aabb.expandBy(Point(track->getValue(j))); - } - break; - default: - break; - } - } - for (int i=0; i<3; ++i) { - if (aabb.min[i] > 
aabb.max[i]) - aabb.min[i] = aabb.max[i] = 0.0f; - } - - return aabb; -} - -AABB AnimatedTransform::getSpatialBounds(const AABB &aabb) const { - AABB result; - - if (m_tracks.size() == 0) { - for (int j=0; j<8; ++j) - result.expandBy(m_transform(aabb.getCorner(j))); - } else { - /* Compute approximate bounds */ - int nSteps = 100; - AABB1 timeBounds = getTimeBounds(); - Float step = timeBounds.getExtents().x / (nSteps-1); - - for (int i=0; idecRef(); -} - -void AnimatedTransform::serialize(Stream *stream) const { - stream->writeSize(m_tracks.size()); - if (m_tracks.size() == 0) { - m_transform.serialize(stream); - } else { - for (size_t i=0; iserialize(stream); - } -} - -void AnimatedTransform::TransformFunctor::operator()(const Float &t, Transform &trafo) const { - Vector translation(0.0f); - Vector scale(1.0f); - Quaternion rotation; - - for (size_t i=0; igetType()) { - case AbstractAnimationTrack::ETranslationX: - translation.x = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::ETranslationY: - translation.y = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::ETranslationZ: - translation.z = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::ETranslationXYZ: - translation = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::EScaleX: - scale.x = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::EScaleY: - scale.y = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::EScaleZ: - scale.z = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::EScaleXYZ: - scale = static_cast(track)->eval(t); - break; - case AbstractAnimationTrack::ERotationQuat: - rotation = static_cast(track)->eval(t); - break; - default: - Log(EError, "Encountered an unsupported " - "animation track type: %i!", track->getType()); - } - } - - trafo = Transform::translate(translation) * - rotation.toTransform() * - Transform::scale(scale); -} - -std::string AnimatedTransform::toString() const { - if (m_tracks.size() == 0) { - return m_transform.toString(); - } else { - std::ostringstream oss; - oss << "AnimatedTransform[tracks=" << m_tracks.size() << "]"; - return oss.str(); - } -} - -MTS_IMPLEMENT_CLASS(AbstractAnimationTrack, true, Object) -MTS_IMPLEMENT_CLASS(AnimatedTransform, false, Object) -MTS_NAMESPACE_END diff --git a/src/librender/trimesh.cpp b/src/librender/trimesh.cpp index 7a85e25f..79d660d4 100644 --- a/src/librender/trimesh.cpp +++ b/src/librender/trimesh.cpp @@ -171,51 +171,19 @@ static void readHelper(Stream *stream, bool fileDoublePrecision, } } - void TriMesh::loadCompressed(Stream *_stream, int index) { ref stream = _stream; if (stream->getByteOrder() != Stream::ELittleEndian) Log(EError, "Tried to unserialize a shape from a stream, " - "which was not previously set to little endian byte order!"); + "which was not previously set to little endian byte order!"); - short format = stream->readShort(); - if (format == 0x1C04) - Log(EError, "Encountered a geometry file generated by an old " - "version of Mitsuba. 
Please re-import the scene to update this file " - "to the current format."); - - if (format != MTS_FILEFORMAT_HEADER) - Log(EError, "Encountered an invalid file format!"); - - short version = stream->readShort(); - if (version != MTS_FILEFORMAT_VERSION_V3 && - version != MTS_FILEFORMAT_VERSION_V4) - Log(EError, "Encountered an incompatible file version!"); + const short version = readHeader(stream); if (index != 0) { - size_t streamSize = stream->getSize(); - - /* Determine the position of the requested substream. This - is stored at the end of the file */ - stream->seek(streamSize - sizeof(uint32_t)); - uint32_t count = stream->readUInt(); - if (index < 0 || index > (int) count) { - Log(EError, "Unable to unserialize mesh, " - "shape index is out of range! (requested %i out of 0..%i)", - index, count-1); - } - - // Seek to the correct position - if (version == MTS_FILEFORMAT_VERSION_V4) { - stream->seek(stream->getSize() - sizeof(uint64_t) * (count-index) - sizeof(uint32_t)); - stream->seek(stream->readSize()); - } else { - stream->seek(stream->getSize() - sizeof(uint32_t) * (count-index + 1)); - stream->seek(stream->readUInt()); - } - - stream->skip(sizeof(short) * 2); + const size_t offset = readOffset(stream, version, index); + stream->seek(offset); + stream->skip(sizeof(short) * 2); // Skip the header } stream = new ZStream(stream); @@ -282,6 +250,91 @@ void TriMesh::loadCompressed(Stream *_stream, int index) { m_flipNormals = false; } +short TriMesh::readHeader(Stream *stream) { + short format = stream->readShort(); + if (format == 0x1C04) { + Log(EError, "Encountered a geometry file generated by an old " + "version of Mitsuba. Please re-import the scene to update this file " + "to the current format."); + } + if (format != MTS_FILEFORMAT_HEADER) { + Log(EError, "Encountered an invalid file format!"); + } + short version = stream->readShort(); + if (version != MTS_FILEFORMAT_VERSION_V3 && + version != MTS_FILEFORMAT_VERSION_V4) { + Log(EError, "Encountered an incompatible file version!"); + } + return version; +} + +size_t TriMesh::readOffset(Stream *stream, short version, int idx) { + const size_t streamSize = stream->getSize(); + + /* Determine the position of the requested substream. This is stored + at the end of the file */ + stream->seek(streamSize - sizeof(uint32_t)); + uint32_t count = stream->readUInt(); + if (idx < 0 || idx > (int) count) { + Log(EError, "Unable to unserialize mesh, " + "shape index is out of range! 
(requested %i out of 0..%i)", + idx, count-1); + } + + // Seek to the correct position + if (version == MTS_FILEFORMAT_VERSION_V4) { + stream->seek(stream->getSize() - sizeof(uint64_t) * (count-idx) - sizeof(uint32_t)); + return stream->readSize(); + } else { + Assert(version == MTS_FILEFORMAT_VERSION_V3); + stream->seek(stream->getSize() - sizeof(uint32_t) * (count-idx + 1)); + return stream->readUInt(); + } +} + +int TriMesh::readOffsetDictionary(Stream *stream, short version, + std::vector& outOffsets) { + const size_t streamSize = stream->getSize(); + stream->seek(streamSize - sizeof(uint32_t)); + const uint32_t count = stream->readUInt(); + + // Check if the stream is large enough to contain that number of meshes + const size_t minSize = sizeof(uint32_t) + count * + ( 2*sizeof(uint16_t) // Header + + sizeof(uint32_t) // Flags + + sizeof(char) // Name + + 2*sizeof(uint64_t) // Number of vertices and triangles + + 3*sizeof(float) // One vertex + + 3*sizeof(uint32_t)); // One triangle + + if (streamSize >= minSize) { + outOffsets.resize(count); + if (version == MTS_FILEFORMAT_VERSION_V4) { + stream->seek(stream->getSize() - sizeof(uint64_t) * count - sizeof(uint32_t)); + if (typeid(size_t) == typeid(uint64_t)) { + stream->readArray(&outOffsets[0], count); + } else { + for (size_t i = 0; i < count; ++i) + outOffsets[i] = stream->readSize(); + } + } else { + stream->seek(stream->getSize() - sizeof(uint32_t) * (count + 1)); + Assert(version == MTS_FILEFORMAT_VERSION_V3); + if (typeid(size_t) == typeid(uint32_t)) { + stream->readArray(&outOffsets[0], count); + } else { + for (size_t i = 0; i < count; ++i) { + outOffsets[i] = (size_t) stream->readUInt(); + } + } + } + return count; + } else { + Log(EDebug, "The serialized mesh does not contain a valid dictionary"); + return -1; + } +} + TriMesh::~TriMesh() { if (m_positions) delete[] m_positions; @@ -367,7 +420,8 @@ void TriMesh::samplePosition(PositionSamplingRecord &pRec, Point2 sample(_sample); size_t index = m_areaDistr.sampleReuse(sample.y); - pRec.p = m_triangles[index].sample(m_positions, m_normals, pRec.n, sample); + pRec.p = m_triangles[index].sample(m_positions, m_normals, + m_texcoords, pRec.n, pRec.uv, sample); pRec.pdf = m_invSurfaceArea; pRec.measure = EArea; } @@ -541,7 +595,7 @@ void TriMesh::rebuildTopology(Float maxAngle) { configure(); } -void TriMesh::computeNormals() { +void TriMesh::computeNormals(bool force) { int invalidNormals = 0; if (m_faceNormals) { if (m_normals) { @@ -557,7 +611,7 @@ void TriMesh::computeNormals() { } } } else { - if (m_normals) { + if (m_normals && !force) { if (m_flipNormals) { for (size_t i=0; igetType() & BSDF::EAnisotropic; if (anisotropic) @@ -653,7 +708,7 @@ void TriMesh::computeUVTangents() { Normal n = Normal(cross(dP1, dP2)); Float length = n.length(); if (length == 0) { - ++degenerate; + // ++degenerate; continue; } @@ -669,9 +724,12 @@ void TriMesh::computeUVTangents() { } } - if (degenerate > 0) - Log(EWarn, "\"%s\": computeTangentSpace(): Mesh contains %i " - "degenerate triangles!", getName().c_str(), degenerate); + #if 0 + /* Don't be so noisy -- this isn't usually a problem.. 
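
Editorial aside, not part of the patch: readOffsetDictionary() above locates the per-mesh offset table that serialized geometry files store at their end, a trailing uint32 mesh count preceded by one uint64 offset per mesh in version-4 files (uint32 offsets in version 3). The standalone sketch below performs the same lookup for a little-endian version-4 file; the file name 'meshes.serialized' is a placeholder and the sanity checks of the C++ code are omitted.

    import struct

    def read_offset_dictionary(path):
        # Mirrors TriMesh::readOffsetDictionary for MTS_FILEFORMAT_VERSION_V4:
        # the file ends with count * <uint64 offset> followed by <uint32 count>.
        with open(path, 'rb') as f:
            f.seek(-4, 2)
            count = struct.unpack('<I', f.read(4))[0]
            f.seek(-(4 + 8 * count), 2)
            return struct.unpack('<%dQ' % (count,), f.read(8 * count))

    print(read_offset_dictionary('meshes.serialized'))
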
*/ + if (degenerate > 0) + Log(EWarn, "\"%s\": computeTangentSpace(): Mesh contains %i " + "degenerate triangles!", getName().c_str(), degenerate); + #endif } void TriMesh::getNormalDerivative(const Intersection &its, diff --git a/src/librender/vpl.cpp b/src/librender/vpl.cpp index def62936..e23dadd6 100644 --- a/src/librender/vpl.cpp +++ b/src/librender/vpl.cpp @@ -30,7 +30,7 @@ static void appendVPL(const Scene *scene, Random *random, const Sensor *sensor = scene->getSensor(); Float time = sensor->getShutterOpen() - + 0.5f * sensor->getShutterOpenTime(); + + sensor->getShutterOpenTime() * random->nextFloat(); if (prune) { /* Possibly reject VPLs if they are unlikely to be @@ -86,11 +86,12 @@ size_t generateVPLs(const Scene *scene, Random *random, sampler = static_cast (PluginManager::getInstance()-> createObject(MTS_CLASS(Sampler), props)); sampler->configure(); + sampler->generate(Point2i(0)); } const Sensor *sensor = scene->getSensor(); Float time = sensor->getShutterOpen() - + 0.5f * sensor->getShutterOpenTime(); + + sensor->getShutterOpenTime() * sampler->next1D(); const Frame stdFrame(Vector(1,0,0), Vector(0,1,0), Vector(0,0,1)); @@ -110,6 +111,7 @@ size_t generateVPLs(const Scene *scene, Random *random, if (!emitter->isEnvironmentEmitter() && emitter->needsDirectionSample()) { VPL lumVPL(EPointEmitterVPL, weight); lumVPL.its.p = pRec.p; + lumVPL.its.time = time; lumVPL.its.shFrame = pRec.n.isZero() ? stdFrame : Frame(pRec.n); lumVPL.emitter = emitter; appendVPL(scene, random, lumVPL, prune, vpls); @@ -128,6 +130,7 @@ size_t generateVPLs(const Scene *scene, Random *random, VPL lumVPL(EDirectionalEmitterVPL, weight2); lumVPL.its.p = Point(0.0); + lumVPL.its.time = time; lumVPL.its.shFrame = Frame(-diRec.d); lumVPL.emitter = emitter; appendVPL(scene, random, lumVPL, false, vpls); diff --git a/src/medium/heterogeneous.cpp b/src/medium/heterogeneous.cpp index bc5e6fd0..318017da 100644 --- a/src/medium/heterogeneous.cpp +++ b/src/medium/heterogeneous.cpp @@ -647,6 +647,8 @@ public: mRec.sigmaS = albedo * densityAtT; mRec.sigmaA = Spectrum(densityAtT) - mRec.sigmaS; mRec.transmittance = Spectrum(densityAtT != 0.0f ? 1.0f / densityAtT : 0); + if (!std::isfinite(mRec.transmittance[0])) // prevent rare overflow warnings + mRec.transmittance = Spectrum(0.0f); mRec.orientation = m_orientation != NULL ? 
m_orientation->lookupVector(p) : Vector(0.0f); mRec.medium = this; diff --git a/src/medium/homogeneous.cpp b/src/medium/homogeneous.cpp index 2fdde4ae..1376b350 100644 --- a/src/medium/homogeneous.cpp +++ b/src/medium/homogeneous.cpp @@ -276,7 +276,8 @@ public: Sampler *sampler) const { Float rand = sampler->next1D(), sampledDistance; Float samplingDensity = m_samplingDensity; - if (rand <= m_mediumSamplingWeight) { + + if (rand < m_mediumSamplingWeight) { rand /= m_mediumSamplingWeight; if (m_strategy != EMaximum) { /* Choose the sampling density to be used */ diff --git a/src/medium/materials.h b/src/medium/materials.h index b31de859..50812f01 100644 --- a/src/medium/materials.h +++ b/src/medium/materials.h @@ -161,8 +161,12 @@ static void lookupMaterial(const Properties &props, Spectrum &sigmaS, Spectrum & sigmaA = sigmaT - sigmaS; } - if (props.hasProperty("g")) - g = Spectrum(props.getFloat("g")); + if (props.hasProperty("g")) { + if (props.getType("g") == Properties::ESpectrum) + g = props.getSpectrum("g"); + else + g = Spectrum(props.getFloat("g")); + } if (g.min() <= -1 || g.max() >= 1) SLog(EError, "The anisotropy parameter 'g' must be in the range (-1, 1)!"); diff --git a/src/medium/maxexp.h b/src/medium/maxexp.h index 010dbfab..2d38c666 100644 --- a/src/medium/maxexp.h +++ b/src/medium/maxexp.h @@ -19,6 +19,10 @@ #if !defined(__MAXEXP_H) #define __MAXEXP_H +#if defined(_MSC_VER) +# include +#endif + MTS_NAMESPACE_BEGIN class MaxExpDist { diff --git a/src/mitsuba/CMakeLists.txt b/src/mitsuba/CMakeLists.txt index 36d6ca8b..6baf6e4d 100644 --- a/src/mitsuba/CMakeLists.txt +++ b/src/mitsuba/CMakeLists.txt @@ -5,9 +5,9 @@ endif() include_directories(${XERCES_INCLUDE_DIRS}) -add_mts_exe(mtssrv mtssrv.cpp +add_mts_exe(mtssrv mtssrv.cpp ${MTS_WINDOWS_STUB} RES_DESCRIPTION "Mitsuba compute node application") -add_mts_exe(mitsuba mitsuba.cpp +add_mts_exe(mitsuba mitsuba.cpp ${MTS_WINDOWS_STUB} RES_DESCRIPTION "Mitsuba command line interface frontend") -add_mts_exe(mtsutil mtsutil.cpp ${MTS_DARWIN_STUB} +add_mts_exe(mtsutil mtsutil.cpp ${MTS_DARWIN_STUB} ${MTS_WINDOWS_STUB} RES_DESCRIPTION "Mitsuba utility launcher") diff --git a/src/mitsuba/SConscript b/src/mitsuba/SConscript index 9a2abd4d..82bffc2a 100644 --- a/src/mitsuba/SConscript +++ b/src/mitsuba/SConscript @@ -1,4 +1,4 @@ -Import('sys', 'env', 'hasCollada', 'stubs') +Import('sys', 'env', 'hasCollada', 'stubs', 'winstubs') # Create an environment with Xerces and OpenGL mainEnv = env.Clone() @@ -24,10 +24,10 @@ if sys.platform == 'darwin': stubs += [mainEnv_osx.StaticObject('darwin_stub.mm')] mainEnv.Append(LINKFLAGS=['-Xlinker', '-rpath', '-Xlinker', '@executable_path/../Frameworks']) -mainEnv.Program('mtsutil', stubs + ['mtsutil.cpp']) +mainEnv.Program('mtsutil', stubs + winstubs + ['mtsutil.cpp']) # Build the command-line+GUI interface -mainEnv.Program('mtssrv', ['mtssrv.cpp']) -mainEnv.Program('mitsuba', ['mitsuba.cpp']) +mainEnv.Program('mtssrv', winstubs + ['mtssrv.cpp']) +mainEnv.Program('mitsuba', winstubs + ['mitsuba.cpp']) Return('mainEnv') diff --git a/src/mitsuba/mitsuba.cpp b/src/mitsuba/mitsuba.cpp index 4b265d51..f06aa76d 100644 --- a/src/mitsuba/mitsuba.cpp +++ b/src/mitsuba/mitsuba.cpp @@ -44,7 +44,7 @@ #include #endif -XERCES_CPP_NAMESPACE_USE +using XERCES_CPP_NAMESPACE::SAXParser; using namespace mitsuba; @@ -124,8 +124,9 @@ private: int m_timeout; }; -int mts_main(int argc, char **argv) { - char optchar, *end_ptr = NULL; +int mitsuba_app(int argc, char **argv) { + int optchar; + char *end_ptr = NULL; try { /* 
Default settings */ @@ -397,7 +398,7 @@ int mts_main(int argc, char **argv) { return 0; } -int main(int argc, char **argv) { +int mts_main(int argc, char **argv) { /* Initialize the core framework */ Class::staticInitialization(); Object::staticInitialization(); @@ -426,7 +427,7 @@ int main(int argc, char **argv) { setlocale(LC_NUMERIC, "C"); #endif - int retval = mts_main(argc, argv); + int retval = mitsuba_app(argc, argv); /* Shutdown the core framework */ SceneHandler::staticShutdown(); @@ -449,3 +450,9 @@ int main(int argc, char **argv) { return retval; } + +#if !defined(__WINDOWS__) +int main(int argc, char **argv) { + return mts_main(argc, argv); +} +#endif diff --git a/src/mitsuba/mtssrv.cpp b/src/mitsuba/mtssrv.cpp index 92739dfc..a4625cd3 100644 --- a/src/mitsuba/mtssrv.cpp +++ b/src/mitsuba/mtssrv.cpp @@ -90,8 +90,9 @@ void collect_zombies(int s) { } #endif -int mts_main(int argc, char **argv) { - char optchar, *end_ptr = NULL; +int mtssrv(int argc, char **argv) { + int optchar; + char *end_ptr = NULL; try { /* Default settings */ @@ -382,7 +383,7 @@ int mts_main(int argc, char **argv) { return 0; } -int main(int argc, char **argv) { +int mts_main(int argc, char **argv) { /* Initialize the core framework */ Class::staticInitialization(); Object::staticInitialization(); @@ -409,7 +410,7 @@ int main(int argc, char **argv) { setlocale(LC_NUMERIC, "C"); #endif - int retval = mts_main(argc, argv); + int retval = mtssrv(argc, argv); /* Shutdown the core framework */ SHVector::staticShutdown(); @@ -432,3 +433,9 @@ int main(int argc, char **argv) { return retval; } + +#if !defined(__WINDOWS__) +int main(int argc, char **argv) { + return mts_main(argc, argv); +} +#endif diff --git a/src/mitsuba/mtsutil.cpp b/src/mitsuba/mtsutil.cpp index 8ab092b1..b9365954 100644 --- a/src/mitsuba/mtsutil.cpp +++ b/src/mitsuba/mtsutil.cpp @@ -23,7 +23,6 @@ #if defined(Assert) # undef Assert #endif -#include #include #include #include @@ -137,7 +136,8 @@ void help() { int mtsutil(int argc, char **argv) { - char optchar, *end_ptr = NULL; + int optchar; + char *end_ptr = NULL; try { /* Default settings */ @@ -436,10 +436,8 @@ int mts_main(int argc, char **argv) { return retval; } -#if !defined(__OSX__) +#if !defined(__OSX__) && !defined(__WINDOWS__) int main(int argc, char **argv) { return mts_main(argc, argv); } #endif - - diff --git a/src/mtsgui/CMakeLists.txt b/src/mtsgui/CMakeLists.txt index 8f0aed2c..5701cdbc 100644 --- a/src/mtsgui/CMakeLists.txt +++ b/src/mtsgui/CMakeLists.txt @@ -134,9 +134,9 @@ include_directories(. 
${CMAKE_CURRENT_BINARY_DIR}) QT4_ADD_RESOURCES(RC_SRCS ${RC_FILES}) source_group("Qt Resources" FILES ${RC_FILES}) -# Add our own qtmain +# Add our own qtmain and use the static file dialogs if (WIN32) - add_definitions(-D MTS_CUSTOM_QTMAIN) + add_definitions(-D MTS_CUSTOM_QTMAIN -DMTSGUI_STATIC_QFILEDIALOG=1) list(APPEND SRCS qtmain_win.cpp) endif() @@ -169,7 +169,7 @@ add_mts_exe(mtsgui ${MTS_EXE_TYPE} ${SRCS} ${HDRS} PCH "${CMAKE_CURRENT_SOURCE_DIR}/pch/mtsgui_precompiled.hpp" RES_ICON "${PROJECT_SOURCE_DIR}/data/windows/mitsuba.ico" RES_DESCRIPTION "Mitsuba interactive Qt-based frontend" - LINK_LIBRARIES ${LIBS}) + MTS_HW LINK_LIBRARIES ${LIBS}) # Custom targets to copy the bundle files and the PreviewSettings objects diff --git a/src/mtsgui/SConscript b/src/mtsgui/SConscript index 19f0f37e..c4c4bc33 100644 --- a/src/mtsgui/SConscript +++ b/src/mtsgui/SConscript @@ -1,4 +1,4 @@ -Import('env', 'os', 'glob', 'sys', 'hasQt', 'hasCollada', 'hasBreakpad', 'mainEnv', +Import('env', 'os', 'glob', 'sys', 'hasQt', 'hasCollada', 'hasBreakpad', 'mainEnv', 'resources', 'converter_objects') # For running Uic & Moc (below) @@ -33,9 +33,8 @@ if hasQt: del qtEnv['CXXFLAGS'][index-1] index = qtEnv['LINKFLAGS'].index('/SUBSYSTEM:CONSOLE') del qtEnv['LINKFLAGS'][index] - qtEnv.Append(CXXFLAGS=['/D', '_WINDOWS']) + qtEnv.Append(CXXFLAGS=['/D', '_WINDOWS', '/D', 'MTS_CUSTOM_QTMAIN', '/D', 'MTSGUI_STATIC_QFILEDIALOG=1']) qtEnv.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS']) - qtEnv.Append(LIBS=['qtmain']) if hasBreakpad: qtEnv.Append(CPPPATH=['#dependencies/include/breakpad']) qtEnv.Append(LIBS=['breakpad_common', 'breakpad_exception_handler', 'breakpad_crash_generation_client', @@ -59,7 +58,7 @@ if hasQt: qtEnv.Prepend(LIBPATH=env['COLLADALIBDIR']) if env.has_key('COLLADALIB'): qtEnv.Prepend(LIBS=env['COLLADALIB']) - + if sys.platform == 'darwin': mainEnv_osx = mainEnv.Clone() qtEnv_osx = qtEnv.Clone() @@ -72,6 +71,7 @@ if hasQt: qtSources += qtEnv_osx.StaticObject('breakpad.mm') else: qtSources = [x for x in qtSources if (not isinstance(x, str) or 'cocoa' not in x)] + mtsgui = qtEnv.Program('mtsgui', qtSources) if sys.platform == 'darwin': qtEnv.AddPostAction(mtsgui, 'install_name_tool -change QtGui.framework/Versions/4/QtGui @rpath/QtGui $TARGET') diff --git a/src/mtsgui/aboutdlg.cpp b/src/mtsgui/aboutdlg.cpp index 191cfa43..aa541777 100644 --- a/src/mtsgui/aboutdlg.cpp +++ b/src/mtsgui/aboutdlg.cpp @@ -82,6 +82,7 @@ AboutDialog::AboutDialog(QWidget *parent) : SPECTRUM_SAMPLES).c_str(); ui->label->setText(ui->label->text().replace("MTS_VERSION", MTS_VERSION)); + ui->label->setText(ui->label->text().replace("MTS_YEAR", MTS_YEAR)); ui->label->setText(ui->label->text().replace("CONFIG_FLAGS", configFlags)); #if defined(__OSX__) diff --git a/src/mtsgui/aboutdlg.ui b/src/mtsgui/aboutdlg.ui index dc53785f..26fe8301 100644 --- a/src/mtsgui/aboutdlg.ui +++ b/src/mtsgui/aboutdlg.ui @@ -81,7 +81,7 @@ p, li { white-space: pre-wrap; } <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">About Mitsuba</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Version MTS_VERSION<br />Configuration flags: <span style=" font-family:'Courier New,courier';">CONFIG_FLAGS</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Mitsuba is an extensible rendering framework written in 
portable C++. It implements unbiased as well as biased techniques and contains heavy optimizations targeted towards current CPU architectures.</p> -<p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Copyright © 2012 Wenzel Jakob &lt;<a href="mailto:wenzel@cs.cornell.edu"><span style=" text-decoration: underline; color:#0000ff;">wenzel@cs.cornell.edu</span></a>&gt;</p> +<p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Copyright © MTS_YEAR Wenzel Jakob &lt;<a href="mailto:wenzel@cs.cornell.edu"><span style=" text-decoration: underline; color:#0000ff;">wenzel@cs.cornell.edu</span></a>&gt;</p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Licensed under the <a href="http://www.gnu.org/licenses/gpl-3.0.html"><span style=" text-decoration: underline; color:#0000ff;">GNU GPL, Version 3</span></a>.</p></body></html> diff --git a/src/mtsgui/glwidget.cpp b/src/mtsgui/glwidget.cpp index cfb93cc9..9d7afab1 100644 --- a/src/mtsgui/glwidget.cpp +++ b/src/mtsgui/glwidget.cpp @@ -400,7 +400,7 @@ void GLWidget::timerImpulse() { Point origin = (1-t) * m_animationOrigin0 + t * m_animationOrigin1; Point target = (1-t) * m_animationTarget0 + t * m_animationTarget1; - setInverseViewTransform( + setWorldTransform( Transform::lookAt(origin, target, m_context->up)); if (x == 1.0f) @@ -414,18 +414,18 @@ void GLWidget::timerImpulse() { * m_clock->getMilliseconds(); if (m_leftKeyDown) - setInverseViewTransform(getInverseViewTransform() * + setWorldTransform(getWorldTransform() * Transform::translate(Vector(delta,0,0))); if (m_rightKeyDown) - setInverseViewTransform(getInverseViewTransform() * + setWorldTransform(getWorldTransform() * Transform::translate(Vector(-delta,0,0))); if (m_downKeyDown) { - setInverseViewTransform(getInverseViewTransform() * + setWorldTransform(getWorldTransform() * Transform::translate(Vector(0,0,-delta))); camera->setFocusDistance(camera->getFocusDistance() + delta); } if (m_upKeyDown) { - setInverseViewTransform(getInverseViewTransform() * + setWorldTransform(getWorldTransform() * Transform::translate(Vector(0,0,delta))); camera->setFocusDistance(camera->getFocusDistance() - delta); } @@ -589,7 +589,7 @@ void GLWidget::mouseMoveEvent(QMouseEvent *event) { if (!camera || !m_preview->isRunning()) return; - Transform invView = getInverseViewTransform(); + Transform invView = getWorldTransform(); Point p = invView(Point(0,0,0)); Vector d = invView(Vector(0,0,1)); bool didMove = false; @@ -622,7 +622,7 @@ void GLWidget::mouseMoveEvent(QMouseEvent *event) { if (coords.x < 0 || coords.x > M_PI) m_context->up *= -1; - setInverseViewTransform(Transform::lookAt(p, target, m_context->up)); + setWorldTransform(Transform::lookAt(p, target, m_context->up)); } else { Float yaw = -.03f * rel.x() * m_mouseSensitivity; Float pitch = .03f * rel.y() * m_mouseSensitivity; @@ -634,11 +634,11 @@ void GLWidget::mouseMoveEvent(QMouseEvent *event) { * Transform::rotate(Vector(0,1,0), yaw); d = trafo(Vector(0,0,1)); - setInverseViewTransform(Transform::lookAt(p, p+d, up)); + setWorldTransform(Transform::lookAt(p, p+d, up)); } didMove = true; } else if (event->buttons() & Qt::MidButton) { - setInverseViewTransform(invView * + setWorldTransform(invView * Transform::translate(Vector((Float) rel.x(), (Float) rel.y(), 0) * m_mouseSensitivity * .6f * m_context->movementScale)); didMove = true; @@ -648,7 
+648,7 @@ void GLWidget::mouseMoveEvent(QMouseEvent *event) { Float fovChange = rel.y() * m_mouseSensitivity * .03f; m_context->up = Transform::rotate(d, isRightHanded() ? -roll : roll)(up); - setInverseViewTransform(Transform::lookAt(p, p+d, m_context->up)); + setWorldTransform(Transform::lookAt(p, p+d, m_context->up)); camera->setXFov(std::min(std::max((Float) 1.0f, camera->getXFov() + fovChange), (Float) 160.0f)); @@ -671,7 +671,7 @@ void GLWidget::mouseMoveEvent(QMouseEvent *event) { camera->setFocusDistance(focusDistance); p = p + (oldFocusDistance - focusDistance) * d; - setInverseViewTransform(Transform::lookAt(p, p+d, up)); + setWorldTransform(Transform::lookAt(p, p+d, up)); } didMove = true; } @@ -829,14 +829,14 @@ void GLWidget::wheelEvent(QWheelEvent *event) { camera->setFocusDistance(focusDistance); - Transform invView = getInverseViewTransform(); + Transform invView = getWorldTransform(); Point p = invView(Point(0,0,0)); Vector d = invView(Vector(0,0,1)); Vector up = invView(Vector(0,1,0)); p = p + (oldFocusDistance - focusDistance) * d; - setInverseViewTransform(Transform::lookAt(p, p+d, up)); + setWorldTransform(Transform::lookAt(p, p+d, up)); m_wheelTimer->reset(); if (!m_movementTimer->isActive()) @@ -863,9 +863,9 @@ Float GLWidget::autoFocus() const { for (size_t sampleIndex=0; sampleIndex<200; ++sampleIndex) { Point2 sample( - radicalInverse(2, sampleIndex) * size.x, - radicalInverse(3, sampleIndex) * size.y); - camera->sampleRay(ray, sample, Point2(0.5f), 0.5f); + radicalInverse(2, sampleIndex), + radicalInverse(3, sampleIndex)); + camera->sampleRay(ray, Point2(sample.x * size.x, sample.y*size.y), Point2(0.5f), 0.5f); if (scene->rayIntersect(ray, t, ptr, n, uv)) { Float weight = math::fastexp(-0.5 / variance * ( std::pow(sample.x - 0.5f, (Float) 2) + @@ -1400,7 +1400,7 @@ void GLWidget::reveal(const AABB &aabb) { if (!camera) return; - Transform invView = getInverseViewTransform(); + Transform invView = getWorldTransform(); Point p = invView(Point(0,0,0)); Vector d = invView(Vector(0,0,1)); diff --git a/src/mtsgui/glwidget.h b/src/mtsgui/glwidget.h index a8a6b5a7..9919a20d 100644 --- a/src/mtsgui/glwidget.h +++ b/src/mtsgui/glwidget.h @@ -151,23 +151,23 @@ protected: static_cast(sensor) : NULL; } - inline Transform getInverseViewTransform() const { + inline Transform getWorldTransform() const { const ProjectiveCamera *camera = getProjectiveCamera(); - return camera->getInverseViewTransform( + return camera->getWorldTransform( camera->getShutterOpen() + 0.5f * camera->getShutterOpenTime() ); } inline bool isRightHanded() { - return getInverseViewTransform().det3x3() > 0; + return getWorldTransform().det3x3() > 0; } - inline void setInverseViewTransform(const Transform &trafo) { + inline void setWorldTransform(const Transform &trafo) { /* Preserve the handedness of the current camera transformation */ - if (getInverseViewTransform().det3x3() * trafo.det3x3() > 0) - getProjectiveCamera()->setInverseViewTransform(trafo); + if (getWorldTransform().det3x3() * trafo.det3x3() > 0) + getProjectiveCamera()->setWorldTransform(trafo); else - getProjectiveCamera()->setInverseViewTransform(trafo * + getProjectiveCamera()->setWorldTransform(trafo * Transform::scale(Vector(-1,1,1))); } diff --git a/src/mtsgui/importdlg.cpp b/src/mtsgui/importdlg.cpp index 5aad3379..060cd34a 100644 --- a/src/mtsgui/importdlg.cpp +++ b/src/mtsgui/importdlg.cpp @@ -53,15 +53,30 @@ void ImportDialog::changeEvent(QEvent *e) { } void ImportDialog::on_inputBrowse_clicked(bool checked) { - QFileDialog 
dialog(this); - dialog.setNameFilter(tr("All supported formats (*.dae *.zae *.obj);;" + const QString filter(tr("All supported formats (*.dae *.zae *.obj);;" "COLLADA 1.4 scenes (*.dae *.zae);; Wavefront OBJ scenes (*.obj)")); + QString fname; +#if MTSGUI_STATIC_QFILEDIALOG + QSettings settings; + const QString currInput = ui->inputEdit->text(); + const QString initialDir(currInput.isEmpty() ? + settings.value("importDir").toString() : + QFileInfo(currInput).absolutePath()); + fname = QFileDialog::getOpenFileName(this, QString(), initialDir, filter); + if (!fname.isEmpty()) { + settings.setValue("importDir", QFileInfo(fname).absolutePath()); + } +#else + QFileDialog dialog(this); + dialog.setNameFilter(filter); dialog.setAcceptMode(QFileDialog::AcceptOpen); dialog.setViewMode(QFileDialog::Detail); dialog.setWindowModality(Qt::ApplicationModal); - if (dialog.exec()) { - QString fname = dialog.selectedFiles()[0]; + fname = dialog.selectedFiles()[0]; + } +#endif + if (!fname.isEmpty()) { ui->inputEdit->setText(fname); QFileInfo info(fname); ui->directoryEdit->setText(info.absoluteDir().absolutePath()); @@ -71,25 +86,47 @@ void ImportDialog::on_inputBrowse_clicked(bool checked) { } void ImportDialog::on_directoryBrowse_clicked(bool checked) { + QString dirName; +#if MTSGUI_STATIC_QFILEDIALOG + QSettings settings; + QString initialDir = ui->directoryEdit->text(); + dirName = QFileDialog::getExistingDirectory(this, QString(), initialDir); +#else QFileDialog dialog(this); dialog.setAcceptMode(QFileDialog::AcceptOpen); dialog.setFileMode(QFileDialog::DirectoryOnly); dialog.setWindowModality(Qt::ApplicationModal); if (dialog.exec()) { - QString fname = dialog.selectedFiles()[0]; - ui->directoryEdit->setText(fname); + dirName = dialog.selectedFiles()[0]; + } +#endif + if (!dirName.isEmpty()) { + ui->directoryEdit->setText(dirName); refresh(); } } void ImportDialog::on_adjustmentBrowse_clicked(bool checked) { + const QString filter(tr("Import adjustment files (*.xml)")); + QString fname; +#if MTSGUI_STATIC_QFILEDIALOG + QString currFile = ui->adjustmentEdit->text(); + if (currFile.isEmpty()) { + currFile = ui->inputEdit->text(); + } + fname = QFileDialog::getOpenFileName(this, QString(), + QFileInfo(currFile).absolutePath(), filter); +#else QFileDialog dialog(this); - dialog.setNameFilter(tr("Import adjustment files (*.xml)")); + dialog.setNameFilter(filter); dialog.setAcceptMode(QFileDialog::AcceptOpen); dialog.setViewMode(QFileDialog::Detail); dialog.setWindowModality(Qt::ApplicationModal); if (dialog.exec()) { - QString fname = dialog.selectedFiles()[0]; + fname = dialog.selectedFiles()[0]; + } +#endif + if (!fname.isEmpty()) { ui->adjustmentEdit->setText(fname); refresh(); } diff --git a/src/mtsgui/locateresourcedlg.cpp b/src/mtsgui/locateresourcedlg.cpp index 761c7ed6..728bd362 100644 --- a/src/mtsgui/locateresourcedlg.cpp +++ b/src/mtsgui/locateresourcedlg.cpp @@ -28,13 +28,21 @@ LocateResourceDialog::LocateResourceDialog(QWidget *parent, const QString &resNa } void LocateResourceDialog::on_pathBrowse_clicked() { + QString fname; +#if MTSGUI_STATIC_QFILEDIALOG + QSettings settings; + fname = QFileDialog::getOpenFileName(this, QString(), + settings.value("importDir").toString()); +#else QFileDialog dialog(this); dialog.setAcceptMode(QFileDialog::AcceptOpen); dialog.setViewMode(QFileDialog::Detail); dialog.setWindowModality(Qt::ApplicationModal); - if (dialog.exec()) { - QString fname = dialog.selectedFiles()[0]; + fname = dialog.selectedFiles()[0]; + } +#endif + if (!fname.isEmpty()) { 
ui->pathEdit->setText(fname); m_filename = fname; ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(true); diff --git a/src/mtsgui/main.cpp b/src/mtsgui/main.cpp index 0fcdc65b..c4dd09e9 100644 --- a/src/mtsgui/main.cpp +++ b/src/mtsgui/main.cpp @@ -45,7 +45,6 @@ #include #endif -XERCES_CPP_NAMESPACE_USE using namespace mitsuba; MainWindow *mainWindow = NULL; diff --git a/src/mtsgui/mainwindow.cpp b/src/mtsgui/mainwindow.cpp index f983982c..9ca8bf52 100644 --- a/src/mtsgui/mainwindow.cpp +++ b/src/mtsgui/mainwindow.cpp @@ -223,7 +223,7 @@ MainWindow::MainWindow(QWidget *parent) : move(windowPos); #endif show(); - /* Move again just to be sure.. */ + /* Move again just to be sure ... */ move(windowPos); updateUI(); @@ -274,7 +274,7 @@ void MainWindow::initWorkers() { QDialog *dialog = new NonClosableDialog(this); dialog->setWindowModality(Qt::WindowModal); QVBoxLayout *layout = new QVBoxLayout(dialog); - QLabel *label = new QLabel(tr("Establishing network connections .."), dialog); + QLabel *label = new QLabel(tr("Establishing network connections ..."), dialog); label->setAlignment(Qt::AlignCenter); layout->addWidget(label); QProgressBar *progressBar = new QProgressBar(dialog); @@ -538,6 +538,30 @@ void MainWindow::onProgressMessage(const RenderJob *job, const QString &name, updateUI(); } +#if MTSGUI_STATIC_QFILEDIALOG + +void MainWindow::on_actionOpen_triggered() { + QSettings settings; + QStringList fileNames = QFileDialog::getOpenFileNames(this, QString(), + settings.value("fileDir").toString(), + tr("All supported formats (*.xml *.exr *.rgbe *.hdr *.pfm *.png *.jpg *.jpeg);;" + "Mitsuba scenes (*.xml);;High dynamic-range images (*.exr *.rgbe *.hdr *.pfm);;" + "Low dynamic-range images (*.png *.jpg *.jpeg)")); + + QStringList::ConstIterator it = fileNames.constBegin(); + if (it != fileNames.constEnd()) { + QFileInfo info(*it); + settings.setValue("fileDir", info.absolutePath()); + } + for ( ; it != fileNames.constEnd(); ++it) { + loadFile(*it); + } +} + +void MainWindow::onOpenDialogClose(int reason) { /* unused */ } + +#else // MTSGUI_STATIC_QFILEDIALOG + void MainWindow::on_actionOpen_triggered() { QFileDialog *dialog = new QFileDialog(this, Qt::Sheet); dialog->setNameFilter(tr("All supported formats (*.xml *.exr *.rgbe *.hdr *.pfm *.png *.jpg *.jpeg);;" @@ -570,6 +594,8 @@ void MainWindow::onOpenDialogClose(int reason) { } } +#endif // MTSGUI_STATIC_QFILEDIALOG + void MainWindow::on_actionExit_triggered() { qApp->closeAllWindows(); } @@ -600,7 +626,7 @@ SceneContext *MainWindow::loadScene(const QString &qFileName) { ref loadingThread; loaddlg->setAttribute(Qt::WA_DeleteOnClose); loaddlg->setWindowModality(Qt::ApplicationModal); - loaddlg->setWindowTitle("Loading .."); + loaddlg->setWindowTitle("Loading ..."); loaddlg->show(); retry: @@ -1074,6 +1100,8 @@ bool MainWindow::isActive() { } void MainWindow::drawHLine(SceneContext *ctx, int x1, int y, int x2, const float *color) { + y = std::max(0, std::min(y, ctx->framebuffer->getHeight()-1)); + x1 = std::max(0, x1); x2 = std::min(x2, ctx->framebuffer->getWidth()-1); float *framebuffer = ctx->framebuffer->getFloat32Data(); int fbOffset = (x1 + y*ctx->framebuffer->getWidth())*4; for (int x=x1; x<=x2; x++) { @@ -1085,6 +1113,8 @@ void MainWindow::drawHLine(SceneContext *ctx, int x1, int y, int x2, const float } void MainWindow::drawVLine(SceneContext *ctx, int x, int y1, int y2, const float *color) { + x = std::max(0, std::min(x, ctx->framebuffer->getWidth()-1)); + y1 = std::max(0, y1); y2 = std::min(y2, 
ctx->framebuffer->getHeight()-1); float *framebuffer = ctx->framebuffer->getFloat32Data(); int width = ctx->framebuffer->getWidth(), fbOffset = (x + y1*width)*4; for (int y=y1; y<=y2; y++) { @@ -1405,8 +1435,30 @@ inline float toSRGB(float value) { return 1.055f * std::pow(value, 0.41666f) - 0.055f; } +#if MTSGUI_STATIC_QFILEDIALOG + void MainWindow::on_actionExportImage_triggered() { - QFileDialog *dialog = new QFileDialog(this, tr("Export image .."), + QSettings settings; + const QString fileName = QFileDialog::getSaveFileName(this, + tr("Export image ..."), settings.value("exportFileDir").toString(), + tr("All supported formats (*.exr *.hdr *.rgbe *.pfm *.png *.jpg *.jpeg);;" + "High dynamic range OpenEXR image (*.exr);;" + "High dynamic range Radiance RGBE image (*.rgbe *.hdr);;" + "High dynamic range Portable Float Map image (*.pfm);;" + "Tonemapped low dynamic range image (*.png *.jpg *.jpeg)")); + if (!fileName.isEmpty()) { + QSettings settings; + settings.setValue("exportFileDir", QFileInfo(fileName).absolutePath()); + exportImage(fileName); + } +} + +void MainWindow::onExportDialogClose(int reason) { /* unused */ } + +#else // MTSGUI_STATIC_QFILEDIALOG + +void MainWindow::on_actionExportImage_triggered() { + QFileDialog *dialog = new QFileDialog(this, tr("Export image ..."), "", tr("All supported formats (*.exr *.hdr *.rgbe *.pfm *.png *.jpg *.jpeg);;" "High dynamic range OpenEXR image (*.exr);;" "High dynamic range Radiance RGBE image (*.rgbe *.hdr);;" @@ -1434,41 +1486,52 @@ void MainWindow::on_actionExportImage_triggered() { } void MainWindow::onExportDialogClose(int reason) { - int currentIndex = ui->tabBar->currentIndex(); - SceneContext *ctx = m_context[currentIndex]; - QSettings settings; QFileDialog *dialog = static_cast(sender()); m_currentChild = NULL; - if (reason == QDialog::Accepted) { - QString fileName = dialog->selectedFiles().value(0); - Bitmap::EFileFormat format; + if (reason == QDialog::Accepted) { + QString fileName = dialog->selectedFiles().value(0); settings.setValue("fileDialogState", dialog->saveState()); + exportImage(fileName); + } +} +#endif // MTSGUI_STATIC_QFILEDIALOG + +void MainWindow::exportImage(const QString &fileName) { + if (!fileName.isEmpty()) { + Bitmap::EFileFormat format; + bool isHDR = true; if (fileName.endsWith(".exr")) { format = Bitmap::EOpenEXR; } else if (fileName.endsWith(".png")) { format = Bitmap::EPNG; + isHDR = false; } else if (fileName.endsWith(".hdr") || fileName.endsWith(".rgbe")) { format = Bitmap::ERGBE; } else if (fileName.endsWith(".jpg") || fileName.endsWith(".jpeg")) { format = Bitmap::EJPEG; + isHDR = false; } else if (fileName.endsWith(".pfm")) { format = Bitmap::EPFM; } else { - SLog(EError, "Unknown file type -- the filename must end in either .exr, .rgbe, .hdr, .pfm, .png, .jpg, or .jpeg"); + SLog(EError, "Unknown file type -- the filename must end in either" + " .exr, .rgbe, .hdr, .pfm, .png, .jpg, or .jpeg"); return; } ref fs = new FileStream(toFsPath(fileName), FileStream::ETruncReadWrite); + const int currentIndex = ui->tabBar->currentIndex(); + const SceneContext *ctx = m_context[currentIndex]; + if (ctx->mode == EPreview) ui->glView->downloadFramebuffer(); ref bitmap = ctx->framebuffer; - if (format != Bitmap::EOpenEXR && format != Bitmap::ERGBE && format != Bitmap::EPFM) { + if (!isHDR) { /* Tonemap the image */ if (ctx->toneMappingMethod == EReinhard) { Float logAvgLuminance = 0, maxLuminance = 0; /* Unused */ @@ -1482,7 +1545,7 @@ void MainWindow::onExportDialogClose(int reason) { bitmap = 
bitmap->convert(Bitmap::ERGB, Bitmap::EUInt8, ctx->srgb ? (Float) -1 : ctx->gamma, ctx->toneMappingMethod == EReinhard - ? (Float) 1.0f : std::pow((Float) 2.0f, ctx->exposure)); + ? (Float) 1.0f : std::pow((Float) 2, ctx->exposure)); } bitmap->write(format, fs); @@ -1494,8 +1557,26 @@ void MainWindow::on_actionSave_triggered() { saveScene(this, context, context->fileName); } +#if MTSGUI_STATIC_QFILEDIALOG + void MainWindow::on_actionSaveAs_triggered() { - QFileDialog *dialog = new QFileDialog(this, tr("Save as .."), + SceneContext *context = m_context[ui->tabBar->currentIndex()]; + QString fileDir = QFileInfo(context->fileName).absolutePath(); + QString fileName = QFileDialog::getSaveFileName(this, + tr("Save scene as ..."), fileDir, tr("Mitsuba scenes (*.xml)")); + if (!fileName.isEmpty()) { + QSettings settings; + settings.setValue("fileDir", QFileInfo(fileName).absolutePath()); + saveSceneAs(fileName); + } +} + +void MainWindow::onSaveAsDialogClose(int reason) { /* unused */ } + +#else // MTSGUI_STATIC_QFILEDIALOG + +void MainWindow::on_actionSaveAs_triggered() { + QFileDialog *dialog = new QFileDialog(this, tr("Save scene as ..."), "", tr("Mitsuba scenes (*.xml)")); m_currentChild = dialog; @@ -1515,19 +1596,27 @@ void MainWindow::on_actionSaveAs_triggered() { } void MainWindow::onSaveAsDialogClose(int reason) { - int currentIndex = ui->tabBar->currentIndex(); - SceneContext *context = m_context[currentIndex]; - QSettings settings; QFileDialog *dialog = static_cast(sender()); m_currentChild = NULL; if (reason == QDialog::Accepted) { QString fileName = dialog->selectedFiles().value(0); settings.setValue("fileDialogState", dialog->saveState()); + saveSceneAs(fileName); + } +} + +#endif // MTSGUI_STATIC_QFILEDIALOG + +void MainWindow::saveSceneAs(const QString &fileName) { + if (!fileName.isEmpty()) { + int currentIndex = ui->tabBar->currentIndex(); + SceneContext *context = m_context[currentIndex]; + saveScene(this, context, fileName); fs::path pathName = toFsPath(fileName), - complete = fs::absolute(pathName), - baseName = pathName.stem(); + complete = fs::absolute(pathName), + baseName = pathName.stem(); context->fileName = fileName; context->shortName = QFileInfo(fileName).fileName(); context->scene->setSourceFile(pathName); @@ -1593,7 +1682,7 @@ void MainWindow::on_actionStartServer_triggered() { void MainWindow::on_actionEnableCommandLine_triggered() { if (QMessageBox::question(this, tr("Enable command line access"), - tr("
If you proceed, Mitsuba will create symbolic links in /usr/bin and /Library/Python/{2.6,2.7}/site-packages, " + tr("
If you proceed, Mitsuba will create symbolic links in /usr/bin and /Library/Python/{2.6,2.7}/site-packages, as well as an entry in .bashrc, " "which enable command line and Python usage. Note that you will have to " "repeat this process every time the Mitsuba application is moved.
" "
Create links?
"), diff --git a/src/mtsgui/mainwindow.h b/src/mtsgui/mainwindow.h index 464ae114..2ec338d7 100644 --- a/src/mtsgui/mainwindow.h +++ b/src/mtsgui/mainwindow.h @@ -193,8 +193,8 @@ private slots: void updateStatus(); void onPreviewSettingsClose(); void onOpenDialogClose(int reason); - void onSaveAsDialogClose(int reason); void onExportDialogClose(int reason); + void onSaveAsDialogClose(int reason); void onRenderSettingsClose(int reason); void onImportDialogClose(int reason); void onSceneInformationClose(int reason); @@ -204,6 +204,9 @@ private slots: void onSelectionChanged(); private: + void exportImage(const QString &fileName); + void saveSceneAs(const QString &fileName); + Ui::MainWindow *ui; QAction *m_actRecent[MAX_RECENT_FILES]; QAction *m_clearRecent; diff --git a/src/mtsgui/pch/mtsgui_precompiled.hpp b/src/mtsgui/pch/mtsgui_precompiled.hpp index 124dcd68..e699f855 100644 --- a/src/mtsgui/pch/mtsgui_precompiled.hpp +++ b/src/mtsgui/pch/mtsgui_precompiled.hpp @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include diff --git a/src/mtsgui/qtmain_win.cpp b/src/mtsgui/qtmain_win.cpp index a9919de4..a4735053 100644 --- a/src/mtsgui/qtmain_win.cpp +++ b/src/mtsgui/qtmain_win.cpp @@ -44,12 +44,12 @@ # error This source file can only be used in Windows builds #endif -#include -#include -#include +#include +#include +#include #define WIN32_LEAN_AND_MEAN -#include +#include /* This file contains the code in the qtmain library for Windows. diff --git a/src/mtsgui/resources/docs.xml b/src/mtsgui/resources/docs.xml index 3313f9ff..dbd3b387 100644 --- a/src/mtsgui/resources/docs.xml +++ b/src/mtsgui/resources/docs.xml @@ -41,6 +41,10 @@ implicitly have strictNormals activated. Hence, another use of this parameter is to match renderings created by these methods.
+ + Hide light sources (e.g. area or environment light sources) that are directly visible to the camera? + Reflections of light sources remain unaffected. + strictNormals activated. Hence, another use of this parameter is to match renderings created by these methods.
+ + Hide light sources (e.g. area or environment light sources) that are directly visible to the camera? + Reflections of light sources remain unaffected. +
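[Editor's note: the XML markup of this docs.xml entry was lost in extraction. As a minimal, hedged sketch of how the option described above would typically be enabled in a scene file — assuming it is exposed as the boolean hideEmitters parameter of an integrator, which is not stated verbatim in the surviving text — one might write:
    <integrator type="path">
        <!-- hypothetical usage: hide directly visible light sources, keeping their reflections -->
        <boolean name="hideEmitters" value="true"/>
    </integrator>
This is an illustrative snippet, not part of the patch.]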
Number of photons to collect for the volume photon map (if applicable) Radius of lookups in the global photon map (relative to the scene size) Radius of lookups in the caustic photon map (relative to the scene size) - Number of photons that should be fetched in photon map queries + Number of photons that should be fetched in photon map queries Granularity of photon tracing work units (in shot particles, 0 => decide automatically) Specifies the minimum path depth, after which the implementation will start to use the "russian roulette" path termination criterion when tracing photons (set to -1 to disable). + + Hide light sources (e.g. area or environment light sources) that are directly visible to the camera? + Reflections of light sources remain unaffected. + times; + trafo->collectKeyframes(times); + + property = doc.createElement("animation"); + + for (std::set::iterator it2 = times.begin(); it2 != times.end(); ++it2) { + const Matrix4x4 &matrix = trafo->eval(*it2).getMatrix(); + QDomElement trafoTag = doc.createElement("transform"); + QDomElement matrixTag = doc.createElement("matrix"); + + QString value; + for (int i=0; i<4; ++i) + for (int j=0; j<4; ++j) + value += QString("%1 ").arg(matrix(i, j)); + + matrixTag.setAttribute("value", value); + trafoTag.setAttribute("time", *it2); + trafoTag.appendChild(matrixTag); + property.appendChild(trafoTag); + } + } + break; case Properties::ETransform: { /* Captures the subset of transformations that are used by Mitsuba's perspective and orthographic camera classes */ diff --git a/src/mtsgui/sceneloader.cpp b/src/mtsgui/sceneloader.cpp index f0eecbc3..070ae326 100644 --- a/src/mtsgui/sceneloader.cpp +++ b/src/mtsgui/sceneloader.cpp @@ -153,7 +153,7 @@ void SceneLoader::run() { m_result->fileName = m_filename; m_result->shortName = fileInfo.fileName(); if (sensor->getClass()->derivesFrom(MTS_CLASS(PerspectiveCamera))) { - m_result->up = static_cast(sensor)->getInverseViewTransform( + m_result->up = static_cast(sensor)->getWorldTransform( sensor->getShutterOpen() + 0.5f * sensor->getShutterOpenTime())(Vector(0, 1, 0)); } else { m_result->up = Vector(0.0f); diff --git a/src/mtsgui/symlinks_auth.cpp b/src/mtsgui/symlinks_auth.cpp index 13f6fe37..a0bf914b 100644 --- a/src/mtsgui/symlinks_auth.cpp +++ b/src/mtsgui/symlinks_auth.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace mitsuba { extern std::string __mts_bundlepath(); @@ -30,7 +31,10 @@ bool create_symlinks() { } std::string bundlePath = mitsuba::__mts_bundlepath(); std::string path = bundlePath + "/Contents/MacOS/symlinks_install"; - char *args[] = { const_cast(bundlePath.c_str()), NULL }; + std::ostringstream oss; + oss << getuid(); + std::string uid = oss.str(); + char *args[] = { const_cast(bundlePath.c_str()), const_cast(uid.c_str()), NULL }; FILE *pipe = NULL; flags = kAuthorizationFlagDefaults; status = AuthorizationExecuteWithPrivileges(ref, const_cast(path.c_str()), flags, args, &pipe); diff --git a/src/mtsgui/symlinks_install.c b/src/mtsgui/symlinks_install.c index 6b9d49fb..34e1b039 100644 --- a/src/mtsgui/symlinks_install.c +++ b/src/mtsgui/symlinks_install.c @@ -3,6 +3,7 @@ #include #include #include +#include void installPython(const char *basedir, const char *version) { char fname[MAXPATHLEN]; @@ -23,6 +24,22 @@ void installPython(const char *basedir, const char *version) { fclose(f); } +void appendShellConfig(const char *basedir, const char *target, const char *fmt, const char *dir) { + char fname[MAXPATHLEN]; + snprintf(fname, sizeof(fname), "%s/%s", basedir, 
target); + + if (access(fname, R_OK) < 0) + return; + + FILE *f = fopen(fname, "a"); + if (!f) + return; + + fprintf(f, fmt, dir); + + fclose(f); +} + void install(const char *basedir, const char *name) { char fname[MAXPATHLEN]; FILE *f; @@ -51,11 +68,11 @@ void install(const char *basedir, const char *name) { } int main(int argc, char **argv) { - if (argc != 2) { + if (argc != 3) { fprintf(stderr, "Incorrect number of arguments!\n"); return -1; } - + if (setuid(0) != 0) { fprintf(stderr, "setuid(): failed!\n"); return -1; @@ -69,6 +86,12 @@ int main(int argc, char **argv) { installPython(argv[1], "2.6"); installPython(argv[1], "2.7"); + /// this is not required anymore as of Mitsuba 0.4.3 + //struct passwd *pw = getpwuid(atoi(argv[2])); + //appendShellConfig(pw->pw_dir, ".bashrc", "\nexport LD_LIBRARY_PATH=%s/Contents/Frameworks:$LD_LIBRARY_PATH\n", argv[1]); + //appendShellConfig(pw->pw_dir, ".zshrc", "\nexport LD_LIBRARY_PATH=%s/Contents/Frameworks:$LD_LIBRARY_PATH\n", argv[1]); + //appendShellConfig(pw->pw_dir, ".cshrc", "\nsetenv LD_LIBRARY_PATH %s/Contents/Frameworks:${LD_LIBRARY_PATH}\n", argv[1]); + return 0; } diff --git a/src/rfilters/gaussian.cpp b/src/rfilters/gaussian.cpp index a9cc046b..5ec166aa 100644 --- a/src/rfilters/gaussian.cpp +++ b/src/rfilters/gaussian.cpp @@ -52,8 +52,8 @@ public: Float eval(Float x) const { Float alpha = -1.0f / (2.0f * m_stddev*m_stddev); return std::max((Float) 0.0f, - std::exp(alpha * x * x) - - std::exp(alpha * m_radius * m_radius)); + math::fastexp(alpha * x * x) - + math::fastexp(alpha * m_radius * m_radius)); } std::string toString() const { diff --git a/src/samplers/halton.cpp b/src/samplers/halton.cpp index b417ae8c..c44d05df 100644 --- a/src/samplers/halton.cpp +++ b/src/samplers/halton.cpp @@ -174,7 +174,7 @@ public: sampler->m_scramble = m_scramble; sampler->m_permutations = m_permutations; for (size_t i=0; irequest2DArray(m_req1D[i]); + sampler->request1DArray(m_req1D[i]); for (size_t i=0; irequest2DArray(m_req2D[i]); return sampler.get(); diff --git a/src/samplers/independent.cpp b/src/samplers/independent.cpp index f8c7cbcf..a76b8f39 100644 --- a/src/samplers/independent.cpp +++ b/src/samplers/independent.cpp @@ -73,7 +73,7 @@ public: sampler->m_sampleCount = m_sampleCount; sampler->m_random = new Random(m_random); for (size_t i=0; irequest2DArray(m_req1D[i]); + sampler->request1DArray(m_req1D[i]); for (size_t i=0; irequest2DArray(m_req2D[i]); return sampler.get(); diff --git a/src/samplers/ldsampler.cpp b/src/samplers/ldsampler.cpp index 01f18251..3a155d4f 100644 --- a/src/samplers/ldsampler.cpp +++ b/src/samplers/ldsampler.cpp @@ -138,7 +138,7 @@ public: sampler->m_samples2D[i] = new Point2[m_sampleCount]; } for (size_t i=0; irequest2DArray(m_req1D[i]); + sampler->request1DArray(m_req1D[i]); for (size_t i=0; irequest2DArray(m_req2D[i]); diff --git a/src/samplers/sobol.cpp b/src/samplers/sobol.cpp index ecf3e998..0d4d2cdd 100644 --- a/src/samplers/sobol.cpp +++ b/src/samplers/sobol.cpp @@ -138,7 +138,7 @@ public: sampler->m_arrayStartDim = m_arrayStartDim; sampler->m_arrayEndDim = m_arrayEndDim; for (size_t i=0; irequest2DArray(m_req1D[i]); + sampler->request1DArray(m_req1D[i]); for (size_t i=0; irequest2DArray(m_req2D[i]); return sampler.get(); diff --git a/src/samplers/stratified.cpp b/src/samplers/stratified.cpp index 1d1d883f..7ce6fdf5 100644 --- a/src/samplers/stratified.cpp +++ b/src/samplers/stratified.cpp @@ -138,7 +138,7 @@ public: sampler->m_permutations2D[i] = new uint32_t[m_sampleCount]; } for (size_t i=0; 
irequest2DArray(m_req1D[i]); + sampler->request1DArray(m_req1D[i]); for (size_t i=0; irequest2DArray(m_req2D[i]); return sampler.get(); diff --git a/src/sensors/fluencemeter.cpp b/src/sensors/fluencemeter.cpp index 53c87e18..6bd5a084 100644 --- a/src/sensors/fluencemeter.cpp +++ b/src/sensors/fluencemeter.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include MTS_NAMESPACE_BEGIN diff --git a/src/sensors/irradiancemeter.cpp b/src/sensors/irradiancemeter.cpp index 609d1822..9ef414e5 100644 --- a/src/sensors/irradiancemeter.cpp +++ b/src/sensors/irradiancemeter.cpp @@ -38,6 +38,9 @@ MTS_NAMESPACE_BEGIN * provided surface. * Such a sensor is useful for conducting virtual experiments and * testing the renderer for correctness. + * The result is normalized so that an irradiance sensor inside an + * integrating sphere with constant radiance 1 records + * an irradiance value of $\pi$. * * To create an irradiance meter, instantiate the desired measurement * shape and specify the sensor as its child. Note that when the @@ -106,7 +109,7 @@ public: ray.maxt = std::numeric_limits::infinity(); PositionSamplingRecord pRec(ray.time); - m_shape->samplePosition(pRec, Point2( + m_shape->samplePosition(pRec, Point2( pixelSample.x * m_invResolution.x, pixelSample.y * m_invResolution.y)); @@ -114,7 +117,7 @@ public: ray.setDirection(Frame(pRec.n).toWorld( Warp::squareToCosineHemisphere(otherSample))); - return Spectrum(1.0f); + return Spectrum(M_PI); } Spectrum samplePosition(PositionSamplingRecord &pRec, @@ -127,15 +130,16 @@ public: samplePos.y = (extra->y + sample.y) * m_invResolution.y; } + m_shape->samplePosition(pRec, samplePos); + pRec.uv = Point2(samplePos.x * m_resolution.x, samplePos.y * m_resolution.y); - m_shape->samplePosition(pRec, samplePos); - return Spectrum(1.0f); + return Spectrum(M_PI / (pRec.pdf * m_shape->getSurfaceArea())); } Spectrum evalPosition(const PositionSamplingRecord &pRec) const { - return Spectrum(m_shape->pdfPosition(pRec)); + return Spectrum(M_PI / m_shape->getSurfaceArea()); } Float pdfPosition(const PositionSamplingRecord &pRec) const { @@ -182,8 +186,11 @@ public: a reference point within a medium or on a transmissive surface will set dRec.refN = 0, hence they should always be accepted. 
*/ if (dot(dRec.d, dRec.refN) >= 0 && dot(dRec.d, dRec.n) < 0 && dRec.pdf != 0.0f) { - dRec.uv = Point2(0.5f); - return Spectrum(1.0f / dRec.pdf); + dRec.uv = Point2( + dRec.uv.x * m_resolution.x, + dRec.uv.y * m_resolution.y); + + return Spectrum(1.0f / (dRec.pdf * m_shape->getSurfaceArea())); } else { dRec.pdf = 0.0f; return Spectrum(0.0f); @@ -200,6 +207,23 @@ public: } } + Spectrum eval(const Intersection &its, const Vector &d, Point2 &samplePos) const { + if (dot(its.shFrame.n, d) < 0) + return Spectrum(0.0f); + + samplePos = Point2( + its.uv.x * m_resolution.x, + its.uv.y * m_resolution.y); + + return Spectrum(1.0f / m_shape->getSurfaceArea()); + } + + bool getSamplePosition(const PositionSamplingRecord &pRec, + const DirectionSamplingRecord &dRec, Point2 &samplePosition) const { + samplePosition = pRec.uv; + return true; + } + void setParent(ConfigurableObject *parent) { Sensor::setParent(parent); diff --git a/src/sensors/orthographic.cpp b/src/sensors/orthographic.cpp index 37ea76d4..ae9504d9 100644 --- a/src/sensors/orthographic.cpp +++ b/src/sensors/orthographic.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/src/sensors/perspective.cpp b/src/sensors/perspective.cpp index 6a521467..7184b912 100644 --- a/src/sensors/perspective.cpp +++ b/src/sensors/perspective.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include MTS_NAMESPACE_BEGIN @@ -113,7 +113,7 @@ public: foreshortening terms caused by the aperture, hence the flag "EOnSurface" */ m_type |= EDeltaPosition | EPerspectiveCamera | EOnSurface | EDirectionSampleMapsToPixels; - if (props.getTransform("toWorld", Transform()).hasScale()) + if (props.getAnimatedTransform("toWorld", Transform())->eval(0).hasScale()) Log(EError, "Scale factors in the camera-to-world " "transformation are not allowed!"); } diff --git a/src/sensors/radiancemeter.cpp b/src/sensors/radiancemeter.cpp index 758b9d7c..ae1bd44b 100644 --- a/src/sensors/radiancemeter.cpp +++ b/src/sensors/radiancemeter.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include MTS_NAMESPACE_BEGIN diff --git a/src/sensors/spherical.cpp b/src/sensors/spherical.cpp index 4ef108a4..e39c2143 100644 --- a/src/sensors/spherical.cpp +++ b/src/sensors/spherical.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include MTS_NAMESPACE_BEGIN diff --git a/src/sensors/telecentric.cpp b/src/sensors/telecentric.cpp index 96f06fe3..02bf566d 100644 --- a/src/sensors/telecentric.cpp +++ b/src/sensors/telecentric.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/src/sensors/thinlens.cpp b/src/sensors/thinlens.cpp index 0107915f..7e52b87e 100644 --- a/src/sensors/thinlens.cpp +++ b/src/sensors/thinlens.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include @@ -137,7 +137,7 @@ public: m_apertureRadius = Epsilon; } - if (props.getTransform("toWorld", Transform()).hasScale()) + if (props.getAnimatedTransform("toWorld", Transform())->eval(0).hasScale()) Log(EError, "Scale factors in the camera-to-world " "transformation are not allowed!"); } @@ -520,14 +520,11 @@ public: } ref createShape(const Scene *scene) { - if (!m_worldTransform->isStatic()) - Log(EError, "Bidirectional renderings involving moving " - "perspective cameras with depth of field are currently not supported!"); - Transform trafo = m_worldTransform->eval(0) * - Transform::scale(Vector(m_apertureRadius)); + ref trafo = new AnimatedTransform(m_worldTransform); + trafo->prependScale(Vector(m_apertureRadius)); 
Properties props("disk"); - props.setTransform("toWorld", trafo); + props.setAnimatedTransform("toWorld", trafo); Shape *shape = static_cast (PluginManager::getInstance()-> createObject(MTS_CLASS(Shape), props)); shape->addChild(this); diff --git a/src/shapes/CMakeLists.txt b/src/shapes/CMakeLists.txt index d2df9951..6c73ab39 100644 --- a/src/shapes/CMakeLists.txt +++ b/src/shapes/CMakeLists.txt @@ -15,10 +15,11 @@ add_shape(rectangle rectangle.cpp) add_shape(disk disk.cpp) add_shape(sphere sphere.cpp) add_shape(cylinder cylinder.cpp) +add_shape(cube cube.cpp) add_shape(hair hair.h hair.cpp) -add_shape(shapegroup shapegroup.h shapegroup.cpp) -add_shape(instance instance.h instance.cpp) -add_shape(animatedinstance animatedinstance.cpp) +add_shape(shapegroup shapegroup.h shapegroup.cpp) +add_shape(instance instance.h instance.cpp) +#add_shape(deformable deformable.cpp) add_shape(ply ply.cpp ply/ply_parser.cpp ply/byte_order.hpp ply/config.hpp ply/io_operators.hpp ply/ply.hpp ply/ply_parser.hpp) diff --git a/src/shapes/SConscript b/src/shapes/SConscript index 4c48f497..7d401a95 100644 --- a/src/shapes/SConscript +++ b/src/shapes/SConscript @@ -11,7 +11,8 @@ plugins += env.SharedLibrary('cylinder', ['cylinder.cpp']) plugins += env.SharedLibrary('hair', ['hair.cpp']) plugins += env.SharedLibrary('shapegroup', ['shapegroup.cpp']) plugins += env.SharedLibrary('instance', ['instance.cpp']) -plugins += env.SharedLibrary('animatedinstance', ['animatedinstance.cpp']) +plugins += env.SharedLibrary('cube', ['cube.cpp']) plugins += env.SharedLibrary('heightfield', ['heightfield.cpp']) +#plugins += env.SharedLibrary('deformable', ['deformable.cpp']) Export('plugins') diff --git a/src/shapes/animatedinstance.cpp b/src/shapes/animatedinstance.cpp deleted file mode 100644 index aa288645..00000000 --- a/src/shapes/animatedinstance.cpp +++ /dev/null @@ -1,164 +0,0 @@ -/* - This file is part of Mitsuba, a physically based rendering system. - - Copyright (c) 2007-2012 by Wenzel Jakob and others. - - Mitsuba is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License Version 3 - as published by the Free Software Foundation. - - Mitsuba is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . -*/ - -#include -#include -#include -#include "shapegroup.h" - -MTS_NAMESPACE_BEGIN - -/*!\plugin{animatedinstance}{Animated geometry instance} - * \order{10} - * \parameters{ - * \parameter{filename}{\String}{Filename of an animated - * transformation} - * \parameter{\Unnamed}{\ShapeGroup}{A reference to a - * shape group that should be instantiated} - * } - * - * This plugin implements an \emph{animated} geometry instance, - * i.e. one or more shapes that are undergoing \emph{ridgid} - * transformations over time. - * - * The input file should contain a binary / serialized - * \code{AnimatedTransform} data structure -- for details, - * please refer to the C++ implementation of this class. 
- */ -class AnimatedInstance : public Shape { -public: - AnimatedInstance(const Properties &props) : Shape(props) { - FileResolver *fResolver = Thread::getThread()->getFileResolver(); - fs::path path = fResolver->resolve(props.getString("filename")); - m_name = path.filename().string(); - - Log(EInfo, "Loading animation track from \"%s\"", m_name.c_str()); - ref fs = new FileStream(path, FileStream::EReadOnly); - m_transform = new AnimatedTransform(fs); - } - - AnimatedInstance(Stream *stream, InstanceManager *manager) - : Shape(stream, manager) { - m_shapeGroup = static_cast(manager->getInstance(stream)); - m_transform = new AnimatedTransform(stream); - configure(); - } - - void serialize(Stream *stream, InstanceManager *manager) const { - Shape::serialize(stream, manager); - manager->serialize(stream, m_shapeGroup.get()); - m_transform->serialize(stream); - } - - void configure() { - if (!m_shapeGroup) - Log(EError, "A reference to a 'shapegroup' must be specified!"); - const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); - m_aabb = m_transform->getSpatialBounds(kdtree->getAABB()); - } - - AABB getAABB() const { - return m_aabb; - } - - std::string getName() const { - return m_name; - } - - Float getSurfaceArea() const { - Log(EError, "AnimatedInstance::getSurfaceArea(): not supported!"); - return 0.0f; - } - - void addChild(const std::string &name, ConfigurableObject *child) { - const Class *cClass = child->getClass(); - if (cClass->getName() == "ShapeGroup") { - m_shapeGroup = static_cast(child); - } else { - Shape::addChild(name, child); - } - } - - bool rayIntersect(const Ray &_ray, Float mint, - Float maxt, Float &t, void *temp) const { - const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); - Ray ray; - const Transform &objectToWorld = m_transform->eval(_ray.time); - Transform worldToObject = objectToWorld.inverse(); - worldToObject.transformAffine(_ray, ray); - return kdtree->rayIntersect(ray, mint, maxt, t, temp); - } - - bool rayIntersect(const Ray &_ray, Float mint, Float maxt) const { - const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); - Ray ray; - const Transform &objectToWorld = m_transform->eval(_ray.time); - Transform worldToObject = objectToWorld.inverse(); - worldToObject.transformAffine(_ray, ray); - return kdtree->rayIntersect(ray, mint, maxt); - } - - void fillIntersectionRecord(const Ray &_ray, - const void *temp, Intersection &its) const { - const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); - const Transform &objectToWorld = m_transform->eval(_ray.time); - - Ray ray; - objectToWorld.inverse()(_ray, ray); - kdtree->fillIntersectionRecord(ray, temp, its); - - its.shFrame.n = normalize(objectToWorld(its.shFrame.n)); - its.shFrame.s = normalize(objectToWorld(its.shFrame.s)); - its.shFrame.t = normalize(objectToWorld(its.shFrame.t)); - its.geoFrame = Frame(normalize(objectToWorld(its.geoFrame.n))); - its.dpdu = objectToWorld(its.dpdu); - its.dpdv = objectToWorld(its.dpdv); - its.wi = normalize(its.shFrame.toLocal(-_ray.d)); - its.instance = this; - } - - void getNormalDerivative(const Intersection &its, - Vector &dndu, Vector &dndv, bool shadingFrame) const { - const Transform &objectToWorld = m_transform->eval(its.time); - its.shape->getNormalDerivative(its, dndu, dndv, shadingFrame); - - /* The following will probably be incorrect for - non-rigid transformations */ - dndu = objectToWorld(dndu); - dndv = objectToWorld(dndv); - } - - size_t getPrimitiveCount() const { - return 0; - } - - size_t getEffectivePrimitiveCount() const { - return 
m_shapeGroup->getPrimitiveCount(); - } - - MTS_DECLARE_CLASS() -private: - ref m_shapeGroup; - ref m_transform; - AABB m_aabb; - std::string m_name; -}; - -MTS_IMPLEMENT_CLASS_S(AnimatedInstance, false, Shape) -MTS_EXPORT_PLUGIN(AnimatedInstance, "Animated instanced geometry"); -MTS_NAMESPACE_END diff --git a/src/shapes/cube.cpp b/src/shapes/cube.cpp new file mode 100644 index 00000000..b6a25f68 --- /dev/null +++ b/src/shapes/cube.cpp @@ -0,0 +1,115 @@ +/* + This file is part of Mitsuba, a physically based rendering system. + + Copyright (c) 2007-2012 by Wenzel Jakob and others. + + Mitsuba is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License Version 3 + as published by the Free Software Foundation. + + Mitsuba is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +*/ + +#include +#include + +MTS_NAMESPACE_BEGIN + +static Float CubeData_vertexPositions[][3] = {{1, -1, -1}, {1, -1, 1}, {-1, -1, 1}, {-1, -1, -1}, {1, 1, -1}, {-1, 1, -1}, {-1, 1, 1}, {1, 1, 1}, {1, -1, -1}, {1, 1, -1}, {1, 1, 1}, {1, -1, 1}, {1, -1, 1}, {1, 1, 1}, {-1, 1, 1}, {-1, -1, 1}, {-1, -1, 1}, {-1, 1, 1}, {-1, 1, -1}, {-1, -1, -1}, {1, 1, -1}, {1, -1, -1}, {-1, -1, -1}, {-1, 1, -1}}; + +static Float CubeData_vertexNormals[][3] = {{0, -1, 0}, {0, -1, 0}, {0, -1, 0}, {0, -1, 0}, {0, 1, 0}, {0, 1, 0}, {0, 1, 0}, {0, 1, 0}, {1, 0, 0}, {1, 0, 0}, {1, 0, 0}, {1, 0, 0}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}, {-1, 0, 0}, {-1, 0, 0}, {-1, 0, 0}, {-1, 0, 0}, {0, 0, -1}, {0, 0, -1}, {0, 0, -1}, {0, 0, -1}}; + +static Float CubeData_texcoords[][2] = {{0, 1}, {1, 1}, {1, 0}, {0, 0}, {0, 1}, {1, 1}, {1, 0}, {0, 0}, {0, 1}, {1, 1}, {1, 0}, {0, 0}, {0, 1}, {1, 1}, {1, 0}, {0, 0}, {0, 1}, {1, 1}, {1, 0}, {0, 0}, {0, 1}, {1, 1}, {1, 0}, {0, 0}}; + +static uint32_t CubeData_triangles[][3] = {{0, 1, 2}, {3, 0, 2}, {4, 5, 6}, {7, 4, 6}, {8, 9, 10}, {11, 8, 10}, {12, 13, 14}, {15, 12, 14}, {16, 17, 18}, {19, 16, 18}, {20, 21, 22}, {23, 20, 22}}; + +/*!\plugin{cube}{Cube intersection primitive} + * \order{0} + * \parameters{ + * \parameter{toWorld}{\Transform\Or\Animation}{ + * Specifies an optional linear object-to-world transformation. + * \default{none (i.e. object space $=$ world space)} + * } + * \parameter{flipNormals}{\Boolean}{ + * Is the cube inverted, i.e. should the normal vectors + * be flipped? \default{\code{false}, i.e. the normals point outside} + * } + * } + * + * \renderings{ + * \rendering{Basic example} + * {shape_cube_basic} + * \rendering{A textured and stretched cube with the default parameterization + * (Listing~\ref{lst:cube-example})} + * {shape_cube_parameterization} + * } + * + * This shape plugin describes a simple cube/cuboid intersection primitive. By + * default, it creates a cube between the world-space positions $(-1, -1, -1)$ and $(1,1,1)$. + * However, an arbitrary linear transformation may be specified to translate, rotate, scale + * or skew it as desired. The parameterization of this shape maps every face onto the + * rectangle $[0,1]^2$ in $uv$ space. 
+ * \vspace{5mm} + * \begin{xml}[caption={Example of a textured and stretched cube}, label=lst:cube-example] + * + * + * + * + * + * + * + * + * + * + * + * \end{xml} + */ +class Cube : public TriMesh { +public: + Cube(const Properties &props) : TriMesh(props) { + m_name = props.getID(); + m_triangleCount = 12; + m_vertexCount = 24; + m_positions = new Point[m_vertexCount]; + m_texcoords = new Point2[m_vertexCount]; + m_normals = new Normal[m_vertexCount]; + m_triangles = new Triangle[m_triangleCount]; + + Transform toWorld = props.getTransform("toWorld", Transform()); + for (uint32_t i=0; i maxt || farT < mint) + if (!(nearT <= maxt && farT >= mint)) /* NaN-aware conditionals */ return false; const Float zPosNear = ray.o.z + ray.d.z * nearT; const Float zPosFar = ray.o.z + ray.d.z * farT; + if (zPosNear >= 0 && zPosNear <= m_length && nearT >= mint) { t = nearT; } else if (zPosFar >= 0 && zPosFar <= m_length) { @@ -210,12 +211,12 @@ public: Vector dpdu = Vector(-local.y, local.x, 0) * (2*M_PI); Vector dpdv = Vector(0, 0, m_length); - if (m_flipNormals) - dpdu *= -1; its.shape = this; its.dpdu = m_objectToWorld(dpdu); its.dpdv = m_objectToWorld(dpdv); its.geoFrame.n = Normal(normalize(cross(its.dpdu, its.dpdv))); + if (m_flipNormals) + its.geoFrame.n *= -1; its.geoFrame.s = normalize(its.dpdu); its.geoFrame.t = normalize(its.dpdv); its.shFrame = its.geoFrame; @@ -374,10 +375,10 @@ public: /* Find the componentwise maxima of the ellipse */ for (int i=0; i<2; ++i) { int j = (i==0) ? axis1 : axis2; - Float alpha = ellipseAxes[0][j]; - Float beta = ellipseAxes[1][j]; - Float ratio = beta/alpha, tmp = std::sqrt(1+ratio*ratio); - Float cosTheta = 1/tmp, sinTheta = ratio/tmp; + Float alpha = ellipseAxes[0][j], beta = ellipseAxes[1][j]; + Float tmp = 1 / std::sqrt(alpha*alpha + beta*beta); + Float cosTheta = alpha * tmp, sinTheta = beta*tmp; + Point p1 = ellipseCenter + cosTheta*ellipseAxes[0] + sinTheta*ellipseAxes[1]; Point p2 = ellipseCenter - cosTheta*ellipseAxes[0] - sinTheta*ellipseAxes[1]; @@ -495,7 +496,7 @@ public: void getNormalDerivative(const Intersection &its, Vector &dndu, Vector &dndv, bool shadingFrame) const { - dndu = its.dpdu / m_radius; + dndu = its.dpdu / (m_radius * (m_flipNormals ? -1 : 1)); dndv = Vector(0.0f); } @@ -510,8 +511,8 @@ public: std::string toString() const { std::ostringstream oss; oss << "Cylinder[" << endl - << " radius = " << m_radius << ", " << endl - << " length = " << m_length << ", " << endl + << " radius = " << m_radius << "," << endl + << " length = " << m_length << "," << endl << " objectToWorld = " << indent(m_objectToWorld.toString()) << "," << endl << " bsdf = " << indent(m_bsdf.toString()) << "," << endl; if (isMediumTransition()) diff --git a/src/shapes/deformable.cpp b/src/shapes/deformable.cpp new file mode 100644 index 00000000..4f82ff41 --- /dev/null +++ b/src/shapes/deformable.cpp @@ -0,0 +1,467 @@ +/* + This file is part of Mitsuba, a physically based rendering system. + + Copyright (c) 2007-2012 by Wenzel Jakob and others. + + Mitsuba is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License Version 3 + as published by the Free Software Foundation. + + Mitsuba is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. 
If not, see . +*/ + +#include +#include +#include +#include +#include +#include +#include + +#define SHAPE_PER_SEGMENT 1 +#define NO_CLIPPING_SUPPORT 1 + +MTS_NAMESPACE_BEGIN + +class SpaceTimeKDTree : public SAHKDTree4D { + friend class GenericKDTree; + friend class SAHKDTree4D; +public: + /// Temporarily holds some intersection information + struct IntersectionCache { + Point p[3]; + Float u, v; + }; + + SpaceTimeKDTree(const std::vector &frameTimes, std::vector &positions, + Triangle *triangles, size_t vertexCount, size_t triangleCount) + : m_frameTimes(frameTimes), m_positions(positions), m_triangles(triangles), + m_vertexCount(vertexCount), m_triangleCount(triangleCount) { + + Log(EInfo, "Total amount of vertex data: %s", + memString(vertexCount*frameTimes.size()*sizeof(float)*3).c_str()); + + //setClip(false); + //setExactPrimitiveThreshold(10); + buildInternal(); + + /* Collect some statistics */ + std::stack stack; + + stack.push(m_nodes); + size_t spatialSplits = 0, timeSplits = 0; + while (!stack.empty()) { + const KDNode *node = stack.top(); + stack.pop(); + if (!node->isLeaf()) { + if (node->getAxis() == 3) { + timeSplits++; + } else { + spatialSplits++; + } + stack.push((const KDNode *) node->getLeft()); + stack.push((const KDNode *) node->getRight()); + } + } + + KDLog(EInfo, "Spacetime kd-tree statistics"); + KDLog(EInfo, " Time interval = [%f, %f]" , m_tightAABB.min.w, m_tightAABB.max.w); + KDLog(EInfo, " Spatial splits = " SIZE_T_FMT, spatialSplits); + KDLog(EInfo, " Time splits = " SIZE_T_FMT, timeSplits); + KDLog(EInfo, ""); + + m_spatialAABB = AABB( + Point(m_aabb.min.x, m_aabb.min.y, m_aabb.min.z), + Point(m_aabb.max.x, m_aabb.max.y, m_aabb.max.z) + ); + } + + /// Return one of the points stored in the point cache + inline Point getPoint(uint32_t frame, uint32_t index) const { + float *ptr = m_positions[frame] + index*3; +#if defined(__LITTLE_ENDIAN__) + return Point( + (Float) endianness_swap(ptr[0]), + (Float) endianness_swap(ptr[1]), + (Float) endianness_swap(ptr[2])); +#else + return Point((Float) ptr[0], (Float) ptr[1], (Float) ptr[2]); +#endif + } + + // ======================================================================== + // Implementation of functions required by the parent class + // ======================================================================== + + /// Return the total number of primitives that are organized in the tree + inline SizeType getPrimitiveCount() const { +#ifdef SHAPE_PER_SEGMENT + return m_triangleCount * (m_frameTimes.size() - 1); +#else + return m_triangleCount; +#endif + } + + /// Return the 4D extents for one of the primitives contained in the tree + AABB4 getAABB(IndexType index) const { +#ifdef SHAPE_PER_SEGMENT + int frameIdx = index / m_triangleCount; + int triangleIdx = index % m_triangleCount; + const Triangle &tri = m_triangles[triangleIdx]; + + AABB aabb; + for (int i=0; i<3; ++i) { + aabb.expandBy(getPoint(frameIdx, tri.idx[i])); + aabb.expandBy(getPoint(frameIdx+1, tri.idx[i])); + } + + return AABB4( + Point4(aabb.min.x, aabb.min.y, aabb.min.z, m_frameTimes[frameIdx]), + Point4(aabb.max.x, aabb.max.y, aabb.max.z, m_frameTimes[frameIdx+1]) + ); +#else + AABB aabb; + const Triangle &tri = m_triangles[index]; + for (size_t i=0; i 1) + return false; + + /* Compute interpolated positions */ + Point p[3]; + for (int i=0; i<3; ++i) + p[i] = (1 - alpha) * getPoint(frameIdx, tri.idx[i]) + + alpha * getPoint(frameIdx+1, tri.idx[i]); + + Float tempU, tempV, tempT; + if (!Triangle::rayIntersect(p[0], p[1], p[2], ray, tempU, tempV, 
tempT)) + return false; + if (tempT < mint || tempT > maxt) + return false; + + if (tmp != NULL) { + IntersectionCache *cache = + static_cast(tmp); + t = tempT; + memcpy(cache->p, p, sizeof(Point)*3); + cache->u = tempU; + cache->v = tempV; + } + return true; + } + + /// Cast a shadow ray against a specific triangle + inline bool intersect(const Ray &ray, IndexType idx, + Float mint, Float maxt) const { + Float tempT; + /* No optimized version for shadow rays yet */ + return intersect(ray, idx, mint, maxt, tempT, NULL); + } + + // ======================================================================== + // Miscellaneous + // ======================================================================== + + /// Intersect a ray with all primitives stored in the kd-tree + inline bool rayIntersect(const Ray &ray, Float _mint, Float _maxt, + Float &t, void *temp) const { + Float tempT = std::numeric_limits::infinity(); + Float mint, maxt; + + if (m_spatialAABB.rayIntersect(ray, mint, maxt)) { + if (_mint > mint) mint = _mint; + if (_maxt < maxt) maxt = _maxt; + + if (EXPECT_TAKEN(maxt > mint && ray.time >= m_aabb.min.w && ray.time <= m_aabb.max.w)) { + if (rayIntersectHavran(ray, mint, maxt, tempT, temp)) { + t = tempT; + return true; + } + } + } + return false; + } + + /** + * \brief Intersect a ray with all primitives stored in the kd-tree + * (Visiblity query version) + */ + inline bool rayIntersect(const Ray &ray, Float _mint, Float _maxt) const { + Float tempT = std::numeric_limits::infinity(); + Float mint, maxt; + + if (m_spatialAABB.rayIntersect(ray, mint, maxt)) { + if (_mint > mint) mint = _mint; + if (_maxt < maxt) maxt = _maxt; + + if (EXPECT_TAKEN(maxt > mint && ray.time >= m_aabb.min.w && ray.time <= m_aabb.max.w)) + if (rayIntersectHavran(ray, mint, maxt, tempT, NULL)) + return true; + } + return false; + } + + inline const Triangle *getTriangles() const { + return m_triangles; + } + + /// Return an AABB with the spatial extents + inline const AABB &getSpatialAABB() const { + return m_spatialAABB; + } + + MTS_DECLARE_CLASS() +protected: + std::vector m_frameTimes; + std::vector m_positions; + Triangle *m_triangles; + size_t m_vertexCount; + size_t m_triangleCount; + AABB m_spatialAABB; +}; + +class Deformable : public Shape { +public: + Deformable(const Properties &props) : Shape(props) { + FileResolver *fResolver = Thread::getThread()->getFileResolver(); + fs::path path = fResolver->resolve(props.getString("filename")); + if (path.extension() != ".mdd") + Log(EError, "Point cache files must have the extension \".mdd\""); + + m_mmap = new MemoryMappedFile(path); + + ref mStream = new MemoryStream((uint8_t *) m_mmap->getData(), + m_mmap->getSize()); + mStream->setByteOrder(Stream::EBigEndian); + + uint32_t frameCount = mStream->readUInt(); + m_vertexCount = mStream->readUInt(); + + Log(EInfo, "Point cache has %i frames and %i vertices", frameCount, m_vertexCount); + + Float clipStart = props.getFloat("clipStart", 0), + clipEnd = props.getFloat("clipEnd", 0); + + std::vector frameTimes; + std::vector positions; + + for (uint32_t i=0; ireadSingle()); + + for (uint32_t i=0; i(mStream->getCurrentData())); + mStream->skip(m_vertexCount * 3 * sizeof(float)); + } + + if (clipStart != clipEnd) { + m_positions.reserve(positions.size()); + m_frameTimes.reserve(frameTimes.size()); + for (uint32_t i=0; i= clipStart && frameTimes[i] <= clipEnd) { + m_frameTimes.push_back(frameTimes[i]); + m_positions.push_back(positions[i]); + } + } + if (m_frameTimes.empty()) + Log(EError, "After clipping to the 
time range [%f, %f] no frames were left!", + clipStart, clipEnd); + Log(EInfo, "Clipped away %u/%u frames", frameCount - (uint32_t) m_frameTimes.size(), frameCount); + } else { + m_positions = positions; + m_frameTimes = frameTimes; + } + } + + Deformable(Stream *stream, InstanceManager *manager) + : Shape(stream, manager) { + /// TBD + } + + void serialize(Stream *stream, InstanceManager *manager) const { + Shape::serialize(stream, manager); + /// TBD + } + + void configure() { + Shape::configure(); + + if (m_mesh == NULL) + Log(EError, "A nested triangle mesh is required so that " + "connectivity information can be extracted!"); + if (m_mesh->getVertexCount() != m_vertexCount) + Log(EError, "Point cache and nested geometry have mismatched " + "numbers of vertices!"); + + m_kdtree = new SpaceTimeKDTree(m_frameTimes, m_positions, m_mesh->getTriangles(), + m_vertexCount, m_mesh->getTriangleCount()); + m_aabb = m_kdtree->getSpatialAABB(); + } + + bool rayIntersect(const Ray &ray, Float mint, + Float maxt, Float &t, void *temp) const { + return m_kdtree->rayIntersect(ray, mint, maxt, t, temp); + } + + bool rayIntersect(const Ray &ray, Float mint, Float maxt) const { + return m_kdtree->rayIntersect(ray, mint, maxt); + } + + void fillIntersectionRecord(const Ray &ray, + const void *temp, Intersection &its) const { + const SpaceTimeKDTree::IntersectionCache *cache + = reinterpret_cast(temp); + + const Vector b(1 - cache->u - cache->v, cache->u, cache->v); + const Point p0 = cache->p[0]; + const Point p1 = cache->p[1]; + const Point p2 = cache->p[2]; + + Normal faceNormal(cross(p1-p0, p2-p0)); + Float length = faceNormal.length(); + if (!faceNormal.isZero()) + faceNormal /= length; + + /* Just the basic attributes for now and geometric normals */ + its.p = ray(its.t); + its.geoFrame = Frame(faceNormal); + its.shFrame = its.geoFrame; + its.wi = its.toLocal(-ray.d); + its.shape = this; + its.instance = this; + its.hasUVPartials = false; + its.time = ray.time; + } + + AABB getAABB() const { + return m_kdtree->getSpatialAABB(); + } + + size_t getPrimitiveCount() const { + return m_mesh->getTriangleCount(); + } + + size_t getEffectivePrimitiveCount() const { + return m_mesh->getTriangleCount(); + } + + void addChild(const std::string &name, ConfigurableObject *child) { + const Class *cClass = child->getClass(); + if (cClass->derivesFrom(TriMesh::m_theClass)) { + Assert(m_mesh == NULL); + m_mesh = static_cast(child); + if (m_mesh->getVertexCount() != m_vertexCount) + Log(EError, "Geometry mismatch! MDD file contains %u vertices. 
" + "The attached shape uses %u!", m_vertexCount, m_mesh->getVertexCount()); + } else if (cClass->derivesFrom(Shape::m_theClass) && static_cast(child)->isCompound()) { + size_t index = 0; + Shape *shape = static_cast(child); + do { + ref element = shape->getElement(index++); + if (element == NULL) + break; + addChild(name, element); + } while (true); + } else { + Shape::addChild(name, child); + } + } + + std::string toString() const { + std::ostringstream oss; + oss << "Deformable[" << endl + << " mesh = " << indent(m_mesh.toString()) << endl + << "]"; + return oss.str(); + } + + MTS_DECLARE_CLASS() +private: + ref m_mmap; + ref m_kdtree; + std::vector m_frameTimes; + std::vector m_positions; + ref m_mesh; + uint32_t m_vertexCount; + AABB m_aabb; +}; + +MTS_IMPLEMENT_CLASS(SpaceTimeKDTree, false, KDTreeBase) +MTS_IMPLEMENT_CLASS_S(Deformable, false, Shape) +MTS_EXPORT_PLUGIN(Deformable, "Deformable shape"); +MTS_NAMESPACE_END diff --git a/src/shapes/disk.cpp b/src/shapes/disk.cpp index 9808672f..7d575ebc 100644 --- a/src/shapes/disk.cpp +++ b/src/shapes/disk.cpp @@ -31,7 +31,7 @@ MTS_NAMESPACE_BEGIN /*!\plugin{disk}{Disk intersection primitive} * \order{4} * \parameters{ - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies a linear object-to-world transformation. * Note that non-uniform scales are not permitted! * \default{none (i.e. object space $=$ world space)} @@ -81,31 +81,29 @@ MTS_NAMESPACE_BEGIN class Disk : public Shape { public: Disk(const Properties &props) : Shape(props) { - m_objectToWorld = props.getTransform("toWorld", Transform()); + m_objectToWorld = new AnimatedTransform(props.getAnimatedTransform("toWorld", Transform())); + if (props.getBoolean("flipNormals", false)) - m_objectToWorld = m_objectToWorld * Transform::scale(Vector(1, 1, -1)); - m_worldToObject = m_objectToWorld.inverse(); + m_objectToWorld->prependScale(Vector(1, 1, -1)); } Disk(Stream *stream, InstanceManager *manager) : Shape(stream, manager) { - m_objectToWorld = Transform(stream); - m_worldToObject = m_objectToWorld.inverse(); + m_objectToWorld = new AnimatedTransform(stream); configure(); } void serialize(Stream *stream, InstanceManager *manager) const { Shape::serialize(stream, manager); - m_objectToWorld.serialize(stream); + m_objectToWorld->serialize(stream); } void configure() { Shape::configure(); - m_normal = normalize(m_objectToWorld(Normal(0, 0, 1))); - - Vector dpdu = m_objectToWorld(Vector(1, 0, 0)); - Vector dpdv = m_objectToWorld(Vector(0, 1, 0)); + const Transform &trafo = m_objectToWorld->eval(0); + Vector dpdu = trafo(Vector(1, 0, 0)); + Vector dpdv = trafo(Vector(0, 1, 0)); if (std::abs(dot(dpdu, dpdv)) > 1e-3f) Log(EError, "Error: 'toWorld' transformation contains shear!"); @@ -117,26 +115,33 @@ public: } AABB getAABB() const { + std::set times; + m_objectToWorld->collectKeyframes(times); + AABB aabb; - aabb.expandBy(m_objectToWorld(Point( 1, 0, 0))); - aabb.expandBy(m_objectToWorld(Point(-1, 0, 0))); - aabb.expandBy(m_objectToWorld(Point( 0, 1, 0))); - aabb.expandBy(m_objectToWorld(Point( 0, -1, 0))); + for (std::set::iterator it = times.begin(); it != times.end(); ++it) { + const Transform &trafo = m_objectToWorld->eval(*it); + aabb.expandBy(trafo(Point( 1, 0, 0))); + aabb.expandBy(trafo(Point(-1, 0, 0))); + aabb.expandBy(trafo(Point( 0, 1, 0))); + aabb.expandBy(trafo(Point( 0, -1, 0))); + } return aabb; } Float getSurfaceArea() const { - Vector dpdu = m_objectToWorld(Vector(1, 0, 0)); - Vector dpdv = m_objectToWorld(Vector(0, 1, 0)); + 
const Transform &trafo = m_objectToWorld->eval(0); + Vector dpdu = trafo(Vector(1, 0, 0)); + Vector dpdv = trafo(Vector(0, 1, 0)); return M_PI * dpdu.length() * dpdv.length(); } inline bool rayIntersect(const Ray &_ray, Float mint, Float maxt, Float &t, void *temp) const { Ray ray; - m_worldToObject.transformAffine(_ray, ray); + m_objectToWorld->eval(ray.time).inverse().transformAffine(_ray, ray); Float hit = -ray.o.z / ray.d.z; - if (hit < mint || hit > maxt) + if (!(hit >= mint && hit <= maxt)) return false; Point local = ray(hit); @@ -173,18 +178,20 @@ public: phi += 2*M_PI; Float cosPhi = data[0] * invR, sinPhi = data[1] * invR; + const Transform &trafo = m_objectToWorld->eval(ray.time); its.shape = this; if (r != 0) { - its.dpdu = m_objectToWorld(Vector(cosPhi, sinPhi, 0)); - its.dpdv = m_objectToWorld(Vector(-sinPhi, cosPhi, 0)); + its.dpdu = trafo(Vector(cosPhi, sinPhi, 0)); + its.dpdv = trafo(Vector(-sinPhi, cosPhi, 0)); } else { - its.dpdu = m_objectToWorld(Vector(1, 0, 0)); - its.dpdv = m_objectToWorld(Vector(0, 1, 0)); + its.dpdu = trafo(Vector(1, 0, 0)); + its.dpdv = trafo(Vector(0, 1, 0)); } its.shFrame = its.geoFrame = Frame( - normalize(its.dpdu), normalize(its.dpdv), m_normal); + normalize(its.dpdu), normalize(its.dpdv), + normalize(trafo(Normal(0, 0, 1)))); its.uv = Point2(r, phi * INV_TWOPI); its.p = ray(its.t); its.wi = its.toLocal(-ray.d); @@ -206,16 +213,19 @@ public: Float dphi = (2 * M_PI) / (Float) (phiSteps-1); - Point center = m_objectToWorld(Point(0.0f)); + const Transform &trafo = m_objectToWorld->eval(0.0f); + Point center = trafo(Point(0.0f)); + Normal normal = normalize(trafo(Normal(0, 0, 1))); + for (uint32_t i=0; ieval(pRec.time); Point2 p = Warp::squareToUniformDiskConcentric(sample); - pRec.p = m_objectToWorld(Point3(p.x, p.y, 0)); - pRec.n = m_normal; + pRec.p = trafo(Point3(p.x, p.y, 0)); + pRec.n = trafo(normalize(Normal(0,0,1))); pRec.pdf = m_invSurfaceArea; pRec.measure = EArea; } @@ -261,7 +272,7 @@ public: std::string toString() const { std::ostringstream oss; oss << "Disk[" << endl - << " objectToWorld = " << indent(m_objectToWorld.toString()) << ", " << endl + << " objectToWorld = " << indent(m_objectToWorld->toString()) << "," << endl << " bsdf = " << indent(m_bsdf.toString()) << "," << endl; if (isMediumTransition()) { oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl @@ -276,10 +287,7 @@ public: MTS_DECLARE_CLASS() private: - Transform m_objectToWorld; - Transform m_worldToObject; - Normal m_normal; - Float m_surfaceArea; + ref m_objectToWorld; Float m_invSurfaceArea; }; diff --git a/src/shapes/hair.cpp b/src/shapes/hair.cpp index b98b0eae..cfd791f4 100644 --- a/src/shapes/hair.cpp +++ b/src/shapes/hair.cpp @@ -350,8 +350,9 @@ public: int j = (i==0) ? 
axis1 : axis2; Float alpha = ellipseAxes[0][j]; Float beta = ellipseAxes[1][j]; - Float ratio = beta/alpha, tmp = std::sqrt(1+ratio*ratio); - Float cosTheta = 1/tmp, sinTheta = ratio/tmp; + Float tmp = 1 / std::sqrt(alpha*alpha + beta*beta); + Float cosTheta = alpha * tmp, sinTheta = beta*tmp; + Point p1 = ellipseCenter + cosTheta*ellipseAxes[0] + sinTheta*ellipseAxes[1]; Point p2 = ellipseCenter - cosTheta*ellipseAxes[0] - sinTheta*ellipseAxes[1]; @@ -504,7 +505,7 @@ public: if (!solveQuadraticDouble(A, B, C, nearT, farT)) return false; - if (nearT > maxt || farT < mint) + if (!(nearT <= maxt && farT >= mint)) /* NaN-aware conditionals */ return false; /* Next check the intersection points against the miter planes */ @@ -523,7 +524,7 @@ public: p = Point(rayO + rayD * nearT); t = (Float) nearT; } else if (dot(pointFar - v1, n1) >= 0 && - dot(pointFar - v2, n2) <= 0) { + dot(pointFar - v2, n2) <= 0) { if (farT > maxt) return false; p = Point(rayO + rayD * nearT); diff --git a/src/shapes/instance.cpp b/src/shapes/instance.cpp index b4e5cfa3..ef4a4415 100644 --- a/src/shapes/instance.cpp +++ b/src/shapes/instance.cpp @@ -25,36 +25,49 @@ MTS_NAMESPACE_BEGIN * \parameters{ * \parameter{\Unnamed}{\ShapeGroup}{A reference to a * shape group that should be instantiated} - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies an optional linear instance-to-world transformation. * \default{none (i.e. instance space $=$ world space)} * } * } + * \renderings{ + * \rendering{Surface viewed from the top}{shape_instance_fractal_top} + * \rendering{Surface viewed from the bottom}{shape_instance_fractal_bot} + * \caption{ + * A visualization of a fractal surface by Irving and Segerman + * (a 2D Gosper curve developed up to level 5 along the third + * dimension). This scene makes use of instancing to replicate + * similar structures to cheaply render a shape that effectively + * consists of several hundred million triangles. + * } + * } * * This plugin implements a geometry instance used to efficiently replicate - * geometry many times. For details, please refer to the \pluginref{shapegroup} - * plugin. + * geometry many times. For details on how to create instances, refer to + * the \pluginref{shapegroup} plugin. + * \remarks{ + * \item Note that it is \emph{not} possible to assign a different + * material to each instance --- the material assignment specified within + * the shape group is the one that matters. + * \item Shape groups cannot be used to replicate shapes with + * attached emitters, sensors, or subsurface scattering models. 
+ * } */ Instance::Instance(const Properties &props) : Shape(props) { - m_objectToWorld = props.getTransform("toWorld", Transform()); - m_worldToObject = m_objectToWorld.inverse(); - m_invScale = 1.0f/m_objectToWorld(Vector(0, 0, 1)).length(); + m_transform = props.getAnimatedTransform("toWorld", Transform()); } Instance::Instance(Stream *stream, InstanceManager *manager) : Shape(stream, manager) { m_shapeGroup = static_cast(manager->getInstance(stream)); - m_objectToWorld = Transform(stream); - m_worldToObject = m_objectToWorld.inverse(); - m_invScale = stream->readFloat(); + m_transform = new AnimatedTransform(stream); } void Instance::serialize(Stream *stream, InstanceManager *manager) const { Shape::serialize(stream, manager); manager->serialize(stream, m_shapeGroup.get()); - m_objectToWorld.serialize(stream); - stream->writeFloat(m_invScale); + m_transform->serialize(stream); } void Instance::configure() { @@ -67,15 +80,19 @@ AABB Instance::getAABB() const { const AABB &aabb = kdtree->getAABB(); if (!aabb.isValid()) // the geometry group is empty return aabb; - AABB result; - for (int i=0; i<8; ++i) - result.expandBy(m_objectToWorld(aabb.getCorner(i))); - return result; -} -Float Instance::getSurfaceArea() const { - Log(EError, "Instance::getSurfaceArea(): not supported!"); - return 0.0f; + std::set times; + m_transform->collectKeyframes(times); + + AABB result; + for (std::set::iterator it = times.begin(); it != times.end(); ++it) { + const Transform &trafo = m_transform->eval(*it); + + for (int i=0; i<8; ++i) + result.expandBy(trafo(aabb.getCorner(i))); + } + + return result; } void Instance::addChild(const std::string &name, ConfigurableObject *child) { @@ -98,49 +115,66 @@ size_t Instance::getEffectivePrimitiveCount() const { bool Instance::rayIntersect(const Ray &_ray, Float mint, Float maxt, Float &t, void *temp) const { const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); + const Transform &trafo = m_transform->eval(_ray.time); Ray ray; - m_worldToObject(_ray, ray); + trafo.inverse()(_ray, ray); return kdtree->rayIntersect(ray, mint, maxt, t, temp); } bool Instance::rayIntersect(const Ray &_ray, Float mint, Float maxt) const { const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); Ray ray; - m_worldToObject(_ray, ray); + const Transform &trafo = m_transform->eval(_ray.time); + trafo.inverse()(_ray, ray); return kdtree->rayIntersect(ray, mint, maxt); } void Instance::fillIntersectionRecord(const Ray &_ray, const void *temp, Intersection &its) const { const ShapeKDTree *kdtree = m_shapeGroup->getKDTree(); + const Transform &trafo = m_transform->eval(_ray.time); Ray ray; - m_worldToObject(_ray, ray); + trafo.inverse()(_ray, ray); kdtree->fillIntersectionRecord(ray, temp, its); - its.shFrame.n = normalize(m_objectToWorld(its.shFrame.n)); - its.shFrame.s = normalize(m_objectToWorld(its.shFrame.s)); - its.shFrame.t = normalize(m_objectToWorld(its.shFrame.t)); - its.geoFrame = Frame(normalize(m_objectToWorld(its.geoFrame.n))); - its.dpdu = m_objectToWorld(its.dpdu); - its.dpdv = m_objectToWorld(its.dpdv); - its.p = m_objectToWorld(its.p); + Vector s = trafo(its.shFrame.s); + its.shFrame.n = normalize(trafo(its.shFrame.n)); + its.shFrame.s = normalize(s - its.shFrame.n + * dot(its.shFrame.n, s)); + its.shFrame.t = cross(its.shFrame.n, its.shFrame.s); + its.geoFrame = Frame(normalize(trafo(its.geoFrame.n))); + its.dpdu = trafo(its.dpdu); + its.dpdv = trafo(its.dpdv); + its.p = trafo(its.p); its.wi = normalize(its.shFrame.toLocal(-_ray.d)); its.instance = this; } void 
Instance::getNormalDerivative(const Intersection &its, Vector &dndu, Vector &dndv, bool shadingFrame) const { - /// TODO: this is horrible + const Transform &trafo = m_transform->eval(its.time); + const Transform invTrafo = trafo.inverse(); + + /* The following is really super-inefficient, but it's + needed to be able to deal with general transformations */ Intersection temp(its); - temp.p = m_worldToObject(its.p); - temp.dpdu = m_worldToObject(its.dpdu); - temp.dpdv = m_worldToObject(its.dpdv); + temp.p = invTrafo(its.p); + temp.dpdu = invTrafo(its.dpdu); + temp.dpdv = invTrafo(its.dpdv); + + /* Determine the length of the transformed normal + *before* it was re-normalized */ + Normal tn = trafo(normalize(invTrafo(its.shFrame.n))); + Float invLen = 1 / tn.length(); + tn *= invLen; + its.shape->getNormalDerivative(temp, dndu, dndv, shadingFrame); - /* The following will probably be incorrect for - non-rigid transformations */ - dndu = m_objectToWorld(Normal(dndu))*m_invScale; - dndv = m_objectToWorld(Normal(dndv))*m_invScale; + dndu = trafo(Normal(dndu)) * invLen; + dndv = trafo(Normal(dndv)) * invLen; + + dndu -= tn * dot(tn, dndu); + dndv -= tn * dot(tn, dndv); } MTS_IMPLEMENT_CLASS_S(Instance, false, Shape) diff --git a/src/shapes/instance.h b/src/shapes/instance.h index de17530b..24f096ed 100644 --- a/src/shapes/instance.h +++ b/src/shapes/instance.h @@ -40,7 +40,7 @@ public: void configure(); /// Return the object-to-world transformation used by this instance - inline Transform getWorldTransform() const { return m_objectToWorld; } + inline const AnimatedTransform *getWorldTransform() const { return m_transform.get(); } /// Add a child ConfigurableObject void addChild(const std::string &name, ConfigurableObject *child); @@ -57,8 +57,6 @@ public: AABB getAABB() const; - Float getSurfaceArea() const; - bool rayIntersect(const Ray &_ray, Float mint, Float maxt, Float &t, void *temp) const; @@ -80,8 +78,7 @@ public: MTS_DECLARE_CLASS() private: ref m_shapeGroup; - Transform m_objectToWorld, m_worldToObject; - Float m_invScale; + ref m_transform; }; MTS_NAMESPACE_END diff --git a/src/shapes/obj.cpp b/src/shapes/obj.cpp index 14834ed0..2e1130e6 100644 --- a/src/shapes/obj.cpp +++ b/src/shapes/obj.cpp @@ -55,9 +55,8 @@ MTS_NAMESPACE_BEGIN * this convention. \default{\code{true}, i.e. flip them to get the * correct coordinates}. * } - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies an optional linear object-to-world transformation. - * Note that non-uniform scales are not permitted! * \default{none (i.e. object space $=$ world space)} * } * } diff --git a/src/shapes/ply.cpp b/src/shapes/ply.cpp index 4faa7f25..25bec1d7 100644 --- a/src/shapes/ply.cpp +++ b/src/shapes/ply.cpp @@ -62,9 +62,8 @@ MTS_NAMESPACE_BEGIN * Optional flag to flip all normals. \default{\code{false}, i.e. * the normals are left unchanged}. * } - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies an optional linear object-to-world transformation. - * Note that non-uniform scales are not permitted! * \default{none (i.e. 
object space $=$ world space)} * } * \parameter{srgb}{\Boolean}{ @@ -108,13 +107,14 @@ public: Log(EError, "PLY file \"%s\" could not be found!", filePath.string().c_str()); m_triangleCount = m_vertexCount = 0; - m_vertexCtr = m_triangleCtr = m_triangleIdxCtr = 0; + m_vertexCtr = m_faceCount = m_faceCtr = m_indexCtr = 0; m_normal = Normal(0.0f); m_uv = Point2(0.0f); m_hasNormals = false; m_hasTexCoords = false; - memset(&m_triangle, 0, sizeof(Triangle)); + memset(&m_face, 0, sizeof(uint32_t)*4); loadPLY(filePath); + if (m_triangleCount == 0 || m_vertexCount == 0) Log(EError, "Unable to load \"%s\" (no triangles or vertices found)!"); @@ -125,7 +125,15 @@ public: rebuildTopology(props.getFloat("maxSmoothAngle")); } - Assert(m_triangleCtr == m_triangleCount); + if (m_triangleCount < m_faceCount * 2) { + /* Needed less memory than the earlier conservative estimate -- free it! */ + Triangle *temp = new Triangle[m_triangleCount]; + memcpy(temp, m_triangles, sizeof(Triangle) * m_triangleCount); + delete[] m_triangles; + m_triangles = temp; + } + + Assert(m_faceCtr == m_faceCount); Assert(m_vertexCtr == m_vertexCount); } @@ -171,8 +179,8 @@ public: std::tr1::bind(&PLYLoader::vertex_end_callback, this) ); } else if (element_name == "face") { - m_triangleCount = count; - m_triangles = new Triangle[m_triangleCount]; + m_faceCount = count; + m_triangles = new Triangle[m_faceCount*2]; return std::tr1::tuple, std::tr1::function >( std::tr1::bind(&PLYLoader::face_begin_callback, this), @@ -221,7 +229,7 @@ public: m_aabb.expandBy(p); m_positions[m_vertexCtr] = p; if (m_normals) - m_normals[m_vertexCtr] = m_objectToWorld(m_normal); + m_normals[m_vertexCtr] = normalize(m_objectToWorld(m_normal)); if (m_texcoords) m_texcoords[m_vertexCtr] = m_uv; if (m_colors) { @@ -263,32 +271,42 @@ public: void face_end_callback() { } void face_vertex_indices_begin_uint8(ply::uint8 size) { - if (size != 3) - Log(EError, "Only triangle PLY meshes are supported for now."); - m_triangleIdxCtr = 0; + if (size != 3 && size != 4) + Log(EError, "Only triangle and quad-based PLY meshes are supported for now."); + m_indexCtr = 0; } void face_vertex_indices_begin_uint32(ply::uint32 size) { - if (size != 3) - Log(EError, "Only triangle PLY meshes are supported for now."); - m_triangleIdxCtr = 0; + if (size != 3 && size != 4) + Log(EError, "Only triangle and quad-based PLY meshes are supported for now."); + m_indexCtr = 0; } void face_vertex_indices_element_int32(ply::int32 element) { - Assert(m_triangleIdxCtr < 3); + Assert(m_indexCtr < 4); Assert((size_t) element < m_vertexCount); - m_triangle.idx[m_triangleIdxCtr++] = element; + m_face[m_indexCtr++] = element; } void face_vertex_indices_element_uint32(ply::uint32 element) { - Assert(m_triangleIdxCtr < 3); + Assert(m_indexCtr < 4); Assert((size_t) element < m_vertexCount); - m_triangle.idx[m_triangleIdxCtr++] = element; + m_face[m_indexCtr++] = element; } void face_vertex_indices_end() { - Assert(m_triangleIdxCtr == 3); - m_triangles[m_triangleCtr++] = m_triangle; + Assert(m_indexCtr == 3 || m_indexCtr == 4); + + Triangle t; + t.idx[0] = m_face[0]; t.idx[1] = m_face[1]; t.idx[2] = m_face[2]; + m_triangles[m_triangleCount++] = t; + + if (m_indexCtr == 4) { + t.idx[0] = m_face[3]; t.idx[1] = m_face[0]; t.idx[2] = m_face[2]; + m_triangles[m_triangleCount++] = t; + } + + m_faceCtr++; } MTS_DECLARE_CLASS() @@ -297,8 +315,9 @@ private: Normal m_normal; Float m_red, m_green, m_blue; Transform m_objectToWorld; - size_t m_vertexCtr, m_triangleCtr, m_triangleIdxCtr; - Triangle m_triangle; + 
size_t m_faceCount, m_vertexCtr; + size_t m_faceCtr, m_indexCtr; + uint32_t m_face[4]; bool m_hasNormals, m_hasTexCoords; Point2 m_uv; bool m_sRGB; @@ -380,7 +399,7 @@ template<> std::tr1::tuple, std::tr1::function, std::tr1::function > PLYLoader::list_property_definition_callback(const std::string& element_name, const std::string& property_name) { - if ((element_name == "face") && (property_name == "vertex_indices")) { + if ((element_name == "face") && (property_name == "vertex_indices" || property_name == "vertex_index")) { return std::tr1::tuple, std::tr1::function, std::tr1::function >( std::tr1::bind(&PLYLoader::face_vertex_indices_begin_uint8, this, _1), @@ -404,7 +423,7 @@ template<> std::tr1::tuple, std::tr1::function, std::tr1::function > PLYLoader::list_property_definition_callback(const std::string& element_name, const std::string& property_name) { - if ((element_name == "face") && (property_name == "vertex_indices")) { + if ((element_name == "face") && (property_name == "vertex_indices" || property_name == "vertex_index")) { return std::tr1::tuple, std::tr1::function, std::tr1::function >( std::tr1::bind(&PLYLoader::face_vertex_indices_begin_uint32, this, _1), @@ -428,7 +447,7 @@ template<> std::tr1::tuple, std::tr1::function, std::tr1::function > PLYLoader::list_property_definition_callback(const std::string& element_name, const std::string& property_name) { - if ((element_name == "face") && (property_name == "vertex_indices")) { + if ((element_name == "face") && (property_name == "vertex_indices" || property_name == "vertex_index")) { return std::tr1::tuple, std::tr1::function, std::tr1::function >( std::tr1::bind(&PLYLoader::face_vertex_indices_begin_uint8, this, _1), @@ -452,7 +471,7 @@ template<> std::tr1::tuple, std::tr1::function, std::tr1::function > PLYLoader::list_property_definition_callback(const std::string& element_name, const std::string& property_name) { - if ((element_name == "face") && (property_name == "vertex_indices")) { + if ((element_name == "face") && (property_name == "vertex_indices" || property_name == "vertex_index")) { return std::tr1::tuple, std::tr1::function, std::tr1::function >( std::tr1::bind(&PLYLoader::face_vertex_indices_begin_uint32, this, _1), diff --git a/src/shapes/ply/byte_order.hpp b/src/shapes/ply/byte_order.hpp index cf1bb596..db509742 100644 --- a/src/shapes/ply/byte_order.hpp +++ b/src/shapes/ply/byte_order.hpp @@ -10,7 +10,8 @@ namespace ply { #if (defined(__powerpc) || defined(__powerpc__) || defined(__POWERPC__) || defined(__ppc__) || defined(_M_PPC) || defined(__ARCH_PPC)) # define PLY_BIG_ENDIAN #elif (defined(i386) || defined(__i386__) || defined(__i386) || defined(_M_IX86) || defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || defined(__INTEL__)) \ - || (defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64)) + || (defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64)) \ + || defined(__ARMEL__) # define PLY_LITTLE_ENDIAN #else # error diff --git a/src/shapes/rectangle.cpp b/src/shapes/rectangle.cpp index a2a384b6..2be94648 100644 --- a/src/shapes/rectangle.cpp +++ b/src/shapes/rectangle.cpp @@ -30,7 +30,7 @@ MTS_NAMESPACE_BEGIN /*!\plugin{rectangle}{Rectangle intersection primitive} * \order{3} * \parameters{ - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies a linear object-to-world transformation. * It is allowed to use non-uniform scaling, but no shear. 
* \default{none (i.e. object space $=$ world space)} @@ -127,7 +127,7 @@ public: m_worldToObject.transformAffine(_ray, ray); Float hit = -ray.o.z / ray.d.z; - if (hit < mint || hit > maxt) + if (!(hit >= mint && hit <= maxt)) return false; Point local = ray(hit); @@ -212,6 +212,7 @@ public: pRec.n = m_frame.n; pRec.pdf = m_invSurfaceArea; pRec.measure = EArea; + pRec.uv = sample; } Float pdfPosition(const PositionSamplingRecord &pRec) const { @@ -229,7 +230,7 @@ public: std::string toString() const { std::ostringstream oss; oss << "Rectangle[" << endl - << " objectToWorld = " << indent(m_objectToWorld.toString()) << ", " << endl; + << " objectToWorld = " << indent(m_objectToWorld.toString()) << "," << endl; if (isMediumTransition()) oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl << " exteriorMedium = " << indent(m_exteriorMedium.toString()) << "," << endl; diff --git a/src/shapes/serialized.cpp b/src/shapes/serialized.cpp index 279283fa..8ab416f0 100644 --- a/src/shapes/serialized.cpp +++ b/src/shapes/serialized.cpp @@ -21,9 +21,18 @@ #include #include #include +#include + +#include + +/// How many files to keep open in the cache, per thread +#define MTS_SERIALIZED_CACHE_SIZE 4 MTS_NAMESPACE_BEGIN +/* Avoid having to include scenehandler.h */ +extern MTS_EXPORT_RENDER void pushSceneCleanupHandler(void (*cleanup)()); + /*!\plugin{serialized}{Serialized mesh loader} * \order{7} * \parameters{ @@ -48,9 +57,8 @@ MTS_NAMESPACE_BEGIN * Optional flag to flip all normals. \default{\code{false}, i.e. * the normals are left unchanged}. * } - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies an optional linear object-to-world transformation. - * Note that non-uniform scales are not permitted! * \default{none (i.e. object space $=$ world space)} * } * } @@ -152,9 +160,8 @@ public: /* Load the geometry */ Log(EInfo, "Loading shape %i from \"%s\" ..", shapeIndex, filePath.filename().string().c_str()); - ref stream = new FileStream(filePath, FileStream::EReadOnly); ref timer = new Timer(); - loadCompressed(stream, shapeIndex); + loadCompressed(filePath, shapeIndex); Log(EDebug, "Done (" SIZE_T_FMT " triangles, " SIZE_T_FMT " vertices, %i ms)", m_triangleCount, m_vertexCount, timer->getMilliseconds()); @@ -179,7 +186,7 @@ public: } if (m_normals) { for (size_t i=0; isetByteOrder(Stream::ELittleEndian); + const short version = SerializedMesh::readHeader(m_fstream); + if (SerializedMesh::readOffsetDictionary(m_fstream, + version, m_offsets) < 0) { + // Assume there is a single mesh in the file at offset 0 + m_offsets.resize(1, 0); + } + } + + /** + * Positions the stream at the location for the given shape index. + * Returns the modified stream. + */ + inline FileStream* seekStream(size_t shapeIndex) { + if (shapeIndex > m_offsets.size()) { + SLog(EError, "Unable to unserialize mesh, " + "shape index is out of range! 
(requested %i out of 0..%i)", + shapeIndex, (int) (m_offsets.size()-1)); + } + const size_t pos = m_offsets[shapeIndex]; + m_fstream->seek(pos); + return m_fstream; + } + + private: + std::vector m_offsets; + ref m_fstream; + }; + + typedef LRUCache, + boost::shared_ptr > MeshLoaderCache; + + class FileStreamCache : MeshLoaderCache { + public: + inline boost::shared_ptr get(const fs::path& path) { + bool dummy; + return MeshLoaderCache::get(path, dummy); + } + + FileStreamCache() : MeshLoaderCache(MTS_SERIALIZED_CACHE_SIZE, + &FileStreamCache::create) { } + + private: + inline static boost::shared_ptr create(const fs::path &path) { + return boost::make_shared(path); + } + }; + + /// Release all currently held offset caches / file streams + static void flushCache() { + m_cache.set(NULL); + } + + /// Loads the mesh from the thread-local file stream cache + void loadCompressed(const fs::path& filePath, const int idx) { + if (EXPECT_NOT_TAKEN(idx < 0)) { + Log(EError, "Unable to unserialize mesh, " + "shape index is negative! (requested %i out of 0..%i)", idx); + } + + // Get the thread local cache; create it if this is the first time + FileStreamCache* cache = m_cache.get(); + if (EXPECT_NOT_TAKEN(cache == NULL)) { + cache = new FileStreamCache(); + m_cache.set(cache); + mitsuba::pushSceneCleanupHandler(&SerializedMesh::flushCache); + } + + boost::shared_ptr meshLoader = cache->get(filePath); + Assert(meshLoader != NULL); + TriMesh::loadCompressed(meshLoader->seekStream((size_t) idx)); + } + + static ThreadLocal m_cache; }; +ThreadLocal SerializedMesh::m_cache; + MTS_IMPLEMENT_CLASS_S(SerializedMesh, false, TriMesh) MTS_EXPORT_PLUGIN(SerializedMesh, "Serialized mesh loader"); MTS_NAMESPACE_END diff --git a/src/shapes/shapegroup.cpp b/src/shapes/shapegroup.cpp index 7f9ab86b..6dff5e9d 100644 --- a/src/shapes/shapegroup.cpp +++ b/src/shapes/shapegroup.cpp @@ -34,9 +34,9 @@ MTS_NAMESPACE_BEGIN * so that they can efficiently be referenced many times using the * \pluginref{instance} plugin. This is useful for rendering things like * forests, where only a few distinct types of trees have to be kept - * in memory. + * in memory. An example is given below: * - + * \vspace{5mm} * \begin{xml}[caption={An example of geometry instancing}, label=lst:instancing] * * @@ -68,14 +68,6 @@ MTS_NAMESPACE_BEGIN * * * \end{xml} - * \vspace{-2mm} - * \remarks{ - * \item Note that it is not possible to assign a different - * material to each instance --- the material assignment specified within - * the shape group is the one that matters. - * \item Shape groups can not be used to replicate shapes with - * attached emitters, sensors, or subsurface scattering models. 
- * } */ ShapeGroup::ShapeGroup(const Properties &props) : Shape(props) { @@ -120,8 +112,8 @@ Float ShapeGroup::getSurfaceArea() const { void ShapeGroup::addChild(const std::string &name, ConfigurableObject *child) { const Class *cClass = child->getClass(); - if (cClass->derivesFrom(MTS_CLASS(ShapeGroup)) || cClass->getName() == "ShapeInstance") { - Log(EError, "Nested instancing is not supported!"); + if (cClass->derivesFrom(MTS_CLASS(ShapeGroup)) || cClass->getName() == "Instance") { + Log(EError, "Nested instancing is not permitted"); } else if (cClass->derivesFrom(MTS_CLASS(Shape))) { Shape *shape = static_cast(child); if (shape->isEmitter()) @@ -170,7 +162,7 @@ size_t ShapeGroup::getEffectivePrimitiveCount() const { std::string ShapeGroup::toString() const { std::ostringstream oss; oss << "ShapeGroup[" << endl - << " name = \"" << m_name << "\", " << endl + << " name = \"" << m_name << "\"," << endl << " primCount = " << m_kdtree->getPrimitiveCount() << endl << "]"; return oss.str(); diff --git a/src/shapes/sphere.cpp b/src/shapes/sphere.cpp index b85ea304..d35ad11c 100644 --- a/src/shapes/sphere.cpp +++ b/src/shapes/sphere.cpp @@ -37,7 +37,7 @@ MTS_NAMESPACE_BEGIN * \parameter{radius}{\Float}{ * Radius of the sphere in object-space units \default{1} * } - * \parameter{toWorld}{\Transform}{ + * \parameter{toWorld}{\Transform\Or\Animation}{ * Specifies an optional linear object-to-world transformation. * Note that non-uniform scales are not permitted! * \default{none (i.e. object space $=$ world space)} @@ -172,8 +172,9 @@ public: if (!solveQuadraticDouble(A, B, C, nearT, farT)) return false; - if (nearT > maxt || farT < mint) + if (!(nearT <= maxt && farT >= mint)) /* NaN-aware conditionals */ return false; + if (nearT < mint) { if (farT > maxt) return false; @@ -273,7 +274,7 @@ public: void getNormalDerivative(const Intersection &its, Vector &dndu, Vector &dndv, bool shadingFrame) const { - Float invRadius = 1.0f / m_radius; + Float invRadius = (m_flipNormals ? 
-1.0f : 1.0f) / m_radius; dndu = its.dpdu * invRadius; dndv = its.dpdv * invRadius; } @@ -468,8 +469,8 @@ public: std::string toString() const { std::ostringstream oss; oss << "Sphere[" << endl - << " radius = " << m_radius << ", " << endl - << " center = " << m_center.toString() << ", " << endl + << " radius = " << m_radius << "," << endl + << " center = " << m_center.toString() << "," << endl << " bsdf = " << indent(m_bsdf.toString()) << "," << endl; if (isMediumTransition()) oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl diff --git a/src/subsurface/bluenoise.cpp b/src/subsurface/bluenoise.cpp index 7844bbbc..20ff403c 100644 --- a/src/subsurface/bluenoise.cpp +++ b/src/subsurface/bluenoise.cpp @@ -145,7 +145,7 @@ void blueNoisePointSet(const Scene *scene, const std::vector &shapes, Vector3i cellCount; for (int i=0; i<3; ++i) - cellCount[i] = ceilToInt(extents[i] * invCellWidth); + cellCount[i] = std::max(1, ceilToInt(extents[i] * invCellWidth)); SLog(EInfo, " phase 2: computing cell indices .."); #if defined(MTS_OPENMP) @@ -218,7 +218,7 @@ void blueNoisePointSet(const Scene *scene, const std::vector &shapes, Cell &cell = cells[cellID]; int arrayIndex = cell.firstIndex + trial; - if (arrayIndex > (int) samples.size() || + if (arrayIndex >= (int) samples.size() || samples[arrayIndex].cellID != cellID || cell.sample != -1) continue; diff --git a/src/subsurface/dipole.cpp b/src/subsurface/dipole.cpp index 4865f488..ebf2b982 100644 --- a/src/subsurface/dipole.cpp +++ b/src/subsurface/dipole.cpp @@ -208,9 +208,9 @@ static int irrOctreeIndex = 0; * rendered using diffusion theory and radiative transport, respectively. * The former produces an incorrect result, since the assumption of * many scattering events breaks down. 
- * \textbf{(c)}: When the number of irradiance samples is too low, the - * resulting noise becomes visible as ``blotchy'' artifacts in the - * rendering.} + * \textbf{(c)}: When the number of irradiance samples is too low when rendering + * with the dipole model, the resulting noise becomes visible as ``blotchy'' artifacts + * in the rendering.} * } * * \subsubsection*{Typical material setup} @@ -289,7 +289,6 @@ public: m_quality = props.getFloat("quality", 0.2f); /* Asymmetry parameter of the phase function */ - m_ready = false; m_octreeResID = -1; lookupMaterial(props, m_sigmaS, m_sigmaA, m_g, &m_eta); @@ -306,7 +305,6 @@ public: m_octreeIndex = stream->readInt(); m_irrSamples = stream->readInt(); m_irrIndirect = stream->readBool(); - m_ready = false; m_octreeResID = -1; configure(); } @@ -336,7 +334,7 @@ public: Spectrum Lo(const Scene *scene, Sampler *sampler, const Intersection &its, const Vector &d, int depth) const { - if (!m_ready || dot(its.shFrame.n, d) < 0) + if (!m_active || dot(its.shFrame.n, d) < 0) return Spectrum(0.0f); IsotropicDipoleQuery query(m_zr, m_zv, m_sigmaTr, its.p); @@ -375,7 +373,7 @@ public: bool preprocess(const Scene *scene, RenderQueue *queue, const RenderJob *job, int sceneResID, int cameraResID, int _samplerResID) { - if (m_ready) + if (m_octree) return true; if (!scene->getIntegrator()->getClass() @@ -445,7 +443,6 @@ public: Log(EDebug, "Done clustering (took %i ms).", timer->getMilliseconds()); m_octreeResID = Scheduler::getInstance()->registerResource(m_octree); - m_ready = true; return true; } @@ -454,7 +451,7 @@ public: std::string octreeName = formatString("irrOctree%i", m_octreeIndex); if (!m_octree.get() && params.find(octreeName) != params.end()) { m_octree = static_cast(params[octreeName]); - m_ready = true; + m_active = true; } } @@ -474,7 +471,6 @@ private: int m_octreeResID, m_octreeIndex; int m_irrSamples; bool m_irrIndirect; - bool m_ready; }; MTS_IMPLEMENT_CLASS_S(IsotropicDipole, false, Subsurface) diff --git a/src/subsurface/irrproc.cpp b/src/subsurface/irrproc.cpp index b5ff2cde..b546fb0e 100644 --- a/src/subsurface/irrproc.cpp +++ b/src/subsurface/irrproc.cpp @@ -55,6 +55,7 @@ public: m_sampler = static_cast(getResource("sampler")); m_integrator = static_cast(getResource("integrator")); m_scene->wakeup(NULL, m_resources); + m_integrator->wakeup(NULL, m_resources); } void process(const WorkUnit *workUnit, WorkResult *workResult, diff --git a/src/tests/SConscript b/src/tests/SConscript index fc42deae..0dc60491 100644 --- a/src/tests/SConscript +++ b/src/tests/SConscript @@ -9,7 +9,7 @@ bidirEnv.Append(LIBPATH=['#src/libbidir']) for plugin in glob.glob(GetBuildPath('test_*.cpp')): name = os.path.basename(plugin) - if "bidir" in name: + if "bidir" in name: lib = bidirEnv.SharedLibrary(name[0:len(name)-4], name) else: lib = testEnv.SharedLibrary(name[0:len(name)-4], name) diff --git a/src/tests/test_rtrans.cpp b/src/tests/test_rtrans.cpp index 49c98637..cd9332a4 100644 --- a/src/tests/test_rtrans.cpp +++ b/src/tests/test_rtrans.cpp @@ -37,7 +37,7 @@ void transmittanceIntegrand(const BSDF *bsdf, const Vector &wi, size_t nPts, con void diffTransmittanceIntegrand(Float *data, size_t resolution, size_t nPts, const Float *in, Float *out) { for (size_t i=0; i bitmap; + m_separateAlpha = props.getBoolean("separateAlpha", false); + if (props.hasProperty("bitmap")) { /* Support initialization via raw data passed from another plugin */ bitmap = reinterpret_cast(props.getData("bitmap").ptr); @@ -194,7 +196,12 @@ public: Log(EError, "Could not 
determine modification time of \"%s\"!", m_filename.string().c_str()); cacheFile = m_filename; - cacheFile.replace_extension(".mip"); + + if (!m_separateAlpha) + cacheFile.replace_extension(".mip"); + else + cacheFile.replace_extension(".alpha.mip"); + tryReuseCache = fs::exists(cacheFile) && props.getBoolean("cache", true); } @@ -241,18 +248,27 @@ public: } Bitmap::EPixelFormat pixelFormat; - switch (bitmap->getPixelFormat()) { - case Bitmap::ELuminance: - case Bitmap::ELuminanceAlpha: - pixelFormat = Bitmap::ELuminance; - break; - case Bitmap::ERGB: - case Bitmap::ERGBA: - pixelFormat = Bitmap::ERGB; - break; - default: - Log(EError, "The input image has an unsupported pixel format!"); - return; + if (m_separateAlpha) { + /* Create a texture from the alpha channel of an image */ + if (!bitmap->hasAlpha()) + Log(EError, "separateAlpha specified, but the image contains no alpha channel!"); + pixelFormat = Bitmap::ELuminance; + bitmap = bitmap->separateChannel(bitmap->getChannelCount()-1); + bitmap->setGamma(1.0f); + } else { + switch (bitmap->getPixelFormat()) { + case Bitmap::ELuminance: + case Bitmap::ELuminanceAlpha: + pixelFormat = Bitmap::ELuminance; + break; + case Bitmap::ERGB: + case Bitmap::ERGBA: + pixelFormat = Bitmap::ERGB; + break; + default: + Log(EError, "The input image has an unsupported pixel format!"); + return; + } } /* (Re)generate the MIP map hierarchy; downsample using a @@ -305,6 +321,7 @@ public: m_wrapModeV = (ReconstructionFilter::EBoundaryCondition) stream->readUInt(); m_gamma = stream->readFloat(); m_maxAnisotropy = stream->readFloat(); + m_separateAlpha = stream->readBool(); size_t size = stream->readSize(); ref mStream = new MemoryStream(size); @@ -323,18 +340,27 @@ public: rfilter->configure(); Bitmap::EPixelFormat pixelFormat; - switch (bitmap->getPixelFormat()) { - case Bitmap::ELuminance: - case Bitmap::ELuminanceAlpha: - pixelFormat = Bitmap::ELuminance; - break; - case Bitmap::ERGB: - case Bitmap::ERGBA: - pixelFormat = Bitmap::ERGB; - break; - default: - Log(EError, "The input image has an unsupported pixel format!"); - return; + if (m_separateAlpha) { + /* Create a texture from the alpha channel of an image */ + if (!bitmap->hasAlpha()) + Log(EError, "separateAlpha specified, but the image contains no alpha channel!"); + pixelFormat = Bitmap::ELuminance; + bitmap = bitmap->separateChannel(bitmap->getChannelCount()-1); + bitmap->setGamma(1.0f); + } else { + switch (bitmap->getPixelFormat()) { + case Bitmap::ELuminance: + case Bitmap::ELuminanceAlpha: + pixelFormat = Bitmap::ELuminance; + break; + case Bitmap::ERGB: + case Bitmap::ERGBA: + pixelFormat = Bitmap::ERGB; + break; + default: + Log(EError, "The input image has an unsupported pixel format!"); + return; + } } if (pixelFormat == Bitmap::ELuminance) @@ -360,6 +386,7 @@ public: /* We still have access to the original image -- use that, since it is probably much smaller than the in-memory representation */ ref is = new FileStream(m_filename, FileStream::EReadOnly); + stream->writeBool(m_separateAlpha); stream->writeSize(is->getSize()); is->copyTo(stream); } else { @@ -370,6 +397,7 @@ public: m_mipmap1->toBitmap() : m_mipmap3->toBitmap(); bitmap->write(Bitmap::EOpenEXR, mStream); + stream->writeBool(false); /* separateAlpha */ stream->writeSize(mStream->getSize()); stream->write(mStream->getData(), mStream->getSize()); } @@ -400,6 +428,10 @@ public: return result; } + ref getBitmap() const { + return m_mipmap1.get() ? 
m_mipmap1->toBitmap() : m_mipmap3->toBitmap(); + } + Spectrum eval(const Point2 &uv, const Vector2 &d0, const Vector2 &d1) const { stats::filteredLookups.incrementBase(); ++stats::filteredLookups; @@ -499,6 +531,7 @@ protected: ReconstructionFilter::EBoundaryCondition m_wrapModeU; ReconstructionFilter::EBoundaryCondition m_wrapModeV; Float m_gamma, m_maxAnisotropy; + bool m_separateAlpha; fs::path m_filename; }; diff --git a/src/utils/importsvg.py b/src/utils/importsvg.py deleted file mode 100755 index 440a9620..00000000 --- a/src/utils/importsvg.py +++ /dev/null @@ -1,388 +0,0 @@ -#!/usr/bin/env python -# -# This file is part of Mitsuba, a physically based rendering system. -# -# Copyright (c) 2007-2012 by Wenzel Jakob and others. -# -# Mitsuba is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License Version 3 -# as published by the Free Software Foundation. -# -# Mitsuba is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from PySide.QtCore import QFile, QTextStream -from xml.etree import ElementTree as et -from lepl import Regexp, Space, Literal, Separator, List -import numpy as np -from OpenGL.GL import * -from OpenGL.GLU import * -from OpenGL.GLUT import * -import sys - -Real = lambda: Regexp(r'[\+\-]?(?:[0-9]*\.[0-9]+|[0-9]+\.' + - r'|[0-9]+)(?:[eE][\+\-]?[0-9]+)?') - -def make_path_grammar(): - sep = ~(Space() | Literal(','))[:] - with Separator(sep): - num = Real() >> float - # Moveto - M = ((Literal('M') | Literal('m')) & num[2][:]) - # Horizontal straight lines - H = (Literal('H') | Literal('h')) & num[:] - # Vertical straight lines - V = (Literal('V') | Literal('v')) & num[:] - # General straight lines - L = (Literal('L') | Literal('l')) & num[2][:] - # Cubic bezier curves (curveto) - C = (Literal('C') | Literal('c')) & num[6][:] - # Cubic bezier curves (smooth curveto) - S = (Literal('S') | Literal('s')) & num[4][:] - # Close the path - z = Literal('z') | Literal('Z') - grammar = sep & ((M|H|V|L|C|S|z) > List)[:] & sep - grammar.config.no_compile_to_regexp() - return grammar - -def make_polygon_grammar(): - sep = ~(Space() | Literal(','))[:] - with Separator(sep): - num = Real() >> float - grammar = sep & num[2][:] & sep - grammar.config.no_compile_to_regexp() - return grammar - -class BezierSpline(object): - def __init__(self, *args): - if len(args) == 4: - self.start = args[0] - self.cp1 = args[1] - self.cp2 = args[2] - self.end = args[3] - elif len(args) == 2: - d = args[1] - args[0] - self.start = args[0] - self.cp1 = args[0] + 1.0/3.0 * d - self.cp2 = args[0] + 2.0/3.0 * d - self.end = args[1] - else: - raise Exception("Invalid constructor call") - - def _eval(self, t): - tmp = 1 - t - tmp2, t2 = tmp*tmp, t*t - return self.start * (tmp*tmp2) + self.cp1 * (3*tmp2*t) + \ - self.cp2 * (3*tmp*t2) + self.end * (t*t2) - - def drawGL(self): - steps = 10 - p = self.start - glVertex2f(p[0], p[1]) - for i in range(1, steps): - p = self._eval(float(i)/(steps-1)) - glVertex2f(p[0], p[1]) - - def drawGL_tess(self): - steps = 10 - p = self.start - gluTessVertex(tobj, [p[0], p[1], 0], [p[0], p[1], 0]) - for i in range(1, steps): - p = self._eval(float(i)/(steps-1)) - gluTessVertex(tobj, [p[0], p[1], 0], [p[0], p[1], 0]) - -class AABB(object): - 
def __init__(self): - inf = float("inf") - self.min = np.array([inf, inf]) - self.max = np.array([-inf, -inf]) - - def expand_by(self, p): - self.min[0] = min(self.min[0], p[0]) - self.min[1] = min(self.min[1], p[1]) - self.max[0] = max(self.max[0], p[0]) - self.max[1] = max(self.max[1], p[1]) - - def expand_by_aabb(self, aabb): - self.min[0] = min(self.min[0], aabb.min[0]) - self.min[1] = min(self.min[1], aabb.min[1]) - self.max[0] = max(self.max[0], aabb.max[0]) - self.max[1] = max(self.max[1], aabb.max[1]) - - def size(self): - return self.max - self.min - - def __repr__(self): - return "AABB[min=%s, max=%s]" % \ - (repr(self.min), repr(self.max)) - -class Path(object): - PathGrammar = make_path_grammar() - PolygonGrammar = make_polygon_grammar() - - def __init__(self, node): - self.pos = None - self.start = None - self.cp2 = None - self.splines = [] - self.aabb = AABB() - - def getflt(key): - value = node.get(key) - return float(value) if value != None else 0 - - if 'path' in node.tag: - instructions = Path.PathGrammar.parse(node.get("d")) - elif 'polygon' in node.tag: - points = Path.PolygonGrammar.parse(node.get("points")) - instructions = [['M'] + points, ['z']] - elif 'rect' in node.tag: - x, y = getflt("x"), getflt("y") - width, height = getflt("width"), getflt("height") - instructions = [['M', x, y ], ['h', width], ['v', height], ['h', -width], ['z']] - elif 'line' in node.tag: - x1, x2 = getflt("x1"), getflt("x2") - y1, y2 = getflt("y1"), getflt("y2") - instructions = [['M', x1, y1, x2, y2 ]] - else: - raise Exception("Unknown tag!") - - self.id = node.get("id") - - self.stroke = self._color(node.get('stroke')) - self.fill = self._color(node.get('fill')) - - commandList = { - 'm' : Path._moveto, - 'h' : Path._hlineto, - 'v' : Path._vlineto, - 'l' : Path._lineto, - 'c' : Path._curveto, - 's' : Path._scurveto, - 'z' : Path._close - } - - self.index = 1 - for item in instructions: - cmd, args = item[0], item[1:] - commandList[cmd.lower()](self, cmd, args) - self.lastcmd = cmd - self.index += 1 - - for spline in self.splines: - if spline: - self.aabb.expand_by(spline.start) - self.aabb.expand_by(spline.cp1) - self.aabb.expand_by(spline.cp2) - self.aabb.expand_by(spline.end) - - def _moveto(self, cmd, args): - if cmd == 'M': - self.pos = np.array(args[0:2]) - else: - self.pos =+ np.array(args[0:2]) - if self.start is None or self.lastcmd in ['z', 'Z']: - self.start = self.pos - if len(args) > 2: - # Implicit lineto - cmd = 'L' if cmd == 'M' else 'l' - self._lineto(cmd, args[2:]) - - def _lineto(self, cmd, args): - if cmd == 'L': - end = np.array(args[0:2]) - else: - end = self.pos + np.array(args[0:2]) - d = end - self.pos - self.splines.append(BezierSpline(self.pos, end)) - self.pos = end - if len(args) > 2: - self._lineto(cmd, args[2:]) - - def _hlineto(self, cmd, args): - if cmd == 'H': - end = np.array([args[0], self.pos[1]]) - else: - end = np.array([args[0] + self.pos[0], self.pos[1]]) - self.splines.append(BezierSpline(self.pos, end)) - self.pos = end - if len(args) > 1: - self._hlineto(cmd, args[1:]) - - def _vlineto(self, cmd, args): - if cmd == 'V': - end = np.array([self.pos[0], args[0]]) - else: - end = np.array([self.pos[0], args[0] + self.pos[1]]) - self.splines.append(BezierSpline(self.pos, end)) - self.pos = end - if len(args) > 1: - self._vlineto(cmd, args[1:]) - - def _curveto(self, cmd, args): - start = self.pos - cp1 = np.array(args[0:2]) - cp2 = np.array(args[2:4]) - end = np.array(args[4:6]) - - if cmd == 'c': - cp1 += start - cp2 += start - end += start - - 
self.splines.append(BezierSpline(start, cp1, cp2, end)) - self.pos = end - self.cp2 = cp2 - - if len(args) > 6: - self._curveto(cmd, args[6:]) - - def _scurveto(self, cmd, args): - start = self.pos - cp2 = np.array(args[0:2]) - end = np.array(args[2:4]) - - if cmd == 's': - cp2 += start - end += start - - if self.cp2 != None and self.lastcmd in ['C', 'c', 'S', 's']: - cp1 = 2*start - self.cp2 - else: - cp1 = start - - self.splines.append(BezierSpline(start, cp1, cp2, end)) - self.pos = end - self.cp2 = cp2 - - if len(args) > 4: - self._scurveto(cmd, args[4:]) - - def _close(self, cmd, args): - self._lineto('L', self.start) - self.splines.append(None) - - def _color(self, value): - if value == None or value.lower() == 'none': - return None - r = int(value[1:3], 16) / 255.0 - g = int(value[3:5], 16) / 255.0 - b = int(value[5:7], 16) / 255.0 - return [r, g, b] - - def drawGL(self): - if self.fill: - glColor4f(self.fill[0], self.fill[1], self.fill[2], 1.0) - gluTessBeginPolygon(tobj, None) - gluTessBeginContour(tobj) - for spline in self.splines: - if spline: - spline.drawGL_tess() - else: - gluTessEndContour(tobj) - gluTessBeginContour(tobj) - gluTessEndContour(tobj) - gluTessEndPolygon(tobj) - - if self.stroke: - glColor4f(self.stroke[0], self.stroke[1], self.stroke[2], 1.0) - glBegin(GL_LINE_STRIP) - for spline in self.splines: - if spline: - spline.drawGL() - else: - glEnd() - glBegin(GL_LINE_LOOP) - glEnd() - - def export(self, out): - if self.id: - out.write("obj %s\n" % self.id) - else: - out.write("obj\n") - - if self.stroke: - out.write("stroke %f %f %f\n" % (self.stroke[0], self.stroke[1], self.stroke[2])) - if self.fill: - out.write("fill %f %f %f\n" % (self.fill[0], self.fill[1], self.fill[2])) - for spline in self.splines: - if spline is None: - out.write("skip\n") - else: - out.write("spline %f %f %f %f %f %f %f %f\n" % - (spline.start[0], spline.start[1], spline.cp1[0], spline.cp1[1], - spline.cp2[0], spline.cp2[1], spline.end[0], spline.end[1])) - - out.write("\n") - -class Scene(object): - def __init__(self, filename): - ns = '{http://www.w3.org/2000/svg}' - self.paths = [] - self.aabb = AABB() - def register(node): - if node.tag == ns + 'path' or node.tag == ns + 'polygon' \ - or node.tag == ns + 'line' or node.tag == ns + 'rect': - path = Path(node) - self.aabb.expand_by_aabb(path.aabb) - self.paths.append(path) - else: - for child in node: - register(child) - return - register(et.parse(filename).getroot()) - - def drawGL(self): - glClearColor(.3, .3, .3, 1.0) - glClear(GL_COLOR_BUFFER_BIT) - size = self.aabb.size().max() - xs = self.aabb.min[0] - size * 0.1 - ys = self.aabb.min[1] - size * 0.1 - size = size * 1.2 - glOrtho(xs, size, size*6.0/8.0, ys, 0, 1) - for path in self.paths: - path.drawGL() - glutSwapBuffers() - - def export(self, target): - with open(target, "w") as f: - for path in self.paths: - path.export(f) - -def keyboard(key, x, y): - if key == 'q' or key == "\x1b": - sys.exit(0) - -def initGL(): - global tobj - tobj = gluNewTess() - glHint(GL_LINE_SMOOTH_HINT, GL_NICEST) - glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST) - glEnable(GL_LINE_SMOOTH) - glEnable(GL_POLYGON_SMOOTH) - glEnable(GL_BLEND) - glLineWidth(2.0) - glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA) - gluTessCallback(tobj, GLU_TESS_VERTEX, glVertex3fv) - gluTessCallback(tobj, GLU_TESS_BEGIN, lambda x: glBegin(x)) - gluTessCallback(tobj, GLU_TESS_END, lambda: glEnd()) - gluTessCallback(tobj, GLU_TESS_COMBINE, lambda pos, data, weights: pos) - -if __name__ == '__main__': - if len(sys.argv) != 3: - 
print('Syntax: importsvg.py ') - scene = Scene(sys.argv[1]) - scene.export(sys.argv[2]) - glutInit(sys.argv) - glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB) - glutInitWindowSize(800, 600) - glutCreateWindow("Bezier spline importer") - glutDisplayFunc(lambda: scene.drawGL()) - glutKeyboardFunc(keyboard) - initGL() - glutMainLoop() diff --git a/src/utils/kdbench.cpp b/src/utils/kdbench.cpp index 70a08ce2..73f6653e 100644 --- a/src/utils/kdbench.cpp +++ b/src/utils/kdbench.cpp @@ -66,7 +66,8 @@ public: int run(int argc, char **argv) { ref fileResolver = Thread::getThread()->getFileResolver(); - char optchar, *end_ptr = NULL; + int optchar; + char *end_ptr = NULL; Float intersectionCost = -1, traversalCost = -1, emptySpaceBonus = -1; int stopPrims = -1, maxDepth = -1, exactPrims = -1, minMaxBins = -1; bool clip = true, parallel = true, retract = true, fitParameters = false; diff --git a/src/utils/tonemap.cpp b/src/utils/tonemap.cpp index ff8e5a9b..a2ac0111 100644 --- a/src/utils/tonemap.cpp +++ b/src/utils/tonemap.cpp @@ -68,7 +68,8 @@ public: int run(int argc, char **argv) { ref fileResolver = Thread::getThread()->getFileResolver(); - char optchar, *end_ptr = NULL; + int optchar; + char *end_ptr = NULL; optind = 1; Float gamma = -1, multiplier = 1; Bitmap::EFileFormat format = Bitmap::EPNG; @@ -236,9 +237,9 @@ public: } fs::path inputFile = fileResolver->resolve(argv[i]); - Log(EInfo, "Loading EXR image \"%s\" ..", inputFile.string().c_str()); + Log(EInfo, "Loading image \"%s\" ..", inputFile.string().c_str()); ref is = new FileStream(inputFile, FileStream::EReadOnly); - ref input = new Bitmap(Bitmap::EOpenEXR, is); + ref input = new Bitmap(Bitmap::EAuto, is); if (crop[2] != -1 && crop[3] != -1) input = input->crop(Point2i(crop[0], crop[1]), Vector2i(crop[2], crop[3])); @@ -281,9 +282,9 @@ public: } else { for (int i=optind; iresolve(argv[i]); - Log(EInfo, "Loading EXR image \"%s\" ..", inputFile.string().c_str()); + Log(EInfo, "Loading image \"%s\" ..", inputFile.string().c_str()); ref is = new FileStream(inputFile, FileStream::EReadOnly); - ref input = new Bitmap(Bitmap::EOpenEXR, is); + ref input = new Bitmap(Bitmap::EAuto, is); if (crop[2] != -1 && crop[3] != -1) input = input->crop(Point2i(crop[0], crop[1]), Vector2i(crop[2], crop[3])); diff --git a/src/volume/constvolume.cpp b/src/volume/constvolume.cpp index 94608910..e4a4d1b6 100644 --- a/src/volume/constvolume.cpp +++ b/src/volume/constvolume.cpp @@ -58,7 +58,7 @@ public: if (m_type == Properties::EFloat) m_float = props.getFloat("value"); - else if (m_type == Properties::EPoint) + else if (m_type == Properties::EVector) m_vector = props.getVector("value"); else if (m_type == Properties::ESpectrum) m_spectrum = props.getSpectrum("value"); @@ -72,7 +72,7 @@ public: m_type = stream->readInt(); if (m_type == Properties::EFloat) m_float = stream->readFloat(); - else if (m_type == Properties::EPoint) + else if (m_type == Properties::EVector) m_vector = Vector(stream); else if (m_type == Properties::ESpectrum) m_spectrum = Spectrum(stream); @@ -90,7 +90,7 @@ public: m_spectrum.serialize(stream); else if (m_type == Properties::EFloat) stream->writeFloat(m_float); - else if (m_type == Properties::EPoint) + else if (m_type == Properties::EVector) m_vector.serialize(stream); else Log(EError, "Internal error - unknown data type"); @@ -117,7 +117,7 @@ public: } bool supportsVectorLookups() const { - return m_type == Properties::EPoint; + return m_type == Properties::EVector; } Float getStepSize() const { @@ -133,7 +133,7 @@ public: oss << 
"ConstantDataSource[value="; if (m_type == Properties::EFloat) oss << m_float; - else if (m_type == Properties::EPoint) + else if (m_type == Properties::EVector) oss << m_vector.toString(); else if (m_type == Properties::ESpectrum) oss << m_spectrum.toString(); diff --git a/src/volume/gridvolume.cpp b/src/volume/gridvolume.cpp index 77132d9e..a08dae6e 100644 --- a/src/volume/gridvolume.cpp +++ b/src/volume/gridvolume.cpp @@ -93,7 +93,7 @@ MTS_NAMESPACE_BEGIN * * When using this data source to represent floating point density volumes, * please ensure that the values are all normalized to lie in the - * range $[0, 1]$---otherwise, the Woocock-Tracking integration method in + * range $[0, 1]$---otherwise, the Woodcock-Tracking integration method in * \pluginref{heterogeneous} will produce incorrect results. */ class GridDataSource : public VolumeDataSource {