diff --git a/.hgtags b/.hgtags
index f7ca9dfd..8f88e9f9 100644
--- a/.hgtags
+++ b/.hgtags
@@ -6,3 +6,6 @@ e3c0182ba64b77319ce84c9e2a8581649e68273d v0.2.1
cb6e89af8012fac22cc0f3c5ad247c98c701bdda v0.3.0
ee26517b27207353b0c8a7d357bcb4977b5d93fb v0.4.0
7db07694ea00eb1655f7a1adcc3ae880e8e116f9 v0.4.1
+13a39b11aceee517c19d2e2cec2e6b875546062c v0.4.2
+f1b73d39617071297167cc7ce96f3892f21105fc v0.4.3
+bd6ddacdf7955e51d9b80be639c282d4974e6f56 v0.4.4
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7d38a80a..cbe39073 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -35,31 +35,26 @@ endif()
# Load the required modules
include (MitsubaUtil)
+include (MtsGetVersionInfo)
include (CheckCXXSourceCompiles)
include (CMakeDependentOption)
-# Read version (MTS_VERSION) from include/mitsuba/core/version.h
-file(STRINGS "include/mitsuba/core/version.h" MITSUBA_H REGEX "^#define MTS_VERSION \"[^\"]*\"$")
-string(REGEX REPLACE "^.*MTS_VERSION \"([0-9]+).*$" "\\1" MTS_VERSION_MAJOR "${MITSUBA_H}")
-string(REGEX REPLACE "^.*MTS_VERSION \"[0-9]+\\.([0-9]+).*$" "\\1" MTS_VERSION_MINOR "${MITSUBA_H}")
-string(REGEX REPLACE "^.*MTS_VERSION \"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" MTS_VERSION_PATCH "${MITSUBA_H}")
-set(MTS_VERSION "${MTS_VERSION_MAJOR}.${MTS_VERSION_MINOR}.${MTS_VERSION_PATCH}")
-set(MITSUBA_H)
-if("${MTS_VERSION_MAJOR}" MATCHES "[0-9]+" AND
- "${MTS_VERSION_MINOR}" MATCHES "[0-9]+" AND
- "${MTS_VERSION_PATCH}" MATCHES "[0-9]+")
- message(STATUS "mitsuba ${MTS_VERSION}")
+# Read the version information
+MTS_GET_VERSION_INFO()
+if (MTS_HAS_VALID_REV)
+ message(STATUS "mitsuba ${MTS_VERSION}-hg${MTS_REV_ID} (${MTS_DATE})")
else()
- message(FATAL_ERROR "The mitsuba version could not be determined!")
+ message(STATUS "mitsuba ${MTS_VERSION} (${MTS_DATE})")
endif()
+# Setup the build options
+include (MitsubaBuildOptions)
# Find the external libraries and setup the paths
include (MitsubaExternal)
-# Setup the build options, include paths and compile definitions
-include (MitsubaBuildOptions)
-
+# Main mitsuba include directory
+include_directories("include")
# ===== Prerequisite resources =====
@@ -92,8 +87,13 @@ endif()
# Additional files to add to main executables
if(APPLE)
set(MTS_DARWIN_STUB "${CMAKE_CURRENT_SOURCE_DIR}/src/mitsuba/darwin_stub.mm")
+ set(MTS_WINDOWS_STUB "")
+elseif(WIN32)
+ set(MTS_DARWIN_STUB "")
+ set(MTS_WINDOWS_STUB "${CMAKE_CURRENT_SOURCE_DIR}/data/windows/wmain_stub.cpp")
else()
set(MTS_DARWIN_STUB "")
+ set(MTS_WINDOWS_STUB "")
endif()
diff --git a/SConstruct b/SConstruct
index 44f5ef65..2538ea12 100644
--- a/SConstruct
+++ b/SConstruct
@@ -6,9 +6,10 @@ import os
resources = []
plugins = []
stubs = []
+winstubs = []
Export('SCons', 'sys', 'os', 'glob', 'resources',
- 'plugins', 'stubs')
+ 'plugins', 'stubs', 'winstubs')
# Configure the build framework
env = SConscript('build/SConscript.configure')
@@ -18,6 +19,9 @@ Export('env')
if sys.platform == 'win32':
# Set an application icon on Windows
resources += [ env.RES('data/windows/mitsuba_res.rc') ]
+ # Convert the command line args from UTF-8 to UTF-16
+ winstubs += [ env.SharedObject('#data/windows/wmain_stub.cpp') ]
+ Export('winstubs')
def build(scriptFile, exports = [], duplicate = 0):
dirname = '/'.join(os.path.dirname(scriptFile).split('/')[1:])
diff --git a/build/SConscript.configure b/build/SConscript.configure
index 3a65f817..73d917be 100644
--- a/build/SConscript.configure
+++ b/build/SConscript.configure
@@ -285,7 +285,7 @@ if needsBuildDependencies:
print '\nThe dependency directory and your Mitsuba codebase have different version'
print 'numbers! Your copy of Mitsuba has version %s, whereas the dependencies ' % MTS_VERSION
print 'have version %s. Please bring them into sync, either by running\n' % depVersion
- print '$ hg update -r v%s\n' % depVersion
+ print '$ hg update -r v%s\n' % depVersion
print 'in the Mitsuba directory, or by running\n'
print '$ cd dependencies'
print '$ hg pull'
@@ -353,7 +353,7 @@ def configure_for_objective_cpp(env):
env.RemoveFlags(['-fstrict-aliasing', '-ftree-vectorize',
'-std=c\+\+0x'])
# Remove Intel compiler-specific optimization flags
- env.RemoveFlags(['-x.*', '-ax.*', '-ipo', '-no-prec-div',
+ env.RemoveFlags(['-x.*', '-ax.*', '-ipo', '-no-prec-div',
'-fp-model', 'fast=.*', '-wd.*', '-openmp'])
env['CCFLAGS'] += ['-fno-strict-aliasing']
# Enforce GCC usage (Intel compiler doesn't handle Objective C/C++)
@@ -374,7 +374,7 @@ env.__class__.ConfigureForObjectiveCPP = configure_for_objective_cpp
env.__class__.RelaxCompilerSettings = relax_compiler_settings
if hasCollada:
- env.Append(CPPDEFINES = [['MTS_HAS_COLLADA', 1]] )
+ env.Append(CPPDEFINES = [['MTS_HAS_COLLADA', 1]])
env.SConsignFile()
diff --git a/build/config-linux-gcc-debug.py b/build/config-linux-gcc-debug.py
index 55f32973..109f06d8 100644
--- a/build/config-linux-gcc-debug.py
+++ b/build/config-linux-gcc-debug.py
@@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11']
GLFLAGS = ['-DGLEW_MX']
BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread']
COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4']
-COLLADALIB = ['collada14dom']
+COLLADALIB = ['collada14dom', 'xml2']
# The following assumes that the Mitsuba bindings should be built for the
# "default" Python version. It is also possible to build bindings for multiple
-# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
+# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
# PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR
pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '')
diff --git a/build/config-linux-gcc.py b/build/config-linux-gcc.py
index 6173d921..d81b3408 100644
--- a/build/config-linux-gcc.py
+++ b/build/config-linux-gcc.py
@@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11']
GLFLAGS = ['-DGLEW_MX']
BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread']
COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4']
-COLLADALIB = ['collada14dom']
+COLLADALIB = ['collada14dom', 'xml2']
# The following assumes that the Mitsuba bindings should be built for the
# "default" Python version. It is also possible to build bindings for multiple
-# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
+# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
# PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR
pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '')
diff --git a/build/config-linux-icl.py b/build/config-linux-icl.py
index f376ac60..10baef50 100644
--- a/build/config-linux-icl.py
+++ b/build/config-linux-icl.py
@@ -20,11 +20,11 @@ GLLIB = ['GL', 'GLU', 'GLEWmx', 'Xxf86vm', 'X11']
GLFLAGS = ['-DGLEW_MX']
BOOSTLIB = ['boost_system', 'boost_filesystem', 'boost_thread']
COLLADAINCLUDE = ['/usr/include/collada-dom', '/usr/include/collada-dom/1.4']
-COLLADALIB = ['collada14dom']
+COLLADALIB = ['collada14dom', 'xml2']
# The following assumes that the Mitsuba bindings should be built for the
# "default" Python version. It is also possible to build bindings for multiple
-# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
+# versions at the same time by explicitly specifying e.g. PYTHON27INCLUDE,
# PYTHON27LIB, PYTHON27LIBDIR and PYTHON32INCLUDE, PYTHON32LIB, PYTHON32LIBDIR
pyver = os.popen("python --version 2>&1 | grep -oE '([[:digit:]].[[:digit:]])'").read().strip().replace('.', '')
diff --git a/build/config-macos10.7-gcc-x86_64.py b/build/config-macos10.7-gcc-x86_64.py
index b4112976..5714db52 100644
--- a/build/config-macos10.7-gcc-x86_64.py
+++ b/build/config-macos10.7-gcc-x86_64.py
@@ -6,7 +6,7 @@ CCFLAGS = ['-arch', 'x86_64', '-mmacosx-version-min=10.7', '-march=nocona
LINKFLAGS = ['-framework', 'OpenGL', '-framework', 'Cocoa', '-arch', 'x86_64', '-mmacosx-version-min=10.7', '-Wl,-syslibroot,/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk', '-Wl,-headerpad,128']
BASEINCLUDE = ['#include', '#dependencies/include']
BASELIBDIR = ['#dependencies/lib']
-BASELIB = ['m', 'pthread', 'gomp', 'Half']
+BASELIB = ['m', 'pthread', 'Half']
OEXRINCLUDE = ['#dependencies/include/OpenEXR']
OEXRLIB = ['IlmImf', 'Imath', 'Iex', 'z']
PNGLIB = ['png']
diff --git a/build/mitsuba-msvc2010.vcxproj b/build/mitsuba-msvc2010.vcxproj
index 1ee777d5..fb1a61ec 100644
--- a/build/mitsuba-msvc2010.vcxproj
+++ b/build/mitsuba-msvc2010.vcxproj
@@ -125,13 +125,15 @@
-
+
-
+
-
+
-
+
+
+
@@ -203,6 +205,8 @@
+
+
@@ -287,14 +291,14 @@
+
+
-
-
@@ -385,6 +389,8 @@
+
+
@@ -409,6 +415,8 @@
+
+
@@ -431,6 +439,8 @@
+
+
@@ -455,8 +465,6 @@
-
-
@@ -511,6 +519,8 @@
+
+
@@ -629,8 +639,6 @@
-
-
@@ -757,6 +765,8 @@
+
+
@@ -805,6 +815,10 @@
+
+
+
+
@@ -937,6 +951,8 @@
+
+
@@ -1001,12 +1017,8 @@
-
-
-
-
@@ -1021,6 +1033,8 @@
+
+
@@ -1033,9 +1047,9 @@
-
+
-
+
diff --git a/build/mitsuba-msvc2010.vcxproj.filters b/build/mitsuba-msvc2010.vcxproj.filters
index 2a0fca14..353385b3 100644
--- a/build/mitsuba-msvc2010.vcxproj.filters
+++ b/build/mitsuba-msvc2010.vcxproj.filters
@@ -9,136 +9,136 @@
cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
- {b54fb131-7e86-4e08-bba2-828939a038ff}
+ {81dd9e3e-7bca-44a1-a313-76fb5af5ab0c}
- {62e70ba9-1509-4ecb-9c06-8de3ef48cd47}
+ {7112f301-f6d5-4351-ae06-4a29910a3766}
- {81da027f-f282-46cb-a88d-abc84969dbf8}
+ {15522c16-64b9-487e-b9fe-dea0094cb5c1}
- {8364b08a-50e5-471b-bfaa-4e45915df101}
+ {4df10a81-6f13-45ae-9a39-ce009db3a659}
- {bb52270c-4a49-423b-ad02-4a7c042295c2}
+ {0c722d61-5189-477d-b242-2fa2ad26c83c}
- {c4330c75-0353-41ab-a809-1471f359aec6}
+ {d849b48a-b61d-4540-81ab-f5228fd6fa93}
+
+
+ {32a38ee8-b255-4e23-8efc-9491fc27c645}
- {0f1ec875-44fb-46b2-9116-26d659410d97}
+ {9f6ef52b-2504-4722-a200-7aaa9603088a}
- {ab910bc2-a0a5-4f02-8232-396937071149}
+ {6c49af9f-8cac-477f-825b-ccb368e313c6}
- {dff3f942-f016-4bad-aa2c-63c444401a94}
+ {182aef81-5fdf-4eb1-a7d3-5b1d7d5de2b5}
- {855b0594-d28c-486a-b081-72991219654e}
+ {81241aff-695b-4556-bf46-e9d3ac4aa7fb}
- {08449c4a-89f1-4b5b-a756-2bd2f03bf566}
-
-
- {8437086b-afc9-48b6-a358-53d4df044974}
+ {f4218b48-d878-435b-915a-8da9727666b1}
- {d9802d62-8614-401a-afa6-941271c63386}
+ {e6e1a06f-b795-4a12-9b0c-0c730e0b2ac6}
- {65e0e20a-2809-4352-8c81-69a0a3824fa9}
+ {231a82e9-6b4b-4a95-995c-8abd0b6149b3}
- {8ea19f58-a2c0-4a30-bc79-817da2f1a1ed}
+ {4c70e19a-a935-4b0f-88e3-10683b03f66c}
- {753c9d80-ceb0-4932-8097-5c7cac92e091}
+ {4a433329-84ca-4a8b-938e-4d06c21ec6ff}
- {b4290489-566b-4576-847b-392b4c45f1f1}
+ {4414bb60-269f-4934-9dc9-58bbed35bc1d}
- {ad07dceb-214e-42e1-933d-c8ceeecd1389}
+ {4bc23857-f9a2-453d-bc64-8c9ebc5aff42}
- {3ee94c12-30fb-4f65-8781-f7b5e8a95db2}
+ {a9318f22-caaa-40af-b8fc-9b8051ec0859}
- {813aba46-5b9d-4290-9a31-18e77952929e}
+ {fba5da81-e3f2-4fbe-98d7-807aaa2c2cf3}
- {55bcc3cd-7f6b-4bb6-b22e-b731ed0451fb}
+ {abd6883d-375b-4d85-bb93-12952995332d}
- {6ebccae2-fbfd-4a3f-9bfc-7e9dbd2b6d74}
+ {4c9fdf84-d6a6-47f8-8d58-c01ed8351fa0}
- {a339ab19-2bed-4d03-afdc-d73223648b72}
+ {366f9975-2268-4124-81b9-87e7c2c03b1e}
- {87b01999-0b73-481b-b8fa-b66f594f65be}
+ {8d2d02ea-1957-4256-8046-f525d13e7ba5}
- {5870cf94-fb1b-44c0-90f2-32c3c8cbf6b7}
+ {8438e074-8733-4689-95ba-86145dd9fd9d}
- {90263b28-e65b-4d41-92cb-a0d1dac22564}
+ {9d35d85d-280c-49ef-869a-ffaa0602f9f5}
- {453d13c1-1847-4a18-8f2e-028913b4ec43}
+ {6df94c72-e89d-451e-ad6b-160ea0313077}
- {e4baad7e-ee3c-4085-961f-2851c0ed2b8f}
+ {7bec8429-3bb9-4dfd-8008-a10c675a9818}
- {a7ab51c2-5596-458f-b5f0-1129dc3dbbf5}
+ {747a1613-4c01-46ef-ba17-92d2286890ad}
- {a7456898-2253-48d8-abec-9c86311cca64}
+ {516a0fc3-a824-4486-b50c-f3165d1d8361}
- {cb9f48b8-6f25-4c9d-9d2c-b1a67cc80427}
+ {bddbc784-474a-4cd3-bbea-3123048dbfaa}
- {b44ee927-e827-4c46-bbce-c7a1b705b63b}
+ {7bba2a7a-8cd3-47bd-9ab2-e33939d4eba6}
- {40540c0e-02d6-4296-bbdf-87dfd9f7f213}
+ {32cdfdd7-c3bb-41f2-b433-10165963f9ed}
- {430d27a2-66db-4169-bb48-aeb0b65e4d1a}
+ {49edc93d-4305-45a8-a6d9-ae283837d46a}
- {a247e523-dbaf-4fb8-b284-6c8c5cbc09a5}
+ {d24cf1e6-a301-42e8-9fc1-26fe0ddc3f04}
- {91d6db52-180b-4dde-9339-807917e42f4e}
+ {3df649be-39d5-4416-8776-427a1696c016}
- {8ae0e3a3-87db-4e93-bb20-9d198040ae86}
+ {2e68088f-97e2-4dee-91f5-c85e3e36dd74}
- {0049f7d1-4211-4010-b53c-3ee99244046e}
+ {f7674a86-b0a5-484c-9db3-a20500b9bbf9}
- {8bddb89b-fa54-448a-a4b3-294f18d45ded}
+ {f41987e3-e0b9-431d-b37b-5931db380b09}
- {082f589c-eb3f-47a4-b2f6-fbd6d89dec07}
+ {88ed597b-50ba-4d6f-bf50-ae8f5fa17789}
- {a7f59027-bc87-4d28-9b90-eb5be186d8a7}
+ {5499a8e9-ef31-4ab9-a58f-d3a36a784004}
- {f5410d0f-0a22-4579-afbf-8e13db8fa3d5}
+ {b74f25a9-ff20-4888-8dfc-57dd968b0b8d}
- {ebe2b998-26de-4e92-8618-c739cb43a5f2}
+ {18246123-74da-4fee-8581-bd2860451c40}
- {4f4d9d4b-3f44-4d56-9a18-16b94e753706}
+ {3fdeab5a-5ccf-41c6-bf05-d9f7f510c741}
@@ -247,9 +247,6 @@
Source Files\librender
-
- Source Files\librender
-
Source Files\librender
@@ -439,6 +436,9 @@
Source Files\libhw
+
+ Source Files\libhw
+
Source Files\libhw
@@ -511,6 +511,12 @@
Source Files\libcore
+
+ Source Files\libcore
+
+
+ Source Files\libcore
+
Source Files\libcore
@@ -709,6 +715,9 @@
Source Files\mtsgui
+
+ Source Files\mtsgui
+
Source Files\mtsgui
@@ -805,15 +814,9 @@
Source Files\utils
-
- Source Files\utils
-
Source Files\utils
-
- Source Files\utils
-
Source Files\utils
@@ -835,6 +838,9 @@
Source Files\shapes\ply
+
+ Source Files\shapes
+
Source Files\shapes
@@ -853,10 +859,10 @@
Source Files\shapes
-
+
Source Files\shapes
-
+
Source Files\shapes
@@ -1035,17 +1041,20 @@
Source Files\samplers
-
- Source Files\libhw
+
+ Source Files\libhw\data
-
- Source Files\libhw
+
+ Source Files\libhw\data
-
- Source Files\libhw
+
+ Source Files\libhw\data
-
- Source Files\libhw
+
+ Source Files\libhw\data
+
+
+ Source Files\libhw\data
Source Files\medium
@@ -1152,6 +1161,9 @@
Source Files\films
+
+ Source Files\films
+
Source Files\shapes
@@ -1278,6 +1290,9 @@
Header Files\mitsuba\render
+
+ Header Files\mitsuba\render
+
Header Files\mitsuba\render
@@ -1287,9 +1302,6 @@
Header Files\mitsuba\render
-
- Header Files\mitsuba\render
-
Header Files\mitsuba\render
@@ -1425,6 +1437,9 @@
Header Files\mitsuba\core
+
+ Header Files\mitsuba\core
+
Header Files\mitsuba\core
@@ -1461,6 +1476,9 @@
Header Files\mitsuba\core
+
+ Header Files\mitsuba\core
+
Header Files\mitsuba\core
@@ -1494,6 +1512,9 @@
Header Files\mitsuba\core
+
+ Header Files\mitsuba\core
+
Header Files\mitsuba\core
@@ -1530,9 +1551,6 @@
Header Files\mitsuba\core
-
- Header Files\mitsuba\core
-
Header Files\mitsuba\core
@@ -1614,6 +1632,9 @@
Header Files\mitsuba\hw
+
+ Header Files\mitsuba\hw
+
Header Files\mitsuba\hw
diff --git a/data/cmake/MitsubaBuildOptions.cmake b/data/cmake/MitsubaBuildOptions.cmake
index 6a36aabb..7aae4210 100644
--- a/data/cmake/MitsubaBuildOptions.cmake
+++ b/data/cmake/MitsubaBuildOptions.cmake
@@ -6,15 +6,17 @@ if (NOT DEFINED MTS_VERSION)
message(FATAL_ERROR "This file has to be included from the main build file.")
endif()
-# Image format definitions
-if (PNG_FOUND)
- add_definitions(-DMTS_HAS_LIBPNG=1)
-endif()
-if (JPEG_FOUND)
- add_definitions(-DMTS_HAS_LIBJPEG=1)
-endif()
-if (OPENEXR_FOUND)
- add_definitions(-DMTS_HAS_OPENEXR=1)
+# Default initial compiler flags which may be modified by advanced users
+if (MTS_CMAKE_INIT)
+ set(MTS_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
+ set(MTS_CXX_FLAGS "-fvisibility=hidden -pipe -march=nocona -mfpmath=sse -ffast-math -Wall -Winvalid-pch")
+ endif()
+ if (MTS_CXX_FLAGS)
+ set(CMAKE_CXX_FLAGS "${MTS_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" CACHE
+ STRING "Flags used by the compiler during all build types." FORCE)
+ set(MTS_CXX_FLAGS)
+ endif()
endif()
# Top level configuration definitions
@@ -107,15 +109,7 @@ endif()
if (WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
add_definitions(-DWIN64)
endif()
-
-
-# Main mitsuba include directory
-include_directories("include")
-
-# Includes for the common libraries
-include_directories(${Boost_INCLUDE_DIRS} ${Eigen_INCLUDE_DIR})
-
-# If we are using the system OpenEXR, add its headers which half.h requires
-if (OPENEXR_FOUND)
- include_directories(${ILMBASE_INCLUDE_DIRS})
+if (MSVC AND MTS_SSE AND NOT CMAKE_CL_64)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:SSE2")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
endif()
diff --git a/data/cmake/MitsubaExternal.cmake b/data/cmake/MitsubaExternal.cmake
index 621e317a..acfbf98c 100644
--- a/data/cmake/MitsubaExternal.cmake
+++ b/data/cmake/MitsubaExternal.cmake
@@ -244,3 +244,23 @@ CMAKE_DEPENDENT_OPTION(BUILD_PYTHON "Build the Python bindings." ON
if (PYTHONLIBS_FOUND AND mts_boost_PYTHON_FOUND)
set (PYTHON_FOUND TRUE)
endif ()
+
+
+# Includes for the common libraries
+include_directories(${Boost_INCLUDE_DIRS} ${Eigen_INCLUDE_DIR})
+
+# If we are using the system OpenEXR, add its headers which half.h requires
+if (OPENEXR_FOUND)
+ include_directories(${ILMBASE_INCLUDE_DIRS})
+endif()
+
+# Image format definitions
+if (PNG_FOUND)
+ add_definitions(-DMTS_HAS_LIBPNG=1)
+endif()
+if (JPEG_FOUND)
+ add_definitions(-DMTS_HAS_LIBJPEG=1)
+endif()
+if (OPENEXR_FOUND)
+ add_definitions(-DMTS_HAS_OPENEXR=1)
+endif()
diff --git a/data/cmake/MitsubaUtil.cmake b/data/cmake/MitsubaUtil.cmake
index e465e535..473be190 100644
--- a/data/cmake/MitsubaUtil.cmake
+++ b/data/cmake/MitsubaUtil.cmake
@@ -141,14 +141,16 @@ function(mts_win_resource target_filename name ext description)
endif()
set(RC_DESCRIPTION "${description}")
- #TODO Add the hg revision number to the version, e.g. 0.0.0-hg000000000000
- set(RC_VERSION "${MTS_VERSION}")
- set(RC_VERSION_COMMA "${MTS_VERSION}.0")
- string(REPLACE "." "," RC_VERSION_COMMA ${RC_VERSION_COMMA})
+ if (MTS_HAS_VALID_REV)
+ set(RC_VERSION "${MTS_VERSION}-${MTS_VERSION_BUILD}hg${MTS_REV_ID}")
+ else()
+ set(RC_VERSION "${MTS_VERSION}")
+ endif()
+ set(RC_VERSION_COMMA "${MTS_VERSION_MAJOR},${MTS_VERSION_MINOR},${MTS_VERSION_PATCH},0")
set(RC_FILENAME "${name}${ext}")
set(RC_NAME "${name}")
- #TODO Set the year programmatically
- set(RC_YEAR "2012")
+ # MTS_DATE has the format YYYY.MM.DD
+ string(SUBSTRING "${MTS_DATE}" 0 4 RC_YEAR)
configure_file("${RC_FILE}" "${target_filename}" ESCAPE_QUOTES @ONLY)
endfunction()
@@ -308,15 +310,15 @@ macro (add_mts_plugin _plugin_name)
add_library (${_plugin_name} MODULE ${_plugin_srcs})
endif ()
- set(core_libraries "mitsuba-core" "mitsuba-render")
+ set(_plugin_core_libraries "mitsuba-core" "mitsuba-render")
if (_plugin_MTS_HW)
- list(APPEND core_libraries "mitsuba-hw")
+ list(APPEND _plugin_core_libraries "mitsuba-hw")
endif()
if (_plugin_MTS_BIDIR)
- list(APPEND core_libraries "mitsuba-bidir")
+ list(APPEND _plugin_core_libraries "mitsuba-bidir")
endif()
target_link_libraries (${_plugin_name}
- ${core_libraries} ${_plugin_LINK_LIBRARIES})
+ ${_plugin_core_libraries} ${_plugin_LINK_LIBRARIES})
set_target_properties (${_plugin_name} PROPERTIES PREFIX "")
if (APPLE)
@@ -367,6 +369,7 @@ endif()
# [RES_ICON filename]
# [RES_DESCRIPTION "Description string"]
# [NO_INSTALL]
+# [MTS_HW] [MTS_BIDIR]
# [NO_MTS_PCH | PCH pch_header] )
#
# The executable name is taken from the first argument. The target gets
@@ -375,6 +378,11 @@ endif()
# (for example, libpng) may be specified after the optionl LINK_LIBRARIES
# keyword.
#
+# By default the executables are linked against mitsuba-core and mitsuba-render.
+# When MTS_HW is set, the executable will be linked against with mitsuba-hw.
+# When MTS_BIDIR is specified, the executable will also be linked against
+# mitsuba-bidir.
+#
# The optional keyword WIN32, if presents, gets passed to add_executable(...)
# to produce a Windows executable using winmain, thus it won't have a
# console. The NO_INSTALL keyword causes the target not to be installed.
@@ -388,7 +396,7 @@ endif()
# builds; other platforms simply ignore this value as with RES_ICON.
#
macro (add_mts_exe _exe_name)
- CMAKE_PARSE_ARGUMENTS(_exe "WIN32;NO_INSTALL;NO_MTS_PCH"
+ CMAKE_PARSE_ARGUMENTS(_exe "WIN32;NO_INSTALL;MTS_HW;MTS_BIDIR;NO_MTS_PCH"
"PCH;RES_ICON;RES_DESCRIPTION" "LINK_LIBRARIES" ${ARGN})
set (_exe_srcs ${_exe_UNPARSED_ARGUMENTS})
if (_exe_WIN32)
@@ -425,8 +433,15 @@ macro (add_mts_exe _exe_name)
else ()
add_executable (${_exe_name} ${_exe_TYPE} ${_exe_srcs})
endif ()
- target_link_libraries (${_exe_name}
- ${MTS_CORELIBS} ${_exe_LINK_LIBRARIES})
+
+ set(_exe_core_libraries "mitsuba-core" "mitsuba-render")
+ if (_exe_MTS_HW)
+ list(APPEND _exe_core_libraries "mitsuba-hw")
+ endif()
+ if (_exe_MTS_BIDIR)
+ list(APPEND _exe_core_libraries "mitsuba-bidir")
+ endif()
+ target_link_libraries (${_exe_name} ${_exe_core_libraries} ${_exe_LINK_LIBRARIES})
if (WIN32)
set_target_properties (${_exe_name} PROPERTIES VERSION "${MTS_VERSION}")
endif()
diff --git a/data/cmake/MtsGetVersionInfo.cmake b/data/cmake/MtsGetVersionInfo.cmake
new file mode 100644
index 00000000..19092f44
--- /dev/null
+++ b/data/cmake/MtsGetVersionInfo.cmake
@@ -0,0 +1,160 @@
+# ============================================================================
+# HDRITools - High Dynamic Range Image Tools
+# Copyright 2008-2011 Program of Computer Graphics, Cornell University
+#
+# Distributed under the OSI-approved MIT License (the "License");
+# see accompanying file LICENSE for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+# ---------------------------------------------------------------------------
+# Primary author:
+# Edgar Velazquez-Armendariz
+# ============================================================================
+
+# - Sets up the version info variables
+# This module provides a function intended to be called ONLY from the root dir:
+# MTS_GET_VERSION_INFO()
+# This function will read the "include/mitsuba/core/version.h" file and execute
+# "hg", setting the following variables:
+# MTS_VERSION - Full version string: ..
+# MTS_VERSION_MAJOR
+# MTS_VERSION_MINOR
+# MTS_VERSION_PATCH
+# MTS_VERSION_BUILD - Simple build number based on MTS_DATE,
+# encoded as YYYYMMDD
+# MTS_HAS_VALID_REV - Flag to indicate whether MTS_REV_ID is set
+# MTS_REV_ID - First 12 digits of the mercurial revision ID
+# MTS_DATE - Represents the code date as YYYY.MM.DD
+# MTS_MACLS_VERSION - A version for Mac Launch Services from the version and
+# code date, in the format nnnnn.nn.nn[hgXXXXXXXXXXXX]
+
+function(MTS_GET_VERSION_INFO)
+
+ # Simple, internal macro for zero padding values. Assumes that the number of
+ # digits is enough. Note that this method overwrites the variable!
+ macro(ZERO_PAD NUMBER_VAR NUM_DIGITS)
+ set(_val ${${NUMBER_VAR}})
+ set(${NUMBER_VAR} "")
+ foreach(dummy_var RANGE 1 ${NUM_DIGITS})
+ math(EXPR _digit "${_val} % 10")
+ set(${NUMBER_VAR} "${_digit}${${NUMBER_VAR}}")
+ math(EXPR _val "${_val} / 10")
+ endforeach()
+ unset(_val)
+ unset(_digit)
+ endmacro()
+
+
+ # Uses hg to get the version string and the date of such revision
+ # Based on info from:
+ # http://mercurial.selenic.com/wiki/VersioningWithMake (January 2011)
+
+ # Try to directly get the information assuming the source is within a repo
+ find_program(HG_CMD hg DOC "Mercurial command line executable")
+ mark_as_advanced(HG_CMD)
+ if (HG_CMD)
+ execute_process(
+ COMMAND "${HG_CMD}" -R "${PROJECT_SOURCE_DIR}"
+ parents --template "{node|short},{date|shortdate}"
+ OUTPUT_VARIABLE HG_INFO
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ if (HG_INFO)
+ # Extract the revision ID and the date
+ string(REGEX REPLACE "(.+),.+" "\\1" MTS_REV_ID "${HG_INFO}")
+ string(REGEX REPLACE ".+,(.+)-(.+)-(.+)" "\\1.\\2.\\3"
+ MTS_DATE "${HG_INFO}")
+ set(MTS_REV_ID ${MTS_REV_ID} PARENT_SCOPE)
+ set(MTS_DATE ${MTS_DATE} PARENT_SCOPE)
+ endif()
+ endif()
+
+ # If that failed, try grabbing the id from .hg_archival.txt, in case a tarball
+ # made by "hg archive" is being used
+ if (NOT MTS_REV_ID)
+ set(HG_ARCHIVAL_FILENAME "${CMAKE_CURRENT_SOURCE_DIR}/.hg_archival.txt")
+ # Try to read from the file generated by "hg archive"
+ if (EXISTS "${HG_ARCHIVAL_FILENAME}")
+ file(READ "${HG_ARCHIVAL_FILENAME}" HG_ARCHIVAL_TXT)
+ # Extract just the first 12 characters of the node
+ string(REGEX REPLACE ".*node:[ \\t]+(............).*" "\\1"
+ MTS_REV_ID "${HG_ARCHIVAL_TXT}")
+ set(MTS_REV_ID ${MTS_REV_ID} PARENT_SCOPE)
+ endif()
+ endif()
+
+ if (NOT MTS_DATE)
+ # The Windows "date" command output depends on the regional settings
+ if (WIN32)
+ set(GETDATE_CMD "${PROJECT_SOURCE_DIR}/data/windows/getdate.exe")
+ set(GETDATE_ARGS "")
+ else()
+ set(GETDATE_CMD "date")
+ set(GETDATE_ARGS "+'%Y.%m.%d'")
+ endif()
+ execute_process(COMMAND "${GETDATE_CMD}" ${GETDATE_ARGS}
+ OUTPUT_VARIABLE MTS_DATE
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ if (NOT MTS_DATE)
+ message(FATAL_ERROR "Unable to get a build date!")
+ endif()
+ set(MTS_DATE ${MTS_DATE} PARENT_SCOPE)
+ endif()
+
+ if (MTS_REV_ID)
+ set (MTS_HAS_VALID_REV 1)
+ else()
+ message(WARNING "Unable to find the mercurial revision id.")
+ set (MTS_HAS_VALID_REV 0)
+ endif()
+ set(MTS_HAS_VALID_REV ${MTS_HAS_VALID_REV} PARENT_SCOPE)
+
+
+ # Read version (MTS_VERSION) from include/mitsuba/core/version.h
+ file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/mitsuba/core/version.h" MITSUBA_H REGEX "^#define MTS_VERSION \"[^\"]*\"$")
+ if (MITSUBA_H MATCHES "^.*MTS_VERSION \"([0-9]+)\\.([0-9]+)\\.([0-9]+).*$")
+ set(MTS_VERSION_MAJOR ${CMAKE_MATCH_1})
+ set(MTS_VERSION_MINOR ${CMAKE_MATCH_2})
+ set(MTS_VERSION_PATCH ${CMAKE_MATCH_3})
+ set(MTS_VERSION "${MTS_VERSION_MAJOR}.${MTS_VERSION_MINOR}.${MTS_VERSION_PATCH}" PARENT_SCOPE)
+ set(MTS_VERSION_MAJOR ${MTS_VERSION_MAJOR} PARENT_SCOPE)
+ set(MTS_VERSION_MINOR ${MTS_VERSION_MINOR} PARENT_SCOPE)
+ set(MTS_VERSION_PATCH ${MTS_VERSION_PATCH} PARENT_SCOPE)
+ else()
+ message(FATAL_ERROR "The mitsuba version could not be determined!")
+ endif()
+
+ # Make a super simple build number from the date
+ if (MTS_DATE MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)")
+ set(MTS_VERSION_BUILD
+ "${CMAKE_MATCH_1}${CMAKE_MATCH_2}${CMAKE_MATCH_3}" PARENT_SCOPE)
+
+ # Now make a Mac Launch Services version number based on version and date.
+ # Based on specs from:
+ # http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html (Feb 2011)
+ if (MTS_VERSION_MAJOR GREATER 30 OR
+ MTS_VERSION_MINOR GREATER 14 OR
+ MTS_VERSION_PATCH GREATER 14 OR
+ ${CMAKE_MATCH_1} GREATER 2032)
+ message(AUTHOR_WARNING "Mitsuba version violates the Mac LS assumptions")
+ endif()
+ math(EXPR _MACLS_MAJOR "(${MTS_VERSION_MAJOR}+1)*256 + (${MTS_VERSION_MINOR}+1)*16 + ${MTS_VERSION_PATCH}+1")
+ math(EXPR _MACLS_MINOR "((${CMAKE_MATCH_1}-2008)*4) + ((${CMAKE_MATCH_2}-1)*32 + ${CMAKE_MATCH_3})/100")
+ math(EXPR _MACLS_BUILD "((${CMAKE_MATCH_2}-1)*32 + ${CMAKE_MATCH_3})%100")
+ ZERO_PAD(_MACLS_MAJOR 4)
+ ZERO_PAD(_MACLS_MINOR 2)
+ ZERO_PAD(_MACLS_BUILD 2)
+ set(MTS_MACLS_VERSION "${_MACLS_MAJOR}.${_MACLS_MINOR}.${_MACLS_BUILD}")
+ if(MTS_HAS_VALID_REV)
+ set(MTS_MACLS_VERSION "${MTS_MACLS_VERSION}hg${MTS_REV_ID}")
+ endif()
+ set(MTS_MACLS_VERSION ${MTS_MACLS_VERSION} PARENT_SCOPE)
+ else()
+ message(FATAL_ERROR
+ "Mitsuba date has an unexpected format: ${MTS_DATE}")
+ endif()
+
+endfunction()
diff --git a/data/darwin/Info.plist b/data/darwin/Info.plist
index 05e42854..94eb2192 100644
--- a/data/darwin/Info.plist
+++ b/data/darwin/Info.plist
@@ -90,5 +90,7 @@
YES
BreakpadVendor
the Mitsuba authors
+ NSHighResolutionCapable
+
diff --git a/data/darwin/Info.plist.in b/data/darwin/Info.plist.in
index 409395a9..12dc0e7b 100644
--- a/data/darwin/Info.plist.in
+++ b/data/darwin/Info.plist.in
@@ -70,5 +70,7 @@
@MTS_VERSION@
CSResourcesFileMapped
+ NSHighResolutionCapable
+
diff --git a/data/darwin/add-icl-libraries.sh b/data/darwin/add-icl-libraries.sh
index e35e925d..9db846ad 100755
--- a/data/darwin/add-icl-libraries.sh
+++ b/data/darwin/add-icl-libraries.sh
@@ -1,5 +1,6 @@
#!/bin/bash
cp /opt/intel/composer_xe_*/compiler/lib/libiomp5.dylib Mitsuba.app/Contents/Frameworks
+install_name_tool -id @rpath/libiomp5.dylib Mitsuba.app/Contents/Frameworks/libiomp5.dylib
find Mitsuba.app/Contents/MacOS/ Mitsuba.app/plugins -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
find Mitsuba.app/Contents/Frameworks/libmitsuba-* -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
-find Mitsuba.app/Contents/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
+find Mitsuba.app/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
diff --git a/data/linux/debian/changelog b/data/linux/debian/changelog
index d42b3978..f05344b5 100644
--- a/data/linux/debian/changelog
+++ b/data/linux/debian/changelog
@@ -1,3 +1,65 @@
+mitsuba (0.4.4-1) unstable; urgency=low
+ * Improved Python support for rendering animations and motion blur
+ * Photon mapper logic rewrite to account for certain missing specular paths
+ * Robustness improvements for specular+diffuse materials such as 'plastic'
+ * Fixed a remaining issue in the instancing frame computation code
+ * The thindielectric plugin formerly computed incorrect transmittance values
+ * The cube shape is now centered at the origin by default
+ * The TLS cleanup logic has been fixed to avoid a potential crash in mtssrv
+ * Other minor improvements, which are listed in the repository log
+ -- Wenzel Jakob Thu, 28 Feb 2013 00:00:00 -0400
+
+mitsuba (0.4.3-1) unstable; urgency=low
+ * Motion blur: Support for arbitrary linear camera, object, and sensor motion
+ to produce motion blur in renderings.
+ * Render-time annotations: added the ability to tag image files with additional
+ information by means of metadata or text labels.
+ * Hide directly visible emitters: convenient feature for removing an environment
+ light source so that an image can be composited onto documents having a
+ different color.
+ * Improved instancing: more robust instancing code with support for
+ non-rigid transformations.
+ * Threading on Windows: fixed various threading-related issues on Windows that
+ previously caused crashes and deadlocks.
+ * Caching: Caching mechanism to further accelerate the loading of
+ .serialized files.
+ * File dialogs: Native File Open/Save dialogs are now used on Windows.
+ * Python: Improved python bindings; easier usage on MacOS X.
+ * Blender interaction: Fixed a issue where GUI tabs containing scenes created
+ in Blender could not be cloned.
+ * Non-uniform scales: All triangle mesh-based shapes now permit
+ non-uniform scales.
+ * NaNs and friends: Increased resilience against various numerical corner cases.
+ * Index-matched participating media: Fixed an unfortunate regression in volpath
+ regarding index-matched media that was accidentally introduced in 0.4.2.
+ * roughdiffuse: Fixed texturing support in the roughdiffuse plugin.
+ * Photon mapping: Fixed some inaccuracies involving participating media when
+ rendered by the photon mapper and the Beam Radiance Estimate.
+ * Conductors: Switched Fresnel reflectance computations for conductors to the
+ exact expressions predicted by geometric optics (an approximation was
+ previously used).
+ * New cube shape: Added a cube shape plugin for convenience. This does
+ exactly what one would expect.
+ * The rest: As usual, a large number of smaller bugfixes and improvements
+ were below the threshold and are thus not listed individually. The
+ repository log has more details.
+ -- Wenzel Jakob Tue, 29 Jan 2013 00:00:00 -0400
+
+mitsuba (0.4.2-1) unstable; urgency=low
+ * Volumetric path tracers: improved sampling when dealing with index-matched medium transitions. This is essentially a re-implementation of an optimization that Mitsuba 0.3.1 already had, but which got lost in the bidirectional rewrite.
+ * Batch tonemapper: due to an unfortunate bug, the batch tonemapper in the last release produced invalid results for images containing an alpha channel. This is now fixed.
+ * Shapes: corrected some differential geometry issues in the "cylinder" and "rectangle" shapes.
+ * MLT: fixed 2-stage MLT, which was producing incorrect results.
+ * MEPT: fixed the handling of directional light sources.
+ * Robustness: got rid of various corner-cases that could produce NaNs.
+ * Filenames: to facilitate loading scenes created on Windows/OSX, the Linux version now resolves files case-insensitively if they could not be found after a case-sensitive search.
+ * Python: added Python bindings for shapes and triangle meshes. The Python plugin should now be easier to load (previously, this was unfortunately rather difficult on several platforms). The documentation was also given an overhaul.
+ * Particle tracing: I've decided to disable the adjoint BSDF for shading normals in the particle tracer, since it causes an unacceptable amount of variance in scenes containing poorly tesselated geometry. This affects the plugins ptracer, ppm, sppm and photonmapper.
+ * Subsurface scattering: fixed parallel network renderings involving the dipole model.
+ * Homogeneous medium & dipole: added many more material presets by Narasimhan et al.
+ * OBJ loader: further robustness improvements to the OBJ loader and the associated MTL material translator.
+ -- Wenzel Jakob Wed, 31 Oct 2012 00:00:00 -0400
+
mitsuba (0.4.1-1) unstable; urgency=low
* negative pixel values in textures and environment maps are handled more gracefully.
* minor robustness improvements to the OBJ and COLLADA importers.
@@ -35,15 +97,15 @@ mitsuba (0.4.0-1) unstable; urgency=low
mitsuba (0.3.1-1) unstable; urgency=low
- * Photon mapper: The photon mapper had some serious issues in the
+ * Photon mapper: The photon mapper had some serious issues in the
last release. These are now fixed, and it should run faster too.
- * On Linux/x86_64, the performance of the single precision exp() and log()
+ * On Linux/x86_64, the performance of the single precision exp() and log()
math library functions is extremely poor. Mitsuba now uses the double
precision versions of these functions by default.
* Primitive clipping: Fixed numerical issues that occurred when using
primitive clipping in a double precision build.
* The adaptive integrator now better interacts with certain sub-integrators.
- * Instanced analytic shapes (e.g. spheres, cylinders, ..) are now supported,
+ * Instanced analytic shapes (e.g. spheres, cylinders, ..) are now supported,
and an error involving network rendering with instanced geometry is fixed.
* Fixed a serious issue that could destroy a scene file when saving from a cloned tab!
* Fixed some bad GUI behavior in multi-screen setups
@@ -57,30 +119,30 @@ mitsuba (0.3.0-1) unstable; urgency=low
* Added Python bindings that can be used to instantiate plugins
and control rendering processes.
- * Spectral rendering: most of the code pertaining to spectral
- rendering has seen a significant overhaul. It is now faster and
+ * Spectral rendering: most of the code pertaining to spectral
+ rendering has seen a significant overhaul. It is now faster and
in certain cases more accurate.
- * Flexible material classes: this release introduces a robust and
- very general suite of eight physically-based smooth and rough
+ * Flexible material classes: this release introduces a robust and
+ very general suite of eight physically-based smooth and rough
(microfacet-based) material classes.
* Material modifiers: two new material modifiers (bump & coating)
can be applied to BSDFs to create new materials.
- * Material verification: the sampling methods of all material
- models in Mitsuba are now automatically verified with the help
+ * Material verification: the sampling methods of all material
+ models in Mitsuba are now automatically verified with the help
of statistical hypothesis tests (using Chi^2-tests).
- * Generated documentation: there is now a javadoc-like system,
- which extracts documentation directly from the plugin source code
+ * Generated documentation: there is now a javadoc-like system,
+ which extracts documentation directly from the plugin source code
and stitches it into a LaTeX reference document.
- * lookAt: Mitsuba inherited a bug from PBRT, where the
- tag changed the handedness of the coordinate system. This is now
+ * lookAt: Mitsuba inherited a bug from PBRT, where the
+ tag changed the handedness of the coordinate system. This is now
fixed--also, the syntax of this tag has changed to make it easier to read.
* Scene portability: A new conversion tool ensures that old and incompatible
- scenes can be translated into the scene description format of the
+ scenes can be translated into the scene description format of the
most recent version.
- * Contributed plugins: Tom Kazimiers and Papas have contributed
+ * Contributed plugins: Tom Kazimiers and Papas have contributed
implementations of the Preetham Sun & Sky model and the Hanrahan-Krueger
scattering model.
- * Photon mapping: The Photon map integrator has been rewritten for
+ * Photon mapping: The Photon map integrator has been rewritten for
improved accuracy and better performance. Furthermore, the underlying
data structure has been replaced with a ~50% faster implementation.
@@ -125,8 +187,8 @@ mitsuba (0.2.0-1) unstable; urgency=low
mitsuba (0.1.3-1) unstable; urgency=low
- This is mainly a bugfix release to address a serious regression in the
- material system. Other notable changes are:
+ This is mainly a bugfix release to address a serious regression in the
+ material system. Other notable changes are:
* Imported scenes now store relative paths
* OBJ importing works on Windows
@@ -134,7 +196,7 @@ mitsuba (0.1.3-1) unstable; urgency=low
* The anisotropic Ward BRDF is now supported in the preview
* Faster texture loading
* The renderer now has a testcase framework similar to JUnit
-
+
-- Wenzel Jakob Wed, 8 Sep 2010 09:59:00 -0400
mitsuba (0.1.2-1) unstable; urgency=low
@@ -150,8 +212,8 @@ mitsuba (0.1.2-1) unstable; urgency=low
is lacking some required OpenGL features.
* Create default cameras/lightsources if none are specified in a scene
* Support for drag & drop in the user interface
- * The Mitsuba user interface now also doubles as an EXR viewer / tonemapper.
- Drag an EXR file onto the UI or open it using the File menu, and the image
+ * The Mitsuba user interface now also doubles as an EXR viewer / tonemapper.
+ Drag an EXR file onto the UI or open it using the File menu, and the image
opens in a new tab. Afterwards, it is possible to export the image as a tonemapped
8-bit PNG image.
* The realtime preview now has a 'force diffuse' feature to improve
@@ -165,6 +227,6 @@ mitsuba (0.1.2-1) unstable; urgency=low
mitsuba (0.1.1-1) unstable; urgency=low
- * Initial release
+ * Initial release
-- Wenzel Jakob Sat, 17 Jul 2010 23:56:03 -0400
diff --git a/data/linux/debian/control b/data/linux/debian/control
index 8bf000ec..ea681978 100644
--- a/data/linux/debian/control
+++ b/data/linux/debian/control
@@ -4,10 +4,10 @@ Priority: optional
Maintainer: Wenzel Jakob
Build-Depends: debhelper (>= 7), build-essential, scons, qt4-dev-tools,
libpng12-dev, libjpeg-dev, libilmbase-dev, libopenexr-dev,
- libxerces-c-dev, libboost-dev, libglewmx1.5-dev, libxxf86vm-dev,
+ libxerces-c-dev, libboost-dev, libglewmx-dev, libxxf86vm-dev,
collada-dom-dev, libboost-system-dev, libboost-filesystem-dev,
libboost-python-dev, libboost-thread-dev, libgl1-mesa-dev,
- libglu1-mesa-dev, pkg-config, libeigen3-dev
+ libglu1-mesa-dev, pkg-config, libeigen3-dev, libxml2-dev
Standards-Version: 3.8.3
Homepage: http://www.mitsuba-renderer.org
@@ -24,7 +24,7 @@ Description: Mitsuba renderer
Package: mitsuba-dev
Architecture: any
Depends: qt4-dev-tools, libpng12-dev, libjpeg-dev, libilmbase-dev,
- libopenexr-dev, libxerces-c-dev, libboost-dev, libglewmx1.5-dev,
+ libopenexr-dev, libxerces-c-dev, libboost-dev, libglewmx-dev,
libxxf86vm-dev, collada-dom-dev, libboost-system-dev,
libboost-filesystem-dev, libboost-python-dev, libboost-thread-dev,
libeigen3-dev, mitsuba
diff --git a/data/linux/fedora/mitsuba.spec b/data/linux/fedora/mitsuba.spec
index e326f57a..3f076370 100644
--- a/data/linux/fedora/mitsuba.spec
+++ b/data/linux/fedora/mitsuba.spec
@@ -1,7 +1,7 @@
Name: mitsuba
-Version: 0.4.1
+Version: 0.4.4
Release: 1%{?dist}
-Summary: Mitsuba renderer
+Summary: Mitsuba renderer
Group: Applications/Graphics
License: GPL-3
URL: http://www.mitsuba-renderer.org
@@ -10,11 +10,11 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
BuildRequires: gcc-c++ scons boost-devel qt4-devel OpenEXR-devel xerces-c-devel python-devel glew-devel collada-dom-devel eigen3-devel
Requires: boost qt4 OpenEXR-libs xerces-c python libGLEWmx collada-dom
%description
-Mitsuba is an extensible rendering framework written in portable C++. It implements unbiased as well as biased techniques and contains heavy optimizations targeted towards current CPU architectures.
+Mitsuba is an extensible rendering framework written in portable C++. It implements unbiased as well as biased techniques and contains heavy optimizations targeted towards current CPU architectures.
The program currently runs on Linux, MacOS X and Microsoft Windows and makes use of SSE2 optimizations on x86 and x86_64 platforms. So far, its main use has been as a testbed for algorithm development in computer graphics, but there are many other interesting applications.
-Mitsuba comes with a command-line interface as well as a graphical frontend to interactively explore scenes. While navigating, a rough preview is shown that becomes increasingly accurate as soon as all movements are stopped. Once a viewpoint has been chosen, a wide range of rendering techniques can be used to generate images, and their parameters can be tuned from within the program.
+Mitsuba comes with a command-line interface as well as a graphical frontend to interactively explore scenes. While navigating, a rough preview is shown that becomes increasingly accurate as soon as all movements are stopped. Once a viewpoint has been chosen, a wide range of rendering techniques can be used to generate images, and their parameters can be tuned from within the program.
%package devel
Summary: Mitsuba development files
Requires: boost-devel qt4-devel OpenEXR-devel xerces-c-devel python-devel glew-devel collada-dom-devel
@@ -35,13 +35,14 @@ mkdir -p $RPM_BUILD_ROOT/usr/share/mitsuba/plugins
mkdir -p $RPM_BUILD_ROOT/usr/share/pixmaps
mkdir -p $RPM_BUILD_ROOT/usr/share/applications
mkdir -p $RPM_BUILD_ROOT/usr/include
-strip dist/lib* dist/mtsgui dist/mitsuba dist/mtssrv dist/mtsutil
+strip dist/lib* dist/mtsgui dist/mitsuba dist/mtssrv dist/mtsutil dist/mtsimport
strip dist/plugins/* dist/python/*/*
cp dist/libmitsuba-*.so $RPM_BUILD_ROOT%{_libdir}
cp dist/mtsgui $RPM_BUILD_ROOT%{_bindir}
cp dist/mitsuba $RPM_BUILD_ROOT%{_bindir}
cp dist/mtssrv $RPM_BUILD_ROOT%{_bindir}
cp dist/mtsutil $RPM_BUILD_ROOT%{_bindir}
+cp dist/mtsimport $RPM_BUILD_ROOT%{_bindir}
cp dist/python/2.7/mitsuba.so $RPM_BUILD_ROOT%{_libdir}/python2.7/lib-dynload
cp dist/plugins/* $RPM_BUILD_ROOT/usr/share/mitsuba/plugins
cp -Rdp dist/data $RPM_BUILD_ROOT/usr/share/mitsuba/data
@@ -62,6 +63,15 @@ rm -rf $RPM_BUILD_ROOT
/usr/include/*
%changelog
+* Thu Feb 28 2013 Wenzel Jakob 0.4.4%{?dist}
+- Upgrade to version 0.4.4
+
+* Tue Jan 29 2013 Wenzel Jakob 0.4.3%{?dist}
+- Upgrade to version 0.4.3
+
+* Wed Oct 31 2012 Wenzel Jakob 0.4.2%{?dist}
+- Upgrade to version 0.4.2
+
* Wed Oct 10 2012 Wenzel Jakob 0.4.1%{?dist}
- Upgrade to version 0.4.1
diff --git a/data/linux/mitsuba.desktop b/data/linux/mitsuba.desktop
index 80b80205..ac7d81a1 100644
--- a/data/linux/mitsuba.desktop
+++ b/data/linux/mitsuba.desktop
@@ -10,5 +10,5 @@ Exec=mtsgui %U
TryExec=mtsgui
Terminal=false
StartupNotify=true
-MimeType=application/xml
-Icon=mitsuba48.png
+MimeType=application/xml;image/x-exr;image/x-hdr;
+Icon=mitsuba48
diff --git a/data/pch/mitsuba_precompiled.hpp b/data/pch/mitsuba_precompiled.hpp
index cbe1e530..97b8f846 100644
--- a/data/pch/mitsuba_precompiled.hpp
+++ b/data/pch/mitsuba_precompiled.hpp
@@ -42,7 +42,6 @@
#include
#include
#include
-#include
#include
#include
#include
diff --git a/data/schema/scene.xsd b/data/schema/scene.xsd
index 1a073b82..18de916b 100644
--- a/data/schema/scene.xsd
+++ b/data/schema/scene.xsd
@@ -26,7 +26,7 @@
-
+
@@ -43,6 +43,7 @@
+
@@ -50,14 +51,14 @@
-
+
-
+
@@ -140,7 +141,7 @@
-
+
@@ -297,7 +298,7 @@
-
+
@@ -314,6 +315,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/data/windows/getdate.c b/data/windows/getdate.c
new file mode 100644
index 00000000..54ce6852
--- /dev/null
+++ b/data/windows/getdate.c
@@ -0,0 +1,52 @@
+/*============================================================================
+ HDRITools - High Dynamic Range Image Tools
+ Copyright 2008-2011 Program of Computer Graphics, Cornell University
+
+ Distributed under the OSI-approved MIT License (the "License");
+ see accompanying file LICENSE for details.
+
+ This software is distributed WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the License for more information.
+ -----------------------------------------------------------------------------
+ Primary author:
+ Edgar Velazquez-Armendariz
+============================================================================*/
+
+#include <stdio.h>
+#include <time.h>
+
+int main(int argc, char **argv)
+{
+ time_t ltime;
+ struct tm *today;
+ FILE *of;
+#if _MSC_VER >= 1400
+ struct tm timebuf;
+#endif
+
+ if (argc != 2) {
+ of = stdout;
+ } else {
+#if _MSC_VER >= 1400
+ if (fopen_s(&of, argv[1], "w") != 0) return 3;
+#else
+ of = fopen(argv[1], "w");
+ if (!of) return 3;
+#endif
+ }
+
+ time(&ltime);
+#if _MSC_VER >= 1400
+ if (localtime_s(&timebuf, &ltime) != 0) return 1;
+ today = &timebuf;
+#else
+ today = localtime(&ltime);
+ if (!today) return 1;
+#endif
+
+ fprintf(of, "%d.%02d.%02d", (today->tm_year + 1900),
+ (today->tm_mon + 1), today->tm_mday);
+ if (of != stdout) fclose(of);
+ return 0;
+}
diff --git a/data/windows/getdate.exe b/data/windows/getdate.exe
new file mode 100644
index 00000000..9cd66244
Binary files /dev/null and b/data/windows/getdate.exe differ
diff --git a/data/windows/wmain_stub.cpp b/data/windows/wmain_stub.cpp
new file mode 100644
index 00000000..3580bbad
--- /dev/null
+++ b/data/windows/wmain_stub.cpp
@@ -0,0 +1,110 @@
+/*
+ This file is part of Mitsuba, a physically based rendering system.
+
+ Copyright (c) 2007-2012 by Wenzel Jakob and others.
+
+ Mitsuba is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License Version 3
+ as published by the Free Software Foundation.
+
+ Mitsuba is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <mitsuba/core/platform.h>
+#if defined(__WINDOWS__)
+
+// Stub for generating UTF-8 command line arguments from wmain (UTF-16)
+#include <windows.h>
+
+extern int mts_main(int argc, char **argv);
+
+
+namespace {
+
+class ArgsUTF8 {
+public:
+ ArgsUTF8(int argc, wchar_t *wargv[]) :
+ m_argc(-1), m_argv(NULL), m_data(NULL)
+ {
+ if (argc > 0)
+ m_argc = argc;
+ else
+ return;
+
+ m_argv = new char*[argc];
+ int total = 0;
+
+ // Pass 1: get the lengths of each converted string and allocate data
+ for (int i = 0; i < argc; ++i) {
+ const int lenUtf8 = WideCharToMultiByte(CP_UTF8, 0,
+ wargv[i], -1, NULL, 0, NULL, NULL);
+ if (lenUtf8 != 0) {
+ total += lenUtf8;
+ m_argv[i] = reinterpret_cast<char*>(lenUtf8);
+ } else {
+ m_argc = i;
+ break;
+ }
+ }
+
+ if (m_argc < 1)
+ return;
+
+ m_data = new char[total];
+ int currOffset = 0;
+
+ // Pass 2: perform the conversion
+ for (int i = 0; i < m_argc; ++i) {
+ int lenUtf8 = reinterpret_cast<int>(m_argv[i]);
+ m_argv[i] = m_data + currOffset;
+ lenUtf8 = WideCharToMultiByte(CP_UTF8, 0,
+ wargv[i], -1, m_argv[i], lenUtf8, NULL, NULL);
+ if (lenUtf8 != 0) {
+ currOffset += lenUtf8;
+ } else {
+ m_argc = i;
+ return;
+ }
+ }
+ }
+
+ ~ArgsUTF8() {
+ if (m_argv != NULL) {
+ delete [] m_argv;
+ }
+ if (m_data != NULL) {
+ delete [] m_data;
+ }
+ }
+
+ inline int argc() const {
+ return m_argc;
+ }
+
+ inline char** argv() const {
+ return m_argv;
+ }
+
+private:
+ int m_argc;
+ char** m_argv;
+ char* m_data;
+};
+
+} // namespace
+
+
+// MSDN Documentation:
+// http://msdn.microsoft.com/en-US/library/fzc2cy7w%28v=vs.110%29.aspx
+int wmain(int argc, wchar_t *wargv[], wchar_t *envp[]) {
+ ArgsUTF8 argsUTF8(argc, wargv);
+ return mts_main(argsUTF8.argc(), argsUTF8.argv());
+}
+
+#endif // __WINDOWS__
diff --git a/doc/Makefile b/doc/Makefile
index c64ad654..41c41ab1 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,2 +1,7 @@
all:
./gendoc.py
+
+clean:
+ $(RM) main.pdf
+ $(RM) plugins_generated.tex
+ $(RM) *.aux main.bbl main.blg main.log main.out main.toc
diff --git a/doc/acknowledgements.tex b/doc/acknowledgements.tex
index 2828ff2e..7ebe70a3 100644
--- a/doc/acknowledgements.tex
+++ b/doc/acknowledgements.tex
@@ -1,11 +1,11 @@
\section{Acknowledgments}
I am indebted to my advisor Steve Marschner for allowing me to devote
-a significant amount of my research time to this project. His insightful and
+a significant amount of my research time to this project. His insightful and
encouraging suggestions have helped transform this program into much more than
I ever thought it would be.
-The architecture of Mitsuba as well as some individual components are based on
-implementations discussed in: \emph{Physically Based Rendering - From Theory
+The architecture of Mitsuba as well as some individual components are based on
+implementations discussed in: \emph{Physically Based Rendering - From Theory
To Implementation} by Matt Pharr and Greg Humphreys.
Some of the GUI icons were taken from the Humanity icon set by Canonical Ltd.
@@ -13,10 +13,10 @@ The material test scene was created by Jonas Pilo, and the environment map
it uses is courtesy of Bernhard Vogl.
The included index of refraction data files for conductors are copied from
-PBRT. They are originally from the Luxpop database (\url{www.luxpop.com})
+PBRT. They are originally from the Luxpop database (\url{www.luxpop.com})
and are based on data by Palik et al. \cite{Palik1998Handbook}
and measurements of atomic scattering factors made by the Center For
-X-Ray Optics (CXRO) at Berkeley and the Lawrence Livermore National
+X-Ray Optics (CXRO) at Berkeley and the Lawrence Livermore National
Laboratory (LLNL).
The following people have kindly contributed code or bugfixes:
@@ -28,7 +28,7 @@ The following people have kindly contributed code or bugfixes:
\item Leonhard Gr\"unschlo\ss
\end{itemize}
-Mitsuba makes heavy use of the following amazing libraries and tools:
+Mitsuba makes heavy use of the following amazing libraries and tools:
\begin{itemize}
\item Qt 4 by Digia
\item OpenEXR by Industrial Light \& Magic
diff --git a/doc/basics.tex b/doc/basics.tex
index b344b23f..fccf5bcf 100644
--- a/doc/basics.tex
+++ b/doc/basics.tex
@@ -4,18 +4,18 @@ The rendering functionality of Mitsuba can be accessed through
a command line interface and an interactive Qt-based frontend. This section
provides some basic instructions on how to use them.
\subsection{Interactive frontend}
-To launch the interactive frontend, run \code{Mitsuba.app} on MacOS,
+To launch the interactive frontend, run \code{Mitsuba.app} on MacOS,
\code{mtsgui.exe} on Windows, and \code{mtsgui} on Linux (after sourcing \code{setpath.sh}).
You can also drag and drop scene files onto the application icon or the running program to open them.
A quick video tutorial on using the GUI can be found here: \url{http://vimeo.com/13480342}.
\subsection{Command line interface}
\label{sec:mitsuba}
-The \texttt{mitsuba} binary is an alternative non-interactive rendering
+The \texttt{mitsuba} binary is an alternative non-interactive rendering
frontend for command-line usage and batch job operation.
To get a listing of the parameters it supports, run
the executable without parameters:
\begin{shell}
-$\texttt{\$}$ mitsuba
+$\texttt{\$}$ mitsuba
\end{shell}
\begin{console}[label=lst:mitsuba-cli,caption=Command line options of the \texttt{mitsuba} binary]
Mitsuba version $\texttt{\MitsubaVersion}$, Copyright (c) $\texttt{\MitsubaYear}$ Wenzel Jakob
@@ -75,18 +75,18 @@ mode of operation is to render a single scene, which is provided as a parameter,
$\texttt{\$}$ mitsuba path-to/my-scene.xml
\end{shell}
It is also possible to connect to network render nodes, which essentially lets Mitsuba parallelize
-over additional cores. To do this, pass a semicolon-separated list of machines to
-the \code{-c} parameter.
+over additional cores. To do this, pass a semicolon-separated list of machines to
+the \code{-c} parameter.
\begin{shell}
$\texttt{\$}$ mitsuba -c machine1;machine2;... path-to/my-scene.xml
\end{shell}
There are two different ways in which you can access render nodes:
\begin{itemize}
\item\textbf{Direct}: Here, you create a direct connection to a running \code{mtssrv} instance on
-another machine (\code{mtssrv} is the Mitsuba server process). From the the performance
-standpoint, this approach should always be preferred over the SSH method described below when there is
+another machine (\code{mtssrv} is the Mitsuba server process). From the performance
+standpoint, this approach should always be preferred over the SSH method described below when there is
a choice between them. There are some disadvantages though: first, you need to manually start
-\code{mtssrv} on every machine you want to use.
+\code{mtssrv} on every machine you want to use.
And perhaps more importantly: the direct communication
protocol makes no provisions for a malicious user on the remote side. It is too costly
@@ -98,11 +98,11 @@ For direct connections, you can specify the remote port as follows:
\begin{shell}
$\texttt{\$}$ mitsuba -c machine:1234 path-to/my-scene.xml
\end{shell}
-When no port is explicitly specified, Mitsuba uses default value of 7554.
-\item \textbf{SSH}:
- This approach works as follows: The renderer creates a SSH connection
+When no port is explicitly specified, Mitsuba uses a default value of 7554.
+\item \textbf{SSH}:
+ This approach works as follows: The renderer creates a SSH connection
to the remote side, where it launches a Mitsuba worker instance.
- All subsequent communication then passes through the encrypted link.
+ All subsequent communication then passes through the encrypted link.
This is completely secure but slower due to the encryption overhead.
If you are rendering a complex scene, there is a good chance that it
won't matter much since most time is spent doing computations rather than
@@ -119,18 +119,18 @@ $\texttt{\$}$ mitsuba -c username@machine path-to/my-scene.xml
\begin{shell}
$\texttt{\$}$ mitsuba -c username@machine:/opt/mitsuba path-to/my-scene.xml
\end{shell}
- For the SSH connection approach to work, you \emph{must} enable passwordless
+ For the SSH connection approach to work, you \emph{must} enable passwordless
authentication.
- Try opening a terminal window and running the command \code{ssh username@machine}
+ Try opening a terminal window and running the command \code{ssh username@machine}
(replace with the details of your remote connection).
- If you are asked for a password, something is not set up correctly --- please see
+ If you are asked for a password, something is not set up correctly --- please see
\url{http://www.debian-administration.org/articles/152} for instructions.
On Windows, the situation is a bit more difficult since there is no suitable SSH client by
default. To get SSH connections to work, Mitsuba requires \code{plink.exe} (from PuTTY) to
be on the path. For passwordless authentication with a Linux/OSX-based
server, convert your private key to PuTTY's format using \code{puttygen.exe}.
- Afterwards, start \code{pageant.exe} to load and authenticate the key. All
+ Afterwards, start \code{pageant.exe} to load and authenticate the key. All
of these binaries are available from the PuTTY website.
It is possible to mix the two approaches to access some machines directly and others
@@ -152,7 +152,7 @@ machine3.domain.org:7346
Any attribute in the XML-based scene description language can be parameterized from the
command line.
For instance, you can render a scene several times with different reflectance values
-on a certain material by changing its description to something like
+on a certain material by changing its description to something like
\begin{xml}
@@ -160,28 +160,28 @@ on a certain material by changing its description to something like
\end{xml}
and running Mitsuba as follows:
\begin{shell}
-$\texttt{\$}$ mitsuba -Dreflectance=0.1 -o ref_0.1.exr scene.xml
-$\texttt{\$}$ mitsuba -Dreflectance=0.2 -o ref_0.2.exr scene.xml
-$\texttt{\$}$ mitsuba -Dreflectance=0.5 -o ref_0.5.exr scene.xml
+$\texttt{\$}$ mitsuba -Dreflectance=0.1 -o ref_0.1.exr scene.xml
+$\texttt{\$}$ mitsuba -Dreflectance=0.2 -o ref_0.2.exr scene.xml
+$\texttt{\$}$ mitsuba -Dreflectance=0.5 -o ref_0.5.exr scene.xml
\end{shell}
\subsubsection{Writing partial images to disk}
-When doing lengthy command line renders on Linux or OSX, it is possible
-to send a signal to the process using
-\begin{shell}
+When doing lengthy command line renders on Linux or OSX, it is possible
+to send a signal to the process using
+\begin{shell}
$\texttt{\$}$ killall -HUP mitsuba
\end{shell}
-This causes the renderer to write out the partially finished
-image, after which it continues rendering. This can sometimes be useful to
+This causes the renderer to write out the partially finished
+image, after which it continues rendering. This can sometimes be useful to
check if everything is working correctly.
\subsubsection{Rendering an animation}
The command line interface is ideally suited for rendering large amounts of files in batch
-operation. You can simply pass in the files using a wildcard in the filename.
+operation. You can simply pass in the files using a wildcard in the filename.
-If you've already rendered a subset of the frames and you only want to complete the remainder,
-add the \texttt{-x} flag, and all files with existing output will be skipped. You can also
-let the scheduler work on several scenes at once using the \texttt{-j} parameter --- this is
+If you've already rendered a subset of the frames and you only want to complete the remainder,
+add the \texttt{-x} flag, and all files with existing output will be skipped. You can also
+let the scheduler work on several scenes at once using the \texttt{-j} parameter --- this is
especially useful when parallelizing over multiple machines: as some of the participating machines
finish rendering the current frame, they can immediately start working on the next one
instead of having to wait for all other cores to finish. Altogether, you
@@ -189,6 +189,13 @@ might start the with parameters such as the following
\begin{shell}
$\texttt{\$}$ mitsuba -xj 2 -c machine1;machine2;... animation/frame_*.xml
\end{shell}
+Note that this requires a shell capable of expanding the asterisk into a list of
+filenames. The default Windows shell \code{cmd.exe} does not do this---however,
+the PowerShell supports the following syntax:
+\begin{shell}
+dir frame_*.xml | % $\texttt{\{}$ $\texttt{\$\_}$ $\texttt{\}}$
+\end{shell}
+
\subsection{Direct connection server}
\label{sec:mtssrv}
A Mitsuba compute node can be created using the \code{mtssrv} executable. By default,
@@ -206,26 +213,26 @@ $\texttt{\$}$ mtssrv -i maxwell.cs.cornell.edu
\end{shell}
As advised in Section~\ref{sec:mitsuba}, it is advised to run \code{mtssrv} \emph{only} in trusted networks.
-One nice feature of \code{mtssrv} is that it (like the \code{mitsuba} executable)
-also supports the \code{-c} and \code{-s} parameters, which create connections
+One nice feature of \code{mtssrv} is that it (like the \code{mitsuba} executable)
+also supports the \code{-c} and \code{-s} parameters, which create connections
to additional compute servers.
Using this feature, one can create hierarchies of compute nodes. For instance,
-the root \code{mttsrv} instance of such a hierarchy could share its work with a
-number of other machines running \code{mtssrv}, and each of these might also
+the root \code{mtssrv} instance of such a hierarchy could share its work with a
+number of other machines running \code{mtssrv}, and each of these might also
share their work with further machines, and so on...
-The parallelization over such hierarchies happens transparently---when
-connecting a renderering process to the root node, it sees a machine
+The parallelization over such hierarchies happens transparently---when
+connecting a rendering process to the root node, it sees a machine
with hundreds or thousands of cores, to which it can submit work without
-needing to worry about how exactly it is going to be spread out in
+needing to worry about how exactly it is going to be spread out in
the hierarchy.
Such hierarchies are mainly useful to reduce communication bottlenecks when distributing
large resources (such as scenes) to remote machines. Imagine the following hypothetical scenario:
-you would like to render a 50MB-sized scene while at home, but rendering is too slow.
+you would like to render a 50MB-sized scene while at home, but rendering is too slow.
You decide to tap into some extra machines available
at your workplace, but this usually doesn't make things much faster because of the relatively slow broadband
-connection and the need to transmit your scene to every single compute node involved.
+connection and the need to transmit your scene to every single compute node involved.
Using \code{mtssrv}, you can
instead designate a central scheduling node at your workplace, which accepts connections and delegates
@@ -233,10 +240,10 @@ rendering tasks to the other machines. In this case, you will only have to trans
and the remaining distribution happens over the fast local network at your workplace.
\subsection{Utility launcher}
\label{sec:mtsutil}
-When working on a larger project, one often needs to implement various utility programs that
+When working on a larger project, one often needs to implement various utility programs that
perform simple tasks, such as applying a filter to an image or processing
-a matrix stored in a file. In a framework like Mitsuba, this unfortunately involves
-a significant coding overhead in initializing the necessary APIs on all supported platforms.
+a matrix stored in a file. In a framework like Mitsuba, this unfortunately involves
+a significant coding overhead in initializing the necessary APIs on all supported platforms.
To reduce this tedious work on the side of the programmer, Mitsuba comes with a utility launcher
called \code{mtsutil}.
@@ -250,7 +257,7 @@ For a listing of all supported options and utilities, enter the command without
\label{sec:tonemapper}
One particularly useful utility that shall be mentioned here is the batch tonemapper, which
loads EXR/RGBE images and writes tonemapped 8-bit PNG/JPGs. This can save much time when one has to
-process many high dynamic-range images such as animation frames using the same basic operations,
+process many high dynamic-range images such as animation frames using the same basic operations,
e.g. gamma correction, changing the overall brightness, resizing, cropping, etc. The available
command line options are shown in \lstref{tonemap-cli}.
@@ -282,14 +289,14 @@ Options/Arguments:
between [0, 1] chooses between low and high-key images and
'burn' (also [0, 1]) controls how much highlights may burn out
- -x Temporal coherence mode: activate this flag when tonemapping
+ -x Temporal coherence mode: activate this flag when tonemapping
frames of an animation using the '-p' option to avoid flicker
-o file Save the output with a given filename
-t Multithreaded: process several files in parallel
- The operations are ordered as follows: 1. crop, 2. resize, 3. color-balance,
- 4. tonemap, 5. annotate. To simply process a directory full of EXRs in
+ The operations are ordered as follows: 1. crop, 2. resize, 3. color-balance,
+ 4. tonemap, 5. annotate. To simply process a directory full of EXRs in
parallel, run the following: 'mtsutil tonemap -t path-to-directory/*.exr'
\end{console}
diff --git a/doc/compiling.tex b/doc/compiling.tex
index 353cfc71..6faa5c36 100644
--- a/doc/compiling.tex
+++ b/doc/compiling.tex
@@ -1,23 +1,23 @@
\section{Compiling the renderer}
\label{sec:compiling}
-To compile Mitsuba, you will need a recent C++ compiler (e.g. GCC 4.2+ or
-Visual Studio 2010) and some additional libraries, which Mitsuba uses internally.
+To compile Mitsuba, you will need a recent C++ compiler (e.g. GCC 4.2+ or
+Visual Studio 2010) and some additional libraries, which Mitsuba uses internally.
Builds on all supported platforms are done using a unified system
-based on SCons (\url{http://www.scons.org}), which is a Python-based
-software construction tool. The exact process is different depending on
+based on SCons (\url{http://www.scons.org}), which is a Python-based
+software construction tool. The exact process is different depending on
which operating system is used and will be explained in the following subsections.
\subsection{Common steps}
To get started, you will need to download a recent version of the Mitsuba source code. Before
-doing this, ensure that you have read the licensing agreement
-(Section~\ref{sec:license}), and that you abide by its contents. Note that, being a ``viral''
+doing this, ensure that you have read the licensing agreement
+(Section~\ref{sec:license}), and that you abide by its contents. Note that, being a ``viral''
license, the GPL automatically applies to derivative work. Amongst other things, this
means that Mitsuba's source code is \emph{off-limits} to those who develop rendering
software not distributed under a compatible license.
-Check that the Mercurial (\url{http://mercurial.selenic.com/}) versioning
-system\footnote{On Windows, you might want to use the convenient TortoiseHG shell
-extension (\url{http://tortoisehg.bitbucket.org/}) to run the subsequent steps directly from the Explorer.}
+Check that the Mercurial (\url{http://mercurial.selenic.com/}) versioning
+system\footnote{On Windows, you might want to use the convenient TortoiseHG shell
+extension (\url{http://tortoisehg.bitbucket.org/}) to run the subsequent steps directly from the Explorer.}
is installed, which is required to fetch the most recent source code release.
Begin by entering the following at the command prompt (or run an equivalent command from a graphical Mercurial frontend):
\begin{shell}
@@ -38,7 +38,7 @@ will run extremely slowly. Its main use is to track down elusive bugs.
\paragraph{Windows:}
On Windows, builds can either be performed using the Visual Studio 2010\footnote{No other Visual Studio versions are currently supported.}
compiler or Intel XE Composer (on top of Visual Studio 2010).
-Note that Visual Studio 2010 Service Pack 1 \emph{must} be installed or the resulting binaries will crash.
+Note that Visual Studio 2010 Service Pack 1 \emph{must} be installed or the resulting binaries will crash.
\begin{description}
\item[\code{build/config-\{win32, win64\}-\{msvc2010, msvc2010-debug\}.py}:] Create 32 or 64 bit binaries using Microsoft Visual C++ version 2010.
The configurations with the suffix \code{-debug} will include debug symbols in all binaries, which run very slowly.
@@ -66,7 +66,7 @@ $\texttt{\$}$ cp build/config-linux-gcc.py config.py
\subsection{Compilation flags}
\label{sec:compiling-flags}
There are several flags that affect the behavior of Mitsuba and must be specified at compile time.
-These usually don't need to be changed, but if you want to compile Mitsuba for spectral rendering, or
+These usually don't need to be changed, but if you want to compile Mitsuba for spectral rendering, or
to use double precision for internal computations then the following may be useful. Otherwise, you may skip ahead to the subsection
that covers your operating system.
@@ -77,13 +77,13 @@ The following options are available:
enabled by default (even in release builds).
\item[\texttt{MTS\_KD\_DEBUG}] Enable additional checks in the kd-tree. This
is quite slow and mainly useful to track down bugs when they are suspected.
-\item[\texttt{MTS\_KD\_CONSERVE\_MEMORY}] Use a more compact representation
+\item[\texttt{MTS\_KD\_CONSERVE\_MEMORY}] Use a more compact representation
for triangle geometry (at the cost of speed). This flag causes Mitsuba to use the somewhat slower
Moeller-Trumbore triangle intersection method instead of the default Wald
intersection test, which has an overhead of 48 bytes per triangle.
Off by default.
\item[\texttt{MTS\_SSE}]Activate optimized SSE routines. On by default.
-\item[\texttt{MTS\_HAS\_COHERENT\_RT}]Include coherent ray tracing support (depends on \texttt{MTS\_SSE}). This flag is activated by default.
+\item[\texttt{MTS\_HAS\_COHERENT\_RT}]Include coherent ray tracing support (depends on \texttt{MTS\_SSE}). This flag is activated by default.
\item[\texttt{MTS\_DEBUG\_FP}]Generated NaNs and overflows will cause floating point exceptions, which can be caught in a debugger. This is slow and mainly meant as a debugging tool for developers. Off by default.
\item[\texttt{SPECTRUM\_SAMPLES=}$\langle ..\rangle$]This setting defines the number of spectral samples (in the 368-830 $nm$ range) that are used to render scenes. The default is 3 samples, in which case the renderer automatically turns into an RGB-based system. For high-quality spectral rendering, this should be set to 30 or higher.
Refer also to \secref{colorspaces}.
@@ -95,24 +95,24 @@ fallback instead of the hardware-accelerated realtime preview.
This is useful when the binary will be executed over a remote link using a
protocol such as RDP (which does not provide the requisite OpenGL features).
\end{description}
-All of the default configurations files located in the \texttt{build} directory use the flags
+All of the default configurations files located in the \texttt{build} directory use the flags
\code{SINGLE\_PRECISION}, \code{SPECTRUM\_SAMPLES=3}, \code{MTS\_DEBUG}, \code{MTS\_SSE},
as well as \code{MTS\_HAS\_COHERENT\_RT}.
\subsection{Building on Debian or Ubuntu Linux}
\label{sec:compiling-ubuntu}
-You'll first need to install a number of dependencies. It is assumed here that you are using a
-recent version of Ubuntu Linux (Precise Pangolin / 12.04 LTS or later), hence some of the
+You'll first need to install a number of dependencies. It is assumed here that you are using a
+recent version of Ubuntu Linux (Precise Pangolin / 12.04 LTS or later), hence some of the
package may be named differently if you are using Debian Linux or another Ubuntu version.
First, run
\begin{shell}
-$\text{\$}$ sudo apt-get install build-essential scons mercurial qt4-dev-tools libpng12-dev
- libjpeg62-dev libilmbase-dev libxerces-c-dev libboost-all-dev
+$\text{\$}$ sudo apt-get install build-essential scons mercurial qt4-dev-tools libpng12-dev
+ libjpeg62-dev libilmbase-dev libxerces-c-dev libboost-all-dev
libopenexr-dev libglewmx1.5-dev libxxf86vm-dev libpcrecpp0 libeigen3-dev
\end{shell}
-To get COLLADA support, you will also need to install the \texttt{collada-dom} packages
-or build them from scratch. Here, we install the \code{x86\_64} binaries and development
+To get COLLADA support, you will also need to install the \texttt{collada-dom} packages
+or build them from scratch. Here, we install the \code{x86\_64} binaries and development
headers that can be found on the Mitsuba website (at \url{http://www.mitsuba-renderer.org/releases/current})
\begin{shell}
$\text{\$}$ sudo dpkg --install collada-dom_*.deb
@@ -121,7 +121,7 @@ To start a regular build, run
\begin{shell}
$\text{\$}$ scons
\end{shell}
-inside the Mitsuba directory. In the case that you have multiple processors, you might want to parallelize the
+inside the Mitsuba directory. In the case that you have multiple processors, you might want to parallelize the
build by appending \code{-j }\emph{core count} to the \code{scons} command.
If all goes well, SCons should finish successfully within a few minutes:
\begin{shell}
@@ -129,16 +129,15 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
-$\text{\$}$ . setpath.sh
+$\text{\$}$ source setpath.sh
\end{shell}
-(note the period at the beginning -- this assumes that you are using \code{bash}).
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Debian or Ubuntu Linux packages}
The preferred way of redistributing executables on Debian or Ubuntu Linux is to create
\code{.deb} package files. To make custom Mitsuba packages, it is strongly recommended
-that you work with a pristine installation of the target operating system\footnote{Several commercial graphics
-drivers ``pollute'' the OpenGL setup so that the compiled Mitsuba binaries
-can only be used on machines using the same drivers. For this reason, it is
+that you work with a pristine installation of the target operating system\footnote{Several commercial graphics
+drivers ``pollute'' the OpenGL setup so that the compiled Mitsuba binaries
+can only be used on machines using the same drivers. For this reason, it is
better to work from a clean bootstrapped install.}. This can be done as follows:
first, install \code{debootstrap} and download the most recent operating system release
to a subdirectory. The following example is based on Ubuntu 12.04 LTS (``Precise Pangolin''),
@@ -169,18 +168,18 @@ $\text{\$}$ dpkg-buildpackage -nc
After everything has been built, you should find the created package files
in the root directory.
\subsubsection{Releasing Ubuntu packages}
-To redistribute Ubuntu packages over the Internet or a local network, it is convenient to
+To redistribute Ubuntu packages over the Internet or a local network, it is convenient to
put them into an \code{apt}-compatible repository. To prepare such a
-repository, put the two \code{deb}-files built in the last section,
-as well as the \code{collada-dom} \code{deb}-files into a public directory
+repository, put the two \code{deb}-files built in the last section,
+as well as the \code{collada-dom} \code{deb}-files into a public directory
made available by a HTTP server and inside it, run
\begin{shell}
path-to-htdocs$\text{\$}$ dpkg-scanpackages path/to/deb-directory /dev/null | gzip -9c > path/to/deb-directory/Packages.gz
\end{shell}
This will create a repository index file named \code{Packages.gz}.
Note that you must execute this command in the root directory of the
-HTTP server's web directory and provide the relative path to the
-package files -- otherwise, the index file will specify the wrong package
+HTTP server's web directory and provide the relative path to the
+package files -- otherwise, the index file will specify the wrong package
paths. Finally, the whole directory can be uploaded to some public location
and then referenced by placing a line following the pattern
\begin{shell}
@@ -190,7 +189,7 @@ into the \code{/etc/apt/sources.list} file. This setup is convenient for
distributing a custom Mitsuba build to many Debian or Ubuntu machines running (e.g. to nodes in a rendering cluster).
\subsection{Building on Fedora Core}
You'll first need to install a number of dependencies. It is assumed here
-that you are using FC15, hence some of the package may be named differently if you are
+that you are using FC15, hence some of the package may be named differently if you are
using another version.
First, run
@@ -208,9 +207,8 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
-$\text{\$}$ . setpath.sh
+$\text{\$}$ source setpath.sh
\end{shell}
-(note the period at the beginning -- this assumes that you are using \code{bash}).
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Fedora Core packages}
To create \code{RPM} packages, you will need to install the \code{RPM} development tools:
@@ -232,7 +230,7 @@ $\text{\$}$ rpmbuild -bb mitsuba-$\code{\MitsubaVersion}$/data/linux/fedora/mits
\end{shell}
After this command finishes, its output can be found in the directory \code{rpmbuild/RPMS}.
\subsection{Building on Arch Linux}
-You'll first need to install a number of dependencies:
+You'll first need to install a number of dependencies:
\begin{shell}
$\text{\$}$ sudo pacman -S gcc xerces-c glew openexr boost libpng libjpeg qt scons mercurial python
\end{shell}
@@ -261,9 +259,9 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
-$\text{\$}$ . setpath.sh
+$\text{\$}$ source setpath.sh
\end{shell}
-(note the period at the beginning -- this assumes that you are using \code{bash}).
+
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Arch Linux packages}
Mitsuba ships with a \code{PKGBUILD} file, which automatically builds
@@ -292,16 +290,16 @@ There are a few other things that need to be set up: make sure that your
installation of Visual Studio is up to date, since Mitsuba binaries created with versions
prior to Service Pack 1 will crash.
-Next, you will need to install Python 2.6.x
-(\url{www.python.org}) and SCons\footnote{Note that on some Windows machines, the SCons
-installer generates a warning about not finding Python in the registry. In this case, you
-can instead run \code{python setup.py install} within the source release of SCons.}
-(\url{http://www.scons.org}, any 2.x version will do) and ensure that they are contained in the \code{\%PATH\%}
-environment variable so that entering \code{scons} on the command prompt (\code{cmd.exe})
+Next, you will need to install Python 2.6.x
+(\url{www.python.org}) and SCons\footnote{Note that on some Windows machines, the SCons
+installer generates a warning about not finding Python in the registry. In this case, you
+can instead run \code{python setup.py install} within the source release of SCons.}
+(\url{http://www.scons.org}, any 2.x version will do) and ensure that they are contained in the \code{\%PATH\%}
+environment variable so that entering \code{scons} on the command prompt (\code{cmd.exe})
launches the build system.
-Having installed all dependencies, run the ``Visual Studio \emph{2010} Command
-Prompt'' from the Start Menu (\code{x86} for 32-bit or \code{x64} for 64bit),
+Having installed all dependencies, run the ``Visual Studio \emph{2010} Command
+Prompt'' from the Start Menu (\code{x86} for 32-bit or \code{x64} for 64bit),
navigate to the Mitsuba directory, and simply run
\begin{shell}
C:\mitsuba\>scons
@@ -309,17 +307,17 @@ C:\mitsuba\>scons
In the case that you have multiple processors, you might want to parallelize the build by appending the option \code{-j }\emph{core count} to the \code{scons} command.
If all goes well, the build process will finish successfully after a few
-minutes. \emph{Note} that in comparison to the other platforms, you don't have to run the \code{setpath.sh} script at this point.
+minutes. \emph{Note} that in comparison to the other platforms, you don't have to run the \code{setpath.sh} script at this point.
All binaries are automatically copied into the \code{dist} directory, and they should be executed directly from there.
\subsubsection{Integration with the Visual Studio interface}
-Basic Visual Studio 2010 integration with support for code completion
-exists for those who develop Mitsuba code on Windows.
-To use the supplied projects, simply double-click on one of the two files \code{build/mitsuba-msvc2010.sln}
+Basic Visual Studio 2010 integration with support for code completion
+exists for those who develop Mitsuba code on Windows.
+To use the supplied projects, simply double-click on one of the two files \code{build/mitsuba-msvc2010.sln}
and \code{build/mitsuba-msvc2010.sln}. These Visual Studio projects still internally
-use the SCons-based build system to compile Mitsuba; whatever
+use the SCons-based build system to compile Mitsuba; whatever
build configuration is selected within Visual Studio will be used to pick a matching
-configuration file from the \texttt{build} directory.
+configuration file from the \texttt{build} directory.
\subsection{Building on Mac OS X}
\vspace{-5mm}
@@ -332,13 +330,13 @@ configuration file from the \texttt{build} directory.
Compiling Mitsuba's dependencies on Mac OS is a laborious process; for convenience, there
is a repository that provides them in precompiled form. To use this repository, clone it
using Mercurial and rename the directory so that it forms the \code{dependencies} subdirectory
-inside the main Mitsuba directory, i.e. run something like
+inside the main Mitsuba directory, i.e. run something like
\begin{shell}
$\text{\$}$ cd mitsuba
$\text{\$}$ hg clone https://www.mitsuba-renderer.org/hg/dependencies_macos
$\text{\$}$ mv dependencies_macos dependencies
\end{shell}
-You will also need to install SCons (>2.0.0, available at \code{www.scons.org}) and
+You will also need to install SCons (>2.0.0, available at \code{www.scons.org}) and
a recent release of XCode, including its command-line compilation tools. Next, run
\begin{shell}
$\text{\$}$ scons
@@ -350,7 +348,6 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
-$\text{\$}$ . setpath.sh
+$\text{\$}$ source setpath.sh
\end{shell}
-(note the period at the beginning -- this assumes that you are using \code{bash}).
diff --git a/doc/development.tex b/doc/development.tex
index 228576b2..aae78c42 100644
--- a/doc/development.tex
+++ b/doc/development.tex
@@ -1,7 +1,7 @@
\part{Development guide}
This chapter and the subsequent ones will provide an overview
-of the the coding conventions and general architecture of Mitsuba.
-You should only read them if if you wish to interface with the API
+of the coding conventions and general architecture of Mitsuba. 
+You should only read them if you wish to interface with the API 
in some way (e.g. by developing your own plugins). The coding style
section is only relevant if you plan to submit patches that are meant
to become part of the main codebase.
@@ -9,15 +9,15 @@ to become part of the main codebase.
\section{Code structure}
Mitsuba is split into four basic support libraries:
\begin{itemize}
-\item The core library (\code{libcore}) implements basic functionality such as
+\item The core library (\code{libcore}) implements basic functionality such as
cross-platform file and bitmap I/O, data structures, scheduling, as well as logging and plugin management.
-\item The rendering library (\code{librender}) contains abstractions
+\item The rendering library (\code{librender}) contains abstractions
needed to load and represent scenes containing light sources, shapes, materials, and participating media.
\item The hardware acceleration library (\code{libhw})
implements a cross-platform display library, an object-oriented OpenGL
- wrapper, as well as support for rendering interactive previews of scenes.
+ wrapper, as well as support for rendering interactive previews of scenes.
\item Finally, the bidirectional library (\code{libbidir})
- contains a support layer that is used to implement bidirectional rendering algorithms such as
+ contains a support layer that is used to implement bidirectional rendering algorithms such as
Bidirectional Path Tracing and Metropolis Light Transport.
\end{itemize}
A detailed reference of these APIs is available at
@@ -25,11 +25,11 @@ A detailed reference of these APIs is available at
present a few basic examples to get familiar with them.
\section{Coding style}
-\paragraph{Indentation:} The Mitsuba codebase uses tabs for indentation,
+\paragraph{Indentation:} The Mitsuba codebase uses tabs for indentation,
which expand to \emph{four} spaces. Please make sure that you configure your editor
this way, otherwise the source code layout will look garbled.
-\paragraph{Placement of braces:} Opening braces should be placed on the
+\paragraph{Placement of braces:} Opening braces should be placed on the
same line to make the best use of vertical space, i.e.
\begin{cpp}
if (x > y) {
@@ -54,9 +54,9 @@ if ( x==y ){
..
\end{cpp}
-\paragraph{Name format:} Names are always written in camel-case.
+\paragraph{Name format:} Names are always written in camel-case.
Classes and structures start with a capital letter, whereas member functions
-and attributes start with a lower-case letter. Attributes of classes
+and attributes start with a lower-case letter. Attributes of classes
have the prefix \code{m\_}. Here is an example:
\begin{cpp}
class MyClass {
@@ -86,14 +86,14 @@ and properly conveys the semantics.
as getters and setters.
\paragraph{Documentation:} Headers files should contain
Doxygen-compatible documentation. It is also a good idea to add
-comments to a \code{.cpp} file to explain subtleties of an implemented algorithm.
+comments to a \code{.cpp} file to explain subtleties of an implemented algorithm.
However, anything pertaining to the API should go into the header file.
\paragraph{Boost:} Use the boost libraries whenever this helps to save
time or write more compact code.
\paragraph{Classes vs structures:}In Mitsuba, classes usually go onto the heap,
-whereas structures may be allocated both on the stack and the heap.
+whereas structures may be allocated both on the stack and the heap.
Classes that derive from \code{Object} implement a protected virtual
deconstructor, which explicitly prevents them from being allocated on the stack.
@@ -110,6 +110,6 @@ if (..) {
\paragraph{Separation of plugins:}Mitsuba encourages that plugins are only
used via the generic interface they implement. You will find that almost all plugins
(e.g. emitters) don't actually provide a header file, hence they can only be accessed
-using the generic \code{Emitter} interface they implement. If any kind of special
-interaction between plugins is needed, this is usually an indication that the
+using the generic \code{Emitter} interface they implement. If any kind of special
+interaction between plugins is needed, this is usually an indication that the
generic interface should be extended to accommodate this.
diff --git a/doc/format.tex b/doc/format.tex
index c9de4401..c7e31189 100644
--- a/doc/format.tex
+++ b/doc/format.tex
@@ -1,8 +1,8 @@
\section{Scene file format}
\label{sec:format}
-Mitsuba uses a very simple and general XML-based format to represent scenes.
-Since the framework's philosophy is to represent discrete blocks of functionality as plugins,
-a scene file can essentially be interpreted as description that determines which
+Mitsuba uses a very simple and general XML-based format to represent scenes.
+Since the framework's philosophy is to represent discrete blocks of functionality as plugins,
+a scene file can essentially be interpreted as a description that determines which 
plugins should be instantiated and how they should interface with each other.
In the following, we'll look at a few examples to get a feeling for the scope of the
format.
@@ -18,17 +18,17 @@ something like this:
\end{xml}
The scene version attribute denotes the release of Mitsuba that was used to
-create the scene. This information allows Mitsuba to always correctly process the
+create the scene. This information allows Mitsuba to always correctly process the
file regardless of any potential future changes in the scene description language.
This example already contains the most important things to know about format: you can have
-\emph{objects} (such as the objects instantiated by the \code{scene} or \code{shape} tags),
-which are allowed to be nested within each other. Each object optionally accepts \emph{properties}
-(such as the \code{string} tag), which further characterize its behavior. All objects except
+\emph{objects} (such as the objects instantiated by the \code{scene} or \code{shape} tags),
+which are allowed to be nested within each other. Each object optionally accepts \emph{properties}
+(such as the \code{string} tag), which further characterize its behavior. All objects except
for the root object (the \code{scene}) cause the renderer to search and load a plugin from disk,
-hence you must provide the plugin name using \code{type=".."} parameter.
+hence you must provide the plugin name using the \code{type=".."} parameter. 
-The object tags also let the renderer know \emph{what kind} of object is to be instantiated: for instance,
+The object tags also let the renderer know \emph{what kind} of object is to be instantiated: for instance,
any plugin loaded using the \code{shape} tag must conform to the \emph{Shape} interface, which is
certainly the case for the plugin named \code{obj} (it contains a WaveFront OBJ loader).
Similarly, you could write
@@ -40,12 +40,12 @@ Similarly, you could write
\end{xml}
-This loads a different plugin (\code{sphere}) which is still a \emph{Shape}, but instead represents
-a sphere configured with a radius of 10 world-space units. Mitsuba ships with
+This loads a different plugin (\code{sphere}) which is still a \emph{Shape}, but instead represents
+a sphere configured with a radius of 10 world-space units. Mitsuba ships with
a large number of plugins; please refer to the next chapter for a detailed
overview of them.
-The most common scene setup is to declare an integrator, some geometry, a sensor (e.g. a camera), a film, a sampler
+The most common scene setup is to declare an integrator, some geometry, a sensor (e.g. a camera), a film, a sampler
and one or more emitters. Here is a more complex example:
\begin{xml}
@@ -92,20 +92,20 @@ and one or more emitters. Here is a more complex example:
-
+
-
+
\end{xml}
This example introduces several new object types (\code{integrator, sensor, bsdf, sampler, film}, and \code{emitter})
-and property types (\code{integer}, \code{transform}, and \code{rgb}).
+and property types (\code{integer}, \code{transform}, and \code{rgb}).
As you can see in the example, objects are usually declared at the top level except if there is some
-inherent relation that links them to another object. For instance, BSDFs are usually specific to a certain geometric object, so
+inherent relation that links them to another object. For instance, BSDFs are usually specific to a certain geometric object, so
they appear as a child object of a shape. Similarly, the sampler and film affect the way in which
rays are generated from the sensor and how it records the resulting radiance samples, hence they are nested inside it.
@@ -135,45 +135,45 @@ uses a basic linear RGB representation\footnote{The official
releases all use linear RGB---to do spectral renderings, you will have
to compile Mitsuba yourself.}.
Irrespective of which internal representation is used, Mitsuba supports
-several different ways of specifying color information, which is then
+several different ways of specifying color information, which is then
converted appropriately.
-The preferred way of passing color spectra to the renderer is to explicitly
+The preferred way of passing color spectra to the renderer is to explicitly
denote the associated wavelengths of each value:
\begin{xml}
\end{xml}
-This is a mapping from wavelength in nanometers (before the colon)
+This is a mapping from wavelength in nanometers (before the colon)
to a reflectance or intensity value (after the colon).
Values in between are linearly interpolated from the two closest neighbors.
-A useful shortcut to get a completely uniform spectrum, it is to provide
+A useful shortcut to get a completely uniform spectrum is to provide 
only a single value:
\begin{xml}
\end{xml}
Another (discouraged) option is to directly provide the spectrum in Mitsuba's
-internal representation, avoiding the need for any kind of conversion.
+internal representation, avoiding the need for any kind of conversion.
However, this is problematic, since the associated scene will likely not work
-anymore when Mitsuba is compiled with a different value of
-\texttt{SPECTRUM\_SAMPLES}.
-For completeness, the possibility is explained nonetheless. Assuming that
-the 360-830$nm$ range is discretized into ten 47$nm$-sized blocks
-(i.e. \texttt{SPECTRUM\_SAMPLES} is set to 10), their values can be specified
+anymore when Mitsuba is compiled with a different value of
+\texttt{SPECTRUM\_SAMPLES}.
+For completeness, the possibility is explained nonetheless. Assuming that
+the 360-830$nm$ range is discretized into ten 47$nm$-sized blocks
+(i.e. \texttt{SPECTRUM\_SAMPLES} is set to 10), their values can be specified
as follows:
\begin{xml}
\end{xml}
-Another convenient way of providing color spectra is by specifying linear RGB
+Another convenient way of providing color spectra is by specifying linear RGB
or sRGB values using floating-point triplets or hex values:
\begin{xml}
\end{xml}
-When Mitsuba is compiled with the default settings, it internally uses
-linear RGB to represent colors, so these values can directly be used.
+When Mitsuba is compiled with the default settings, it internally uses
+linear RGB to represent colors, so these values can directly be used.
However, when configured for doing spectral rendering, a suitable color
spectrum with the requested RGB reflectance must be found. This is a tricky
problem, since there is an infinite number of spectra with this property.
@@ -190,9 +190,9 @@ The \texttt{reflectance} intent is used by default, so remember to
set it to \texttt{illuminant} when defining the brightness of a
light source with the \texttt{} tag.
-When spectral power or reflectance distributions are obtained from measurements
+When spectral power or reflectance distributions are obtained from measurements
(e.g. at 10$nm$ intervals), they are usually quite unwieldy and can clutter
-the scene description. For this reason, there is yet another way to pass
+the scene description. For this reason, there is yet another way to pass
a spectrum by loading it from an external file:
\begin{xml}
@@ -213,23 +213,23 @@ are allowed. Here is an example:
\renderings{
\fbox{\includegraphics[width=10cm]{images/blackbody}}
\hfill\,
- \caption{\label{fig:blackbody}A few simulated
+ \caption{\label{fig:blackbody}A few simulated
black body emitters over a range of temperature values}
}
\label{sec:blackbody}
-Finally, it is also possible to specify the spectral distribution of a black body emitter (\figref{blackbody}),
+Finally, it is also possible to specify the spectral distribution of a black body emitter (\figref{blackbody}),
where the temperature is given in Kelvin.
\begin{xml}
\end{xml}
Note that attaching a black body spectrum to the \texttt{intensity} property
-of a emitter introduces physical units into the rendering process of
-Mitsuba, which is ordinarily a unitless system\footnote{This means that the
-units of pixel values in a rendering are completely dependent on the units of
-the user input, including the unit of world-space distance and the units of
+of an emitter introduces physical units into the rendering process of 
+Mitsuba, which is ordinarily a unitless system\footnote{This means that the
+units of pixel values in a rendering are completely dependent on the units of
+the user input, including the unit of world-space distance and the units of
the light source emission profile.}.
-Specifically, the black body spectrum has units of power ($W$) per
+Specifically, the black body spectrum has units of power ($W$) per
unit area ($m^{-2}$) per steradian ($sr^{-1}$) per unit wavelength ($nm^{-1}$).
If these units are inconsistent with your scene description, you may use the
optional \texttt{scale} attribute to adjust them, e.g.:
@@ -244,11 +244,11 @@ Points and vectors can be specified as follows:
\end{xml}
-It is important that whatever you choose as world-space units (meters, inches, etc.) is
+It is important that whatever you choose as world-space units (meters, inches, etc.) is
used consistently in all places.
\subsubsection{Transformations}
Transformations are the only kind of property that require more than a single tag. The idea is that, starting
-with the identity, one can build up a transformation using a sequence of commands. For instance, a transformation that
+with the identity, one can build up a transformation using a sequence of commands. For instance, a transformation that
does a translation followed by a rotation might be written like this:
\begin{xml}
@@ -276,8 +276,8 @@ choices are available:
\begin{xml}
\end{xml}
-\item lookat transformations --- this is primarily useful for setting up cameras (and spot lights). The \code{origin} coordinates
-specify the camera origin, \code{target} is the point that the camera will look at, and the
+\item \code{lookat} transformations --- this is primarily useful for setting up cameras (and spot lights). The \code{origin} coordinates
+specify the camera origin, \code{target} is the point that the camera will look at, and the
(optional) \code{up} parameter determines the ``upward'' direction in the final rendered image.
The \code{up} parameter is not needed for spot lights.
\begin{xml}
@@ -286,7 +286,38 @@ The \code{up} parameter is not needed for spot lights.
\end{itemize}
Cordinates that are zero (for \code{translate} and \code{rotate}) or one (for \code{scale})
do not explicitly have to be specified.
-\subsection{Instancing}
+\subsection{Animated transformations}
+Most shapes, emitters, and sensors in Mitsuba can accept both normal transformations
+and \emph{animated transformations} as parameters. The latter is useful to
+render scenes involving motion blur (Figure~\ref{fig:animated-transform}). The syntax used to specify these
+is slightly different:
+\begin{xml}
+
+
+ .. chained list of transformations as discussed above ..
+
+
+
+ .. chained list of transformations as discussed above ..
+
+
+ .. additional transformations (optional) ..
+
+\end{xml}
+\renderings{
+ \fbox{\includegraphics[width=.6\textwidth]{images/animated_transform}}\hfill\,
+ \caption{\label{fig:animated-transform}Beware the dragon: a triangle mesh undergoing linear motion with several keyframes (object courtesy of XYZRGB)}
+}
+
+Mitsuba then decomposes each transformation into a scale, translation, and
+rotation component and interpolates\footnote{Using linear interpolation
+for the scale and translation component and spherical linear quaternion
+interpolation for the rotation component.} these for intermediate
+time values.
+It is important to specify appropriate shutter open/close times
+to the sensor so that the motion is visible.
+\newpage
+\subsection{References}
Quite often, you will find yourself using an object (such as a material) in many places. To avoid having
to declare it over and over again, which wastes memory, you can make use of references. Here is an example
of how this works:
@@ -297,7 +328,7 @@ of how this works:
-
@@ -311,7 +342,7 @@ of how this works:
\end{xml}
By providing a unique \texttt{id} attribute in the
-object declaration, the object is bound to that identifier
+object declaration, the object is bound to that identifier
upon instantiation.
Referencing this identifier at a later point (using the \texttt{} tag)
will add the instance to the parent object, with no further memory
@@ -326,7 +357,7 @@ it cannot be used to instantiate geometry---if this functionality is needed,
take a look at the \pluginref{instance} plugin.
\subsection{Including external files}
-A scene can be split into multiple pieces for better readability.
+A scene can be split into multiple pieces for better readability.
to include an external file, please use the following command:
\begin{xml}
diff --git a/doc/gendoc.py b/doc/gendoc.py
index 3d9b4713..d83aed88 100755
--- a/doc/gendoc.py
+++ b/doc/gendoc.py
@@ -1,6 +1,6 @@
-#! /usr/bin/python
-#
-# This script walks through all plugin files and
+#!/usr/bin/env python
+#
+# This script walks through all plugin files and
# extracts documentation that should go into the
# reference manual
@@ -52,11 +52,8 @@ def process(path, target):
fileList += [fname]
fileList = []
- # Wrap the walk function to make this work in python 2 and 3.
- if pyVer >= 3:
- os.walk(path, capture, fileList)
- else:
- os.path.walk(path, capture, fileList)
+ for (dirname, subdirs, files) in os.walk(path):
+ capture(fileList, dirname, files)
ordering = [(findOrderID(fname), fname) for fname in fileList]
ordering = sorted(ordering, key = lambda entry: entry[0])
@@ -64,34 +61,40 @@ def process(path, target):
for entry in ordering:
extract(target, entry[1])
-os.chdir(os.path.dirname(__file__))
-f = open('plugins_generated.tex', 'w')
-f.write('\input{section_shapes}\n')
-process('../src/shapes', f)
-f.write('\input{section_bsdf}\n')
-process('../src/bsdfs', f)
-f.write('\input{section_textures}\n')
-process('../src/textures', f)
-f.write('\input{section_subsurface}\n')
-process('../src/subsurface', f)
-f.write('\input{section_media}\n')
-process('../src/medium', f)
-f.write('\input{section_phase}\n')
-process('../src/phase', f)
-f.write('\input{section_volumes}\n')
-process('../src/volume', f)
-f.write('\input{section_emitters}\n')
-process('../src/emitters', f)
-f.write('\input{section_sensors}\n')
-process('../src/sensors', f)
-f.write('\input{section_integrators}\n')
-process('../src/integrators', f)
-f.write('\input{section_samplers}\n')
-process('../src/samplers', f)
-f.write('\input{section_films}\n')
-process('../src/films', f)
-f.write('\input{section_rfilters}\n')
-f.close()
-os.system('bibtex main.aux')
-os.system('pdflatex main.tex')
-#os.system('pdflatex main.tex | grep -i warning | grep -v "Package \(typearea\|hyperref\)"')
+def process_src(target, src_subdir, section=None):
+ if section is None:
+ section = "section_" + src_subdir
+ target.write('\input{{{0}}}\n'.format(section))
+ process('../src/{0}'.format(src_subdir), target)
+
+def texify(texfile):
+ from subprocess import Popen, PIPE, check_call
+ version = Popen(["pdflatex", "-version"], stdout=PIPE).communicate()[0]
+ # Call decode() to convert from bytes to string, required in Python 3
+ if re.match('.*MiKTeX.*', version.decode()):
+ # MiKTeX's "texify" calls latex/bibtex in tandem automatically
+ print("Running texify on {0}...".format(texfile))
+ check_call(['texify', '-pq', texfile])
+ else:
+ check_call(['pdflatex', texfile])
+ check_call(['bibtex', texfile.replace('.tex', '.aux')])
+ check_call(['pdflatex', texfile])
+ check_call(['pdflatex', texfile])
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+with open('plugins_generated.tex', 'w') as f:
+ process_src(f, 'shapes')
+ process_src(f, 'bsdfs', 'section_bsdf')
+ process_src(f, 'textures')
+ process_src(f, 'subsurface')
+ process_src(f, 'medium', 'section_media')
+ process_src(f, 'phase')
+ process_src(f, 'volume', 'section_volumes')
+ process_src(f, 'emitters')
+ process_src(f, 'sensors')
+ process_src(f, 'integrators')
+ process_src(f, 'samplers')
+ process_src(f, 'films')
+ process_src(f, 'rfilters')
+
+texify('main.tex')
diff --git a/doc/gpl-v3.tex b/doc/gpl-v3.tex
index 54c869a2..d5a64851 100644
--- a/doc/gpl-v3.tex
+++ b/doc/gpl-v3.tex
@@ -1,7 +1,7 @@
\section{License}
\label{sec:license}
Mitsuba is licensed under the terms of Version 3 of the GNU General Public License,
-which is reproduced here in its entirety. The license itself is copyrighted
+which is reproduced here in its entirety. The license itself is copyrighted
\copyright\ 2007 by the Free Software Foundation, Inc. \texttt{http://fsf.org/}.
\subsection{Preamble}
diff --git a/doc/images/animated_transform.jpg b/doc/images/animated_transform.jpg
new file mode 100644
index 00000000..fd287ab1
Binary files /dev/null and b/doc/images/animated_transform.jpg differ
diff --git a/doc/images/annotation_example.jpg b/doc/images/annotation_example.jpg
new file mode 100644
index 00000000..0420429f
Binary files /dev/null and b/doc/images/annotation_example.jpg differ
diff --git a/doc/images/integrator_volpath_hideemitters.jpg b/doc/images/integrator_volpath_hideemitters.jpg
new file mode 100644
index 00000000..d7869c0a
Binary files /dev/null and b/doc/images/integrator_volpath_hideemitters.jpg differ
diff --git a/doc/images/integrator_volpath_normal.jpg b/doc/images/integrator_volpath_normal.jpg
new file mode 100644
index 00000000..adf69e63
Binary files /dev/null and b/doc/images/integrator_volpath_normal.jpg differ
diff --git a/doc/images/shape_cube_basic.jpg b/doc/images/shape_cube_basic.jpg
new file mode 100644
index 00000000..d5f26251
Binary files /dev/null and b/doc/images/shape_cube_basic.jpg differ
diff --git a/doc/images/shape_cube_parameterization.jpg b/doc/images/shape_cube_parameterization.jpg
new file mode 100644
index 00000000..d9f34781
Binary files /dev/null and b/doc/images/shape_cube_parameterization.jpg differ
diff --git a/doc/images/shape_instance_fractal_bot.jpg b/doc/images/shape_instance_fractal_bot.jpg
new file mode 100644
index 00000000..c136c9a7
Binary files /dev/null and b/doc/images/shape_instance_fractal_bot.jpg differ
diff --git a/doc/images/shape_instance_fractal_top.jpg b/doc/images/shape_instance_fractal_top.jpg
new file mode 100644
index 00000000..e827827c
Binary files /dev/null and b/doc/images/shape_instance_fractal_top.jpg differ
diff --git a/doc/integrator.tex b/doc/integrator.tex
index f8bdf85a..e4501830 100644
--- a/doc/integrator.tex
+++ b/doc/integrator.tex
@@ -3,9 +3,9 @@ Suppose you want to design a custom integrator to render scenes in Mitsuba.
There are two general ways you can do this, and which one you should take
mostly depends on the characteristics of your particular integrator.
-The framework distinguishes between \emph{sampling-based} integrators and
-\emph{generic} ones. A sampling-based integrator is able to generate
-(usually unbiased) estimates of the incident radiance along a specified rays, and this
+The framework distinguishes between \emph{sampling-based} integrators and
+\emph{generic} ones. A sampling-based integrator is able to generate
+(usually unbiased) estimates of the incident radiance along specified rays, and this
is done a large number of times to render a scene. A generic integrator
is more like a black box, where no assumptions are made on how the the image is
created. For instance, the VPL renderer uses OpenGL to rasterize the scene
@@ -13,44 +13,44 @@ using hardware acceleration, which certainly doesn't fit into the sampling-based
For that reason, it must be implemented as a generic integrator.
Generally, if you can package up your code to fit into the
-\code{SampleIntegrator} interface, you should do it, because you'll get
+\code{SamplingIntegrator} interface, you should do it, because you'll get
parallelization and network rendering essentially for free. This is done
by transparently sending instances of your integrator class to all participating cores
and assigning small image blocks for each one to work on. Also, sampling-based
integrators can be nested within some other integrators, such as an
irradiance cache or an adaptive integrator. This cannot be done with generic
-integrators due to their black-box nature. Note that it is often still
-possible to parallelize generic integrators, but this involves significantly
+integrators due to their black-box nature. Note that it is often still
+possible to parallelize generic integrators, but this involves significantly
more work.
-In this section, we'll design a rather contrived sampling-based integrator,
-which renders a monochromatic image of your scene, where the intensity
-denotes the distance to the camera. But to get a feel for the overall
-framework, we'll start with an even simpler one, that just renders a
+In this section, we'll design a rather contrived sampling-based integrator,
+which renders a monochromatic image of your scene, where the intensity
+denotes the distance to the camera. But to get a feel for the overall
+framework, we'll start with an even simpler one, that just renders a
solid-color image.
\subsection{Basic implementation}
-In Mitsuba's \code{src/integrators} directory, create a file named
-\code{myIntegrator.cpp}.
+In Mitsuba's \code{src/integrators} directory, create a file named
+\code{myIntegrator.cpp}.
\begin{cpp}
#include
MTS_NAMESPACE_BEGIN
-class MyIntegrator : public SampleIntegrator {
+class MyIntegrator : public SamplingIntegrator {
public:
MTS_DECLARE_CLASS()
};
-MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SampleIntegrator)
+MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SamplingIntegrator)
MTS_EXPORT_PLUGIN(MyIntegrator, "A contrived integrator");
MTS_NAMESPACE_END
\end{cpp}
The \code{scene.h} header file contains all of the dependencies we'll need
for now.
To avoid conflicts with other libraries, the whole framework is located in
-a separate namespace named \code{mitsuba}, and the lines starting with
+a separate namespace named \code{mitsuba}, and the lines starting with
\code{MTS\_NAMESPACE} ensure that our integrator is placed there
as well.
@@ -61,17 +61,17 @@ and serialization/unserialization support. Let's take a look at the second of th
lines, because it contains several important pieces of information:
The suffix \code{S} in \code{MTS\_IMPLEMENT\_CLASS\_S} specifies that this is
-a serializable class, which means that it can be sent over the network or
+a serializable class, which means that it can be sent over the network or
written to disk and later restored. That also implies that certain methods
need to be provided by the implementation --- we'll add those in a moment.
The three following parameters specify the name of this class (\code{MyIntegrator}),
the fact that it is \emph{not} an abstract class (\code{false}), and the name of its
-parent class (\code{SampleIntegrator}).
+parent class (\code{SamplingIntegrator}).
Just below, you can see a line that starts with
\code{MTS\_EXPORT\_PLUGIN}. As the name suggests, this line is only necessary
-for plugins, and it ensures that the specified class (\code{MyIntegrator}) is
+for plugins, and it ensures that the specified class (\code{MyIntegrator}) is
what you want to be instantiated when somebody loads this plugin. It is also
possible to supply a short descriptive string.
\vspace{3mm}
@@ -80,7 +80,7 @@ Let's add an instance variable and a constructor:
\begin{cpp}
public:
/// Initialize the integrator with the specified properties
- MyIntegrator(const Properties &props) : SampleIntegrator(props) {
+ MyIntegrator(const Properties &props) : SamplingIntegrator(props) {
Spectrum defaultColor;
defaultColor.fromLinearRGB(0.2f, 0.5f, 0.2f);
m_color = props.getSpectrum("color", defaultColor);
@@ -106,41 +106,41 @@ Next, we need to add serialization and unserialization support:
\begin{cpp}
/// Unserialize from a binary data stream
MyIntegrator(Stream *stream, InstanceManager *manager)
- : SampleIntegrator(stream, manager) {
+ : SamplingIntegrator(stream, manager) {
m_color = Spectrum(stream);
}
/// Serialize to a binary data stream
void serialize(Stream *stream, InstanceManager *manager) const {
- SampleIntegrator::serialize(stream, manager);
+ SamplingIntegrator::serialize(stream, manager);
m_color.serialize(stream);
}
\end{cpp}
-This makes use of a \emph{stream} abstraction similar in style to Java.
-A stream can represent various things, such as a file, a console session, or a
+This makes use of a \emph{stream} abstraction similar in style to Java.
+A stream can represent various things, such as a file, a console session, or a
network communication link. Especially when dealing with multiple machines,
it is important to realize that the machines may use different binary representations
related to their respective \emph{endianness}. To prevent issues from arising,
-the \code{Stream} interface provides many methods for writing and reading
+the \code{Stream} interface provides many methods for writing and reading
small chunks of data (e.g. \code{writeShort}, \code{readFloat}, ..),
which automatically perform endianness translation. In our case, the
\code{Spectrum} class already provides serialization/unserialization support,
so we don't really have to do anything.
-Note that it is crucial that your code calls the serialization and unserialization
+Note that it is crucial that your code calls the serialization and unserialization
implementations of the superclass, since it will also read/write some
information to the stream.
We haven't used the \texttt{manager} parameter yet, so here is a quick overview
of what it does: if many cases, we don't just want to serialize a single class,
but a whole graph of objects. Some may be referenced many
-times from different places, and potentially there are even cycles. If we just
-naively called the serialization and unserialization implementation of members
-recursively within each class, we'd waste much bandwitdth and potentially
+times from different places, and potentially there are even cycles. If we just
+naively called the serialization and unserialization implementation of members
+recursively within each class, we'd waste much bandwidth and potentially
end up stuck in an infinite recursion.
This is where the instance manager comes in. Every time you want to serialize
-a heap-allocated object (suppose it is of type \code{SomeClass}),
+a heap-allocated object (suppose it is of type \code{SomeClass}),
instead of calling its serialize method, write
\begin{cpp}
@@ -155,7 +155,7 @@ ref myObject = static_cast(manager->getInstance(stream))
Behind the scenes, the object manager adds annotations to the data stream,
which ensure that you will end up with the exact same reference graph on the
-remote side, while only one copy of every object is transmitted and no
+remote side, while only one copy of every object is transmitted and no
infinite recursion can occur. But we digress -- let's go back to our integrator.
\vspace{3mm}
@@ -168,10 +168,11 @@ radiance along a ray differential: here, we simply return the stored color
}
\end{cpp}
-Let's try building the plugin: edit the \code{SConstruct} file in the main
-directory, and add the following line after the comment ''\code{\# Integrators}'':
+Let's try building the plugin: edit the \code{SConscript} file in the
+\code{integrator} directory, and add the following line before the
+last line containing ''\code{Export('plugins')}'':
\begin{cpp}
-plugins += env.SharedLibrary('plugins/myIntegrator', ['src/integrators/myIntegrator.cpp'])
+plugins += env.SharedLibrary('myIntegrator', ['myIntegrator.cpp'])
\end{cpp}
After calling, \texttt{scons}, you should be able to use your new integrator
in parallel rendering jobs and you'll get something like this:
@@ -182,8 +183,8 @@ That is admittedly not very exciting --- so let's do some actual computation.
\subsection{Visualizing depth}
Add an instance variable \code{Float m\_maxDist;} to the implementation. This
will store the maximum distance from the camera to any object, which is needed
-to map distances into the $[0,1]$ range. Note the upper-case \code{Float} ---
-this means that either a single- or a double-precision variable is
+to map distances into the $[0,1]$ range. Note the upper-case \code{Float} ---
+this means that either a single- or a double-precision variable is
substituted based the compilation flags. This variable constitutes local
state, thus it must not be forgotten in the serialization- and unserialization routines:
append
@@ -202,18 +203,19 @@ To avoid having to do this every time \code{Li()} is called,
we can override the \code{preprocess} function:
\begin{cpp}
/// Preprocess function -- called on the initiating machine
- bool preprocess(const Scene *scene, RenderQueue *queue,
- const RenderJob *job, int sceneResID, int cameraResID,
+ bool preprocess(const Scene *scene, RenderQueue *queue,
+ const RenderJob *job, int sceneResID, int cameraResID,
int samplerResID) {
- SampleIntegrator::preprocess(scene, queue, job, sceneResID,
+ SamplingIntegrator::preprocess(scene, queue, job, sceneResID,
cameraResID, samplerResID);
const AABB &sceneAABB = scene->getAABB();
- Point cameraPosition = scene->getCamera()->getPosition();
+ /* Find the camera position at t=0 seconds */
+ Point cameraPosition = scene->getSensor()->getWorldTransform()->eval(0).transformAffine(Point(0.0f));
m_maxDist = - std::numeric_limits::infinity();
for (int i=0; i<8; ++i)
- m_maxDist = std::max(m_maxDist,
+ m_maxDist = std::max(m_maxDist,
(cameraPosition - sceneAABB.getCorner(i)).length());
return true;
@@ -228,13 +230,13 @@ global resource identifiers. When a network render job runs, many associated
pieces of information (the scene, the camera, etc.) are wrapped into global resource chunks
shared amongst all nodes, and these can be referenced using such identifiers.
-One important aspect of the \code{preprocess} function is that it is executed
-on the initiating node and before any of the parallel rendering begins.
+One important aspect of the \code{preprocess} function is that it is executed
+on the initiating node and before any of the parallel rendering begins.
This can be used to compute certain things only once. Any
information updated here (such as \code{m\_maxDist}) will be forwarded to the
other nodes before the rendering begins.
-Now, replace the body of the \code{Li} method with
+Now, replace the body of the \code{Li} method with
\begin{cpp}
if (rRec.rayIntersect(r)) {
Float distance = rRec.its.t;
@@ -255,36 +257,36 @@ to intersect a ray against the scene actually works like this:
/* Do something with the intersection stored in 'its' */
}
\end{cpp}
-As you can see, we did something slightly different in the distance
+As you can see, we did something slightly different in the distance
renderer fragment above (we called \code{RadianceQueryRecord::rayIntersect()}
on the supplied parameter \code{rRec}), and the reason for this is \emph{nesting}.
\subsection{Nesting}
The idea of of nesting is that sampling-based rendering techniques can be
-embedded within each other for added flexibility: for instance, one
-might concoct a 1-bounce indirect rendering technique complete with
-irradiance caching and adaptive integration simply by writing the following
+embedded within each other for added flexibility: for instance, one
+might concoct a 1-bounce indirect rendering technique complete with
+irradiance caching and adaptive integration simply by writing the following
into a scene XML file:
\begin{xml}
-
+
-
+
-
+
\end{xml}
-To support this kind of complex interaction, some information needs to be passed between the
+To support this kind of complex interaction, some information needs to be passed between the
integrators, and the \code{RadianceQueryRecord} parameter of the function
-\code{SampleIntegrator::Li} is used for this.
+\code{SamplingIntegrator::Li} is used for this.
-This brings us back to the odd way of computing an intersection a moment ago:
-the reason why we didn't just do this by calling
+This brings us back to the odd way of computing an intersection a moment ago:
+the reason why we didn't just do this by calling
\code{scene->rayIntersect()} is that our technique might actually be nested
within a parent technique, which has already computed this intersection.
-To avoid wasting resources, the function \code{rRec.rayIntersect} first
-determines whether an intersection record has already been provided.
-If yes, it does nothing. Otherwise, it takes care of computing one.
+To avoid wasting resources, the function \code{rRec.rayIntersect} first
+determines whether an intersection record has already been provided.
+If yes, it does nothing. Otherwise, it takes care of computing one.
The radiance query record also lists the particular \emph{types} of radiance requested
by the parent integrator -- your implementation should respect these as much
diff --git a/doc/introduction.tex b/doc/introduction.tex
index 0fb36cc9..aa9996f2 100644
--- a/doc/introduction.tex
+++ b/doc/introduction.tex
@@ -1,69 +1,69 @@
\part{Using Mitsuba}
\textbf{Disclaimer:} This is manual documents the usage, file format, and
-internal design of the Mitsuba rendering system. It is currently a work
+internal design of the Mitsuba rendering system. It is currently a work
in progress, hence some parts may still be incomplete or missing.
\section{About Mitsuba}
-Mitsuba is a research-oriented rendering system in the style of PBRT
+Mitsuba is a research-oriented rendering system in the style of PBRT
(\url{www.pbrt.org}), from which it derives much inspiration.
-It is written in portable C++, implements unbiased as well
-as biased techniques, and contains heavy optimizations targeted
-towards current CPU architectures.
-Mitsuba is extremely modular: it consists of a small set of core libraries
-and over 100 different plugins that implement functionality ranging
+It is written in portable C++, implements unbiased as well
+as biased techniques, and contains heavy optimizations targeted
+towards current CPU architectures.
+Mitsuba is extremely modular: it consists of a small set of core libraries
+and over 100 different plugins that implement functionality ranging
from materials and light sources to complete rendering algorithms.
-In comparison to other open source renderers, Mitsuba places a strong
-emphasis on experimental rendering techniques, such as path-based
+In comparison to other open source renderers, Mitsuba places a strong
+emphasis on experimental rendering techniques, such as path-based
formulations of Metropolis Light Transport and volumetric
modeling approaches. Thus, it may be of genuine interest to those who
-would like to experiment with such techniques that haven't yet found
-their way into mainstream renderers, and it also provides a solid
+would like to experiment with such techniques that haven't yet found
+their way into mainstream renderers, and it also provides a solid
foundation for research in this domain.
Other design considerations are:
\parheader{Performance:}
-Mitsuba provides optimized implementations of the most commonly
+Mitsuba provides optimized implementations of the most commonly
used rendering algorithms. By virtue of running on a shared foundation, comparisons between them can
-better highlight the merits and limitations of different approaches. This is in contrast to, say,
-comparing two completely different rendering products, where technical information on the underlying
+better highlight the merits and limitations of different approaches. This is in contrast to, say,
+comparing two completely different rendering products, where technical information on the underlying
implementation is often intentionally not provided.
\parheader{Robustness:}
-In many cases, physically-based rendering packages force the user to model scenes with the underlying
-algorithm (specifically: its convergence behavior) in mind. For instance, glass windows are routinely
-replaced with light portals, photons must be manually guided to the relevant parts of a scene, and
-interactions with complex materials are taboo, since they cannot be importance sampled exactly.
-One focus of Mitsuba will be to develop path-space light transport algorithms, which handle such
+In many cases, physically-based rendering packages force the user to model scenes with the underlying
+algorithm (specifically: its convergence behavior) in mind. For instance, glass windows are routinely
+replaced with light portals, photons must be manually guided to the relevant parts of a scene, and
+interactions with complex materials are taboo, since they cannot be importance sampled exactly.
+One focus of Mitsuba will be to develop path-space light transport algorithms, which handle such
cases more gracefully.
-\parheader{Scalability:} Mitsuba instances can be merged into large clusters, which transparently distribute and
+\parheader{Scalability:} Mitsuba instances can be merged into large clusters, which transparently distribute and
jointly execute tasks assigned to them using only node-to-node communcation. It has successfully
scaled to large-scale renderings that involved more than 1000 cores working on a single image.
-Most algorithms in Mitsuba are written using a generic parallelization layer, which can tap
+Most algorithms in Mitsuba are written using a generic parallelization layer, which can tap
into this cluster-wide parallelism. The principle is that if any component of the renderer produces
-work that takes longer than a second or so, it at least ought to use all of the processing power
+work that takes longer than a second or so, it at least ought to use all of the processing power
it can get.
-The renderer also tries to be very conservative in its use of memory, which allows it to handle
+The renderer also tries to be very conservative in its use of memory, which allows it to handle
large scenes (>30 million triangles) and multi-gigabyte heterogeneous volumes on consumer hardware.
\parheader{Realism and accuracy:} Mitsuba comes with a large repository of physically-based
reflectance models for surfaces and participating media. These implementations
are designed so that they can be used to build complex shader networks, while
providing enough flexibility to be compatible with a wide range of different
-rendering techniques, including path tracing, photon mapping, hardware-accelerated rendering
+rendering techniques, including path tracing, photon mapping, hardware-accelerated rendering
and bidirectional methods.
-The unbiased path tracers in Mitsuba are battle-proven and produce
-reference-quality results that can be used for predictive rendering, and to verify
-implementations of other rendering methods.
+The unbiased path tracers in Mitsuba are battle-proven and produce
+reference-quality results that can be used for predictive rendering, and to verify
+implementations of other rendering methods.
\parheader{Usability:}
-Mitsuba comes with a graphical user interface to interactively explore scenes. Once a suitable
-viewpoint has been found, it is straightforward to perform renderings using any of the
-implemented rendering techniques, while tweaking their parameters to find the most suitable
+Mitsuba comes with a graphical user interface to interactively explore scenes. Once a suitable
+viewpoint has been found, it is straightforward to perform renderings using any of the
+implemented rendering techniques, while tweaking their parameters to find the most suitable
settings. Experimental integration into Blender 2.5 is also available.
\section{Limitations}
@@ -72,22 +72,22 @@ However, there are some inherent limitations of the system that users should be
\begin{enumerate}[(i)]
\item \textbf{Wave Optics}: Mitsuba is fundamentally based on the geometric optics toolbox,
which means that it generally does not simulate phenomena that arise due to
-the wave properties of light (diffraction, for instance).
+the wave properties of light (diffraction, for instance).
\item \textbf{Polarization}: Mitsuba does not account for polarization. In
other words, light is always assumed to be randomly polarized. This can be a problem for
some predictive rendering applications.
\item \textbf{Numerical accuracy}: The accuracy of any result produced with this
-system is constrained by the underlying floating point computations.
+system is constrained by the underlying floating point computations.
For instance, an intricate scene that can be rendered without problems,
may produce the wrong answer when all objects are translated away from the
origin by a large distance, since floating point numbers are spaced less densely at the
-new position. To avoid these sorts of pitfalls, it is good to have a basic
-understanding of the IEEE-754 standard.
+new position. To avoid these sorts of pitfalls, it is good to have a basic
+understanding of the IEEE-754 standard.
\end{enumerate}
\section{License}
-Mitsuba is free software and can be redistributed and modified under the terms of the GNU General
+Mitsuba is free software and can be redistributed and modified under the terms of the GNU General
Public License (Version 3) as provided by the Free Software Foundation.
\remarks{
diff --git a/doc/macros.sty b/doc/macros.sty
index ea25e5e3..6eae9806 100644
--- a/doc/macros.sty
+++ b/doc/macros.sty
@@ -92,7 +92,7 @@
}}\hspace*{1cm}}\end{figure}
\setlength\fboxrule\fboxrulebackup
}
-
+
\newcommand{\renderings}[1]{
\begin{figure}[htb!]
\setcounter{subfigure}{0}
diff --git a/doc/main.tex b/doc/main.tex
index b063a71a..29322217 100644
--- a/doc/main.tex
+++ b/doc/main.tex
@@ -27,6 +27,7 @@
\usepackage{ifthen}
\usepackage{longtable}
\usepackage{wrapfig}
+\usepackage{footnote} % savenotes environment
% Make sure that ligatures remain searchable in the PDF
\input glyphtounicode
@@ -39,8 +40,8 @@
\setcounter{secnumdepth}{3}
\setcounter{tocdepth}{3}
-\newcommand{\MitsubaVersion}{0.4.1}
-\newcommand{\MitsubaYear}{2012}
+\newcommand{\MitsubaVersion}{0.4.4}
+\newcommand{\MitsubaYear}{2013}
\typearea[current]{last}
\raggedbottom
@@ -116,7 +117,7 @@
medium,film,sampler,integrator,emitter,sensor,
translate,rotate,scale,lookat,point,vector,matrix,
include,fscat,volume,alias,rfilter,boolean,
- subsurface
+ subsurface,animation
},
}
diff --git a/doc/misc.tex b/doc/misc.tex
index a25f372f..890915db 100644
--- a/doc/misc.tex
+++ b/doc/misc.tex
@@ -2,9 +2,9 @@
\label{sec:miscellaneous}
\subsection{A word about color spaces}
\label{sec:colorspaces}
-When using one of the downloadable release builds of Mitsuba, or a version
+When using one of the downloadable release builds of Mitsuba, or a version
that was compiled with the default settings, the renderer internally
-operates in \emph{RGB mode}: all computations are performed using a representation
+operates in \emph{RGB mode}: all computations are performed using a representation
that is based on the three colors red, green, and blue.
More specifically, these are the intensities of the red, green, and blue primaries
@@ -14,20 +14,20 @@ rendering. This is an intuitive default which yields fast computations and
satisfactory results for most applications.
Low dynamic range images exported using the \pluginref{ldrfilm} will be stored in a
-sRGB-compatible format that accounts for the custom gamma curves mandated by this
+sRGB-compatible format that accounts for the custom gamma curves mandated by this
standard. They should display as intended across a wide range of display devices.
When saving high dynamic range output (e.g. OpenEXR, RGBE, or PFM), the computed radiance
values are exported in a linear form (i.e. without having the sRGB gamma curve applied to it),
which is the most common way of storing high dynamic range data.
-It is important to keep in mind that other applications may not support this
+It is important to keep in mind that other applications may not support this
``linearized sRGB'' space---in particular, the Mac OS preview currently
does not display images with this encoding correctly.
-\subsubsection{Spectral mode}
+\subsubsection{Spectral rendering}
Some predictive rendering applications will require a more realistic space for
interreflection computations. In such cases, Mitsuba can be switched to \emph{spectral mode}.
-This can be done by compiling it with the \code{SPECTRUM\_SAMPLES=}$n$ parameter
+This can be done by compiling it with the \code{SPECTRUM\_SAMPLES=}$n$ parameter
(\secref{compiling}), where $n$ is usually between 15 and 30.
Now, all input parameters are converted into color spectra with the specified
diff --git a/doc/parallelization.tex b/doc/parallelization.tex
index 20ace309..224e3dfb 100644
--- a/doc/parallelization.tex
+++ b/doc/parallelization.tex
@@ -5,23 +5,23 @@ The guiding principle is that if an operation can potentially take longer than a
few seconds, it ought to use all the cores it can get.
Here, we will go through a basic example, which will hopefully provide sufficient intuition
-to realize more complex tasks.
+to realize more complex tasks.
To obtain good (i.e. close to linear) speedups, the parallelization layer depends on
several key assumptions of the task to be parallelized:
\begin{itemize}
\item The task can easily be split up into a discrete number of \emph{work units}, which requires a negligible amount of computation.
-\item Each work unit is small in footprint so that it can easily be transferred over the network or shared memory.
+\item Each work unit is small in footprint so that it can easily be transferred over the network or shared memory.
\item A work unit constitutes a significant amount of computation, which by far outweighs the cost of transmitting it to another node.
\item The \emph{work result} obtained by processing a work unit is again small in footprint, so that it can easily be transferred back.
\item Merging all work results to a solution of the whole problem requires a negligible amount of additional computation.
\end{itemize}
-This essentially corresponds to a parallel version of \emph{Map} (one part of \emph{Map\&Reduce}) and is
-ideally suited for most rendering workloads.
+This essentially corresponds to a parallel version of \emph{Map} (one part of \emph{Map\&Reduce}) and is
+ideally suited for most rendering workloads.
-The example we consider here computes a \code{ROT13} ``encryption'' of a string, which
+The example we consider here computes a \code{ROT13} ``encryption'' of a string, which
most certainly violates the ``significant amount of computation'' assumption.
It was chosen due to the inherent parallelism and simplicity of this task.
-While of course over-engineered to the extreme, the example hopefully
+While of course over-engineered to the extreme, the example hopefully
communicates how this framework might be used in more complex scenarios.
We will implement this program as a plugin for the utility launcher \code{mtsutil}, which
@@ -49,9 +49,9 @@ MTS_NAMESPACE_END
\end{cpp}
The file must also be added to the build system: insert the line
\begin{shell}
-plugins += $\texttt{env}$.SharedLibrary('plugins/rot13', ['src/utils/rot13.cpp'])
+plugins += $\texttt{env}$.SharedLibrary('rot13', ['rot13.cpp'])
\end{shell}
-into the SConscript (near the comment ``\code{Build the plugins -- utilities}''). After compiling
+into the \code{utils/SConscript} file. After compiling
using \code{scons}, the \code{mtsutil} binary should automatically pick up your new utility plugin:
\begin{shell}
$\texttt{\$}$ mtsutil
@@ -69,7 +69,7 @@ $\texttt{\$}$ mtsutil rot13
Hello world!
\end{shell}
-Our approach for implementing distributed ROT13 will be to treat each character as an
+Our approach for implementing distributed ROT13 will be to treat each character as an
indpendent work unit. Since the ordering is lost when sending out work units, we must
also include the position of the character in both the work units and the work results.
@@ -78,7 +78,7 @@ For reference, here are the interfaces of \code{WorkUnit} and \code{WorkResult}:
\begin{cpp}
/**
* Abstract work unit. Represents a small amount of information
- * that encodes part of a larger processing task.
+ * that encodes part of a larger processing task.
*/
class MTS_EXPORT_CORE WorkUnit : public Object {
public:
@@ -100,7 +100,7 @@ protected:
virtual ~WorkUnit() { }
};
/**
- * Abstract work result. Represents the information that encodes
+ * Abstract work result. Represents the information that encodes
* the result of a processed WorkUnit instance.
*/
class MTS_EXPORT_CORE WorkResult : public Object {
@@ -125,7 +125,7 @@ In our case, the \code{WorkUnit} implementation then looks like this:
class ROT13WorkUnit : public WorkUnit {
public:
void set(const WorkUnit *workUnit) {
- const ROT13WorkUnit *wu =
+ const ROT13WorkUnit *wu =
static_cast(workUnit);
m_char = wu->m_char;
m_pos = wu->m_pos;
@@ -135,10 +135,10 @@ public:
m_char = stream->readChar();
m_pos = stream->readInt();
}
-
+
void save(Stream *stream) const {
stream->writeChar(m_char);
- stream->writeInt(m_pos);
+ stream->writeInt(m_pos);
}
std::string toString() const {
@@ -163,14 +163,14 @@ private:
MTS_IMPLEMENT_CLASS(ROT13WorkUnit, false, WorkUnit)
\end{cpp}
-The \code{ROT13WorkResult} implementation is not reproduced since it is almost identical
+The \code{ROT13WorkResult} implementation is not reproduced since it is almost identical
(except that it doesn't need the \code{set} method).
-The similarity is not true in general: for most algorithms, the work unit and result
+The similarity is not true in general: for most algorithms, the work unit and result
will look completely different.
Next, we need a class, which does the actual work of turning a work unit into a work result
(a subclass of \code{WorkProcessor}). Again, we need to implement a range of support
-methods to enable the various ways in which work processor instances will be submitted to
+methods to enable the various ways in which work processor instances will be submitted to
remote worker nodes and replicated amongst local threads.
\begin{cpp}
class ROT13WorkProcessor : public WorkProcessor {
@@ -190,7 +190,7 @@ public:
return new ROT13WorkUnit();
}
- ref createWorkResult() const {
+ ref createWorkResult() const {
return new ROT13WorkResult();
}
@@ -202,9 +202,9 @@ public:
void prepare() { }
/// Do the actual computation
- void process(const WorkUnit *workUnit, WorkResult *workResult,
+ void process(const WorkUnit *workUnit, WorkResult *workResult,
const bool &stop) {
- const ROT13WorkUnit *wu
+ const ROT13WorkUnit *wu
= static_cast(workUnit);
ROT13WorkResult *wr = static_cast(workResult);
wr->setPos(wu->getPos());
@@ -252,9 +252,9 @@ public:
}
void processResult(const WorkResult *result, bool cancelled) {
- if (cancelled) // indicates a work unit, which was
+ if (cancelled) // indicates a work unit, which was
return; // cancelled partly through its execution
- const ROT13WorkResult *wr =
+ const ROT13WorkResult *wr =
static_cast(result);
m_output[wr->getPos()] = wr->getChar();
}
@@ -273,7 +273,7 @@ MTS_IMPLEMENT_CLASS(ROT13Process, false, ParallelProcess)
\end{cpp}
The \code{generateWork} method produces work units until we have moved past
the end of the string, after which it returns the status code \code{EFailure}.
-Note the method \code{getRequiredPlugins()}: this is necessary to use
+Note the method \code{getRequiredPlugins()}: this is necessary to use
the utility across
machines. When communicating with another node, it ensures that the remote side
loads the \code{ROT13*} classes at the right moment.
@@ -302,9 +302,9 @@ from the main utility function (the `Hello World' code we wrote earlier). We can
}
\end{cpp}
After compiling everything using \code{scons}, a simple example
-involving the utility would be to encode a string (e.g. \code{SECUREBYDESIGN}), while
+involving the utility would be to encode a string (e.g. \code{SECUREBYDESIGN}), while
forwarding all computation to a network machine. (\code{-p0} disables
-all local worker threads). Adding a verbose flag (\code{-v}) shows
+all local worker threads). Adding a verbose flag (\code{-v}) shows
some additional scheduling information:
\begin{shell}
$\texttt{\$}$ mtsutil -vc feynman -p0 rot13 SECUREBYDESIGN
diff --git a/doc/plugins.tex b/doc/plugins.tex
index a86ef0f5..91824168 100644
--- a/doc/plugins.tex
+++ b/doc/plugins.tex
@@ -1,9 +1,9 @@
\section{Plugin reference}
\vspace{-2mm}
-The following subsections describe the available Mitsuba plugins, usually along
+The following subsections describe the available Mitsuba plugins, usually along
with example renderings and a description of what each parameter does.
They are separated into subsections covering textures, surface scattering
-models, etc.
+models, etc.
Each subsection begins with a brief general description.
The documentation of a plugin always starts on a new page and is preceded
@@ -14,7 +14,7 @@ by a table similar to the one below:
\default{\code{false}}
}
\parameter{darkMatter}{\Float}{
- Controls the proportionate amount of dark matter present in the scene.
+ Controls the proportionate amount of dark matter present in the scene.
\default{0.83}
}
}
@@ -27,10 +27,10 @@ this description, it can be instantiated from an XML scene file using a custom c
\end{xml}
-In some cases\footnote{Note that obvious parameters are generally omitted.
-For instance, all shape plugins accept a surface scattering plugin, but this
-is left out from the documentation for brevity.}, plugins also indicate that they accept nested plugins
-as input arguments. These can either be \emph{named} or \emph{unnamed}. If
+In some cases\footnote{Note that obvious parameters are generally omitted.
+For instance, all shape plugins accept a surface scattering plugin, but this
+is left out from the documentation for brevity.}, plugins also indicate that they accept nested plugins
+as input arguments. These can either be \emph{named} or \emph{unnamed}. If
the \code{amazing} integrator also accepted the following two parameters\vspace{-2mm}
\parameters{
\parameter{\Unnamed}{\Integrator}{A nested integrator which does the actual hard work}
diff --git a/doc/python.tex b/doc/python.tex
index 775c1c3e..43227b7d 100644
--- a/doc/python.tex
+++ b/doc/python.tex
@@ -1,14 +1,29 @@
\section{Python integration}
\label{sec:python}
-A recent feature of Mitsuba is a simple Python interface to the renderer API.
+A recent feature of Mitsuba is a Python interface to the renderer API.
While the interface is still limited at this point, it can already be
used for many useful purposes. To access the API, start your Python
interpreter and enter
\begin{python}
import mitsuba
\end{python}
+\paragraph{Mac OS:}
For this to work on MacOS X, you will first have to run the ``\emph{Apple
Menu}$\to$\emph{Command-line access}'' menu item from within Mitsuba.
+In the unlikely case that you run into shared library loading issues (this is
+taken care of by default), you may have to set the \code{LD\_LIBRARY\_PATH}
+environment variable before starting Python so that it points to where the
+Mitsuba libraries are installed (e.g. the \code{Mitsuba.app/Contents/Frameworks}
+directory).
+
+When Python crashes directly after the \code{import mitsuba} statement,
+make sure that Mitsuba is linked against the right Python distribution
+(i.e. matching the \code{python} binary you are using). For e.g. Python
+2.7, this can be done by adjusting the \code{PYTHON27INCLUDE} and
+\code{PYTHON27LIBDIR} variables in \code{config.py}. For other versions,
+adjust the numbers accordingly.
+
+\paragraph{Windows and Linux:}
On Windows and \emph{non-packaged} Linux builds, you may have to explicitly
specify the required extension search path before issuing the \code{import} command, e.g.:
\begin{python}
@@ -29,6 +44,9 @@ os.environ['PATH'] = 'path-to-mitsuba-directory' + os.pathsep + os.environ['PATH
import mitsuba
\end{python}
+In rare cases when running on Linux, it may also be necessary to set the
+\code{LD\_LIBRARY\_PATH} environment variable before starting Python so that it
+points to where the Mitsuba core libraries are installed.
For an overview of the currently exposed API subset, please refer
to the following page: \url{http://www.mitsuba-renderer.org/api/group__libpython.html}.
@@ -64,8 +82,8 @@ classes, function, or entire namespaces when running an interactive Python shell
...
\end{shell}
The docstrings list the currently exported functionality, as well as C++ and Python signatures, but they
-don't document what these functions actually do. The web API documentation is the preferred source for
-this information.
+don't document what these functions actually do. The web API documentation is
+the preferred source of this information.
\subsection{Basics}
Generally, the Python API tries to mimic the C++ API as closely as possible.
@@ -337,3 +355,43 @@ logger.setLogLevel(EDebug)
Log(EInfo, 'Test message')
\end{python}
+\subsubsection{Rendering a turntable animation with motion blur}
+Rendering a turntable animation is a fairly common task that is
+conveniently accomplished via the Python interface. In a turntable
+video, the camera rotates around a completely static object or scene.
+The following snippet does this for the material test ball scene downloadable
+on the main website, complete with motion blur. It assumes that the
+scene and scheduler have been set up appropriately using one of the previous
+snippets.
+\begin{python}
+sensor = scene.getSensor()
+sensor.setShutterOpen(0)
+sensor.setShutterOpenTime(1)
+
+stepSize = 5
+for i in range(0,360 / stepSize):
+ rotationCur = Transform.rotate(Vector(0, 0, 1), i*stepSize);
+ rotationNext = Transform.rotate(Vector(0, 0, 1), (i+1)*stepSize);
+
+ trafoCur = Transform.lookAt(rotationCur * Point(0,-6,4),
+ Point(0, 0, .5), rotationCur * Vector(0, 1, 0))
+ trafoNext = Transform.lookAt(rotationNext * Point(0,-6,4),
+ Point(0, 0, .5), rotationNext * Vector(0, 1, 0))
+
+ atrafo = AnimatedTransform()
+ atrafo.appendTransform(0, trafoCur)
+ atrafo.appendTransform(1, trafoNext)
+ atrafo.sortAndSimplify()
+ sensor.setWorldTransform(atrafo)
+
+ scene.setDestinationFile('frame_%03i.png' % i)
+ job = RenderJob('job_%i' % i, scene, queue)
+ job.start()
+
+ queue.waitLeft(0)
+ queue.join()
+\end{python}
+A useful property of this approach is that scene loading and initialization
+need only take place once. Performance-wise, this compares favourably with
+running many separate rendering jobs, e.g. using the \code{mitsuba}
+command-line executable.
diff --git a/doc/section_bsdf.tex b/doc/section_bsdf.tex
index a03050ec..2e835c94 100644
--- a/doc/section_bsdf.tex
+++ b/doc/section_bsdf.tex
@@ -13,34 +13,34 @@
\label{sec:bsdfs}
Surface scattering models describe the manner in which light interacts
-with surfaces in the scene. They conveniently summarize the mesoscopic
-scattering processes that take place within the material and
+with surfaces in the scene. They conveniently summarize the mesoscopic
+scattering processes that take place within the material and
cause it to look the way it does.
-This represents one central component of the material system in Mitsuba---another
-part of the renderer concerns itself with what happens
-\emph{in between} surface interactions. For more information on this aspect,
+This represents one central component of the material system in Mitsuba---another
+part of the renderer concerns itself with what happens
+\emph{in between} surface interactions. For more information on this aspect,
please refer to Sections~\ref{sec:media} and \ref{sec:subsurface}.
-This section presents an overview of all surface scattering models that are
+This section presents an overview of all surface scattering models that are
supported, along with their parameters.
\subsubsection*{BSDFs}
-To achieve realistic results, Mitsuba comes with a library of both
+To achieve realistic results, Mitsuba comes with a library of both
general-purpose surface scattering models (smooth or rough glass, metal,
plastic, etc.) and specializations to particular materials (woven cloth,
masks, etc.). Some model plugins fit neither category and can best be described
-as \emph{modifiers} that are applied on top of one or more scattering models.
+as \emph{modifiers} that are applied on top of one or more scattering models.
-Throughout the documentation and within the scene description
+Throughout the documentation and within the scene description
language, the word \emph{BSDF} is used synonymously with the term ``surface
-scattering model''. This is an abbreviation for \emph{Bidirectional
-Scattering Distribution Function}, a more precise technical
-term.
+scattering model''. This is an abbreviation for \emph{Bidirectional
+Scattering Distribution Function}, a more precise technical
+term.
-In Mitsuba, BSDFs are
+In Mitsuba, BSDFs are
assigned to \emph{shapes}, which describe the visible surfaces in
the scene. In the scene description language, this assignment can
-either be performed by nesting BSDFs within shapes, or they can
-be named and then later referenced by their name.
+either be performed by nesting BSDFs within shapes, or they can
+be named and then later referenced by their name.
The following fragment shows an example of both kinds of usages:
\begin{xml}
@@ -75,35 +75,35 @@ memory usage.
\label{fig:glass-explanation}
Some of the scattering models in Mitsuba need to know
the indices of refraction on the exterior and interior-facing
- side of a surface.
+ side of a surface.
It is therefore important to decompose the mesh into meaningful
separate surfaces corresponding to each index of refraction change.
The example here shows such a decomposition for a water-filled Glass.
}
\end{figure}
-A vital consideration when modeling a scene in a physically-based rendering
-system is that the used materials do not violate physical properties, and
+A vital consideration when modeling a scene in a physically-based rendering
+system is that the used materials do not violate physical properties, and
that their arrangement is meaningful. For instance, imagine having designed
-an architectural interior scene that looks good except for a white desk that
-seems a bit too dark. A closer inspection reveals that it uses a Lambertian
-material with a diffuse reflectance of $0.9$.
+an architectural interior scene that looks good except for a white desk that
+seems a bit too dark. A closer inspection reveals that it uses a Lambertian
+material with a diffuse reflectance of $0.9$.
-In many rendering systems, it would be feasible to increase the
-reflectance value above $1.0$ in such a situation. But in Mitsuba, even a
-small surface that reflects a little more light than it receives will
-likely break the available rendering algorithms, or cause them to produce otherwise
+In many rendering systems, it would be feasible to increase the
+reflectance value above $1.0$ in such a situation. But in Mitsuba, even a
+small surface that reflects a little more light than it receives will
+likely break the available rendering algorithms, or cause them to produce otherwise
unpredictable results. In fact, the right solution in this case would be to switch to
a different the lighting setup that causes more illumination to be received by
-the desk and then \emph{reduce} the material's reflectance---after all, it is quite unlikely that
+the desk and then \emph{reduce} the material's reflectance---after all, it is quite unlikely that
one could find a real-world desk that reflects 90\% of all incident light.
As another example of the necessity for a meaningful material description, consider
-the glass model illustrated in \figref{glass-explanation}. Here, careful thinking
-is needed to decompose the object into boundaries that mark index of
+the glass model illustrated in \figref{glass-explanation}. Here, careful thinking
+is needed to decompose the object into boundaries that mark index of
refraction-changes. If this is done incorrectly and a beam of light can
potentially pass through a sequence of incompatible index of refraction changes (e.g. $1.00\to 1.33$
followed by $1.50\to1.33$), the output is undefined and will quite likely
-even contain inaccuracies in parts of the scene that are far
+even contain inaccuracies in parts of the scene that are far
away from the glass.
diff --git a/doc/section_films.tex b/doc/section_films.tex
index 9fdb0b68..325ec17d 100644
--- a/doc/section_films.tex
+++ b/doc/section_films.tex
@@ -3,7 +3,7 @@
\label{sec:films}
A film defines how conducted measurements are stored and converted into the final
output file that is written to disk at the end of the rendering process. Mitsuba comes with a few
-films that can write to high and low dynamic range image formats (OpenEXR, JPEG or PNG), as well
+films that can write to high and low dynamic range image formats (OpenEXR, JPEG or PNG), as well
more scientifically oriented data formats (e.g. MATLAB or Mathematica).
In the XML scene description language, a normal film configuration might look as follows
@@ -20,7 +20,7 @@ In the XML scene description language, a normal film configuration might look as
-
@@ -28,6 +28,6 @@ In the XML scene description language, a normal film configuration might look as
\end{xml}
The \code{film} plugin should be instantiated nested inside a \code{sensor} declaration.
-Note how the output filename is never specified---it is automatically inferred
-from the scene filename and can be manually overridden by passing the configuration parameter
+Note how the output filename is never specified---it is automatically inferred
+from the scene filename and can be manually overridden by passing the configuration parameter
\code{-o} to the \code{mitsuba} executable when rendering from the command line.
diff --git a/doc/section_integrators.tex b/doc/section_integrators.tex
index 1afce34d..16e6a671 100644
--- a/doc/section_integrators.tex
+++ b/doc/section_integrators.tex
@@ -1,14 +1,14 @@
\newpage
\subsection{Integrators}
\label{sec:integrators}
-In Mitsuba, the different rendering techniques are collectively referred to as
+In Mitsuba, the different rendering techniques are collectively referred to as
\emph{integrators}, since they perform integration over a high-dimensional
space. Each integrator represents a specific approach for solving
the light transport equation---usually favored in certain scenarios, but
at the same time affected by its own set of intrinsic limitations.
-Therefore, it is important to carefully select an integrator based on
-user-specified accuracy requirements and properties of the scene to be
-rendered.
+Therefore, it is important to carefully select an integrator based on
+user-specified accuracy requirements and properties of the scene to be
+rendered.
In Mitsuba's XML description language, a single integrator
is usually instantiated by declaring it at the top level within the
@@ -28,15 +28,15 @@ scene, e.g.
\end{xml}
-This section gives a brief overview of the available choices
+This section gives a brief overview of the available choices
along with their parameters.
\subsubsection*{Choosing an integrator}
-Due to the large number of integrators in Mitsuba, the decision of which
+Due to the large number of integrators in Mitsuba, the decision of which
one is suitable may seem daunting. Assuming that the goal is to solve
the full light transport equation without approximations, a few integrators
(\pluginref{ao}, \pluginref{direct}, \pluginref{vpl})
-can already be ruled out. The adjoint particle tracer \pluginref{ptracer} is
+can already be ruled out. The adjoint particle tracer \pluginref{ptracer} is
also rarely used.
The following ``algorithm'' may help to decide amongst the remaining ones:
@@ -46,11 +46,11 @@ The following ``algorithm'' may help to decide amongst the remaining ones:
Mitsuba currently comes with three path tracer variations that target different setups: It your
scene contains no media and no surfaces with opacity masks, use the plain path tracer (\pluginref{path}).
-Otherwise, use one of the volumetric path tracers (\pluginref[volpathsimple]{volpath\_simple}
+Otherwise, use one of the volumetric path tracers (\pluginref[volpathsimple]{volpath\_simple}
or \pluginref{volpath}). The latter is preferable if the scene contains glossy surface scattering models.
-\item If step 1 produced poor (i.e. noisy and slowly converging) results, try
+\item If step 1 produced poor (i.e. noisy and slowly converging) results, try
the bidirectional path tracer (\pluginref{bdpt}).
-\item If steps 1 and 2 failed, the scene contains a relatively difficult lighting setup, potentially
+\item If steps 1 and 2 failed, the scene contains a relatively difficult lighting setup, potentially
including interaction with complex materials.
In many cases, these difficulties can be greatly ameliorated by running a ``metropolized'' version
of a path tracer. This is implemented in the Primary Sample Space MLT (\pluginref{pssmlt}) plugin.
@@ -69,7 +69,7 @@ method (\pluginref{mlt}, \pluginref{erpt}).
\smallrendering{Max. depth = $\infty$}{pathdepth-all}
\caption{
\label{fig:pathdepths}
- These Cornell box renderings demonstrate the visual
+ These Cornell box renderings demonstrate the visual
effect of a maximum path depth. As the paths
are allowed to grow longer, the color saturation
increases due to multiple scattering interactions
@@ -79,13 +79,13 @@ method (\pluginref{mlt}, \pluginref{erpt}).
\end{figure}
Almost all integrators use the concept of \emph{path depth}.
-Here, a path refers to a chain of scattering events that
+Here, a path refers to a chain of scattering events that
starts at the light source and ends at the eye or sensor.
-It is often useful to limit the path depth (\figref{pathdepths})
-when rendering scenes for preview purposes, since this reduces the amount
+It is often useful to limit the path depth (\figref{pathdepths})
+when rendering scenes for preview purposes, since this reduces the amount
of computation that is necessary per pixel. Furthermore, such renderings
usually converge faster and therefore need fewer samples per pixel.
-When reference-quality is desired, one should always leave the path
+When reference-quality is desired, one should always leave the path
depth unlimited.
\begin{figure}[h!]
@@ -95,21 +95,42 @@ depth unlimited.
\vspace{-5mm}
\caption{
\label{fig:path-explanation}
- A ray of emitted light is scattered by an object and subsequently
+ A ray of emitted light is scattered by an object and subsequently
reaches the eye/sensor.
In Mitsuba, this is a \emph{depth-2} path, since it has two edges.
}
\end{figure}
Mitsuba counts depths starting at $1$, which correspond to
-visible light sources (i.e. a path that starts at the light
-source and ends at the eye or sensor without any scattering
+visible light sources (i.e. a path that starts at the light
+source and ends at the eye or sensor without any scattering
interaction in between).
A depth-$2$ path (also known as ``direct illumination'') includes
a single scattering event (\figref{path-explanation}).
\subsubsection*{Progressive versus non-progressive}
Some of the rendering techniques in Mitsuba are \emph{progressive}.
-What this means is that they display a rough preview, which improves over time.
+What this means is that they display a rough preview, which improves over time.
Leaving them running indefinitely will continually reduce noise (in unbiased algorithms
-such as Metropolis Light Transport) or noise and bias (in biased
+such as Metropolis Light Transport) or noise and bias (in biased
rendering techniques such as Progressive Photon Mapping).
+\newpage
+\subsubsection*{Hiding directly visible emitters}
+\label{sec:hideemitters}
+Several rendering algorithms in Mitsuba have a feature to hide directly
+visible light sources (e.g. environment maps or area lights). While not
+particularly realistic, this feature is often convenient to remove a background
+from a rendering so that it can be pasted into a differently-colored document.
+
+Note that only directly visible emitters can be hidden using this feature---a
+reflection on a shiny surface will be unaffected. To perform the kind of
+compositing shown in Figure~\ref{fig:hideemitters}, it is also necessary to
+enable the alpha channel in the scene's film instance (Section~\ref{sec:films}).
+
+\renderings{
+ \unframedrendering{Daylit smoke rendered with \code{hideEmitters} set to \code{false}
+ (the default setting)}{integrator_volpath_normal}
+ \unframedrendering{Rendered with \code{hideEmitters} set to \code{true} and alpha-composited
+ onto a white background.}{integrator_volpath_hideemitters}
+ \caption{\label{fig:hideemitters}An example application of the \code{hideEmitters} parameter
+ together with alpha blending}
+}
diff --git a/doc/section_media.tex b/doc/section_media.tex
index 72ca44b5..3a6a42ed 100644
--- a/doc/section_media.tex
+++ b/doc/section_media.tex
@@ -11,7 +11,7 @@
\caption{Participating media are not limited to smoke or fog: they are
also great for rendering fuzzy materials such as these knitted sweaters
(made using the \pluginref{heterogeneous} and \pluginref{microflake} plugins).
- Figure courtesy of Yuksel et al. \cite{Yuksel2012Stitch}, models courtesy of
+ Figure courtesy of Yuksel et al. \cite{Yuksel2012Stitch}, models courtesy of
Rune Spaans and Christer Sveen.}
}
In Mitsuba, participating media are used to simulate materials ranging from
diff --git a/doc/section_phase.tex b/doc/section_phase.tex
index bed50d2a..6092f9f3 100644
--- a/doc/section_phase.tex
+++ b/doc/section_phase.tex
@@ -1,17 +1,17 @@
\newpage
\subsection{Phase functions}
\label{sec:phase}
-This section contains a description of all implemented medium scattering models, which
-are also known as \emph{phase functions}. These are very similar in principle to surface
-scattering models (or \emph{BSDF}s), and essentially describe where light travels after
+This section contains a description of all implemented medium scattering models, which
+are also known as \emph{phase functions}. These are very similar in principle to surface
+scattering models (or \emph{BSDF}s), and essentially describe where light travels after
hitting a particle within the medium.
The most commonly used models for smoke, fog, and other homogeneous media
-are isotropic scattering (\pluginref{isotropic}) and the Henyey-Greenstein
+are isotropic scattering (\pluginref{isotropic}) and the Henyey-Greenstein
phase function (\pluginref{hg}). Mitsuba also supports \emph{anisotropic}
-media, where the behavior of the medium changes depending on the direction
-of light propagation (e.g. in volumetric representations of fabric). These
-are the Kajiya-Kay (\pluginref{kkay}) and Micro-flake (\pluginref{microflake})
+media, where the behavior of the medium changes depending on the direction
+of light propagation (e.g. in volumetric representations of fabric). These
+are the Kajiya-Kay (\pluginref{kkay}) and Micro-flake (\pluginref{microflake})
models.
Finally, there is also a phase function for simulating scattering in
diff --git a/doc/section_rfilters.tex b/doc/section_rfilters.tex
index 04e587e2..78861546 100644
--- a/doc/section_rfilters.tex
+++ b/doc/section_rfilters.tex
@@ -2,7 +2,7 @@
\subsection{Reconstruction filters}
\label{sec:rfilters}
Image reconstruction filters are responsible for converting a series of radiance samples generated
-jointly by the \emph{sampler} and \emph{integrator} into the final output image that will be written
+jointly by the \emph{sampler} and \emph{integrator} into the final output image that will be written
to disk at the end of a rendering process.
This section gives a brief overview of the reconstruction filters that are available in Mitsuba.
There is no universally superior filter, and the final choice depends on a trade-off between
@@ -15,44 +15,44 @@ near discontinuities, such as a light-shadow transiton.
\begin{description}
\item[Box filter (\code{box}):]
-the fastest, but also about the worst possible
-reconstruction filter, since it is extremely prone to aliasing.
+the fastest, but also about the worst possible
+reconstruction filter, since it is extremely prone to aliasing.
It is included mainly for completeness, though some rare situations
may warrant its use.
\item[Tent filter (\code{tent}):]
Simple tent, or triangle filter. This reconstruction filter never
-suffers from ringing and usually causes less aliasing than a naive
+suffers from ringing and usually causes less aliasing than a naive
box filter. When rendering scenes with sharp brightness discontinuities,
this may be useful; otherwise, negative-lobed filters will be preferable
(e.g. Mitchell-Netravali or Lanczos Sinc)
\item[Gaussian filter (\code{gaussian}):]
this is a windowed Gaussian filter with configurable standard deviation.
-It produces pleasing results and never suffers from ringing, but may
+It produces pleasing results and never suffers from ringing, but may
occasionally introduce too much blurring.
When no reconstruction filter is explicitly requested, this is the default
choice in Mitsuba.
\item[Mitchell-Netravali filter (\code{mitchell}):]
Separable cubic spline reconstruction filter by Mitchell and Netravali
\cite{Mitchell:1988:Reconstruction}
-This is often a good compromise between sharpness and ringing.
+This is often a good compromise between sharpness and ringing.
-The plugin has two \code{float}-valued parameters named \texttt{B} and \texttt{C} that
+The plugin has two \code{float}-valued parameters named \texttt{B} and \texttt{C} that
correspond to the two parameters in the original research paper. By default, these
are set to the recommended value of $1/3$, but can be tweaked if desired.
\item[Catmull-Rom filter (\code{catmullrom}):]
-This is a special version of the Mitchell-Netravali filter that has the
+This is a special version of the Mitchell-Netravali filter that has the
constants \texttt{B} and \texttt{C} adjusted to produce higher sharpness at the
cost of increased susceptibility to ringing.
\item[Lanczos Sinc filter (\code{lanczos}):]
This is a windowed version of the theoretically optimal low-pass filter.
-It is generally one of the best available filters in terms of producing sharp
-high-quality output. Its main disadvantage is that it produces strong ringing around
-discontinuities, which can become a serious problem when rendering bright objects
-with sharp edges (for instance, a directly visible light source will have black
-fringing artifacts around it).
+It is generally one of the best available filters in terms of producing sharp
+high-quality output. Its main disadvantage is that it produces strong ringing around
+discontinuities, which can become a serious problem when rendering bright objects
+with sharp edges (for instance, a directly visible light source will have black
+fringing artifacts around it).
This is also the computationally slowest reconstruction filter.
This plugin has an \code{integer}-valued parameter named \code{lobes}, that
@@ -60,19 +60,19 @@ sets the desired number of filter side-lobes. The higher, the closer
the filter will approximate an optimal low-pass filter, but this also
increases the susceptibility to ringing. Values of 2 or 3 are common (3 is the default).
\end{description}
-The next section contains a series of comparisons between reconstruction filters. In the first
-case, a very high-resolution input image (corresponding to a hypothetical radiance field
+The next section contains a series of comparisons between reconstruction filters. In the first
+case, a very high-resolution input image (corresponding to a hypothetical radiance field
incident at the camera) is reconstructed at low resolutions.
\newpage
\subsubsection{Reconstruction filter comparison 1: frequency attenuation and aliasing}
\vspace{-2mm}
-Here, a high frequency function is reconstructed at low resolutions. A good filter
+Here, a high frequency function is reconstructed at low resolutions. A good filter
(e.g. Lanczos Sinc) will capture all oscillations that are representable at the desired
resolution and attenuate the remainder to a uniform gray. The filters are ordered by their
approximate level of success at this benchmark.
\renderings{
- \subfloat[A high resolution input image whose frequency decreases
+ \subfloat[A high resolution input image whose frequency decreases
towards the borders. If you are looking at this on a computer, you may
have to zoom in.]{\fbox{\includegraphics[width=0.43\textwidth]{images/rfilter_sines_input}}}
\hfill
@@ -92,7 +92,7 @@ approximate level of success at this benchmark.
}
\newpage
\subsubsection{Reconstruction filter comparison 2: ringing}
-This comparison showcases the ringing artifacts that can occur when the rendered
+This comparison showcases the ringing artifacts that can occur when the rendered
image contains extreme and discontinuous brightness transitions. The
Mitchell-Netravali, Catmull-Rom, and Lanczos Sinc filters are affected by this problem.
Note the black fringing around the light source in the cropped Cornell box renderings below.
diff --git a/doc/section_samplers.tex b/doc/section_samplers.tex
index ffd1e5c0..97762b70 100644
--- a/doc/section_samplers.tex
+++ b/doc/section_samplers.tex
@@ -1,16 +1,16 @@
\newpage
\subsection{Sample generators}
\label{sec:samplers}
-When rendering an image, Mitsuba has to solve a high-dimensional integration problem that involves the
-geometry, materials, lights, and sensors that make up the scene. Because of the mathematical complexity
+When rendering an image, Mitsuba has to solve a high-dimensional integration problem that involves the
+geometry, materials, lights, and sensors that make up the scene. Because of the mathematical complexity
of these integrals, it is generally impossible to solve them analytically --- instead, they
-are solved \emph{numerically} by evaluating the function to be integrated at a large number of
-different positions referred to as \emph{samples}. Sample generators are an essential ingredient to this
-process: they produce points in a (hypothetical) infinite dimensional hypercube $[0,1]^{\infty}$ that
+are solved \emph{numerically} by evaluating the function to be integrated at a large number of
+different positions referred to as \emph{samples}. Sample generators are an essential ingredient to this
+process: they produce points in a (hypothetical) infinite dimensional hypercube $[0,1]^{\infty}$ that
constitute the canonical representation of these samples.
To do its work, a rendering algorithm, or \emph{integrator}, will send many queries to the sample generator.
-Generally, it will request subsequent 1D or 2D components of this infinite-dimensional ``point'' and map
+Generally, it will request subsequent 1D or 2D components of this infinite-dimensional ``point'' and map
them into a more convenient space (for instance, positions on surfaces). This allows it to construct
light paths to eventually evaluate the flow of light through the scene.
@@ -18,6 +18,6 @@ Since the whole process starts with a large number of points in the abstract spa
it is natural to consider different ways of positioning them. Desirable properties of a sampler are
that it ``randomly'' covers the whole space evenly with samples, but without placing samples too close
to each other. This leads to such notions as \emph{stratified sampling} and \emph{low-discrepancy}
-number sequences. The samplers in this section make different guarantees on the quality of generated
-samples based on these criteria. To obtain intuition about their behavior, the provided point plots
+number sequences. The samplers in this section make different guarantees on the quality of generated
+samples based on these criteria. To obtain intuition about their behavior, the provided point plots
illustrate the resulting sample placement.
diff --git a/doc/section_sensors.tex b/doc/section_sensors.tex
index 06dd9168..cb5ed783 100644
--- a/doc/section_sensors.tex
+++ b/doc/section_sensors.tex
@@ -8,12 +8,12 @@ into a given direction or the irradiance received by a certain surface. The foll
section lists the available choices.
\subsubsection*{Handedness convention}
-Sensors in Mitsuba are \emph{right-handed}.
+Sensors in Mitsuba are \emph{right-handed}.
Any number of rotations and translations can be applied to them
without changing this property. By default they are located at the
origin and oriented in such a way that in the rendered image, $+X$ points left,
$+Y$ points upwards, and $+Z$ points along the viewing direction.
Left-handed sensors are also supported. To switch the handedness,
-flip any one of the axes, e.g. by passing a scale transformation like
+flip any one of the axes, e.g. by passing a scale transformation like
\code{} to the sensor's \code{toWorld} parameter.
diff --git a/doc/section_shapes.tex b/doc/section_shapes.tex
index f9e4673e..ca8582a8 100644
--- a/doc/section_shapes.tex
+++ b/doc/section_shapes.tex
@@ -5,11 +5,11 @@ This section presents an overview of the shape plugins that are released along w
In Mitsuba, shapes define surfaces that mark transitions between different types of materials. For instance,
a shape could describe a boundary between air and a solid object, such as a piece of rock. Alternatively,
-a shape can mark the beginning of a region of space that isn't solid at all, but rather contains a participating
+a shape can mark the beginning of a region of space that isn't solid at all, but rather contains a participating
medium, such as smoke or steam. Finally, a shape can be used to create an object that emits light on its own.
-Shapes are usually declared along with a surface scattering model (named ``BSDF'', see \secref{bsdfs} for details).
-This BSDF characterizes what happens \emph{at the surface}. In the XML scene description language, this might look like
+Shapes are usually declared along with a surface scattering model (named ``BSDF'', see \secref{bsdfs} for details).
+This BSDF characterizes what happens \emph{at the surface}. In the XML scene description language, this might look like
the following:
\begin{xml}
@@ -20,7 +20,7 @@ the following:
... $\code{bsdf}$ parameters ..
-
+ *
+ *
+ *
+ *
+ *
+ * \end{xml}
+ *
+ * The \code{value="..."} argument may also include certain keywords that will be
+ * evaluated and substituted when the rendered image is written to disk. A list all available
+ * keywords is provided in Table~\ref{tbl:film-keywords}.
+ *
+ * Apart from querying the render time,
+ * memory usage, and other scene-related information, it is also possible
+ * to `paste' an existing parameter that was provided to another plugin---for instance,the
+ * the camera transform matrix would be obtained as \code{\$sensor['toWorld']}. The name of
+ * the active integrator plugin is given by \code{\$integrator['type']}, and so on.
+ * All of these can be mixed to build larger fragments, as following example demonstrates.
+ * The result of this annotation is shown in Figure~\ref{fig:annotation-example}.
+ * \begin{xml}[mathescape=false]
+ *
+ * \end{xml}
+ * \vspace{1cm}
+ * \renderings{
+ * \fbox{\includegraphics[width=.8\textwidth]{images/annotation_example}}\hfill\,
+ * \caption{\label{fig:annotation-example}A demonstration of the label annotation feature
+ * given the example string shown above.}
+ * }
+ * \vspace{2cm}
+ * \begin{table}[htb]
+ * \centering
+ * \begin{savenotes}
+ * \begin{tabular}{ll}
+ * \toprule
+ * \code{\$scene['renderTime']}& Image render time, use \code{renderTimePrecise} for more digits.\\
+ * \code{\$scene['memUsage']}& Mitsuba memory usage\footnote{The definition of this quantity unfortunately
+ * varies a bit from platform to platform. On Linux and Windows, it denotes the total
+ * amount of allocated RAM and disk-based memory that is private to the process (i.e. not
+ * shared or shareable), which most intuitively captures the amount of memory required for
+ * rendering. On OSX, it denotes the working set size---roughly speaking, this is the
+ * amount of RAM apportioned to the process (i.e. excluding disk-based memory).}.
+ * Use \code{memUsagePrecise} for more digits.\\
+ * \code{\$scene['coreCount']}& Number of local and remote cores working on the rendering job\\
+ * \code{\$scene['blockSize']}& Block size used to parallelize up the rendering workload\\
+ * \code{\$scene['sourceFile']}& Source file name\\
+ * \code{\$scene['destFile']}& Destination file name\\
+ * \code{\$integrator['..']}& Copy a named integrator parameter\\
+ * \code{\$sensor['..']}& Copy a named sensor parameter\\
+ * \code{\$sampler['..']}& Copy a named sampler parameter\\
+ * \code{\$film['..']}& Copy a named film parameter\\
+ * \bottomrule
+ * \end{tabular}
+ * \end{savenotes}
+ * \caption{\label{tbl:film-keywords}A list of all special
+ * keywords supported by the annotation feature}
+ * \end{table}
+ *
*/
+
class HDRFilm : public Film {
public:
HDRFilm(const Properties &props) : Film(props) {
@@ -127,7 +211,7 @@ public:
std::string fileFormat = boost::to_lower_copy(
props.getString("fileFormat", "openexr"));
std::string pixelFormat = boost::to_lower_copy(
- props.getString("pixelFormat", "rgba"));
+ props.getString("pixelFormat", "rgb"));
std::string componentFormat = boost::to_lower_copy(
props.getString("componentFormat", "float16"));
@@ -204,6 +288,16 @@ public:
}
+ std::vector keys = props.getPropertyNames();
+ for (size_t i=0; i bitmap = m_storage->getBitmap()->convert(
@@ -322,26 +416,43 @@ public:
}
fs::path filename = m_destFile;
+ std::string properExtension;
+ if (m_fileFormat == Bitmap::EOpenEXR)
+ properExtension = ".exr";
+ else if (m_fileFormat == Bitmap::ERGBE)
+ properExtension = ".rgbe";
+ else
+ properExtension = ".pfm";
+
std::string extension = boost::to_lower_copy(filename.extension().string());
- std::string properExtension = (m_fileFormat == Bitmap::EOpenEXR) ? ".exr" : ".rgbe";
if (extension != properExtension)
filename.replace_extension(properExtension);
Log(EInfo, "Writing image to \"%s\" ..", filename.string().c_str());
ref stream = new FileStream(filename, FileStream::ETruncWrite);
+ annotate(scene, m_properties, bitmap, renderTime, 1.0f);
+
/* Attach the log file to the image if this is requested */
Logger *logger = Thread::getThread()->getLogger();
std::string log;
if (m_attachLog && logger->readLog(log)) {
log += "\n\n";
log += Statistics::getInstance()->getStats();
- bitmap->setString("log", log);
+ bitmap->setMetadataString("log", log);
}
bitmap->write(m_fileFormat, stream);
}
+ bool hasAlpha() const {
+ return
+ m_pixelFormat == Bitmap::ELuminanceAlpha ||
+ m_pixelFormat == Bitmap::ERGBA ||
+ m_pixelFormat == Bitmap::EXYZA ||
+ m_pixelFormat == Bitmap::ESpectrumAlpha;
+ }
+
bool destinationExists(const fs::path &baseName) const {
fs::path filename = baseName;
std::string extension = (m_fileFormat == Bitmap::EOpenEXR) ? ".exr" : ".rgbe";
diff --git a/src/films/ldrfilm.cpp b/src/films/ldrfilm.cpp
index b26f2ca1..07eece1f 100644
--- a/src/films/ldrfilm.cpp
+++ b/src/films/ldrfilm.cpp
@@ -22,6 +22,7 @@
#include
#include
#include "banner.h"
+#include "annotations.h"
MTS_NAMESPACE_BEGIN
@@ -90,8 +91,10 @@ MTS_NAMESPACE_BEGIN
* be used by the film. \default{\code{gaussian}, a windowed Gaussian filter}}
* }
* This plugin implements a low dynamic range film that can write out 8-bit PNG
- * and JPEG images. It also provides basic tonemapping techniques to map recorded
- * radiance values into a reasonable displayable range.
+ * and JPEG images in various configurations. It provides basic tonemapping techniques
+ * to map recorded radiance values into a reasonable displayable range. An alpha (opacity)
+ * channel can be written if desired. By default, the plugin writes gamma-corrected
+ * PNG files using the sRGB color space and no alpha channel.
*
* This film is a good choice when low dynamic range output is desired
* and the rendering setup can be configured to capture the relevant portion
@@ -113,6 +116,9 @@ MTS_NAMESPACE_BEGIN
* The RGB values exported by this plugin correspond to the ITU-R Rec. BT. 709-3
* primaries with a D65 white point. When $\texttt{gamma}$ is set to $\code{-1}$ (the default),
* the output is in the sRGB color space and will display as intended on compatible devices.
+ *
+ * Note that this plugin supports render-time \emph{annotations}, which
+ * are described on page~\pageref{sec:film-annotations}.
*/
class LDRFilm : public Film {
public:
@@ -128,7 +134,7 @@ public:
std::string fileFormat = boost::to_lower_copy(
props.getString("fileFormat", "png"));
std::string pixelFormat = boost::to_lower_copy(
- props.getString("pixelFormat", "rgba"));
+ props.getString("pixelFormat", "rgb"));
std::string tonemapMethod = boost::to_lower_copy(
props.getString("tonemapMethod", "gamma"));
@@ -176,6 +182,16 @@ public:
m_reinhardKey = props.getFloat("key", 0.18f);
m_reinhardBurn = props.getFloat("burn", 0.0);
+ std::vector keys = props.getPropertyNames();
+ for (size_t i=0; iwriteBool(m_hasBanner);
- stream->writeUInt(m_fileFormat);
stream->writeUInt(m_pixelFormat);
+ stream->writeUInt(m_fileFormat);
stream->writeFloat(m_gamma);
stream->writeUInt(m_tonemapMethod);
stream->writeFloat(m_exposure);
@@ -281,7 +297,7 @@ public:
m_destFile = destFile;
}
- void develop() {
+ void develop(const Scene *scene, Float renderTime) {
Log(EDebug, "Developing film ..");
ref bitmap = m_storage->getBitmap();
@@ -328,9 +344,17 @@ public:
Log(EInfo, "Writing image to \"%s\" ..", filename.string().c_str());
ref stream = new FileStream(filename, FileStream::ETruncWrite);
+ annotate(scene, m_properties, bitmap, renderTime, m_gamma);
+
bitmap->write(m_fileFormat, stream);
}
+ bool hasAlpha() const {
+ return
+ m_pixelFormat == Bitmap::ELuminanceAlpha ||
+ m_pixelFormat == Bitmap::ERGBA;
+ }
+
bool destinationExists(const fs::path &baseName) const {
fs::path filename = baseName;
std::string extension;
diff --git a/src/films/mfilm.cpp b/src/films/mfilm.cpp
index 97107f89..3a46e190 100644
--- a/src/films/mfilm.cpp
+++ b/src/films/mfilm.cpp
@@ -53,7 +53,7 @@ MTS_NAMESPACE_BEGIN
* and \code{spectrumAlpha}. In the latter two cases,
* the number of written channels depends on the value assigned to
* \code{SPECTRUM\_SAMPLES} during compilation (see Section~\ref{sec:compiling}
- * section for details) \default{\code{rgba}}
+ * section for details) \default{\code{luminance}}
* }
* \parameter{highQualityEdges}{\Boolean}{
* If set to \code{true}, regions slightly outside of the film
@@ -238,7 +238,7 @@ public:
m_destFile = destFile;
}
- void develop() {
+ void develop(const Scene *scene, Float renderTime) {
Log(EDebug, "Developing film ..");
fs::path filename = m_destFile;
@@ -323,6 +323,14 @@ public:
return fs::exists(filename);
}
+ bool hasAlpha() const {
+ return
+ m_pixelFormat == Bitmap::ELuminanceAlpha ||
+ m_pixelFormat == Bitmap::ERGBA ||
+ m_pixelFormat == Bitmap::EXYZA ||
+ m_pixelFormat == Bitmap::ESpectrumAlpha;
+ }
+
std::string toString() const {
std::ostringstream oss;
oss << "MFilm[" << endl
diff --git a/src/films/tiledhdrfilm.cpp b/src/films/tiledhdrfilm.cpp
index 0f15b59e..5476168c 100644
--- a/src/films/tiledhdrfilm.cpp
+++ b/src/films/tiledhdrfilm.cpp
@@ -53,7 +53,7 @@ MTS_NAMESPACE_BEGIN
* the number of written channels depends on the value assigned to
* \code{SPECTRUM\_SAMPLES} during compilation (see Section~\ref{sec:compiling}
* section for details)
- * \default{\code{rgba}}
+ * \default{\code{rgb}}
* }
* \parameter{componentFormat}{\String}{Specifies the desired floating
* point component format used for the output. The options are
@@ -81,7 +81,7 @@ MTS_NAMESPACE_BEGIN
* Based on the provided parameter values, the film will either write a luminance,
* luminance/alpha, RGB(A), XYZ(A) tristimulus, or spectrum/spectrum-alpha-based
* bitmap having a \code{float16}, \code{float32}, or \code{uint32}-based
- * internal representation. The default is RGBA and \code{float16}.
+ * internal representation. The default is RGB and \code{float16}.
* Note that the spectral output options only make sense when using a
* custom compiled Mitsuba distribution that has spectral rendering
* enabled. This is not the case for the downloadable release builds.
@@ -102,7 +102,7 @@ class TiledHDRFilm : public Film {
public:
TiledHDRFilm(const Properties &props) : Film(props), m_output(NULL), m_frameBuffer(NULL) {
std::string pixelFormat = boost::to_lower_copy(
- props.getString("pixelFormat", "rgba"));
+ props.getString("pixelFormat", "rgb"));
std::string componentFormat = boost::to_lower_copy(
props.getString("componentFormat", "float16"));
@@ -156,7 +156,7 @@ public:
}
virtual ~TiledHDRFilm() {
- develop();
+ develop(NULL, 0);
}
void serialize(Stream *stream, InstanceManager *manager) const {
@@ -167,7 +167,7 @@ public:
void setDestinationFile(const fs::path &destFile, uint32_t blockSize) {
if (m_output)
- develop();
+ develop(NULL, 0);
Bitmap::EPixelFormat pixelFormat = m_pixelFormat;
#if SPECTRUM_SAMPLES == 3
@@ -432,11 +432,11 @@ public:
bool develop(const Point2i &sourceOffset, const Vector2i &size,
const Point2i &targetOffset, Bitmap *target) const {
- target->fill(targetOffset, size, Spectrum(0.0f));
+ target->fillRect(targetOffset, size, Spectrum(0.0f));
return false; /* Not supported by the tiled EXR film! */
}
- void develop() {
+ void develop(const Scene *scene, Float renderTime) {
if (m_output) {
Log(EInfo, "Closing EXR file (%u tiles in total, peak memory usage: %u tiles)..",
m_blocksH * m_blocksV, m_peakUsage);
@@ -469,6 +469,14 @@ public:
void clear() { /* Do nothing */ }
+ bool hasAlpha() const {
+ return
+ m_pixelFormat == Bitmap::ELuminanceAlpha ||
+ m_pixelFormat == Bitmap::ERGBA ||
+ m_pixelFormat == Bitmap::EXYZA ||
+ m_pixelFormat == Bitmap::ESpectrumAlpha;
+ }
+
bool destinationExists(const fs::path &baseName) const {
fs::path filename = baseName;
if (boost::to_lower_copy(filename.extension().string()) != ".exr")
diff --git a/src/integrators/direct/direct.cpp b/src/integrators/direct/direct.cpp
index 0d77f61e..c2c7ba27 100644
--- a/src/integrators/direct/direct.cpp
+++ b/src/integrators/direct/direct.cpp
@@ -25,20 +25,29 @@ MTS_NAMESPACE_BEGIN
* \parameters{
* \parameter{shadingSamples}{\Integer}{This convenience parameter can be
* used to set both \code{emitterSamples} and \code{bsdfSamples} at
- * the same time.}
+ * the same time.
+ * }
* \parameter{emitterSamples}{\Integer}{Optional more fine-grained
* parameter: specifies the number of samples that should be generated
* using the direct illumination strategies implemented by the scene's
- * emitters\default{set to the value of \code{shadingSamples}}}
+ * emitters\default{set to the value of \code{shadingSamples}}
+ * }
* \parameter{bsdfSamples}{\Integer}{Optional more fine-grained
* parameter: specifies the number of samples that should be generated
* using the BSDF sampling strategies implemented by the scene's
- * surfaces\default{set to the value of \code{shadingSamples}}}
+ * surfaces\default{set to the value of \code{shadingSamples}}
+ * }
* \parameter{strictNormals}{\Boolean}{Be strict about potential
- * inconsistencies involving shading normals? See \pluginref{path}
- * for details.\default{no, i.e. \code{false}}}
+ * inconsistencies involving shading normals? See
+ * page~\pageref{sec:strictnormals} for details.
+ * \default{no, i.e. \code{false}}
+ * }
+ * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters?
+ * See page~\pageref{sec:hideemitters} for details.
+ * \default{no, i.e. \code{false}}
+ * }
* }
- *
+ * \vspace{-1mm}
* \renderings{
* \medrendering{Only BSDF sampling}{integrator_direct_bsdf}
* \medrendering{Only emitter sampling}{integrator_direct_lum}
@@ -92,6 +101,9 @@ public:
m_bsdfSamples = props.getSize("bsdfSamples", shadingSamples);
/* Be strict about potential inconsistencies involving shading normals? */
m_strictNormals = props.getBoolean("strictNormals", false);
+ /* When this flag is set to true, contributions from directly
+ * visible emitters will not be included in the rendered image */
+ m_hideEmitters = props.getBoolean("hideEmitters", false);
Assert(m_emitterSamples + m_bsdfSamples > 0);
}
@@ -101,6 +113,7 @@ public:
m_emitterSamples = stream->readSize();
m_bsdfSamples = stream->readSize();
m_strictNormals = stream->readBool();
+ m_hideEmitters = stream->readBool();
configure();
}
@@ -109,6 +122,7 @@ public:
stream->writeSize(m_emitterSamples);
stream->writeSize(m_bsdfSamples);
stream->writeBool(m_strictNormals);
+ stream->writeBool(m_hideEmitters);
}
void configure() {
@@ -142,14 +156,14 @@ public:
if (!rRec.rayIntersect(ray)) {
/* If no intersection could be found, possibly return
radiance from a background emitter */
- if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ if (rRec.type & RadianceQueryRecord::EEmittedRadiance && !m_hideEmitters)
return scene->evalEnvironment(ray);
else
return Spectrum(0.0f);
}
/* Possibly include emitted radiance if requested */
- if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
+ if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters)
Li += its.Le(-ray.d);
/* Include radiance from a subsurface scattering model if requested */
@@ -271,7 +285,7 @@ public:
/* Intersected nothing -- perhaps there is an environment map? */
const Emitter *env = scene->getEnvironmentEmitter();
- if (!env)
+ if (!env || (m_hideEmitters && bRec.sampledType == BSDF::ENull))
continue;
value = env->evalEnvironment(RayDifferential(bsdfRay));
@@ -316,6 +330,7 @@ private:
Float m_fracBSDF, m_fracLum;
Float m_weightBSDF, m_weightLum;
bool m_strictNormals;
+ bool m_hideEmitters;
};
MTS_IMPLEMENT_CLASS_S(MIDirectIntegrator, false, SamplingIntegrator)
diff --git a/src/integrators/erpt/erpt_proc.cpp b/src/integrators/erpt/erpt_proc.cpp
index 13708297..fe5b7714 100644
--- a/src/integrators/erpt/erpt_proc.cpp
+++ b/src/integrators/erpt/erpt_proc.cpp
@@ -234,15 +234,17 @@ public:
}
#endif
- #if defined(MTS_BD_DEBUG)
- if (Qxy <= 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) {
- Log(EDebug, "Source path: %s", current->toString().c_str());
- Log(EDebug, "Proposal path: %s", proposed->toString().c_str());
- Log(EWarn, "Internal error while computing acceptance probabilities: "
- "Qxy=%f, Qyx=%f, muRec=%s", Qxy, Qyx, muRec.toString().c_str());
+ if (Qxy == 0) { // be tolerant of this (can occasionally happen due to floating point inaccuracies)
+ a = 0;
+ } else if (Qxy < 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) {
+ #if defined(MTS_BD_DEBUG)
+ Log(EDebug, "Source path: %s", current->toString().c_str());
+ Log(EDebug, "Proposal path: %s", proposed->toString().c_str());
+ Log(EWarn, "Internal error while computing acceptance probabilities: "
+ "Qxy=%f, Qyx=%f, muRec=%s", Qxy, Qyx, muRec.toString().c_str());
+ #endif
a = 0;
}
- #endif
accumulatedWeight += 1-a;
diff --git a/src/integrators/misc/adaptive.cpp b/src/integrators/misc/adaptive.cpp
index 65bb0119..77b6f868 100644
--- a/src/integrators/misc/adaptive.cpp
+++ b/src/integrators/misc/adaptive.cpp
@@ -212,7 +212,10 @@ public:
Float mean = 0, meanSqr = 0.0f;
sampleCount = 0;
- while (!stop) {
+ while (true) {
+ if (stop)
+ return;
+
rRec.newQuery(RadianceQueryRecord::ESensorRay, sensor->getMedium());
rRec.extra = RadianceQueryRecord::EAdaptiveQuery;
diff --git a/src/integrators/mlt/mlt.cpp b/src/integrators/mlt/mlt.cpp
index a5df2575..0dff0ac4 100644
--- a/src/integrators/mlt/mlt.cpp
+++ b/src/integrators/mlt/mlt.cpp
@@ -259,15 +259,21 @@ public:
bool nested = m_config.twoStage && m_config.firstStage;
- Vector2i cropSize = film->getCropSize();;
+ Vector2i cropSize = film->getCropSize();
+ Assert(cropSize.x > 0 && cropSize.y > 0);
Log(EInfo, "Starting %srender job (%ix%i, " SIZE_T_FMT
" %s, " SSE_STR ", approx. " SIZE_T_FMT " mutations/pixel) ..",
nested ? "nested " : "", cropSize.x, cropSize.y,
nCores, nCores == 1 ? "core" : "cores", sampleCount);
- if (m_config.workUnits <= 0)
- m_config.workUnits = std::max((int) std::ceil((cropSize.x
- * cropSize.y * sampleCount) / 200000.0f), 1);
+ if (m_config.workUnits <= 0) {
+ const size_t desiredMutationsPerWorkUnit = 200000;
+ const size_t cropArea = (size_t) cropSize.x * cropSize.y;
+ const size_t workUnits = ((desiredMutationsPerWorkUnit - 1) +
+ (cropArea * sampleCount)) / desiredMutationsPerWorkUnit;
+ Assert(workUnits <= (size_t) std::numeric_limits::max());
+ m_config.workUnits = (int) std::max(workUnits, (size_t) 1);
+ }
m_config.nMutations = (cropSize.x * cropSize.y *
sampleCount) / m_config.workUnits;
@@ -290,12 +296,7 @@ public:
m_config, directImage, pathSeeds);
m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples,
- m_config.workUnits, false, pathSeeds);
-
- pathSeeds.clear();
-
- m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples,
- m_config.workUnits, true, pathSeeds);
+ m_config.workUnits, true, m_config.importanceMap, pathSeeds);
if (!nested)
m_config.dump();
diff --git a/src/integrators/mlt/mlt_proc.cpp b/src/integrators/mlt/mlt_proc.cpp
index 610df074..7d820505 100644
--- a/src/integrators/mlt/mlt_proc.cpp
+++ b/src/integrators/mlt/mlt_proc.cpp
@@ -122,7 +122,7 @@ public:
result->clear();
/// Reconstruct the seed path
- m_pathSampler->reconstructPath(wu->getSeed(), *current);
+ m_pathSampler->reconstructPath(wu->getSeed(), m_config.importanceMap, *current);
relWeight = current->getRelativeWeight();
BDAssert(!relWeight.isZero());
@@ -235,7 +235,9 @@ public:
}
#endif
- if (Qxy <= 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) {
+ if (Qxy == 0) { // be tolerant of this (can occasionally happen due to floating point inaccuracies)
+ a = 0;
+ } else if (Qxy < 0 || Qyx < 0 || std::isnan(Qxy) || std::isnan(Qyx)) {
#if defined(MTS_BD_DEBUG)
Log(EDebug, "Source path: %s", current->toString().c_str());
Log(EDebug, "Proposal path: %s", proposed->toString().c_str());
@@ -367,6 +369,7 @@ void MLTProcess::develop() {
value += direct[i];
target[i] = value;
}
+
m_film->setBitmap(m_developBuffer);
m_refreshTimer->reset();
diff --git a/src/integrators/path/path.cpp b/src/integrators/path/path.cpp
index 19ed0466..3fca3f9b 100644
--- a/src/integrators/path/path.cpp
+++ b/src/integrators/path/path.cpp
@@ -38,7 +38,12 @@ static StatsCounter avgPathLength("Path tracer", "Average path length", EAverage
* }
* \parameter{strictNormals}{\Boolean}{Be strict about potential
* inconsistencies involving shading normals? See the description below
- * for details.\default{no, i.e. \code{false}}}
+ * for details.\default{no, i.e. \code{false}}
+ * }
+ * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters?
+ * See page~\pageref{sec:hideemitters} for details.
+ * \default{no, i.e. \code{false}}
+ * }
* }
*
* This integrator implements a basic path tracer and is a \emph{good default choice}
@@ -75,7 +80,8 @@ static StatsCounter avgPathLength("Path tracer", "Average path length", EAverage
* low-discrepancy sample generators (i.e. \pluginref{ldsampler},
* \pluginref{halton}, or \pluginref{sobol}).
*
- * \paragraph{Strict normals:} Triangle meshes often rely on interpolated shading normals
+ * \paragraph{Strict normals:}\label{sec:strictnormals}
+ * Triangle meshes often rely on interpolated shading normals
* to suppress the inherently faceted appearance of the underlying geometry. These
* ``fake'' normals are not without problems, however. They can lead to paradoxical
* situations where a light ray impinges on an object from a direction that is classified as ``outside''
@@ -116,6 +122,7 @@ public:
Intersection &its = rRec.its;
RayDifferential ray(r);
Spectrum Li(0.0f);
+ bool scattered = false;
/* Perform the first ray intersection (or ignore if the
intersection has already been provided). */
@@ -129,7 +136,8 @@ public:
if (!its.isValid()) {
/* If no intersection could be found, potentially return
radiance from a environment luminaire if it exists */
- if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ if ((rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered))
Li += throughput * scene->evalEnvironment(ray);
break;
}
@@ -137,7 +145,8 @@ public:
const BSDF *bsdf = its.getBSDF(ray);
/* Possibly include emitted radiance if requested */
- if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
+ if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered))
Li += throughput * its.Le(-ray.d);
/* Include radiance from a subsurface scattering model if requested */
@@ -201,6 +210,8 @@ public:
if (bsdfWeight.isZero())
break;
+ scattered |= bRec.sampledType != BSDF::ENull;
+
/* Prevent light leaks due to the use of shading normals */
const Vector wo = its.toWorld(bRec.wo);
Float woDotGeoN = dot(its.geoFrame.n, wo);
@@ -224,6 +235,9 @@ public:
const Emitter *env = scene->getEnvironmentEmitter();
if (env) {
+ if (m_hideEmitters && !scattered)
+ break;
+
value = env->evalEnvironment(ray);
if (!env->fillDirectSamplingRecord(dRec, ray))
break;
diff --git a/src/integrators/path/volpath.cpp b/src/integrators/path/volpath.cpp
index bc1fa419..788c3c8e 100644
--- a/src/integrators/path/volpath.cpp
+++ b/src/integrators/path/volpath.cpp
@@ -37,8 +37,14 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length
* path termination criterion. \default{\code{5}}
* }
* \parameter{strictNormals}{\Boolean}{Be strict about potential
- * inconsistencies involving shading normals? See \pluginref{path}
- * for details.\default{no, i.e. \code{false}}}
+ * inconsistencies involving shading normals? See
+ * page~\pageref{sec:strictnormals} for details.
+ * \default{no, i.e. \code{false}}
+ * }
+ * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters?
+ * See page~\pageref{sec:hideemitters} for details.
+ * \default{no, i.e. \code{false}}
+ * }
* }
*
* This plugin provides a volumetric path tracer that can be used to
@@ -130,8 +136,7 @@ public:
if (phaseVal != 0) {
/* Calculate prob. of having sampled that direction using
phase function sampling */
- Float phasePdf = (emitter->isOnSurface() && dRec.measure == ESolidAngle
- && interactions == 0)
+ Float phasePdf = (emitter->isOnSurface() && dRec.measure == ESolidAngle)
? phase->pdf(pRec) : (Float) 0.0f;
/* Weight using the power heuristic */
@@ -152,7 +157,6 @@ public:
break;
throughput *= phaseVal;
-
/* Trace a ray in this direction */
ray = Ray(mRec.p, pRec.wo, ray.time);
ray.mint = 0;
@@ -187,13 +191,20 @@ public:
if (!its.isValid()) {
/* If no intersection could be found, possibly return
attenuated radiance from a background luminaire */
- if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
- Li += throughput * scene->evalEnvironment(ray);
+ if ((rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered)) {
+ Spectrum value = throughput * scene->evalEnvironment(ray);
+ if (rRec.medium)
+ value *= rRec.medium->evalTransmittance(ray, rRec.sampler);
+ Li += value;
+ }
+
break;
}
/* Possibly include emitted radiance if requested */
- if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
+ if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered))
Li += throughput * its.Le(-ray.d);
/* Include radiance from a subsurface integrator if requested */
diff --git a/src/integrators/path/volpath_simple.cpp b/src/integrators/path/volpath_simple.cpp
index 3733fc5d..e7f3bde9 100644
--- a/src/integrators/path/volpath_simple.cpp
+++ b/src/integrators/path/volpath_simple.cpp
@@ -37,8 +37,14 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length
* path termination criterion. \default{\code{5}}
* }
* \parameter{strictNormals}{\Boolean}{Be strict about potential
- * inconsistencies involving shading normals? See \pluginref{path}
- * for details.\default{no, i.e. \code{false}}}
+ * inconsistencies involving shading normals? See
+ * page~\pageref{sec:strictnormals} for details.
+ * \default{no, i.e. \code{false}}
+ * }
+ * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters?
+ * See page~\pageref{sec:hideemitters} for details.
+ * \default{no, i.e. \code{false}}
+ * }
* }
*
* This plugin provides a basic volumetric path tracer that can be used to
@@ -86,7 +92,7 @@ public:
MediumSamplingRecord mRec;
RayDifferential ray(r);
Spectrum Li(0.0f);
- bool nullChain = true;
+ bool nullChain = true, scattered = false;
Float eta = 1.0f;
/* Perform the first ray intersection (or ignore if the
@@ -153,6 +159,7 @@ public:
ray.mint = 0;
scene->rayIntersect(ray, its);
nullChain = false;
+ scattered = true;
} else {
/* Sample
tau(x, y) * (Surface integral). This happens with probability mRec.pdfFailure
@@ -165,13 +172,19 @@ public:
if (!its.isValid()) {
/* If no intersection could be found, possibly return
attenuated radiance from a background luminaire */
- if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
- Li += throughput * scene->evalEnvironment(ray);
+ if ((rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered)) {
+ Spectrum value = throughput * scene->evalEnvironment(ray);
+ if (rRec.medium)
+ value *= rRec.medium->evalTransmittance(ray);
+ Li += value;
+ }
break;
}
/* Possibly include emitted radiance if requested */
- if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
+ if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ && (!m_hideEmitters || scattered))
Li += throughput * its.Le(-ray.d);
/* Include radiance from a subsurface integrator if requested */
@@ -263,6 +276,7 @@ public:
/* In the next iteration, trace a ray in this direction */
ray = Ray(its.p, wo, ray.time);
scene->rayIntersect(ray, its);
+ scattered |= bRec.sampledType != BSDF::ENull;
}
if (rRec.depth++ >= m_rrDepth) {
diff --git a/src/integrators/photonmapper/bre.cpp b/src/integrators/photonmapper/bre.cpp
index dcfc431b..d92c76e1 100644
--- a/src/integrators/photonmapper/bre.cpp
+++ b/src/integrators/photonmapper/bre.cpp
@@ -116,21 +116,20 @@ void BeamRadianceEstimator::serialize(Stream *stream, InstanceManager *manager)
AABB BeamRadianceEstimator::buildHierarchy(IndexType index) {
BRENode &node = m_nodes[index];
+ Point center = node.photon.getPosition();
+ Float radius = node.radius;
+ node.aabb = AABB(
+ center - Vector(radius, radius, radius),
+ center + Vector(radius, radius, radius)
+ );
+
if (!node.photon.isLeaf()) {
IndexType left = node.photon.getLeftIndex(index);
IndexType right = node.photon.getRightIndex(index);
- node.aabb.reset();
if (left)
node.aabb.expandBy(buildHierarchy(left));
if (right)
node.aabb.expandBy(buildHierarchy(right));
- } else {
- Point center = node.photon.getPosition();
- Float radius = node.radius;
- node.aabb = AABB(
- center - Vector(radius, radius, radius),
- center + Vector(radius, radius, radius)
- );
}
return node.aabb;
@@ -170,7 +169,7 @@ Spectrum BeamRadianceEstimator::query(const Ray &r, const Medium *medium) const
Float diskDistance = dot(originToCenter, ray.d), radSqr = node.radius * node.radius;
Float distSqr = (ray(diskDistance) - node.photon.getPosition()).lengthSquared();
- if (distSqr < radSqr) {
+ if (diskDistance > 0 && distSqr < radSqr) {
Float weight = K2(distSqr/radSqr)/radSqr;
Vector wi = -node.photon.getDirection();
diff --git a/src/integrators/photonmapper/photonmapper.cpp b/src/integrators/photonmapper/photonmapper.cpp
index cbda46f9..a1849833 100644
--- a/src/integrators/photonmapper/photonmapper.cpp
+++ b/src/integrators/photonmapper/photonmapper.cpp
@@ -46,6 +46,10 @@ MTS_NAMESPACE_BEGIN
* Granularity of photon tracing work units for the purpose
* of parallelization (in \# of shot particles) \default{0, i.e. decide automatically}
* }
+ * \parameter{hideEmitters}{\Boolean}{Hide directly visible emitters?
+ * See page~\pageref{sec:hideemitters} for details.
+ * \default{no, i.e. \code{false}}
+ * }
* \parameter{rrDepth}{\Integer}{Specifies the minimum path depth, after
* which the implementation will start to use the ``russian roulette''
* path termination criterion. \default{\code{5}}
@@ -83,7 +87,8 @@ MTS_NAMESPACE_BEGIN
*/
class PhotonMapIntegrator : public SamplingIntegrator {
public:
- PhotonMapIntegrator(const Properties &props) : SamplingIntegrator(props) {
+ PhotonMapIntegrator(const Properties &props) : SamplingIntegrator(props),
+ m_parentIntegrator(NULL) {
/* Number of lsamples for direct illumination */
m_directSamples = props.getInteger("directSamples", 16);
/* Number of BSDF samples when intersecting a glossy material */
@@ -126,6 +131,9 @@ public:
m_gatherLocally = props.getBoolean("gatherLocally", true);
/* Indicates if the gathering steps should be canceled if not enough photons are generated. */
m_autoCancelGathering = props.getBoolean("autoCancelGathering", true);
+ /* When this flag is set to true, contributions from directly
+ * visible emitters will not be included in the rendered image */
+ m_hideEmitters = props.getBoolean("hideEmitters", false);
if (m_maxDepth == 0) {
Log(EError, "maxDepth must be greater than zero!");
@@ -143,7 +151,7 @@ public:
/// Unserialize from a binary data stream
PhotonMapIntegrator(Stream *stream, InstanceManager *manager)
- : SamplingIntegrator(stream, manager) {
+ : SamplingIntegrator(stream, manager), m_parentIntegrator(NULL) {
m_directSamples = stream->readInt();
m_glossySamples = stream->readInt();
m_maxDepth = stream->readInt();
@@ -159,6 +167,7 @@ public:
m_volumeLookupSize = stream->readInt();
m_gatherLocally = stream->readBool();
m_autoCancelGathering = stream->readBool();
+ m_hideEmitters = stream->readBool();
m_causticPhotonMapID = m_globalPhotonMapID = m_breID = 0;
configure();
}
@@ -190,6 +199,7 @@ public:
stream->writeInt(m_volumeLookupSize);
stream->writeBool(m_gatherLocally);
stream->writeBool(m_autoCancelGathering);
+ stream->writeBool(m_hideEmitters);
}
/// Configure the sampler for a specified amount of direct illumination samples
@@ -201,9 +211,6 @@ public:
if (bsdfSamples > 1)
sampler->request2DArray(bsdfSamples);
- if (scene->getMedia().size() == 0)
- m_volumePhotons = 0;
-
bool hasDelta = false;
const ref_vector &shapes = scene->getShapes();
for (size_t i=0; igetReturnStatus() != ParallelProcess::ESuccess)
return false;
- Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
- SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
-
 		ref<PhotonMap> globalPhotonMap = proc->getPhotonMap();
if (globalPhotonMap->isFull()) {
+ Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
+ SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+
m_globalPhotonMap = globalPhotonMap;
m_globalPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
m_globalPhotonMap->build();
@@ -280,37 +287,6 @@ public:
/* Generate the caustic photon map */
 		ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
GatherPhotonProcess::ECausticPhotons, m_causticPhotons,
- m_granularity, 3, m_rrDepth, m_gatherLocally,
- m_autoCancelGathering, job);
-
- proc->bindResource("scene", sceneResID);
- proc->bindResource("sensor", sensorResID);
- proc->bindResource("sampler", qmcSamplerID);
-
- m_proc = proc;
- sched->schedule(proc);
- sched->wait(proc);
- m_proc = NULL;
-
- if (proc->getReturnStatus() != ParallelProcess::ESuccess)
- return false;
-
- Log(EDebug, "Caustic photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
- SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
-
-		ref<PhotonMap> causticPhotonMap = proc->getPhotonMap();
- if (causticPhotonMap->isFull()) {
- m_causticPhotonMap = causticPhotonMap;
- m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
- m_causticPhotonMap->build();
- m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap);
- }
- }
-
- if (m_volumePhotonMap.get() == NULL && m_volumePhotons > 0) {
- /* Generate the volume photon map */
-		ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
- GatherPhotonProcess::EVolumePhotons, m_volumePhotons,
m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally,
m_autoCancelGathering, job);
@@ -326,11 +302,43 @@ public:
if (proc->getReturnStatus() != ParallelProcess::ESuccess)
return false;
- Log(EDebug, "Volume photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
- SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+		ref<PhotonMap> causticPhotonMap = proc->getPhotonMap();
+ if (causticPhotonMap->isFull()) {
+ Log(EDebug, "Caustic photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
+ SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+
+ m_causticPhotonMap = causticPhotonMap;
+ m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
+ m_causticPhotonMap->build();
+ m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap);
+ }
+ }
+
+ size_t volumePhotons = scene->getMedia().size() == 0 ? 0 : m_volumePhotons;
+ if (m_volumePhotonMap.get() == NULL && volumePhotons > 0) {
+ /* Generate the volume photon map */
+		ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
+ GatherPhotonProcess::EVolumePhotons, volumePhotons,
+ m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally,
+ m_autoCancelGathering, job);
+
+ proc->bindResource("scene", sceneResID);
+ proc->bindResource("sensor", sensorResID);
+ proc->bindResource("sampler", qmcSamplerID);
+
+ m_proc = proc;
+ sched->schedule(proc);
+ sched->wait(proc);
+ m_proc = NULL;
+
+ if (proc->getReturnStatus() != ParallelProcess::ESuccess)
+ return false;
 		ref<PhotonMap> volumePhotonMap = proc->getPhotonMap();
if (volumePhotonMap->isFull()) {
+ Log(EDebug, "Volume photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
+ SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+
volumePhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
volumePhotonMap->build();
m_bre = new BeamRadianceEstimator(volumePhotonMap, m_volumeLookupSize);
@@ -409,13 +417,13 @@ public:
if (!its.isValid()) {
/* If no intersection could be found, possibly return
attenuated radiance from a background luminaire */
- if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
+ if ((rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters)
LiSurf = scene->evalEnvironment(ray);
return LiSurf * transmittance + LiMedium;
}
/* Possibly include emitted radiance if requested */
- if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
+ if (its.isEmitter() && (rRec.type & RadianceQueryRecord::EEmittedRadiance) && !m_hideEmitters)
LiSurf += its.Le(-ray.d);
/* Include radiance from a subsurface scattering model if requested */
@@ -428,9 +436,17 @@ public:
return LiSurf * transmittance + LiMedium;
unsigned int bsdfType = bsdf->getType() & BSDF::EAll;
- bool isDiffuse = (bsdfType == BSDF::EDiffuseReflection);
- if (isDiffuse || cacheQuery) {
+		/* Irradiance cache query -> treat as diffuse */
+ bool isDiffuse = (bsdfType == BSDF::EDiffuseReflection) || cacheQuery;
+
+ bool hasSpecular = bsdfType & BSDF::EDelta;
+
+ /* Exhaustively recurse into all specular lobes? */
+ bool exhaustiveSpecular = rRec.depth < m_maxSpecularDepth && !cacheQuery;
+
+ if (isDiffuse) {
+ /* 1. Diffuse indirect */
int maxDepth = m_maxDepth == -1 ? INT_MAX : (m_maxDepth-rRec.depth);
if (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance && m_globalPhotonMap.get())
LiSurf += m_globalPhotonMap->estimateIrradiance(its.p,
@@ -442,50 +458,55 @@ public:
m_causticLookupSize) * bsdf->getDiffuseReflectance(its) * INV_PI;
}
- if ((bsdfType & BSDF::EDelta) && (bsdfType & ~BSDF::EDelta) == 0 && rRec.depth < m_maxSpecularDepth && !cacheQuery) {
- if (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) {
- int compCount = bsdf->getComponentCount();
- RadianceQueryRecord rRec2;
- for (int i=0; isample(bRec, Point2(0.0f));
- if (bsdfVal.isZero())
- continue;
+ if (hasSpecular && exhaustiveSpecular
+ && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance)) {
+ /* 1. Specular indirect */
+ int compCount = bsdf->getComponentCount();
+ RadianceQueryRecord rRec2;
+ for (int i=0; igetType(i);
+ if (!(type & BSDF::EDelta))
+ continue;
+ /* Sample the BSDF and recurse */
+ BSDFSamplingRecord bRec(its, rRec.sampler, ERadiance);
+ bRec.component = i;
+ Spectrum bsdfVal = bsdf->sample(bRec, Point2(0.5f));
+ if (bsdfVal.isZero())
+ continue;
- rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadiance);
- RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time);
- if (its.isMediumTransition())
- rRec2.medium = its.getTargetMedium(bsdfRay.d);
+ rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadiance);
+ RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time);
+ if (its.isMediumTransition())
+ rRec2.medium = its.getTargetMedium(bsdfRay.d);
- LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2);
- }
+ LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2);
}
- } else if (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance) {
- /* Estimate the direct illumination if this is requested */
- Point2 *sampleArray;
- Point2 sample;
- int numEmitterSamples = m_directSamples,
- numBSDFSamples;
+ }
- Float weightLum, weightBSDF;
+ /* Estimate the direct illumination if this is requested */
+ int numEmitterSamples = m_directSamples, numBSDFSamples;
+ Float weightLum, weightBSDF;
+ Point2 *sampleArray;
+ Point2 sample;
- if (rRec.depth > 1 || cacheQuery || adaptiveQuery) {
- /* This integrator is used recursively by another integrator.
- Be less accurate as this sample will not directly be observed. */
- numBSDFSamples = numEmitterSamples = 1;
- weightLum = weightBSDF = 1.0f;
+ if (rRec.depth > 1 || cacheQuery || adaptiveQuery) {
+ /* This integrator is used recursively by another integrator.
+ Be less accurate as this sample will not directly be observed. */
+ numBSDFSamples = numEmitterSamples = 1;
+ weightLum = weightBSDF = 1.0f;
+ } else {
+ if (isDiffuse) {
+ numBSDFSamples = m_directSamples;
+ weightBSDF = weightLum = m_invEmitterSamples;
} else {
- if (isDiffuse) {
- numBSDFSamples = m_directSamples;
- weightBSDF = weightLum = m_invEmitterSamples;
- } else {
- numBSDFSamples = m_glossySamples;
- weightLum = m_invEmitterSamples;
- weightBSDF = m_invGlossySamples;
- }
+ numBSDFSamples = m_glossySamples;
+ weightLum = m_invEmitterSamples;
+ weightBSDF = m_invGlossySamples;
}
+ }
+
+ if ((bsdfType & BSDF::ESmooth) && (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance)) {
+ DirectSamplingRecord dRec(its);
if (numEmitterSamples > 1) {
sampleArray = rRec.sampler->next2DArray(m_directSamples);
@@ -493,45 +514,62 @@ public:
sample = rRec.nextSample2D(); sampleArray = &sample;
}
- DirectSamplingRecord dRec(its);
- if (bsdf->getType() & BSDF::ESmooth) {
- for (int i=0; isampleAttenuatedEmitterDirect(
- dRec, its, rRec.medium, interactions,
- sampleArray[i], rRec.sampler);
+ for (int i=0; isampleAttenuatedEmitterDirect(
+ dRec, its, rRec.medium, interactions,
+ sampleArray[i], rRec.sampler);
- /* Estimate the direct illumination if this is requested */
- if (!value.isZero()) {
-					const Emitter *emitter = static_cast<const Emitter *>(dRec.object);
+ /* Estimate the direct illumination if this is requested */
+ if (!value.isZero()) {
+				const Emitter *emitter = static_cast<const Emitter *>(dRec.object);
- /* Allocate a record for querying the BSDF */
- BSDFSamplingRecord bRec(its, its.toLocal(dRec.d));
+ /* Allocate a record for querying the BSDF */
+ BSDFSamplingRecord bRec(its, its.toLocal(dRec.d));
- /* Evaluate BSDF * cos(theta) */
- const Spectrum bsdfVal = bsdf->eval(bRec);
+ /* Evaluate BSDF * cos(theta) */
+ const Spectrum bsdfVal = bsdf->eval(bRec);
- if (!bsdfVal.isZero()) {
- /* Calculate prob. of having sampled that direction
- using BSDF sampling */
- Float bsdfPdf = (emitter->isOnSurface()
- && dRec.measure == ESolidAngle
- && interactions == 0)
- ? bsdf->pdf(bRec) : (Float) 0.0f;
+ if (!bsdfVal.isZero()) {
+ /* Calculate prob. of having sampled that direction
+ using BSDF sampling */
- /* Weight using the power heuristic */
- const Float weight = miWeight(dRec.pdf * numEmitterSamples,
- bsdfPdf * numBSDFSamples) * weightLum;
- LiSurf += value * bsdfVal * weight;
- }
+ if (!hasSpecular || exhaustiveSpecular)
+ bRec.typeMask = BSDF::ESmooth;
+
+ Float bsdfPdf = (emitter->isOnSurface()
+ && dRec.measure == ESolidAngle
+ && interactions == 0)
+ ? bsdf->pdf(bRec) : (Float) 0.0f;
+
+ /* Weight using the power heuristic */
+ const Float weight = miWeight(dRec.pdf * numEmitterSamples,
+ bsdfPdf * numBSDFSamples) * weightLum;
+
+ LiSurf += value * bsdfVal * weight;
}
}
}
+ }
- /* ==================================================================== */
- /* BSDF sampling */
- /* ==================================================================== */
+ /* ==================================================================== */
+ /* BSDF sampling */
+ /* ==================================================================== */
+ /* Sample direct compontent via BSDF sampling if this is generally requested AND
+ the BSDF is smooth, or there is a delta component that was not handled by the
+ exhaustive sampling loop above */
+ bool bsdfSampleDirect = (rRec.type & RadianceQueryRecord::EDirectSurfaceRadiance) &&
+ ((bsdfType & BSDF::ESmooth) || (hasSpecular && !exhaustiveSpecular));
+
+ /* Sample indirect component via BSDF sampling if this is generally requested AND
+ the BSDF is non-diffuse (diffuse is handled by the global photon map)
+ or there is a delta component that was not handled by the exhaustive sampling loop
+ above. */
+ bool bsdfSampleIndirect = (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) &&
+ !isDiffuse && ((bsdfType & BSDF::ESmooth) || (hasSpecular && !exhaustiveSpecular));
+
+ if (bsdfSampleDirect || bsdfSampleIndirect) {
if (numBSDFSamples > 1) {
sampleArray = rRec.sampler->next2DArray(
std::max(m_directSamples, m_glossySamples));
@@ -542,9 +580,13 @@ public:
RadianceQueryRecord rRec2;
Intersection &bsdfIts = rRec2.its;
+ DirectSamplingRecord dRec(its);
for (int i=0; isample(bRec, bsdfPdf, sampleArray[i]);
if (bsdfVal.isZero())
@@ -557,12 +599,12 @@ public:
bool hitEmitter = false;
if (scene->rayIntersect(bsdfRay, bsdfIts)) {
/* Intersected something - check if it was a luminaire */
- if (bsdfIts.isEmitter()) {
+ if (bsdfIts.isEmitter() && bsdfSampleDirect) {
value = bsdfIts.Le(-bsdfRay.d);
dRec.setQuery(bsdfRay, bsdfIts);
hitEmitter = true;
}
- } else {
+ } else if (bsdfSampleDirect) {
/* Intersected nothing -- perhaps there is an environment map? */
const Emitter *env = scene->getEnvironmentEmitter();
@@ -574,8 +616,7 @@ public:
}
if (hitEmitter) {
- const Float emitterPdf = (!(bRec.sampledType & BSDF::EDelta)) ?
- scene->pdfEmitterDirect(dRec) : 0;
+ const Float emitterPdf = scene->pdfEmitterDirect(dRec);
Spectrum transmittance = rRec2.medium ?
rRec2.medium->evalTransmittance(Ray(bsdfRay, 0, bsdfIts.t)) : Spectrum(1.0f);
@@ -587,7 +628,7 @@ public:
}
/* Recurse */
- if (!isDiffuse && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) && !cacheQuery) {
+ if (bsdfSampleIndirect) {
rRec2.recursiveQuery(rRec,
RadianceQueryRecord::ERadianceNoEmission);
rRec2.type ^= RadianceQueryRecord::EIntersection;
@@ -598,37 +639,6 @@ public:
LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2) * weightBSDF;
}
}
- if (true)
- return LiSurf;
- } else if (!isDiffuse && (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance) && !cacheQuery) {
- int numBSDFSamples = (rRec.depth > 1 || adaptiveQuery) ? 1 : m_glossySamples;
- Float weightBSDF;
- Point2 *sampleArray;
- Point2 sample;
-
- if (numBSDFSamples > 1) {
- sampleArray = rRec.sampler->next2DArray(
- std::max(m_directSamples, m_glossySamples));
- weightBSDF = m_invGlossySamples;
- } else {
- sample = rRec.nextSample2D(); sampleArray = &sample;
- weightBSDF = 1.0f;
- }
-
- RadianceQueryRecord rRec2;
- for (int i=0; isample(bRec, bsdfPdf, sampleArray[i]);
- if (bsdfVal.isZero())
- continue;
- rRec2.recursiveQuery(rRec,
- RadianceQueryRecord::ERadianceNoEmission);
-
- RayDifferential bsdfRay(its.p, its.toWorld(bRec.wo), ray.time);
- LiSurf += bsdfVal * m_parentIntegrator->Li(bsdfRay, rRec2) * weightBSDF;
- }
}
return LiSurf * transmittance + LiMedium;
@@ -677,6 +687,7 @@ private:
int m_granularity, m_directSamples, m_glossySamples;
int m_rrDepth, m_maxDepth, m_maxSpecularDepth;
bool m_gatherLocally, m_autoCancelGathering;
+ bool m_hideEmitters;
};
MTS_IMPLEMENT_CLASS_S(PhotonMapIntegrator, false, SamplingIntegrator)
diff --git a/src/integrators/photonmapper/ppm.cpp b/src/integrators/photonmapper/ppm.cpp
index f031791d..d4e2963a 100644
--- a/src/integrators/photonmapper/ppm.cpp
+++ b/src/integrators/photonmapper/ppm.cpp
@@ -43,6 +43,8 @@ MTS_NAMESPACE_BEGIN
* which the implementation will start to use the ``russian roulette''
* path termination criterion. \default{\code{5}}
* }
+ * \parameter{maxPasses}{\Integer}{Maximum number of passes to render (where \code{-1}
+ * corresponds to rendering until stopped manually). \default{\code{-1}}}
* }
* This plugin implements the progressive photon mapping algorithm by Hachisuka et al.
* \cite{Hachisuka2008Progressive}. Progressive photon mapping is a variant of photon
@@ -109,9 +111,14 @@ public:
m_rrDepth = props.getInteger("rrDepth", 3);
/* Indicates if the gathering steps should be canceled if not enough photons are generated. */
m_autoCancelGathering = props.getBoolean("autoCancelGathering", true);
+ /* Maximum number of passes to render. -1 renders until the process is stopped. */
+ m_maxPasses = props.getInteger("maxPasses", -1);
+
m_mutex = new Mutex();
if (m_maxDepth <= 1 && m_maxDepth != -1)
Log(EError, "Maximum depth must either be set to \"-1\" or \"2\" or higher!");
+ if (m_maxPasses <= 0 && m_maxPasses != -1)
+ Log(EError, "Maximum number of Passes must either be set to \"-1\" or \"1\" or higher!");
}
virtual ~PPMIntegrator() {
@@ -242,9 +249,14 @@ public:
}
}
+ #if defined(MTS_OPENMP)
+ Thread::initializeOpenMP(nCores);
+ #endif
+
int it = 0;
- while (m_running)
+ while (m_running && (m_maxPasses == -1 || it < m_maxPasses)) {
photonMapPass(++it, queue, job, film, sceneResID, sensorResID, indepSamplerResID);
+ }
#ifdef MTS_DEBUG_FP
disableFPExceptions();
@@ -384,7 +396,8 @@ public:
<< " initialRadius = " << m_initialRadius << "," << endl
<< " alpha = " << m_alpha << "," << endl
<< " photonCount = " << m_photonCount << "," << endl
- << " granularity = " << m_granularity << endl
+ << " granularity = " << m_granularity << "," << endl
+ << " maxPasses = " << m_maxPasses << endl
<< "]";
return oss.str();
}
@@ -400,6 +413,7 @@ private:
bool m_running;
bool m_autoCancelGathering;
 	ref<Mutex> m_mutex;
+ int m_maxPasses;
};
MTS_IMPLEMENT_CLASS(PPMIntegrator, false, Integrator)
diff --git a/src/integrators/photonmapper/sppm.cpp b/src/integrators/photonmapper/sppm.cpp
index 58811832..a4625631 100644
--- a/src/integrators/photonmapper/sppm.cpp
+++ b/src/integrators/photonmapper/sppm.cpp
@@ -48,6 +48,8 @@ MTS_NAMESPACE_BEGIN
* which the implementation will start to use the ``russian roulette''
* path termination criterion. \default{\code{5}}
* }
+ * \parameter{maxPasses}{\Integer}{Maximum number of passes to render (where \code{-1}
+ * corresponds to rendering until stopped manually). \default{\code{-1}}}
* }
* This plugin implements stochastic progressive photon mapping by Hachisuka et al.
* \cite{Hachisuka2009Stochastic}. This algorithm is an extension of progressive photon
@@ -101,9 +103,13 @@ public:
m_rrDepth = props.getInteger("rrDepth", 3);
/* Indicates if the gathering steps should be canceled if not enough photons are generated. */
m_autoCancelGathering = props.getBoolean("autoCancelGathering", true);
+ /* Maximum number of passes to render. -1 renders until the process is stopped. */
+ m_maxPasses = props.getInteger("maxPasses", -1);
m_mutex = new Mutex();
if (m_maxDepth <= 1 && m_maxDepth != -1)
Log(EError, "Maximum depth must be set to \"2\" or higher!");
+ if (m_maxPasses <= 0 && m_maxPasses != -1)
+ Log(EError, "Maximum number of Passes must either be set to \"-1\" or \"1\" or higher!");
}
SPPMIntegrator(Stream *stream, InstanceManager *manager)
@@ -187,8 +193,12 @@ public:
enableFPExceptions();
#endif
- int it=0;
- while (m_running) {
+#if defined(MTS_OPENMP)
+ Thread::initializeOpenMP(nCores);
+#endif
+
+ int it = 0;
+ while (m_running && (m_maxPasses == -1 || it < m_maxPasses)) {
distributedRTPass(scene, samplers);
photonMapPass(++it, queue, job, film, sceneResID,
sensorResID, samplerResID);
@@ -378,7 +388,8 @@ public:
<< " initialRadius = " << m_initialRadius << "," << endl
<< " alpha = " << m_alpha << "," << endl
<< " photonCount = " << m_photonCount << "," << endl
- << " granularity = " << m_granularity << endl
+ << " granularity = " << m_granularity << "," << endl
+ << " maxPasses = " << m_maxPasses << endl
<< "]";
return oss.str();
}
@@ -395,6 +406,7 @@ private:
size_t m_totalEmitted, m_totalPhotons;
bool m_running;
bool m_autoCancelGathering;
+ int m_maxPasses;
};
MTS_IMPLEMENT_CLASS_S(SPPMIntegrator, false, Integrator)
diff --git a/src/integrators/pssmlt/pssmlt.cpp b/src/integrators/pssmlt/pssmlt.cpp
index 2787aeb4..ad20260d 100644
--- a/src/integrators/pssmlt/pssmlt.cpp
+++ b/src/integrators/pssmlt/pssmlt.cpp
@@ -310,7 +310,8 @@ public:
bool nested = m_config.twoStage && m_config.firstStage;
- Vector2i cropSize = film->getCropSize();;
+ Vector2i cropSize = film->getCropSize();
+ Assert(cropSize.x > 0 && cropSize.y > 0);
Log(EInfo, "Starting %srender job (%ix%i, " SIZE_T_FMT
" %s, " SSE_STR ", approx. " SIZE_T_FMT " mutations/pixel) ..",
nested ? "nested " : "", cropSize.x, cropSize.y,
@@ -319,9 +320,13 @@ public:
size_t desiredMutationsPerWorkUnit =
m_config.technique == PathSampler::EBidirectional ? 100000 : 200000;
- if (m_config.workUnits <= 0)
- m_config.workUnits = std::max((int) std::ceil((cropSize.x
- * cropSize.y * sampleCount) / (Float) desiredMutationsPerWorkUnit), 1);
+ if (m_config.workUnits <= 0) {
+ const size_t cropArea = (size_t) cropSize.x * cropSize.y;
+ const size_t workUnits = ((desiredMutationsPerWorkUnit - 1) +
+ (cropArea * sampleCount)) / desiredMutationsPerWorkUnit;
+ Assert(workUnits <= (size_t) std::numeric_limits::max());
+ m_config.workUnits = (int) std::max(workUnits, (size_t) 1);
+ }
m_config.nMutations = (cropSize.x * cropSize.y *
sampleCount) / m_config.workUnits;
@@ -344,7 +349,7 @@ public:
m_config, directImage, pathSeeds);
m_config.luminance = pathSampler->generateSeeds(m_config.luminanceSamples,
- m_config.workUnits, false, pathSeeds);
+ m_config.workUnits, false, m_config.importanceMap, pathSeeds);
if (!nested)
m_config.dump();
diff --git a/src/integrators/pssmlt/pssmlt_sampler.cpp b/src/integrators/pssmlt/pssmlt_sampler.cpp
index 2b4e8aad..98288e03 100644
--- a/src/integrators/pssmlt/pssmlt_sampler.cpp
+++ b/src/integrators/pssmlt/pssmlt_sampler.cpp
@@ -50,7 +50,7 @@ void PSSMLTSampler::serialize(Stream *stream, InstanceManager *manager) const {
}
void PSSMLTSampler::configure() {
- m_logRatio = -std::log(m_s2/m_s1);
+ m_logRatio = -math::fastlog(m_s2/m_s1);
m_time = 0;
m_largeStepTime = 0;
m_largeStep = false;
diff --git a/src/integrators/ptracer/ptracer_proc.cpp b/src/integrators/ptracer/ptracer_proc.cpp
index b5a2886f..e7c224c2 100644
--- a/src/integrators/ptracer/ptracer_proc.cpp
+++ b/src/integrators/ptracer/ptracer_proc.cpp
@@ -88,8 +88,13 @@ void CaptureParticleWorker::handleEmission(const PositionSamplingRecord &pRec,
DirectSamplingRecord dRec(pRec.p, pRec.time);
int maxInteractions = m_maxPathDepth - 1;
+ /* Create a dummy intersection to ensure that sampleAttenuatedSensorDirect()
+ treats the light source vertex as being located on a surface */
+ Intersection its;
+ its.p = pRec.p;
+
Spectrum value = weight * m_scene->sampleAttenuatedSensorDirect(
- dRec, medium, maxInteractions, m_sampler->next2D(), m_sampler);
+ dRec, its, medium, maxInteractions, m_sampler->next2D(), m_sampler);
if (value.isZero())
return;
@@ -101,7 +106,7 @@ void CaptureParticleWorker::handleEmission(const PositionSamplingRecord &pRec,
m_workResult->put(dRec.uv, (Float *) &value[0]);
}
-void CaptureParticleWorker::handleSurfaceInteraction(int depth,
+void CaptureParticleWorker::handleSurfaceInteraction(int depth, int nullInteractions,
bool caustic, const Intersection &its, const Medium *medium,
const Spectrum &weight) {
@@ -159,7 +164,7 @@ void CaptureParticleWorker::handleSurfaceInteraction(int depth,
m_workResult->put(dRec.uv, (Float *) &value[0]);
}
-void CaptureParticleWorker::handleMediumInteraction(int depth, bool caustic,
+void CaptureParticleWorker::handleMediumInteraction(int depth, int nullInteractions, bool caustic,
const MediumSamplingRecord &mRec, const Medium *medium, const Vector &wi,
const Spectrum &weight) {
diff --git a/src/integrators/ptracer/ptracer_proc.h b/src/integrators/ptracer/ptracer_proc.h
index 15ede4c4..ca12938d 100644
--- a/src/integrators/ptracer/ptracer_proc.h
+++ b/src/integrators/ptracer/ptracer_proc.h
@@ -89,28 +89,31 @@ public:
const bool &stop);
/**
- * Handles particles emitted by a light source - if a connection to the
- * sensor is possible, compute the importance and accumulate in the proper
- * pixel of the accumulation buffer.
+ * \brief Handles particles emitted by a light source
+ *
+ * If a connection to the sensor is possible, compute the importance
+ * and accumulate in the proper pixel of the accumulation buffer.
*/
void handleEmission(const PositionSamplingRecord &pRec,
const Medium *medium, const Spectrum &weight);
/**
- * Handles particles interacting with a surface - if a connection to the
- * sensor is possible, compute the importance and accumulate in the proper
- * pixel of the accumulation buffer.
+ * \brief Handles particles interacting with a surface
+ *
+ * If a connection to the sensor is possible, compute the importance
+ * and accumulate in the proper pixel of the accumulation buffer.
*/
- void handleSurfaceInteraction(int depth, bool caustic,
+ void handleSurfaceInteraction(int depth, int nullInteractions, bool caustic,
const Intersection &its, const Medium *medium,
const Spectrum &weight);
/**
- * Handles particles interacting with a medium - if a connection to the
- * sensor is possible, compute the importance and accumulate in the proper
- * pixel of the accumulation buffer.
+ * \brief Handles particles interacting with a medium
+ *
+ * If a connection to the sensor is possible, compute the importance
+ * and accumulate in the proper pixel of the accumulation buffer.
*/
- void handleMediumInteraction(int depth, bool caustic,
+ void handleMediumInteraction(int depth, int nullInteractions, bool caustic,
const MediumSamplingRecord &mRec, const Medium *medium,
const Vector &wi, const Spectrum &weight);
diff --git a/src/libbidir/SConscript b/src/libbidir/SConscript
index 5a78630c..69161888 100644
--- a/src/libbidir/SConscript
+++ b/src/libbidir/SConscript
@@ -9,7 +9,7 @@ bidirEnv.Append(CPPDEFINES = [['MTS_BUILD_MODULE', 'MTS_MODULE_BIDIR']])
libbidir = bidirEnv.SharedLibrary('mitsuba-bidir', [
'common.cpp', 'rsampler.cpp', 'vertex.cpp', 'edge.cpp',
'path.cpp', 'verification.cpp', 'util.cpp', 'pathsampler.cpp',
- 'mut_bidir.cpp', 'mut_lens.cpp', 'mut_caustic.cpp',
+ 'mut_bidir.cpp', 'mut_lens.cpp', 'mut_caustic.cpp',
'mut_mchain.cpp', 'manifold.cpp', 'mut_manifold.cpp'
])
diff --git a/src/libbidir/edge.cpp b/src/libbidir/edge.cpp
index 6ad50fb1..4e867899 100644
--- a/src/libbidir/edge.cpp
+++ b/src/libbidir/edge.cpp
@@ -48,6 +48,9 @@ bool PathEdge::sampleNext(const Scene *scene, Sampler *sampler,
return false;
}
+ if (length == 0)
+ return false;
+
if (!medium) {
weight[ERadiance] = weight[EImportance] = Spectrum(1.0f);
pdf[ERadiance] = pdf[EImportance] = 1.0f;
@@ -103,6 +106,9 @@ bool PathEdge::perturbDirection(const Scene *scene,
}
d = ray.d;
+ if (length == 0)
+ return false;
+
if (!medium) {
weight[ERadiance] = weight[EImportance] = Spectrum(1.0f);
pdf[ERadiance] = pdf[EImportance] = 1.0f;
diff --git a/src/libbidir/manifold.cpp b/src/libbidir/manifold.cpp
index c5690da0..f6fd5aa4 100644
--- a/src/libbidir/manifold.cpp
+++ b/src/libbidir/manifold.cpp
@@ -643,7 +643,11 @@ bool SpecularManifold::update(Path &path, int start, int end) {
step = -1; mode = ERadiance;
}
- for (int j=0, i=start; j < (int) m_vertices.size()-2; ++j, i += step) {
+ int last = (int) m_vertices.size() - 2;
+ if (m_vertices[0].type == EPinnedDirection)
+ last = std::max(last, 1);
+
+ for (int j=0, i=start; j < last; ++j, i += step) {
const SimpleVertex
&v = m_vertices[j],
&vn = m_vertices[j+1];
@@ -664,7 +668,8 @@ bool SpecularManifold::update(Path &path, int start, int end) {
PathVertex::EMediumInteraction : PathVertex::ESurfaceInteraction;
if (v.type == EPinnedDirection) {
- /* Create a fake vertex and use it to call sampleDirect() */
+ /* Create a fake vertex and use it to call sampleDirect(). This is
+ kind of terrible -- a nicer API is needed to cleanly support this */
PathVertex temp;
temp.type = PathVertex::EMediumInteraction;
temp.degenerate = false;
@@ -681,7 +686,7 @@ bool SpecularManifold::update(Path &path, int start, int end) {
return false;
}
- if (m_vertices.size() > 3) {
+ if (m_vertices.size() >= 3) {
PathVertex *succ2 = path.vertex(i+2*step);
PathEdge *succ2Edge = path.edge(predEdgeIdx + 2*step);
if (!succ->sampleNext(m_scene, NULL, vertex, succEdge, succ2Edge, succ2, mode)) {
@@ -863,37 +868,30 @@ Float SpecularManifold::det(const Path &path, int a, int b, int c) {
}
Float SpecularManifold::multiG(const Path &path, int a, int b) {
- if (a == 0) {
+ if (a == 0)
++a;
- if (!path.vertex(a)->isConnectable())
- ++a;
- } else if (a == path.length()) {
+ else if (a == path.length())
--a;
- if (!path.vertex(a)->isConnectable())
- --a;
- }
- if (b == 0) {
+ if (b == 0)
++b;
- if (!path.vertex(b)->isConnectable())
- ++b;
- } else if (b == path.length()) {
+ else if (b == path.length())
--b;
- if (!path.vertex(b)->isConnectable())
- --b;
- }
+ int step = b > a ? 1 : -1;
+ while (!path.vertex(b)->isConnectable())
+ b -= step;
+ while (!path.vertex(a)->isConnectable())
+ a += step;
- int step = b > a ? 1 : -1, start = a;
Float result = 1;
BDAssert(path.vertex(a)->isConnectable() && path.vertex(b)->isConnectable());
- for (int i = a + step; i != b + step; i += step) {
+ for (int i = a + step, start = a; i != b + step; i += step) {
if (path.vertex(i)->isConnectable()) {
result *= G(path, start, i);
start = i;
}
}
- BDAssert(start == b);
return result;
}
@@ -915,7 +913,7 @@ Float SpecularManifold::G(const Path &path, int a, int b) {
SimpleVertex &last = m_vertices[m_vertices.size()-1];
const PathVertex *vb = path.vertex(b);
- if (vb->isMediumInteraction()) {
+ if (!vb->isOnSurface()) {
last.n = Vector(path.edge(a < b ? (b-1) : b)->d);
} else {
last.n = vb->getShadingNormal();
diff --git a/src/libbidir/mut_lens.cpp b/src/libbidir/mut_lens.cpp
index 107702e8..e5ce662d 100644
--- a/src/libbidir/mut_lens.cpp
+++ b/src/libbidir/mut_lens.cpp
@@ -100,7 +100,7 @@ bool LensPerturbation::sampleMutation(
return false;
Float focusDistance = sensor->getFocusDistance() /
- absDot(sensor->getInverseViewTransform(0)(Vector(0,0,1)), ray.d);
+ absDot(sensor->getWorldTransform(0)(Vector(0,0,1)), ray.d);
/* Correct direction based on the current aperture sample.
This is necessary to support thin lens cameras */
diff --git a/src/libbidir/mut_manifold.cpp b/src/libbidir/mut_manifold.cpp
index 52b9e313..5835456f 100644
--- a/src/libbidir/mut_manifold.cpp
+++ b/src/libbidir/mut_manifold.cpp
@@ -274,6 +274,7 @@ bool ManifoldPerturbation::sampleMutation(
proposal.append(m_pool.allocEdge());
}
proposal.append(source, m, k+1);
+
proposal.vertex(a) = proposal.vertex(a)->clone(m_pool);
proposal.vertex(c) = proposal.vertex(c)->clone(m_pool);
@@ -521,8 +522,8 @@ bool ManifoldPerturbation::sampleMutation(
}
}
- if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
- (vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) {
+ if (((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
+ (vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) && b+1 != k && b-1 != 0) {
LockGuard guard(m_thetaDiffMutex);
if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
@@ -586,6 +587,7 @@ bool ManifoldPerturbation::sampleMutation(
}
}
}
+
if (!PathVertex::connect(m_scene,
proposal.vertexOrNull(q-1),
proposal.edgeOrNull(q-1),
@@ -662,6 +664,10 @@ Float ManifoldPerturbation::Q(const Path &source, const Path &proposal,
if (prob == 0)
return 0.0f;
weight /= prob;
+
+ /* Catch very low probabilities which round to +inf in the above division operation */
+ if (!std::isfinite(weight.average()))
+ return 0.0f;
} else {
Frame frame(source.vertex(a+step)->getGeometricNormal());
diff --git a/src/libbidir/mut_mchain.cpp b/src/libbidir/mut_mchain.cpp
index 4e08b2c4..d5d119b3 100644
--- a/src/libbidir/mut_mchain.cpp
+++ b/src/libbidir/mut_mchain.cpp
@@ -103,7 +103,7 @@ bool MultiChainPerturbation::sampleMutation(
return false;
Float focusDistance = sensor->getFocusDistance() /
- absDot(sensor->getInverseViewTransform(0)(Vector(0,0,1)), ray.d);
+ absDot(sensor->getWorldTransform(0)(Vector(0,0,1)), ray.d);
/* Correct direction based on the current aperture sample.
This is necessary to support thin lens cameras */
diff --git a/src/libbidir/pathsampler.cpp b/src/libbidir/pathsampler.cpp
index 2aef655c..2c177d21 100644
--- a/src/libbidir/pathsampler.cpp
+++ b/src/libbidir/pathsampler.cpp
@@ -579,12 +579,26 @@ Float PathSampler::computeAverageLuminance(size_t sampleCount) {
return mean;
}
-static void seedCallback(std::vector &output, int s, int t, Float weight, Path &) {
+static void seedCallback(std::vector &output, const Bitmap *importanceMap,
+ Float &accum, int s, int t, Float weight, Path &path) {
+ accum += weight;
+
+ if (importanceMap) {
+ const Float *luminanceValues = importanceMap->getFloatData();
+ Vector2i size = importanceMap->getSize();
+
+ const Point2 &pos = path.getSamplePosition();
+ Point2i intPos(
+ std::min(std::max(0, (int) pos.x), size.x-1),
+ std::min(std::max(0, (int) pos.y), size.y-1));
+ weight /= luminanceValues[intPos.x + intPos.y * size.x];
+ }
+
output.push_back(PathSeed(0, weight, s, t));
}
Float PathSampler::generateSeeds(size_t sampleCount, size_t seedCount,
- bool fineGrained, std::vector &seeds) {
+ bool fineGrained, const Bitmap *importanceMap, std::vector &seeds) {
Log(EInfo, "Integrating luminance values over the image plane ("
SIZE_T_FMT " samples)..", sampleCount);
@@ -596,39 +610,40 @@ Float PathSampler::generateSeeds(size_t sampleCount, size_t seedCount,
tempSeeds.reserve(sampleCount);
SplatList splatList;
+ Float luminance;
PathCallback callback = boost::bind(&seedCallback,
- boost::ref(tempSeeds), _1, _2, _3, _4);
+ boost::ref(tempSeeds), importanceMap, boost::ref(luminance),
+ _1, _2, _3, _4);
Float mean = 0.0f, variance = 0.0f;
for (size_t i=0; igetSampleIndex();
- Float lum = 0.0f;
+ luminance = 0.0f;
if (fineGrained) {
samplePaths(Point2i(-1), callback);
/* Fine seed granularity (e.g. for Veach-MLT).
Set the correct the sample index value */
- for (size_t j = seedIndex; jgetFloatData();
+ Vector2i size = importanceMap->getSize();
+
+ const Point2 &pos = path.getSamplePosition();
+ Point2i intPos(
+ std::min(std::max(0, (int) pos.x), size.x-1),
+ std::min(std::max(0, (int) pos.y), size.y-1));
+ weight /= luminanceValues[intPos.x + intPos.y * size.x];
+ }
+
if (seed.luminance != weight)
SLog(EError, "Internal error in reconstructPath(): luminances "
"don't match (%f vs %f)!", weight, seed.luminance);
@@ -669,7 +695,7 @@ static void reconstructCallback(const PathSeed &seed, Path &result, MemoryPool &
}
}
-void PathSampler::reconstructPath(const PathSeed &seed, Path &result) {
+void PathSampler::reconstructPath(const PathSeed &seed, const Bitmap *importanceMap, Path &result) {
ReplayableSampler *rplSampler = static_cast(m_sensorSampler.get());
Assert(result.length() == 0);
@@ -678,7 +704,8 @@ void PathSampler::reconstructPath(const PathSeed &seed, Path &result) {
rplSampler->setSampleIndex(seed.sampleIndex);
PathCallback callback = boost::bind(&reconstructCallback,
- boost::cref(seed), boost::ref(result), boost::ref(m_pool), _1, _2, _3, _4);
+ boost::cref(seed), importanceMap,
+ boost::ref(result), boost::ref(m_pool), _1, _2, _3, _4);
samplePaths(Point2i(-1), callback);
diff --git a/src/libbidir/vertex.cpp b/src/libbidir/vertex.cpp
index 1729a1fe..7bfc193e 100644
--- a/src/libbidir/vertex.cpp
+++ b/src/libbidir/vertex.cpp
@@ -245,7 +245,7 @@ bool PathVertex::sampleNext(const Scene *scene, Sampler *sampler,
ray.setDirection(pRec.wo);
measure = ESolidAngle;
- if (!(phase->getType() & BSDF::ENonSymmetric)) {
+ if (!(phase->getType() & PhaseFunction::ENonSymmetric)) {
/* Make use of symmetry -- no need to re-evaluate */
pdf[1-mode] = pdf[mode];
weight[1-mode] = weight[mode];
@@ -636,7 +636,7 @@ bool PathVertex::perturbDirection(const Scene *scene, const PathVertex *pred,
measure = ESolidAngle;
- if (!(phase->getType() & BSDF::ENonSymmetric)) {
+ if (!(phase->getType() & PhaseFunction::ENonSymmetric)) {
/* Make use of symmetry -- no need to re-evaluate */
pdf[1-mode] = pdf[mode];
weight[1-mode] = weight[mode];
@@ -710,7 +710,13 @@ bool PathVertex::propagatePerturbation(const Scene *scene, const PathVertex *pre
bRec.typeMask = BSDF::EAll;
Float prob = bsdf->pdf(bRec, EDiscrete);
- weight[mode] = bsdf->eval(bRec, EDiscrete)/prob;
+ if (prob == 0) {
+ SLog(EWarn, "Unable to recreate specular vertex in perturbation (bsdf=%s)",
+ bsdf->toString().c_str());
+ return false;
+ }
+
+ weight[mode] = bsdf->eval(bRec, EDiscrete) / prob;
pdf[mode] = prob;
measure = EDiscrete;
componentType = componentType_;
@@ -1139,6 +1145,9 @@ bool PathVertex::cast(const Scene *scene, EVertexType desired) {
PositionSamplingRecord pRec(its);
pRec.object = sensor;
pRec.pdf = 0.0f;
+
+ Vector2i size = sensor->getFilm()->getSize();
+ pRec.uv.x *= size.x; pRec.uv.y *= size.y;
getPositionSamplingRecord() = pRec;
degenerate = sensor->getType() & Sensor::EDeltaDirection;
diff --git a/src/libcore/CMakeLists.txt b/src/libcore/CMakeLists.txt
index c646daf8..090d23dd 100644
--- a/src/libcore/CMakeLists.txt
+++ b/src/libcore/CMakeLists.txt
@@ -58,17 +58,18 @@ set(HDRS
${INCLUDE_DIR}/sfcurve.h
${INCLUDE_DIR}/shvector.h
${INCLUDE_DIR}/spectrum.h
+ ${INCLUDE_DIR}/spline.h
${INCLUDE_DIR}/sse.h
${INCLUDE_DIR}/ssemath.h
${INCLUDE_DIR}/ssevector.h
${INCLUDE_DIR}/sshstream.h
${INCLUDE_DIR}/sstream.h
${INCLUDE_DIR}/statistics.h
- ${INCLUDE_DIR}/stl.h
${INCLUDE_DIR}/stream.h
${INCLUDE_DIR}/thread.h
${INCLUDE_DIR}/timer.h
${INCLUDE_DIR}/tls.h
+ ${INCLUDE_DIR}/track.h
${INCLUDE_DIR}/transform.h
${INCLUDE_DIR}/triangle.h
${INCLUDE_DIR}/util.h
@@ -108,6 +109,7 @@ set(SRCS
serialization.cpp
shvector.cpp
spectrum.cpp
+ spline.cpp
ssemath.cpp
sshstream.cpp
sstream.cpp
@@ -116,6 +118,7 @@ set(SRCS
thread.cpp
timer.cpp
tls.cpp
+ track.cpp
transform.cpp
triangle.cpp
util.cpp
@@ -141,7 +144,7 @@ set(LIBS ${ZLIB_LIBRARIES} ${PNG_LIBRARIES} ${JPEG_LIBRARIES}
${ILMBASE_LIBRARIES} ${OPENEXR_LIBRARIES}
${Boost_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS})
if(WIN32)
- list(APPEND LIBS ws2_32)
+ list(APPEND LIBS ws2_32 psapi)
elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
list(APPEND LIBS rt)
elseif (APPLE)
diff --git a/src/libcore/SConscript b/src/libcore/SConscript
index 62ba5036..57e69154 100644
--- a/src/libcore/SConscript
+++ b/src/libcore/SConscript
@@ -24,17 +24,22 @@ if coreEnv.has_key('JPEGLIB'):
coreEnv.Prepend(LIBS=env['JPEGLIB'])
coreEnv.Prepend(CPPDEFINES = [['MTS_BUILD_MODULE', 'MTS_MODULE_CORE']])
+
+
+if sys.platform == 'win32':
+ coreEnv.Append(LIBS=['psapi'])
+
libcore_objects = [
'class.cpp', 'object.cpp', 'statistics.cpp', 'thread.cpp', 'brent.cpp',
'logger.cpp', 'appender.cpp', 'formatter.cpp', 'lock.cpp', 'qmc.cpp',
'random.cpp', 'timer.cpp', 'util.cpp', 'properties.cpp', 'half.cpp',
- 'transform.cpp', 'spectrum.cpp', 'aabb.cpp', 'stream.cpp',
+ 'transform.cpp', 'spectrum.cpp', 'aabb.cpp', 'stream.cpp',
'fstream.cpp', 'plugin.cpp', 'triangle.cpp', 'bitmap.cpp',
- 'fmtconv.cpp', 'serialization.cpp', 'sstream.cpp', 'cstream.cpp',
- 'mstream.cpp', 'sched.cpp', 'sched_remote.cpp', 'sshstream.cpp',
+ 'fmtconv.cpp', 'serialization.cpp', 'sstream.cpp', 'cstream.cpp',
+ 'mstream.cpp', 'sched.cpp', 'sched_remote.cpp', 'sshstream.cpp',
'zstream.cpp', 'shvector.cpp', 'fresolver.cpp', 'rfilter.cpp',
'quad.cpp', 'mmap.cpp', 'chisquare.cpp', 'warp.cpp', 'vmf.cpp',
- 'tls.cpp', 'ssemath.cpp'
+ 'tls.cpp', 'ssemath.cpp', 'spline.cpp', 'track.cpp'
]
# Add some platform-specific components
diff --git a/src/libcore/bitmap.cpp b/src/libcore/bitmap.cpp
index a29cc62d..99b93ae3 100644
--- a/src/libcore/bitmap.cpp
+++ b/src/libcore/bitmap.cpp
@@ -38,6 +38,11 @@
#include
#include
#include
+#include
+#include
+#include
+#include
+#include
#include
#include
#include
@@ -233,7 +238,7 @@ extern "C" {
p->mgr.free_in_buffer = 0;
}
- METHODDEF(void) jpeg_error_exit (j_common_ptr cinfo) {
+ METHODDEF(void) jpeg_error_exit (j_common_ptr cinfo) throw(std::runtime_error) {
char msg[JMSG_LENGTH_MAX];
(*cinfo->err->format_message) (cinfo, msg);
SLog(EError, "Critcal libjpeg error: %s", msg);
@@ -386,19 +391,6 @@ int Bitmap::getBytesPerComponent() const {
}
}
-
-void Bitmap::setString(const std::string &key, const std::string &value) {
- m_metadata[key] = value;
-}
-
-std::string Bitmap::getString(const std::string &key) const {
- std::map::const_iterator it = m_metadata.find(key);
- if (it != m_metadata.end())
- return it->second;
- else
- return "";
-}
-
Bitmap::~Bitmap() {
if (m_data)
freeAligned(m_data);
@@ -430,47 +422,113 @@ void Bitmap::flipVertically() {
}
}
-void Bitmap::accumulate(const Bitmap *bitmap, const Point2i &offset) {
+ref Bitmap::rotateFlip(ERotateFlipType type) const {
+ /* Based on the GDI+ rotate/flip function in Wine */
+ if (m_componentFormat == EBitmask)
+ Log(EError, "Transformations involving bitmasks are currently not supported!");
+
+ int width = m_size.x, height = m_size.y;
+ bool flip_x = (type & 6) == 2 || (type & 6) == 4;
+ bool flip_y = (type & 3) == 1 || (type & 3) == 2;
+ bool rotate_90 = type & 1;
+
+ if (rotate_90)
+ std::swap(width, height);
+
+ ref result = new Bitmap(m_pixelFormat, m_componentFormat,
+ Vector2i(width, height), m_channelCount);
+
+ ssize_t bypp = getBytesPerPixel(),
+ src_stride = m_size.x * bypp,
+ dst_stride = width * bypp;
+
+ uint8_t *dst = result->getUInt8Data();
+ uint8_t *dst_row = dst, *src_row = m_data;
+
+ if (flip_x)
+ src_row += bypp * (m_size.x - 1);
+
+ if (flip_y)
+ src_row += src_stride * (m_size.y - 1);
+
+ ssize_t src_x_step, src_y_step;
+ if (rotate_90) {
+ src_x_step = flip_y ? -src_stride : src_stride;
+ src_y_step = flip_x ? -bypp : bypp;
+ } else {
+ src_x_step = flip_x ? -bypp : bypp;
+ src_y_step = flip_y ? -src_stride : src_stride;
+ }
+
+ for (int y=0; ygetPixelFormat() &&
getComponentFormat() == bitmap->getComponentFormat() &&
getChannelCount() == bitmap->getChannelCount());
- const int
- offsetX = std::max(offset.x, 0),
- offsetY = std::max(offset.y, 0),
- endX = std::min(offset.x + bitmap->getSize().x, m_size.x),
- endY = std::min(offset.y + bitmap->getSize().y, m_size.y);
+ Vector2i offsetIncrease(
+ std::max(0, std::max(-sourceOffset.x, -targetOffset.x)),
+ std::max(0, std::max(-sourceOffset.y, -targetOffset.y))
+ );
- if (offsetX >= endX || offsetY >= endY)
+ sourceOffset += offsetIncrease;
+ targetOffset += offsetIncrease;
+ size -= offsetIncrease;
+
+ Vector2i sizeDecrease(
+ std::max(0, std::max(sourceOffset.x + size.x - bitmap->getWidth(), targetOffset.x + size.x - getWidth())),
+ std::max(0, std::max(sourceOffset.y + size.y - bitmap->getHeight(), targetOffset.y + size.y - getHeight())));
+
+ size -= sizeDecrease;
+
+ if (size.x <= 0 || size.y <= 0)
return;
const size_t
- columns = (endX - offsetX) * m_channelCount,
+ columns = size.x * m_channelCount,
pixelStride = getBytesPerPixel(),
- sourceStride = bitmap->getSize().x * pixelStride,
- targetStride = m_size.x * pixelStride;
+ sourceStride = bitmap->getWidth() * pixelStride,
+ targetStride = getWidth() * pixelStride;
const uint8_t *source = bitmap->getUInt8Data() +
- (offsetX - offset.x + (offsetY - offset.y) * bitmap->getSize().x) * pixelStride;
+ (sourceOffset.x + sourceOffset.y * (size_t) bitmap->getWidth()) * pixelStride;
uint8_t *target = m_data +
- (offsetX + offsetY * m_size.x) * pixelStride;
+ (targetOffset.x + targetOffset.y * (size_t) m_size.x) * pixelStride;
- for (int y = offsetY; y < endY; ++y) {
+ for (int y = 0; y < size.y; ++y) {
switch (m_componentFormat) {
case EUInt8:
for (size_t i = 0; i < columns; ++i)
- ((uint8_t *) target)[i] += ((uint8_t *) source)[i];
+ ((uint8_t *) target)[i] = (uint8_t) std::min(0xFF, ((uint8_t *) source)[i] + ((uint8_t *) target)[i]);
+
break;
case EUInt16:
for (size_t i = 0; i < columns; ++i)
- ((uint16_t *) target)[i] += ((uint16_t *) source)[i];
+ ((uint16_t *) target)[i] = (uint16_t) std::min(0xFFFF, ((uint16_t *) source)[i] + ((uint16_t *) target)[i]);
break;
case EUInt32:
for (size_t i = 0; i < columns; ++i)
- ((uint32_t *) target)[i] += ((uint32_t *) source)[i];
+ ((uint32_t *) target)[i] = std::min((uint32_t) 0xFFFFFFFFUL, ((uint32_t *) source)[i] + ((uint32_t *) target)[i]);
break;
case EFloat16:
@@ -554,8 +612,9 @@ void Bitmap::setPixel(const Point2i &pos, const Spectrum &value) {
}
void Bitmap::drawHLine(int y, int x1, int x2, const Spectrum &value) {
- AssertEx( y >= 0 && y < m_size.y &&
- x1 >= 0 && x2 < m_size.x, "Bitmap::drawVLine(): out of bounds!");
+ if (y < 0 || y >= m_size.y)
+ return;
+ x1 = std::max(x1, 0); x2 = std::min(x2, m_size.x-1);
const FormatConverter *cvt = FormatConverter::getInstance(
std::make_pair(EFloat, m_componentFormat)
@@ -574,8 +633,9 @@ void Bitmap::drawHLine(int y, int x1, int x2, const Spectrum &value) {
}
void Bitmap::drawVLine(int x, int y1, int y2, const Spectrum &value) {
- AssertEx( x >= 0 && x < m_size.x &&
- y1 >= 0 && y2 < m_size.y, "Bitmap::drawVLine(): out of bounds!");
+ if (x < 0 || x >= m_size.x)
+ return;
+ y1 = std::max(y1, 0); y2 = std::min(y2, m_size.y-1);
const FormatConverter *cvt = FormatConverter::getInstance(
std::make_pair(EFloat, m_componentFormat)
@@ -601,9 +661,12 @@ void Bitmap::drawRect(const Point2i &offset, const Vector2i &size, const Spectru
drawVLine(offset.x + size.x - 1, offset.y, offset.y + size.y - 1, value);
}
-void Bitmap::fill(const Point2i &offset, const Vector2i &size, const Spectrum &value) {
- AssertEx(offset.x >= 0 && offset.x + size.x <= m_size.x &&
- offset.y >= 0 && offset.y + size.y <= m_size.y, "Bitmap::fill(): out of bounds!");
+void Bitmap::fillRect(Point2i offset, Vector2i size, const Spectrum &value) {
+ int sx = std::max(0, -offset.x), sy = std::max(0, -offset.y);
+ size.x -= sx; size.y -= sy; offset.x += sx; offset.y += sy;
+
+ size.x -= std::max(0, offset.x + size.x - m_size.x);
+ size.y -= std::max(0, offset.y + size.y - m_size.y);
const FormatConverter *cvt = FormatConverter::getInstance(
std::make_pair(EFloat, m_componentFormat)
@@ -897,7 +960,9 @@ ref Bitmap::separateChannel(int channelIndex) {
if (channelIndex == 0 && channelCount == 1)
return this;
- Assert(channelIndex > 0 && channelIndex < channelCount);
+ if (channelIndex < 0 || channelIndex >= channelCount)
+ Log(EError, "Bitmap::separateChannel(%i): channel index "
+ "must be between 0 and %i", channelIndex, channelCount-1);
ref result = new Bitmap(ELuminance, m_componentFormat, m_size);
result->setMetadata(m_metadata);
@@ -995,6 +1060,79 @@ ref Bitmap::crop(const Point2i &offset, const Vector2i &size) const {
return result;
}
+void Bitmap::applyMatrix(Float matrix_[3][3]) {
+ int stride = 0;
+
+ if (m_pixelFormat == ERGB || m_pixelFormat == EXYZ)
+ stride = 3;
+ else if (m_pixelFormat == ERGBA || m_pixelFormat == EXYZA)
+ stride = 4;
+ else
+ Log(EError, "Bitmap::applyMatrix(): unsupported pixel format!");
+
+ size_t pixels = (size_t) m_size.x * (size_t) m_size.y;
+
+ switch (m_componentFormat) {
+ case EFloat16: {
+ float matrix[3][3];
+ half *data = getFloat16Data();
+ for (int i=0; i<3; ++i)
+ for (int j=0; j<3; ++j)
+ matrix[i][j] = (float) matrix_[i][j];
+
+ for (size_t i=0; i static void resample(const ReconstructionFilter *rfilter,
ReconstructionFilter::EBoundaryCondition bch,
@@ -1099,12 +1237,17 @@ std::string Bitmap::toString() const {
<< " type = " << m_pixelFormat << endl
<< " componentFormat = " << m_componentFormat << endl
<< " size = " << m_size.toString() << endl;
- if (!m_metadata.empty()) {
+
+ std::vector keys = m_metadata.getPropertyNames();
+ if (!keys.empty()) {
oss << " metadata = {" << endl;
- for (std::map::const_iterator it = m_metadata.begin();
- it != m_metadata.end();) {
- oss << " \"" << it->first << "\" => \"" << it->second << "\"";
- if (++it != m_metadata.end())
+ for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ) {
+ std::string value = m_metadata.getAsString(*it);
+ if (value.size() > 50)
+ value = value.substr(0, 50) + ".. [truncated]";
+
+ oss << " \"" << *it << "\" => \"" << value << "\"";
+ if (++it != keys.end())
oss << ",";
oss << endl;
}
@@ -1191,7 +1334,7 @@ void Bitmap::readPNG(Stream *stream) {
png_get_text(png_ptr, info_ptr, &text_ptr, &textIdx);
for (int i=0; ikey] = text_ptr->text;
+ setMetadataString(text_ptr->key, text_ptr->text);
int intent; double gamma;
if (png_get_sRGB(png_ptr, info_ptr, &intent)) {
@@ -1267,20 +1410,23 @@ void Bitmap::writePNG(Stream *stream, int compression) const {
png_text *text = NULL;
- std::map metadata = m_metadata;
- metadata["generated-by"] = "Mitsuba version " MTS_VERSION;
+ Properties metadata(m_metadata);
+ metadata.setString("generatedBy", "Mitsuba version " MTS_VERSION);
- text = new png_text[metadata.size()];
- memset(text, 0, sizeof(png_text) * metadata.size());
- int textIndex = 0;
- for (std::map::iterator it = metadata.begin();
- it != metadata.end(); ++it) {
- text[textIndex].key = const_cast(it->first.c_str());
- text[textIndex].text = const_cast(it->second.c_str());
- text[textIndex++].compression = PNG_TEXT_COMPRESSION_NONE;
+ std::vector keys = metadata.getPropertyNames();
+ std::vector values(keys.size());
+
+ text = new png_text[keys.size()];
+ memset(text, 0, sizeof(png_text) * keys.size());
+
+ for (size_t i = 0; i(keys[i].c_str());
+ text[i].text = const_cast(values[i].c_str());
+ text[i].compression = PNG_TEXT_COMPRESSION_NONE;
}
- png_set_text(png_ptr, info_ptr, text, textIndex);
+ png_set_text(png_ptr, info_ptr, text, (int) keys.size());
if (m_gamma == -1)
png_set_sRGB_gAMA_and_cHRM(png_ptr, info_ptr, PNG_sRGB_INTENT_ABSOLUTE);
@@ -1575,11 +1721,37 @@ void Bitmap::readOpenEXR(Stream *stream, const std::string &_prefix) {
/* Load metadata if present */
for (Imf::Header::ConstIterator it = header.begin(); it != header.end(); ++it) {
std::string name = it.name(), typeName = it.attribute().typeName();
- const Imf::StringAttribute *sattr = NULL;
+ const Imf::StringAttribute *sattr;
+ const Imf::IntAttribute *iattr;
+ const Imf::FloatAttribute *fattr;
+ const Imf::DoubleAttribute *dattr;
+ const Imf::V3fAttribute *vattr;
+ const Imf::M44fAttribute *mattr;
if (typeName == "string" &&
(sattr = header.findTypedAttribute(name.c_str())))
- m_metadata[name] = sattr->value();
+ m_metadata.setString(name, sattr->value());
+ else if (typeName == "int" &&
+ (iattr = header.findTypedAttribute(name.c_str())))
+ m_metadata.setInteger(name, iattr->value());
+ else if (typeName == "float" &&
+ (fattr = header.findTypedAttribute(name.c_str())))
+ m_metadata.setFloat(name, (Float) fattr->value());
+ else if (typeName == "double" &&
+ (dattr = header.findTypedAttribute(name.c_str())))
+ m_metadata.setFloat(name, (Float) dattr->value());
+ else if (typeName == "v3f" &&
+ (vattr = header.findTypedAttribute(name.c_str()))) {
+ Imath::V3f vec = vattr->value();
+ m_metadata.setVector(name, Vector(vec.x, vec.y, vec.z));
+ } else if (typeName == "m44f" &&
+ (mattr = header.findTypedAttribute(name.c_str()))) {
+ Matrix4x4 M;
+ for (int i=0; i<4; ++i)
+ for (int j=0; j<4; ++j)
+ M(i, j) = mattr->value().x[i][j];
+ m_metadata.setTransform(name, Transform(M));
+ }
}
updateChannelCount();
@@ -1806,13 +1978,45 @@ void Bitmap::writeOpenEXR(Stream *stream,
pixelFormat = ERGBA;
#endif
- std::map metadata = m_metadata;
- metadata["generated-by"] = "Mitsuba version " MTS_VERSION;
+ Properties metadata(m_metadata);
+ metadata.setString("generatedBy", "Mitsuba version " MTS_VERSION);
+
+ std::vector keys = metadata.getPropertyNames();
Imf::Header header(m_size.x, m_size.y);
- for (std::map::const_iterator it = metadata.begin();
- it != metadata.end(); ++it)
- header.insert(it->first.c_str(), Imf::StringAttribute(it->second.c_str()));
+ for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ++it) {
+ Properties::EPropertyType type = metadata.getType(*it);
+
+ switch (type) {
+ case Properties::EString:
+ header.insert(it->c_str(), Imf::StringAttribute(metadata.getString(*it)));
+ break;
+ case Properties::EInteger:
+ header.insert(it->c_str(), Imf::IntAttribute(metadata.getInteger(*it)));
+ break;
+ case Properties::EFloat:
+ header.insert(it->c_str(), Imf::FloatAttribute((float) metadata.getFloat(*it)));
+ break;
+ case Properties::EPoint: {
+ Point val = metadata.getPoint(*it);
+ header.insert(it->c_str(), Imf::V3fAttribute(
+ Imath::V3f((float) val.x, (float) val.y, (float) val.z)));
+ }
+ break;
+ case Properties::ETransform: {
+ Matrix4x4 val = metadata.getTransform(*it).getMatrix();
+ header.insert(it->c_str(), Imf::M44fAttribute(Imath::M44f(
+ (float) val(0, 0), (float) val(0, 1), (float) val(0, 2), (float) val(0, 3),
+ (float) val(1, 0), (float) val(1, 1), (float) val(1, 2), (float) val(1, 3),
+ (float) val(2, 0), (float) val(2, 1), (float) val(2, 2), (float) val(2, 3),
+ (float) val(3, 0), (float) val(3, 1), (float) val(3, 2), (float) val(3, 3))));
+ }
+ break;
+ default:
+ header.insert(it->c_str(), Imf::StringAttribute(metadata.getAsString(*it)));
+ break;
+ }
+ }
if (pixelFormat == EXYZ || pixelFormat == EXYZA) {
Imf::addChromaticities(header, Imf::Chromaticities(
@@ -2273,14 +2477,16 @@ void Bitmap::writeRGBE(Stream *stream) const {
Log(EError, "writeRGBE(): pixel format must be ERGB or ERGBA!");
stream->writeLine("#?RGBE");
- for (std::map::const_iterator it = m_metadata.begin();
- it != m_metadata.end(); ++it) {
- stream->writeLine(formatString("# Metadata [%s]:", it->first.c_str()));
- std::istringstream iss(it->second);
+
+ std::vector keys = m_metadata.getPropertyNames();
+ for (std::vector::const_iterator it = keys.begin(); it != keys.end(); ) {
+ stream->writeLine(formatString("# Metadata [%s]:", it->c_str()));
+ std::istringstream iss(m_metadata.getAsString(*it));
std::string buf;
while (std::getline(iss, buf))
stream->writeLine(formatString("# %s", buf.c_str()));
}
+
stream->writeLine("FORMAT=32-bit_rle_rgbe\n");
stream->writeLine(formatString("-Y %i +X %i", m_size.y, m_size.x));
diff --git a/src/libcore/class.cpp b/src/libcore/class.cpp
index 954b4f05..5fb696df 100644
--- a/src/libcore/class.cpp
+++ b/src/libcore/class.cpp
@@ -78,9 +78,9 @@ void Class::initializeOnce(Class *theClass) {
}
void Class::staticInitialization() {
- std::for_each(__classes->begin(), __classes->end(),
- compose1(std::ptr_fun(initializeOnce),
- select2nd()));
+ for (ClassMap::iterator it = __classes->begin();
+ it != __classes->end(); ++it)
+ initializeOnce(it->second);
m_isInitialized = true;
}
diff --git a/src/libcore/fmtconv.cpp b/src/libcore/fmtconv.cpp
index ae831528..f463de68 100644
--- a/src/libcore/fmtconv.cpp
+++ b/src/libcore/fmtconv.cpp
@@ -156,7 +156,6 @@ template struct FormatConverterImpl : public FormatConverter {
precomp[i] = convertScalar(detail::safe_cast(i), sourceGamma, NULL, multiplier, invDestGamma);
}
- const DestFormat zero = convertScalar(0.0f);
const DestFormat one = convertScalar(1.0f);
Spectrum spec;
@@ -192,15 +191,20 @@ template struct FormatConverterImpl : public FormatConverter {
case Bitmap::EXYZ:
for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma);
- *dest++ = zero; *dest++ = value; *dest++ = zero;
+ Float value = convertScalar(*source++, sourceGamma);
+ *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma);
}
break;
case Bitmap::EXYZA:
for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma);
- *dest++ = zero; *dest++ = value; *dest++ = zero; *dest++ = one;
+ Float value = convertScalar(*source++, sourceGamma);
+ *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = one;
}
break;
@@ -270,16 +274,20 @@ template struct FormatConverterImpl : public FormatConverter {
case Bitmap::EXYZ:
for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma);
- *dest++ = zero; *dest++ = value; *dest++ = zero;
+ Float value = convertScalar(*source++, sourceGamma);
+ *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma);
source++;
}
break;
case Bitmap::EXYZA:
for (size_t i=0; i(*source++, sourceGamma, precomp, multiplier, invDestGamma);
- *dest++ = zero; *dest++ = value; *dest++ = zero;
+ Float value = convertScalar(*source++, sourceGamma);
+ *dest++ = convertScalar(value * 0.950456f, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value, 1.0f, NULL, multiplier, invDestGamma);
+ *dest++ = convertScalar(value * 1.08875f, 1.0f, NULL, multiplier, invDestGamma);
*dest++ = convertScalar(*source++);
}
break;
diff --git a/src/libcore/fresolver.cpp b/src/libcore/fresolver.cpp
index b32d53d0..6131e5c4 100644
--- a/src/libcore/fresolver.cpp
+++ b/src/libcore/fresolver.cpp
@@ -1,61 +1,93 @@
#include
#include
-#if defined(__WINDOWS__)
+#if defined(__LINUX__)
+# if !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+# endif
+# include
+#elif defined(__OSX__)
+# include
+#elif defined(__WINDOWS__)
# include
# include
#endif
+
+
MTS_NAMESPACE_BEGIN
-FileResolver::FileResolver() {
- m_paths.push_back(fs::current_path());
-#if defined(__LINUX__)
- char exePathTemp[PATH_MAX];
- memset(exePathTemp, 0, PATH_MAX);
- if (readlink("/proc/self/exe", exePathTemp, PATH_MAX) != -1) {
- fs::path exePath(exePathTemp);
+#if defined(__WINDOWS__) || defined(__LINUX__)
+ namespace {
+ void dummySymbol() { }
+ }
+#endif
- /* Make sure that we're not running inside a Python interpreter */
- if (exePath.filename().string().find("python") == std::string::npos) {
- prependPath(exePath.parent_path());
- // Handle local installs: ~/local/bin/:~/local/share/mitsuba/*
- fs::path sharedDir = exePath.parent_path().parent_path()
- / fs::path("share") / fs::path("mitsuba");
- if (fs::exists(sharedDir))
- prependPath(sharedDir);
+FileResolver::FileResolver() {
+ /* Try to detect the base path of the Mitsuba installation */
+ fs::path basePath;
+#if defined(__LINUX__)
+ Dl_info info;
+
+ dladdr((const void *) &dummySymbol, &info);
+ if (info.dli_fname) {
+ /* Try to detect a few default setups */
+ if (boost::starts_with(info.dli_fname, "/usr/lib") ||
+ boost::starts_with(info.dli_fname, "/lib")) {
+ basePath = fs::path("/usr/share/mitsuba");
+ } else if (boost::starts_with(info.dli_fname, "/usr/local/lib")) {
+ basePath = fs::path("/usr/local/share/mitsuba");
+ } else {
+ /* This is a locally-compiled repository */
+ basePath = fs::path(info.dli_fname).parent_path();
}
- } else {
- Log(EError, "Could not detect the executable path!");
}
#elif defined(__OSX__)
MTS_AUTORELEASE_BEGIN()
- fs::path path = __mts_bundlepath();
- if (path.filename() != fs::path("Python.app"))
- prependPath(path);
+ uint32_t imageCount = _dyld_image_count();
+ for (uint32_t i=0; i lpFilename(MAX_PATH);
+ // Module handle to this DLL. If the function fails it sets handle to NULL.
+ // In that case GetModuleFileName will get the name of the executable which
+ // is acceptable soft-failure behavior.
+ HMODULE handle;
+ GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+ | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast(&dummySymbol), &handle);
+
// Try to get the path with the default MAX_PATH length (260 chars)
- DWORD nSize = GetModuleFileNameW(NULL, &lpFilename[0], MAX_PATH);
+ DWORD nSize = GetModuleFileNameW(handle, &lpFilename[0], MAX_PATH);
// Adjust the buffer size in case if was too short
- while (nSize == lpFilename.size()) {
+ while (nSize != 0 && nSize == lpFilename.size()) {
lpFilename.resize(nSize * 2);
- nSize = GetModuleFileNameW(NULL, &lpFilename[0], nSize);
+ nSize = GetModuleFileNameW(handle, &lpFilename[0],
+ static_cast(lpFilename.size()));
}
// There is an error if and only if the function returns 0
- if (nSize != 0) {
- fs::path path(lpFilename);
- if (boost::to_lower_copy(path.filename().string()).find("python") == std::string::npos)
- prependPath(path.parent_path());
- } else {
- const std::string msg(lastErrorText());
- Log(EError, "Could not detect the executable path! (%s)", msg.c_str());
- }
+ if (nSize != 0)
+ basePath = fs::path(lpFilename).parent_path();
+ else
+ Log(EError, "Could not detect the executable path! (%s)", lastErrorText().c_str());
#endif
+ #if BOOST_VERSION >= 104800
+ m_paths.push_back(fs::canonical(basePath));
+ #else
+ m_paths.push_back(fs::absolute(basePath));
+ #endif
+ m_paths.push_back(fs::current_path());
}
FileResolver *FileResolver::clone() const {
diff --git a/src/libcore/logger.cpp b/src/libcore/logger.cpp
index cbab6b8c..179288bb 100644
--- a/src/libcore/logger.cpp
+++ b/src/libcore/logger.cpp
@@ -111,7 +111,11 @@ void Logger::log(ELogLevel level, const Class *theClass,
memset(exePath, 0, PATH_MAX);
if (readlink(formatString("/proc/%i/exe", ppid).c_str(), exePath, PATH_MAX) != -1) {
if (!strcmp(exePath, "/usr/bin/gdb")) {
+#if defined(__i386__) || defined(__x86_64__)
__asm__ ("int $3");
+#else
+ __builtin_trap();
+#endif
}
}
#elif defined(__OSX__)
diff --git a/src/libcore/properties.cpp b/src/libcore/properties.cpp
index da93cf3c..23ab813e 100644
--- a/src/libcore/properties.cpp
+++ b/src/libcore/properties.cpp
@@ -18,6 +18,7 @@
#include
#include
+#include
/* Keep the boost::variant includes outside of properties.h,
since they noticeably add to the overall compile times */
@@ -26,7 +27,7 @@
MTS_NAMESPACE_BEGIN
typedef boost::variant<
- bool, int64_t, Float, Point, Vector, Transform,
+ bool, int64_t, Float, Point, Vector, Transform, AnimatedTransform *,
Spectrum, std::string, Properties::Data> ElementData;
struct PropertyElement {
@@ -45,7 +46,7 @@ struct PropertyElement {
Type Properties::get##TypeName(const std::string &name) const { \
std::map::const_iterator it = m_elements->find(name); \
if (it == m_elements->end()) \
- SLog(EError, "Property \"%s\" missing", name.c_str()); \
+ SLog(EError, "Property \"%s\" has not been specified!", name.c_str()); \
const BaseType *result = boost::get(&it->second.data); \
if (!result) \
SLog(EError, "The property \"%s\" has the wrong type (expected <" #ReadableName ">). The " \
@@ -78,18 +79,127 @@ DEFINE_PROPERTY_ACCESSOR(Spectrum, Spectrum, Spectrum, spectrum)
DEFINE_PROPERTY_ACCESSOR(std::string, std::string, String, string)
DEFINE_PROPERTY_ACCESSOR(Properties::Data, Properties::Data, Data, data)
-class type_visitor : public boost::static_visitor {
-public:
- Properties::EPropertyType operator()(const bool &) const { return Properties::EBoolean; }
- Properties::EPropertyType operator()(const int64_t &) const { return Properties::EInteger; }
- Properties::EPropertyType operator()(const Float &) const { return Properties::EFloat; }
- Properties::EPropertyType operator()(const Point &) const { return Properties::EPoint; }
- Properties::EPropertyType operator()(const Vector &) const { return Properties::EVector; }
- Properties::EPropertyType operator()(const Transform &) const { return Properties::ETransform; }
- Properties::EPropertyType operator()(const Spectrum &) const { return Properties::ESpectrum; }
- Properties::EPropertyType operator()(const std::string &) const { return Properties::EString; }
- Properties::EPropertyType operator()(const Properties::Data &) const { return Properties::EData; }
-};
+void Properties::setAnimatedTransform(const std::string &name, const AnimatedTransform *value, bool warnDuplicates) {
+ if (hasProperty(name)) {
+ AnimatedTransform **old = boost::get(&((*m_elements)[name].data));
+ if (old)
+ (*old)->decRef();
+ if (warnDuplicates)
+ SLog(EWarn, "Property \"%s\" was specified multiple times!", name.c_str());
+ }
+ (*m_elements)[name].data = (AnimatedTransform *) value;
+ (*m_elements)[name].queried = false;
+ value->incRef();
+}
+
+ref Properties::getAnimatedTransform(const std::string &name) const {
+ std::map::const_iterator it = m_elements->find(name);
+ if (it == m_elements->end())
+ SLog(EError, "Property \"%s\" missing", name.c_str());
+ const AnimatedTransform * const * result1 = boost::get(&it->second.data);
+ const Transform *result2 = boost::get(&it->second.data);
+
+ if (!result1 && !result2)
+ SLog(EError, "The property \"%s\" has the wrong type (expected or ). The "
+ "complete property record is :\n%s", name.c_str(), toString().c_str());
+ it->second.queried = true;
+
+ if (result1)
+ return *result1;
+ else
+ return new AnimatedTransform(*result2);
+}
+
+ref Properties::getAnimatedTransform(const std::string &name, const AnimatedTransform *defVal) const {
+ std::map::const_iterator it = m_elements->find(name);
+ if (it == m_elements->end())
+ return defVal;
+ AnimatedTransform * const * result1 = boost::get(&it->second.data);
+ const Transform *result2 = boost::get(&it->second.data);
+
+ if (!result1 && !result2)
+ SLog(EError, "The property \"%s\" has the wrong type (expected or ). The "
+ "complete property record is :\n%s", name.c_str(), toString().c_str());
+
+ it->second.queried = true;
+
+ if (result1)
+ return *result1;
+ else
+ return new AnimatedTransform(*result2);
+}
+
+ref Properties::getAnimatedTransform(const std::string &name, const Transform &defVal) const {
+ std::map::const_iterator it = m_elements->find(name);
+ if (it == m_elements->end())
+ return new AnimatedTransform(defVal);
+
+ AnimatedTransform * const * result1 = boost::get(&it->second.data);
+ const Transform *result2 = boost::get(&it->second.data);
+
+ if (!result1 && !result2)
+ SLog(EError, "The property \"%s\" has the wrong type (expected or ). The "
+ "complete property record is :\n%s", name.c_str(), toString().c_str());
+ it->second.queried = true;
+
+ if (result1)
+ return *result1;
+ else
+ return new AnimatedTransform(*result2);
+}
+
+namespace {
+ class TypeVisitor : public boost::static_visitor {
+ public:
+ Properties::EPropertyType operator()(const bool &) const { return Properties::EBoolean; }
+ Properties::EPropertyType operator()(const int64_t &) const { return Properties::EInteger; }
+ Properties::EPropertyType operator()(const Float &) const { return Properties::EFloat; }
+ Properties::EPropertyType operator()(const Point &) const { return Properties::EPoint; }
+ Properties::EPropertyType operator()(const Vector &) const { return Properties::EVector; }
+ Properties::EPropertyType operator()(const Transform &) const { return Properties::ETransform; }
+ Properties::EPropertyType operator()(const AnimatedTransform *) const { return Properties::EAnimatedTransform; }
+ Properties::EPropertyType operator()(const Spectrum &) const { return Properties::ESpectrum; }
+ Properties::EPropertyType operator()(const std::string &) const { return Properties::EString; }
+ Properties::EPropertyType operator()(const Properties::Data &) const { return Properties::EData; }
+ };
+
+ class EqualityVisitor : public boost::static_visitor {
+ public:
+ EqualityVisitor(const ElementData *ref) : ref(ref) { }
+
+ bool operator()(const bool &v) const { const bool *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const int64_t &v) const { const int64_t *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Float &v) const { const Float *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Point &v) const { const Point *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Vector &v) const { const Vector *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Transform &v) const { const Transform *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const AnimatedTransform *v) const { AnimatedTransform * const *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Spectrum &v) const { const Spectrum *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const std::string &v) const { const std::string *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ bool operator()(const Properties::Data &v) const { const Properties::Data *v2 = boost::get(ref); return v2 ? (v == *v2) : false; }
+ private:
+ const ElementData *ref;
+ };
+
+ class StringVisitor : public boost::static_visitor {
+ public:
+ StringVisitor(std::ostringstream &oss, bool quote) : oss(oss), quote(quote) { }
+
+ void operator()(const bool &v) const { oss << (v ? "true" : "false"); }
+ void operator()(const int64_t &v) const { oss << v; }
+ void operator()(const Float &v) const { oss << v; }
+ void operator()(const Point &v) const { oss << v.toString(); }
+ void operator()(const Vector &v) const { oss << v.toString(); }
+ void operator()(const Transform &v) const { oss << v.toString(); }
+ void operator()(const AnimatedTransform *v) const { oss << ((Object *) v)->toString(); }
+ void operator()(const Spectrum &v) const { oss << v.toString(); }
+ void operator()(const std::string &v) const { oss << (quote ? "\"" : "") << v << (quote ? "\"" : ""); }
+ void operator()(const Properties::Data &v) const { oss << v.ptr << " (size=" << v.size << ")"; }
+ private:
+ std::ostringstream &oss;
+ bool quote;
+ };
+}
Properties::Properties()
: m_id("unnamed") {
@@ -104,16 +214,44 @@ Properties::Properties(const std::string &pluginName)
Properties::Properties(const Properties &props)
: m_pluginName(props.m_pluginName), m_id(props.m_id) {
m_elements = new std::map(*props.m_elements);
+
+ for (std::map::iterator it = m_elements->begin();
+ it != m_elements->end(); ++it) {
+ AnimatedTransform **trafo = boost::get(&(*it).second.data);
+ if (trafo)
+ (*trafo)->incRef();
+ }
}
Properties::~Properties() {
+ for (std::map::iterator it = m_elements->begin();
+ it != m_elements->end(); ++it) {
+ AnimatedTransform **trafo = boost::get(&(*it).second.data);
+ if (trafo)
+ (*trafo)->decRef();
+ }
+
delete m_elements;
}
void Properties::operator=(const Properties &props) {
+ for (std::map::iterator it = m_elements->begin();
+ it != m_elements->end(); ++it) {
+ AnimatedTransform **trafo = boost::get(&(*it).second.data);
+ if (trafo)
+ (*trafo)->decRef();
+ }
+
m_pluginName = props.m_pluginName;
m_id = props.m_id;
*m_elements = *props.m_elements;
+
+ for (std::map::iterator it = m_elements->begin();
+ it != m_elements->end(); ++it) {
+ AnimatedTransform **trafo = boost::get(&(*it).second.data);
+ if (trafo)
+ (*trafo)->incRef();
+ }
}
bool Properties::hasProperty(const std::string &name) const {
@@ -124,6 +262,9 @@ bool Properties::removeProperty(const std::string &name) {
std::map::iterator it = m_elements->find(name);
if (it == m_elements->end())
return false;
+ AnimatedTransform **trafo = boost::get(&(*it).second.data);
+ if (trafo)
+ (*trafo)->decRef();
m_elements->erase(it);
return true;
}
@@ -145,13 +286,32 @@ Properties::EPropertyType Properties::getType(const std::string &name) const {
if (it == m_elements->end())
SLog(EError, "Property \"%s\" has not been specified!", name.c_str());
- type_visitor myVisitor;
- return boost::apply_visitor(myVisitor, it->second.data);
+ return boost::apply_visitor(TypeVisitor(), it->second.data);
+}
+
+std::string Properties::getAsString(const std::string &name, const std::string &defVal) const {
+ if (m_elements->find(name) == m_elements->end())
+ return defVal;
+ return getAsString(name);
+}
+
+std::string Properties::getAsString(const std::string &name) const {
+ std::map::const_iterator it = m_elements->find(name);
+ if (it == m_elements->end())
+ SLog(EError, "Property \"%s\" has not been specified!", name.c_str());
+
+ std::ostringstream oss;
+ StringVisitor strVisitor(oss, false);
+ boost::apply_visitor(strVisitor, it->second.data);
+ it->second.queried = true;
+
+ return oss.str();
}
std::string Properties::toString() const {
std::map::const_iterator it = m_elements->begin();
std::ostringstream oss;
+ StringVisitor strVisitor(oss, true);
oss << "Properties[" << endl
<< " pluginName = \"" << m_pluginName << "\"," << endl
@@ -160,36 +320,7 @@ std::string Properties::toString() const {
while (it != m_elements->end()) {
oss << " \"" << (*it).first << "\" -> ";
const ElementData &data = (*it).second.data;
- EPropertyType type = boost::apply_visitor(type_visitor(), data);
- switch (type) {
- case EBoolean:
- oss << (boost::get(data) ? "true" : "false");
- break;
- case EInteger:
- oss << boost::get(data);
- break;
- case EFloat:
- oss << boost::get(data);
- break;
- case EPoint:
- oss << boost::get(data).toString();
- break;
- case ETransform:
- oss << indent(boost::get(data).toString());
- break;
- case ESpectrum:
- oss << boost::get(data).toString();
- break;
- case EString:
- oss << "\"" << boost::get(data) << "\"";
- break;
- case EData:
- oss << boost::get(data).ptr << " (size="
- << boost::get(data).size << ")";
- break;
- default:
- oss << "";
- }
+ boost::apply_visitor(strVisitor, data);
if (++it != m_elements->end())
oss << ",";
oss << endl;
@@ -219,6 +350,29 @@ void Properties::putPropertyNames(std::vector &results) const {
results.push_back((*it).first);
}
+void Properties::copyAttribute(const Properties &properties,
+ const std::string &sourceName, const std::string &targetName) {
+ std::map::const_iterator it = properties.m_elements->find(sourceName);
+ if (it == properties.m_elements->end())
+ SLog(EError, "copyAttribute(): Could not find parameter \"%s\"!", sourceName.c_str());
+ m_elements->operator[](targetName) = it->second;
+}
+
+bool Properties::operator==(const Properties &p) const {
+ if (m_pluginName != p.m_pluginName || m_id != p.m_id || m_elements->size() != p.m_elements->size())
+ return false;
+
+ std::map::const_iterator it = m_elements->begin();
+ for (; it != m_elements->end(); ++it) {
+ const PropertyElement &first = it->second;
+ const PropertyElement &second = (*p.m_elements)[it->first];
+
+ if (!boost::apply_visitor(EqualityVisitor(&first.data), second.data))
+ return false;
+ }
+
+ return true;
+}
ConfigurableObject::ConfigurableObject(Stream *stream, InstanceManager *manager)
: SerializableObject(stream, manager) {
diff --git a/src/libcore/quad.cpp b/src/libcore/quad.cpp
index ab78a584..f54534d9 100644
--- a/src/libcore/quad.cpp
+++ b/src/libcore/quad.cpp
@@ -96,6 +96,7 @@ static std::pair legendreQ(int l, double x) {
if (l == 1) {
return std::make_pair(0.5 * (3*x*x-1) - 1, 3*x);
} else {
+ /* Evaluate the recurrence in double precision */
double Lppred = 1.0, Lpred = x, Lcur = 0.0,
Dppred = 0.0, Dpred = 1.0, Dcur = 0.0;
@@ -113,6 +114,65 @@ static std::pair legendreQ(int l, double x) {
}
}
+double legendreP(int l, int m, double x) {
+ double p_mm = 1;
+
+ if (m > 0) {
+ double somx2 = std::sqrt((1 - x) * (1 + x));
+ double fact = 1;
+ for (int i=1; i<=m; i++) {
+ p_mm *= (-fact) * somx2;
+ fact += 2;
+ }
+ }
+
+ if (l == m)
+ return p_mm;
+
+ double p_mmp1 = x * (2*m + 1) * p_mm;
+ if (l == m+1)
+ return p_mmp1;
+
+ double p_ll = 0;
+ for (int ll=m+2; ll <= l; ++ll) {
+ p_ll = ((2*ll-1)*x*p_mmp1 - (ll+m-1) * p_mm) / (ll-m);
+ p_mm = p_mmp1;
+ p_mmp1 = p_ll;
+ }
+
+ return p_ll;
+}
+
+float legendreP(int l, int m, float x) {
+ /* Evaluate the recurrence in double precision */
+ double p_mm = 1;
+
+ if (m > 0) {
+ double somx2 = std::sqrt((1 - x) * (1 + x));
+ double fact = 1;
+ for (int i=1; i<=m; i++) {
+ p_mm *= (-fact) * somx2;
+ fact += 2;
+ }
+ }
+
+ if (l == m)
+ return (float) p_mm;
+
+ double p_mmp1 = x * (2*m + 1) * p_mm;
+ if (l == m+1)
+ return (float) p_mmp1;
+
+ double p_ll = 0;
+ for (int ll=m+2; ll <= l; ++ll) {
+ p_ll = ((2*ll-1)*x*p_mmp1 - (ll+m-1) * p_mm) / (ll-m);
+ p_mm = p_mmp1;
+ p_mmp1 = p_ll;
+ }
+
+ return (float) p_ll;
+}
+
void gaussLegendre(int n, Float *nodes, Float *weights) {
if (n-- < 1)
SLog(EError, "gaussLegendre(): n must be >= 1");
diff --git a/src/libcore/random.cpp b/src/libcore/random.cpp
index a91f5849..f85625cb 100644
--- a/src/libcore/random.cpp
+++ b/src/libcore/random.cpp
@@ -477,11 +477,11 @@ Random::Random() : mt(NULL) {
seed();
#else
#if 0
- uint64_t buf[MT_N];
- memset(buf, 0, MT_N * sizeof(uint64_t)); /* Make GCC happy */
+ uint64_t buf[N64];
+ memset(buf, 0, N64 * sizeof(uint64_t)); /* Make GCC happy */
ref urandom = new FileStream("/dev/urandom", FileStream::EReadOnly);
- urandom->readULongArray(buf, MT_N);
- seed(buf, MT_N);
+ urandom->readULongArray(buf, N64);
+ seed(buf, N64);
#else
seed();
#endif
diff --git a/src/libcore/sched.cpp b/src/libcore/sched.cpp
index a2765adb..21f9a734 100644
--- a/src/libcore/sched.cpp
+++ b/src/libcore/sched.cpp
@@ -158,10 +158,11 @@ void Scheduler::retainResource(int id) {
rec->refCount++;
}
-void Scheduler::unregisterResource(int id) {
+bool Scheduler::unregisterResource(int id) {
LockGuard lock(m_mutex);
if (m_resources.find(id) == m_resources.end()) {
- Log(EError, "unregisterResource(): could not find the resource with ID %i!", id);
+ Log(EWarn, "unregisterResource(): could not find the resource with ID %i!", id);
+ return false;
}
ResourceRecord *rec = m_resources[id];
if (--rec->refCount == 0) {
@@ -175,6 +176,7 @@ void Scheduler::unregisterResource(int id) {
for (size_t i=0; isignalResourceExpiration(id);
}
+ return true;
}
SerializableObject *Scheduler::getResource(int id, int coreIndex) {
diff --git a/src/libcore/sched_remote.cpp b/src/libcore/sched_remote.cpp
index d11e763a..95f737c5 100644
--- a/src/libcore/sched_remote.cpp
+++ b/src/libcore/sched_remote.cpp
@@ -325,14 +325,7 @@ StreamBackend::StreamBackend(const std::string &thrName, Scheduler *scheduler,
m_memStream->setByteOrder(Stream::ENetworkByteOrder);
}
-StreamBackend::~StreamBackend() {
- if (m_stream->getClass()->derivesFrom(MTS_CLASS(SocketStream))) {
- SocketStream *sstream = static_cast(m_stream.get());
- Log(EInfo, "Closing connection to %s - received %i KB / sent %i KB",
- sstream->getPeer().c_str(), (int) (sstream->getReceivedBytes() / 1024),
- (int) (sstream->getSentBytes() / 1024));
- }
-}
+StreamBackend::~StreamBackend() { }
void StreamBackend::run() {
if (m_detach)
@@ -489,6 +482,13 @@ void StreamBackend::run() {
Log(EWarn, "Removing stray resource %i", (*it).first);
m_scheduler->unregisterResource((*it).second);
}
+
+ if (m_stream->getClass()->derivesFrom(MTS_CLASS(SocketStream))) {
+ SocketStream *sstream = static_cast(m_stream.get());
+ Log(EInfo, "Closing connection to %s - received %i KB / sent %i KB",
+ sstream->getPeer().c_str(), (int) (sstream->getReceivedBytes() / 1024),
+ (int) (sstream->getSentBytes() / 1024));
+ }
}
void StreamBackend::sendCancellation(int id, int numLost) {
diff --git a/src/libcore/shvector.cpp b/src/libcore/shvector.cpp
index d08a1778..3a19bbff 100644
--- a/src/libcore/shvector.cpp
+++ b/src/libcore/shvector.cpp
@@ -62,12 +62,12 @@ Float SHVector::eval(Float theta, Float phi) const {
for (int l=0; l0) {
- Float somx2 = std::sqrt(((Float) 1 - x) * ((Float) 1 + x));
- Float fact = 1.0;
- for (int i=1; i<=m; i++) {
- pmm *= (-fact) * somx2;
- fact += (Float) 2;
- }
- }
-
- if (l==m)
- return pmm;
-
- Float pmmp1 = x * ((Float) 2 * m + (Float) 1) * pmm;
- if (l==m+1)
- return pmmp1;
-
- Float pll = (Float) 0;
- for (int ll=m+2; ll<=l; ++ll) {
- pll = (((Float) 2 * ll - (Float) 1)*x*pmmp1
- - (ll + m - (Float) 1) * pmm ) / (ll-m);
- pmm = pmmp1;
- pmmp1 = pll;
- }
-
- return pll;
-}
-
void SHVector::normalize() {
Float correction = 1/(2 * (Float) std::sqrt(M_PI)*operator()(0,0));
@@ -559,8 +529,8 @@ Float *SHSampler::legendreIntegrals(Float a, Float b) {
for (int l=0; l.
+*/
+
+#include
+
+MTS_NAMESPACE_BEGIN
+
+Float evalCubicInterp1D(Float x, const Float *values, size_t size, Float min, Float max, bool extrapolate) {
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(x >= min && x <= max) && !extrapolate)
+ return 0.0f;
+
+ /* Transform 'x' so that knots lie at integer positions */
+ Float t = ((x - min) * (size - 1)) / (max - min);
+
+ /* Find the index of the left knot in the queried subinterval, be
+ robust to cases where 't' lies exactly on the right endpoint */
+ size_t k = std::max((size_t) 0, std::min((size_t) t, size - 2));
+
+ Float f0 = values[k],
+ f1 = values[k+1],
+ d0, d1;
+
+ /* Approximate the derivatives */
+ if (k > 0)
+ d0 = 0.5f * (values[k+1] - values[k-1]);
+ else
+ d0 = values[k+1] - values[k];
+
+ if (k + 2 < size)
+ d1 = 0.5f * (values[k+2] - values[k]);
+ else
+ d1 = values[k+1] - values[k];
+
+ /* Compute the relative position within the interval */
+ t = t - (Float) k;
+
+ Float t2 = t*t, t3 = t2*t;
+
+ return
+ ( 2*t3 - 3*t2 + 1) * f0 +
+ (-2*t3 + 3*t2) * f1 +
+ ( t3 - 2*t2 + t) * d0 +
+ ( t3 - t2) * d1;
+}
+
+Float evalCubicInterp1DN(Float x, const Float *nodes, const Float *values, size_t size, bool extrapolate) {
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(x >= nodes[0] && x <= nodes[size-1]) && !extrapolate)
+ return 0.0f;
+
+ size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size - 2,
+ std::lower_bound(nodes, nodes + size, x) - nodes - 1));
+
+ Float f0 = values[k],
+ f1 = values[k+1],
+ width = nodes[k+1] - nodes[k],
+ d0, d1;
+
+ /* Approximate the derivatives */
+ if (k > 0)
+ d0 = width * (f1 - values[k-1]) / (nodes[k+1] - nodes[k-1]);
+ else
+ d0 = f1 - f0;
+
+ if (k + 2 < size)
+ d1 = width * (values[k+2] - f0) / (nodes[k+2] - nodes[k]);
+ else
+ d1 = f1 - f0;
+
+ Float t = (x - nodes[k]) / width;
+ Float t2 = t*t, t3 = t2*t;
+
+ return
+ ( 2*t3 - 3*t2 + 1) * f0 +
+ (-2*t3 + 3*t2) * f1 +
+ ( t3 - 2*t2 + t) * d0 +
+ ( t3 - t2) * d1;
+}
+
+Float integrateCubicInterp1D(size_t idx, const Float *values, size_t size, Float min, Float max) {
+ Float f0 = values[idx], f1 = values[idx+1], d0, d1;
+
+ /* Approximate the derivatives */
+ if (idx > 0)
+ d0 = 0.5f * (values[idx+1] - values[idx-1]);
+ else
+ d0 = values[idx+1] - values[idx];
+
+ if (idx + 2 < size)
+ d1 = 0.5f * (values[idx+2] - values[idx]);
+ else
+ d1 = values[idx+1] - values[idx];
+
+ return ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f) * (max-min) / (size - 1);
+}
+
+Float integrateCubicInterp1DN(size_t idx, const Float *nodes, const Float *values, size_t size) {
+ Float f0 = values[idx],
+ f1 = values[idx+1],
+ width = nodes[idx+1] - nodes[idx],
+ d0, d1;
+
+ /* Approximate the derivatives */
+ if (idx > 0)
+ d0 = width * (f1 - values[idx-1]) / (nodes[idx+1] - nodes[idx-1]);
+ else
+ d0 = f1 - f0;
+
+ if (idx + 2 < size)
+ d1 = width * (values[idx+2] - f0) / (nodes[idx+2] - nodes[idx]);
+ else
+ d1 = f1 - f0;
+
+ return ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f) * width;
+}
+
+Float sampleCubicInterp1D(size_t idx, Float *values, size_t size, Float min,
+ Float max, Float sample, Float *fval) {
+ Float f0 = values[idx], f1 = values[idx+1], d0, d1;
+
+ /* Approximate the derivatives */
+ if (idx > 0)
+ d0 = 0.5f * (values[idx+1] - values[idx-1]);
+ else
+ d0 = values[idx+1] - values[idx];
+
+ if (idx + 2 < size)
+ d1 = 0.5f * (values[idx+2] - values[idx]);
+ else
+ d1 = values[idx+1] - values[idx];
+
+ /* Bracketing interval and starting guess */
+ Float a = 0, c = 1, b;
+
+ if (f0 != f1) /* Importance sample linear interpolant */
+ b = (f0-math::safe_sqrt(f0*f0 + sample * (f1*f1-f0*f0))) / (f0-f1);
+ else
+ b = sample;
+
+ sample *= ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f);
+
+ /* Invert CDF using Newton-Bisection */
+ while (true) {
+ if (!(b >= a && b <= c))
+ b = 0.5f * (a + c);
+
+ /* CDF and PDF in Horner form */
+ Float value = b*(f0 + b*(.5f*d0 + b*((Float) (1.0f/3.0f) * (-2*d0-d1)
+ + f1 - f0 + b*(0.25f*(d0 + d1) + 0.5f * (f0 - f1))))) - sample;
+ Float deriv = f0 + b*(d0 + b*(-2*d0 - d1 + 3*(f1-f0) + b*(d0 + d1 + 2*(f0 - f1))));
+
+ if (std::abs(value) < 1e-6f) {
+ if (fval)
+ *fval = deriv;
+ return min + (idx+b) * (max-min) / (size-1);
+ }
+
+ if (value > 0)
+ c = b;
+ else
+ a = b;
+
+ b -= value / deriv;
+ }
+}
+
+/**
+ * Importance sample one segment of a 1D cubic spline interpolant with
+ * irregularly spaced nodes.
+ *
+ * Parameters:
+ *   idx    - index of the left node of the segment to sample
+ *   nodes  - array of 'size' node positions (assumed increasing -- the
+ *            'width' computation below relies on nodes[idx+1] > nodes[idx])
+ *   values - array of 'size' function values at the nodes
+ *   size   - number of nodes
+ *   sample - uniform variate in [0, 1) driving the sampling step
+ *   fval   - if non-NULL, receives the interpolant's value (the
+ *            unnormalized density) at the returned position
+ *
+ * Returns a position in [nodes[idx], nodes[idx+1]] distributed according to
+ * the cubic interpolant, obtained by inverting the segment's CDF with a
+ * Newton iteration safeguarded by bisection on a bracketing interval.
+ *
+ * NOTE(review): the loop only exits when |CDF residual| < 1e-6 --
+ * presumably the supplied values are nonnegative so the CDF is monotonic
+ * and the bracket always contains a root; confirm against callers.
+ */
+Float sampleCubicInterp1DN(size_t idx, Float *nodes, Float *values,
+ size_t size, Float sample, Float *fval) {
+ Float f0 = values[idx],
+ f1 = values[idx+1],
+ width = nodes[idx+1] - nodes[idx],
+ d0, d1;
+
+ /* Approximate the derivatives: scaled central differences where a
+ neighboring node exists, one-sided differences at the array ends */
+ if (idx > 0)
+ d0 = width * (f1 - values[idx-1]) / (nodes[idx+1] - nodes[idx-1]);
+ else
+ d0 = f1 - f0;
+
+ if (idx + 2 < size)
+ d1 = width * (values[idx+2] - f0) / (nodes[idx+2] - nodes[idx]);
+ else
+ d1 = f1 - f0;
+
+ /* Bracketing interval and starting guess */
+ Float a = 0, c = 1, b;
+
+ if (f0 != f1) /* Importance sample linear interpolant */
+ b = (f0-math::safe_sqrt(f0*f0 + sample * (f1*f1-f0*f0))) / (f0-f1);
+ else
+ b = sample;
+
+ /* Rescale the uniform variate by the segment's total (unnormalized)
+ CDF mass, i.e. the integral of the cubic over the unit interval */
+ sample *= ((d0-d1) * (Float) (1.0 / 12.0) + (f0+f1) * 0.5f);
+
+ /* Invert CDF using Newton-Bisection */
+ while (true) {
+ /* Fall back to bisection whenever Newton left the bracket */
+ if (!(b >= a && b <= c))
+ b = 0.5f * (a + c);
+
+ /* CDF and PDF in Horner form */
+ Float value = b*(f0 + b*(.5f*d0 + b*((Float) (1.0f/3.0f) * (-2*d0-d1)
+ + f1 - f0 + b*(0.25f*(d0 + d1) + 0.5f * (f0 - f1))))) - sample;
+ Float deriv = f0 + b*(d0 + b*(-2*d0 - d1 + 3*(f1-f0) + b*(d0 + d1 + 2*(f0 - f1))));
+
+ if (std::abs(value) < 1e-6f) {
+ if (fval)
+ *fval = deriv;
+ /* Map the parametric solution back onto the node interval */
+ return nodes[idx] + width*b;
+ }
+
+ /* Tighten the bracketing interval around the root */
+ if (value > 0)
+ c = b;
+ else
+ a = b;
+
+ b -= value / deriv;
+ }
+}
+
+/**
+ * Evaluate a 2D tensor-product cubic spline interpolant at position 'p',
+ * given function values on a regular grid of 'size' samples covering the
+ * axis-aligned rectangle [min, max]. 'values' is indexed in row-major
+ * order with the X dimension varying fastest.
+ *
+ * Returns 0 when 'p' lies outside the domain (or is NaN -- the negated
+ * comparison below catches NaN) unless 'extrapolate' is set.
+ *
+ * NOTE(review): with extrapolate=true and p[dim] < min[dim], 't' is
+ * negative and the (size_t) cast relies on the subsequent std::min clamp;
+ * confirm callers only extrapolate slightly beyond the domain.
+ */
+Float evalCubicInterp2D(const Point2 &p, const Float *values, const Size2 &size,
+ const Point2 &min, const Point2 &max, bool extrapolate) {
+ /* Per-dimension weights of the (up to) 4 contributing grid nodes,
+ plus the index of the left knot in each dimension */
+ Float knotWeights[2][4];
+ Size2 knot;
+
+ /* Compute interpolation weights separately for each dimension */
+ for (int dim=0; dim<2; ++dim) {
+ Float *weights = knotWeights[dim];
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate)
+ return 0.0f;
+
+ /* Transform 'p' so that knots lie at integer positions */
+ Float t = ((p[dim] - min[dim]) * (size[dim] - 1))
+ / (max[dim]-min[dim]);
+
+ /* Find the index of the left knot in the queried subinterval, be
+ robust to cases where 't' lies exactly on the right endpoint */
+ knot[dim] = std::min((size_t) t, size[dim] - 2);
+
+ /* Compute the relative position within the interval */
+ t = t - (Float) knot[dim];
+
+ /* Compute node weights (cubic Hermite basis for the two endpoint
+ values; the outer two entries are filled in below) */
+ Float t2 = t*t, t3 = t2*t;
+ weights[0] = 0.0f;
+ weights[1] = 2*t3 - 3*t2 + 1;
+ weights[2] = -2*t3 + 3*t2;
+ weights[3] = 0.0f;
+
+ /* Derivative weights */
+ Float d0 = t3 - 2*t2 + t,
+ d1 = t3 - t2;
+
+ /* Turn derivative weights into node weights using
+ an appropriate chosen finite differences stencil: central
+ differences in the interior, one-sided at the grid borders */
+ if (knot[dim] > 0) {
+ weights[2] += 0.5f * d0;
+ weights[0] -= 0.5f * d0;
+ } else {
+ weights[2] += d0;
+ weights[1] -= d0;
+ }
+
+ if (knot[dim] + 2 < size[dim]) {
+ weights[3] += 0.5f * d1;
+ weights[1] -= 0.5f * d1;
+ } else {
+ weights[2] += d1;
+ weights[1] -= d1;
+ }
+ }
+
+ /* Accumulate the tensor-product sum over the 4x4 stencil; zero
+ weights (the boundary cases above) are skipped, which also keeps
+ the index computation from addressing nodes outside the grid */
+ Float result = 0.0f;
+ for (int y=-1; y<=2; ++y) {
+ Float wy = knotWeights[1][y+1];
+ for (int x=-1; x<=2; ++x) {
+ Float wxy = knotWeights[0][x+1] * wy;
+
+ if (wxy == 0)
+ continue;
+
+ size_t pos = (knot[1] + y) * size[0] + knot[0] + x;
+
+ result += values[pos] * wxy;
+ }
+ }
+ return result;
+}
+
+/**
+ * Evaluate a 2D tensor-product cubic spline interpolant at position 'p'
+ * for irregularly spaced nodes: 'nodes_[dim]' holds the size[dim] node
+ * positions of dimension 'dim' (assumed sorted, as required by the
+ * std::lower_bound query below), and 'values' holds the function values
+ * in row-major order with the X dimension varying fastest.
+ *
+ * Returns 0 when 'p' lies outside the node range (or is NaN -- the
+ * negated comparison catches NaN) unless 'extrapolate' is set.
+ */
+Float evalCubicInterp2DN(const Point2 &p, const Float **nodes_,
+ const Float *values, const Size2 &size, bool extrapolate) {
+ /* Per-dimension weights of the (up to) 4 contributing nodes, plus
+ the index of the left knot in each dimension */
+ Float knotWeights[2][4];
+ Size2 knot;
+
+ /* Compute interpolation weights separately for each dimension */
+ for (int dim=0; dim<2; ++dim) {
+ const Float *nodes = nodes_[dim];
+ Float *weights = knotWeights[dim];
+
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate)
+ return 0.0f;
+
+ /* Find the index of the left knot in the queried subinterval, be
+ robust to cases where 't' lies exactly on the right endpoint.
+ The ptrdiff_t clamp also keeps the index valid when extrapolating */
+ size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2,
+ std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1));
+ knot[dim] = k;
+
+ Float width = nodes[k+1] - nodes[k];
+
+ /* Compute the relative position within the interval */
+ Float t = (p[dim] - nodes[k]) / width,
+ t2 = t*t, t3 = t2*t;
+
+ /* Compute node weights (cubic Hermite basis for the two endpoint
+ values; the outer two entries are filled in below) */
+ weights[0] = 0.0f;
+ weights[1] = 2*t3 - 3*t2 + 1;
+ weights[2] = -2*t3 + 3*t2;
+ weights[3] = 0.0f;
+
+ /* Derivative weights */
+ Float d0 = t3 - 2*t2 + t, d1 = t3 - t2;
+
+ /* Turn derivative weights into node weights using
+ an appropriate chosen finite differences stencil; the extra
+ 'factor' accounts for the nonuniform node spacing */
+ if (k > 0) {
+ Float factor = width / (nodes[k+1]-nodes[k-1]);
+ weights[2] += d0 * factor;
+ weights[0] -= d0 * factor;
+ } else {
+ weights[2] += d0;
+ weights[1] -= d0;
+ }
+
+ if (k + 2 < size[dim]) {
+ Float factor = width / (nodes[k+2]-nodes[k]);
+ weights[3] += d1 * factor;
+ weights[1] -= d1 * factor;
+ } else {
+ weights[2] += d1;
+ weights[1] -= d1;
+ }
+ }
+
+ /* Accumulate the tensor-product sum over the 4x4 stencil; zero
+ weights (the boundary cases above) are skipped, which also keeps
+ the index computation from addressing nodes outside the grid */
+ Float result = 0.0f;
+ for (int y=-1; y<=2; ++y) {
+ Float wy = knotWeights[1][y+1];
+ for (int x=-1; x<=2; ++x) {
+ Float wxy = knotWeights[0][x+1] * wy;
+
+ if (wxy == 0)
+ continue;
+
+ size_t pos = (knot[1] + y) * size[0] + knot[0] + x;
+
+ result += values[pos] * wxy;
+ }
+ }
+ return result;
+}
+
+/**
+ * Evaluate a 3D tensor-product cubic spline interpolant at position 'p',
+ * given function values on a regular grid of 'size' samples covering the
+ * axis-aligned box [min, max]. 'values' is indexed with the X dimension
+ * varying fastest, then Y, then Z.
+ *
+ * Returns 0 when 'p' lies outside the domain (or is NaN -- the negated
+ * comparison below catches NaN) unless 'extrapolate' is set.
+ *
+ * NOTE(review): with extrapolate=true and p[dim] < min[dim], 't' is
+ * negative and the (size_t) cast relies on the subsequent std::min clamp;
+ * confirm callers only extrapolate slightly beyond the domain.
+ */
+Float evalCubicInterp3D(const Point3 &p, const Float *values, const Size3 &size,
+ const Point3 &min, const Point3 &max, bool extrapolate) {
+ /* Per-dimension weights of the (up to) 4 contributing grid nodes,
+ plus the index of the left knot in each dimension */
+ Float knotWeights[3][4];
+ Size3 knot;
+
+ /* Compute interpolation weights separately for each dimension */
+ for (int dim=0; dim<3; ++dim) {
+ Float *weights = knotWeights[dim];
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate)
+ return 0.0f;
+
+ /* Transform 'p' so that knots lie at integer positions */
+ Float t = ((p[dim] - min[dim]) * (size[dim] - 1))
+ / (max[dim]-min[dim]);
+
+ /* Find the index of the left knot in the queried subinterval, be
+ robust to cases where 't' lies exactly on the right endpoint */
+ knot[dim] = std::min((size_t) t, size[dim] - 2);
+
+ /* Compute the relative position within the interval */
+ t = t - (Float) knot[dim];
+
+ /* Compute node weights (cubic Hermite basis for the two endpoint
+ values; the outer two entries are filled in below) */
+ Float t2 = t*t, t3 = t2*t;
+ weights[0] = 0.0f;
+ weights[1] = 2*t3 - 3*t2 + 1;
+ weights[2] = -2*t3 + 3*t2;
+ weights[3] = 0.0f;
+
+ /* Derivative weights */
+ Float d0 = t3 - 2*t2 + t,
+ d1 = t3 - t2;
+
+ /* Turn derivative weights into node weights using
+ an appropriate chosen finite differences stencil: central
+ differences in the interior, one-sided at the grid borders */
+ if (knot[dim] > 0) {
+ weights[2] += 0.5f * d0;
+ weights[0] -= 0.5f * d0;
+ } else {
+ weights[2] += d0;
+ weights[1] -= d0;
+ }
+
+ if (knot[dim] + 2 < size[dim]) {
+ weights[3] += 0.5f * d1;
+ weights[1] -= 0.5f * d1;
+ } else {
+ weights[2] += d1;
+ weights[1] -= d1;
+ }
+ }
+
+ /* Accumulate the tensor-product sum over the 4x4x4 stencil; zero
+ weights (the boundary cases above) are skipped, which also keeps
+ the index computation from addressing nodes outside the grid */
+ Float result = 0.0f;
+ for (int z=-1; z<=2; ++z) {
+ Float wz = knotWeights[2][z+1];
+ for (int y=-1; y<=2; ++y) {
+ Float wyz = knotWeights[1][y+1] * wz;
+ for (int x=-1; x<=2; ++x) {
+ Float wxyz = knotWeights[0][x+1] * wyz;
+
+ if (wxyz == 0)
+ continue;
+
+ size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y))
+ * size[0] + knot[0] + x;
+
+ result += values[pos] * wxyz;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * Evaluate a 3D tensor-product cubic spline interpolant at position 'p'
+ * for irregularly spaced nodes: 'nodes_[dim]' holds the size[dim] node
+ * positions of dimension 'dim' (assumed sorted, as required by the
+ * std::lower_bound query below), and 'values' holds the function values
+ * indexed with the X dimension varying fastest, then Y, then Z.
+ *
+ * Returns 0 when 'p' lies outside the node range (or is NaN -- the
+ * negated comparison catches NaN) unless 'extrapolate' is set.
+ */
+Float evalCubicInterp3DN(const Point3 &p, const Float **nodes_,
+ const Float *values, const Size3 &size, bool extrapolate) {
+ /* Per-dimension weights of the (up to) 4 contributing nodes, plus
+ the index of the left knot in each dimension */
+ Float knotWeights[3][4];
+ Size3 knot;
+
+ /* Compute interpolation weights separately for each dimension */
+ for (int dim=0; dim<3; ++dim) {
+ const Float *nodes = nodes_[dim];
+ Float *weights = knotWeights[dim];
+
+ /* Give up when given an out-of-range or NaN argument */
+ if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate)
+ return 0.0f;
+
+ /* Find the index of the left knot in the queried subinterval, be
+ robust to cases where 't' lies exactly on the right endpoint.
+ The ptrdiff_t clamp also keeps the index valid when extrapolating */
+ size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size[dim] - 2,
+ std::lower_bound(nodes, nodes + size[dim], p[dim]) - nodes - 1));
+ knot[dim] = k;
+
+ Float width = nodes[k+1] - nodes[k];
+
+ /* Compute the relative position within the interval */
+ Float t = (p[dim] - nodes[k]) / width,
+ t2 = t*t, t3 = t2*t;
+
+ /* Compute node weights (cubic Hermite basis for the two endpoint
+ values; the outer two entries are filled in below) */
+ weights[0] = 0.0f;
+ weights[1] = 2*t3 - 3*t2 + 1;
+ weights[2] = -2*t3 + 3*t2;
+ weights[3] = 0.0f;
+
+ /* Derivative weights */
+ Float d0 = t3 - 2*t2 + t, d1 = t3 - t2;
+
+ /* Turn derivative weights into node weights using
+ an appropriate chosen finite differences stencil; the extra
+ 'factor' accounts for the nonuniform node spacing */
+ if (k > 0) {
+ Float factor = width / (nodes[k+1]-nodes[k-1]);
+ weights[2] += d0 * factor;
+ weights[0] -= d0 * factor;
+ } else {
+ weights[2] += d0;
+ weights[1] -= d0;
+ }
+
+ if (k + 2 < size[dim]) {
+ Float factor = width / (nodes[k+2]-nodes[k]);
+ weights[3] += d1 * factor;
+ weights[1] -= d1 * factor;
+ } else {
+ weights[2] += d1;
+ weights[1] -= d1;
+ }
+ }
+
+ /* Accumulate the tensor-product sum over the 4x4x4 stencil; zero
+ weights (the boundary cases above) are skipped, which also keeps
+ the index computation from addressing nodes outside the grid */
+ Float result = 0.0f;
+ for (int z=-1; z<=2; ++z) {
+ Float wz = knotWeights[2][z+1];
+ for (int y=-1; y<=2; ++y) {
+ Float wyz = knotWeights[1][y+1] * wz;
+ for (int x=-1; x<=2; ++x) {
+ Float wxyz = knotWeights[0][x+1] * wyz;
+
+ if (wxyz == 0)
+ continue;
+
+ size_t pos = ((knot[2] + z) * size[1] + (knot[1] + y))
+ * size[0] + knot[0] + x;
+
+ result += values[pos] * wxyz;
+ }
+ }
+ }
+ return result;
+}
+
+MTS_NAMESPACE_END
diff --git a/src/libcore/ssemath.cpp b/src/libcore/ssemath.cpp
index 83a11a2b..b1cc466d 100644
--- a/src/libcore/ssemath.cpp
+++ b/src/libcore/ssemath.cpp
@@ -16,6 +16,12 @@
along with this program. If not, see .
*/
+#if defined(__GXX_EXPERIMENTAL_CXX0X__)
+ /* Needed to prevent a segmentation fault in the Intel C++
+ compiler on Linux (as of Nov 2012) */
+ #undef __GXX_EXPERIMENTAL_CXX0X__
+#endif
+
#if MTS_SSE
#include
#include
diff --git a/src/libcore/statistics.cpp b/src/libcore/statistics.cpp
index f8b1fb82..7ae41f44 100644
--- a/src/libcore/statistics.cpp
+++ b/src/libcore/statistics.cpp
@@ -134,7 +134,7 @@ void Statistics::logPlugin(const std::string &name, const std::string &descr) {
}
void Statistics::printStats() {
- SLog(EInfo, "Statistics: \n%s", getStats().c_str());
+ SLog(EInfo, "Statistics:\n%s", getStats().c_str());
}
std::string Statistics::getStats() {
diff --git a/src/libcore/thread.cpp b/src/libcore/thread.cpp
index 2811463b..9fd9e7fb 100644
--- a/src/libcore/thread.cpp
+++ b/src/libcore/thread.cpp
@@ -18,7 +18,7 @@
#include
#include
-#ifdef MTS_OPENMP
+#if defined(MTS_OPENMP)
# include
#endif
@@ -373,9 +373,9 @@ void Thread::yield() {
void Thread::exit() {
Log(EDebug, "Thread \"%s\" has finished", d->name.c_str());
d->running = false;
- decRef();
- ThreadPrivate::self->set(NULL);
+ Assert(ThreadPrivate::self->get() == this);
detail::destroyLocalTLS();
+ decRef();
}
std::string Thread::toString() const {
@@ -396,6 +396,7 @@ static boost::mutex __unmanagedMutex;
#if defined(MTS_OPENMP) && defined(__OSX__)
static int __omp_threadCount = 0;
static pthread_key_t __omp_key;
+static bool __omp_key_created;
int mts_omp_get_max_threads() {
/* This function exists to sidestep an annoying
@@ -446,35 +447,44 @@ void Thread::staticShutdown() {
__unmanagedThreads[i]->decRef();
__unmanagedThreads.clear();
getThread()->d->running = false;
- ThreadPrivate::self->set(NULL);
+ detail::destroyLocalTLS();
delete ThreadPrivate::self;
ThreadPrivate::self = NULL;
- detail::destroyLocalTLS();
detail::destroyGlobalTLS();
#if defined(__OSX__)
+ #if defined(MTS_OPENMP)
+ if (__omp_key_created)
+ pthread_key_delete(__omp_key);
+ #endif
__mts_autorelease_shutdown();
#endif
}
void Thread::initializeOpenMP(size_t threadCount) {
-#ifdef MTS_OPENMP
+#if defined(MTS_OPENMP)
ref logger = Thread::getThread()->getLogger();
ref fResolver = Thread::getThread()->getFileResolver();
#if defined(__OSX__)
+ if (!__omp_key_created) {
+ pthread_key_create(&__omp_key, NULL);
+ __omp_key_created = true;
+ }
__omp_threadCount = threadCount;
- pthread_key_create(&__omp_key, NULL);
#endif
+ if (omp_get_dynamic())
+ omp_set_dynamic(0);
+
omp_set_num_threads((int) threadCount);
- omp_set_dynamic(false);
int counter = 0;
#pragma omp parallel
{
#if defined(__OSX__)
- pthread_setspecific(__omp_key, reinterpret_cast(counter));
+ if (!pthread_getspecific(__omp_key))
+ pthread_setspecific(__omp_key, reinterpret_cast(counter));
#endif
detail::initializeLocalTLS();
Thread *thread = Thread::getThread();
@@ -485,14 +495,25 @@ void Thread::initializeOpenMP(size_t threadCount) {
formatString("omp%i", counter));
counter++;
}
+ const std::string threadName = "Mitsuba: " + thread->getName();
+
+ #if defined(__LINUX__)
+ prctl(PR_SET_NAME, threadName.c_str());
+ #elif defined(__OSX__)
+ pthread_setname_np(threadName.c_str());
+ #elif defined(__WINDOWS__)
+ SetThreadName(threadName.c_str());
+ #endif
+
thread->d->running = false;
thread->d->joined = false;
thread->d->fresolver = fResolver;
thread->d->logger = logger;
thread->incRef();
ThreadPrivate::self->set(thread);
+
#pragma omp critical
- __unmanagedThreads.push_back((UnmanagedThread *) thread);
+ __unmanagedThreads.push_back((UnmanagedThread *) thread);
}
}
#else
diff --git a/src/libcore/tls.cpp b/src/libcore/tls.cpp
index 7237ec54..75f6912b 100644
--- a/src/libcore/tls.cpp
+++ b/src/libcore/tls.cpp
@@ -17,16 +17,26 @@
*/
#include
+
#include
+#include
#include
-#include
-#include
+#include
+
+#include