metadata
Wenzel Jakob 2012-11-02 19:41:04 -04:00
commit f66737cf39
57 changed files with 704 additions and 519 deletions

View File

@ -6,3 +6,4 @@ e3c0182ba64b77319ce84c9e2a8581649e68273d v0.2.1
cb6e89af8012fac22cc0f3c5ad247c98c701bdda v0.3.0
ee26517b27207353b0c8a7d357bcb4977b5d93fb v0.4.0
7db07694ea00eb1655f7a1adcc3ae880e8e116f9 v0.4.1
13a39b11aceee517c19d2e2cec2e6b875546062c v0.4.2

View File

@ -2,4 +2,4 @@
cp /opt/intel/composer_xe_*/compiler/lib/libiomp5.dylib Mitsuba.app/Contents/Frameworks
find Mitsuba.app/Contents/MacOS/ Mitsuba.app/plugins -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
find Mitsuba.app/Contents/Frameworks/libmitsuba-* -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
find Mitsuba.app/Contents/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib
find Mitsuba.app/python -type f | xargs -n 1 install_name_tool -change libiomp5.dylib @rpath/libiomp5.dylib

View File

@ -1,3 +1,18 @@
mitsuba (0.4.2-1) unstable; urgency=low
* Volumetric path tracers: improved sampling when dealing with index-matched medium transitions. This is essentially a re-implementation of an optimization that Mitsuba 0.3.1 already had, but which got lost in the bidirectional rewrite.
* Batch tonemapper: due to an unfortunate bug, the batch tonemapper in the last release produced invalid results for images containing an alpha channel. This is now fixed.
* Shapes: corrected some differential geometry issues in the "cylinder" and "rectangle" shapes.
* MLT: fixed 2-stage MLT, which was producing incorrect results.
* MEPT: fixed the handling of directional light sources.
* Robustness: got rid of various corner-cases that could produce NaNs.
* Filenames: to facilitate loading scenes created on Windows/OSX, the Linux version now resolves files case-insensitively if they could not be found after a case-sensitive search.
* Python: added Python bindings for shapes and triangle meshes. The Python plugin should now be easier to load (previously, this was unfortunately rather difficult on several platforms). The documentation was also given an overhaul.
* Particle tracing: I've decided to disable the adjoint BSDF for shading normals in the particle tracer, since it causes an unacceptable amount of variance in scenes containing poorly tessellated geometry. This affects the plugins ptracer, ppm, sppm and photonmapper.
* Subsurface scattering: fixed parallel network renderings involving the dipole model.
* Homogeneous medium & dipole: added many more material presets by Narasimhan et al.
* OBJ loader: further robustness improvements to the OBJ loader and the associated MTL material translator.
-- Wenzel Jakob <wenzel@cs.cornell.edu> Wed, 31 Oct 2012 00:00:00 -0400
mitsuba (0.4.1-1) unstable; urgency=low
* negative pixel values in textures and environment maps are handled more gracefully.
* minor robustness improvements to the OBJ and COLLADA importers.

View File

@ -1,5 +1,5 @@
Name: mitsuba
Version: 0.4.1
Version: 0.4.2
Release: 1%{?dist}
Summary: Mitsuba renderer
Group: Applications/Graphics
@ -62,6 +62,9 @@ rm -rf $RPM_BUILD_ROOT
/usr/include/*
%changelog
* Wed Oct 31 2012 Wenzel Jakob <wenzel@cs.cornell.edu> 0.4.2%{?dist}
- Upgrade to version 0.4.2
* Wed Oct 10 2012 Wenzel Jakob <wenzel@cs.cornell.edu> 0.4.1%{?dist}
- Upgrade to version 0.4.1

View File

@ -1,11 +1,11 @@
\section{Acknowledgments}
I am indebted to my advisor Steve Marschner for allowing me to devote
a significant amount of my research time to this project. His insightful and
encouraging suggestions have helped transform this program into much more than
I ever thought it would be.
The architecture of Mitsuba as well as some individual components are based on
implementations discussed in: \emph{Physically Based Rendering - From Theory
To Implementation} by Matt Pharr and Greg Humphreys.
Some of the GUI icons were taken from the Humanity icon set by Canonical Ltd.
@ -13,10 +13,10 @@ The material test scene was created by Jonas Pilo, and the environment map
it uses is courtesy of Bernhard Vogl.
The included index of refraction data files for conductors are copied from
PBRT. They are originally from the Luxpop database (\url{www.luxpop.com})
and are based on data by Palik et al. \cite{Palik1998Handbook}
and measurements of atomic scattering factors made by the Center For
X-Ray Optics (CXRO) at Berkeley and the Lawrence Livermore National
Laboratory (LLNL).
The following people have kindly contributed code or bugfixes:
@ -28,7 +28,7 @@ The following people have kindly contributed code or bugfixes:
\item Leonhard Gr\"unschlo\ss
\end{itemize}
Mitsuba makes heavy use of the following amazing libraries and tools:
\begin{itemize}
\item Qt 4 by Digia
\item OpenEXR by Industrial Light \& Magic

View File

@ -4,18 +4,18 @@ The rendering functionality of Mitsuba can be accessed through
a command line interface and an interactive Qt-based frontend. This section
provides some basic instructions on how to use them.
\subsection{Interactive frontend}
To launch the interactive frontend, run \code{Mitsuba.app} on MacOS,
\code{mtsgui.exe} on Windows, and \code{mtsgui} on Linux (after sourcing \code{setpath.sh}).
You can also drag and drop scene files onto the application icon or the running program to open them.
A quick video tutorial on using the GUI can be found here: \url{http://vimeo.com/13480342}.
\subsection{Command line interface}
\label{sec:mitsuba}
The \texttt{mitsuba} binary is an alternative non-interactive rendering
frontend for command-line usage and batch job operation.
To get a listing of the parameters it supports, run
the executable without parameters:
\begin{shell}
$\texttt{\$}$ mitsuba
\end{shell}
\begin{console}[label=lst:mitsuba-cli,caption=Command line options of the \texttt{mitsuba} binary]
Mitsuba version $\texttt{\MitsubaVersion}$, Copyright (c) $\texttt{\MitsubaYear}$ Wenzel Jakob
@ -75,18 +75,18 @@ mode of operation is to render a single scene, which is provided as a parameter,
$\texttt{\$}$ mitsuba path-to/my-scene.xml
\end{shell}
It is also possible to connect to network render nodes, which essentially lets Mitsuba parallelize
over additional cores. To do this, pass a semicolon-separated list of machines to
the \code{-c} parameter.
\begin{shell}
$\texttt{\$}$ mitsuba -c machine1;machine2;... path-to/my-scene.xml
\end{shell}
There are two different ways in which you can access render nodes:
\begin{itemize}
\item\textbf{Direct}: Here, you create a direct connection to a running \code{mtssrv} instance on
another machine (\code{mtssrv} is the Mitsuba server process). From a performance
standpoint, this approach should always be preferred over the SSH method described below when there is
a choice between them. There are some disadvantages though: first, you need to manually start
\code{mtssrv} on every machine you want to use.
And perhaps more importantly: the direct communication
protocol makes no provisions for a malicious user on the remote side. It is too costly
@ -98,11 +98,11 @@ For direct connections, you can specify the remote port as follows:
\begin{shell}
$\texttt{\$}$ mitsuba -c machine:1234 path-to/my-scene.xml
\end{shell}
When no port is explicitly specified, Mitsuba uses the default value of 7554.
\item \textbf{SSH}:
This approach works as follows: the renderer creates an SSH connection
to the remote side, where it launches a Mitsuba worker instance.
All subsequent communication then passes through the encrypted link.
This is completely secure but slower due to the encryption overhead.
If you are rendering a complex scene, there is a good chance that it
won't matter much since most time is spent doing computations rather than
@ -119,18 +119,18 @@ $\texttt{\$}$ mitsuba -c username@machine path-to/my-scene.xml
\begin{shell}
$\texttt{\$}$ mitsuba -c username@machine:/opt/mitsuba path-to/my-scene.xml
\end{shell}
For the SSH connection approach to work, you \emph{must} enable passwordless
authentication.
Try opening a terminal window and running the command \code{ssh username@machine}
(replace with the details of your remote connection).
If you are asked for a password, something is not set up correctly --- please see
\url{http://www.debian-administration.org/articles/152} for instructions.
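On Linux/OSX, a common way to set this up is with the standard OpenSSH tools; the sketch below assumes the remote machine accepts public-key logins and that \code{ssh-copy-id} is available:
\begin{shell}
$\texttt{\$}$ ssh-keygen -t rsa
$\texttt{\$}$ ssh-copy-id username@machine
\end{shell}
The first command generates a key pair (leave the passphrase empty for fully unattended logins), and the second appends the public key to the remote \code{authorized\_keys} file.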
On Windows, the situation is a bit more difficult since there is no suitable SSH client by
default. To get SSH connections to work, Mitsuba requires \code{plink.exe} (from PuTTY) to
be on the path. For passwordless authentication with a Linux/OSX-based
server, convert your private key to PuTTY's format using \code{puttygen.exe}.
Afterwards, start \code{pageant.exe} to load and authenticate the key. All
of these binaries are available from the PuTTY website.
It is possible to mix the two approaches to access some machines directly and others
@ -152,7 +152,7 @@ machine3.domain.org:7346
Any attribute in the XML-based scene description language can be parameterized from the
command line.
For instance, you can render a scene several times with different reflectance values
on a certain material by changing its description to something like
\begin{xml}
<bsdf type="diffuse">
<spectrum name="reflectance" value="$\texttt{\$}$reflectance"/>
@ -160,28 +160,28 @@ on a certain material by changing its description to something like
\end{xml}
and running Mitsuba as follows:
\begin{shell}
$\texttt{\$}$ mitsuba -Dreflectance=0.1 -o ref_0.1.exr scene.xml
$\texttt{\$}$ mitsuba -Dreflectance=0.2 -o ref_0.2.exr scene.xml
$\texttt{\$}$ mitsuba -Dreflectance=0.5 -o ref_0.5.exr scene.xml
\end{shell}
\subsubsection{Writing partial images to disk}
When doing lengthy command line renders on Linux or OSX, it is possible
to send a signal to the process using
\begin{shell}
$\texttt{\$}$ killall -HUP mitsuba
\end{shell}
This causes the renderer to write out the partially finished
image, after which it continues rendering. This can sometimes be useful to
check if everything is working correctly.
\subsubsection{Rendering an animation}
The command line interface is ideally suited for rendering large numbers of files in batch
operation. You can simply pass in the files using a wildcard in the filename.
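For instance, assuming the frames are stored as \texttt{frame\_0001.xml}, \texttt{frame\_0002.xml}, and so on (a hypothetical naming scheme), the entire animation can be submitted in one invocation:
\begin{shell}
$\texttt{\$}$ mitsuba animation/frame_*.xml
\end{shell}
The flags discussed next refine this basic pattern.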
If you've already rendered a subset of the frames and you only want to complete the remainder,
add the \texttt{-x} flag, and all files with existing output will be skipped. You can also
let the scheduler work on several scenes at once using the \texttt{-j} parameter --- this is
especially useful when parallelizing over multiple machines: as some of the participating machines
finish rendering the current frame, they can immediately start working on the next one
instead of having to wait for all other cores to finish. Altogether, you
@ -206,26 +206,26 @@ $\texttt{\$}$ mtssrv -i maxwell.cs.cornell.edu
\end{shell}
As noted in Section~\ref{sec:mitsuba}, \code{mtssrv} should be run \emph{only} in trusted networks.
One nice feature of \code{mtssrv} is that it (like the \code{mitsuba} executable)
also supports the \code{-c} and \code{-s} parameters, which create connections
to additional compute servers.
Using this feature, one can create hierarchies of compute nodes. For instance,
the root \code{mtssrv} instance of such a hierarchy could share its work with a
number of other machines running \code{mtssrv}, and each of these might also
share their work with further machines, and so on...
The parallelization over such hierarchies happens transparently---when
connecting a rendering process to the root node, it sees a machine
with hundreds or thousands of cores, to which it can submit work without
needing to worry about how exactly it is going to be spread out in
the hierarchy.
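As a concrete sketch (with hypothetical machine names), the root node of a two-level hierarchy could be started on \code{machine1} so that it delegates work to two further servers:
\begin{shell}
$\texttt{\$}$ mtssrv -c machine2;machine3
\end{shell}
A rendering job can then be pointed at the root node alone (e.g. \code{mitsuba -c machine1 scene.xml}) and will transparently use the cores of all three machines.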
Such hierarchies are mainly useful to reduce communication bottlenecks when distributing
large resources (such as scenes) to remote machines. Imagine the following hypothetical scenario:
you would like to render a 50MB-sized scene while at home, but rendering is too slow.
You decide to tap into some extra machines available
at your workplace, but this usually doesn't make things much faster because of the relatively slow broadband
connection and the need to transmit your scene to every single compute node involved.
Using \code{mtssrv}, you can
instead designate a central scheduling node at your workplace, which accepts connections and delegates
@ -233,10 +233,10 @@ rendering tasks to the other machines. In this case, you will only have to trans
and the remaining distribution happens over the fast local network at your workplace.
\subsection{Utility launcher}
\label{sec:mtsutil}
When working on a larger project, one often needs to implement various utility programs that
perform simple tasks, such as applying a filter to an image or processing
a matrix stored in a file. In a framework like Mitsuba, this unfortunately involves
a significant coding overhead in initializing the necessary APIs on all supported platforms.
To reduce this tedious work on the side of the programmer, Mitsuba comes with a utility launcher
called \code{mtsutil}.
@ -250,7 +250,7 @@ For a listing of all supported options and utilities, enter the command without
\label{sec:tonemapper}
One particularly useful utility that shall be mentioned here is the batch tonemapper, which
loads EXR/RGBE images and writes tonemapped 8-bit PNG/JPGs. This can save much time when one has to
process many high dynamic-range images such as animation frames using the same basic operations,
e.g. gamma correction, changing the overall brightness, resizing, cropping, etc. The available
command line options are shown in \lstref{tonemap-cli}.
@ -282,14 +282,14 @@ Options/Arguments:
between [0, 1] chooses between low and high-key images and
'burn' (also [0, 1]) controls how much highlights may burn out
-x Temporal coherence mode: activate this flag when tonemapping
frames of an animation using the '-p' option to avoid flicker
-o file Save the output with a given filename
-t Multithreaded: process several files in parallel
The operations are ordered as follows: 1. crop, 2. resize, 3. color-balance,
4. tonemap, 5. annotate. To simply process a directory full of EXRs in
parallel, run the following: 'mtsutil tonemap -t path-to-directory/*.exr'
\end{console}

View File

@ -1,23 +1,23 @@
\section{Compiling the renderer}
\label{sec:compiling}
To compile Mitsuba, you will need a recent C++ compiler (e.g. GCC 4.2+ or
Visual Studio 2010) and some additional libraries, which Mitsuba uses internally.
Builds on all supported platforms are done using a unified system
based on SCons (\url{http://www.scons.org}), which is a Python-based
software construction tool. The exact process is different depending on
which operating system is used and will be explained in the following subsections.
\subsection{Common steps}
To get started, you will need to download a recent version of the Mitsuba source code. Before
doing this, ensure that you have read the licensing agreement
(Section~\ref{sec:license}), and that you abide by its contents. Note that, being a ``viral''
license, the GPL automatically applies to derivative work. Amongst other things, this
means that Mitsuba's source code is \emph{off-limits} to those who develop rendering
software not distributed under a compatible license.
Check that the Mercurial (\url{http://mercurial.selenic.com/}) versioning
system\footnote{On Windows, you might want to use the convenient TortoiseHG shell
extension (\url{http://tortoisehg.bitbucket.org/}) to run the subsequent steps directly from the Explorer.}
is installed, which is required to fetch the most recent source code release.
Begin by entering the following at the command prompt (or run an equivalent command from a graphical Mercurial frontend):
\begin{shell}
@ -38,7 +38,7 @@ will run extremely slowly. Its main use is to track down elusive bugs.
\paragraph{Windows:}
On Windows, builds can either be performed using the Visual Studio 2010\footnote{No other Visual Studio versions are currently supported.}
compiler or Intel XE Composer (on top of Visual Studio 2010).
Note that Visual Studio 2010 Service Pack 1 \emph{must} be installed or the resulting binaries will crash.
\begin{description}
\item[\code{build/config-\{win32, win64\}-\{msvc2010, msvc2010-debug\}.py}:] Create 32 or 64 bit binaries using Microsoft Visual C++ version 2010.
The configurations with the suffix \code{-debug} will include debug symbols in all binaries, which run very slowly.
@ -66,7 +66,7 @@ $\texttt{\$}$ cp build/config-linux-gcc.py config.py
\subsection{Compilation flags}
\label{sec:compiling-flags}
There are several flags that affect the behavior of Mitsuba and must be specified at compile time.
These usually don't need to be changed, but if you want to compile Mitsuba for spectral rendering, or
to use double precision for internal computations, then the following may be useful. Otherwise, you may skip ahead to the subsection
that covers your operating system.
@ -77,13 +77,13 @@ The following options are available:
enabled by default (even in release builds).
\item[\texttt{MTS\_KD\_DEBUG}] Enable additional checks in the kd-tree. This
is quite slow and mainly useful to track down bugs when they are suspected.
\item[\texttt{MTS\_KD\_CONSERVE\_MEMORY}] Use a more compact representation
for triangle geometry (at the cost of speed). This flag causes Mitsuba to use the somewhat slower
Moeller-Trumbore triangle intersection method instead of the default Wald
intersection test, which has an overhead of 48 bytes per triangle.
Off by default.
\item[\texttt{MTS\_SSE}]Activate optimized SSE routines. On by default.
\item[\texttt{MTS\_HAS\_COHERENT\_RT}]Include coherent ray tracing support (depends on \texttt{MTS\_SSE}). This flag is activated by default.
\item[\texttt{MTS\_DEBUG\_FP}]Generated NaNs and overflows will cause floating point exceptions, which can be caught in a debugger. This is slow and mainly meant as a debugging tool for developers. Off by default.
\item[\texttt{SPECTRUM\_SAMPLES=}$\langle ..\rangle$]This setting defines the number of spectral samples (in the 368-830 $nm$ range) that are used to render scenes. The default is 3 samples, in which case the renderer automatically turns into an RGB-based system. For high-quality spectral rendering, this should be set to 30 or higher.
Refer also to \secref{colorspaces}.
@ -95,24 +95,24 @@ fallback instead of the hardware-accelerated realtime preview.
This is useful when the binary will be executed over a remote link using a
protocol such as RDP (which does not provide the requisite OpenGL features).
\end{description}
All of the default configuration files located in the \texttt{build} directory use the flags
\code{SINGLE\_PRECISION}, \code{SPECTRUM\_SAMPLES=3}, \code{MTS\_DEBUG}, \code{MTS\_SSE},
as well as \code{MTS\_HAS\_COHERENT\_RT}.
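Concretely, these flags are ordinary preprocessor definitions passed to the compiler by the chosen \code{config.py}; a hypothetical excerpt for a high-quality spectral build might look as follows (the surrounding flags are abbreviated, and the exact layout may differ between configuration files):
\begin{console}
CXXFLAGS = ['-O3', ..., '-DSINGLE_PRECISION',
            '-DSPECTRUM_SAMPLES=30', '-DMTS_SSE',
            '-DMTS_HAS_COHERENT_RT', '-DMTS_DEBUG']
\end{console}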
\subsection{Building on Debian or Ubuntu Linux}
\label{sec:compiling-ubuntu}
You'll first need to install a number of dependencies. It is assumed here that you are using a
recent version of Ubuntu Linux (Precise Pangolin / 12.04 LTS or later), hence some of the
packages may be named differently if you are using Debian Linux or another Ubuntu version.
First, run
\begin{shell}
$\text{\$}$ sudo apt-get install build-essential scons mercurial qt4-dev-tools libpng12-dev
libjpeg62-dev libilmbase-dev libxerces-c-dev libboost-all-dev
libopenexr-dev libglewmx1.5-dev libxxf86vm-dev libpcrecpp0 libeigen3-dev
\end{shell}
To get COLLADA support, you will also need to install the \texttt{collada-dom} packages
or build them from scratch. Here, we install the \code{x86\_64} binaries and development
headers that can be found on the Mitsuba website (at \url{http://www.mitsuba-renderer.org/releases/current})
\begin{shell}
$\text{\$}$ sudo dpkg --install collada-dom_*.deb
@ -121,7 +121,7 @@ To start a regular build, run
\begin{shell}
$\text{\$}$ scons
\end{shell}
inside the Mitsuba directory. If you have multiple processors, you might want to parallelize the
build by appending \code{-j }\emph{core count} to the \code{scons} command.
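For instance, on a machine with eight logical cores:
\begin{shell}
$\text{\$}$ scons -j 8
\end{shell}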
If all goes well, SCons should finish successfully within a few minutes:
\begin{shell}
@ -129,16 +129,15 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
$\text{\$}$ . setpath.sh
$\text{\$}$ source setpath.sh
\end{shell}
(note the period at the beginning -- this assumes that you are using \code{bash}).
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Debian or Ubuntu Linux packages}
The preferred way of redistributing executables on Debian or Ubuntu Linux is to create
\code{.deb} package files. To make custom Mitsuba packages, it is strongly recommended
that you work with a pristine installation of the target operating system\footnote{Several commercial graphics
drivers ``pollute'' the OpenGL setup so that the compiled Mitsuba binaries
can only be used on machines using the same drivers. For this reason, it is
better to work from a clean bootstrapped install.}. This can be done as follows:
first, install \code{debootstrap} and download the most recent operating system release
to a subdirectory. The following example is based on Ubuntu 12.04 LTS (``Precise Pangolin''),
@ -169,18 +168,18 @@ $\text{\$}$ dpkg-buildpackage -nc
After everything has been built, you should find the created package files
in the root directory.
\subsubsection{Releasing Ubuntu packages}
To redistribute Ubuntu packages over the Internet or a local network, it is convenient to
put them into an \code{apt}-compatible repository. To prepare such a
repository, put the two \code{deb}-files built in the last section,
as well as the \code{collada-dom} \code{deb}-files into a public directory
made available by an HTTP server and, inside it, run
\begin{shell}
path-to-htdocs$\text{\$}$ dpkg-scanpackages path/to/deb-directory /dev/null | gzip -9c > path/to/deb-directory/Packages.gz
\end{shell}
This will create a repository index file named \code{Packages.gz}.
Note that you must execute this command in the root directory of the
HTTP server's web directory and provide the relative path to the
package files -- otherwise, the index file will specify the wrong package
paths. Finally, the whole directory can be uploaded to some public location
and then referenced by placing a line following the pattern
\begin{shell}
@ -190,7 +189,7 @@ into the \code{/etc/apt/sources.list} file. This setup is convenient for
distributing a custom Mitsuba build to many Debian or Ubuntu machines (e.g. nodes in a rendering cluster).
\subsection{Building on Fedora Core}
You'll first need to install a number of dependencies. It is assumed here
that you are using FC15, hence some of the packages may be named differently if you are
using another version.
First, run
@ -208,9 +207,8 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
$\text{\$}$ . setpath.sh
$\text{\$}$ source setpath.sh
\end{shell}
(note the period at the beginning -- this assumes that you are using \code{bash}).
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Fedora Core packages}
To create \code{RPM} packages, you will need to install the \code{RPM} development tools:
@ -232,7 +230,7 @@ $\text{\$}$ rpmbuild -bb mitsuba-$\code{\MitsubaVersion}$/data/linux/fedora/mits
\end{shell}
After this command finishes, its output can be found in the directory \code{rpmbuild/RPMS}.
\subsection{Building on Arch Linux}
You'll first need to install a number of dependencies:
\begin{shell}
$\text{\$}$ sudo pacman -S gcc xerces-c glew openexr boost libpng libjpeg qt scons mercurial python
\end{shell}
@ -261,9 +259,9 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
$\text{\$}$ . setpath.sh
$\text{\$}$ source setpath.sh
\end{shell}
(note the period at the beginning -- this assumes that you are using \code{bash}).
Having set up everything, you can now move on to \secref{basics}.
\subsubsection{Creating Arch Linux packages}
Mitsuba ships with a \code{PKGBUILD} file, which automatically builds
@ -292,16 +290,16 @@ There are a few other things that need to be set up: make sure that your
installation of Visual Studio is up to date, since Mitsuba binaries created with versions
prior to Service Pack 1 will crash.
Next, you will need to install Python 2.6.x
(\url{www.python.org}) and SCons\footnote{Note that on some Windows machines, the SCons
installer generates a warning about not finding Python in the registry. In this case, you
can instead run \code{python setup.py install} within the source release of SCons.}
(\url{http://www.scons.org}, any 2.x version will do) and ensure that they are contained in the \code{\%PATH\%}
environment variable so that entering \code{scons} on the command prompt (\code{cmd.exe})
launches the build system.
Having installed all dependencies, run the ``Visual Studio \emph{2010} Command
Prompt'' from the Start Menu (\code{x86} for 32-bit or \code{x64} for 64-bit),
navigate to the Mitsuba directory, and simply run
\begin{shell}
C:\mitsuba\>scons
@ -309,17 +307,17 @@ C:\mitsuba\>scons
If you have multiple processors, you might want to parallelize the build by appending the option \code{-j }\emph{core count} to the \code{scons} command.
If all goes well, the build process will finish successfully after a few
minutes. \emph{Note} that in comparison to the other platforms, you don't have to run the \code{setpath.sh} script at this point.
All binaries are automatically copied into the \code{dist} directory, and they should be executed directly from there.
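For instance, the graphical frontend can then be launched directly from the build prompt (continuing the example above; the checkout location is hypothetical):
\begin{shell}
C:\mitsuba\>dist\mtsgui.exe
\end{shell}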
\subsubsection{Integration with the Visual Studio interface}
Basic Visual Studio 2010 integration with support for code completion
exists for those who develop Mitsuba code on Windows.
To use the supplied projects, simply double-click on the file \code{build/mitsuba-msvc2010.sln}.
These Visual Studio projects still internally
use the SCons-based build system to compile Mitsuba; whatever
build configuration is selected within Visual Studio will be used to pick a matching
configuration file from the \texttt{build} directory.
\subsection{Building on Mac OS X}
\vspace{-5mm}
@ -332,13 +330,13 @@ configuration file from the \texttt{build} directory.
Compiling Mitsuba's dependencies on Mac OS is a laborious process; for convenience, there
is a repository that provides them in precompiled form. To use this repository, clone it
using Mercurial and rename the directory so that it forms the \code{dependencies} subdirectory
inside the main Mitsuba directory, i.e. run something like
\begin{shell}
$\text{\$}$ cd mitsuba
$\text{\$}$ hg clone https://www.mitsuba-renderer.org/hg/dependencies_macos
$\text{\$}$ mv dependencies_macos dependencies
\end{shell}
You will also need to install SCons (>2.0.0, available at \code{www.scons.org}) and
a recent release of XCode, including its command-line compilation tools. Next, run
\begin{shell}
$\text{\$}$ scons
@ -350,7 +348,6 @@ scons: $\texttt{done}$ building targets.
\end{shell}
To run the renderer from the command line, you first have to import it into your shell environment:
\begin{shell}
$\text{\$}$ . setpath.sh
$\text{\$}$ source setpath.sh
\end{shell}
(note the period at the beginning -- this assumes that you are using \code{bash}).

View File

@ -1,7 +1,7 @@
\part{Development guide}
This chapter and the subsequent ones will provide an overview
of the coding conventions and general architecture of Mitsuba.
You should only read them if you wish to interface with the API
in some way (e.g. by developing your own plugins). The coding style
section is only relevant if you plan to submit patches that are meant
to become part of the main codebase.
@ -9,15 +9,15 @@ to become part of the main codebase.
\section{Code structure}
Mitsuba is split into four basic support libraries:
\begin{itemize}
\item The core library (\code{libcore}) implements basic functionality such as
cross-platform file and bitmap I/O, data structures, scheduling, as well as logging and plugin management.
\item The rendering library (\code{librender}) contains abstractions
needed to load and represent scenes containing light sources, shapes, materials, and participating media.
\item The hardware acceleration library (\code{libhw})
implements a cross-platform display library, an object-oriented OpenGL
wrapper, as well as support for rendering interactive previews of scenes.
\item Finally, the bidirectional library (\code{libbidir})
contains a support layer that is used to implement bidirectional rendering algorithms such as
Bidirectional Path Tracing and Metropolis Light Transport.
\end{itemize}
A detailed reference of these APIs is available at
@ -25,11 +25,11 @@ A detailed reference of these APIs is available at
present a few basic examples to get familiar with them.
\section{Coding style}
\paragraph{Indentation:} The Mitsuba codebase uses tabs for indentation,
which expand to \emph{four} spaces. Please make sure that you configure your editor
this way, otherwise the source code layout will look garbled.
\paragraph{Placement of braces:} Opening braces should be placed on the
same line to make the best use of vertical space, i.e.
\begin{cpp}
if (x > y) {
@ -54,9 +54,9 @@ if ( x==y ){
..
\end{cpp}
\paragraph{Name format:} Names are always written in camel-case.
Classes and structures start with a capital letter, whereas member functions
and attributes start with a lower-case letter. Attributes of classes
have the prefix \code{m\_}. Here is an example:
\begin{cpp}
class MyClass {
@ -86,14 +86,14 @@ and properly conveys the semantics.
as getters and setters.
\paragraph{Documentation:} Header files should contain
Doxygen-compatible documentation. It is also a good idea to add
comments to a \code{.cpp} file to explain subtleties of an implemented algorithm.
However, anything pertaining to the API should go into the header file.
\paragraph{Boost:} Use the boost libraries whenever this helps to save
time or write more compact code.
\paragraph{Classes vs structures:}In Mitsuba, classes usually go onto the heap,
whereas structures may be allocated both on the stack and the heap.
Classes that derive from \code{Object} implement a protected virtual
destructor, which explicitly prevents them from being allocated on the stack.
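A minimal sketch of this convention (the class name is purely illustrative):
\begin{cpp}
class MyObject : public Object {
public:
    /* .. public interface .. */
protected:
    /// Protected virtual destructor: instances can no
    /// longer be allocated on the stack
    virtual ~MyObject() { }
};
\end{cpp}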
@ -110,6 +110,6 @@ if (..) {
\paragraph{Separation of plugins:}Mitsuba encourages that plugins are only
used via the generic interface they implement. You will find that almost all plugins
(e.g. emitters) don't actually provide a header file, hence they can only be accessed
using the generic \code{Emitter} interface they implement. If any kind of special
interaction between plugins is needed, this is usually an indication that the
generic interface should be extended to accommodate this.
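To illustrate what this separation means in practice, a plugin is typically created through the plugin manager and then used only through its generic interface; the sketch below assumes the \code{PluginManager} API and a \code{props} object specifying the plugin name and parameters:
\begin{cpp}
// Instantiate an emitter plugin by name and use it
// via the generic Emitter interface only
ref<Emitter> emitter = static_cast<Emitter *> (
    PluginManager::getInstance()->createObject(
        MTS_CLASS(Emitter), props));
\end{cpp}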

View File

@ -1,8 +1,8 @@
\section{Scene file format}
\label{sec:format}
Mitsuba uses a very simple and general XML-based format to represent scenes.
Since the framework's philosophy is to represent discrete blocks of functionality as plugins,
a scene file can essentially be interpreted as a description that determines which
plugins should be instantiated and how they should interface with each other.
In the following, we'll look at a few examples to get a feeling for the scope of the
format.
@ -18,17 +18,17 @@ something like this:
</scene>
\end{xml}
The scene version attribute denotes the release of Mitsuba that was used to
create the scene. This information allows Mitsuba to always correctly process the
file regardless of any potential future changes in the scene description language.
This example already contains the most important things to know about the format: you can have
\emph{objects} (such as the objects instantiated by the \code{scene} or \code{shape} tags),
which are allowed to be nested within each other. Each object optionally accepts \emph{properties}
(such as the \code{string} tag), which further characterize its behavior. All objects except
for the root object (the \code{scene}) cause the renderer to search for and load a plugin from disk,
hence you must provide the plugin name using the \code{type=".."} parameter.
The object tags also let the renderer know \emph{what kind} of object is to be instantiated: for instance,
any plugin loaded using the \code{shape} tag must conform to the \emph{Shape} interface, which is
certainly the case for the plugin named \code{obj} (it contains a Wavefront OBJ loader).
Similarly, you could write
@ -40,12 +40,12 @@ Similarly, you could write
</shape>
</scene>
\end{xml}
This loads a different plugin (\code{sphere}) which is still a \emph{Shape}, but instead represents
a sphere configured with a radius of 10 world-space units. Mitsuba ships with
a large number of plugins; please refer to the next chapter for a detailed
overview of them.
The most common scene setup is to declare an integrator, some geometry, a sensor (e.g. a camera), a film, a sampler
and one or more emitters. Here is a more complex example:
\begin{xml}
<?xml version="1.0" encoding="utf-8"?>
@ -103,9 +103,9 @@ and one or more emitters. Here is a more complex example:
</scene>
\end{xml}
This example introduces several new object types (\code{integrator, sensor, bsdf, sampler, film}, and \code{emitter})
and property types (\code{integer}, \code{transform}, and \code{rgb}).
As you can see in the example, objects are usually declared at the top level except if there is some
inherent relation that links them to another object. For instance, BSDFs are usually specific to a certain geometric object, so
they appear as a child object of a shape. Similarly, the sampler and film affect the way in which
rays are generated from the sensor and how it records the resulting radiance samples, hence they are nested inside it.
@ -135,45 +135,45 @@ uses a basic linear RGB representation\footnote{The official
releases all use linear RGB---to do spectral renderings, you will have
to compile Mitsuba yourself.}.
Irrespective of which internal representation is used, Mitsuba supports
several different ways of specifying color information, which is then
converted appropriately.
The preferred way of passing color spectra to the renderer is to explicitly
denote the associated wavelengths of each value:
\begin{xml}
<spectrum name="spectrumProperty" value="400:0.56, 500:0.18, 600:0.58, 700:0.24"/>
\end{xml}
This is a mapping from wavelength in nanometers (before the colon)
to a reflectance or intensity value (after the colon).
Values in between are linearly interpolated from the two closest neighbors.
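For instance, with the mapping shown above, a query at 450$nm$ (halfway between the 400$nm$ and 500$nm$ samples) would yield the average of the two values, i.e. $(0.56+0.18)/2=0.37$.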
A useful shortcut to get a completely uniform spectrum is to provide
A useful shortcut to get a completely uniform spectrum, it is to provide
only a single value:
\begin{xml}
<spectrum name="spectrumProperty" value="0.56"/>
\end{xml}
Another (discouraged) option is to directly provide the spectrum in Mitsuba's
internal representation, avoiding the need for any kind of conversion.
However, this is problematic, since the associated scene will likely not work
anymore when Mitsuba is compiled with a different value of
\texttt{SPECTRUM\_SAMPLES}.
For completeness, the possibility is explained nonetheless. Assuming that
the 360-830$nm$ range is discretized into ten 47$nm$-sized blocks
(i.e. \texttt{SPECTRUM\_SAMPLES} is set to 10), their values can be specified
as follows:
\begin{xml}
<spectrum name="spectrumProperty" value=".2, .2, .8, .4, .6, .5, .1, .9, .4, .2"/>
\end{xml}
Another convenient way of providing color spectra is by specifying linear RGB
or sRGB values using floating-point triplets or hex values:
\begin{xml}
<rgb name="spectrumProperty" value="0.2, 0.8, 0.4"/>
<srgb name="spectrumProperty" value="0.4, 0.3, 0.2"/>
<srgb name="spectrumProperty" value="#f9aa34"/>
\end{xml}
When Mitsuba is compiled with the default settings, it internally uses
linear RGB to represent colors, so these values can directly be used.
However, when configured for doing spectral rendering, a suitable color
spectrum with the requested RGB reflectance must be found. This is a tricky
problem, since there is an infinite number of spectra with this property.
@ -190,9 +190,9 @@ The \texttt{reflectance} intent is used by default, so remember to
set it to \texttt{illuminant} when defining the brightness of a
light source with the \texttt{<rgb>} tag.
When spectral power or reflectance distributions are obtained from measurements
When spectral power or reflectance distributions are obtained from measurements
(e.g. at 10$nm$ intervals), they are usually quite unwieldy and can clutter
the scene description. For this reason, there is yet another way to pass
the scene description. For this reason, there is yet another way to pass
a spectrum by loading it from an external file:
\begin{xml}
<spectrum name="spectrumProperty" filename="measuredSpectrum.spd"/>
@ -213,23 +213,23 @@ are allowed. Here is an example:
\renderings{
\fbox{\includegraphics[width=10cm]{images/blackbody}}
\hfill\,
\caption{\label{fig:blackbody}A few simulated
black body emitters over a range of temperature values}
}
\label{sec:blackbody}
Finally, it is also possible to specify the spectral distribution of a black body emitter (\figref{blackbody}),
where the temperature is given in Kelvin.
\begin{xml}
<blackbody name="spectrumProperty" temperature="5000K"/>
\end{xml}
Note that attaching a black body spectrum to the \texttt{intensity} property
of an emitter introduces physical units into the rendering process of
Mitsuba, which is ordinarily a unitless system\footnote{This means that the
units of pixel values in a rendering are completely dependent on the units of
the user input, including the unit of world-space distance and the units of
the light source emission profile.}.
Specifically, the black body spectrum has units of power ($W$) per
unit area ($m^{-2}$) per steradian ($sr^{-1}$) per unit wavelength ($nm^{-1}$).
If these units are inconsistent with your scene description, you may use the
optional \texttt{scale} attribute to adjust them, e.g.:
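\begin{xml}
<blackbody name="spectrumProperty" temperature="5000K" scale="0.5"/>
\end{xml}
(the scale factor of 0.5 is merely illustrative).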
@ -244,11 +244,11 @@ Points and vectors can be specified as follows:
<point name="pointProperty" x="3" y="4" z="5"/>
<vector name="vectorProperty" x="3" y="4" z="5"/>
\end{xml}
It is important that whatever you choose as world-space units (meters, inches, etc.) is
used consistently in all places.
\subsubsection{Transformations}
Transformations are the only kind of property that require more than a single tag. The idea is that, starting
with the identity, one can build up a transformation using a sequence of commands. For instance, a transformation that
does a translation followed by a rotation might be written like this:
\begin{xml}
<transform name="trafoProperty">
@ -277,7 +277,7 @@ choices are available:
<matrix value="0 -0.53 0 -1.79 0.92 0 0 8.03 0 0 0.53 0 0 0 0 1"/>
\end{xml}
\item lookat transformations --- this is primarily useful for setting up cameras (and spot lights). The \code{origin} coordinates
specify the camera origin, \code{target} is the point that the camera will look at, and the
(optional) \code{up} parameter determines the ``upward'' direction in the final rendered image.
The \code{up} parameter is not needed for spot lights.
\begin{xml}
@ -297,7 +297,7 @@ of how this works:
</texture>
<bsdf type="diffuse" id="myMaterial">
<!-- Reference the texture named myImage and pass it
to the BRDF as the reflectance parameter -->
<ref name="reflectance" id="myImage"/>
</bsdf>
@ -311,7 +311,7 @@ of how this works:
</scene>
\end{xml}
By providing a unique \texttt{id} attribute in the
object declaration, the object is bound to that identifier
upon instantiation.
Referencing this identifier at a later point (using the \texttt{<ref id="..."/>} tag)
will add the instance to the parent object, with no further memory
@ -326,7 +326,7 @@ it cannot be used to instantiate geometry---if this functionality is needed,
take a look at the \pluginref{instance} plugin.
\subsection{Including external files}
A scene can be split into multiple pieces for better readability.
To include an external file, please use the following command:
\begin{xml}
<include filename="nested-scene.xml"/>

View File

@ -1,7 +1,7 @@
\section{License}
\label{sec:license}
Mitsuba is licensed under the terms of Version 3 of the GNU General Public License,
which is reproduced here in its entirety. The license itself is copyrighted
\copyright\ 2007 by the Free Software Foundation, Inc. \texttt{http://fsf.org/}.
\subsection{Preamble}

View File

@ -3,9 +3,9 @@ Suppose you want to design a custom integrator to render scenes in Mitsuba.
There are two general ways you can do this, and which one you should take
mostly depends on the characteristics of your particular integrator.
The framework distinguishes between \emph{sampling-based} integrators and
\emph{generic} ones. A sampling-based integrator is able to generate
(usually unbiased) estimates of the incident radiance along a specified ray, and this
is done a large number of times to render a scene. A generic integrator
is more like a black box, where no assumptions are made on how the image is
created. For instance, the VPL renderer uses OpenGL to rasterize the scene
@ -13,44 +13,44 @@ using hardware acceleration, which certainly doesn't fit into the sampling-based
For that reason, it must be implemented as a generic integrator.
Generally, if you can package up your code to fit into the
\code{SampleIntegrator} interface, you should do it, because you'll get
\code{SamplingIntegrator} interface, you should do it, because you'll get
parallelization and network rendering essentially for free. This is done
by transparently sending instances of your integrator class to all participating cores
and assigning small image blocks for each one to work on. Also, sampling-based
integrators can be nested within some other integrators, such as an
irradiance cache or an adaptive integrator. This cannot be done with generic
integrators due to their black-box nature. Note that it is often still
possible to parallelize generic integrators, but this involves significantly
more work.
In this section, we'll design a rather contrived sampling-based integrator,
which renders a monochromatic image of your scene, where the intensity
denotes the distance to the camera. But to get a feel for the overall
framework, we'll start with an even simpler one that just renders a
solid-color image.
\subsection{Basic implementation}
In Mitsuba's \code{src/integrators} directory, create a file named
\code{myIntegrator.cpp}.
\begin{cpp}
#include <mitsuba/render/scene.h>
MTS_NAMESPACE_BEGIN
class MyIntegrator : public SampleIntegrator {
class MyIntegrator : public SamplingIntegrator {
public:
MTS_DECLARE_CLASS()
};
MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SampleIntegrator)
MTS_IMPLEMENT_CLASS_S(MyIntegrator, false, SamplingIntegrator)
MTS_EXPORT_PLUGIN(MyIntegrator, "A contrived integrator");
MTS_NAMESPACE_END
\end{cpp}
The \code{scene.h} header file contains all of the dependencies we'll need
for now.
To avoid conflicts with other libraries, the whole framework is located in
a separate namespace named \code{mitsuba}, and the lines starting with
\code{MTS\_NAMESPACE} ensure that our integrator is placed there
as well.
@ -61,17 +61,17 @@ and serialization/unserialization support. Let's take a look at the second of th
lines, because it contains several important pieces of information:
The suffix \code{S} in \code{MTS\_IMPLEMENT\_CLASS\_S} specifies that this is
a serializable class, which means that it can be sent over the network or
written to disk and later restored. That also implies that certain methods
need to be provided by the implementation --- we'll add those in a moment.
The three following parameters specify the name of this class (\code{MyIntegrator}),
the fact that it is \emph{not} an abstract class (\code{false}), and the name of its
parent class (\code{SampleIntegrator}).
parent class (\code{SamplingIntegrator}).
Just below, you can see a line that starts with
\code{MTS\_EXPORT\_PLUGIN}. As the name suggests, this line is only necessary
for plugins, and it ensures that the specified class (\code{MyIntegrator}) is
what you want to be instantiated when somebody loads this plugin. It is also
possible to supply a short descriptive string.
\vspace{3mm}
Let's add an instance variable and a constructor:
\begin{cpp}
public:
/// Initialize the integrator with the specified properties
MyIntegrator(const Properties &props) : SamplingIntegrator(props) {
Spectrum defaultColor;
defaultColor.fromLinearRGB(0.2f, 0.5f, 0.2f);
m_color = props.getSpectrum("color", defaultColor);
    }
\end{cpp}
Next, we need to add serialization and unserialization support:
\begin{cpp}
/// Unserialize from a binary data stream
MyIntegrator(Stream *stream, InstanceManager *manager)
: SamplingIntegrator(stream, manager) {
m_color = Spectrum(stream);
}
/// Serialize to a binary data stream
void serialize(Stream *stream, InstanceManager *manager) const {
SamplingIntegrator::serialize(stream, manager);
m_color.serialize(stream);
}
\end{cpp}
This makes use of a \emph{stream} abstraction similar in style to Java.
A stream can represent various things, such as a file, a console session, or a
network communication link. Especially when dealing with multiple machines,
it is important to realize that the machines may use different binary representations
related to their respective \emph{endianness}. To prevent issues from arising,
the \code{Stream} interface provides many methods for writing and reading
small chunks of data (e.g. \code{writeShort}, \code{readFloat}, ..),
which automatically perform endianness translation. In our case, the
\code{Spectrum} class already provides serialization/unserialization support,
so we don't really have to do anything.
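As a quick illustration of this endianness-safe interface, consider the following minimal sketch (it assumes the \code{MemoryStream} class from the core library; any other \code{Stream} implementation would work the same way):
\begin{cpp}
// Values written through the Stream interface are read back portably,
// regardless of the endianness of the machines involved.
ref<Stream> stream = new MemoryStream();
stream->writeFloat(1.5f);
stream->writeInt(42);
stream->seek(0);               // rewind to the beginning
Float f = stream->readFloat(); // endianness translation happens here
int   i = stream->readInt();
\end{cpp}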
Note that it is crucial that your code calls the serialization and unserialization
implementations of the superclass, since it will also read/write some
information to the stream.
We haven't used the \texttt{manager} parameter yet, so here is a quick overview
of what it does: in many cases, we don't just want to serialize a single class,
but a whole graph of objects. Some may be referenced many
times from different places, and potentially there are even cycles. If we just
naively called the serialization and unserialization implementation of members
recursively within each class, we'd waste much bandwidth and potentially
end up stuck in an infinite recursion.
This is where the instance manager comes in. Every time you want to serialize
a heap-allocated object (suppose it is of type \code{SomeClass}),
instead of calling its serialize method, write
\begin{cpp}
manager->serialize(stream, myObject.get());
\end{cpp}
Later, to unserialize the object from a stream, write
\begin{cpp}
ref<SomeClass> myObject = static_cast<SomeClass *>(manager->getInstance(stream));
\end{cpp}
Behind the scenes, the object manager adds annotations to the data stream,
which ensure that you will end up with the exact same reference graph on the
remote side, while only one copy of every object is transmitted and no
infinite recursion can occur. But we digress -- let's go back to our integrator.
\vspace{3mm}
That is admittedly not very exciting --- so let's do some actual computation.
\subsection{Visualizing depth}
Add an instance variable \code{Float m\_maxDist;} to the implementation. This
will store the maximum distance from the camera to any object, which is needed
to map distances into the $[0,1]$ range. Note the upper-case \code{Float} ---
this means that either a single- or a double-precision variable is
substituted based on the compilation flags. This variable constitutes local
state, thus it must not be forgotten in the serialization- and unserialization routines:
append
\code{m\_maxDist = stream->readFloat();} to the unserialization constructor, and
\code{stream->writeFloat(m\_maxDist);} to the \code{serialize} method.
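With these additions, the two routines shown earlier read as follows:
\begin{cpp}
/// Unserialize from a binary data stream
MyIntegrator(Stream *stream, InstanceManager *manager)
 : SamplingIntegrator(stream, manager) {
    m_color = Spectrum(stream);
    m_maxDist = stream->readFloat();
}

/// Serialize to a binary data stream
void serialize(Stream *stream, InstanceManager *manager) const {
    SamplingIntegrator::serialize(stream, manager);
    m_color.serialize(stream);
    stream->writeFloat(m_maxDist);
}
\end{cpp}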
To avoid having to do this every time \code{Li()} is called,
we can override the \code{preprocess} function:
\begin{cpp}
/// Preprocess function -- called on the initiating machine
bool preprocess(const Scene *scene, RenderQueue *queue,
const RenderJob *job, int sceneResID, int cameraResID,
int samplerResID) {
SamplingIntegrator::preprocess(scene, queue, job, sceneResID,
cameraResID, samplerResID);
const AABB &sceneAABB = scene->getAABB();
/* Find the camera position at t=0 seconds */
Point cameraPosition = scene->getSensor()->getWorldTransform()->eval(0).transformAffine(Point(0.0f));
m_maxDist = - std::numeric_limits<Float>::infinity();
for (int i=0; i<8; ++i)
m_maxDist = std::max(m_maxDist,
(cameraPosition - sceneAABB.getCorner(i)).length());
return true;
}
\end{cpp}
The last three parameters of this function are
global resource identifiers. When a network render job runs, many associated
pieces of information (the scene, the camera, etc.) are wrapped into global resource chunks
shared amongst all nodes, and these can be referenced using such identifiers.
One important aspect of the \code{preprocess} function is that it is executed
on the initiating node and before any of the parallel rendering begins.
This can be used to compute certain things only once. Any
information updated here (such as \code{m\_maxDist}) will be forwarded to the
other nodes before the rendering begins.
Now, replace the body of the \code{Li} method with
\begin{cpp}
if (rRec.rayIntersect(r)) {
Float distance = rRec.its.t;
    return Spectrum(1.0f - distance/m_maxDist) * m_color;
}
return Spectrum(0.0f);
\end{cpp}
The ``usual'' way
to intersect a ray against the scene actually works like this:
\begin{cpp}
Intersection its;
Ray ray = ...;
if (scene->rayIntersect(ray, its)) {
/* Do something with the intersection stored in 'its' */
}
\end{cpp}
As you can see, we did something slightly different in the distance
renderer fragment above (we called \code{RadianceQueryRecord::rayIntersect()}
on the supplied parameter \code{rRec}), and the reason for this is \emph{nesting}.
\subsection{Nesting}
The idea of nesting is that sampling-based rendering techniques can be
embedded within each other for added flexibility: for instance, one
might concoct a 1-bounce indirect rendering technique complete with
irradiance caching and adaptive integration simply by writing the following
into a scene XML file:
\begin{xml}
<!-- Adaptively integrate using the nested technique -->
<integrator type="adaptive">
    <!-- Irradiance caching + final gathering with the nested technique -->
    <integrator type="irrcache">
        <!-- Simple direct illumination technique -->
        <integrator type="direct"/>
    </integrator>
</integrator>
\end{xml}
To support this kind of complex interaction, some information needs to be passed between the
integrators, and the \code{RadianceQueryRecord} parameter of the function
\code{SamplingIntegrator::Li} is used for this.
This brings us back to the odd way of computing an intersection a moment ago:
the reason why we didn't just do this by calling
\code{scene->rayIntersect()} is that our technique might actually be nested
within a parent technique, which has already computed this intersection.
To avoid wasting resources, the function \code{rRec.rayIntersect} first
determines whether an intersection record has already been provided.
If yes, it does nothing. Otherwise, it takes care of computing one.
The radiance query record also lists the particular \emph{types} of radiance requested
by the parent integrator -- your implementation should respect these as much
as possible.
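As a rough sketch of what honoring these request types can look like (the \code{type} bit field and the \code{EEmittedRadiance} flag name are assumptions based on the \code{RadianceQueryRecord} interface):
\begin{cpp}
Spectrum Li(const RayDifferential &r, RadianceQueryRecord &rRec) const {
    Spectrum result(0.0f);
    rRec.rayIntersect(r); // reuses the parent integrator's intersection, if any
    if (rRec.type & RadianceQueryRecord::EEmittedRadiance) {
        /* .. only account for directly visible emitters here .. */
    }
    return result;
}
\end{cpp}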
\part{Using Mitsuba}
\textbf{Disclaimer:} This manual documents the usage, file format, and
internal design of the Mitsuba rendering system. It is currently a work
in progress, hence some parts may still be incomplete or missing.
\section{About Mitsuba}
Mitsuba is a research-oriented rendering system in the style of PBRT
(\url{www.pbrt.org}), from which it derives much inspiration.
It is written in portable C++, implements unbiased as well
as biased techniques, and contains heavy optimizations targeted
towards current CPU architectures.
Mitsuba is extremely modular: it consists of a small set of core libraries
and over 100 different plugins that implement functionality ranging
from materials and light sources to complete rendering algorithms.
In comparison to other open source renderers, Mitsuba places a strong
emphasis on experimental rendering techniques, such as path-based
formulations of Metropolis Light Transport and volumetric
modeling approaches. Thus, it may be of genuine interest to those who
would like to experiment with such techniques that haven't yet found
their way into mainstream renderers, and it also provides a solid
foundation for research in this domain.
Other design considerations are:
\parheader{Performance:}
Mitsuba provides optimized implementations of the most commonly
used rendering algorithms. By virtue of running on a shared foundation, comparisons between them can
better highlight the merits and limitations of different approaches. This is in contrast to, say,
comparing two completely different rendering products, where technical information on the underlying
implementation is often intentionally not provided.
\parheader{Robustness:}
In many cases, physically-based rendering packages force the user to model scenes with the underlying
algorithm (specifically: its convergence behavior) in mind. For instance, glass windows are routinely
replaced with light portals, photons must be manually guided to the relevant parts of a scene, and
interactions with complex materials are taboo, since they cannot be importance sampled exactly.
One focus of Mitsuba will be to develop path-space light transport algorithms, which handle such
cases more gracefully.
\parheader{Scalability:} Mitsuba instances can be merged into large clusters, which transparently distribute and
jointly execute tasks assigned to them using only node-to-node communication. It has successfully
scaled to large-scale renderings that involved more than 1000 cores working on a single image.
Most algorithms in Mitsuba are written using a generic parallelization layer, which can tap
into this cluster-wide parallelism. The principle is that if any component of the renderer produces
work that takes longer than a second or so, it at least ought to use all of the processing power
it can get.
The renderer also tries to be very conservative in its use of memory, which allows it to handle
large scenes (>30 million triangles) and multi-gigabyte heterogeneous volumes on consumer hardware.
\parheader{Realism and accuracy:} Mitsuba comes with a large repository of physically-based
reflectance models for surfaces and participating media. These implementations
are designed so that they can be used to build complex shader networks, while
providing enough flexibility to be compatible with a wide range of different
rendering techniques, including path tracing, photon mapping, hardware-accelerated rendering
and bidirectional methods.
The unbiased path tracers in Mitsuba are battle-proven and produce
reference-quality results that can be used for predictive rendering, and to verify
implementations of other rendering methods.
\parheader{Usability:}
Mitsuba comes with a graphical user interface to interactively explore scenes. Once a suitable
viewpoint has been found, it is straightforward to perform renderings using any of the
implemented rendering techniques, while tweaking their parameters to find the most suitable
settings. Experimental integration into Blender 2.5 is also available.
\section{Limitations}
However, there are some inherent limitations of the system that users should be aware of:
\begin{enumerate}[(i)]
\item \textbf{Wave Optics}: Mitsuba is fundamentally based on the geometric optics toolbox,
which means that it generally does not simulate phenomena that arise due to
the wave properties of light (diffraction, for instance).
\item \textbf{Polarization}: Mitsuba does not account for polarization. In
other words, light is always assumed to be randomly polarized. This can be a problem for
some predictive rendering applications.
\item \textbf{Numerical accuracy}: The accuracy of any result produced with this
system is constrained by the underlying floating point computations.
For instance, an intricate scene that can be rendered without problems,
may produce the wrong answer when all objects are translated away from the
origin by a large distance, since floating point numbers are spaced less densely at the
new position. To avoid these sorts of pitfalls, it is good to have a basic
understanding of the IEEE-754 standard.
\end{enumerate}
\section{License}
Mitsuba is free software and can be redistributed and modified under the terms of the GNU General
Public License (Version 3) as provided by the Free Software Foundation.
\setcounter{secnumdepth}{3}
\setcounter{tocdepth}{3}
\newcommand{\MitsubaVersion}{0.4.2}
\newcommand{\MitsubaYear}{2012}
\typearea[current]{last}
\section{Miscellaneous topics}
\label{sec:miscellaneous}
\subsection{A word about color spaces}
\label{sec:colorspaces}
When using one of the downloadable release builds of Mitsuba, or a version
that was compiled with the default settings, the renderer internally
operates in \emph{RGB mode}: all computations are performed using a representation
that is based on the three colors red, green, and blue.
More specifically, these are the intensities of the red, green, and blue primaries
rendering. This is an intuitive default which yields fast computations and
satisfactory results for most applications.
Low dynamic range images exported using the \pluginref{ldrfilm} will be stored in a
sRGB-compatible format that accounts for the custom gamma curves mandated by this
standard. They should display as intended across a wide range of display devices.
When saving high dynamic range output (e.g. OpenEXR, RGBE, or PFM), the computed radiance
values are exported in a linear form (i.e. without having the sRGB gamma curve applied to it),
which is the most common way of storing high dynamic range data.
It is important to keep in mind that other applications may not support this
``linearized sRGB'' space---in particular, the Mac OS preview currently
does not display images with this encoding correctly.
\subsubsection{Spectral mode}
Some predictive rendering applications will require a more realistic space for
interreflection computations. In such cases, Mitsuba can be switched to \emph{spectral mode}.
This can be done by compiling it with the \code{SPECTRUM\_SAMPLES=}$n$ parameter
(\secref{compiling}), where $n$ is usually between 15 and 30.
Now, all input parameters are converted into color spectra with the specified
number of samples.
\section{Parallelization layer}
The guiding principle is that if an operation can potentially take longer than a
few seconds, it ought to use all the cores it can get.
Here, we will go through a basic example, which will hopefully provide sufficient intuition
to realize more complex tasks.
To obtain good (i.e. close to linear) speedups, the parallelization layer depends on
several key assumptions of the task to be parallelized:
\begin{itemize}
\item The task can easily be split up into a discrete number of \emph{work units}, which requires a negligible amount of computation.
\item Each work unit is small in footprint so that it can easily be transferred over the network or shared memory.
\item A work unit constitutes a significant amount of computation, which by far outweighs the cost of transmitting it to another node.
\item The \emph{work result} obtained by processing a work unit is again small in footprint, so that it can easily be transferred back.
\item Merging all work results to a solution of the whole problem requires a negligible amount of additional computation.
\end{itemize}
This essentially corresponds to a parallel version of \emph{Map} (one part of \emph{Map\&Reduce}) and is
ideally suited for most rendering workloads.
The example we consider here computes a \code{ROT13} ``encryption'' of a string, which
most certainly violates the ``significant amount of computation'' assumption.
It was chosen due to the inherent parallelism and simplicity of this task.
While of course over-engineered to the extreme, the example hopefully
communicates how this framework might be used in more complex scenarios.
We will implement this program as a plugin for the utility launcher \code{mtsutil}:
\begin{shell}
$\texttt{\$}$ mtsutil rot13
Hello world!
\end{shell}
Our approach for implementing distributed ROT13 will be to treat each character as an
independent work unit. Since the ordering is lost when sending out work units, we must
also include the position of the character in both the work units and the work results.
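As an aside, the per-character computation itself is ordinary C++ and entirely independent of Mitsuba; a minimal sketch might look like this:
\begin{cpp}
// Plain ROT13 on a single character -- everything else in this example
// exists purely to distribute this (tiny) computation.
inline char rot13Char(char c) {
    if (c >= 'a' && c <= 'z') return (c - 'a' + 13) % 26 + 'a';
    if (c >= 'A' && c <= 'Z') return (c - 'A' + 13) % 26 + 'A';
    return c;
}
\end{cpp}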
For reference, here are the interfaces of \code{WorkUnit} and \code{WorkResult}:
\begin{cpp}
/**
* Abstract work unit. Represents a small amount of information
* that encodes part of a larger processing task.
*/
class MTS_EXPORT_CORE WorkUnit : public Object {
public:
protected:
virtual ~WorkUnit() { }
};
/**
* Abstract work result. Represents the information that encodes
* the result of a processed <tt>WorkUnit</tt> instance.
*/
class MTS_EXPORT_CORE WorkResult : public Object {
};
\end{cpp}
In our case, the \code{WorkUnit} implementation then looks like this:
\begin{cpp}
class ROT13WorkUnit : public WorkUnit {
public:
void set(const WorkUnit *workUnit) {
const ROT13WorkUnit *wu =
static_cast<const ROT13WorkUnit *>(workUnit);
m_char = wu->m_char;
m_pos = wu->m_pos;
    }
    void load(Stream *stream) {
m_char = stream->readChar();
m_pos = stream->readInt();
}
void save(Stream *stream) const {
stream->writeChar(m_char);
stream->writeInt(m_pos);
}
std::string toString() const {
        std::ostringstream oss;
        oss << "ROT13WorkUnit[char='" << m_char << "', pos=" << m_pos << "]";
        return oss.str();
    }
private:
    char m_char;
    int m_pos;
};
MTS_IMPLEMENT_CLASS(ROT13WorkUnit, false, WorkUnit)
\end{cpp}
The \code{ROT13WorkResult} implementation is not reproduced since it is almost identical
(except that it doesn't need the \code{set} method).
The similarity is not true in general: for most algorithms, the work unit and result
will look completely different.
Next, we need a class, which does the actual work of turning a work unit into a work result
(a subclass of \code{WorkProcessor}). Again, we need to implement a range of support
methods to enable the various ways in which work processor instances will be submitted to
remote worker nodes and replicated amongst local threads.
\begin{cpp}
class ROT13WorkProcessor : public WorkProcessor {
public:
    ref<WorkUnit> createWorkUnit() const {
return new ROT13WorkUnit();
}
ref<WorkResult> createWorkResult() const {
return new ROT13WorkResult();
}
    }
void prepare() { }
/// Do the actual computation
void process(const WorkUnit *workUnit, WorkResult *workResult,
const bool &stop) {
const ROT13WorkUnit *wu
= static_cast<const ROT13WorkUnit *>(workUnit);
ROT13WorkResult *wr = static_cast<ROT13WorkResult *>(workResult);
wr->setPos(wu->getPos());
        wr->setChar((std::toupper(wu->getChar()) - 'A' + 13) % 26 + 'A');
    }
void processResult(const WorkResult *result, bool cancelled) {
if (cancelled) // indicates a work unit, which was
return; // cancelled partly through its execution
const ROT13WorkResult *wr =
static_cast<const ROT13WorkResult *>(result);
m_output[wr->getPos()] = wr->getChar();
}
};
MTS_IMPLEMENT_CLASS(ROT13Process, false, ParallelProcess)
\end{cpp}
The \code{generateWork} method produces work units until we have moved past
the end of the string, after which it returns the status code \code{EFailure}.
Note the method \code{getRequiredPlugins()}: this is necessary to use
the utility across
machines. When communicating with another node, it ensures that the remote side
loads the \code{ROT13*} classes at the right moment.
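The method itself is not reproduced in this excerpt; as a hedged sketch, assuming it simply returns the names of the plugins that the remote side must load, it might look like:
\begin{cpp}
std::vector<std::string> getRequiredPlugins() {
    std::vector<std::string> result;
    result.push_back("rot13"); // the plugin that provides the ROT13* classes
    return result;
}
\end{cpp}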
from the main utility function (the `Hello World' code we wrote earlier). We can
do so as follows:
\begin{cpp}
    ...
}
\end{cpp}
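The elided body typically schedules the process and waits for its completion. A hedged sketch using the \code{Scheduler} interface (and assuming \code{ROT13Process} receives the input string via its constructor):
\begin{cpp}
ref<Scheduler> sched = Scheduler::getInstance();
ref<ROT13Process> proc = new ROT13Process("SECUREBYDESIGN");
sched->schedule(proc); // submit the parallel process
sched->wait(proc);     // block until all work results have been processed
\end{cpp}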
After compiling everything using \code{scons}, a simple example
involving the utility would be to encode a string (e.g. \code{SECUREBYDESIGN}), while
forwarding all computation to a network machine. (\code{-p0} disables
all local worker threads). Adding a verbose flag (\code{-v}) shows
some additional scheduling information:
\begin{shell}
$\texttt{\$}$ mtsutil -vc feynman -p0 rot13 SECUREBYDESIGN
\section{Plugin reference}
\vspace{-2mm}
The following subsections describe the available Mitsuba plugins, usually along
with example renderings and a description of what each parameter does.
They are separated into subsections covering textures, surface scattering
models, etc.
Each subsection begins with a brief general description.
The documentation of a plugin always starts on a new page and is preceded
by a table similar to the one below:
\default{\code{false}}
}
\parameter{darkMatter}{\Float}{
Controls the proportionate amount of dark matter present in the scene.
\default{0.83}
}
}
this description, it can be instantiated from an XML scene file using a custom configuration:
\begin{xml}
<integrator type="amazing">
<float name="darkMatter" value="0.4"/>
</integrator>
\end{xml}
In some cases\footnote{Note that obvious parameters are generally omitted.
For instance, all shape plugins accept a surface scattering plugin, but this
is left out from the documentation for brevity.}, plugins also indicate that they accept nested plugins
as input arguments. These can either be \emph{named} or \emph{unnamed}. If
the \code{amazing} integrator also accepted the following two parameters\vspace{-2mm}
\parameters{
\parameter{\Unnamed}{\Integrator}{A nested integrator which does the actual hard work}
\section{Python integration}
\label{sec:python}
A recent feature of Mitsuba is a Python interface to the renderer API.
While the interface is still limited at this point, it can already be
used for many useful purposes. To access the API, start your Python
interpreter and enter
\begin{python}
import mitsuba
\end{python}
\paragraph{Mac OS:}
For this to work on Mac OS X, you will first have to run the ``\emph{Apple
Menu}$\to$\emph{Command-line access}'' menu item from within Mitsuba.
In the unlikely case that you run into shared library loading issues (this is
taken care of by default), you may have to set the \code{LD\_LIBRARY\_PATH}
environment variable before starting Python so that it points to where the
Mitsuba libraries are installed (e.g. the \code{Mitsuba.app/Contents/Frameworks}
directory).
\paragraph{Windows and Linux:}
On Windows and \emph{non-packaged} Linux builds, you may have to explicitly
specify the required extension search path before issuing the \code{import} command, e.g.:
\begin{python}
import os
os.environ['PATH'] = 'path-to-mitsuba-directory' + os.pathsep + os.environ['PATH']
import mitsuba
\end{python}
In rare cases when running on Linux, it may also be necessary to set the
\code{LD\_LIBRARY\_PATH} environment variable before starting Python so that it
points to where the Mitsuba core libraries are installed.
For an overview of the currently exposed API subset, please refer
to the following page: \url{http://www.mitsuba-renderer.org/api/group__libpython.html}.
classes, functions, or entire namespaces when running an interactive Python shell:
\begin{shell}
...
\end{shell}
The docstrings list the currently exported functionality, as well as C++ and Python signatures, but they
don't document what these functions actually do. The web API documentation is
the preferred source of this information.
\subsection{Basics}
Generally, the Python API tries to mimic the C++ API as closely as possible.
\newpage
\subsection{Surface scattering models}
\label{sec:bsdfs}
Surface scattering models describe the manner in which light interacts
with surfaces in the scene. They conveniently summarize the mesoscopic
scattering processes that take place within the material and
cause it to look the way it does.
This represents one central component of the material system in Mitsuba---another
part of the renderer concerns itself with what happens
\emph{in between} surface interactions. For more information on this aspect,
please refer to Sections~\ref{sec:media} and \ref{sec:subsurface}.
This section presents an overview of all surface scattering models that are
supported, along with their parameters.
\subsubsection*{BSDFs}
To achieve realistic results, Mitsuba comes with a library of both
general-purpose surface scattering models (smooth or rough glass, metal,
plastic, etc.) and specializations to particular materials (woven cloth,
masks, etc.). Some model plugins fit neither category and can best be described
as \emph{modifiers} that are applied on top of one or more scattering models.
Throughout the documentation and within the scene description
language, the word \emph{BSDF} is used synonymously with the term ``surface
scattering model''. This is an abbreviation for \emph{Bidirectional
Scattering Distribution Function}, a more precise technical
term.
In Mitsuba, BSDFs are
assigned to \emph{shapes}, which describe the visible surfaces in
the scene. In the scene description language, this assignment can
either be performed by nesting BSDFs within shapes, or they can
be named and then later referenced by their name.
The following fragment shows an example of both kinds of usages:
\begin{xml}
<scene version=$\MtsVer$>
    ...
</scene>
\end{xml}
memory usage.
\label{fig:glass-explanation}
Some of the scattering models in Mitsuba need to know
the indices of refraction on the exterior and interior-facing
side of a surface.
It is therefore important to decompose the mesh into meaningful
separate surfaces corresponding to each index of refraction change.
The example here shows such a decomposition for a water-filled glass.
}
\end{figure}
A vital consideration when modeling a scene in a physically-based rendering
system is that the used materials do not violate physical properties, and
that their arrangement is meaningful. For instance, imagine having designed
an architectural interior scene that looks good except for a white desk that
seems a bit too dark. A closer inspection reveals that it uses a Lambertian
material with a diffuse reflectance of $0.9$.
In many rendering systems, it would be feasible to increase the
reflectance value above $1.0$ in such a situation. But in Mitsuba, even a
small surface that reflects a little more light than it receives will
likely break the available rendering algorithms, or cause them to produce otherwise
unpredictable results. In fact, the right solution in this case would be to switch to
a different lighting setup that causes more illumination to be received by
the desk and then \emph{reduce} the material's reflectance---after all, it is quite unlikely that
one could find a real-world desk that reflects 90\% of all incident light.
As another example of the necessity for a meaningful material description, consider
the glass model illustrated in \figref{glass-explanation}. Here, careful thinking
is needed to decompose the object into boundaries that mark index of
refraction changes. If this is done incorrectly and a beam of light can
potentially pass through a sequence of incompatible index of refraction changes (e.g. $1.00\to 1.33$
followed by $1.50\to1.33$), the output is undefined and will quite likely
even contain inaccuracies in parts of the scene that are far
away from the glass.
\newpage
\subsection{Films}
\label{sec:films}
A film defines how conducted measurements are stored and converted into the final
output file that is written to disk at the end of the rendering process. Mitsuba comes with a few
films that can write to high and low dynamic range image formats (OpenEXR, JPEG or PNG), as well
more scientifically oriented data formats (e.g. MATLAB or Mathematica).
In the XML scene description language, a normal film configuration might look as follows
\begin{xml}
<scene version=$\MtsVer$>
    <sensor type="... sensor type ...">
        <film type="hdrfilm">
<integer name="width" value="1920"/>
<integer name="height" value="1080"/>
<!-- Use a Gaussian reconstruction filter. For
details on these, refer to the next subsection -->
<rfilter type="gaussian"/>
</film>
    </sensor>
</scene>
\end{xml}
The \code{film} plugin should be instantiated nested inside a \code{sensor} declaration.
Note how the output filename is never specified---it is automatically inferred
from the scene filename and can be manually overridden by passing the configuration parameter
\code{-o} to the \code{mitsuba} executable when rendering from the command line.
\newpage
\subsection{Integrators}
\label{sec:integrators}
In Mitsuba, the different rendering techniques are collectively referred to as
\emph{integrators}, since they perform integration over a high-dimensional
space. Each integrator represents a specific approach for solving
the light transport equation---usually favored in certain scenarios, but
at the same time affected by its own set of intrinsic limitations.
Therefore, it is important to carefully select an integrator based on
user-specified accuracy requirements and properties of the scene to be
rendered.
In Mitsuba's XML description language, a single integrator
is usually instantiated by declaring it at the top level within the
scene, e.g.
\begin{xml}
<scene version=$\MtsVer$>
    <integrator type="... integrator type ...">
        ... $\code{integrator}$ parameters ...
    </integrator>
    ...
</scene>
\end{xml}
This section gives a brief overview of the available choices
along with their parameters.
\subsubsection*{Choosing an integrator}
Due to the large number of integrators in Mitsuba, the decision of which
one is suitable may seem daunting. Assuming that the goal is to solve
the full light transport equation without approximations, a few integrators
(\pluginref{ao}, \pluginref{direct}, \pluginref{vpl})
can already be ruled out. The adjoint particle tracer \pluginref{ptracer} is
also rarely used.
The following ``algorithm'' may help to decide amongst the remaining ones:
\begin{enumerate}[1.]
\item
Mitsuba currently comes with three path tracer variations that target different setups: If your
scene contains no media and no surfaces with opacity masks, use the plain path tracer (\pluginref{path}).
Otherwise, use one of the volumetric path tracers (\pluginref[volpathsimple]{volpath\_simple}
or \pluginref{volpath}). The latter is preferable if the scene contains glossy surface scattering models.
\item If step 1 produced poor (i.e. noisy and slowly converging) results, try
the bidirectional path tracer (\pluginref{bdpt}).
\item If steps 1 and 2 failed, the scene contains a relatively difficult lighting setup, potentially
including interaction with complex materials.
In many cases, these difficulties can be greatly ameliorated by running a ``metropolized'' version
of a path tracer. This is implemented in the Primary Sample Space MLT (\pluginref{pssmlt}) plugin.
method (\pluginref{mlt}, \pluginref{erpt}).
\end{enumerate}
\smallrendering{Max. depth = $\infty$}{pathdepth-all}
\caption{
\label{fig:pathdepths}
These Cornell box renderings demonstrate the visual
effect of a maximum path depth. As the paths
are allowed to grow longer, the color saturation
increases due to multiple scattering interactions
@ -79,13 +79,13 @@ method (\pluginref{mlt}, \pluginref{erpt}).
\end{figure}
Almost all integrators use the concept of \emph{path depth}.
Here, a path refers to a chain of scattering events that
starts at the light source and ends at the eye or sensor.
It is often useful to limit the path depth (\figref{pathdepths})
when rendering scenes for preview purposes, since this reduces the amount
of computation that is necessary per pixel. Furthermore, such renderings
usually converge faster and therefore need fewer samples per pixel.
When reference-quality is desired, one should always leave the path
depth unlimited.
\begin{figure}[h!]
\vspace{-5mm}
\caption{
\label{fig:path-explanation}
A ray of emitted light is scattered by an object and subsequently
reaches the eye/sensor.
In Mitsuba, this is a \emph{depth-2} path, since it has two edges.
}
\end{figure}
Mitsuba counts depths starting at $1$, which correspond to
visible light sources (i.e. a path that starts at the light
source and ends at the eye or sensor without any scattering
interaction in between).
A depth-$2$ path (also known as ``direct illumination'') includes
a single scattering event (\figref{path-explanation}).
\subsubsection*{Progressive versus non-progressive}
Some of the rendering techniques in Mitsuba are \emph{progressive}.
What this means is that they display a rough preview, which improves over time.
Leaving them running indefinitely will continually reduce noise (in unbiased algorithms
such as Metropolis Light Transport) or noise and bias (in biased
rendering techniques such as Progressive Photon Mapping).
\caption{Participating media are not limited to smoke or fog: they are
also great for rendering fuzzy materials such as these knitted sweaters
(made using the \pluginref{heterogeneous} and \pluginref{microflake} plugins).
Figure courtesy of Yuksel et al. \cite{Yuksel2012Stitch}, models courtesy of
Rune Spaans and Christer Sveen.}
}
In Mitsuba, participating media are used to simulate materials ranging from
\newpage
\subsection{Phase functions}
\label{sec:phase}
This section contains a description of all implemented medium scattering models, which
are also known as \emph{phase functions}. These are very similar in principle to surface
scattering models (or \emph{BSDF}s), and essentially describe where light travels after
hitting a particle within the medium.
The most commonly used models for smoke, fog, and other homogeneous media
are isotropic scattering (\pluginref{isotropic}) and the Henyey-Greenstein
phase function (\pluginref{hg}). Mitsuba also supports \emph{anisotropic}
media, where the behavior of the medium changes depending on the direction
of light propagation (e.g. in volumetric representations of fabric). These
are the Kajiya-Kay (\pluginref{kkay}) and Micro-flake (\pluginref{microflake})
models.
Finally, there is also a phase function for simulating scattering in
\subsection{Reconstruction filters}
\label{sec:rfilters}
Image reconstruction filters are responsible for converting a series of radiance samples generated
jointly by the \emph{sampler} and \emph{integrator} into the final output image that will be written
to disk at the end of a rendering process.
This section gives a brief overview of the reconstruction filters that are available in Mitsuba.
There is no universally superior filter, and the final choice depends on a trade-off between
near discontinuities, such as a light-shadow transition.
\begin{description}
\item[Box filter (\code{box}):]
the fastest, but also about the worst possible
reconstruction filter, since it is extremely prone to aliasing.
It is included mainly for completeness, though some rare situations
may warrant its use.
\item[Tent filter (\code{tent}):]
Simple tent, or triangle filter. This reconstruction filter never
suffers from ringing and usually causes less aliasing than a naive
box filter. When rendering scenes with sharp brightness discontinuities,
this may be useful; otherwise, negative-lobed filters will be preferable
(e.g. Mitchell-Netravali or Lanczos Sinc).
\item[Gaussian filter (\code{gaussian}):]
this is a windowed Gaussian filter with configurable standard deviation.
It produces pleasing results and never suffers from ringing, but may
occasionally introduce too much blurring.
When no reconstruction filter is explicitly requested, this is the default
choice in Mitsuba.
\item[Mitchell-Netravali filter (\code{mitchell}):]
Separable cubic spline reconstruction filter by Mitchell and Netravali
\cite{Mitchell:1988:Reconstruction}.
This is often a good compromise between sharpness and ringing.
The plugin has two \code{float}-valued parameters named \texttt{B} and \texttt{C} that
correspond to the two parameters in the original research paper. By default, these
are set to the recommended value of $1/3$, but can be tweaked if desired.
\item[Catmull-Rom filter (\code{catmullrom}):]
This is a special version of the Mitchell-Netravali filter that has the
constants \texttt{B} and \texttt{C} adjusted to produce higher sharpness at the
cost of increased susceptibility to ringing.
\item[Lanczos Sinc filter (\code{lanczos}):]
This is a windowed version of the theoretically optimal low-pass filter.
It is generally one of the best available filters in terms of producing sharp
high-quality output. Its main disadvantage is that it produces strong ringing around
discontinuities, which can become a serious problem when rendering bright objects
with sharp edges (for instance, a directly visible light source will have black
fringing artifacts around it).
This is also the computationally slowest reconstruction filter.
This plugin has an \code{integer}-valued parameter named \code{lobes} that
sets the desired number of filter side-lobes. The higher, the closer
the filter will approximate an optimal low-pass filter, but this also
increases the susceptibility to ringing. Values of 2 or 3 are common (3 is the default).
\end{description}
The next section contains a series of comparisons between reconstruction filters. In the first
case, a very high-resolution input image (corresponding to a hypothetical radiance field
incident at the camera) is reconstructed at low resolutions.
\newpage
\subsubsection{Reconstruction filter comparison 1: frequency attenuation and aliasing}
\vspace{-2mm}
Here, a high frequency function is reconstructed at low resolutions. A good filter
(e.g. Lanczos Sinc) will capture all oscillations that are representable at the desired
resolution and attenuate the remainder to a uniform gray. The filters are ordered by their
approximate level of success at this benchmark.
\renderings{
\subfloat[A high resolution input image whose frequency decreases
towards the borders. If you are looking at this on a computer, you may
have to zoom in.]{\fbox{\includegraphics[width=0.43\textwidth]{images/rfilter_sines_input}}}
\hfill
}
\newpage
\subsubsection{Reconstruction filter comparison 2: ringing}
This comparison showcases the ringing artifacts that can occur when the rendered
image contains extreme and discontinuous brightness transitions. The
Mitchell-Netravali, Catmull-Rom, and Lanczos Sinc filters are affected by this problem.
Note the black fringing around the light source in the cropped Cornell box renderings below.
\newpage
\subsection{Sample generators}
\label{sec:samplers}
When rendering an image, Mitsuba has to solve a high-dimensional integration problem that involves the
geometry, materials, lights, and sensors that make up the scene. Because of the mathematical complexity
of these integrals, it is generally impossible to solve them analytically --- instead, they
are solved \emph{numerically} by evaluating the function to be integrated at a large number of
different positions referred to as \emph{samples}. Sample generators are an essential ingredient to this
process: they produce points in a (hypothetical) infinite dimensional hypercube $[0,1]^{\infty}$ that
constitute the canonical representation of these samples.
To do its work, a rendering algorithm, or \emph{integrator}, will send many queries to the sample generator.
Generally, it will request subsequent 1D or 2D components of this infinite-dimensional ``point'' and map
them into a more convenient space (for instance, positions on surfaces). This allows it to construct
light paths to eventually evaluate the flow of light through the scene.
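As a brief illustration (using the \code{next1D}/\code{next2D} methods declared in the \code{Sampler} interface), an integrator requests components like this:
\begin{cpp}
Point2 s2 = sampler->next2D(); // subsequent 2D component in [0,1)^2
Float  s1 = sampler->next1D(); // subsequent 1D component in [0,1)
\end{cpp}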
Since the whole process starts with a large number of points in the abstract space $[0,1]^{\infty}$,
it is natural to consider different ways of positioning them. Desirable properties of a sampler are
that it ``randomly'' covers the whole space evenly with samples, but without placing samples too close
to each other. This leads to such notions as \emph{stratified sampling} and \emph{low-discrepancy}
number sequences. The samplers in this section make different guarantees on the quality of generated
samples based on these criteria. To obtain intuition about their behavior, the provided point plots
illustrate the resulting sample placement.
into a given direction or the irradiance received by a certain surface. The following
section lists the available choices.
\subsubsection*{Handedness convention}
Sensors in Mitsuba are \emph{right-handed}.
Any number of rotations and translations can be applied to them
without changing this property. By default they are located at the
origin and oriented in such a way that in the rendered image, $+X$ points left,
$+Y$ points upwards, and $+Z$ points along the viewing direction.
Left-handed sensors are also supported. To switch the handedness,
flip any one of the axes, e.g. by passing a scale transformation like
\code{<scale x="-1"/>} to the sensor's \code{toWorld} parameter.
\newpage
\subsection{Shapes}
This section presents an overview of the shape plugins that are released along with the renderer.
In Mitsuba, shapes define surfaces that mark transitions between different types of materials. For instance,
a shape could describe a boundary between air and a solid object, such as a piece of rock. Alternatively,
a shape can mark the beginning of a region of space that isn't solid at all, but rather contains a participating
medium, such as smoke or steam. Finally, a shape can be used to create an object that emits light on its own.
Shapes are usually declared along with a surface scattering model (named ``BSDF'', see \secref{bsdfs} for details).
This BSDF characterizes what happens \emph{at the surface}. In the XML scene description language, this might look like
the following:
\begin{xml}
<scene version=$\MtsVer$>
    <shape type="... shape type ...">
        <bsdf type="... bsdf type ...">
            ... $\code{bsdf}$ parameters ...
        </bsdf>

        <!-- Alternatively: reference a named BSDF that
             has been declared previously

             <ref id="myBSDF"/>
        -->
    </shape>
</scene>
\end{xml}
A shape can also be declared together with \emph{interior} and \emph{exterior} participating media
that lie on either side
of the shape. This informs the renderer about what happens in the region of space:
\begin{xml}
<scene version=$\MtsVer$>
    <shape type="... shape type ...">
<medium name="interior" type="... medium type ...">
... $\code{medium}$ parameters ...
</medium>
<medium name="exterior" type="... medium type ...">
... $\code{medium}$ parameters ...
</medium>
<!-- Alternatively: reference named media that
have been declared previously
<ref name="interior" id="myMedium1"/>
@ -58,8 +58,8 @@ of the shape. This informs the renderer about what happens in the region of spac
You may have noticed that the previous XML example did not make any mention of surface
scattering models (BSDFs). In Mitsuba, such a shape declaration creates an \emph{index-matched} boundary.
This means that incident illumination will pass through the surface without undergoing any kind of
interaction. However, the renderer will still use the information available in the shape to correctly
account for the medium change.
It is also possible to create \emph{index-mismatched} boundaries between media, where some of
@ -76,7 +76,7 @@ the light is affected by the boundary transition:
<medium name="interior" type="... medium type ...">
... $\code{medium}$ parameters ...
</medium>
<medium name="exterior" type="... medium type ...">
... $\code{medium}$ parameters ...
</medium>

View File

@ -4,6 +4,6 @@
The following section describes the available texture data sources. In Mitsuba,
textures are objects that can be attached to certain
surface scattering model parameters to introduce spatial variation.
In the documentation, these are listed as supporting the ``\Texture'' type.
See \secref{bsdfs} for many examples.

View File

@ -70,6 +70,10 @@ struct Normal : public TVector3<Float> {
}
};
inline Normal normalize(const Normal &n) {
return n / n.length();
}
MTS_NAMESPACE_END
#endif /* __MITSUBA_CORE_NORMAL_H_ */
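A brief hedged usage sketch of the new helper (the \c Transform instance is hypothetical):

/* Affine transforms generally do not preserve unit length, so
   re-normalize after mapping a normal to world space */
Normal n = trafo(Normal(0, 0, 1));  /* 'trafo': some Transform (assumed) */
n = normalize(n);                   /* helper defined above */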

View File

@ -40,7 +40,7 @@ extern const int MTS_EXPORT_CORE primeTable[primeTableSize];
/// Van der Corput radical inverse in base 2 with single precision
inline float radicalInverse2Single(uint32_t n, uint32_t scramble = 0U) {
/* Efficiently reverse the bits in 'n' using binary operations */
#if defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
#if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) || defined(__clang__)
n = __builtin_bswap32(n);
#else
n = (n << 16) | (n >> 16);
@ -59,7 +59,7 @@ inline float radicalInverse2Single(uint32_t n, uint32_t scramble = 0U) {
/// Van der Corput radical inverse in base 2 with double precision
inline double radicalInverse2Double(uint64_t n, uint64_t scramble = 0ULL) {
/* Efficiently reverse the bits in 'n' using binary operations */
#if defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
#if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) || defined(__clang__)
n = __builtin_bswap64(n);
#else
n = (n << 32) | (n >> 32);
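For context, a hedged self-contained sketch of the full base-2 radical inverse that this bit swap accelerates: reverse the bits, apply the optional scramble, and scale the result into [0, 1) by 2^-32.

#include <stdint.h>

/* Base-2 radical inverse: mirror the binary digits of 'n' around
   the decimal point and interpret them as a fraction in [0, 1) */
static inline float radicalInverse2(uint32_t n, uint32_t scramble) {
    /* Portable bit reversal via successive swaps */
    n = (n << 16) | (n >> 16);
    n = ((n & 0x00ff00ffU) << 8) | ((n & 0xff00ff00U) >> 8);
    n = ((n & 0x0f0f0f0fU) << 4) | ((n & 0xf0f0f0f0U) >> 4);
    n = ((n & 0x33333333U) << 2) | ((n & 0xccccccccU) >> 2);
    n = ((n & 0x55555555U) << 1) | ((n & 0xaaaaaaaaU) >> 1);
    n ^= scramble;              /* optional Cranley-Patterson style scramble */
    return (float) n * 2.3283064365386963e-10f; /* multiply by 2^-32 */
}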

View File

@ -417,8 +417,10 @@ public:
*
* Note that the resource won't be removed until all processes using
* it have terminated.
*
* \return \c false if the resource could not be found
*/
void unregisterResource(int id);
bool unregisterResource(int id);
/**
* \brief Return the ID of a registered resource

View File

@ -185,6 +185,9 @@ extern MTS_EXPORT_CORE int mts_omp_get_thread_num();
#define mts_omp_get_max_threads omp_get_max_threads
#define mts_omp_get_thread_num omp_get_thread_num
#endif
#else
#define mts_omp_get_max_threads() 1
#define mts_omp_get_thread_num() 0
#endif
MTS_NAMESPACE_END
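A hedged sketch of why these fallbacks are convenient: parallel code that allocates per-thread state compiles unchanged in a non-OpenMP build, where the stubs report a single thread with index 0 (the per-item workload below is hypothetical):

#include <vector>

/* Per-thread partial sums; the OpenMP pragma is simply ignored
   when the code is compiled without OpenMP support */
double parallelSum(int count, double (*evaluate)(int)) {
    std::vector<double> partial(mts_omp_get_max_threads(), 0.0);
    #pragma omp parallel for
    for (int i = 0; i < count; ++i)
        partial[mts_omp_get_thread_num()] += evaluate(i);
    double sum = 0;
    for (size_t t = 0; t < partial.size(); ++t)
        sum += partial[t];
    return sum;
}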

View File

@ -358,11 +358,14 @@ extern MTS_EXPORT_CORE bool solveQuadraticDouble(double a, double b,
* Position of the last knot
* \param size
* Denotes the size of the \c data array
* \param extrapolate
* Extrapolate data values when \c x is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c x lies outside of [\c min, \c max]
* The interpolated value or zero when <tt>extrapolate=false</tt>
* and \c x lies outside of [\c min, \c max]
*/
extern MTS_EXPORT_CORE Float interpCubic1D(Float x, const Float *data,
Float min, Float max, size_t size);
Float min, Float max, size_t size, bool extrapolate = false);
/**
* \brief Evaluate a cubic spline interpolant of an \a irregularly sampled 1D function
@ -382,11 +385,14 @@ extern MTS_EXPORT_CORE Float interpCubic1D(Float x, const Float *data,
* the entries of \c nodes.
* \param size
* Denotes the size of the \c data array
* \param extrapolate
* Extrapolate data values when \c x is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c x lies outside of \a [\c min, \c max]
* The interpolated value or zero when <tt>extrapolate=false</tt>
* and \c x lies outside of \a [\c min, \c max]
*/
extern MTS_EXPORT Float interpCubic1DIrregular(Float x, const Float *nodes,
const Float *data, size_t size);
const Float *data, size_t size, bool extrapolate = false);
/**
* \brief Evaluate a cubic spline interpolant of a regularly sampled 2D function
@ -407,11 +413,14 @@ extern MTS_EXPORT Float interpCubic1DIrregular(Float x, const Float *nodes,
* Position of the last knot on each dimension
* \param size
* Denotes the size of the \c data array (along each dimension)
* \param extrapolate
* Extrapolate data values when \c p is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c p lies outside of the knot range
* The interpolated value or zero when <tt>extrapolate=false</tt> and
* \c p lies outside of the knot range
*/
extern MTS_EXPORT_CORE Float interpCubic2D(const Point2 &p, const Float *data,
const Point2 &min, const Point2 &max, const Size2 &size);
const Point2 &min, const Point2 &max, const Size2 &size, bool extrapolate = false);
/**
* \brief Evaluate a cubic spline interpolant of an \a irregularly sampled 2D function
@ -435,11 +444,14 @@ extern MTS_EXPORT_CORE Float interpCubic2D(const Point2 &p, const Float *data,
* Consecutive entries of this array correspond to increments in the 'x' coordinate.
* \param size
* Denotes the size of the \c data array (along each dimension)
* \param extrapolate
* Extrapolate data values when \c p is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c p lies outside of the knot range
* The interpolated value or zero when <tt>extrapolate=false</tt> and
* \c p lies outside of the knot range
*/
extern MTS_EXPORT_CORE Float interpCubic2DIrregular(const Point2 &p, const Float **nodes,
const Float *data, const Size2 &size);
const Float *data, const Size2 &size, bool extrapolate = false);
/**
* \brief Evaluate a cubic spline interpolant of a regularly sampled 3D function
@ -461,11 +473,14 @@ extern MTS_EXPORT_CORE Float interpCubic2DIrregular(const Point2 &p, const Float
* Position of the last knot on each dimension
* \param size
* Denotes the size of the \c data array (along each dimension)
* \param extrapolate
* Extrapolate data values when \c p is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c p lies outside of the knot range
* The interpolated value or zero when <tt>extrapolate=false</tt> and
* \c p lies outside of the knot range
*/
extern MTS_EXPORT_CORE Float interpCubic3D(const Point3 &p, const Float *data,
const Point3 &min, const Point3 &max, const Size3 &size);
const Point3 &min, const Point3 &max, const Size3 &size, bool extrapolate = false);
/**
* \brief Evaluate a cubic spline interpolant of an \a irregularly sampled 3D function
@ -490,11 +505,14 @@ extern MTS_EXPORT_CORE Float interpCubic3D(const Point3 &p, const Float *data,
* then 'y', and finally 'z' increments.
* \param size
* Denotes the size of the \c data array (along each dimension)
* \param extrapolate
* Extrapolate data values when \c p is out of range? (default: \c false)
* \return
* The interpolated value or zero when \c p lies outside of the knot range
* The interpolated value or zero when <tt>extrapolate=false</tt> and
* \c p lies outside of the knot range
*/
extern MTS_EXPORT_CORE Float interpCubic3DIrregular(const Point3 &p, const Float **nodes,
const Float *data, const Size3 &size);
const Float *data, const Size3 &size, bool extrapolate = false);
//// Convert radians to degrees
inline Float radToDeg(Float value) { return value * (180.0f / M_PI); }
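A hedged usage sketch of the extended interface (the knot values are made up; \c Float denotes Mitsuba's configured scalar type):

/* Cubic spline through 5 regularly spaced knots on [0, 4] */
const Float data[5] = { 0.0f, 1.0f, 4.0f, 9.0f, 16.0f };

Float inside  = interpCubic1D(2.5f, data, 0.0f, 4.0f, 5);       /* interpolates */
Float outside = interpCubic1D(5.0f, data, 0.0f, 4.0f, 5);       /* returns zero */
Float extrap  = interpCubic1D(5.0f, data, 0.0f, 4.0f, 5, true); /* extrapolates */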

View File

@ -26,7 +26,7 @@ MTS_NAMESPACE_BEGIN
* \brief Current release of Mitsuba
* \ingroup libcore
*/
#define MTS_VERSION "0.4.1"
#define MTS_VERSION "0.4.2"
/**
* \brief Year of the current release

View File

@ -206,8 +206,15 @@ public:
*/
void computeUVTangents();
/// Generate surface normals
void computeNormals();
/**
* \brief Generate smooth vertex normals
*
* \param force
* When this parameter is set to true, the function
* generates normals <em>even</em> when the mesh
* already has normals.
*/
void computeNormals(bool force = false);
/**
* \brief Rebuild the mesh so that adjacent faces

setpath.csh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/tcsh
set called=($_)
if ("$called" != "") then
set reldir=`dirname $called[2]`
else if ("$0" != "tcsh") then
set reldir=`dirname $0`
else
echo "Unable to detect path!"
exit 1
endif
set MITSUBA_DIR=`cd $reldir && pwd`
if ("`uname`" == "Darwin") then
setenv PATH "$MITSUBA_DIR/Mitsuba.app/Contents/MacOS:$PATH"
else
if (! ($?LD_LIBRARY_PATH) ) then
setenv LD_LIBRARY_PATH "$MITSUBA_DIR/dist"
else
setenv LD_LIBRARY_PATH "$MITSUBA_DIR/dist:$LD_LIBRARY_PATH"
endif
setenv PATH "$MITSUBA_DIR/dist:$PATH"
# Generate core dumps if something goes wrong
limit coredumpsize 1000000000
endif
unset reldir

setpath.sh Normal file → Executable file
View File

View File

@ -65,6 +65,10 @@ MTS_NAMESPACE_BEGIN
* This parameter can be used to scale the amount of illumination
* emitted by the sun emitter. \default{1}
* }
* \parameter{sunRadiusScale}{\Float}{
* Scale factor to adjust the radius of the sun, while preserving its power.
* Set to \code{0} to turn it into a directional light source.
* }
* \parameter{samplingWeight}{\Float}{
* Specifies the relative amount of samples
* allocated to this emitter. \default{1}
@ -74,7 +78,6 @@ MTS_NAMESPACE_BEGIN
* Preetham et al. \cite{Preetham1999Practical}. Using the provided position
* and time information (see \pluginref{sky} for details), it can determine the
* position of the sun as seen from the position of the observer.
*
* The radiance arriving at the earth surface is then found based on the spectral
* emission profile of the sun and the extinction cross-section of the
* atmosphere (which depends on the \code{turbidity} and the zenith angle of the sun).
@ -146,6 +149,23 @@ public:
Emitter *getElement(size_t i) {
if (i != 0)
return NULL;
if (m_sunRadiusScale == 0) {
Properties props("directional");
const Transform &trafo = m_worldTransform->eval(0);
props.setVector("direction", -trafo(m_sunDir));
props.setFloat("samplingWeight", m_samplingWeight);
props.setSpectrum("irradiance", m_radiance * m_solidAngle);
Emitter *emitter = static_cast<Emitter *>(
PluginManager::getInstance()->createObject(
MTS_CLASS(Emitter), props));
emitter->configure();
return emitter;
}
/* Rasterizing the sphere to an environment map and checking the
individual pixels for coverage (which is what Mitsuba 0.3.0 did)
was slow and not very effective; for instance the power varied

View File

@ -74,16 +74,22 @@ MTS_NAMESPACE_BEGIN
* This parameter can be used to separately scale the amount of illumination
* emitted by the sky.\default{1}
* }
* \parameter{sunRadiusScale}{\Float}{
* Scale factor to adjust the radius of the sun, while preserving its power.
* Set to \code{0} to turn it into a directional light source.
* }
* }
* \vspace{-3mm}
*
* \renderings{
* \medrendering{\pluginref{sky} emitter}{emitter_sunsky_sky}
* \medrendering{\pluginref{sun} emitter}{emitter_sunsky_sun}
* \medrendering{\pluginref{sunsky} emitter}{emitter_sunsky_sunsky}
* \vspace{-2mm}
* \caption{A coated rough copper test ball lit with the three
* provided daylight illumination models}
* }
* \vspace{5mm}
* \vspace{1mm}
* This convenience plugin has the sole purpose of instantiating
* \pluginref{sun} and \pluginref{sky} and merging them into a joint
* environment map. Please refer to these plugins individually for more
@ -95,7 +101,8 @@ public:
: Emitter(props) {
Float scale = props.getFloat("scale", 1.0f),
sunScale = props.getFloat("sunScale", scale),
skyScale = props.getFloat("skyScale", scale);
skyScale = props.getFloat("skyScale", scale),
sunRadiusScale = props.getFloat("sunRadiusScale", 1.0f);
const Transform &trafo = m_worldTransform->eval(0);
@ -159,39 +166,53 @@ public:
props.getFloat("turbidity", 3.0f)) * sunScale;
sun.elevation *= props.getFloat("stretch", 1.0f);
Frame sunFrame = Frame(toSphere(sun));
Float theta = degToRad(SUN_APP_RADIUS * 0.5f);
size_t pixelCount = resolution*resolution/2;
Float cosTheta = std::cos(theta * props.getFloat("sunRadiusScale", 1.0f));
if (sunRadiusScale == 0) {
Float solidAngle = 2 * M_PI * (1 - std::cos(theta));
Properties props("directional");
props.setVector("direction", -trafo(sunFrame.n));
props.setFloat("samplingWeight", m_samplingWeight);
props.setSpectrum("irradiance", sunRadiance * solidAngle);
/* Ratio of the sphere that is covered by the sun */
Float coveredPortion = 0.5f * (1 - cosTheta);
m_dirEmitter = static_cast<Emitter *>(
PluginManager::getInstance()->createObject(
MTS_CLASS(Emitter), props));
} else {
size_t pixelCount = resolution*resolution/2;
Float cosTheta = std::cos(theta * sunRadiusScale);
/* Approx. number of samples that need to be generated,
be very conservative */
size_t nSamples = (size_t) std::max((Float) 100,
(pixelCount * coveredPortion * 1000));
/* Ratio of the sphere that is covered by the sun */
Float coveredPortion = 0.5f * (1 - cosTheta);
factor = Point2(bitmap->getWidth() / (2*M_PI),
bitmap->getHeight() / M_PI);
/* Approx. number of samples that need to be generated,
be very conservative */
size_t nSamples = (size_t) std::max((Float) 100,
(pixelCount * coveredPortion * 1000));
Spectrum value =
sunRadiance * (2 * M_PI * (1-std::cos(theta))) *
(bitmap->getWidth() * bitmap->getHeight())
/ (2.0f * M_PI * M_PI * (Float) nSamples);
factor = Point2(bitmap->getWidth() / (2*M_PI),
bitmap->getHeight() / M_PI);
for (size_t i=0; i<nSamples; ++i) {
Vector dir = sunFrame.toWorld(
Warp::squareToUniformCone(cosTheta, sample02(i)));
Spectrum value =
sunRadiance * (2 * M_PI * (1-std::cos(theta))) *
(bitmap->getWidth() * bitmap->getHeight())
/ (2.0f * M_PI * M_PI * (Float) nSamples);
Float sinTheta = math::safe_sqrt(1-dir.y*dir.y);
SphericalCoordinates sphCoords = fromSphere(dir);
for (size_t i=0; i<nSamples; ++i) {
Vector dir = sunFrame.toWorld(
Warp::squareToUniformCone(cosTheta, sample02(i)));
Point2i pos(
std::min(std::max(0, (int) (sphCoords.azimuth * factor.x)), bitmap->getWidth()-1),
std::min(std::max(0, (int) (sphCoords.elevation * factor.y)), bitmap->getHeight()-1));
Float sinTheta = math::safe_sqrt(1-dir.y*dir.y);
SphericalCoordinates sphCoords = fromSphere(dir);
Point2i pos(
std::min(std::max(0, (int) (sphCoords.azimuth * factor.x)), bitmap->getWidth()-1),
std::min(std::max(0, (int) (sphCoords.elevation * factor.y)), bitmap->getHeight()-1));
data[pos.x + pos.y * bitmap->getWidth()] += value / std::max((Float) 1e-3f, sinTheta);
}
data[pos.x + pos.y * bitmap->getWidth()] += value / std::max((Float) 1e-3f, sinTheta);
}
Log(EDebug, "Done (took %i ms)", timer->getMilliseconds());
@ -204,7 +225,7 @@ public:
envProps.setData("bitmap", bitmapData);
envProps.setTransform("toWorld", trafo);
envProps.setFloat("samplingWeight", m_samplingWeight);
m_emitter = static_cast<Emitter *>(
m_envEmitter = static_cast<Emitter *>(
PluginManager::getInstance()->createObject(
MTS_CLASS(Emitter), envProps));
@ -217,17 +238,24 @@ public:
SunSkyEmitter(Stream *stream, InstanceManager *manager)
: Emitter(stream, manager) {
m_emitter = static_cast<Emitter *>(manager->getInstance(stream));
m_envEmitter = static_cast<Emitter *>(manager->getInstance(stream));
if (stream->readBool())
m_dirEmitter = static_cast<Emitter *>(manager->getInstance(stream));
}
void serialize(Stream *stream, InstanceManager *manager) const {
Emitter::serialize(stream, manager);
manager->serialize(stream, m_emitter.get());
manager->serialize(stream, m_envEmitter.get());
stream->writeBool(m_dirEmitter.get() != NULL);
if (m_dirEmitter.get())
manager->serialize(stream, m_dirEmitter.get());
}
void configure() {
Emitter::configure();
m_emitter->configure();
m_envEmitter->configure();
if (m_dirEmitter)
m_dirEmitter->configure();
}
bool isCompound() const {
@ -240,14 +268,17 @@ public:
Emitter *getElement(size_t i) {
if (i == 0)
return m_emitter;
return m_envEmitter;
else if (i == 1)
return m_dirEmitter;
else
return NULL;
}
MTS_DECLARE_CLASS()
private:
ref<Emitter> m_emitter;
ref<Emitter> m_dirEmitter;
ref<Emitter> m_envEmitter;
};
MTS_IMPLEMENT_CLASS_S(SunSkyEmitter, false, Emitter)

View File

@ -643,7 +643,11 @@ bool SpecularManifold::update(Path &path, int start, int end) {
step = -1; mode = ERadiance;
}
for (int j=0, i=start; j < (int) m_vertices.size()-2; ++j, i += step) {
int last = (int) m_vertices.size() - 2;
if (m_vertices[0].type == EPinnedDirection)
last = std::max(last, 1);
for (int j=0, i=start; j < last; ++j, i += step) {
const SimpleVertex
&v = m_vertices[j],
&vn = m_vertices[j+1];
@ -664,7 +668,8 @@ bool SpecularManifold::update(Path &path, int start, int end) {
PathVertex::EMediumInteraction : PathVertex::ESurfaceInteraction;
if (v.type == EPinnedDirection) {
/* Create a fake vertex and use it to call sampleDirect() */
/* Create a fake vertex and use it to call sampleDirect(). This is
kind of terrible -- a nicer API is needed to cleanly support this */
PathVertex temp;
temp.type = PathVertex::EMediumInteraction;
temp.degenerate = false;
@ -681,7 +686,7 @@ bool SpecularManifold::update(Path &path, int start, int end) {
return false;
}
if (m_vertices.size() > 3) {
if (m_vertices.size() >= 3) {
PathVertex *succ2 = path.vertex(i+2*step);
PathEdge *succ2Edge = path.edge(predEdgeIdx + 2*step);
if (!succ->sampleNext(m_scene, NULL, vertex, succEdge, succ2Edge, succ2, mode)) {
@ -863,37 +868,30 @@ Float SpecularManifold::det(const Path &path, int a, int b, int c) {
}
Float SpecularManifold::multiG(const Path &path, int a, int b) {
if (a == 0) {
if (a == 0)
++a;
if (!path.vertex(a)->isConnectable())
++a;
} else if (a == path.length()) {
else if (a == path.length())
--a;
if (!path.vertex(a)->isConnectable())
--a;
}
if (b == 0) {
if (b == 0)
++b;
if (!path.vertex(b)->isConnectable())
++b;
} else if (b == path.length()) {
else if (b == path.length())
--b;
if (!path.vertex(b)->isConnectable())
--b;
}
int step = b > a ? 1 : -1;
while (!path.vertex(b)->isConnectable())
b -= step;
while (!path.vertex(a)->isConnectable())
a += step;
int step = b > a ? 1 : -1, start = a;
Float result = 1;
BDAssert(path.vertex(a)->isConnectable() && path.vertex(b)->isConnectable());
for (int i = a + step; i != b + step; i += step) {
for (int i = a + step, start = a; i != b + step; i += step) {
if (path.vertex(i)->isConnectable()) {
result *= G(path, start, i);
start = i;
}
}
BDAssert(start == b);
return result;
}

View File

@ -272,8 +272,10 @@ bool ManifoldPerturbation::sampleMutation(
for (int i=l+1; i<m; ++i) {
proposal.append(m_pool.allocVertex());
proposal.append(m_pool.allocEdge());
memset(proposal.vertex(proposal.vertexCount()-1), 0, sizeof(PathVertex)); /// XXX
}
proposal.append(source, m, k+1);
proposal.vertex(a) = proposal.vertex(a)->clone(m_pool);
proposal.vertex(c) = proposal.vertex(c)->clone(m_pool);
@ -521,8 +523,8 @@ bool ManifoldPerturbation::sampleMutation(
}
}
if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
(vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) {
if (((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
(vb_old->isMediumInteraction() && m_thetaDiffMediumSamples < DIFF_SAMPLES)) && b+1 != k && b-1 != 0) {
LockGuard guard(m_thetaDiffMutex);
if ((vb_old->isSurfaceInteraction() && m_thetaDiffSurfaceSamples < DIFF_SAMPLES) ||
@ -586,6 +588,7 @@ bool ManifoldPerturbation::sampleMutation(
}
}
}
if (!PathVertex::connect(m_scene,
proposal.vertexOrNull(q-1),
proposal.edgeOrNull(q-1),

View File

@ -580,7 +580,9 @@ Float PathSampler::computeAverageLuminance(size_t sampleCount) {
}
static void seedCallback(std::vector<PathSeed> &output, const Bitmap *importanceMap,
int s, int t, Float weight, Path &path) {
Float &accum, int s, int t, Float weight, Path &path) {
accum += weight;
if (importanceMap) {
const Float *luminanceValues = importanceMap->getFloatData();
Vector2i size = importanceMap->getSize();
@ -608,40 +610,40 @@ Float PathSampler::generateSeeds(size_t sampleCount, size_t seedCount,
tempSeeds.reserve(sampleCount);
SplatList splatList;
Float luminance;
PathCallback callback = boost::bind(&seedCallback,
boost::ref(tempSeeds), importanceMap, _1, _2, _3, _4);
boost::ref(tempSeeds), importanceMap, boost::ref(luminance),
_1, _2, _3, _4);
Float mean = 0.0f, variance = 0.0f;
for (size_t i=0; i<sampleCount; ++i) {
size_t seedIndex = tempSeeds.size();
size_t sampleIndex = m_sensorSampler->getSampleIndex();
Float lum = 0.0f;
luminance = 0.0f;
if (fineGrained) {
samplePaths(Point2i(-1), callback);
/* Fine seed granularity (e.g. for Veach-MLT).
Set the correct the sample index value */
for (size_t j = seedIndex; j<tempSeeds.size(); ++j) {
for (size_t j = seedIndex; j<tempSeeds.size(); ++j)
tempSeeds[j].sampleIndex = sampleIndex;
lum += tempSeeds[j].luminance;
}
} else {
/* Run the path sampling strategy */
sampleSplats(Point2i(-1), splatList);
luminance = splatList.luminance;
splatList.normalize(importanceMap);
lum = splatList.luminance;
/* Coarse seed granularity (e.g. for PSSMLT) */
if (lum != 0)
tempSeeds.push_back(PathSeed(sampleIndex, lum));
if (luminance != 0)
tempSeeds.push_back(PathSeed(sampleIndex, luminance));
}
/* Numerically robust online variance estimation using an
algorithm proposed by Donald Knuth (TAOCP vol.2, 3rd ed., p.232) */
Float delta = lum - mean;
Float delta = luminance - mean;
mean += delta / (Float) (i+1);
variance += delta * (lum - mean);
variance += delta * (luminance - mean);
}
BDAssert(m_pool.unused());
Float stddev = std::sqrt(variance / (sampleCount-1));
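For reference, a hedged standalone sketch of the Knuth online scheme used above; unlike the naive sum-of-squares formula, it avoids catastrophic cancellation when the mean is large relative to the variance:

#include <cstddef>

/* Numerically robust streaming mean/variance
   (Knuth, TAOCP vol. 2, 3rd ed., p. 232) */
struct OnlineVariance {
    size_t n;
    double mean, M2;
    OnlineVariance() : n(0), mean(0), M2(0) { }

    void put(double x) {
        ++n;
        double delta = x - mean;
        mean += delta / n;
        M2 += delta * (x - mean);  /* note: uses the *updated* mean */
    }
    /* Unbiased sample variance, matching the stddev computation above */
    double variance() const { return n > 1 ? M2 / (n - 1) : 0.0; }
};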

View File

@ -158,10 +158,11 @@ void Scheduler::retainResource(int id) {
rec->refCount++;
}
void Scheduler::unregisterResource(int id) {
bool Scheduler::unregisterResource(int id) {
LockGuard lock(m_mutex);
if (m_resources.find(id) == m_resources.end()) {
Log(EError, "unregisterResource(): could not find the resource with ID %i!", id);
Log(EWarn, "unregisterResource(): could not find the resource with ID %i!", id);
return false;
}
ResourceRecord *rec = m_resources[id];
if (--rec->refCount == 0) {
@ -175,6 +176,7 @@ void Scheduler::unregisterResource(int id) {
for (size_t i=0; i<m_workers.size(); ++i)
m_workers[i]->signalResourceExpiration(id);
}
return true;
}
SerializableObject *Scheduler::getResource(int id, int coreIndex) {
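A hedged sketch of how a caller can use the relaxed contract (the scheduler pointer and resource ID are assumed to come from elsewhere):

/* Double-unregistration is now a recoverable condition, not a fatal error */
if (!sched->unregisterResource(resourceID))
    Log(EWarn, "Resource %i was already released", resourceID);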

View File

@ -16,6 +16,12 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#if defined(__GXX_EXPERIMENTAL_CXX0X__)
/* Needed to prevent a segmentation fault in the Intel C++
compiler on Linux (as of Nov 2012) */
#undef __GXX_EXPERIMENTAL_CXX0X__
#endif
#if MTS_SSE
#include <mitsuba/mitsuba.h>
#include <mitsuba/core/ssemath.h>

View File

@ -134,7 +134,7 @@ void Statistics::logPlugin(const std::string &name, const std::string &descr) {
}
void Statistics::printStats() {
SLog(EInfo, "Statistics: \n%s", getStats().c_str());
SLog(EInfo, "Statistics:\n%s", getStats().c_str());
}
std::string Statistics::getStats() {

View File

@ -469,9 +469,9 @@ bool solveLinearSystem2x2(const Float a[2][2], const Float b[2], Float x[2]) {
return true;
}
Float interpCubic1D(Float x, const Float *data, Float min, Float max, size_t size) {
Float interpCubic1D(Float x, const Float *data, Float min, Float max, size_t size, bool extrapolate) {
/* Give up when given an out-of-range or NaN argument */
if (!(x >= min && x <= max))
if (!(x >= min && x <= max) && !extrapolate)
return 0.0f;
/* Transform 'x' so that knots lie at integer positions */
@ -508,9 +508,9 @@ Float interpCubic1D(Float x, const Float *data, Float min, Float max, size_t siz
( t3 - t2) * d1;
}
Float interpCubic1DIrregular(Float x, const Float *nodes, const Float *data, size_t size) {
Float interpCubic1DIrregular(Float x, const Float *nodes, const Float *data, size_t size, bool extrapolate) {
/* Give up when given an out-of-range or NaN argument */
if (!(x >= nodes[0] && x <= nodes[size-1]))
if (!(x >= nodes[0] && x <= nodes[size-1]) && !extrapolate)
return 0.0f;
size_t k = (size_t) std::max((ptrdiff_t) 0, std::min((ptrdiff_t) size - 2,
@ -545,7 +545,7 @@ Float interpCubic1DIrregular(Float x, const Float *nodes, const Float *data, siz
Float interpCubic2D(const Point2 &p, const Float *data,
const Point2 &min, const Point2 &max, const Size2 &size) {
const Point2 &min, const Point2 &max, const Size2 &size, bool extrapolate) {
Float knotWeights[2][4];
Size2 knot;
@ -553,7 +553,7 @@ Float interpCubic2D(const Point2 &p, const Float *data,
for (int dim=0; dim<2; ++dim) {
Float *weights = knotWeights[dim];
/* Give up when given an out-of-range or NaN argument */
if (!(p[dim] >= min[dim] && p[dim] <= max[dim]))
if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate)
return 0.0f;
/* Transform 'p' so that knots lie at integer positions */
@ -615,7 +615,7 @@ Float interpCubic2D(const Point2 &p, const Float *data,
}
Float interpCubic2DIrregular(const Point2 &p, const Float **nodes_,
const Float *data, const Size2 &size) {
const Float *data, const Size2 &size, bool extrapolate) {
Float knotWeights[2][4];
Size2 knot;
@ -625,7 +625,7 @@ Float interpCubic2DIrregular(const Point2 &p, const Float **nodes_,
Float *weights = knotWeights[dim];
/* Give up when given an out-of-range or NaN argument */
if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]))
if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate)
return 0.0f;
/* Find the index of the left knot in the queried subinterval, be
@ -689,7 +689,7 @@ Float interpCubic2DIrregular(const Point2 &p, const Float **nodes_,
}
Float interpCubic3D(const Point3 &p, const Float *data,
const Point3 &min, const Point3 &max, const Size3 &size) {
const Point3 &min, const Point3 &max, const Size3 &size, bool extrapolate) {
Float knotWeights[3][4];
Size3 knot;
@ -697,7 +697,7 @@ Float interpCubic3D(const Point3 &p, const Float *data,
for (int dim=0; dim<3; ++dim) {
Float *weights = knotWeights[dim];
/* Give up when given an out-of-range or NaN argument */
if (!(p[dim] >= min[dim] && p[dim] <= max[dim]))
if (!(p[dim] >= min[dim] && p[dim] <= max[dim]) && !extrapolate)
return 0.0f;
/* Transform 'p' so that knots lie at integer positions */
@ -763,7 +763,7 @@ Float interpCubic3D(const Point3 &p, const Float *data,
}
Float interpCubic3DIrregular(const Point3 &p, const Float **nodes_,
const Float *data, const Size3 &size) {
const Float *data, const Size3 &size, bool extrapolate) {
Float knotWeights[3][4];
Size3 knot;
@ -773,7 +773,7 @@ Float interpCubic3DIrregular(const Point3 &p, const Float **nodes_,
Float *weights = knotWeights[dim];
/* Give up when given an out-of-range or NaN argument */
if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]))
if (!(p[dim] >= nodes[0] && p[dim] <= nodes[size[dim]-1]) && !extrapolate)
return 0.0f;
/* Find the index of the left knot in the queried subinterval, be

View File

@ -542,7 +542,7 @@ void TriMesh::rebuildTopology(Float maxAngle) {
configure();
}
void TriMesh::computeNormals() {
void TriMesh::computeNormals(bool force) {
int invalidNormals = 0;
if (m_faceNormals) {
if (m_normals) {
@ -558,7 +558,7 @@ void TriMesh::computeNormals() {
}
}
} else {
if (m_normals) {
if (m_normals && !force) {
if (m_flipNormals) {
for (size_t i=0; i<m_vertexCount; i++)
m_normals[i] *= -1;
@ -566,7 +566,8 @@ void TriMesh::computeNormals() {
/* Do nothing */
}
} else {
m_normals = new Normal[m_vertexCount];
if (!m_normals)
m_normals = new Normal[m_vertexCount];
memset(m_normals, 0, sizeof(Normal)*m_vertexCount);
/* Well-behaved vertex normal computation based on
@ -617,7 +618,7 @@ void TriMesh::computeNormals() {
}
void TriMesh::computeUVTangents() {
int degenerate = 0;
// int degenerate = 0;
if (!m_texcoords) {
bool anisotropic = hasBSDF() && m_bsdf->getType() & BSDF::EAnisotropic;
if (anisotropic)
@ -654,7 +655,7 @@ void TriMesh::computeUVTangents() {
Normal n = Normal(cross(dP1, dP2));
Float length = n.length();
if (length == 0) {
++degenerate;
// ++degenerate;
continue;
}
@ -670,9 +671,12 @@ void TriMesh::computeUVTangents() {
}
}
if (degenerate > 0)
Log(EWarn, "\"%s\": computeTangentSpace(): Mesh contains %i "
"degenerate triangles!", getName().c_str(), degenerate);
#if 0
/* Don't be so noisy -- this isn't usually a problem.. */
if (degenerate > 0)
Log(EWarn, "\"%s\": computeTangentSpace(): Mesh contains %i "
"degenerate triangles!", getName().c_str(), degenerate);
#endif
}
void TriMesh::getNormalDerivative(const Intersection &its,

View File

@ -1593,7 +1593,7 @@ void MainWindow::on_actionStartServer_triggered() {
void MainWindow::on_actionEnableCommandLine_triggered() {
if (QMessageBox::question(this, tr("Enable command line access"),
tr("<p>If you proceed, Mitsuba will create symbolic links in <tt>/usr/bin</tt> and <tt>/Library/Python/{2.6,2.7}/site-packages</tt>, "
tr("<p>If you proceed, Mitsuba will create symbolic links in <tt>/usr/bin</tt> and <tt>/Library/Python/{2.6,2.7}/site-packages</tt>, as well as an entry in .bashrc, "
"which enable command line and Python usage. Note that you will have to "
"repeat this process every time the Mitsuba application is moved.</p>"
"<p>Create links?</p>"),

View File

@ -4,6 +4,7 @@
#include <AuthorizationTags.h>
#include <unistd.h>
#include <iostream>
#include <sstream>
namespace mitsuba {
extern std::string __mts_bundlepath();
@ -30,7 +31,10 @@ bool create_symlinks() {
}
std::string bundlePath = mitsuba::__mts_bundlepath();
std::string path = bundlePath + "/Contents/MacOS/symlinks_install";
char *args[] = { const_cast<char *>(bundlePath.c_str()), NULL };
std::ostringstream oss;
oss << getuid();
std::string uid = oss.str();
char *args[] = { const_cast<char *>(bundlePath.c_str()), const_cast<char *>(uid.c_str()), NULL };
FILE *pipe = NULL;
flags = kAuthorizationFlagDefaults;
status = AuthorizationExecuteWithPrivileges(ref, const_cast<char *>(path.c_str()), flags, args, &pipe);

View File

@ -3,6 +3,7 @@
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <pwd.h>
void installPython(const char *basedir, const char *version) {
char fname[MAXPATHLEN];
@ -23,6 +24,22 @@ void installPython(const char *basedir, const char *version) {
fclose(f);
}
void appendShellConfig(const char *basedir, const char *target, const char *fmt, const char *dir) {
char fname[MAXPATHLEN];
snprintf(fname, sizeof(fname), "%s/%s", basedir, target);
if (access(fname, R_OK) < 0)
return;
FILE *f = fopen(fname, "a");
if (!f)
return;
fprintf(f, fmt, dir);
fclose(f);
}
void install(const char *basedir, const char *name) {
char fname[MAXPATHLEN];
FILE *f;
@ -51,11 +68,11 @@ void install(const char *basedir, const char *name) {
}
int main(int argc, char **argv) {
if (argc != 2) {
if (argc != 3) {
fprintf(stderr, "Incorrect number of arguments!\n");
return -1;
}
if (setuid(0) != 0) {
fprintf(stderr, "setuid(): failed!\n");
return -1;
@ -68,6 +85,11 @@ int main(int argc, char **argv) {
install(argv[1], "mtsimport");
installPython(argv[1], "2.6");
installPython(argv[1], "2.7");
struct passwd *pw = getpwuid(atoi(argv[2]));
appendShellConfig(pw->pw_dir, ".bashrc", "\nexport LD_LIBRARY_PATH=%s/Contents/Frameworks:$LD_LIBRARY_PATH\n", argv[1]);
appendShellConfig(pw->pw_dir, ".zshrc", "\nexport LD_LIBRARY_PATH=%s/Contents/Frameworks:$LD_LIBRARY_PATH\n", argv[1]);
appendShellConfig(pw->pw_dir, ".cshrc", "\nsetenv LD_LIBRARY_PATH %s/Contents/Frameworks:${LD_LIBRARY_PATH}\n", argv[1]);
return 0;
}

View File

@ -510,8 +510,8 @@ public:
std::string toString() const {
std::ostringstream oss;
oss << "Cylinder[" << endl
<< " radius = " << m_radius << ", " << endl
<< " length = " << m_length << ", " << endl
<< " radius = " << m_radius << "," << endl
<< " length = " << m_length << "," << endl
<< " objectToWorld = " << indent(m_objectToWorld.toString()) << "," << endl
<< " bsdf = " << indent(m_bsdf.toString()) << "," << endl;
if (isMediumTransition())

View File

@ -261,7 +261,7 @@ public:
std::string toString() const {
std::ostringstream oss;
oss << "Disk[" << endl
<< " objectToWorld = " << indent(m_objectToWorld.toString()) << ", " << endl
<< " objectToWorld = " << indent(m_objectToWorld.toString()) << "," << endl
<< " bsdf = " << indent(m_bsdf.toString()) << "," << endl;
if (isMediumTransition()) {
oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl

View File

@ -39,7 +39,6 @@ MTS_NAMESPACE_BEGIN
Instance::Instance(const Properties &props) : Shape(props) {
m_objectToWorld = props.getTransform("toWorld", Transform());
m_worldToObject = m_objectToWorld.inverse();
m_invScale = 1.0f/m_objectToWorld(Vector(0, 0, 1)).length();
}
Instance::Instance(Stream *stream, InstanceManager *manager)
@ -47,14 +46,12 @@ Instance::Instance(Stream *stream, InstanceManager *manager)
m_shapeGroup = static_cast<ShapeGroup *>(manager->getInstance(stream));
m_objectToWorld = Transform(stream);
m_worldToObject = m_objectToWorld.inverse();
m_invScale = stream->readFloat();
}
void Instance::serialize(Stream *stream, InstanceManager *manager) const {
Shape::serialize(stream, manager);
manager->serialize(stream, m_shapeGroup.get());
m_objectToWorld.serialize(stream);
stream->writeFloat(m_invScale);
}
void Instance::configure() {
@ -130,17 +127,26 @@ void Instance::fillIntersectionRecord(const Ray &_ray,
void Instance::getNormalDerivative(const Intersection &its,
Vector &dndu, Vector &dndv, bool shadingFrame) const {
/// TODO: this is horrible
/* The following is really super-inefficient, but it's
needed to be able to deal with general transformations */
Intersection temp(its);
temp.p = m_worldToObject(its.p);
temp.dpdu = m_worldToObject(its.dpdu);
temp.dpdv = m_worldToObject(its.dpdv);
/* Determine the length of the transformed normal
*before* it was re-normalized */
Normal tn = m_objectToWorld(normalize(m_worldToObject(its.shFrame.n)));
Float invLen = 1/tn.length();
tn *= invLen;
its.shape->getNormalDerivative(temp, dndu, dndv, shadingFrame);
/* The following will probably be incorrect for
non-rigid transformations */
dndu = m_objectToWorld(Normal(dndu))*m_invScale;
dndv = m_objectToWorld(Normal(dndv))*m_invScale;
dndu = m_objectToWorld(Normal(dndu)) * invLen;
dndv = m_objectToWorld(Normal(dndv)) * invLen;
dndu -= tn * dot(tn, dndu);
dndv -= tn * dot(tn, dndv);
}
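In formulas, the code above amounts to differentiating the normalization of the transformed normal (a hedged reconstruction of what the statements compute):
\[
\hat{n} = \frac{A\,n}{\|A\,n\|}, \qquad
\frac{\partial \hat{n}}{\partial u}
  = \frac{1}{\|A\,n\|}\,\bigl(I - \hat{n}\hat{n}^{T}\bigr)\,A\,\frac{\partial n}{\partial u},
\]
where $A$ maps object-space normals to world space and $n$ is the object-space shading normal; the factor $1/\|A\,n\|$ is \c invLen, and the rank-one subtraction corresponds to the final two statements.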
MTS_IMPLEMENT_CLASS_S(Instance, false, Shape)

View File

@ -81,7 +81,6 @@ public:
private:
ref<ShapeGroup> m_shapeGroup;
Transform m_objectToWorld, m_worldToObject;
Float m_invScale;
};
MTS_NAMESPACE_END

View File

@ -229,7 +229,7 @@ public:
std::string toString() const {
std::ostringstream oss;
oss << "Rectangle[" << endl
<< " objectToWorld = " << indent(m_objectToWorld.toString()) << ", " << endl;
<< " objectToWorld = " << indent(m_objectToWorld.toString()) << "," << endl;
if (isMediumTransition())
oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl
<< " exteriorMedium = " << indent(m_exteriorMedium.toString()) << "," << endl;

View File

@ -170,7 +170,7 @@ size_t ShapeGroup::getEffectivePrimitiveCount() const {
std::string ShapeGroup::toString() const {
std::ostringstream oss;
oss << "ShapeGroup[" << endl
<< " name = \"" << m_name << "\", " << endl
<< " name = \"" << m_name << "\"," << endl
<< " primCount = " << m_kdtree->getPrimitiveCount() << endl
<< "]";
return oss.str();

View File

@ -468,8 +468,8 @@ public:
std::string toString() const {
std::ostringstream oss;
oss << "Sphere[" << endl
<< " radius = " << m_radius << ", " << endl
<< " center = " << m_center.toString() << ", " << endl
<< " radius = " << m_radius << "," << endl
<< " center = " << m_center.toString() << "," << endl
<< " bsdf = " << indent(m_bsdf.toString()) << "," << endl;
if (isMediumTransition())
oss << " interiorMedium = " << indent(m_interiorMedium.toString()) << "," << endl