diff --git a/include/mitsuba/render/trimesh.h b/include/mitsuba/render/trimesh.h
index 1ddac72c..c4d62365 100644
--- a/include/mitsuba/render/trimesh.h
+++ b/include/mitsuba/render/trimesh.h
@@ -54,7 +54,7 @@ struct TangentSpace {
 /** \brief Abstract triangle mesh base class
  * \ingroup librender
- e \ingroup libpython
+ * \ingroup libpython
  */
 class MTS_EXPORT_RENDER TriMesh : public Shape {
 public:
diff --git a/src/integrators/SConscript b/src/integrators/SConscript
index f17cc6e4..cef2524c 100644
--- a/src/integrators/SConscript
+++ b/src/integrators/SConscript
@@ -27,7 +27,7 @@ plugins += bidirEnv.SharedLibrary('bdpt',
 plugins += bidirEnv.SharedLibrary('pssmlt',
     ['pssmlt/pssmlt.cpp', 'pssmlt/pssmlt_sampler.cpp',
-    'pssmlt/pssmlt_proc.cpp']);
+    'pssmlt/pssmlt_proc.cpp']);
 plugins += bidirEnv.SharedLibrary('mlt',
     ['mlt/mlt.cpp', 'mlt/mlt_proc.cpp']
diff --git a/src/integrators/path/volpath_simple.cpp b/src/integrators/path/volpath_simple.cpp
index 24623e41..3733fc5d 100644
--- a/src/integrators/path/volpath_simple.cpp
+++ b/src/integrators/path/volpath_simple.cpp
@@ -42,7 +42,7 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length
  * }
  *
  * This plugin provides a basic volumetric path tracer that can be used to
- * compute approximate solutions to the radiative transfer equation. This
+ * compute approximate solutions of the radiative transfer equation. This
  * particular integrator is named ``simple'' because it does not make use of
  * multiple importance sampling. This results in a potentially
  * faster execution time. On the other hand, it also means that this
@@ -50,6 +50,16 @@ static StatsCounter avgPathLength("Volumetric path tracer", "Average path length
  * highly glossy materials. In this case, please use \pluginref{volpath}
  * or one of the bidirectional techniques.
 *
+ * This integrator has special support for \emph{index-matched} transmission
+ * events (i.e. surface scattering events that do not change the direction
+ * of light). As a consequence, participating media enclosed by a stencil shape (see
+ * \secref{shapes} for details) are rendered considerably more efficiently when this
+ * shape has \emph{no}\footnote{this is what signals to Mitsuba that the boundary is
+ * index-matched and does not interact with light in any way. Alternatively,
+ * the \pluginref{mask} and \pluginref{thindielectric} BSDF can be used to specify
+ * index-matched boundaries that involve some amount of interaction.} BSDF assigned
+ * to it (as compared to, say, a \pluginref{dielectric} or \pluginref{roughdielectric} BSDF).
+ *
 * \remarks{
 *    \item This integrator performs poorly when rendering
 *      participating media that have a different index of refraction compared
diff --git a/src/librender/shape.cpp b/src/librender/shape.cpp
index 8a865e70..11540521 100644
--- a/src/librender/shape.cpp
+++ b/src/librender/shape.cpp
@@ -67,6 +67,10 @@ void Shape::configure() {
         bsdf->configure();
         addChild(bsdf);
     }
+
+    if ((m_bsdf->getType() & BSDF::ENull) && (isEmitter() || isSensor() || hasSubsurface()))
+        Log(EError, "Shape \"%s\" has an index-matched BSDF and an "
+            "emitter/sensor/subsurface attachment. This is not allowed!", getName().c_str());
 }
 
 bool Shape::isCompound() const {
diff --git a/src/medium/materials.h b/src/medium/materials.h
index 772eb63d..b31de859 100644
--- a/src/medium/materials.h
+++ b/src/medium/materials.h
@@ -33,18 +33,18 @@ struct MaterialEntry {
 static MaterialEntry materialData[] = {
     /* Fitted data from "A Practical Model for Subsurface scattering" (Jensen et al.). No anisotropy data available. */
-    { "Apple",      { 2.29f, 2.39f, 1.97f }, { 0.0030f, 0.0034f, 0.046f },  { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Chicken1",   { 0.15f, 0.21f, 0.38f }, { 0.0015f, 0.077f, 0.19f },    { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Chicken2",   { 0.19f, 0.25f, 0.32f }, { 0.0018f, 0.088f, 0.20f },    { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Cream",      { 7.38f, 5.47f, 3.15f }, { 0.0002f, 0.0028f, 0.0163f }, { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Ketchup",    { 0.18f, 0.07f, 0.03f }, { 0.061f, 0.97f, 1.45f },      { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Marble",     { 2.19f, 2.62f, 3.00f }, { 0.0021f, 0.0041f, 0.0071f }, { 1.0f, 1.0f, 1.0f }, 1.5f },
-    { "Potato",     { 0.68f, 0.70f, 0.55f }, { 0.0024f, 0.0090f, 0.12f },   { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Skimmilk",   { 0.70f, 1.22f, 1.90f }, { 0.0014f, 0.0025f, 0.0142f }, { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Skin1",      { 0.74f, 0.88f, 1.01f }, { 0.032f, 0.17f, 0.48f },      { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Skin2",      { 1.09f, 1.59f, 1.79f }, { 0.013f, 0.070f, 0.145f },    { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Spectralon", { 11.6f, 20.4f, 14.9f }, { 0.00f, 0.00f, 0.00f },       { 1.0f, 1.0f, 1.0f }, 1.3f },
-    { "Wholemilk",  { 2.55f, 3.21f, 3.77f }, { 0.0011f, 0.0024f, 0.014f },  { 1.0f, 1.0f, 1.0f }, 1.3f },
+    { "Apple",      { 2.29f, 2.39f, 1.97f }, { 0.0030f, 0.0034f, 0.046f },  { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Chicken1",   { 0.15f, 0.21f, 0.38f }, { 0.0015f, 0.077f, 0.19f },    { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Chicken2",   { 0.19f, 0.25f, 0.32f }, { 0.0018f, 0.088f, 0.20f },    { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Cream",      { 7.38f, 5.47f, 3.15f }, { 0.0002f, 0.0028f, 0.0163f }, { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Ketchup",    { 0.18f, 0.07f, 0.03f }, { 0.061f, 0.97f, 1.45f },      { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Marble",     { 2.19f, 2.62f, 3.00f }, { 0.0021f, 0.0041f, 0.0071f }, { 0.0f, 0.0f, 0.0f }, 1.5f },
+    { "Potato",     { 0.68f, 0.70f, 0.55f }, { 0.0024f, 0.0090f, 0.12f },   { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Skimmilk",   { 0.70f, 1.22f, 1.90f }, { 0.0014f, 0.0025f, 0.0142f }, { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Skin1",      { 0.74f, 0.88f, 1.01f }, { 0.032f, 0.17f, 0.48f },      { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Skin2",      { 1.09f, 1.59f, 1.79f }, { 0.013f, 0.070f, 0.145f },    { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Spectralon", { 11.6f, 20.4f, 14.9f }, { 0.00f, 0.00f, 0.00f },       { 0.0f, 0.0f, 0.0f }, 1.3f },
+    { "Wholemilk",  { 2.55f, 3.21f, 3.77f }, { 0.0011f, 0.0024f, 0.014f },  { 0.0f, 0.0f, 0.0f }, 1.3f },
     /* From "Acquiring Scattering Properties of Participating Media by
        Dilution" by Narasimhan, Gupta, Donner, Ramamoorthi, Nayar, Jensen (SIGGRAPH 2006) */
diff --git a/src/mtsgui/rendersettingsdlg.cpp b/src/mtsgui/rendersettingsdlg.cpp
index 9d6b5a41..0eb885ef 100644
--- a/src/mtsgui/rendersettingsdlg.cpp
+++ b/src/mtsgui/rendersettingsdlg.cpp
@@ -152,7 +152,7 @@ void RenderSettingsDialog::setDocumentation(const QString &text) {
         ui->groupBox->setTitle(tr("Documentation"));
     }
-    ui->helpViewer->setHtml(comments + m_currentDocumentation);
+    ui->helpViewer->setHtml(comments + "
@@ -31,11 +31,11 @@
situations where a light ray impinges on an object from a direction that is
classified as "outside" according to the shading normal, and "inside" according
to the true geometric normal. The strictNormals parameter specifies the
intended behavior when such cases arise. The default (false, i.e. "carry on")
gives precedence to the information given by the shading normal and considers
such light paths to be valid. This can theoretically cause light "leaks"
through boundaries, but it is not much of a problem in practice. When set to
true, the path tracer detects inconsistencies and ignores these paths. When
objects are poorly tessellated, this latter option may cause them to lose a
significant amount of the incident radiation (or, in other words, they will
look dark). The bidirectional integrators in Mitsuba (BDPT, PSSMLT, MLT, ...)
implicitly have strictNormals activated. Hence, another use of this parameter
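For reference, the parameter discussed above would be enabled like this in a scene file (a minimal sketch, shown here for the standard path tracer; the default is false):

    <integrator type="path">
        <!-- Reject light paths that are inconsistent between shading and geometric normals -->
        <boolean name="strictNormals" value="true"/>
    </integrator>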
@@ -46,21 +46,21 @@
Irradiance caching integrator - forwards all radiance computations to an
arbitrary nested sampling-based integrator - with one exception: whenever a
Lambertian surface is intersected, an internal irradiance cache is queried for
the indirect illumination at the surface position in question. If this query
is successful, the sub-integrator is only used to compute the remaining types
of radiance (direct, in-scatter, emission) and their sum is returned
afterwards. When a query is unsuccessful, a new data point is generated by a
final gathering step. The generality of this implementation allows it to be
used in conjunction with photon mapping (the most likely application) as well
as all other sampling-based integrators in Mitsuba. Several optimizations are
used to improve the achieved interpolation quality, namely irradiance
gradients [Ward et al.], neighbor clamping [Krivanek et al.], a screen-space
clamping metric and an improved error function [Tabellion et al.]. By default,
this integrator also performs a distributed overture pass before rendering,
which is recommended to avoid artifacts resulting from the
@@ -69,7 +69,7 @@
Elevational resolution of the stratified final gather hemisphere. The
azimuthal resolution is two times this value. Default: 14, which leads to
14x(2*14)=392 samples.
Quality setting ("kappa" in the [Tabellion et al.] paper).
@@ -80,13 +80,13 @@
significantly improve the interpolation quality.
Should neighbor clamping [Krivanek et al.] be used? This propagates geometry
information amongst close-by samples and generally leads to better sample
placement.
If set to true, the influence region of samples will be clamped using the
screen-space metric by [Tabellion et al.]. Turning this off may lead to
excessive sample placement.
@@ -98,10 +98,10 @@
Multiplicative factor for the quality parameter following an overture pass.
This can be used to interpolate amongst more samples, creating a visually
smoother result. Must be 1 or less.
If set to true, direct illumination will be suppressed - useful for checking
the interpolation quality.
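A minimal configuration matching the description above might look as follows (a sketch; the plugin name "irrcache", the "resolution" parameter name and the choice of nested integrator are assumptions):

    <!-- Irradiance cache wrapping a nested sampling-based integrator -->
    <integrator type="irrcache">
        <!-- Elevational final-gather resolution (default 14, as noted above) -->
        <integer name="resolution" value="14"/>
        <!-- Nested integrator that computes the non-cached radiance components -->
        <integrator type="photonmapper"/>
    </integrator>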
@@ -114,17 +114,17 @@
This integrator implements a direct illumination technique that makes use of
multiple importance sampling: for each pixel sample, the integrator generates
a user-specifiable number of BSDF and emitter samples and combines them using
the power heuristic. Usually, the BSDF sampling technique works very well on
glossy objects but does badly everywhere else, while the opposite is true for
the emitter sampling technique. By combining these approaches, one can obtain
a rendering technique that works well in both cases.
The number of samples spent on either technique is configurable, hence it is
also possible to turn this plugin into an emitter sampling-only or BSDF
sampling-only integrator. For best results, combine the direct illumination
integrator with the low-discrepancy sample generator. Generally, the number of
pixel samples of the sample generator can be kept relatively low (e.g.
sampleCount=4), whereas the emitter and BSDF sample parameters of this
integrator should be increased until the variance in the output renderings is
acceptable.
The strictNormals parameter specifies the intended behavior when such cases
arise. The default (false, i.e. "carry on") gives precedence to the
information given by the shading normal and considers such light paths to be
valid. This can theoretically cause light "leaks" through boundaries, but it
is not much of a problem in practice. When set to true, the path tracer
detects inconsistencies and ignores these paths. When objects are poorly
tessellated, this latter option may cause them to lose a significant amount of
the incident radiation (or, in other words, they will look dark). The
bidirectional integrators in Mitsuba (BDPT, PSSMLT, MLT, ...) implicitly have
strictNormals activated. Hence, another use of this parameter
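Following the advice above, a matching configuration could be sketched as follows (the "emitterSamples"/"bsdfSamples" parameter names are assumptions; the sampler normally sits inside the sensor declaration):

    <!-- MIS-based direct illumination integrator -->
    <integrator type="direct">
        <integer name="emitterSamples" value="8"/>
        <integer name="bsdfSamples" value="8"/>
    </integrator>

    <!-- Low-discrepancy sample generator with a small pixel sample count -->
    <sampler type="ldsampler">
        <integer name="sampleCount" value="4"/>
    </sampler>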
@@ -198,11 +198,11 @@
This plugin implements a simple adjoint particle tracer. It does essentially
the exact opposite of the simple volumetric path tracer: instead of tracing
rays from the sensor and attempting to connect them to the light source, this
integrator
@@ -241,21 +241,21 @@
Usually, this is a relatively useless rendering technique due to its high
variance, but there are some cases where it excels. In particular, it does a
good job on scenes where most scattering events are directly visible to the
camera.
When rendering with a finite-aperture sensor, this integrator is able to
intersect the actual aperture, which allows it to handle certain caustic paths
that would otherwise not be visible.
It also supports a specialized "brute force" mode, where the integrator does
not attempt to create connections to the sensor and purely relies on hitting
it via ray tracing. This is one of the worst conceivable rendering techniques
and is not recommended for any applications. It is mainly included for
debugging purposes.
The number of traced particles is given by the number of "samples per pixel"
of the sample generator times the pixel count of the output image. For
instance, 16 samples per pixel on a 512x512 image will cause 4M particles to
be generated.
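A configuration sketch for this integrator (the plugin name "ptracer" and the "bruteForce" parameter name are assumptions based on the description above):

    <integrator type="ptracer">
        <!-- -1: unbounded path depth -->
        <integer name="maxDepth" value="-1"/>
        <!-- Set to true for the "brute force" debugging mode described above -->
        <boolean name="bruteForce" value="false"/>
    </integrator>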
This integrator implements a hardware-accelerated global illumination
rendering technique based on the Instant Radiosity method by Keller. This is
the same approach that is also used in Mitsuba's real-time preview; the reason
for providing it as a separate integrator plugin is to enable automated (e.g.
scripted) usage.
The method roughly works as follows: during a pre-process pass, any present
direct and indirect illumination is converted into a set of virtual point
light sources (VPLs). The scene is then separately rendered many times, each
time using a different VPL as a source of illumination. All of the renderings
created in this manner are accumulated to create the final output image.
Because the individual rendering steps can be executed on a graphics card, it
is possible to render many (i.e. 100-1000) VPLs per second. The method is not
without problems, however. In particular, it performs poorly when rendering
glossy materials, and it produces artifacts in corners and creases. Mitsuba
automatically limits
@@ -310,7 +310,7 @@
Resolution of the shadow maps that are used to compute the point-to-point
visibility.
Specifies the longest path depth in the generated output image (where -1
corresponds to ∞). A value of 1 will only render directly visible light
sources. 2 will lead to single-bounce (direct-only) illumination, and so on.
Relative clamping factor (0=no clamping, 1=full clamping).
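A sketch of the corresponding configuration (the plugin name "vpl" and the parameter names are assumptions derived from the descriptions above):

    <integrator type="vpl">
        <!-- -1 corresponds to an unbounded path depth -->
        <integer name="maxDepth" value="-1"/>
        <!-- Resolution of the point-to-point visibility shadow maps -->
        <integer name="shadowMapResolution" value="512"/>
        <!-- Relative clamping factor (0 = no clamping, 1 = full clamping) -->
        <float name="clamping" value="0.1"/>
    </integrator>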
@@ -328,15 +328,15 @@
unless an extremely large number of photons is used. A simple remedy is to
combine the photon mapper with an irradiance cache, which performs final
gathering to remove these artifacts. Due to its caching nature, the rendering
process will be faster as well.
When the scene contains homogeneous participating media, the Beam Radiance
Estimate by Jarosz et al. is used to estimate the illumination due to
volumetric scattering.
This plugin implements the progressive photon mapping algorithm by Hachisuka
et al. Progressive photon mapping is a variant of photon mapping that
alternates between photon shooting and gathering passes that involve a
relatively small (e.g. 250K) number of photons that are subsequently
discarded. This is done in a way such that the variance and bias of the
resulting output vanish as the number of passes tends to infinity. The
progressive nature of this method enables renderings with an effectively
arbitrary number of photons without exhausting the available system memory.
The desired sample count specified in the sample generator configuration
determines how many photon query points are created per pixel. It should not
be set too high, since the rendering time is approximately proportional to
this number. For good results, use between 2-4 samples along with the
low-discrepancy sampler. Once started, the rendering process continues
indefinitely until it is manually stopped.
This integrator implements stochastic progressive photon mapping (SPPM) by
Hachisuka et al. This algorithm is an extension of progressive photon mapping
that improves convergence when rendering scenes involving depth-of-field,
motion blur, and glossy reflections. Note that this integrator ignores the
sampler configuration—hence, the usual steps of choosing a sample generator
and a desired number of samples per pixel are not necessary. As with PPM, once
started, the rendering process continues indefinitely until it is manually
stopped.
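Following the sampling advice above, a progressive photon mapping setup might be sketched as follows (the plugin names "ppm" and "ldsampler" are assumptions; the sampler normally sits inside the sensor declaration):

    <integrator type="ppm"/>
    <!-- 2-4 low-discrepancy samples per pixel control the number of photon query points -->
    <sampler type="ldsampler">
        <integer name="sampleCount" value="4"/>
    </sampler>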
A bidirectional path tracer generates a sample by starting two separate random
walks from an emitter and a sensor. The resulting subpaths are connected at
every possible position, creating a large number of complete paths of
different lengths. These paths are then used to estimate the amount of
radiance that is transferred from the emitter to a pixel on the sensor.
Generally, some of the created paths will be unusable, since they lead to
@@ -432,9 +432,9 @@
paths based on their predicted utility. The bidirectional path tracer in
Mitsuba is a complete implementation of the technique that handles all
sampling strategies, including those that involve direct interactions with the
sensor. For this purpose, finite-aperture sensors are explicitly represented
by surfaces in the scene so that they can be intersected by random walks
started at emitters. Bidirectional path tracing is a relatively "heavy"
rendering technique—for
@@ -444,19 +444,19 @@
The code parallelizes over multiple cores and machines, but with one caveat:
some of the BDPT path sampling strategies are incompatible with the usual
approach of rendering an image tile by tile, since they can potentially
contribute to any pixel on the screen. This means that each rendering work
unit must be associated with a full-sized image! When network render nodes are
involved or the resolution of this light image is very high, a bottleneck can
arise where more work is spent accumulating or transmitting these images than
actual rendering.
There are two possible resorts should this situation arise: the first one is
to reduce the number of work units so that there is approximately one unit per
core (and hence one image to transmit per core). This can be done by
increasing the block size in the GUI preferences or passing the -b parameter
to the mitsuba executable. The second option is to simply disable these
sampling strategies at the cost of reducing the effectiveness of bidirectional
path tracing (particularly, when rendering caustics). For this, set lightImage
to false. When rendering an image of a reasonable resolution without network
nodes,
@@ -464,14 +464,14 @@
Specifies the longest path depth in the generated output image (where -1
corresponds to ∞). A value of 1 will only render directly visible light
sources. 2 will lead to single-bounce (direct-only) illumination, and so on.
Include sampling strategies that connect paths traced from emitters directly
to the camera? (i.e. what the adjoint particle tracer does) This improves the
effectiveness of bidirectional path tracing but severely increases the local
and remote communication overhead, since large light images must be
transferred between threads or over the network. See the text below for a more
detailed explanation.
@@ -480,8 +480,8 @@
and sensors. Usually a good idea.
Specifies the minimum path depth, after which the implementation will start to
use the "russian roulette" path termination criterion (set to -1 to disable).
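As a sketch of the second option mentioned above (disabling the light-image strategies), using the parameter spelling from the text:

    <integrator type="bdpt">
        <integer name="maxDepth" value="-1"/>
        <!-- Avoid the light-image communication overhead at the cost of
             less effective caustic rendering -->
        <boolean name="lightImage" value="false"/>
    </integrator>

The first option would instead keep lightImage enabled and reduce the number of work units by increasing the block size, e.g. by invoking the renderer as mitsuba -b 64 scene.xml (the block size value is only illustrative).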
@@ -493,19 +493,19 @@
based on Markov Chain Monte Carlo (MCMC) integration. In contrast to simple
methods like path tracing that render images by performing a naive and
memoryless random search for light paths, PSSMLT actively searches for
relevant light paths (as is the case for other MCMC methods). Once such a path
is found, the algorithm tries to explore neighboring paths to amortize the
cost of the search. This can significantly improve the convergence rate of
difficult input. Scenes that were already relatively easy to render usually
don't benefit much from PSSMLT, since the MCMC data management causes
additional computational overheads.
The PSSMLT implementation in Mitsuba can operate on top of either a simple
unidirectional volumetric path tracer or a fully-fledged bidirectional path
tracer with multiple importance sampling, and this choice is controlled by the
bidirectional flag. The unidirectional path tracer is generally much faster,
but it produces lower-quality samples. Depending on the input, either may be
preferable. Caveats:
@@ -513,20 +513,20 @@
to know. The first one is that they only render "relative" output images,
meaning that there is a missing scale factor that must be applied to obtain
proper scene radiance values. The implementation in Mitsuba relies on an
additional Monte Carlo estimator to recover this scale factor. By default, it
uses 100K samples (controlled by the luminanceSamples parameter), which should
be adequate for most applications.
The second caveat is that the amount of computational expense associated with
a pixel in the output image is roughly proportional to its intensity. This
means that when a bright object (e.g. the sun) is visible in a rendering, most
resources are committed to rendering the sun disk at the cost of increased
variance everywhere else. Since this is usually not desired, the twoStage
parameter can be used to enable Two-stage MLT in this case.
In this mode of operation, the renderer first creates a low-resolution version
of the output image to determine the approximate distribution of luminance
values. The second stage then performs the actual rendering, while using the
previously collected information to ensure that the amount of time spent
rendering each pixel is uniform.
@@ -542,19 +542,19 @@
for everything. This can be accomplished by setting directSamples=-1.
If set to true, the MLT algorithm runs on top of a bidirectional path tracer
with multiple importance sampling. Otherwise, the implementation reverts to a
unidirectional path tracer.
Specifies the longest path depth in the generated output image (where -1
corresponds to ∞). A value of 1 will only render directly visible light
sources. 2 will lead to single-bounce (direct-only) illumination, and so on.
By default, this plugin renders the direct illumination component separately
using an optimized direct illumination sampling strategy that uses
low-discrepancy number sequences for superior performance (in other words, it
is not rendered by PSSMLT). This parameter specifies the number of samples
allocated to that method. To
@@ -570,7 +570,7 @@
is approximately twice as bright as another one, but the absolute scale is
unknown. To recover it, this plugin computes the average luminance arriving at
the sensor by generating a number of samples.
Rate at which the implementation tries to replace the current path
@@ -578,15 +578,15 @@
this.
Specifies the minimum path depth, after which the implementation will start to
use the "russian roulette" path termination criterion (set to -1 to disable).
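A configuration sketch that exercises the parameters named above (values are illustrative):

    <integrator type="pssmlt">
        <!-- Run on top of the bidirectional path tracer -->
        <boolean name="bidirectional" value="true"/>
        <!-- Enable Two-stage MLT for scenes with very bright visible objects -->
        <boolean name="twoStage" value="true"/>
        <!-- Samples used to recover the missing absolute scale factor -->
        <integer name="luminanceSamples" value="100000"/>
    </integrator>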
Metropolis Light Transport (MLT) is a seminal rendering technique proposed by
Veach and Guibas, which applies the Metropolis-Hastings algorithm to the
path-space formulation of light transport. Please refer to the PSSMLT
documentation for a general description of MLT-type
@@ -600,9 +600,9 @@
not use such an indirection: it operates directly on the actual light paths.
This means that the algorithm has access to considerably more information
about the problem to be solved, which allows it to perform a directed
exploration of certain classes of light paths. The main downside is that the
implementation is rather complex, which may make it more susceptible to
unforeseen problems. Mitsuba reproduces the full MLT algorithm except for the
lens subpath mutation. In addition, the plugin also provides the
@@ -610,12 +610,12 @@
Energy Redistribution Path Tracing (ERPT) by Cline et al. combines Path
Tracing with the perturbation strategies of Metropolis Light Transport.
An initial set of seed paths is generated using a standard bidirectional path
tracer, and for each one, an MLT-style Markov Chain is subsequently started
and executed for some number of steps. This has the effect of redistributing
the energy of the individual samples over a larger area, hence the name of
this method.
This plugin shares all the perturbation strategies of the MLT plugin, and the
same rules for selecting them apply. In contrast to the original paper by
Cline et al., the Mitsuba implementation uses a bidirectional (rather than a
unidirectional) path tracer to create seed paths. Also, since the image
post-processing filters proposed by the authors add bias to the output, this
plugin does not use them.
This is a windowed version of the theoretically optimal low-pass filter. It is
generally one of the best available filters in terms of producing sharp
high-quality output. Its main disadvantage is that it produces strong ringing
around discontinuities, which can become a serious problem when rendering
bright objects with sharp edges (for instance, a directly visible light source
will have black fringing artifacts around it). This is also the
computationally slowest reconstruction filter.
This plugin has an integer-valued parameter named lobes, that
@@ -798,29 +798,29 @@
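A film/filter sketch using the parameter named above (the plugin names "hdrfilm" and "lanczos" are assumptions):

    <film type="hdrfilm">
        <rfilter type="lanczos">
            <!-- Number of filter lobes; see the trade-offs described above -->
            <integer name="lobes" value="3"/>
        </rfilter>
    </film>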
The independent sampler produces a stream of independent and uniformly
distributed pseudorandom numbers. Internally, it relies on a fast SIMD version
of the Mersenne Twister random number generator.
This is the most basic sample generator; because no precautions are taken to
avoid sample clumping, images produced using this plugin will usually take
longer to converge. In theory, this sampler is initialized using a
deterministic procedure, which means that subsequent runs of Mitsuba should
create the same image. In practice, when rendering with multiple threads
and/or machines, this is not true anymore, since the ordering of samples is
influenced by the operating system scheduler.
Note that the Metropolis-type integrators implemented in Mitsuba are
incompatible with the more sophisticated sample generators shown in this
section. They require this specific sampler and refuse to work otherwise.
The stratified sample generator divides the domain into a discrete number of
strata and produces a sample within each one of them. This generally leads to
less sample clumping when compared to the independent sampler, as well as
better convergence. Due to internal storage costs, stratified samples are only
provided up to a certain dimension, after which independent sampling takes
over.
Like the independent sampler, multicore and network renderings will generally
produce different images in subsequent runs due to the nondeterminism
introduced by the operating system scheduler.
@@ -830,43 +830,43 @@
Effective dimension, up to which stratified samples are provided. The number
here is to be interpreted as the number of subsequent 1D or 2D sample requests
that can be satisfied using "good" samples. Higher values increase both
storage and computational costs.
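A sketch of a stratified sampler declaration using the parameters described above (the "dimension" parameter name is an assumption):

    <sampler type="stratified">
        <integer name="sampleCount" value="16"/>
        <!-- Number of 1D/2D requests that receive stratified rather than independent samples -->
        <integer name="dimension" value="4"/>
    </sampler>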
This plugin implements a simple hybrid sampler that combines aspects of a
Quasi-Monte Carlo sequence with a pseudorandom number generator based on a
technique proposed by Kollig and Keller. It is a good and fast general-purpose
sample generator and is therefore chosen as the default option in Mitsuba.
Some of the QMC samplers in the following pages can generate even better
distributed samples, but this comes at a higher cost in terms of performance.
Roughly, the idea of this sampler is that all of the individual 2D sample
dimensions are first filled using the same (0, 2)-sequence, which is then
randomly scrambled and permuted using numbers generated by a Mersenne Twister
pseudorandom number generator. Note that due to internal storage costs, low
discrepancy samples are only provided up to a certain dimension, after which
independent sampling takes over. The name of this plugin stems from the fact
that (0, 2)-sequences minimize the so-called star discrepancy, which is a
quality criterion on their spatial distribution. By now, the name has become
slightly misleading since there are other samplers in Mitsuba that just as
much try to minimize discrepancy, namely the Sobol and Halton plugins.
Like the independent sampler, multicore and network renderings will generally
produce different images in subsequent runs due to the nondeterminism
introduced by the operating system scheduler.
This plugin implements a Quasi-Monte Carlo (QMC) sample generator based on the
Hammersley sequence. QMC number sequences are designed to reduce sample
clumping across integration dimensions, which can lead to a higher order of
convergence in renderings. Because of the deterministic character of the
samples, errors will manifest as grid or moiré patterns rather than random
noise, but these diminish as the number of samples is increased.
The Hammersley sequence is closely related to the Halton sequence and yields a
very high quality point set that is slightly more regular (and has lower
discrepancy), especially in the first few dimensions. As is the case with the
Halton sequence, the points should be scrambled to reduce patterns that
manifest due to correlations in higher dimensions.
Note that this sampler will cause odd-looking intermediate results when
combined with rendering techniques that trace paths starting at light sources
(e.g. the adjoint particle tracer)—these vanish by the time the rendering
process finishes.
This plugin implements a Quasi-Monte Carlo (QMC) sample generator based on the
Halton sequence. QMC number sequences are designed to reduce sample clumping
across integration dimensions, which can lead to a higher order of convergence
in renderings. Because of the deterministic character of the samples, errors
will manifest as grid or moiré patterns rather than random noise, but these
diminish as the number of samples is increased.
The Halton sequence in particular provides a very high quality point set that
unfortunately becomes increasingly correlated in higher dimensions. To
ameliorate this problem, the Halton points are usually combined with a
scrambling permutation, and this is also the default. Because everything that
happens inside this sampler is completely deterministic and independent of
operating system scheduling behavior, subsequent runs of Mitsuba will always
compute the same image, and this even holds when rendering with multiple
threads and/or machines.
By default, the implementation provides a scrambled variant of the Halton
sequence based on permutations by Faure that has better equidistribution
properties in high dimensions, but this can be changed using the scramble
parameter. Internally, the plugin uses a table of prime numbers to provide
elements of the Halton sequence up to a dimension of 1024. Because of this
upper bound, the maximum path depth of the integrator must be limited (e.g. to
100), or rendering might fail with the following error message: Lookup
dimension exceeds the prime number table size! You may have to reduce the
'maxDepth' parameter of your integrator.
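A sketch that pairs the Halton sampler with a depth-limited integrator so that the prime table bound mentioned above is never exceeded (the plugin names are assumptions; the sample count is illustrative):

    <sampler type="halton">
        <integer name="sampleCount" value="64"/>
    </sampler>
    <integrator type="path">
        <!-- Keep the path depth bounded to stay within the 1024-dimension limit -->
        <integer name="maxDepth" value="100"/>
    </integrator>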
This plugin implements a Quasi-Monte Carlo (QMC) sample generator based on the
Sobol sequence. QMC number sequences are designed to reduce sample clumping
across integration dimensions, which can lead to a higher order of convergence
in renderings. Because of the deterministic character of the samples, errors
will manifest as grid or moiré patterns rather than random noise, but these
diminish as the number of samples is increased.
@@ -976,22 +976,22 @@
in the generated image. To minimize these artifacts, it is advisable to use a
number of samples per pixel that is a power of two. Because everything that
happens inside this sampler is completely deterministic and independent of
operating system scheduling behavior, subsequent runs of Mitsuba will always
compute the same image, and this even holds when rendering with multiple
threads and/or machines.
The plugin relies on a fast implementation of the Sobol sequence by Leonhard
Grünschloß using direction numbers provided by Joe and Kuo. These direction
numbers are given up to a dimension of 1024. Because of this upper bound, the
maximum path depth of the integrator must be limited (e.g. to 100), or
rendering might fail with the following error message: Lookup dimension
exceeds the direction number table size! You may have to reduce the 'maxDepth'
parameter of your integrator.