chi-square test on the sphere, improved microflake code

metadata
Wenzel Jakob 2011-04-14 02:46:01 +02:00
parent 9aa5c13daa
commit d3ad21f0da
7 changed files with 405 additions and 854 deletions

View File

@ -0,0 +1,278 @@
/*
This file is part of Mitsuba, a physically based rendering system.
Copyright (c) 2007-2010 by Wenzel Jakob and others.
Mitsuba is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 3
as published by the Free Software Foundation.
Mitsuba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#if !defined(__CHI_SQUARE_TEST_H)
#define __CHI_SQUARE_TEST_H
#include <mitsuba/core/fresolver.h>
#include <mitsuba/core/quad.h>
#include <mitsuba/core/timer.h>
#include <mitsuba/core/random.h>
#include <boost/bind.hpp>
#include <boost/math/distributions/chi_squared.hpp>
MTS_NAMESPACE_BEGIN
/**
* \brief Chi-square goodness-of-fit test on the sphere
*
* This class performs a chi-square goodness-of-fit test of the null hypothesis
* that a specified sampling procedure produces samples that are distributed
* according to a supplied density function. This is very useful for verifying
* the correctness of BRDF and phase function sampling code.
*
* This implementation works by generating a large batch of samples, which are
* then accumulated into rectangular bins in spherical coordinates. To obtain
* reference bin counts, the provided density function is numerically
* integrated over the area of each bin. Comparing the actual and reference
* bin counts yields the desired test statistic.
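*
* To make this precise (a sketch of the underlying statistic): if O_i denotes the
* observed number of samples in bin i and E_i = sampleCount * \int_{bin i} pdf(w) dw
* is the expected count obtained by numerical integration, the statistic is
* \chi^2 = \sum_i (O_i - E_i)^2 / E_i, computed after pooling bins with very low
* expected counts (see \ref runTest()).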
*
* Given a BSDF/phase function with the following interface
* <code>
* class MyDistribution {
* Vector sample(const Point2 &uniformSample) const;
* Float pdf(const Vector &direction) const;
* };
* </code>
*
* the code in this class might be used as follows
*
* <code>
* MyDistribution myDistrInstance;
* ChiSquareTest chiSqr;
*
* // Initialize the tables used by the chi-square test
* chiSqr.fill(
* boost::bind(&MyDistribution::sample, myDistrInstance, _1),
* boost::bind(&MyDistribution::pdf, myDistrInstance, _1)
* );
*
* // Optional: dump the tables to a MATLAB file for external analysis
* chiSqr.dumpTables("debug.m");
*
* // (the following assumes that the distribution has one parameter, e.g. an exponent value)
* if (!chiSqr.runTest(1))
* Log(EError, "Uh oh -- test failed, the implementation is probably incorrect!");
* </code>
*/
class ChiSquareTest {
public:
/**
* \brief Create a new Chi-square test instance with the given
* resolution and sample count
*
* \param thetaBins
* Number of bins wrt. latitude. The default is 10
*
* \param phiBins
* Number of bins wrt. azimuth. The default is to use
* twice the number of \c thetaBins
*
* \param sampleCount
* Number of samples to be used when computing the bin
* values. The default is \c thetaBins*phiBins*1000
*/
ChiSquareTest(int thetaBins = 10, int phiBins = 0, size_t sampleCount = 0)
: m_thetaBins(thetaBins), m_phiBins(phiBins), m_sampleCount(sampleCount) {
if (m_phiBins == 0)
m_phiBins = 2*m_thetaBins;
if (m_sampleCount == 0)
m_sampleCount = m_thetaBins * m_phiBins * 1000;
m_table = new uint32_t[m_thetaBins*m_phiBins];
m_refTable = new Float[m_thetaBins*m_phiBins];
}
/// Release all memory
~ChiSquareTest() {
delete[] m_table;
delete[] m_refTable;
}
/**
* \brief Fill the actual and reference bin counts
*
* Please see the class documentation for a description
* of how to invoke this function
*/
void fill(
const boost::function<Vector (const Point2 &)> &sampleFn,
const boost::function<Float (const Vector &)> &pdfFn) {
ref<Random> random = new Random();
memset(m_table, 0, m_thetaBins*m_phiBins*sizeof(uint32_t));
SLog(EInfo, "Accumulating " SIZE_T_FMT " samples into a %ix%i"
" contingency table", m_sampleCount, m_thetaBins, m_phiBins);
Point2 factor(m_thetaBins / M_PI, m_phiBins / (2*M_PI));
ref<Timer> timer = new Timer();
for (size_t i=0; i<m_sampleCount; ++i) {
Point2 sample(random->nextFloat(), random->nextFloat());
Point2 sphCoords = toSphericalCoordinates(sampleFn(sample));
int thetaBin = std::min(std::max(0,
floorToInt(sphCoords.x * factor.x)), m_thetaBins-1);
int phiBin = std::min(std::max(0,
floorToInt(sphCoords.y * factor.y)), m_phiBins-1);
++m_table[thetaBin * m_phiBins + phiBin];
}
SLog(EInfo, "Done, took %i ms.", timer->getMilliseconds());
factor = Point2(M_PI / m_thetaBins, (2*M_PI) / m_phiBins);
SLog(EInfo, "Integrating reference contingency table");
timer->reset();
Float min[2], max[2];
size_t idx = 0;
NDIntegrator integrator(1, 2, 50000, 0, 1e-6f);
Float maxError = 0, integral = 0;
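/* Compute the expected count for each bin by numerically integrating the
* supplied density over the bin's (theta, phi) rectangle and scaling the
* result by the total number of samples. */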
for (int i=0; i<m_thetaBins; ++i) {
min[0] = i * factor.x;
max[0] = (i+1) * factor.x;
for (int j=0; j<m_phiBins; ++j) {
min[1] = j * factor.y;
max[1] = (j+1) * factor.y;
Float result, error;
size_t evals;
integrator.integrateVectorized(
boost::bind(&ChiSquareTest::integrand, pdfFn, _1, _2, _3),
min, max, &result, &error, evals
);
integral += result;
m_refTable[idx++] = result * m_sampleCount;
maxError = std::max(maxError, error);
}
}
SLog(EInfo, "Done, took %i ms (max error = %f, integral=%f).",
timer->getMilliseconds(), maxError, integral);
}
/**
* \brief Dump the bin counts to a file using MATLAB format
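*
* A sketch of how the output might be used (assuming MATLAB is available):
* the file defines two matrices, \c tbl_counts and \c tbl_ref, so e.g.
* <tt>imagesc(tbl_counts - tbl_ref)</tt> visualizes the per-bin discrepancy
* between observed and expected counts.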
*/
void dumpTables(const fs::path &filename) {
fs::ofstream out(filename);
out << "tbl_counts = [ ";
for (int i=0; i<m_thetaBins; ++i) {
for (int j=0; j<m_phiBins; ++j) {
out << m_table[i*m_phiBins+j];
if (j+1 < m_phiBins)
out << ", ";
}
if (i+1 < m_thetaBins)
out << "; ";
}
out << " ];" << endl
<< "tbl_ref = [ ";
for (int i=0; i<m_thetaBins; ++i) {
for (int j=0; j<m_phiBins; ++j) {
out << m_refTable[i*m_phiBins+j];
if (j+1 < m_phiBins)
out << ", ";
}
if (i+1 < m_thetaBins)
out << "; ";
}
out << " ];" << endl;
out.close();
}
/**
* \brief Perform the actual chi-square test
*
* \param distParams
* Number of parameters of the distribution in question.
* Any such quantity (e.g. a lobe width or an index of
* refraction) should be counted.
*
* \param pvalThresh
* The implementation will reject the null hypothesis
* when the computed p-value lies below this parameter
* (default: 0.01f)
*
* \return \c false if the null hypothesis was rejected
* and \c true otherwise.
*/
bool runTest(int distParams, Float pvalThresh = 0.01f) {
/* Compute the chi-square statistic */
Float chsq = 0.0f;
Float pooledCounts = 0, pooledRef = 0;
int pooledCells = 0;
int df = 0;
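/* Bins with a very low expected count (< 5) cause the chi-square
* approximation to break down; following common practice, such bins
* are pooled into a single aggregate bin below. */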
for (int i=0; i<m_thetaBins*m_phiBins; ++i) {
if (m_refTable[i] < 5) {
pooledCounts += m_table[i];
pooledRef += m_refTable[i];
++pooledCells;
} else {
Float diff = m_table[i]-m_refTable[i];
chsq += (diff*diff) / m_refTable[i];
++df;
}
}
if (pooledCells > 0) {
SLog(EInfo, "Pooled %i cells with an expected "
"number of < 5 entries!", pooledCells);
if (pooledRef < 5) {
SLog(EWarn, "Even after pooling, the expected "
"number of entries is < 5 (%f), expect badness!",
pooledRef);
}
Float diff = pooledCounts - pooledRef;
chsq += (diff*diff) / pooledRef;
++df;
}
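/* Degrees of freedom: one per contributing cell (including the pooled
* cell, if any), minus one, minus the number of estimated distribution
* parameters. */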
df -= distParams + 1;
SLog(EInfo, "Chi-square statistic = %e (df=%i)", chsq, df);
boost::math::chi_squared chSqDist(df);
/* Probability of obtaining a test statistic at least
as extreme as the one observed under the assumption
that the distributions match */
Float pval = 1 - boost::math::cdf(chSqDist, chsq);
SLog(EInfo, "P-value = %e", pval);
if (pval < pvalThresh) {
SLog(EWarn, "Rejecting the null hypothesis");
return false;
}
return true;
}
protected:
static void integrand(
const boost::function<Float (const Vector &)> &pdfFn,
size_t nPts, const Float *in, Float *out) {
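/* The supplied density is defined per unit solid angle; multiplying by
* sin(theta) yields the corresponding density on the (theta, phi)
* integration rectangle, since d_omega = sin(theta) dtheta dphi. */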
#pragma omp parallel for
for (size_t i=0; i<nPts; ++i)
out[i] = pdfFn(sphericalDirection(in[2*i], in[2*i+1])) * std::sin(in[2*i]);
}
private:
int m_thetaBins, m_phiBins;
size_t m_sampleCount;
uint32_t *m_table;
Float *m_refTable;
};
MTS_NAMESPACE_END
#endif /* __CHI_SQUARE_TEST_H */

View File

@ -88,76 +88,8 @@ namespace std {
};
using std::select2nd;
using std::compose1;
#endif
/* Forward declarations */
MTS_NAMESPACE_BEGIN
extern MTS_EXPORT_CORE void * __restrict allocAligned(size_t size);
extern MTS_EXPORT_CORE void freeAligned(void *ptr);
/**
* \brief Aligned memory allocator for use with SSE2-based code
* \headerfile mitsuba/core/stl.h mitsuba/mitsuba.h
*
* Basic implementation, which forwards all calls to \ref allocAligned.
*/
template <typename T> class aligned_allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
/// \cond
template <class U> struct rebind {
typedef aligned_allocator<U> other;
};
/// \endcond
pointer address (reference value) const {
return &value;
}
const_pointer address (const_reference value) const {
return &value;
}
aligned_allocator() throw() { }
aligned_allocator(const aligned_allocator&) throw() { }
template <class U> aligned_allocator (const aligned_allocator<U>&) throw() { }
~aligned_allocator() throw() { }
size_type max_size () const throw() {
return INT_MAX;
}
T* __restrict allocate (size_type num, const_pointer *hint = 0) {
return (T *) mitsuba::allocAligned(num*sizeof(T));
}
void construct (pointer p, const T& value) {
*p=value;
}
void destroy (pointer p) {
p->~T();
};
void deallocate (pointer p, size_type num) {
freeAligned(p);
}
};
MTS_NAMESPACE_END
#if defined(WIN32)
inline bool mts_isnan(float f) {
int classification = ::_fpclass(f);

View File

@ -616,10 +616,13 @@ Point2 squareToTriangle(const Point2 &sample) {
}
Point2 toSphericalCoordinates(const Vector &v) {
return Point2(
Point2 result(
std::acos(v.z),
std::atan2(v.y, v.x)
);
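/* atan2 returns azimuth values in [-pi, pi]; map negative values to
* [0, 2*pi) so that callers (e.g. the chi-square binning code) see a
* contiguous azimuth range. */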
if (result.y < 0)
result.y += 2*M_PI;
return result;
}
Point2 squareToDiskConcentric(const Point2 &sample) {

View File

@ -1,736 +0,0 @@
/*
This file is part of Mitsuba, a physically based rendering system.
Copyright (c) 2007-2010 by Wenzel Jakob and others.
Mitsuba is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 3
as published by the Free Software Foundation.
Mitsuba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mitsuba/render/scene.h>
#include <mitsuba/render/volume.h>
#include <mitsuba/core/fstream.h>
#include <mitsuba/core/brent.h>
#include <mitsuba/core/shvector4d.h>
#include <mitsuba/core/statistics.h>
#include <fstream>
/*
* When inverting an integral of the form f(t) := \int_0^t [...] dx
* using composite Simpson's rule quadrature, the following constant
* specifies how many times the step size will be reduced when we
* have almost reached the desired value.
*/
#define NUM_REFINES 8
/**
* Allows the density integration to terminate early once the
* segment's throughput falls below 'Epsilon'
*/
#define HET_EARLY_EXIT 1
/// Header identification record for phase function caches
#define PHASE_FUNCTION_HEADER 0x40044004
#define USE_REJECTION_SAMPLING 1
#define USE_COSTHETA_DISTR 1
MTS_NAMESPACE_BEGIN
static StatsCounter totalSamples("Micro-flake model", "Sample generations");
static StatsCounter avgSampleIterations("Micro-flake model", "Average rejection sampling iterations", EAverage);
static StatsCounter brentSolves("Micro-flake model", "Brent solver calls");
static StatsCounter avgBrentFunEvals("Micro-flake model", "Average Brent solver function evaluations", EAverage);
struct AbsCos {
Float operator()(const Vector &w) const { return std::abs(w.z); }
};
#define USE_COSTHETA_DISTR 1
enum EMode {
ESinP = 0,
ECosP
};
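/* ESinP concentrates flake normals near the equator (D(m) ~ sin^p(theta)),
* which roughly corresponds to fiber-like media; ECosP concentrates them
* near the poles (D(m) ~ |cos(theta)|^p). */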
struct FlakeDistr {
Float exp;
EMode mode;
FlakeDistr() { }
FlakeDistr(EMode mode, Float exp) : exp(exp), mode(mode) { }
inline Float operator()(const Vector &w) const {
switch (mode) {
case ESinP:
return std::pow(1-std::pow(w.z, 2), exp/2.0f);
case ECosP:
return std::pow(std::abs(w.z), exp);
default:
SLog(EError, "Invalid flake distribution!");
return 0.0f;
}
}
};
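/* Unnormalized phase function kernel: the flake distribution evaluated at
* the two half-way directions +H and -H (cf. the specular micro-flake model). */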
template <typename Distr> struct FlakePhaseFunctor {
Distr d;
FlakePhaseFunctor(Distr d) : d(d) { }
inline Float operator()(const Vector &wi, const Vector &wo) const {
Vector H = normalize(wi + wo);
return d(H) + d(-H);
}
};
class HeterogeneousFlakeMedium;
class FlakePhaseFunction : public PhaseFunction {
public:
FlakePhaseFunction(const SHVector &sigmaT, int samplingRecursions, const SHVector4D *phaseExpansion,
Float exponent, Float normalization, EMode mode, HeterogeneousFlakeMedium *medium) : PhaseFunction(Properties()),
m_sigmaT(sigmaT), m_samplingRecursions(samplingRecursions), m_distr(mode, exponent), m_phaseExpansion(phaseExpansion),
m_exponent(exponent), m_normalization(normalization), m_mode(mode), m_medium(medium) {
initialize();
}
FlakePhaseFunction(Stream *stream, InstanceManager *manager) :
PhaseFunction(stream, manager) {
/* Not implemented */
}
void serialize(Stream *stream, InstanceManager *manager) const {
PhaseFunction::serialize(stream, manager);
/* Not implemented */
}
void initialize() {
m_shSampler = new SHSampler(m_phaseExpansion->getBands(), m_samplingRecursions);
Log(EInfo, "Constructing a SH sampler: %s", m_shSampler->toString().c_str());
}
virtual ~FlakePhaseFunction() { }
/* Implementations are below */
Spectrum f(const PhaseFunctionQueryRecord &pRec) const;
Spectrum sample(PhaseFunctionQueryRecord &pRec,
Float &pdf, Sampler *sampler) const;
Spectrum sample(PhaseFunctionQueryRecord &pRec,
Sampler *sampler) const;
Float pdf(const PhaseFunctionQueryRecord &pRec) const;
std::string toString() const {
return "FlakePhaseFunction[]";
}
MTS_DECLARE_CLASS()
private:
SHVector m_sigmaT;
ref<SHSampler> m_shSampler;
int m_samplingRecursions;
FlakeDistr m_distr;
const SHVector4D *m_phaseExpansion;
mutable PrimitiveThreadLocal<SHVector> m_temp;
Float m_exponent;
Float m_normalization;
EMode m_mode;
const HeterogeneousFlakeMedium *m_medium;
};
class HeterogeneousFlakeMedium : public Medium {
public:
HeterogeneousFlakeMedium(const Properties &props)
: Medium(props) {
m_stepSize = props.getFloat("stepSize", 0);
/* Flake model configuration */
int bands = props.getInteger("bands", 7);
int phaseBands = props.getInteger("phaseBands", bands);
Assert(bands > 0 && phaseBands > 0);
m_samplingRecursions = props.getInteger("samplingRecursions", 10);
std::string mode = props.getString("mode", "sinp");
if (mode == "sinp") {
m_mode = ESinP;
} else if (mode == "cosp") {
m_mode = ECosP;
} else {
Log(EError, "Unknown mode: %s!", mode.c_str());
}
const int numSamples = 80;
ref<FileStream> stream;
SHVector absCos = SHVector(bands);
absCos.project(AbsCos(), numSamples);
Float absCosError = absCos.l2Error(AbsCos(), numSamples);
m_exponent = props.getFloat("exponent", 4);
FlakeDistr distrFunc(m_mode, m_exponent);
m_D = SHVector(bands);
m_D.project(distrFunc, numSamples);
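/* The SH coefficient c_00 times 2*sqrt(pi) equals the integral of D over
* the sphere, so the factor below normalizes D to unit integral. */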
m_normalization = 1 / (m_D(0, 0) * 2 * std::sqrt(M_PI));
Float dError = m_D.l2Error(distrFunc, numSamples);
m_D.normalize();
Log(EInfo, "======= Flake medium properties =======");
Log(EInfo, " distribution type = %s", mode.c_str());
Log(EInfo, " distribution exponent = %f", m_exponent);
Log(EInfo, " SH bands (D, sigmaT) = %i", bands);
Log(EInfo, " SH bands (phase fct) = %i", phaseBands);
Log(EInfo, " |cos| projection error = %f", absCosError);
Log(EInfo, " D projection error = %f", dError);
Log(EInfo, " sampling recursions = %i", m_samplingRecursions);
bool computePhaseProjection = true;
if (fs::exists("flake-phase.dat")) {
/* Avoid recomputing this every time */
stream = new FileStream("flake-phase.dat", FileStream::EReadOnly);
unsigned int header = stream->readUInt();
int nBands = stream->readInt();
int mode = stream->readInt();
Float exponent = stream->readFloat();
if (header == PHASE_FUNCTION_HEADER && nBands == phaseBands
&& exponent == m_exponent && mode == m_mode) {
m_phaseExpansion = SHVector4D(stream);
computePhaseProjection = false;
}
stream->close();
}
if (computePhaseProjection) {
int res = 50;
FlakePhaseFunctor<FlakeDistr> phaseFunctor(distrFunc);
m_phaseExpansion = SHVector4D(res, 2*res, phaseBands);
Log(EDebug, "Phase function parameterization: %s", m_phaseExpansion.toString().c_str());
m_phaseExpansion.project(phaseFunctor, numSamples);
m_phaseExpansion.normalize();
stream = new FileStream("flake-phase.dat", FileStream::ETruncReadWrite);
stream->writeInt(PHASE_FUNCTION_HEADER);
stream->writeInt(phaseBands);
stream->writeInt(m_mode);
stream->writeFloat(m_exponent);
m_phaseExpansion.serialize(stream);
stream->close();
}
for (int i=0; i<phaseBands; ++i)
Log(EInfo, "Energy in phase function band %i: %f", i, m_phaseExpansion.energy(i));
/* Compute sigma_t except for the a*rho factor */
m_sigmaT = SHVector(m_D);
m_sigmaT.convolve(absCos);
Assert(m_sigmaT.isAzimuthallyInvariant());
m_kdTree = new ShapeKDTree();
}
/* Unserialize from a binary data stream */
HeterogeneousFlakeMedium(Stream *stream, InstanceManager *manager)
: Medium(stream, manager) {
m_kdTree = new ShapeKDTree();
size_t shapeCount = stream->readUInt();
for (size_t i=0; i<shapeCount; ++i)
addChild("", static_cast<Shape *>(manager->getInstance(stream)));
m_densities = static_cast<VolumeDataSource *>(manager->getInstance(stream));
m_albedo = static_cast<VolumeDataSource *>(manager->getInstance(stream));
m_orientations = static_cast<VolumeDataSource *>(manager->getInstance(stream));
m_stepSize = stream->readFloat();
m_exponent = stream->readFloat();
m_normalization = stream->readFloat();
m_mode = (EMode) stream->readInt();
m_D = SHVector(stream);
m_sigmaT = SHVector(stream);
m_phaseExpansion = SHVector4D(stream);
m_samplingRecursions = stream->readInt();
configure();
}
virtual ~HeterogeneousFlakeMedium() {
for (size_t i=0; i<m_shapes.size(); ++i)
m_shapes[i]->decRef();
}
/* Serialize the volume to a binary data stream */
void serialize(Stream *stream, InstanceManager *manager) const {
Medium::serialize(stream, manager);
stream->writeUInt((uint32_t) m_shapes.size());
for (size_t i=0; i<m_shapes.size(); ++i)
manager->serialize(stream, m_shapes[i]);
manager->serialize(stream, m_densities.get());
manager->serialize(stream, m_albedo.get());
manager->serialize(stream, m_orientations.get());
stream->writeFloat(m_stepSize);
stream->writeFloat(m_exponent);
stream->writeFloat(m_normalization);
stream->writeInt(m_mode);
m_D.serialize(stream);
m_sigmaT.serialize(stream);
m_phaseExpansion.serialize(stream);
stream->writeInt(m_samplingRecursions);
}
void configure() {
Medium::configure();
if (m_densities.get() == NULL)
Log(EError, "No densities specified!");
if (m_albedo.get() == NULL)
Log(EError, "No albedo specified!");
if (m_orientations.get() == NULL)
Log(EError, "No orientations specified!");
if (m_stepSize == 0) {
m_stepSize = std::min(std::min(
m_densities->getStepSize(),
m_albedo->getStepSize()),
m_orientations->getStepSize());
if (m_stepSize == std::numeric_limits<Float>::infinity())
Log(EError, "Unable to infer step size, please specify!");
}
m_phaseFunction = new FlakePhaseFunction(m_sigmaT, m_samplingRecursions,
&m_phaseExpansion, m_exponent, m_normalization, m_mode, this);
if (m_shapes.size() > 0) {
m_kdTree->build();
m_aabb = m_kdTree->getAABB();
} else {
m_aabb = m_densities->getAABB();
}
}
void setParent(ConfigurableObject *parent) {
if (parent->getClass()->derivesFrom(MTS_CLASS(Shape)))
Log(EError, "Medium cannot be a parent of a shape");
}
void addChild(const std::string &name, ConfigurableObject *child) {
if (child->getClass()->derivesFrom(MTS_CLASS(Shape))) {
Shape *shape = static_cast<Shape *>(child);
if (shape->isCompound()) {
int ctr = 0;
while (true) {
ref<Shape> childShape = shape->getElement(ctr++);
if (!childShape)
break;
addChild("", childShape);
}
} else {
m_kdTree->addShape(shape);
shape->incRef();
m_shapes.push_back(shape);
}
} else if (child->getClass()->derivesFrom(MTS_CLASS(VolumeDataSource))) {
VolumeDataSource *volume = static_cast<VolumeDataSource *>(child);
if (name == "albedo") {
Assert(volume->supportsSpectrumLookups());
m_albedo = volume;
} else if (name == "density") {
Assert(volume->supportsFloatLookups());
m_densities = volume;
} else if (name == "orientations") {
Assert(volume->supportsVectorLookups());
m_orientations = volume;
}
} else {
Medium::addChild(name, child);
}
}
/**
* \brief Intersect a ray with the stencil and
* determine whether it started on the inside
*/
inline bool rayIntersect(const Ray &ray, Float &t, bool &inside) const {
if (m_shapes.size() != 0) {
Intersection its;
if (!m_kdTree->rayIntersect(ray, its))
return false;
inside = dot(its.geoFrame.n, ray.d) > 0;
t = its.t;
} else {
Float mint, maxt;
if (!m_aabb.rayIntersect(ray, mint, maxt))
return false;
if (mint <= ray.mint && maxt <= ray.mint)
return false;
inside = mint <= 0 && maxt > 0;
t = (mint <= 0) ? maxt : mint;
}
return true;
}
inline Vector lookupOrientation(const Point &p) const {
Vector orientation = m_orientations->lookupVector(p);
Float lengthSqr = orientation.lengthSquared();
if (lengthSqr != 0)
return orientation / std::sqrt(lengthSqr);
else
return Vector(0.0f);
}
inline Float sigmaT(const Point &p, const Vector &d) const {
/**
* Double the densities so that a uniform flake distribution
* reproduces isotropic scattering
*/
Float density = 2 * m_densities->lookupFloat(p) * m_densityMultiplier;
if (density == 0)
return 0.0f;
Vector orientation = lookupOrientation(p);
if (orientation == Vector(0.0f))
return 0.0f;
Vector localD = Frame(orientation).toLocal(d);
return density * m_sigmaT.evalAzimuthallyInvariant(localD);
}
/// Integrate densities using the composite Simpson's rule
inline Float integrateDensity(Ray ray, Float length) const {
if (length == 0)
return 0.0f;
int nParts = (int) std::ceil(length/m_stepSize);
nParts += nParts % 2;
const Float stepSize = length/nParts;
const Vector increment = ray.d * stepSize;
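/* Composite Simpson's rule: interior nodes alternate between weights 4
* and 2 (toggled via m = 6 - m below); the two endpoint contributions
* (weight 1) were added above, and the common factor stepSize/3 is
* applied after the loop. */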
Float m = 4;
Float accumulatedDensity =
sigmaT(ray.o, ray.d) + sigmaT(ray(length), ray.d);
#ifdef HET_EARLY_EXIT
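/* Optical depth beyond which the throughput drops below Epsilon; the
* comparison below happens before the final stepSize/3 scaling, hence
* the compensating factor of 3/stepSize. */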
const Float stopAfterDensity = -std::log(Epsilon);
const Float stopValue = stopAfterDensity*3/stepSize;
#endif
ray.o += increment;
for (int i=1; i<nParts; ++i) {
Float value = sigmaT(ray.o, ray.d);
accumulatedDensity += value*m;
Point tmp = ray.o;
ray.o += increment;
if (ray.o == tmp) {
Log(EWarn, "integrateDensity(): failed to make forward progress -- "
"round-off error issues? The step size was %.18f", stepSize);
break;
}
#ifdef HET_EARLY_EXIT
if (accumulatedDensity > stopValue) // Stop early
return std::numeric_limits<Float>::infinity();
#endif
m = 6 - m;
}
accumulatedDensity *= stepSize/3;
return accumulatedDensity;
}
/**
* \brief Integrate the density covered by the specified ray
* segment, while clipping the volume to the underlying
* stencil volume.
*/
inline Float integrateDensity(const Ray &r) const {
Ray ray(r(r.mint), r.d,
0, std::numeric_limits<Float>::infinity(), 0.0f);
Float integral = 0, remaining = r.maxt - r.mint;
int iterations = 0;
bool inside = false;
Float t = 0;
while (remaining > 0 && rayIntersect(ray, t, inside)) {
if (inside)
integral += integrateDensity(ray, std::min(remaining, t));
remaining -= t;
ray.o = ray(t);
ray.mint = Epsilon;
if (++iterations > 100) {
/// Just a precaution..
Log(EWarn, "integrateDensity(): round-off error issues?");
break;
}
}
return integral;
}
/**
* Attempts to solve the following 1D integral equation for 't'
* \int_0^t density(ray.o + x * ray.d) * dx + accumulatedDensity == desiredDensity.
* When no solution can be found in [0, maxDist] the function returns
* false. For convenience, the function returns the current values of sigmaT
* and the albedo, as well as the 3D position 'ray.o+t*ray.d' upon
* success.
*/
bool invertDensityIntegral(Ray ray, Float maxDist,
Float &accumulatedDensity, Float desiredDensity,
Float &currentSigmaT, Spectrum &currentAlbedo,
Point &currentPoint) const {
if (maxDist == 0)
return false;
int nParts = (int) std::ceil(maxDist/m_stepSize);
Float stepSize = maxDist/nParts;
Vector fullIncrement = ray.d * stepSize,
halfIncrement = fullIncrement * .5f;
Float t = 0;
int numRefines = 0;
bool success = false;
Float node1 = sigmaT(ray.o, ray.d);
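/* March along the ray, accumulating density with Simpson's rule on each
* step. Once a step would overshoot the desired density, halve the step
* size and retry, up to NUM_REFINES times, to localize the scattering
* location. */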
while (t <= maxDist) {
Float node2 = sigmaT(ray.o + halfIncrement, ray.d);
Float node3 = sigmaT(ray.o + fullIncrement, ray.d);
const Float newDensity = accumulatedDensity +
(node1+node2*4+node3) * (stepSize/6);
if (newDensity > desiredDensity) {
if (t+stepSize/2 <= maxDist) {
/* Record the last "good" scattering event */
success = true;
if (EXPECT_TAKEN(node2 != 0)) {
currentPoint = ray.o + halfIncrement;
currentSigmaT = node2;
} else if (node3 != 0 && t+stepSize <= maxDist) {
currentPoint = ray.o + fullIncrement;
currentSigmaT = node3;
} else if (node1 != 0) {
currentPoint = ray.o;
currentSigmaT = node1;
}
}
if (++numRefines > NUM_REFINES)
break;
stepSize *= .5f;
fullIncrement = halfIncrement;
halfIncrement *= .5f;
continue;
}
if (ray.o+fullIncrement == ray.o) /* Unable to make forward progress - stop */
break;
accumulatedDensity = newDensity;
node1 = node3;
t += stepSize;
ray.o += fullIncrement;
}
if (success)
currentAlbedo = m_albedo->lookupSpectrum(currentPoint);
return success;
}
/// Evaluate the transmittance along a ray segment
Spectrum tau(const Ray &ray) const {
return Spectrum(std::exp(-integrateDensity(ray)));
}
bool sampleDistance(const Ray &r, MediumSamplingRecord &mRec,
Sampler *sampler) const {
Ray ray(r(r.mint), r.d, 0.0f);
Float distSurf = r.maxt - r.mint,
desiredDensity = -std::log(1-sampler->next1D()),
accumulatedDensity = 0.0f,
currentSigmaT = 0.0f;
Spectrum currentAlbedo(0.0f);
int iterations = 0;
bool inside = false, success = false;
Float t = 0;
Float sigmaTOrigin = sigmaT(ray.o, ray.d);
while (rayIntersect(ray, t, inside)) {
if (inside) {
Point currentPoint(0.0f);
success = invertDensityIntegral(ray, std::min(t, distSurf),
accumulatedDensity, desiredDensity, currentSigmaT, currentAlbedo,
currentPoint);
if (success) {
/* A medium interaction occurred */
mRec.p = currentPoint;
mRec.t = (mRec.p-r.o).length();
success = true;
break;
}
}
distSurf -= t;
if (distSurf < 0) {
/* A surface interaction occurred */
break;
}
ray.o = ray(t);
ray.mint = Epsilon;
if (++iterations > 100) {
/// Just a precaution..
Log(EWarn, "sampleDistance(): round-off error issues?");
break;
}
}
Float expVal = std::max(Epsilon, std::exp(-accumulatedDensity));
if (success)
mRec.orientation = lookupOrientation(mRec.p);
mRec.pdfFailure = expVal;
mRec.pdfSuccess = expVal * std::max((Float) Epsilon, currentSigmaT);
mRec.pdfSuccessRev = expVal * std::max((Float) Epsilon, sigmaTOrigin);
mRec.transmittance = Spectrum(expVal);
if (!success)
return false;
mRec.sigmaS = currentAlbedo * currentSigmaT;
mRec.sigmaA = Spectrum(currentSigmaT) - mRec.sigmaS;
mRec.albedo = currentAlbedo.max();
mRec.medium = this;
return true;
}
void pdfDistance(const Ray &ray, Float t, MediumSamplingRecord &mRec) const {
Float expVal = std::exp(-integrateDensity(Ray(ray, 0, t)));
mRec.transmittance = Spectrum(expVal);
mRec.pdfFailure = expVal;
mRec.pdfSuccess = expVal * std::max((Float) Epsilon,
sigmaT(ray(t), ray.d));
mRec.pdfSuccessRev = expVal * std::max((Float) Epsilon,
sigmaT(ray(ray.mint), ray.d));
}
std::string toString() const {
std::ostringstream oss;
oss << "HeterogeneousFlakeMedium[" << endl
<< " albedo=" << indent(m_albedo.toString()) << "," << endl
<< " orientations=" << indent(m_orientations.toString()) << "," << endl
<< " densities=" << indent(m_densities.toString()) << "," << endl
<< " stepSize=" << m_stepSize << "," << endl
<< " densityMultiplier=" << m_densityMultiplier << endl
<< "]";
return oss.str();
}
MTS_DECLARE_CLASS()
private:
ref<VolumeDataSource> m_densities;
ref<VolumeDataSource> m_albedo;
ref<VolumeDataSource> m_orientations;
ref<ShapeKDTree> m_kdTree;
std::vector<Shape *> m_shapes;
Float m_stepSize;
/* Flake model-related */
SHVector m_D, m_sigmaT;
SHVector4D m_phaseExpansion;
int m_samplingRecursions;
EMode m_mode;
Float m_exponent, m_normalization;
};
Spectrum FlakePhaseFunction::f(const PhaseFunctionQueryRecord &pRec) const {
Frame frame(pRec.mRec.orientation);
Vector wi = frame.toLocal(pRec.wi);
Vector wo = frame.toLocal(pRec.wo);
#if defined(FLAKE_EVAL_APPROXIMATION)
/* Evaluate spherical harmonics representation - might be inaccurate */
SHVector temp(m_phaseExpansion->getBands());
m_phaseExpansion->lookup(wi, temp);
Float result = std::max((Float) 0, temp.eval(wo));
return Spectrum(result);
#else
/* Evaluate the real phase function */
Float sigmaT = m_sigmaT.evalAzimuthallyInvariant(wi);
Vector H = wi + wo;
Float length = H.length();
if (length == 0)
return Spectrum(0.0f);
else
H /= length;
Float result = m_distr(H) / (2*sigmaT);
result *= m_normalization;
return Spectrum(result);
#endif
}
Spectrum FlakePhaseFunction::sample(PhaseFunctionQueryRecord &pRec,
Float &_pdf, Sampler *sampler) const {
#if defined(FLAKE_SAMPLE_UNIFORM)
/* Uniform sampling */
pRec.wo = squareToSphere(sampler->next2D());
_pdf = 1/(4*M_PI);
return f(pRec);
#else
Point2 sample(sampler->next2D());
Frame frame(pRec.mRec.orientation);
Vector wi = frame.toLocal(pRec.wi);
/* Importance sampling using the interpolated phase function */
SHVector &temp = m_temp.get();
if (EXPECT_NOT_TAKEN(temp.getBands() == 0))
temp = SHVector(m_phaseExpansion->getBands());
m_phaseExpansion->lookup(wi, temp);
_pdf = m_shSampler->warp(temp, sample);
Vector wo = sphericalDirection(sample.x, sample.y);
pRec.wo = frame.toWorld(wo);
#if defined(FLAKE_EVAL_APPROXIMATION)
return Spectrum(std::max((Float) 0, temp.eval(sample.x, sample.y)));
#else
return f(pRec);
#endif
#endif
}
Spectrum FlakePhaseFunction::sample(PhaseFunctionQueryRecord &pRec,
Sampler *sampler) const {
Float pdf;
Spectrum result = FlakePhaseFunction::sample(pRec, pdf, sampler);
if (!result.isZero())
return result / pdf;
else
return Spectrum(0.0f);
}
Float FlakePhaseFunction::pdf(const PhaseFunctionQueryRecord &pRec) const {
#if defined(FLAKE_SAMPLE_UNIFORM)
return 1/(4*M_PI);
#else
Frame frame(pRec.mRec.orientation);
Vector wi = frame.toLocal(pRec.wi);
Vector wo = frame.toLocal(pRec.wo);
SHVector temp(m_phaseExpansion->getBands());
m_phaseExpansion->lookup(wi, temp);
return std::max((Float) 0, temp.eval(wo));
#endif
}
MTS_IMPLEMENT_CLASS_S(HeterogeneousFlakeMedium, false, Medium)
MTS_IMPLEMENT_CLASS_S(FlakePhaseFunction, false, PhaseFunction)
MTS_EXPORT_PLUGIN(HeterogeneousFlakeMedium, "Heterogeneous micro-flake medium");
MTS_NAMESPACE_END

View File

@ -356,7 +356,7 @@ public:
x -= fx/dfx;
if (x <= a || x >= b || dfx == 0)
if (EXPECT_NOT_TAKEN(x <= a || x >= b || dfx == 0))
x = 0.5f * (b + a);
/* Integrated version of the above Lagrange polynomial */

View File

@ -16,16 +16,24 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mitsuba/core/chisquare.h>
#include <mitsuba/render/phase.h>
#include <mitsuba/render/sampler.h>
#include "microflake.h"
#include "microflake_fiber.h"
MTS_NAMESPACE_BEGIN
class MicroflakePhaseFunction : public PhaseFunction {
public:
MicroflakePhaseFunction(const Properties &props)
: PhaseFunction(props) {
MicroflakePhaseFunction(const Properties &props) : PhaseFunction(props) {
m_fiberDistr = GaussianFiberDistribution(props.getFloat("stddev"));
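/* Validate the fiber distribution: check that sample() produces
* directions distributed according to pdf() using the chi-square test
* from mitsuba/core/chisquare.h, and dump the tables to 'test.m' for
* external inspection. */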
ChiSquareTest test(7);
test.fill(
boost::bind(&GaussianFiberDistribution::sample, m_fiberDistr, _1),
boost::bind(&GaussianFiberDistribution::pdf, m_fiberDistr, _1)
);
test.dumpTables("test.m");
test.runTest(1);
}
MicroflakePhaseFunction(Stream *stream, InstanceManager *manager)
@ -60,10 +68,16 @@ public:
}
std::string toString() const {
return "MicroflakePhaseFunction[]";
std::ostringstream oss;
oss << "MicroflakePhaseFunction[" << endl
<< " fiberDistr = " << indent(m_fiberDistr.toString()) << endl
<< "]";
return oss.str();
}
MTS_DECLARE_CLASS()
private:
GaussianFiberDistribution m_fiberDistr;
};

View File

@ -16,20 +16,30 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#if !defined(__MICROFLAKE_SIGMA_T_H)
#define __MICROFLAKE_SIGMA_T_H
#if !defined(__MICROFLAKE_FIBER_DIST_H)
#define __MICROFLAKE_FIBER_DIST_H
#include <mitsuba/core/statistics.h>
#include <mitsuba/core/brent.h>
#include <boost/math/special_functions/erf.hpp>
#include <boost/bind.hpp>
#include <boost/math/special_functions/erf.hpp>
MTS_NAMESPACE_BEGIN
#define SIGMA_T_RANGE_MIN 4e-08
#define SIGMA_T_RANGE_MAX 4
#define SIGMA_T_ELEMENTS 100
#define SIGMA_T_COEFFS 10
FINLINE Float mts_erf(Float arg) {
#if defined(__GNUC__) && defined(SINGLE_PRECISION)
return erff(arg);
#elif defined(__GNUC__) && defined(DOUBLE_PRECISION)
return erf(arg);
#else
return boost::math::erf<Float>(arg);
#endif
}
#define FIBERDIST_STDDEV_MIN 4e-08
#define FIBERDIST_STDDEV_MAX 4
#define FIBERDIST_SIGMA_T_ELEMENTS 100
#define FIBERDIST_SIGMA_T_COEFFS 10
/**
* Each row of this table stores an expansion of \sigma_t in terms of
@ -37,11 +47,11 @@ MTS_NAMESPACE_BEGIN
* fiber axis and the incident direction. The standard deviation of the
* underlying distribution increases from row to row using a nonlinear
* mapping to capture the `interesting' parts more efficiently (see
* \ref sigmaT_fiberGaussian())
* \ref sigmaT_fiberDist())
*
* The implementation here has an average absolute error of 3.17534e-05.
*/
const double fiberGaussianCoeffs[SIGMA_T_ELEMENTS][SIGMA_T_COEFFS] = {
const double fiberSigmaTCoeffs[FIBERDIST_SIGMA_T_ELEMENTS][FIBERDIST_SIGMA_T_COEFFS] = {
{ 2.5037347588631811e-08, 6.3661860705430207e-01, 1.7827351092236654e-05, -1.3015084365974872e-04, 5.2848718115683369e-04, -1.2810825925271274e-03, 1.8970200528656278e-03, -1.6818874867112754e-03, 8.1991128831759852e-04, -1.6898472595983094e-04 },
{ 4.0059757909494120e-07, 6.3660112034736627e-01, 2.8523763553955916e-04, -2.0824136415171779e-03, 8.4557955473769653e-03, -2.0497323170502568e-02, 3.0352323203715059e-02, -2.6910202150020268e-02, 1.3118581833396092e-02, -2.7037558688078889e-03 },
{ 2.0280252492606374e-06, 6.3652534713591225e-01, 1.4440155345667449e-03, -1.0542219099960448e-02, 4.2807465147063795e-02, -1.0376769900017280e-01, 1.5365863698661997e-01, -1.3623289910560743e-01, 6.6412820861387445e-02, -1.3687764171763206e-02 },
@ -144,27 +154,27 @@ const double fiberGaussianCoeffs[SIGMA_T_ELEMENTS][SIGMA_T_COEFFS] = {
{ 4.9740122861160618e-01, 4.9788891152147130e-06, 3.7427454517739989e-03, 1.2662117029549336e-03, -3.7139444921194809e-03, 2.9874687143092160e-03, 6.6568975580594270e-03, -1.6932579956119298e-02, 1.3976501563774946e-02, -4.0922961870819563e-03 }
};
double sigmaT_fiberGaussian(double stddev, double sinTheta) {
double sigmaT_fiberDist(double stddev, double sinTheta) {
/* Undo the nonlinear mapping */
const double factor = SIGMA_T_ELEMENTS / std::pow(SIGMA_T_RANGE_MAX, 0.25);
const double factor = FIBERDIST_SIGMA_T_ELEMENTS / std::pow(FIBERDIST_STDDEV_MAX, 0.25);
double pos = std::pow(stddev, 0.25) * factor - 1;
/* Clamp to the supported parameter range */
pos = std::min(std::max(0.0, pos), (double) (SIGMA_T_ELEMENTS-1));
pos = std::min(std::max(0.0, pos), (double) (FIBERDIST_SIGMA_T_ELEMENTS-1));
/* Linearly interpolate coefficients using \c alpha */
int idx0 = (int) std::floor(pos), idx1 = (int) std::ceil(pos);
double alpha = pos - idx0, base = 1, result = 0;
SAssert(
idx0 >= 0 && idx0 < SIGMA_T_ELEMENTS &&
idx1 >= 0 && idx1 < SIGMA_T_ELEMENTS
idx0 >= 0 && idx0 < FIBERDIST_SIGMA_T_ELEMENTS &&
idx1 >= 0 && idx1 < FIBERDIST_SIGMA_T_ELEMENTS
);
/* Compute the expansion */
for (int i=0; i<SIGMA_T_COEFFS; ++i) {
result += base * ((1-alpha) * fiberGaussianCoeffs[idx0][i]
+ alpha * fiberGaussianCoeffs[idx1][i]);
/* Evaluate the expansion */
for (int i=0; i<FIBERDIST_SIGMA_T_COEFFS; ++i) {
result += base * ((1-alpha) * fiberSigmaTCoeffs[idx0][i]
+ alpha * fiberSigmaTCoeffs[idx1][i]);
base *= sinTheta;
}
@ -176,56 +186,106 @@ static StatsCounter brentSolves("Micro-flake model",
static StatsCounter avgBrentFunEvals("Micro-flake model",
"Average Brent solver function evaluations", EAverage);
struct GaussianFiberDistribution {
class GaussianFiberDistribution {
public:
inline GaussianFiberDistribution() {}
inline GaussianFiberDistribution(Float stddev) : m_stddev(stddev) {
m_normalization = 1/(std::sqrt(2*M_PI) * m_stddev *
boost::math::erf<Float>(1/(SQRT_TWO * m_stddev)));
m_c1 = 1.0f/boost::math::erf<Float>(1/(SQRT_TWO * m_stddev));
m_normalization = 1/(std::pow(2*M_PI, 3.0f/2.0f) * m_stddev *
mts_erf(1/(SQRT_TWO * m_stddev)));
m_c1 = 1.0f/mts_erf(1/(SQRT_TWO * m_stddev));
if (stddev < FIBERDIST_STDDEV_MIN || stddev > FIBERDIST_STDDEV_MAX)
SLog(EError, "Standard deviation parameter is out of range (must "
"be in [%f, %f])!", FIBERDIST_STDDEV_MIN, FIBERDIST_STDDEV_MAX);
/* Determine expansion coefficients of sigma_t for a fixed stddev */
Float pos = std::pow(stddev / FIBERDIST_STDDEV_MAX, 0.25) *
FIBERDIST_SIGMA_T_ELEMENTS - 1;
pos = std::min(std::max((Float) 0, pos), (Float) (FIBERDIST_SIGMA_T_ELEMENTS-1));
int idx0 = (int) std::floor(pos), idx1 = (int) std::ceil(pos);
Float alpha = pos - idx0;
for (int i=0; i<FIBERDIST_SIGMA_T_COEFFS; ++i)
m_coeffs[i] = (Float) (((1-alpha) * fiberSigmaTCoeffs[idx0][i]
+ alpha * fiberSigmaTCoeffs[idx1][i]));
}
/// Evaluate \sigma_t as a function of \cos\theta
Float sigmaT(Float cosTheta) const {
return sigmaT_fiberGaussian(m_stddev,
std::sqrt(std::max((Float) 0, 1-cosTheta*cosTheta)));
inline Float sigmaT(Float cosTheta) const {
Float sinTheta = std::sqrt(std::max(
(Float) 0, 1-cosTheta*cosTheta)),
base = 1.0f, result = 0.0f;
/* Evaluate the expansion */
for (int i=0; i<FIBERDIST_SIGMA_T_COEFFS; ++i) {
result += base * m_coeffs[i];
base *= sinTheta;
}
/// Evaluate the longitudinal density as a function of \cos\theta
Float pdfCosTheta(Float cosTheta) const {
return std::exp(-cosTheta*cosTheta/(2*m_stddev*m_stddev)) * m_normalization;
return result;
}
/// Evaluate the longitudinal CDF as a function of \cos\theta
inline Float cdf(Float theta) const {
return 0.5f * (1.0f -
boost::math::erf<Float>(std::cos(theta)/(SQRT_TWO * m_stddev))*m_c1);
/// Evaluate the density as a function of \cos\theta
inline Float pdfCosTheta(Float cosTheta) const {
return std::exp(-cosTheta*cosTheta
/ (2*m_stddev*m_stddev)) * m_normalization;
}
/// Evaluate the density as a function of direction
inline Float pdf(const Vector &d) const {
return std::exp(-d.z*d.z
/ (2*m_stddev*m_stddev)) * m_normalization;
}
/**
* Apply the inversion method to sample \theta given
* a uniformly distributed \xi on [0, 1]
* \brief Apply the inversion method to sample \cos\theta given
* a uniformly distributed r.v. \xi on [0, 1]
*/
Float sample(Float xi) const {
Vector sample(const Point2 &sample) const {
BrentSolver brentSolver(100, 1e-6f);
BrentSolver::Result result = brentSolver.solve(
boost::bind(&GaussianFiberDistribution::cdfFunctor, this, xi, _1), 0, M_PI);
boost::bind(&GaussianFiberDistribution::cdfFunctor,
this, sample.x, _1), -1, 1);
SAssert(result.success);
avgBrentFunEvals.incrementBase();
++brentSolves;
return result.x;
}
private:
Float cdfFunctor(Float theta, Float xi) const {
++avgBrentFunEvals;
return cdf(theta)-xi;
Float cosTheta = result.x,
sinTheta = std::sqrt(std::max((Float) 0, 1-cosTheta*cosTheta)),
phi = 2 * M_PI * sample.y,
sinPhi = std::sin(phi), cosPhi = std::cos(phi);
return Vector(sinTheta * cosPhi, sinTheta * sinPhi, cosTheta);
}
std::string toString() const {
std::ostringstream oss;
oss << "GaussianFiberDistribution[stddev="
<< m_stddev << "]";
return oss.str();
}
protected:
/// Evaluate the longitudinal CDF as a function of \cos\theta
inline Float cdf(Float cosTheta) const {
return 0.5f * (1.0f -
mts_erf(cosTheta / (SQRT_TWO * m_stddev)) * m_c1);
}
Float cdfFunctor(Float xi, Float cosTheta) const {
++avgBrentFunEvals;
return cdf(cosTheta)-xi;
}
protected:
Float m_stddev;
Float m_normalization;
Float m_c1;
Float m_coeffs[FIBERDIST_SIGMA_T_COEFFS];
};
MTS_NAMESPACE_END
#endif /* __MICROFLAKE_SIGMA_T_H */
#endif /* __MICROFLAKE_FIBER_DIST_H */