various bugfixes
parent da246ae3a5
commit 7405a77078
@@ -72,10 +72,14 @@ public:
 	/// Set an integer value
 	void setLong(const std::string &name, int64_t value, bool warnDuplicates = true);
-	/// Get an intteger value
+	/// Get an integer value
 	int64_t getLong(const std::string &name) const;
 	/// Get an integer value (with default);
 	int64_t getLong(const std::string &name, int64_t defVal) const;
+	/// Get a size value
+	size_t getSize(const std::string &name) const;
+	/// Get an size value (with default);
+	size_t getSize(const std::string &name, size_t defVal) const;
 
 	/// Set a single precision floating point value
 	void setFloat(const std::string &name, Float value, bool warnDuplicates = true);
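The new getSize() accessors mirror getLong(), but return size_t and reject negative inputs. A minimal usage sketch — the plugin class and the property names here are made up for illustration:

	// Sketch: reading size-valued parameters through the new accessors.
	MyPlugin(const Properties &props) {
		// Fails with an error log if "photonCount" is absent or negative
		size_t photonCount = props.getSize("photonCount");
		// Falls back to 100000 when "budget" was not specified
		size_t budget = props.getSize("budget", 100000);
	}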
@@ -120,10 +120,10 @@ public:
 	void seed(uint64_t *values, uint64_t length);
 
 	/// Return an integer on the [0, 2^63-1]-interval
-	uint64_t nextLong();
+	uint64_t nextULong();
 
 	/// Return an integer on the [0, n)-interval
-	unsigned int nextInteger(unsigned int n);
+	uint32_t nextUInt(uint32_t n);
 
 	/// Return a floating point value on the [0, 1) interval
 	Float nextFloat();

@@ -136,7 +136,7 @@ public:
 	 */
 	template <typename Iterator> void shuffle(Iterator it1, Iterator it2) {
 		for (Iterator it = it2 - 1; it > it1; --it)
-			std::iter_swap(it, it1 + nextInteger((int) (it-it1)));
+			std::iter_swap(it, it1 + nextUInt((uint32_t) (it-it1)));
 	}
 
 	/// Serialize a random generator to a binary data stream
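shuffle() walks the range back to front and swaps each element with a randomly chosen earlier one, so it only needs a source of bounded uniform integers — exactly what the renamed nextUInt() provides. A usage sketch:

	// Sketch: shuffling a small permutation table in place.
	ref<Random> random = new Random();
	uint32_t perm[8];
	for (uint32_t i = 0; i < 8; ++i)
		perm[i] = i;
	random->shuffle(perm, perm + 8); // draws one nextUInt() per element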
@@ -144,9 +144,12 @@ public:
 	/// Write an array of signed ints (64 bit) to the stream
 	void writeLongArray(const int64_t *values, size_t size);
 
-	/// Write an unsigned int (32 bit) to the stream
+	/// Write an unsigned int (64 bit) to the stream
 	void writeULong(uint64_t value);
 
+	/// Write a size value to the stream
+	void writeSize(size_t value) { writeULong((uint64_t) value); }
+
 	/// Write an array of unsigned ints (64 bit) to the stream
 	void writeULongArray(const uint64_t *values, size_t size);

@@ -231,6 +234,9 @@ public:
 	/// Read an unsigned int (64 bit) from the stream
 	uint64_t readULong();
 
+	/// Read a size value from the stream
+	size_t readSize() { return (size_t) readULong(); }
+
 	/// Read an array of unsigned ints (64 bit) from the stream
 	void readULongArray(uint64_t *dst, size_t size);
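writeSize()/readSize() pin down the width of size_t in serialized data: a size is always transported as a 64-bit unsigned integer, so files written on a 64-bit host stay readable on a 32-bit one. A round-trip sketch — MemoryStream stands in for any Stream implementation, and the rewind call is an assumption about the positioning API rather than something this commit touches:

	// Sketch: portable size_t round trip through the new helpers.
	ref<MemoryStream> mStream = new MemoryStream();
	size_t count = 123456;
	mStream->writeSize(count);             // stored as a uint64_t
	mStream->seek(0);                      // rewind (assumed positioning API)
	size_t restored = mStream->readSize(); // cast back to the host's size_t
	Assert(restored == count);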
@@ -166,7 +166,7 @@ public:
 	/// Add a child ConfigurableObject
 	virtual void addChild(const std::string &name, ConfigurableObject *child);
 
-	/// Serialize this camera to disk
+	/// Serialize this camera to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	// Set the parent node

@@ -209,7 +209,7 @@ public:
 	/// Return a projection transformation that includes a pixel offset (using GL clip space coordinates)
 	virtual Transform getGLProjectionTransform(const Point2 &jitter) const = 0;
 
-	/// Serialize this camera to disk
+	/// Serialize this camera to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	/** \brief Configure the object (called _once_ after construction

@@ -242,7 +242,7 @@ public:
 	   and addition of all child ConfigurableObjects. */
 	virtual void configure();
 
-	/// Serialize this camera to disk
+	/// Serialize this camera to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	/// Calculate the image plane size for a plane of the given distance
@@ -86,7 +86,7 @@ public:
 	/// Configure the film
 	virtual void configure();
 
-	/// Serialize this film to disk
+	/// Serialize this film to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	/// Does the destination already exist?
@@ -379,7 +379,7 @@ public:
 	 */
 	virtual void wakeup(std::map<std::string, SerializableObject *> &params);
 
-	/// Serialize this integrator to disk
+	/// Serialize this integrator to a binary data stream
 	void serialize(Stream *stream, InstanceManager *manager) const;
 
 	MTS_DECLARE_CLASS()

@@ -404,7 +404,7 @@ protected:
 */
class MTS_EXPORT_RENDER MonteCarloIntegrator : public SampleIntegrator {
public:
-	/// Serialize this integrator to disk
+	/// Serialize this integrator to a binary data stream
 	void serialize(Stream *stream, InstanceManager *manager) const;
 
 	MTS_DECLARE_CLASS()
@@ -53,21 +53,21 @@ public:
 	 * Allocate storage for a hemispherical sample with M
 	 * elevation and N azimuthal samples.
 	 */
-	HemisphereSampler(int M, int N);
+	HemisphereSampler(uint32_t M, uint32_t N);
 
 	/// Return the elevational resolution
-	inline unsigned int getM() const { return m_M; }
+	inline uint32_t getM() const { return m_M; }
 
 	/// Return the azimuthal resolution
-	inline unsigned int getN() const { return m_N; }
+	inline uint32_t getN() const { return m_N; }
 
 	/// Access a cell by index
-	inline SampleEntry &operator() (int j, int k) {
+	inline SampleEntry &operator() (uint32_t j, uint32_t k) {
 		return m_entries[j*m_N + k];
 	}
 
 	/// Access a cell by index (const version)
-	inline const SampleEntry &operator() (int j, int k) const {
+	inline const SampleEntry &operator() (uint32_t j, uint32_t k) const {
 		return m_entries[j*m_N + k];
 	}

@@ -112,7 +112,7 @@ protected:
 	/// Free all memory
 	virtual ~HemisphereSampler();
private:
-	unsigned int m_M, m_N;
+	uint32_t m_M, m_N;
 	SampleEntry *m_entries;
 	Vector *m_uk, *m_vk, *m_vkMinus;
 	Spectrum m_E;
@@ -339,7 +339,7 @@ public:
 	//! @{ \name Miscellaneous
 	// =============================================================
 
-	/// Serialize this luminaire to disk
+	/// Serialize this luminaire to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	/// Optional pre-process step before rendering starts
@@ -92,15 +92,9 @@ public:
  */
 class MTS_EXPORT_RENDER Medium : public NetworkedObject {
 public:
-	/**
-	 * \brief Compute the transmittance along a ray segment
-	 *
-	 * Computes the transmittance along a ray segment
-	 * [mint, maxt] associated with the ray. It is assumed
-	 * that the ray has a normalized direction value.
-	 *
-	 */
-	virtual Spectrum getTransmittance(const Ray &ray) const = 0;
+	// =============================================================
+	//! @{ \name Medium sampling strategy
+	// =============================================================
 
 	/**
 	 * \brief Sample a distance along the ray segment [mint, maxt]

@@ -117,7 +111,7 @@ public:
 			MediumSamplingRecord &mRec, Sampler *sampler) const = 0;
 
 	/**
-	 * \brief Compute the density of sampling distance \a t along the
+	 * \brief Compute the 1D density of sampling distance \a t along the
 	 * ray using the sampling strategy implemented by \a sampleDistance.
 	 *
 	 * The function computes the continuous densities in the case of

@@ -129,9 +123,44 @@ public:
 	virtual void pdfDistance(const Ray &ray, Float t,
 		MediumSamplingRecord &mRec) const = 0;
 
+	//! @}
+	// =============================================================
+
+	// =============================================================
+	//! @{ \name Functions for querying the medium
+	// =============================================================
+
+	/**
+	 * \brief Compute the transmittance along a ray segment
+	 *
+	 * Computes the transmittance along a ray segment
+	 * [mint, maxt] associated with the ray. It is assumed
+	 * that the ray has a normalized direction value.
+	 */
+	virtual Spectrum getTransmittance(const Ray &ray) const = 0;
+
+	/// Return the phase function of this medium
+	inline const PhaseFunction *getPhaseFunction() const { return m_phaseFunction.get(); }
+
+	/// Determine whether the medium is homogeneous
+	virtual bool isHomogeneous() const = 0;
+
+	/// For homogeneous media: return the absorption coefficient
+	inline const Spectrum &getSigmaA() const { return m_sigmaA; }
+
+	/// For homogeneous media: return the scattering coefficient
+	inline const Spectrum &getSigmaS() const { return m_sigmaS; }
+
+	/// For homogeneous media: return the extinction coefficient
+	inline const Spectrum &getSigmaT() const { return m_sigmaT; }
+
+	//! @}
+	// =============================================================
+
+	// =============================================================
+	//! @{ \name Miscellaneous
+	// =============================================================
+
 	/** \brief Configure the object (called _once_ after construction
 	   and addition of all child ConfigurableObjects. */
 	virtual void configure();

@@ -148,6 +177,9 @@ public:
 	/// Return a string representation
 	virtual std::string toString() const = 0;
 
+	//! @}
+	// =============================================================
+
 	MTS_DECLARE_CLASS()
protected:
 	/// Create a new participating medium instance
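For a homogeneous medium the transmittance query above has a closed form, the Beer-Lambert law T = exp(-sigmaT * (maxt - mint)). A sketch of a conforming implementation — the class name is hypothetical, and the Spectrum arithmetic follows the same pattern the BRE code in this commit uses:

	// Sketch: Beer-Lambert transmittance, assuming ray.d is normalized
	// so that maxt - mint is the metric length of the segment.
	Spectrum HomogeneousMediumSketch::getTransmittance(const Ray &ray) const {
		return Spectrum(m_sigmaT * (ray.mint - ray.maxt)).exp();
	}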
@@ -74,10 +74,11 @@ public:
 	 */
 	bool storePhoton(const Photon &photon);
 
-	/**
-	 * Scale all photon power values contained in this photon map
-	 */
-	void setScale(Float value);
+	/// Scale all photon power values contained in this photon map
+	inline void setScaleFactor(Float value) { m_scale = value; }
+
+	/// Return the power scale factor of this photon map
+	inline Float getScaleFactor() const { return m_scale; }
 
 	/**
 	 * Recursively build a left-balanced kd-tree. This has to be

@@ -183,9 +184,9 @@ public:
 		return m_balanced;
 	}
 
-	/// Return a photon in the photon map
+	/// Return a photon in the photon map (1-based indexing!)
 	inline const Photon &getPhoton(size_t pos) const {
-		return m_photons[pos+1];
+		return m_photons[pos];
 	}
 
 	/// Set the minimum amount of photons to consider an estimate valid

@@ -270,6 +271,12 @@ protected:
 		}
 	};
 
+	/// Heap convenience routines
+	inline size_t leftChild(size_t index) const { return 2*index; }
+	inline size_t rightChild(size_t index) const { return 2*index + 1; }
+	inline bool isInnerNode(size_t index) const { return index <= m_lastInnerNode; }
+	inline bool hasRightChild(size_t index) const { return index <= m_lastRChildNode; }
+
 	/// \endcond
protected:
 	/* ===================================================================== */

@@ -313,12 +320,6 @@ protected:
 		photon_iterator sortEnd,
 		std::vector<size_t> &heapPermutation,
 		AABB &aabb, size_t heapIndex) const;
-
-	/// Heap access routines
-	inline size_t leftChild(size_t index) const { return 2*index; }
-	inline size_t rightChild(size_t index) const { return 2*index + 1; }
-	inline bool isInnerNode(size_t index) const { return index <= m_lastInnerNode; }
-	inline bool hasRightChild(size_t index) const { return index <= m_lastRChildNode; }
private:
 	/* ===================================================================== */
 	/* Protected attributes */
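The heap helpers moved into the shared section encode the left-balanced tree layout: with 1-based indices, node i keeps its children at 2i and 2i+1 and its parent at i/2 — which is also why getPhoton() now advertises 1-based indexing. Laid out explicitly:

	// 1-based implicit binary tree; slot 0 of the photon array is unused:
	//            1
	//          /   \
	//         2     3
	//        / \   / \
	//       4   5 6   7
	// leftChild(3) == 6, rightChild(3) == 7, parent(i) == i/2.
	// A node is inner iff index <= m_lastInnerNode, and an inner node
	// has a right child iff index <= m_lastRChildNode.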
@@ -38,13 +38,13 @@ public:
 	}
 
 	inline void load(Stream *stream) {
-		m_rangeStart = (size_t) stream->readULong();
-		m_rangeEnd = (size_t) stream->readULong();
+		m_rangeStart = stream->readSize();
+		m_rangeEnd = stream->readSize();
 	}
 
 	inline void save(Stream *stream) const {
-		stream->writeULong(m_rangeStart);
-		stream->writeULong(m_rangeEnd);
+		stream->writeSize(m_rangeStart);
+		stream->writeSize(m_rangeEnd);
 	}
 
 	inline std::string toString() const {
@@ -70,7 +70,7 @@ public:
 	virtual void advance();
 
 	/// Manually set the current sample index
-	virtual void setSampleIndex(uint64_t sampleIndex);
+	virtual void setSampleIndex(size_t sampleIndex);
 
 	/// Retrieve the next component value from the current sample
 	virtual Float next1D() = 0;

@@ -124,12 +124,12 @@ public:
 	virtual Point2 independent2D() = 0;
 
 	/// Return total number of samples
-	inline uint64_t getSampleCount() const { return m_sampleCount; }
+	inline size_t getSampleCount() const { return m_sampleCount; }
 
 	/// Return the current sample index
-	inline uint64_t getSampleIndex() const { return m_sampleIndex; }
+	inline size_t getSampleIndex() const { return m_sampleIndex; }
 
-	/// Serialize this sampler to disk
+	/// Serialize this sampler to a binary data stream
 	virtual void serialize(Stream *stream, InstanceManager *manager) const;
 
 	/// Return the properties of this sampler

@@ -149,8 +149,8 @@ protected:
 	/// Virtual destructor
 	virtual ~Sampler();
protected:
-	uint64_t m_sampleCount;
-	uint64_t m_sampleIndex;
+	size_t m_sampleCount;
+	size_t m_sampleIndex;
 	std::vector<unsigned int> m_req1D, m_req2D;
 	std::vector<Float *> m_sampleArrays1D;
 	std::vector<Point2 *> m_sampleArrays2D;
@@ -58,21 +58,21 @@ public:
 		   the particle tracing task (default: 200K samples).
 		   Should be high enough so that sending and accumulating
 		   the partially exposed films is not the bottleneck. */
-		m_granularity = (size_t) props.getLong("granularity", 200000);
+		m_granularity = props.getSize("granularity", 200000);
 	}
 
 	AdjointParticleTracer(Stream *stream, InstanceManager *manager)
 	 : Integrator(stream, manager) {
 		m_maxDepth = stream->readInt();
 		m_rrDepth = stream->readInt();
-		m_granularity = (size_t) stream->readULong();
+		m_granularity = stream->readSize();
 	}
 
 	void serialize(Stream *stream, InstanceManager *manager) const {
 		Integrator::serialize(stream, manager);
 		stream->writeInt(m_maxDepth);
 		stream->writeInt(m_rrDepth);
-		stream->writeULong((uint64_t) m_granularity);
+		stream->writeSize(m_granularity);
 	}
 
 	bool preprocess(const Scene *scene, RenderQueue *queue, const RenderJob *job,

@@ -98,7 +98,7 @@ public:
 		ref<Scheduler> scheduler = Scheduler::getInstance();
 		ref<Camera> camera = scene->getCamera();
 		const Film *film = camera->getFilm();
-		uint64_t sampleCount = scene->getSampler()->getSampleCount();
+		size_t sampleCount = scene->getSampler()->getSampleCount();
 		size_t nCores = scheduler->getCoreCount();
 		Log(EInfo, "Starting render job (%ix%i, %lld samples, " SIZE_T_FMT
 			" %s, " SSE_STR ") ..", film->getCropSize().x, film->getCropSize().y,
@@ -16,30 +16,143 @@
  along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
+#include <mitsuba/render/medium.h>
 #include "bre.h"
 
 MTS_NAMESPACE_BEGIN
 
-BeamRadianceEstimate::BeamRadianceEstimate(PhotonMap *pmap) {
-	int n = 100;
+BeamRadianceEstimator::BeamRadianceEstimator(const PhotonMap *pmap) {
+	int n = 10;
 
 	PhotonMap::search_result *results =
 		new PhotonMap::search_result[n+1];
 
-	Float *radii = new Float[pmap->getPhotonCount()];
+	m_photonCount = pmap->getPhotonCount();
+	m_scaleFactor = pmap->getScaleFactor();
+	m_lastInnerNode = m_photonCount/2;
+	m_lastRChildNode = (m_photonCount-1)/2;
+	m_depth = log2i(m_photonCount)+1;
+
+	m_nodes = new BRENode[m_photonCount+1];
 
 	Log(EInfo, "Computing photon radii ..");
-	for (size_t i=0; i<pmap->getPhotonCount(); ++i) {
+	for (size_t i=1; i<=m_photonCount; ++i) {
 		const Photon &photon = pmap->getPhoton(i);
+		BRENode &node = m_nodes[i];
+		node.photon = photon;
 
 		Float searchRadiusSqr = std::numeric_limits<Float>::infinity();
 		pmap->nnSearch(photon.getPosition(), searchRadiusSqr, n, results);
-		radii[i] = std::sqrt(searchRadiusSqr);
+		node.radius = std::sqrt(searchRadiusSqr);
 	}
 
-	delete[] radii;
+	Log(EInfo, "Generating a hierarchy for the beam radiance estimate");
+
+	buildHierarchy(1);
+
 	delete[] results;
 }
 
-MTS_IMPLEMENT_CLASS(BeamRadianceEstimate, false, Object)
+BeamRadianceEstimator::BeamRadianceEstimator(Stream *stream, InstanceManager *manager) {
+	m_photonCount = stream->readSize();
+	m_scaleFactor = stream->readFloat();
+	m_lastInnerNode = m_photonCount/2;
+	m_lastRChildNode = (m_photonCount-1)/2;
+	m_depth = log2i(m_photonCount)+1;
+	m_nodes = new BRENode[m_photonCount+1];
+	for (size_t i=1; i<=m_photonCount; ++i) {
+		BRENode &node = m_nodes[i];
+		node.aabb = AABB(stream);
+		node.photon = Photon(stream);
+		node.radius = stream->readFloat();
+	}
+}
+
+void BeamRadianceEstimator::serialize(Stream *stream, InstanceManager *manager) const {
+	stream->writeSize(m_photonCount);
+	stream->writeFloat(m_scaleFactor);
+	for (size_t i=1; i<=m_photonCount; ++i) {
+		BRENode &node = m_nodes[i];
+		node.aabb.serialize(stream);
+		node.photon.serialize(stream);
+		stream->writeFloat(node.radius);
+	}
+}
+
+AABB BeamRadianceEstimator::buildHierarchy(size_t index) {
+	BRENode &node = m_nodes[index];
+
+	if (isInnerNode(index)) {
+		node.aabb = buildHierarchy(leftChild(index));
+		if (hasRightChild(index))
+			node.aabb.expandBy(buildHierarchy(rightChild(index)));
+	} else {
+		Point center = node.photon.getPosition();
+		Float radius = node.radius;
+		node.aabb = AABB(
+			center - Vector(radius, radius, radius),
+			center + Vector(radius, radius, radius)
+		);
+	}
+
+	return node.aabb;
+}
+
+inline Float K2(Float sqrParam) {
+	Float tmp = 1-sqrParam;
+	return (3/M_PI) * tmp * tmp;
+}
+
+Spectrum BeamRadianceEstimator::query(const Ray &ray, const Medium *medium) const {
+	uint32_t *stack = (uint32_t *) alloca((m_depth+1) * sizeof(uint32_t));
+	uint32_t index = 1, stackPos = 1;
+	Spectrum result(0.0f);
+	size_t nNodes = 0;
+
+	const Spectrum &sigmaT = medium->getSigmaT();
+	const Spectrum &sigmaS = medium->getSigmaS();
+
+	while (stackPos > 0) {
+		const BRENode &node = m_nodes[index];
+
+		/* Test against the node's bounding box */
+		Float mint, maxt;
+		if (!node.aabb.rayIntersect(ray, mint, maxt) || maxt < ray.mint || mint > ray.maxt) {
+			index = stack[--stackPos];
+			continue;
+		}
+		++nNodes;
+
+		/* Recurse on inner nodes */
+		if (isInnerNode(index)) {
+			if (hasRightChild(index))
+				stack[stackPos++] = leftChild(index);
+			index = rightChild(index);
+		} else {
+			index = stack[--stackPos];
+		}
+
+		Vector originToCenter = node.photon.getPosition() - ray.o;
+		Float diskDistance = dot(originToCenter, ray.d), radSqr = node.radius * node.radius;
+		Float distSqr = (ray(diskDistance) - node.photon.getPosition()).lengthSquared();
+
+		if (distSqr < radSqr) {
+			Float weight = K2(distSqr/radSqr)/radSqr;
+
+			Spectrum tau = Spectrum(-sigmaT * diskDistance).exp();
+			Spectrum contrib = tau * sigmaS * (1/(4*M_PI) * weight * m_scaleFactor)
+				* node.photon.getPower() * 1e8;
+			//cout << tau.toString() << " " << sigmaS.toString() << " " << weight << " " << m_scaleFactor << " " << node.photon.getPower().toString() << endl;
+		}
+	}
+	cout << result.toString() << endl;
+
+	return result;
+}
+
+BeamRadianceEstimator::~BeamRadianceEstimator() {
+	delete[] m_nodes;
+}
+
+MTS_IMPLEMENT_CLASS_S(BeamRadianceEstimator, false, Object)
 MTS_NAMESPACE_END
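As a consistency check on the K2() kernel used by query(): substituting u = d^2/r^2, the disk area element becomes dA = pi r^2 du, so the integral of K2(d^2/r^2)/r^2 over a photon's disk is the integral from 0 to 1 of (3/pi)(1-u)^2 * pi du = 3 * [-(1-u)^3/3] from 0 to 1 = 1. Each photon's contribution is therefore smoothed by a kernel that integrates to one over its disk, which keeps the kernel density estimate normalized.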
@@ -16,8 +16,8 @@
  along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
-#if !defined(__BEAM_RADIANCE_ESTIMATE_H)
-#define __BEAM_RADIANCE_ESTIMATE_H
+#if !defined(__BEAM_RADIANCE_ESTIMATOR_H)
+#define __BEAM_RADIANCE_ESTIMATOR_H
 
 #include <mitsuba/render/photonmap.h>

@@ -25,22 +25,58 @@ MTS_NAMESPACE_BEGIN
 
 /**
  * Implements the beam radiance estimate described in
- * "The Beam Radiance Estimate for Volumetric Photon Mapping"
+ * "The Beam Radiance Estimator for Volumetric Photon Mapping"
  * by Wojciech Jarosz, Matthias Zwicker, and Henrik Wann Jensen.
 */
-class BeamRadianceEstimate {
+class BeamRadianceEstimator : public SerializableObject {
public:
 	/**
 	 * \brief Create a BRE acceleration data structure from
 	 * an existing volumetric photon map
 	 */
-	BeamRadianceEstimate(PhotonMap *pmap);
+	BeamRadianceEstimator(const PhotonMap *pmap);
+
+	/**
+	 * \brief Unserialize a BRE acceleration data structure from
+	 * a binary data stream
+	 */
+	BeamRadianceEstimator(Stream *stream, InstanceManager *manager);
+
+	/// Serialize to a binary data stream
+	void serialize(Stream *stream, InstanceManager *manager) const;
+
+	/// Compute the beam radiance estimate for the given ray segment and medium
+	Spectrum query(const Ray &ray, const Medium *medium) const;
+
+	MTS_DECLARE_CLASS()
+protected:
+	/// Release all memory
+	virtual ~BeamRadianceEstimator();
+
+	/// Fit a hierarchy of bounding boxes to the stored photons
+	AABB buildHierarchy(size_t index);
+
+	/// Heap convenience routines
+	inline size_t leftChild(size_t index) const { return 2*index; }
+	inline size_t rightChild(size_t index) const { return 2*index + 1; }
+	inline bool isInnerNode(size_t index) const { return index <= m_lastInnerNode; }
+	inline bool hasRightChild(size_t index) const { return index <= m_lastRChildNode; }
+private:
+	struct BRENode {
+		AABB aabb;
+		Photon photon;
+		Float radius;
+	};
+
+	BRENode *m_nodes;
+	size_t m_photonCount;
+	size_t m_lastInnerNode;
+	size_t m_lastRChildNode;
+	int m_depth;
+	Float m_scaleFactor;
 };
 
 MTS_NAMESPACE_END
 
-#endif /* __BEAM_RADIANCE_ESTIMATE_H */
+#endif /* __BEAM_RADIANCE_ESTIMATOR_H */
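A sketch of how the estimator is meant to be driven, mirroring the photon mapper changes further below — the surrounding setup is illustrative rather than part of the class itself:

	// Build the BRE once after tracing the volumetric photon map ...
	ref<PhotonMap> volumePhotonMap = proc->getPhotonMap();
	volumePhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
	volumePhotonMap->balance();
	ref<BeamRadianceEstimator> bre = new BeamRadianceEstimator(volumePhotonMap);

	// ... then query it per camera ray, restricted to the medium segment
	Ray mediumRaySegment(ray, 0, its.t);
	Spectrum inscattered = bre->query(mediumRaySegment, medium);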
@@ -18,6 +18,7 @@
 
 #include <mitsuba/core/plugin.h>
 #include <mitsuba/render/gatherproc.h>
+#include "bre.h"
 
 MTS_NAMESPACE_BEGIN
 

@@ -42,11 +43,11 @@ public:
 		/* Granularity of photon tracing work units (in shot particles, 0 => decide automatically) */
 		m_granularity = props.getInteger("granularity", 0);
 		/* Number of photons to collect for the global photon map */
-		m_globalPhotons = (size_t) props.getLong("globalPhotons", 200000);
+		m_globalPhotons = props.getSize("globalPhotons", 200000);
 		/* Number of photons to collect for the caustic photon map */
-		m_causticPhotons = (size_t) props.getLong("causticPhotons", 200000);
+		m_causticPhotons = props.getSize("causticPhotons", 200000);
 		/* Number of photons to collect for the volumetric photon map */
-		m_volumePhotons = (size_t) props.getLong("volumePhotons", 200000);
+		m_volumePhotons = props.getSize("volumePhotons", 200000);
 		/* Radius of lookups in the global photon map (relative to the scene size) */
 		m_globalLookupRadiusRel = props.getFloat("globalLookupRadius", 0.05f);
 		/* Radius of lookups in the caustic photon map (relative to the scene size) */

@@ -75,9 +76,9 @@ public:
 		m_directSamples = stream->readInt();
 		m_glossySamples = stream->readInt();
 		m_maxSpecularDepth = stream->readInt();
-		m_globalPhotons = (size_t) stream->readULong();
-		m_causticPhotons = (size_t) stream->readULong();
-		m_volumePhotons = (size_t) stream->readULong();
+		m_globalPhotons = stream->readSize();
+		m_causticPhotons = stream->readSize();
+		m_volumePhotons = stream->readSize();
 		m_globalLookupRadius = stream->readFloat();
 		m_causticLookupRadius = stream->readFloat();
 		m_volumeLookupRadius = stream->readFloat();

@@ -95,9 +96,9 @@ public:
 		stream->writeInt(m_directSamples);
 		stream->writeInt(m_glossySamples);
 		stream->writeInt(m_maxSpecularDepth);
-		stream->writeULong(m_globalPhotons);
-		stream->writeULong(m_causticPhotons);
-		stream->writeULong(m_volumePhotons);
+		stream->writeSize(m_globalPhotons);
+		stream->writeSize(m_causticPhotons);
+		stream->writeSize(m_volumePhotons);
 		stream->writeFloat(m_globalLookupRadius);
 		stream->writeFloat(m_causticLookupRadius);
 		stream->writeFloat(m_volumeLookupRadius);

@@ -140,9 +141,16 @@ public:
 			m_causticPhotons = 0;
 
 		/* Don't create a volumetric photon map if there are no participating media */
-		if (!scene->hasMedia())
+		const std::set<Medium *> &media = scene->getMedia();
+		if (media.size() == 0)
 			m_volumePhotons = 0;
 
+		for (std::set<Medium *>::const_iterator it = media.begin(); it != media.end(); ++it) {
+			if (!(*it)->isHomogeneous())
+				Log(EError, "Inhomogeneous media are currently not supported by the photon mapper!");
+		}
+
 		if (m_globalPhotonMap.get() == NULL && m_globalPhotons > 0) {
 			/* Adapt to scene extents */
 			m_globalLookupRadius = m_globalLookupRadiusRel * scene->getBSphere().radius;

@@ -164,12 +172,13 @@ public:
 			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
 				return false;
 
-			m_globalPhotonMap = proc->getPhotonMap();
-			m_globalPhotonMap->setScale(1 / (Float) proc->getShotParticles());
-			m_globalPhotonMap->setMinPhotons(m_globalMinPhotons);
-			m_globalPhotonMap->balance();
 			Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
 				SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+
+			m_globalPhotonMap = proc->getPhotonMap();
+			m_globalPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
+			m_globalPhotonMap->setMinPhotons(m_globalMinPhotons);
+			m_globalPhotonMap->balance();
 			m_globalPhotonMapID = sched->registerResource(m_globalPhotonMap);
 		}
 

@@ -194,12 +203,13 @@ public:
 			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
 				return false;
 
-			m_causticPhotonMap = proc->getPhotonMap();
-			m_causticPhotonMap->setScale(1 / (Float) proc->getShotParticles());
-			m_causticPhotonMap->setMinPhotons(m_causticMinPhotons);
-			m_causticPhotonMap->balance();
 			Log(EDebug, "Caustic photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
 				SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
+
+			m_causticPhotonMap = proc->getPhotonMap();
+			m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
+			m_causticPhotonMap->setMinPhotons(m_causticMinPhotons);
+			m_causticPhotonMap->balance();
 			m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap);
 		}
 

@@ -224,13 +234,15 @@ public:
 			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
 				return false;
 
-			m_volumePhotonMap = proc->getPhotonMap();
-			m_volumePhotonMap->setScale(1 / (Float) proc->getShotParticles());
-			m_volumePhotonMap->setMinPhotons(m_volumeMinPhotons);
-			m_volumePhotonMap->balance();
 			Log(EDebug, "Volume photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
 				SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());
-			m_volumePhotonMapID = sched->registerResource(m_volumePhotonMap);
+
+			ref<PhotonMap> volumePhotonMap = proc->getPhotonMap();
+			volumePhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
+			volumePhotonMap->balance();
+
+			m_bre = new BeamRadianceEstimator(volumePhotonMap);
+			m_breID = sched->registerResource(m_bre);
 		}
 
 		sched->unregisterResource(qmcSamplerID);

@@ -244,8 +256,8 @@ public:
 			proc->bindResource("globalPhotonMap", m_globalPhotonMapID);
 		if (m_causticPhotonMap.get())
 			proc->bindResource("causticPhotonMap", m_causticPhotonMapID);
-		if (m_volumePhotonMap.get())
-			proc->bindResource("volumePhotonMap", m_volumePhotonMapID);
+		if (m_bre.get())
+			proc->bindResource("bre", m_breID);
 	}
 
 	/// Connect to globally shared resources

@@ -254,8 +266,8 @@ public:
 			m_globalPhotonMap = static_cast<PhotonMap *>(params["globalPhotonMap"]);
 		if (!m_causticPhotonMap.get() && params.find("causticPhotonMap") != params.end())
 			m_causticPhotonMap = static_cast<PhotonMap *>(params["causticPhotonMap"]);
-		if (!m_volumePhotonMap.get() && params.find("volumetricPhotonMap") != params.end())
-			m_volumePhotonMap = static_cast<PhotonMap *>(params["volumetricPhotonMap"]);
+		if (!m_bre.get() && params.find("bre") != params.end())
+			m_bre = static_cast<BeamRadianceEstimator *>(params["bre"]);
 
 		if (getParent() != NULL && getParent()->getClass()->derivesFrom(MTS_CLASS(SampleIntegrator)))
 			m_parentIntegrator = static_cast<SampleIntegrator *>(getParent());

@@ -271,7 +283,7 @@ public:
 	}
 
 	Spectrum Li(const RayDifferential &ray, RadianceQueryRecord &rRec) const {
-		Spectrum Li(0.0f);
+		Spectrum LiSurf(0.0f), LiMedium(0.0f), transmittance(1.0f);
 		Intersection &its = rRec.its;
 		LuminaireSamplingRecord lRec;
 

@@ -279,24 +291,28 @@ public:
 		   intersection has already been provided). */
 		rRec.rayIntersect(ray);
 
-		if ((rRec.type & RadianceQueryRecord::EVolumeRadiance) && rRec.medium) {
+		if (rRec.medium) {
+			Ray mediumRaySegment(ray, 0, its.t);
+			transmittance = rRec.medium->getTransmittance(mediumRaySegment);
+			if (rRec.type & RadianceQueryRecord::EVolumeRadiance)
+				LiMedium = m_bre->query(mediumRaySegment, rRec.medium);
 		}
 
 		if (!its.isValid()) {
 			/* If no intersection could be found, possibly return
 			   attenuated radiance from a background luminaire */
 			if (rRec.type & RadianceQueryRecord::EEmittedRadiance)
-				Li += rRec.scene->LeBackground(ray);
-			return Li;
+				LiSurf += rRec.scene->LeBackground(ray);
+			return LiSurf * transmittance + LiMedium;
 		}
 
 		/* Possibly include emitted radiance if requested */
 		if (its.isLuminaire() && (rRec.type & RadianceQueryRecord::EEmittedRadiance))
-			Li += its.Le(-ray.d);
+			LiSurf += its.Le(-ray.d);
 
 		/* Include radiance from a subsurface integrator if requested */
 		if (its.hasSubsurface() && (rRec.type & RadianceQueryRecord::ESubsurfaceRadiance))
-			Li += its.LoSub(rRec.scene, -ray.d);
+			LiSurf += its.LoSub(rRec.scene, -ray.d);
 
 		const BSDF *bsdf = its.getBSDF(ray);
 

@@ -306,8 +322,8 @@ public:
 		if (bsdf == NULL) {
 			RadianceQueryRecord rRec2;
 			rRec2.recursiveQuery(rRec);
-			Li += m_parentIntegrator->Li(RayDifferential(its.p, ray.d, ray.time), rRec2);
-			return Li;
+			LiSurf += m_parentIntegrator->Li(RayDifferential(its.p, ray.d, ray.time), rRec2);
+			return LiSurf * transmittance + LiMedium;
 		}
 
 		int bsdfType = bsdf->getType();

@@ -335,7 +351,7 @@ public:
 					/* Evaluate BSDF * cos(theta) */
 					const Spectrum bsdfVal = bsdf->fCos(bRec);
 
-					Li += lRec.value * bsdfVal * weight;
+					LiSurf += lRec.value * bsdfVal * weight;
 				}
 			}
 		}

@@ -343,11 +359,11 @@ public:
 		if (bsdfType == BSDF::EDiffuseReflection) {
 			/* Hit a diffuse material - do a direct photon map visualization. */
 			if (rRec.type & RadianceQueryRecord::EIndirectSurfaceRadiance)
-				Li += m_globalPhotonMap->estimateIrradianceFiltered(its.p,
+				LiSurf += m_globalPhotonMap->estimateIrradianceFiltered(its.p,
					its.shFrame.n, m_globalLookupRadius, m_globalLookupSize)
						* bsdf->getDiffuseReflectance(its) * INV_PI;
 			if (rRec.type & RadianceQueryRecord::ECausticRadiance && m_causticPhotonMap.get())
-				Li += m_causticPhotonMap->estimateIrradianceFiltered(its.p,
+				LiSurf += m_causticPhotonMap->estimateIrradianceFiltered(its.p,
					its.shFrame.n, m_causticLookupRadius, m_causticLookupSize)
						* bsdf->getDiffuseReflectance(its) * INV_PI;
 		} else if ((bsdfType & BSDF::EDelta) != 0

@@ -370,7 +386,7 @@ public:
 
 					rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadiance);
 					recursiveRay = Ray(its.p, its.toWorld(bRec.wo), ray.time);
-					Li += m_parentIntegrator->Li(recursiveRay, rRec2) * bsdfVal;
+					LiSurf += m_parentIntegrator->Li(recursiveRay, rRec2) * bsdfVal;
 				}
 			}
 		} else if (rRec.depth == 1 && (bsdf->getType() & BSDF::EGlossy)) {

@@ -387,14 +403,14 @@ public:
 
 				rRec2.recursiveQuery(rRec, RadianceQueryRecord::ERadianceNoEmission);
 				recursiveRay = Ray(its.p, its.toWorld(bRec.wo), ray.time);
-				Li += m_parentIntegrator->Li(recursiveRay, rRec2) * bsdfVal * weight;
+				LiSurf += m_parentIntegrator->Li(recursiveRay, rRec2) * bsdfVal * weight;
 			}
 		} else {
-			Li += m_globalPhotonMap->estimateRadianceFiltered(its,
+			LiSurf += m_globalPhotonMap->estimateRadianceFiltered(its,
				m_globalLookupRadius, m_globalLookupSize);
 		}
 
-		return Li;
+		return LiSurf * transmittance + LiMedium;
 	}
 
 	std::string toString() const {

@@ -415,8 +431,9 @@ private:
 	ref<PhotonMap> m_causticPhotonMap;
 	ref<PhotonMap> m_volumePhotonMap;
 	ref<ParallelProcess> m_proc;
+	ref<BeamRadianceEstimator> m_bre;
 	SampleIntegrator *m_parentIntegrator;
-	int m_globalPhotonMapID, m_causticPhotonMapID, m_volumePhotonMapID;
+	int m_globalPhotonMapID, m_causticPhotonMapID, m_breID;
 	size_t m_globalPhotons, m_causticPhotons, m_volumePhotons;
 	int m_globalMinPhotons, m_globalLookupSize;
 	int m_causticMinPhotons, m_causticLookupSize;
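The reworked Li() keeps surface and medium contributions separate and recombines them as LiSurf * transmittance + LiMedium: surface radiance is attenuated by the transmittance of the camera ray's medium segment, while LiMedium holds the in-scattered radiance that the beam radiance estimate collects along that same segment. In equation form — with T(s) = exp(-sigma_t * s) for a homogeneous medium of extinction sigma_t and a surface hit at distance s — the estimate is L = T(s) * L_surf + integral from 0 to s of T(t) * sigma_s * L_in(t) dt, where the BRE approximates the integral with a single kernel-weighted sweep over the photon disks intersected by the ray.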
@@ -115,7 +115,7 @@ public:
 		size_t nCores = sched->getCoreCount();
 		Sampler *cameraSampler = (Sampler *) sched->getResource(samplerResID, 0);
 
-		uint64_t sampleCount = cameraSampler->getSampleCount();
+		size_t sampleCount = cameraSampler->getSampleCount();
 		Log(EInfo, "Starting render job (%ix%i, %lld %s, " SIZE_T_FMT
 			" %s, " SSE_STR ") ..", film->getCropSize().x, film->getCropSize().y,
 			sampleCount, sampleCount == 1 ? "sample" : "samples", nCores,

@@ -182,7 +182,7 @@ public:
 				int y = cropOffset.y + yofs + yofsInt;
 				int x = cropOffset.x + xofs + xofsInt;
 				cameraSampler->generate();
-				for (uint64_t j = 0; j<sampleCount; j++) {
+				for (size_t j = 0; j<sampleCount; j++) {
 					if (needsLensSample)
 						lensSample = cameraSampler->next2D();
 					if (needsTimeSample)
@@ -128,6 +128,31 @@ int64_t Properties::getLong(const std::string &name, int64_t defVal) const {
 	return (*it).second.v_long;
 }
 
+size_t Properties::getSize(const std::string &name) const {
+	if (!hasProperty(name))
+		SLog(EError, "Property \"%s\" missing", name.c_str());
+	std::map<std::string, Element>::const_iterator it = m_elements.find(name);
+	if ((*it).second.type != EInteger)
+		SLog(EError, "Property \"%s\" has wrong type", name.c_str());
+	if ((*it).second.v_long < 0)
+		SLog(EError, "Size property \"%s\": expected a nonnegative value!");
+	(*it).second.queried = true;
+	return (*it).second.v_long;
+}
+
+size_t Properties::getSize(const std::string &name, size_t defVal) const {
+	if (!hasProperty(name))
+		return defVal;
+	std::map<std::string, Element>::const_iterator it = m_elements.find(name);
+	if ((*it).second.type != EInteger)
+		SLog(EError, "Property \"%s\" has wrong type", name.c_str());
+	if ((*it).second.v_long < 0)
+		SLog(EError, "Size property \"%s\": expected a nonnegative value!");
+	(*it).second.queried = true;
+	return (*it).second.v_long;
+}
+
 void Properties::setFloat(const std::string &name, Float value, bool warnDuplicates) {
 	if (hasProperty(name) && warnDuplicates)
 		SLog(EWarn, "Property \"%s\" has already been specified!", name.c_str());
@@ -100,7 +100,7 @@ void Random::seed(uint64_t s) {
 void Random::seed(Random *random) {
 	uint64_t buf[MT_N];
 	for (int i=0; i<MT_N; ++i)
-		buf[i] = random->nextLong();
+		buf[i] = random->nextULong();
 	seed(buf, MT_N);
 }
 

@@ -133,7 +133,7 @@ void Random::seed(uint64_t *init_key, uint64_t key_length) {
 }
 
-uint64_t Random::nextLong() {
+uint64_t Random::nextULong() {
 	int i;
 	uint64_t x;
 	static uint64_t mag01[2]={0ULL, MT_MATRIX_A};

@@ -168,9 +168,9 @@ uint64_t Random::nextLong() {
 	return x;
 }
 
-unsigned int Random::nextInteger(unsigned int n) {
+uint32_t Random::nextUInt(uint32_t n) {
 	/* Determine a bit mask */
-	unsigned int result, bitmask = n;
+	uint32_t result, bitmask = n;
 	bitmask |= bitmask >> 1;
 	bitmask |= bitmask >> 2;
 	bitmask |= bitmask >> 4;

@@ -178,7 +178,7 @@ unsigned int Random::nextInteger(unsigned int n) {
 	bitmask |= bitmask >> 16;
 
 	/* Generate numbers until one in [0, n) is found */
-	while ((result = (unsigned int) (nextLong() & bitmask)) >= n)
+	while ((result = (uint32_t) (nextULong() & bitmask)) >= n)
 		;
 
 	return result;

@@ -186,7 +186,7 @@ unsigned int Random::nextInteger(unsigned int n) {
 
 #if defined(DOUBLE_PRECISION)
 Float Random::nextFloat() {
-	return (Float) ((nextLong() >> 11) * (1.0/9007199254740992.0));
+	return (Float) ((nextULong() >> 11) * (1.0/9007199254740992.0));
 }
 #else
 Float Random::nextFloat() {

@@ -196,7 +196,7 @@ Float Random::nextFloat() {
 		uint32_t u;
 		float f;
 	} x;
-	x.u = ((nextLong() & 0xFFFFFFFF) >> 9) | 0x3f800000UL;
+	x.u = ((nextULong() & 0xFFFFFFFF) >> 9) | 0x3f800000UL;
 	return x.f - 1.0f;
 }
 #endif
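The mask-and-reject loop in nextUInt() avoids the modulo bias that nextULong() % n would introduce. For example, with n = 5 the bit mask becomes 7: draws land uniformly in {0, ..., 7}, values 5-7 are rejected and redrawn, and the accepted values 0-4 remain exactly equiprobable. Since the mask never exceeds 2n-1, each draw is accepted with probability greater than 1/2, so the expected number of iterations stays below two.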
@@ -497,7 +497,7 @@ void latinHypercube(Random *random, Float *dest, int nSamples, int nDim) {
 			dest[nDim * i + j] = (i + random->nextFloat()) * delta;
 	for (int i = 0; i < nDim; ++i) {
 		for (int j = 0; j < nSamples; ++j) {
-			int other = random->nextInteger(nSamples);
+			int other = random->nextUInt(nSamples);
 			std::swap(dest[nDim * j + i], dest[nDim * other + i]);
 		}
 	}
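For context, latinHypercube() first places one jittered sample in each of the nSamples strata of every dimension and then decorrelates the dimensions with the per-column random swaps shown above. A usage sketch ('random' is an existing Random instance):

	// Sketch: 4 Latin hypercube samples in 2D. After the call, the four
	// x-coordinates occupy the strata [0,.25), [.25,.5), [.5,.75), [.75,1)
	// in some order, and so do the four y-coordinates, independently.
	Float samples[4 * 2];
	latinHypercube(random, samples, 4, 2);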
@@ -336,14 +336,14 @@ SparseWavelet2D::SparseWavelet2D(const SparseWavelet2D *sw)
 }
 
 SparseWavelet2D::SparseWavelet2D(Stream *stream, InstanceManager *Manager) {
-	m_size = (size_t) stream->readULong();
+	m_size = stream->readSize();
 	m_scalingFunction = stream->readSingle();
-	size_t coefficientCount = (size_t) stream->readULong();
+	size_t coefficientCount = stream->readSize();
 #if defined(USE_GOOGLE_DENSE_HASHMAP)
 	m_data.set_empty_key(0xFFFFFFFFFFFFFFFFULL);
 #endif
 	for (size_t i=0; i<coefficientCount; i++) {
-		uint64_t key = stream->readULong();
+		uint64_t key = stream->readSize();
 		m_data[key] = stream->readSingle();
 	}
 	m_maxLevel = log2i(m_size)-1;

@@ -377,12 +377,12 @@ Float SparseWavelet2D::getPixel(const Point2i &pt) const {
 }
 
 void SparseWavelet2D::serialize(Stream *stream, InstanceManager *Manager) const {
-	stream->writeULong(m_size);
+	stream->writeSize(m_size);
 	stream->writeSingle(m_scalingFunction);
-	stream->writeULong(m_data.size());
+	stream->writeSize(m_data.size());
 
 	for (CoefficientIterator it = m_data.begin(); it != m_data.end(); ++it) {
-		stream->writeULong((*it).first);
+		stream->writeSize((*it).first);
 		stream->writeSingle((*it).second);
 	}
 }
@@ -93,7 +93,7 @@ bool SampleIntegrator::render(Scene *scene,
 
 	size_t nCores = sched->getCoreCount();
 	const Sampler *sampler = static_cast<const Sampler *>(sched->getResource(samplerResID, 0));
-	uint64_t sampleCount = sampler->getSampleCount();
+	size_t sampleCount = sampler->getSampleCount();
 
 	Log(EInfo, "Starting render job (%ix%i, %lld %s, " SIZE_T_FMT
 		" %s, " SSE_STR ") ..", film->getCropSize().x, film->getCropSize().y,

@@ -152,7 +152,7 @@ void SampleIntegrator::renderBlock(const Scene *scene,
 			if (stop)
 				break;
 			sampler->generate();
-			for (uint64_t j = 0; j<sampler->getSampleCount(); j++) {
+			for (size_t j = 0; j<sampler->getSampleCount(); j++) {
 				rRec.newQuery(RadianceQueryRecord::ECameraRay, camera->getMedium());
 				if (needsLensSample)
 					lensSample = rRec.nextSample2D();

@@ -176,7 +176,7 @@ void SampleIntegrator::renderBlock(const Scene *scene,
 				break;
 			sampler->generate();
 			mean = meanSqr = Spectrum(0.0f);
-			for (uint64_t j = 0; j<sampler->getSampleCount(); j++) {
+			for (size_t j = 0; j<sampler->getSampleCount(); j++) {
 				rRec.newQuery(RadianceQueryRecord::ECameraRay, camera->getMedium());
 				if (needsLensSample)
 					lensSample = rRec.nextSample2D();

@@ -214,7 +214,7 @@ void SampleIntegrator::renderBlock(const Scene *scene,
 			if (stop)
 				break;
 			sampler->generate();
-			for (uint64_t j = 0; j<sampler->getSampleCount(); j++) {
+			for (size_t j = 0; j<sampler->getSampleCount(); j++) {
 				rRec.newQuery(RadianceQueryRecord::ECameraRay, camera->getMedium());
 				if (needsLensSample)
 					lensSample = rRec.nextSample2D();

@@ -239,7 +239,7 @@ void SampleIntegrator::renderBlock(const Scene *scene,
 				break;
 			sampler->generate();
 			mean = meanSqr = Spectrum(0.0f);
-			for (uint64_t j = 0; j<sampler->getSampleCount(); j++) {
+			for (size_t j = 0; j<sampler->getSampleCount(); j++) {
 				rRec.newQuery(RadianceQueryRecord::ECameraRay, camera->getMedium());
 				if (needsLensSample)
 					lensSample = rRec.nextSample2D();
@@ -21,7 +21,7 @@
 
 MTS_NAMESPACE_BEGIN
 
-HemisphereSampler::HemisphereSampler(int M, int N) : m_M(M), m_N(N) {
+HemisphereSampler::HemisphereSampler(uint32_t M, uint32_t N) : m_M(M), m_N(N) {
 	m_entries = new SampleEntry[m_M*m_N];
 	m_uk = new Vector[m_N];
 	m_vk = new Vector[m_N];

@@ -36,8 +36,8 @@ HemisphereSampler::~HemisphereSampler() {
 }
 
 void HemisphereSampler::generateDirections(const Intersection &its, Sampler *sampler) {
-	for (unsigned int j=0; j<m_M; j++) {
-		for (unsigned int k=0; k<m_N; k++) {
+	for (uint32_t j=0; j<m_M; j++) {
+		for (uint32_t k=0; k<m_N; k++) {
 			SampleEntry &entry = m_entries[j*m_N + k];
 			Point2 sample = sampler->independent2D();
 

@@ -57,7 +57,7 @@ void HemisphereSampler::generateDirections(const Intersection &its, Sampler *sam
 
 	/* Precompute planar vectors - see "Practical Global Illumination" by Jaroslav Krivanek
 	   and Pascal Gautron for more details on this notation */
-	for (unsigned int k=0; k<m_N; k++) {
+	for (uint32_t k=0; k<m_N; k++) {
 		Float phi = 2*M_PI*(k+.5f)/m_N,
 			  vk = phi - M_PI/2,
 			  vkMinus = (2*M_PI*k)/m_N + M_PI/2;

@@ -84,7 +84,7 @@ void HemisphereSampler::process(const Intersection &its) {
 	m_hMinRestricted = std::numeric_limits<Float>::infinity();
 
 	Float invDists = 0;
-	for (unsigned int j=0; j<m_M; j++) {
+	for (uint32_t j=0; j<m_M; j++) {
 		const Float cosThetaMinus = std::sqrt(1-j/(Float)m_M),
 			sinThetaMinus = std::sqrt(j/(Float)m_M),
 			cosTheta = std::sqrt(1-(j+.5f)/m_M),

@@ -92,7 +92,7 @@ void HemisphereSampler::process(const Intersection &its) {
 			cosThetaPlus = std::sqrt(1-(j+1)/(Float)m_M),
 			cosThetaDiff = cosThetaMinus - cosThetaPlus,
 			tanTheta = sinTheta / cosTheta;
-		for (unsigned int k=0; k<m_N; k++) {
+		for (uint32_t k=0; k<m_N; k++) {
 			const SampleEntry &entry = m_entries[j*m_N + k];
 
 			/* Rotational gradient - \pi/(MN) * \sum_{k=0}^{N-1}(v_k \sum_{j=0}^{M-1}) \tan\theta_j * L_{jk}) */

@@ -239,9 +239,9 @@ IrradianceCache::IrradianceCache(Stream *stream, InstanceManager *manager) :
 	m_clampScreen = stream->readBool();
 	m_clampNeighbor = stream->readBool();
 	m_useGradients = stream->readBool();
-	unsigned int recordCount = stream->readUInt();
+	uint32_t recordCount = stream->readUInt();
 	m_records.reserve(recordCount);
-	for (unsigned int i=0; i<recordCount; ++i) {
+	for (uint32_t i=0; i<recordCount; ++i) {
 		Record *sample = new Record(stream);
 		Float validRadius = sample->R0 / (2*m_kappa);
 		m_octree.insert(sample, AABB(

@@ -266,8 +266,8 @@ void IrradianceCache::serialize(Stream *stream, InstanceManager *manager) const
 	stream->writeBool(m_clampScreen);
 	stream->writeBool(m_clampNeighbor);
 	stream->writeBool(m_useGradients);
-	stream->writeUInt((unsigned int) m_records.size());
-	for (unsigned int i=0; i<m_records.size(); ++i)
+	stream->writeUInt(m_records.size());
+	for (uint32_t i=0; i<m_records.size(); ++i)
 		m_records[i]->serialize(stream);
 }
@@ -155,7 +155,7 @@ SparseMipmap3D::SparseMipmap3D(Stream *stream, InstanceManager *manager) {
 	m_aabb = AABB(stream);
 	m_size = (size_t) stream->readUInt();
 
-	size_t nodeCount = (size_t) stream->readULong();
+	size_t nodeCount = stream->readSize();
 	m_nodes.resize(nodeCount);
 	for (size_t i=0; i<nodeCount; ++i) {
 		stream->readIntArray(m_nodes[i].child, 8);

@@ -169,7 +169,7 @@ SparseMipmap3D::SparseMipmap3D(Stream *stream, InstanceManager *manager) {
 void SparseMipmap3D::serialize(Stream *stream, InstanceManager *manager) const {
 	m_aabb.serialize(stream);
 	stream->writeUInt(m_size);
-	stream->writeULong(m_nodes.size());
+	stream->writeSize(m_nodes.size());
 
 	for (size_t i=0; i<m_nodes.size(); ++i) {
 		stream->writeIntArray(m_nodes[i].child, 8);
@@ -36,12 +36,12 @@ PhotonMap::PhotonMap(size_t maxPhotons)
 PhotonMap::PhotonMap(Stream *stream, InstanceManager *manager) {
 	m_aabb = AABB(stream);
 	m_balanced = stream->readBool();
-	m_maxPhotons = (size_t) stream->readULong();
-	m_minPhotons = (size_t) stream->readULong();
-	m_lastInnerNode = (size_t) stream->readULong();
-	m_lastRChildNode = (size_t) stream->readULong();
+	m_maxPhotons = stream->readSize();
+	m_minPhotons = stream->readSize();
+	m_lastInnerNode = stream->readSize();
+	m_lastRChildNode = stream->readSize();
 	m_scale = (Float) stream->readFloat();
-	m_photonCount = (size_t) stream->readULong();
+	m_photonCount = stream->readSize();
 	m_photons = new Photon[m_maxPhotons + 1];
 	for (size_t i=1; i<=m_maxPhotons; ++i)
 		m_photons[i] = Photon(stream);

@@ -69,12 +69,12 @@ void PhotonMap::serialize(Stream *stream, InstanceManager *manager) const {
 		m_photonCount * 20.0f / 1024.0f);
 	m_aabb.serialize(stream);
 	stream->writeBool(m_balanced);
-	stream->writeULong(m_maxPhotons);
-	stream->writeULong(m_minPhotons);
-	stream->writeULong(m_lastInnerNode);
-	stream->writeULong(m_lastRChildNode);
+	stream->writeSize(m_maxPhotons);
+	stream->writeSize(m_minPhotons);
+	stream->writeSize(m_lastInnerNode);
+	stream->writeSize(m_lastRChildNode);
 	stream->writeFloat(m_scale);
-	stream->writeULong(m_photonCount);
+	stream->writeSize(m_photonCount);
 	for (size_t i=1; i<=m_maxPhotons; ++i)
 		m_photons[i].serialize(stream);
 }

@@ -649,11 +649,6 @@ Spectrum PhotonMap::estimateVolumeRadiance(const MediumSamplingRecord &mRec, con
 	return result * (m_scale / volFactor);
 }
 
-
-void PhotonMap::setScale(Float value) {
-	m_scale = value;
-}
-
 void PhotonMap::setMinPhotons(int minPhotons) {
 	m_minPhotons = minPhotons;
 }

@@ -661,7 +656,7 @@ void PhotonMap::setMinPhotons(int minPhotons) {
 void PhotonMap::dumpOBJ(const std::string &filename) {
 	std::ofstream os(filename.c_str());
 	os << "o Photons" << endl;
-	for (size_t i=0; i<getPhotonCount(); i++) {
+	for (size_t i=1; i<=getPhotonCount(); i++) {
 		Point p = getPhoton(i).getPosition();
 		os << "v " << p.x << " " << p.y << " " << p.z << endl;
 	}
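Since getPhoton() now takes the heap position directly, whole-map traversals follow the 1-based pattern that dumpOBJ() uses above. A sketch of the idiom:

	// Sketch: visiting every stored photon under 1-based indexing.
	for (size_t i = 1; i <= pmap->getPhotonCount(); ++i) {
		const Photon &photon = pmap->getPhoton(i); // slot 0 is unused
		// ... process photon ...
	}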
@@ -26,19 +26,19 @@ Sampler::Sampler(const Properties &props)
 
 Sampler::Sampler(Stream *stream, InstanceManager *manager)
  : ConfigurableObject(stream, manager) {
-	m_sampleCount = stream->readULong();
-	unsigned int n1DArrays = stream->readUInt();
-	for (unsigned int i=0; i<n1DArrays; ++i)
+	m_sampleCount = stream->readSize();
+	uint32_t n1DArrays = stream->readUInt();
+	for (uint32_t i=0; i<n1DArrays; ++i)
 		request1DArray(stream->readUInt());
-	unsigned int n2DArrays = stream->readUInt();
-	for (unsigned int i=0; i<n2DArrays; ++i)
+	uint32_t n2DArrays = stream->readUInt();
+	for (uint32_t i=0; i<n2DArrays; ++i)
 		request2DArray(stream->readUInt());
 }
 
 void Sampler::serialize(Stream *stream, InstanceManager *manager) const {
 	ConfigurableObject::serialize(stream, manager);
-	stream->writeULong(m_sampleCount);
+	stream->writeSize(m_sampleCount);
 	stream->writeUInt(m_req1D.size());
 	for (size_t i=0; i<m_req1D.size(); ++i)
 		stream->writeUInt(m_req1D[i]);

@@ -57,22 +57,22 @@ void Sampler::advance() {
 	m_sampleDepth1DArray = m_sampleDepth2DArray = 0;
 }
 
-void Sampler::setSampleIndex(uint64_t sampleIndex) {
+void Sampler::setSampleIndex(size_t sampleIndex) {
 	m_sampleIndex = sampleIndex;
 	m_sampleDepth1DArray = m_sampleDepth2DArray = 0;
 }
 
-void Sampler::request1DArray(unsigned int size) {
+void Sampler::request1DArray(uint32_t size) {
 	m_req1D.push_back(size);
 	m_sampleArrays1D.push_back(new Float[m_sampleCount * size]);
 }
 
-void Sampler::request2DArray(unsigned int size) {
+void Sampler::request2DArray(uint32_t size) {
 	m_req2D.push_back(size);
 	m_sampleArrays2D.push_back(new Point2[m_sampleCount * size]);
 }
 
-Point2 *Sampler::next2DArray(unsigned int size) {
+Point2 *Sampler::next2DArray(uint32_t size) {
 	Assert(m_sampleIndex < m_sampleCount);
 	if (m_sampleDepth2DArray < (int) m_req2D.size()) {
 		Assert(m_req2D[m_sampleDepth2DArray] == size);

@@ -84,7 +84,7 @@ Point2 *Sampler::next2DArray(unsigned int size) {
 	}
 }
 
-Float *Sampler::next1DArray(unsigned int size) {
+Float *Sampler::next1DArray(uint32_t size) {
 	Assert(m_sampleIndex < m_sampleCount);
 	if (m_sampleDepth1DArray < (int) m_req1D.size()) {
 		Assert(m_req1D[m_sampleDepth1DArray] == size);
@@ -84,8 +84,8 @@ TriMesh::TriMesh(Stream *stream, InstanceManager *manager)
 	m_aabb = AABB(stream);
 
 	uint32_t flags = stream->readUInt();
-	m_vertexCount = (size_t) stream->readULong();
-	m_triangleCount = (size_t) stream->readULong();
+	m_vertexCount = stream->readSize();
+	m_triangleCount = stream->readSize();
 
 	m_positions = new Point[m_vertexCount];
 	stream->readFloatArray(reinterpret_cast<Float *>(m_positions),

@@ -190,8 +190,8 @@ TriMesh::TriMesh(Stream *_stream, int index)
 		stream = new ZStream(stream);
 
 	uint32_t flags = stream->readUInt();
-	m_vertexCount = (size_t) stream->readULong();
-	m_triangleCount = (size_t) stream->readULong();
+	m_vertexCount = stream->readSize();
+	m_triangleCount = stream->readSize();
 
 	bool fileDoublePrecision = flags & EDoublePrecision;
 	m_faceNormals = flags & EFaceNormals;

@@ -678,8 +678,8 @@ void TriMesh::serialize(Stream *stream, InstanceManager *manager) const {
 	stream->writeString(m_name);
 	m_aabb.serialize(stream);
 	stream->writeUInt(flags);
-	stream->writeULong(m_vertexCount);
-	stream->writeULong(m_triangleCount);
+	stream->writeSize(m_vertexCount);
+	stream->writeSize(m_triangleCount);
 
 	stream->writeFloatArray(reinterpret_cast<Float *>(m_positions),
 		m_vertexCount * sizeof(Point)/sizeof(Float));

@@ -764,8 +764,8 @@ void TriMesh::serialize(Stream *_stream) const {
 		flags |= EFaceNormals;
 
 	stream->writeUInt(flags);
-	stream->writeULong(m_vertexCount);
-	stream->writeULong(m_triangleCount);
+	stream->writeSize(m_vertexCount);
+	stream->writeSize(m_triangleCount);
 
 	stream->writeFloatArray(reinterpret_cast<Float *>(m_positions),
 		m_vertexCount * sizeof(Point)/sizeof(Float));
@@ -249,6 +249,10 @@ public:
 		mRec.pdfFailure = mRec.pdfFailure * m_mediumSamplingWeight + (1-m_mediumSamplingWeight);
 	}
 
+	bool isHomogeneous() const {
+		return true;
+	}
+
 	std::string toString() const {
 		std::ostringstream oss;
 		oss << "HomogeneousMedium[" << endl
@@ -39,7 +39,7 @@ public:
 
 	HaltonSequence(const Properties &props) : Sampler(props) {
 		/* Number of samples per pixel when used with a sampling-based integrator */
-		m_sampleCount = (uint64_t) props.getLong("sampleCount", 1);
+		m_sampleCount = props.getSize("sampleCount", 1);
 	}
 
 	void serialize(Stream *stream, InstanceManager *manager) const {

@@ -64,7 +64,7 @@ public:
 		m_sampleIndex++;
 	}
 
-	void setSampleIndex(uint64_t sampleIndex) {
+	void setSampleIndex(size_t sampleIndex) {
 		m_sampleDepth = 0;
 		m_sampleIndex = sampleIndex;
 	}
@@ -40,7 +40,7 @@ public:
 
 	HammersleySequence(const Properties &props) : Sampler(props) {
 		/* Number of samples per pixel when used with a sampling-based integrator */
-		m_sampleCount = (uint64_t) props.getLong("sampleCount", 1);
+		m_sampleCount = props.getSize("sampleCount", 1);
 		m_invSamplesPerPixel = 1.0f / m_sampleCount;
 	}
 

@@ -67,7 +67,7 @@ public:
 		m_sampleDepth = 0;
 	}
 
-	void setSampleIndex(uint64_t sampleIndex) {
+	void setSampleIndex(size_t sampleIndex) {
 		m_sampleDepth = 0;
 		m_sampleIndex = sampleIndex;
 	}
@@ -31,7 +31,7 @@ public:
 
 	IndependentSampler(const Properties &props) : Sampler(props) {
 		/* Number of samples per pixel when used with a sampling-based integrator */
-		m_sampleCount = (uint64_t) props.getLong("sampleCount", 1);
+		m_sampleCount = props.getSize("sampleCount", 1);
 		m_random = new Random();
 	}
@@ -45,7 +45,7 @@ public:
 
 	LowDiscrepancySampler(const Properties &props) : Sampler(props) {
 		/* Number of samples per pixel when used with a sampling-based integrator */
-		m_sampleCount = (uint64_t) props.getLong("sampleCount", 4);
+		m_sampleCount = props.getSize("sampleCount", 4);
 
 		/* Depth, up to which which low discrepancy samples are guaranteed to be available. */
 		m_depth = props.getInteger("depth", 3);

@@ -122,35 +122,35 @@ public:
 		sample.y = sobol2(n, scramble[1]);
 	}
 
-	inline void generate1D(Float *samples, unsigned int sampleCount) {
-		uint32_t scramble = m_random->nextLong() & 0xFFFFFFFF;
-		for (unsigned int i = 0; i < sampleCount; ++i)
+	inline void generate1D(Float *samples, size_t sampleCount) {
+		uint32_t scramble = m_random->nextULong() & 0xFFFFFFFF;
+		for (size_t i = 0; i < sampleCount; ++i)
 			samples[i] = vanDerCorput(i, scramble);
 		m_random->shuffle(samples, samples + sampleCount);
 	}
 
-	inline void generate2D(Point2 *samples, unsigned int sampleCount) {
+	inline void generate2D(Point2 *samples, size_t sampleCount) {
 		union {
 			uint64_t qword;
 			uint32_t dword[2];
 		} scramble;
-		scramble.qword = m_random->nextLong();
-		for (unsigned int i = 0; i < sampleCount; ++i)
+		scramble.qword = m_random->nextULong();
+		for (size_t i = 0; i < sampleCount; ++i)
 			sample02(i, scramble.dword, samples[i]);
 		m_random->shuffle(samples, samples + sampleCount);
 	}
 
 	void generate() {
 		for (int i=0; i<m_depth; ++i) {
-			generate1D(m_samples1D[i], (unsigned int) m_sampleCount);
-			generate2D(m_samples2D[i], (unsigned int) m_sampleCount);
+			generate1D(m_samples1D[i], m_sampleCount);
+			generate2D(m_samples2D[i], m_sampleCount);
 		}
 
 		for (size_t i=0; i<m_req1D.size(); i++)
-			generate1D(m_sampleArrays1D[i], (unsigned int) m_sampleCount * m_req1D[i]);
+			generate1D(m_sampleArrays1D[i], m_sampleCount * m_req1D[i]);
 
 		for (size_t i=0; i<m_req2D.size(); i++)
-			generate2D(m_sampleArrays2D[i], (unsigned int) m_sampleCount * m_req2D[i]);
+			generate2D(m_sampleArrays2D[i], m_sampleCount * m_req2D[i]);
 
 		m_sampleIndex = 0;
 		m_sampleDepth1D = m_sampleDepth2D = 0;

@@ -163,7 +163,7 @@ public:
 		m_sampleDepth1DArray = m_sampleDepth2DArray = 0;
 	}
 
-	void setSampleIndex(uint64_t sampleIndex) {
+	void setSampleIndex(size_t sampleIndex) {
 		m_sampleIndex = sampleIndex;
 		m_sampleDepth1D = m_sampleDepth2D = 0;
 		m_sampleDepth1DArray = m_sampleDepth2DArray = 0;
@@ -46,8 +46,8 @@ public:
 		m_permutations2D = new unsigned int*[m_depth];
 
 		for (int i=0; i<m_depth; i++) {
-			m_permutations1D[i] = new unsigned int[(size_t) m_sampleCount];
-			m_permutations2D[i] = new unsigned int[(size_t) m_sampleCount];
+			m_permutations1D[i] = new unsigned int[m_sampleCount];
+			m_permutations2D[i] = new unsigned int[m_sampleCount];
 		}
 
 		m_invResolution = 1 / (Float) m_resolution;

@@ -63,8 +63,8 @@ public:
 		m_permutations1D = new unsigned int*[m_depth];
 		m_permutations2D = new unsigned int*[m_depth];
 		for (int i=0; i<m_depth; i++) {
-			m_permutations1D[i] = new unsigned int[(size_t) m_sampleCount];
-			m_permutations2D[i] = new unsigned int[(size_t) m_sampleCount];
+			m_permutations1D[i] = new unsigned int[m_sampleCount];
+			m_permutations2D[i] = new unsigned int[m_sampleCount];
 		}
 		m_invResolution = 1.0f / m_resolution;
 		m_invResolutionSquare = 1.0f / m_sampleCount;

@@ -99,8 +99,8 @@ public:
 		sampler->m_permutations1D = new unsigned int*[m_depth];
 		sampler->m_permutations2D = new unsigned int*[m_depth];
 		for (int i=0; i<m_depth; i++) {
-			sampler->m_permutations1D[i] = new unsigned int[(size_t) m_sampleCount];
-			sampler->m_permutations2D[i] = new unsigned int[(size_t) m_sampleCount];
+			sampler->m_permutations1D[i] = new unsigned int[m_sampleCount];
+			sampler->m_permutations2D[i] = new unsigned int[m_sampleCount];
 		}
 		for (size_t i=0; i<m_req1D.size(); ++i)
 			sampler->request2DArray(m_req1D[i]);

@@ -111,27 +111,27 @@ public:
 
 	void generate() {
 		for (int i=0; i<m_depth; i++) {
-			for (uint64_t j=0; j<m_sampleCount; j++)
+			for (size_t j=0; j<m_sampleCount; j++)
 				m_permutations1D[i][j] = (unsigned int) j;
 			m_random->shuffle(&m_permutations1D[i][0], &m_permutations1D[i][m_sampleCount]);
 
-			for (uint64_t j=0; j<m_sampleCount; j++)
+			for (size_t j=0; j<m_sampleCount; j++)
 				m_permutations2D[i][j] = (unsigned int) j;
 			m_random->shuffle(&m_permutations2D[i][0], &m_permutations2D[i][m_sampleCount]);
 		}
 
 		for (size_t i=0; i<m_req1D.size(); i++)
-			latinHypercube(m_random, m_sampleArrays1D[i], m_req1D[i] * (size_t) m_sampleCount, 1);
+			latinHypercube(m_random, m_sampleArrays1D[i], m_req1D[i] * m_sampleCount, 1);
 		for (size_t i=0; i<m_req2D.size(); i++)
 			latinHypercube(m_random, reinterpret_cast<Float *>(m_sampleArrays2D[i]),
-				m_req2D[i] * (size_t) m_sampleCount, 2);
+				m_req2D[i] * m_sampleCount, 2);
 
 		m_sampleIndex = 0;
 		m_sampleDepth1D = m_sampleDepth2D = 0;
 		m_sampleDepth1DArray = m_sampleDepth2DArray = 0;
 	}
 
-	void setSampleIndex(uint64_t sampleIndex) {
+	void setSampleIndex(size_t sampleIndex) {
 		m_sampleIndex = sampleIndex;
 		m_sampleDepth1D = m_sampleDepth2D = 0;
 		m_sampleDepth1DArray = m_sampleDepth2DArray = 0;