Initial commit: Final state of the master project
Research/scene/DirectionalLight.cpp (Normal file, 40 lines)
@@ -0,0 +1,40 @@
#include "DirectionalLight.h"


DirectionalLight::DirectionalLight()
{
    mDirection = glm::vec3(0);
    mActive = false;
}


DirectionalLight::~DirectionalLight()
{
}

glm::vec3 DirectionalLight::GetDirection()
{
    if (!mActive)
        return glm::vec3(0);
    return mDirection;
}

void DirectionalLight::SetDirection(glm::vec3 dir)
{
    if (dir != glm::vec3(0))
    {
        dir = glm::normalize(dir);
    }
    mDirection = dir;
    mActive = true;
}

bool DirectionalLight::GetActive()
{
    return mActive;
}

void DirectionalLight::SetActive(bool active)
{
    mActive = active;
}
Research/scene/DirectionalLight.h (Normal file, 19 lines)
@@ -0,0 +1,19 @@
#pragma once
#include "../inc/glm/geometric.hpp"

class DirectionalLight
{
public:
    DirectionalLight();
    ~DirectionalLight();

    glm::vec3 GetDirection();
    void SetDirection(glm::vec3 dir);

    bool GetActive();
    void SetActive(bool active);
private:
    glm::vec3 mDirection;
    bool mActive;
};
Research/scene/Material/BaseMaterial.h (Normal file, 40 lines)
@@ -0,0 +1,40 @@
#pragma once
#include "../../inc/glm/common.hpp"
#include <string>
#include "../../core/Hashers.h"

class BaseMaterial
{
public:
    // Should return a 10-bit per channel precision vec3
    virtual glm::u16vec3 GetProperties() const = 0;
    // Should update the material so that GetProperties returns the given properties
    virtual void SetProperties(glm::u16vec3 properties) = 0;

    virtual std::string GetTypeSuffix() const = 0;

    inline bool operator==(const BaseMaterial &other) const
    {
        auto otherV = other.GetProperties();
        auto thisV = GetProperties();
        return otherV == thisV;
    }
};

namespace std {
    template<>
    struct hash<BaseMaterial> {
        size_t operator()(BaseMaterial const &value) const {
            glm::u16vec3 properties = value.GetProperties();
            // Since materials are only allowed to store 10 bits per channel, we should be able to hash them
            // perfectly in 32 bits.
#ifdef ENVIRONMENT64
            return std::hash<glm::u16vec3>()(properties);
#else
            unsigned short mask = 0x3FF; // 10-bit channel mask
            return (((size_t) (properties.x & mask)) << 20) | (((size_t) (properties.y & mask)) << 10) |
                (size_t) (properties.z & mask);
#endif
        }
    };
}
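Not part of this commit: a minimal sketch of what a concrete BaseMaterial implementation could look like, using only the interface declared above. The class name GreyMaterial and the single-intensity representation are illustrative assumptions.

// Hypothetical example (not in this repository): a grey-scale material that
// stores one 10-bit intensity and reports it in all three property channels.
#include "BaseMaterial.h"

class GreyMaterial : public BaseMaterial
{
public:
    // BaseMaterial contract: return a 10-bit-per-channel vec3
    glm::u16vec3 GetProperties() const override { return glm::u16vec3(mIntensity); }
    // BaseMaterial contract: make GetProperties return the given properties afterwards
    void SetProperties(glm::u16vec3 properties) override { mIntensity = properties.x & 0x3FF; }
    std::string GetTypeSuffix() const override { return "g"; }
private:
    unsigned short mIntensity = 0;
};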
Research/scene/Material/BitsMaterial.h (Normal file, 170 lines)
@@ -0,0 +1,170 @@
#pragma once

#include "../../core/BitHelper.h"
#include "../../core/Defines.h"
#include "../../core/CollectionHelper.h"
#include "../../inc/glm/common.hpp"
#include <cmath>
#include <cassert>
#include <vector>
#include <string>
#include <ostream>
#include <istream>

template <unsigned N>
class BitsMaterial
{
public:
    const static size_t BYTECOUNT = N / 8 + ((N % 8) == 0 ? 0 : 1); // Calculate the space needed to store the required number of bits
    const static unsigned8 BITS = N;
private:
    unsigned8 mValue[BYTECOUNT];

    static size_t GrabBits(size_t value, unsigned8 startBit, unsigned8 length) {
        return ((BitHelper::GetLSMask<size_t>(startBit, startBit + length)) & value) >> startBit;
    }
    static size_t GrabBits(size_t value, unsigned8 startBit) { return GrabBits(value, startBit, N); }

    inline void Init(size_t value)
    {
        for (unsigned8 i = 0; i < BYTECOUNT; i++)
            mValue[BYTECOUNT - i - 1] = (unsigned8)GrabBits(value, i * 8, 8);
    }
public:
    BitsMaterial() { for (size_t i = 0; i < BYTECOUNT; i++) mValue[i] = 0; }
    BitsMaterial(size_t value) { Init(value); }
    // Grab some bits from the number and store them in this material
    BitsMaterial(unsigned value, unsigned8 startBit) { Init(GrabBits(value, startBit)); }
    ~BitsMaterial() {}

    bool Empty() { return GetValue() == 0; }

    glm::u16vec3 GetProperties() const {
        unsigned32 value = (unsigned32)GetValue();
        return glm::u16vec3(GrabBits(value, 21, 11), GrabBits(value, 10, 11), GrabBits(value, 0, 10));
    }
    void SetProperties(glm::u16vec3 material)
    {
        unsigned32 value = (((unsigned32)material.x) << 21) | (((unsigned32)material.y & BitHelper::GetLSMask<unsigned32>(0, 11)) << 10) | (((unsigned32)material.z) & BitHelper::GetLSMask<unsigned32>(0, 10));
        Init(value);
    }

    size_t GetValue() const {
        size_t value = 0;
        for (unsigned8 i = 0; i < BYTECOUNT; i++)
            value |= ((size_t)mValue[i]) << ((BYTECOUNT - i - 1) * 8); // Get the byte, shift it to the correct position, and add it to the GetValue() result
        return value;
    }

    std::string GetTypeSuffix() const {
        return "b" + std::to_string(N);
    }

    static float Distance(BitsMaterial a, BitsMaterial b)
    {
        size_t maxValue = BitHelper::Exp2(N);
        return (float)std::abs((float)b.GetValue() - (float)a.GetValue()) / (float)maxValue;
    }

    static BitsMaterial Average(const std::vector<BitsMaterial>& values)
    {
        size_t sum = 0;
        for (auto value : values) sum += value.GetValue();
        sum /= values.size();
        return BitsMaterial(sum);
    }

    static BitsMaterial WeightedAverage(const std::vector<BitsMaterial>& values, std::vector<float> weights)
    {
        float sum = 0;
        for (size_t i = 0; i < values.size(); i++) sum += (float)values[i].GetValue() * weights[i];
        sum /= (float)CollectionHelper::Sum(weights);
        return BitsMaterial((size_t)sum);
    }

    void SetLS(size_t index, bool value)
    {
        size_t byte = BYTECOUNT - 1 - (index >> 3); // mValue stores the most significant byte first
        unsigned8 mask = BitHelper::GetLSSingleBitMask<unsigned8>(index & 0x07);
        // Clear the bit
        mValue[byte] &= ~mask;
        // Set the bit
        if (value) mValue[byte] |= mask;
    }

    bool GetLS(size_t index)
    {
        size_t byte = BYTECOUNT - 1 - (index >> 3); // mValue stores the most significant byte first
        size_t bit = index % 8;
        return BitHelper::GetLS(mValue[byte], (unsigned8)bit);
    }

    void SetHS(size_t index, bool value)
    {
        SetLS(N - index - 1, value);
    }

    bool GetHS(size_t index)
    {
        return GetLS(N - index - 1);
    }

    std::vector<unsigned8> Serialize() const {
        std::vector<unsigned8> res(BYTECOUNT);
        for (size_t i = 0; i < BYTECOUNT; i++) res[i] = mValue[i];
        return res;
    }

    void Deserialize(std::vector<unsigned8> value) {
        assert(value.size() == BYTECOUNT);
        for (size_t i = 0; i < BYTECOUNT; i++) mValue[i] = value[i];
    }

    void Serialize(std::ostream& stream) const {
        stream.write((char*)&mValue[0], BYTECOUNT);
    }
    void Deserialize(std::istream& stream) {
        stream.read((char*)&mValue[0], BYTECOUNT);
    }

    bool operator==(const BitsMaterial<N>& other) const {
        for (unsigned i = 0; i < BYTECOUNT; i++)
            if (mValue[i] != other.mValue[i]) return false;
        return true;
    }

    bool operator<(const BitsMaterial<N>& other) const
    {
        for (unsigned8 i = 0; i < BYTECOUNT; i++)
            if (mValue[i] != other.mValue[i]) return mValue[i] < other.mValue[i];
        return false; // If they are equal, return false
    }

    operator unsigned() const
    {
        return (unsigned32)GetValue();
    }
};

namespace std
{
    template<unsigned N> struct hash<BitsMaterial<N>>
    {
        size_t operator()(BitsMaterial<N> const& value) const
        {
            // Check if a perfect hash is possible
            if (N < (sizeof(size_t) * 8))
            {
                return value.GetValue();
            }
            else
            {
                // TODO: make a hash for all bytes
                return value.GetValue();
            }
        }
    };
}

template<unsigned N>
struct BitsMaterialComparer
{
    bool operator()(const BitsMaterial<N>& a, const BitsMaterial<N>& b)
    {
        return a.GetValue() < b.GetValue();
    }
};
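Not part of this commit: a short usage sketch for BitsMaterial, exercising only members declared above; the concrete bit widths and values are illustrative.

// Hypothetical usage example for BitsMaterial (not in this repository).
#include "BitsMaterial.h"

void BitsMaterialExample()
{
    // A 16-bit value material, e.g. an opacity channel
    BitsMaterial<16> opacity(0x0ABC);
    size_t raw = opacity.GetValue();            // reads back 0x0ABC

    // Bit-level access: bit 0 counted from the least / most significant side
    opacity.SetLS(0, true);
    bool highestBit = opacity.GetHS(0);

    // Round-trip through the 11/11/10-bit property packing used by Get/SetProperties
    BitsMaterial<32> packed;
    packed.SetProperties(glm::u16vec3(512, 1024, 256));
    glm::u16vec3 props = packed.GetProperties(); // (512, 1024, 256) again
}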
Research/scene/Material/Block.h (Normal file, 56 lines)
@@ -0,0 +1,56 @@
#pragma once
#include "../../core/Defines.h"
#include "../../inc/tbb/parallel_sort.h"
#include <vector>
#include <algorithm>

template<typename T>
struct Block
{
public:
    Block() { mData = std::vector<T>(); }

    Block(unsigned size)
    {
        mData = std::vector<T>(size);
    }
    Block(const std::vector<T>& data)
    {
        // Copy the given vector to mData
        mData = std::vector<T>(data);
    }
    Block(const std::vector<T>& data, const size_t& startIndex, const size_t& endIndex)
    {
        mData = std::vector<T>(data.begin() + startIndex, data.begin() + endIndex);
    }
    ~Block() {}

    size_t size() const { return mData.size(); }
    const T& Get(const size_t& i) const { return mData[i]; }
    const std::vector<T>& GetData() const { return mData; }
    void Set(const size_t& i, T v) { mData[i] = v; }

    template<typename Compare>
    void Sort(const Compare& comparer)
    {
        std::sort(mData.begin(), mData.end(), comparer);
    }

    template<typename Compare>
    void ParallelSort(const Compare& comparer)
    {
        tbb::parallel_sort(mData.begin(), mData.end(), comparer);
    }

    bool operator==(const Block<T>& other) const
    {
        if (other.size() != this->size())
            return false;

        for (size_t i = 0; i < this->size(); i++)
            if (other.Get(i) != this->Get(i)) return false;
        return true;
    }

    const T& operator[](size_t i) const { return mData[i]; }
private:
    std::vector<T> mData;
};
Research/scene/Material/BlockBasedMaterialLibrary.h (Normal file, 38 lines)
@@ -0,0 +1,38 @@
#pragma once
#include "../../core/Defines.h"
#include "../../core/Serializer.h"
#include "../../core/CollectionHelper.h"
#include "MaterialLibrary.h"
#include "Block.h"
#include "../../inc/tbb/parallel_sort.h"
#include <vector>
#include <iterator>

template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
class BlockBasedMaterialLibrary : public MaterialLibrary<T, Comparer, channelsPerPixel>
{
private:
    std::vector<Block<T>> mBlocks;
    std::vector<size_t> mBlocksImportance;
public:
    BlockBasedMaterialLibrary() : MaterialLibrary<T, Comparer, channelsPerPixel>() {}
    BlockBasedMaterialLibrary(std::vector<unsigned char> texture, unsigned short textureSize, MaterialLibraryPointer highestMaterialIndex) : MaterialLibrary<T, Comparer, channelsPerPixel>(texture, textureSize, highestMaterialIndex)
    { }

    // Copy constructor
    BlockBasedMaterialLibrary(const BlockBasedMaterialLibrary& other) : MaterialLibrary<T, Comparer, channelsPerPixel>(other)
    {
        this->mBlocks = other.mBlocks;
        this->mBlocksImportance = other.mBlocksImportance;
    }

    ~BlockBasedMaterialLibrary() {}

    //void Finalize()
    //{
    //    std::srand(time(0));
    //    // TODO: optimize the texture to get the lowest cost for the blocks
    //    std::random_shuffle(mMaterials->begin(), mMaterials->end());
    //    MaterialLibrary::Finalize(false);
    //}
};
Research/scene/Material/Color.cpp (Normal file, 131 lines)
@@ -0,0 +1,131 @@
#include "Color.h"
#include "../../core/ColorHelper.h"
#include "../../core/CollectionHelper.h"
#include "../../core/MathHelper.h"
#include "../../inc/tbb/parallel_reduce.h"
#include "../../inc/tbb/parallel_for.h"
#include "../../inc/tbb/blocked_range.h"
#include "../../core/Serializer.h"
#include <algorithm>
#include <cmath>
#include <cassert>

Color::Color()
{
    mColor = glm::u8vec3(0);
}

Color::Color(glm::u8vec3 color)
{
    mColor = color;
}

glm::u16vec3 Color::GetProperties() const
{
    return glm::u16vec3(mColor);
}

void Color::SetProperties(glm::u16vec3 color)
{
    mColor = glm::u8vec3(color);
}

glm::u8vec3 Color::GetColor() const
{
    return mColor;
}

void Color::SetColor(glm::u8vec3 color)
{
    mColor = color;
}

glm::vec3 Color::GetLAB() const
{
    return ColorHelper::RGBtoLAB(mColor);
}

Color Color::Average(const std::vector<Color>& colors)
{
    glm::vec3 colorLabSum(0);
    for (auto color : colors)
        colorLabSum += ColorHelper::RGBtoLAB(color.GetColor());
    glm::vec3 colorLabAvg = colorLabSum / (float)colors.size();
    return Color(ColorHelper::LABtoRGB(colorLabAvg));
}

Color Color::WeightedAverage(const std::vector<Color>& colors, const std::vector<float>& weights)
{
    // Calculate the weighted average in CIELAB space
    glm::vec3 colorLabSum(0);
    std::vector<glm::vec3> labColors(colors.size());
    float weightSum = 0;
    for (unsigned i = 0; i < colors.size(); i++)
    {
        labColors[i] = colors[i].GetLAB();
        colorLabSum += labColors[i] * weights[i];
        weightSum += weights[i];
    }
    glm::vec3 colorLabAvg = colorLabSum / weightSum;

    return Color(ColorHelper::LABtoRGB(colorLabAvg));
}

float Color::Distance(const Color& color1, const Color& color2)
{
    return ColorHelper::GetDeltaEFromRGB(color1.GetColor(), color2.GetColor()) / 130.f;
}

Color Color::Interpolate(const Color& color1, const Color& color2, float value)
{
    glm::u8vec3 hsv1 = ColorHelper::RGBtoHSV(color1.GetColor());
    glm::u8vec3 hsv2 = ColorHelper::RGBtoHSV(color2.GetColor());
    glm::u8vec3 hsvRes;
    hsvRes.r = MathHelper::lerp(value, hsv1.r, hsv2.r);
    hsvRes.g = MathHelper::lerp(value, hsv1.g, hsv2.g);
    hsvRes.b = MathHelper::lerp(value, hsv1.b, hsv2.b);
    return Color(ColorHelper::HSVtoRGB(hsvRes));
}

std::vector<unsigned8> Color::Serialize() const
{
    std::vector<unsigned8> res(3);
    res[0] = mColor.r;
    res[1] = mColor.g;
    res[2] = mColor.b;
    return res;
}
void Color::Deserialize(const std::vector<unsigned8>& value)
{
    assert(value.size() == 3);
    mColor.r = value[0];
    mColor.g = value[1];
    mColor.b = value[2];
}

bool Color::operator==(const Color& color) const
{
    return this->GetColor() == color.GetColor();
}

bool Color::operator!=(const Color& other) const
{
    return !(*this == other);
}

Color& Color::operator=(const unsigned& value)
{
    mColor.x = (value & 0x00FF0000) >> 16;
    mColor.y = (value & 0x0000FF00) >> 8;
    mColor.z = (value & 0x000000FF);
    return *this;
}

Color::operator unsigned() const
{
    return mColor.x << 16 | mColor.y << 8 | mColor.z;
}

std::string Color::GetTypeSuffix() const { return "c"; }
Research/scene/Material/Color.h (Normal file, 109 lines)
@@ -0,0 +1,109 @@
#pragma once
#include "NearestFinder.h"
#include "../../inc/glm/common.hpp"
#include "../../core/Hashers.h"
#include "../../core/Comparers.h"
#include "../../core/ColorHelper.h"

class Color
{
public:
    const static unsigned8 CHANNELSPERPIXEL = 3;
    const static unsigned8 BITS = 24;

    Color();
    Color(glm::u8vec3 color);

    glm::u16vec3 GetProperties() const;
    void SetProperties(glm::u16vec3 color);

    glm::vec3 GetLAB() const;
    static Color Average(const std::vector<Color>& colors);
    static Color WeightedAverage(const std::vector<Color>& colors, const std::vector<float>& weights);
    // Interpolates between the two colors. Value should be in the range [0, 1) and determines where to interpolate.
    static Color Interpolate(const Color& color1, const Color& color2, float value);
    // Returns the normalized delta-E distance between the two colors
    static float Distance(const Color& color1, const Color& color2);

    glm::u8vec3 GetColor() const;
    void SetColor(glm::u8vec3 color);

    inline unsigned8 GetR() const { return mColor.r; }
    inline unsigned8 GetG() const { return mColor.g; }
    inline unsigned8 GetB() const { return mColor.b; }

    std::string GetTypeSuffix() const;

    std::vector<unsigned8> Serialize() const;
    void Deserialize(const std::vector<unsigned8>& value);

    bool operator==(const Color& color) const;
    bool operator!=(const Color& color) const;

    // Assignment operator, used for easy access to different kinds of materials
    Color& operator=(const unsigned& source);
    operator unsigned() const;
    unsigned8 operator[](unsigned8 i) const
    {
        return mColor[i];
    }
private:
    glm::u8vec3 mColor;
};

namespace std {
    template<>
    struct hash<Color> {
        size_t operator()(const Color &value) const {
            glm::u8vec3 color = value.GetColor();
            return std::hash<glm::u8vec3>()(color);
        }
    };
}

struct ColorCompare
{
    bool operator()(const Color& color1, const Color& color2) const
    {
        return u8vec3comparer()(color1.GetColor(), color2.GetColor());
    }
};


template<> struct NearestFinder<Color>
{
    Color operator()(const Color& source, const std::vector<Color>& colors)
    {
        glm::vec3 lab = source.GetLAB();

        Color nearestColor;
        float minDistance = std::numeric_limits<float>::max();
        for (auto color : colors)
        {
            float distance = ColorHelper::GetDeltaEFromLAB(color.GetLAB(), lab);
            if (distance < minDistance)
            {
                minDistance = distance;
                nearestColor = color;
            }
        }
        return nearestColor;
    }
};

template<> struct ParallelNearestFinder<Color>
{
    Color operator()(const Color& source, const std::vector<Color>& colors)
    {
        // Calculate all distances:
        glm::vec3 lab = source.GetLAB();
        std::vector<float> distances(colors.size());
        tbb::parallel_for(size_t(0), colors.size(), [&](size_t i)
        {
            float res = ColorHelper::GetDeltaEFromLAB(colors[i].GetLAB(), lab);
            if (std::isnan(res)) res = std::numeric_limits<float>::max();
            distances[i] = res;
        });
        return colors[CollectionHelper::MinIndex(distances)];
    }
};
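Not part of this commit: a sketch of how the Color API above might be used. It assumes ColorHelper and the glm headers resolve exactly as in the includes of Color.h.

// Hypothetical usage example for Color (not in this repository).
#include "Color.h"
#include <vector>

void ColorExample()
{
    Color red(glm::u8vec3(255, 0, 0));
    Color blue(glm::u8vec3(0, 0, 255));

    // Averaging happens in CIELAB space, so the result is perceptually balanced
    Color average = Color::Average({ red, blue });

    // Interpolation happens in HSV space; 0.5f picks the halfway point
    Color halfway = Color::Interpolate(red, blue, 0.5f);

    // Normalized delta-E distance between the two colors
    float distance = Color::Distance(red, blue);

    // Nearest-palette lookup using the delta-E metric
    std::vector<Color> palette = { red, blue, average };
    Color nearest = NearestFinder<Color>()(halfway, palette);
}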
Research/scene/Material/ColorAndNormal.h (Normal file, 6 lines)
@@ -0,0 +1,6 @@
#pragma once
#include "ColorAndValue.h"
#include "SmallNormal.h"

typedef ColorAndValue<SmallNormal> ColorAndNormal;
typedef ColorAndValueCompare<SmallNormal, NormalCompare> ColorAndNormalCompare;
Research/scene/Material/ColorAndNormalAndValue.h (Normal file, 9 lines)
@@ -0,0 +1,9 @@
#pragma once
#include "ColorAndValue.h"
#include "SmallNormal.h"
#include "BitsMaterial.h"

typedef MaterialPair<SmallNormal, BitsMaterial<8>> NormalAndValue;
typedef MaterialPairCompare<SmallNormal, NormalCompare, BitsMaterial<8>, BitsMaterialComparer<8>> NormalAndValueCompare;
typedef ColorAndValue<NormalAndValue> ColorAndNormalAndValue;
typedef ColorAndValueCompare<NormalAndValue, NormalAndValueCompare> ColorAndNormalAndValueCompare;
Research/scene/Material/ColorAndOpacity.h (Normal file, 6 lines)
@@ -0,0 +1,6 @@
#pragma once
#include "ColorAndValue.h"
#include "BitsMaterial.h"

typedef ColorAndValue<BitsMaterial<16>> ColorAndOpacity;
typedef ColorAndValueCompare<BitsMaterial<16>, BitsMaterialComparer<16>> ColorAndOpacityCompare;
Research/scene/Material/ColorAndValue.h (Normal file, 110 lines)
@@ -0,0 +1,110 @@
#pragma once
#include "NearestFinder.h"
#include "Color.h"
#include "MaterialPair.h"
#include "../../core/ColorHelper.h"
#include "../../inc/glm/glm.hpp"
#include "../../core/CollectionHelper.h"
#include "../../core/BitHelper.h"

template<typename T>
struct ParallelNearestFinder<MaterialPair<Color, T>>
{
    MaterialPair<Color, T> operator()(const MaterialPair<Color, T>& source, const std::vector<MaterialPair<Color, T>>& values) const
    {
        // Get the color differences
        glm::vec3 lab = source.GetFirst().GetLAB();

        std::vector<float> distances(values.size());
        tbb::parallel_for(size_t(0), values.size(), [&](size_t i)
        {
            float res = ColorHelper::GetDeltaEFromLAB(values[i].GetFirst().GetLAB(), lab);
            if (std::isnan(res)) res = std::numeric_limits<float>::max();
            distances[i] = res;
        });

        size_t colorMinIndex = CollectionHelper::MinIndex(distances);
        float colorMinDistance = distances[colorMinIndex];
        float epsilon = 0.2f;
        std::vector<size_t> minIndices;
        float maxAllowedError = colorMinDistance + epsilon;
        for (size_t i = 0; i < values.size(); i++)
            if (distances[i] <= maxAllowedError) minIndices.push_back(i);

        std::vector<T> minValues;
        for (size_t i = 0; i < minIndices.size(); i++) minValues.push_back(values[minIndices[i]].GetSecond());
        T minValue = ParallelNearestFinder<T>()(source.GetSecond(), minValues);

        for (size_t i = 0; i < minIndices.size(); i++) if (values[minIndices[i]].GetSecond() == minValue) return values[minIndices[i]];
        return values[minIndices[0]];
    }
};


template<typename T>
struct NearestFinder<MaterialPair<Color, T>>
{
    MaterialPair<Color, T> operator()(const MaterialPair<Color, T>& source, const std::vector<MaterialPair<Color, T>>& values) const
    {
        glm::vec3 lab = source.GetFirst().GetLAB();
        float minDistance = std::numeric_limits<float>::max();
        std::vector<float> distances(values.size());
        Color lastColor = values[values.size() - 1].GetFirst();
        for (size_t i = 0; i < values.size(); i++)
        {
            const Color& color = values[i].GetFirst();
            if (i > 0 && color == lastColor)
                distances[i] = distances[i - 1];
            else
            {
                distances[i] = ColorHelper::GetDeltaEFromLAB(color.GetLAB(), lab);
                lastColor = color;
                if (distances[i] < minDistance) minDistance = distances[i];
            }
        }

        float epsilon = 0.5f;
        float maxAllowedError = minDistance + epsilon;

        std::vector<size_t> minIndices;
        for (size_t i = 0; i < values.size(); i++)
            if (distances[i] <= maxAllowedError) minIndices.push_back(i);

        std::vector<T> minValues;
        for (size_t i = 0; i < minIndices.size(); i++) minValues.push_back(values[minIndices[i]].GetSecond());
        T minValue = NearestFinder<T>()(source.GetSecond(), minValues);

        for (size_t i = 0; i < minIndices.size(); i++) if (values[minIndices[i]].GetSecond() == minValue) return values[minIndices[i]];
        return values[minIndices[0]];
    }
};


//// Assignment operator, used for easy access to different kinds of materials
//ColorAndValue& operator=(const unsigned& source)
//{
//    mValue = source & 0x3FF;
//    glm::u8vec3 color;
//    color.r = (source >> (10 + 14 - 1)) & 0xFE;
//    color.g = (source >> (10 + 7 - 1)) & 0xFE;
//    color.b = (source >> (10 - 1)) & 0xFE;
//    mColor = Color(color);
//}
//operator unsigned() const
//{
//    // Value is stored in the 10 least significant bits
//    unsigned32 res = (unsigned32)mValue;
//    // Colors are stored in the 21 most significant bits (7 bits per channel, doesn't fit otherwise...)
//    res |= ((unsigned32)mColor.GetR() & 0xFE) << (10 + 14 - 1);
//    res |= ((unsigned32)mColor.GetG() & 0xFE) << (10 + 7 - 1);
//    res |= ((unsigned32)mColor.GetB() & 0xFE) << (10 - 1);
//    return res;
//}

template<typename T> using ColorAndValue = MaterialPair<Color, T>;
template<typename T, typename TCompare> using ColorAndValueCompare = MaterialPairCompare<Color, ColorCompare, T, TCompare>;
Research/scene/Material/ColorChannel.cpp (Normal file, 23 lines)
@@ -0,0 +1,23 @@
#include "ColorChannel.h"
#include <cassert>

ColorChannel::ColorChannel() { mValue = 0; }
ColorChannel::ColorChannel(unsigned8 value) { mValue = value; }
ColorChannel::~ColorChannel() {}

std::vector<unsigned8> ColorChannel::Serialize() const
{
    return std::vector<unsigned8>(1, mValue);
}

void ColorChannel::Deserialize(const std::vector<unsigned8>& value)
{
    assert(value.size() == 1);
    mValue = value[0];
}

glm::u16vec3 ColorChannel::GetProperties() const { return glm::u16vec3(mValue); }
void ColorChannel::SetProperties(glm::u16vec3 material) { mValue = (unsigned char)material.x; }

unsigned8 ColorChannel::GetValue() const { return mValue; }

ColorChannel::operator unsigned() const { return (unsigned)mValue; }
Research/scene/Material/ColorChannel.h (Normal file, 40 lines)
@@ -0,0 +1,40 @@
#pragma once
#include <vector>
#include <string>
#include "../../inc/glm/common.hpp"
#include "../../core/Defines.h"

class ColorChannel
{
public:
    ColorChannel();
    ColorChannel(unsigned8 value);
    ~ColorChannel();

    glm::u16vec3 GetProperties() const;
    void SetProperties(glm::u16vec3 material);

    std::vector<unsigned8> Serialize() const;
    void Deserialize(const std::vector<unsigned8>& value);

    unsigned8 GetValue() const;

    std::string GetTypeSuffix() const { return "cc"; }

    operator unsigned() const;

private:
    unsigned8 mValue;
};

namespace std
{
    template<>
    struct hash<ColorChannel>
    {
        size_t operator()(const ColorChannel &value) const
        {
            return value.GetValue();
        }
    };
}
Research/scene/Material/MaterialLibrary.h (Normal file, 277 lines)
@@ -0,0 +1,277 @@
#pragma once
#include "../../core/Defines.h"
#include "../../core/Serializer.h"
#include "../../core/CollectionHelper.h"
#include "../../core/BitHelper.h"
#include "MaterialLibraryPointer.h"
#include "NearestFinder.h"
#include "../../inc/tbb/parallel_sort.h"
#include "../../inc/tbb/parallel_for.h"
#include <vector>
#include <iterator>
#include <map>
#include <algorithm>
#include <cmath>
#include <cassert>

// T should be some type with a method called "Serialize()".
// This method should return a type that can be indexed with operator [], where each element is an unsigned char (unsigned8) to put in the texture colors.
// sizeof(T) should give the number of bytes that need to be stored in pixels.
// Examples of working return types are std::vector<unsigned8>, glm::u8vec3, and unsigned char[].
template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
class MaterialLibrary
{
private:
    std::map<T, MaterialLibraryPointer, Comparer> mMaterialPointers;
    bool finalized = false;
    unsigned short mTextureSize;

protected:
    const size_t SIZE_OF_MATERIAL = sizeof(T);
    const unsigned32 PIXELS_PER_MATERIAL = (unsigned32)(SIZE_OF_MATERIAL / channelsPerPixel + (SIZE_OF_MATERIAL % channelsPerPixel == 0 ? 0 : 1));

    std::vector<T>* mMaterials;

    inline MaterialLibraryPointer WrapSetIndex(size_t setIndex) const
    {
        assert(finalized);
        setIndex *= PIXELS_PER_MATERIAL;
        return MaterialLibraryPointer(
            (unsigned16)(setIndex % (size_t)mTextureSize),
            (unsigned16)(setIndex / (size_t)mTextureSize)
        );
    }

    inline size_t GetSetIndex(MaterialLibraryPointer textureIndex) const { assert(finalized); return (textureIndex.x + textureIndex.y * mTextureSize) / PIXELS_PER_MATERIAL; }

    inline void RebuildMaterialPointers()
    {
        assert(finalized);
        mMaterialPointers.clear();
        // Build the result material pointers map:
        for (size_t i = 0; i < mMaterials->size(); i++)
            mMaterialPointers.insert(std::pair<T, MaterialLibraryPointer>(mMaterials->at(i), WrapSetIndex(i)));
    }
public:
    MaterialLibrary() :
        mMaterialPointers(std::map<T, MaterialLibraryPointer, Comparer>()),
        finalized(false),
        mTextureSize(0),
        mMaterials(new std::vector<T>())
    {}

    MaterialLibrary(std::vector<unsigned char> texture, unsigned short textureSize, MaterialLibraryPointer highestMaterialIndex) : MaterialLibrary()
    {
        ReadTexture(texture, textureSize, highestMaterialIndex);
    }

    // Copy constructor
    MaterialLibrary(const MaterialLibrary& other) :
        mMaterialPointers(std::map<T, MaterialLibraryPointer, Comparer>(other.mMaterialPointers)),
        finalized(other.finalized),
        mTextureSize(other.mTextureSize),
        mMaterials(new std::vector<T>(*other.mMaterials))
    {}

    ~MaterialLibrary() { delete mMaterials; }

    void Serialize(std::ostream& file)
    {
        unsigned16 materialTextureSize = GetTextureSize();
        MaterialLibraryPointer maxTextureIndex = GetMaxTextureIndex();
        std::vector<unsigned8> texture = GetTexture();

        // The material texture size and max texture index used to be stored in one 32 bit unsigned integer.
        // However, this caused problems if the material texture contained more than 1024 colors.
        // Therefore, we now use 48 bits for this, preceded by an empty (all-zero) header in the old style, to ensure that the new style can be recognized correctly.
        Serializer<unsigned32>::Serialize(0, file);
        Serializer<unsigned16>::Serialize(materialTextureSize, file);
        Serializer<MaterialLibraryPointer>::Serialize(maxTextureIndex, file);
        Serializer<unsigned8*>::Serialize(&texture[0], (size_t)materialTextureSize * (size_t)materialTextureSize * (size_t)channelsPerPixel, file);
    }

    void Deserialize(std::istream& file)
    {
        unsigned16 materialTextureSize = 0;
        MaterialLibraryPointer maxTextureIndex = MaterialLibraryPointer(0);

        // If the first unsigned32 of a material library is 0, then the new style information is placed after it.
        // If it isn't 0, then use the old style
        unsigned materialTextureSizeSummary;
        Serializer<unsigned>::Deserialize(materialTextureSizeSummary, file);
        if (materialTextureSizeSummary == 0)
        {
            // New style (supports bigger libraries)
            Serializer<unsigned16>::Deserialize(materialTextureSize, file);
            Serializer<MaterialLibraryPointer>::Deserialize(maxTextureIndex, file);
        }
        else
        {
            // Old style
            unsigned mask1 = BitHelper::GetLSMask<unsigned32>(20, 30);
            unsigned mask2 = BitHelper::GetLSMask<unsigned32>(10, 20);
            unsigned mask3 = BitHelper::GetLSMask<unsigned32>(0, 10);
            maxTextureIndex.x = (mask1 & materialTextureSizeSummary) >> 20;
            maxTextureIndex.y = (mask2 & materialTextureSizeSummary) >> 10;
            materialTextureSize = BitHelper::CeilToNearestPowerOfTwo(mask3 & materialTextureSizeSummary);
        }

        size_t textureArraySize = (size_t)materialTextureSize * (size_t)materialTextureSize * (size_t)channelsPerPixel;
        std::vector<unsigned8> texture(textureArraySize);
        Serializer<unsigned8*>::Deserialize(&texture[0], textureArraySize, file);
        ReadTexture(texture, materialTextureSize, maxTextureIndex);
    }

    void AddMaterial(const T& material) {
        if (!finalized) mMaterials->push_back(material);
    }
    void RemoveMaterial(const T& material) { if (!finalized) mMaterials->erase(std::remove(mMaterials->begin(), mMaterials->end(), material), mMaterials->end()); }
    void Finalize(bool filterUnique = true)
    {
        if (finalized) return;

        if (filterUnique)
            CollectionHelper::Unique(*mMaterials, Comparer());

        mTextureSize = GetTextureSize();
        finalized = true;

        RebuildMaterialPointers();
    }
    bool IsFinalized() const { return finalized; }
    std::vector<T> GetMaterials() const
    {
        std::vector<T> materials(mMaterials->size());
        tbb::parallel_for(size_t(0), mMaterials->size(), [&](size_t i)
        {
            materials[i] = mMaterials->at(i);
        });
        return materials;
    }

    T GetNearestMaterial(T material) const {
        return NearestFinder<T>()(material, *mMaterials);
    }
    T GetMaterial(MaterialLibraryPointer materialPointer)
    {
        assert(finalized);
        return mMaterials->at(GetSetIndex(materialPointer));
    }
    std::vector<std::pair<T, MaterialLibraryPointer>> GetMaterialTextureIndices() const
    {
        assert(finalized);
        std::vector<std::pair<T, MaterialLibraryPointer>> materials;
        size_t setIndex = 0;
        for (T material : (*mMaterials))
        {
            auto textureIndex = WrapSetIndex(setIndex++);
            materials.push_back(std::pair<T, MaterialLibraryPointer>(material, textureIndex));
        }
        return materials;
    }
    MaterialLibraryPointer GetTextureIndex(const T& material) const
    {
        assert(finalized);
        auto materialIt = mMaterialPointers.find(material);
        if (materialIt == mMaterialPointers.end())
            return GetTextureIndex(GetNearestMaterial(material));
        return materialIt->second;
    }

    bool Contains(const T& material) const
    {
        auto materialIt = mMaterialPointers.find(material);
        return materialIt != mMaterialPointers.end();
    }

    MaterialLibraryPointer GetMaxTextureIndex() const
    {
        return WrapSetIndex(mMaterials->size());
    }

    unsigned short GetTextureSize() const {
        if (finalized) return mTextureSize;
        if (mMaterials->empty())
            return 0;
        size_t requiredPixels = PIXELS_PER_MATERIAL * mMaterials->size();
        if (requiredPixels < 4) requiredPixels = 4; // Make sure the texture is always at least 2x2
        unsigned16 v = (unsigned16)std::ceil(std::sqrt(double(requiredPixels)));
        return BitHelper::CeilToNearestPowerOfTwo(v);
    }

    std::vector<unsigned8> GetTexture() const
    {
        std::vector<unsigned8> texture(mTextureSize * mTextureSize * channelsPerPixel);
        size_t i = 0;
        for (T material : (*mMaterials))
        {
            auto materialProperties = material.Serialize();
            unsigned props = (unsigned)materialProperties.size();
            unsigned prop = 0;
            assert(i + channelsPerPixel * PIXELS_PER_MATERIAL <= texture.size());

            for (unsigned pixel = 0; pixel < PIXELS_PER_MATERIAL; pixel++)
            {
                for (unsigned8 channel = 0; channel < channelsPerPixel; channel++)
                {
                    if (prop < props)
                        texture[i + channel] = materialProperties[prop];
                    prop++;
                }
                i += channelsPerPixel;
            }
        }
        return texture;
    }

    void ReadTexture(std::vector<unsigned char> texture, unsigned textureSize, MaterialLibraryPointer maxTextureIndex)
    {
        mTextureSize = textureSize;
        unsigned props = PIXELS_PER_MATERIAL * channelsPerPixel;
        std::vector<unsigned8> curMatProps(SIZE_OF_MATERIAL);
        unsigned maxIndex = maxTextureIndex.x + maxTextureIndex.y * textureSize;
        bool endFound = maxIndex != 0;
        bool firstEmptyMaterialFound = false;
        size_t i = 0;
        while (!(endFound && i >= maxIndex * channelsPerPixel))
        {
            // Read the material from the current pixels
            for (size_t j = 0; j < SIZE_OF_MATERIAL; j++)
                curMatProps[j] = texture[i + j];
            if (!endFound)
            {
                bool allZero = true;
                for (unsigned char prop : curMatProps)
                    if (prop != 0)
                    {
                        allZero = false;
                        break;
                    }
                if (firstEmptyMaterialFound && allZero) endFound = true;
                firstEmptyMaterialFound |= allZero;
            }

            T curMat;
            curMat.Deserialize(curMatProps);
            AddMaterial(curMat);

            i += props;
        }
        finalized = true;
        RebuildMaterialPointers();
    }

    bool operator==(const MaterialLibrary& other) const
    {
        // Check if the material count and finalized state are equal
        if (this->IsFinalized() != other.IsFinalized()
            || this->mMaterials->size() != other.mMaterials->size())
            return false;

        // Check if the materials are equal
        for (size_t i = 0; i < mMaterials->size(); i++)
            if (this->mMaterials->at(i) != other.mMaterials->at(i))
                return false;
        return true;
    }

    bool operator!=(const MaterialLibrary& other) const
    {
        return !(*this == other);
    }
};
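Not part of this commit: a sketch of the add / Finalize / query / Serialize workflow that MaterialLibrary is built for, instantiated with the Color type from this commit. The output file name is an illustrative assumption.

// Hypothetical usage example for MaterialLibrary<Color> (not in this repository).
#include "MaterialLibrary.h"
#include "Color.h"
#include <fstream>

void MaterialLibraryExample()
{
    MaterialLibrary<Color, ColorCompare> library;
    library.AddMaterial(Color(glm::u8vec3(255, 0, 0)));
    library.AddMaterial(Color(glm::u8vec3(0, 255, 0)));

    // Finalize deduplicates the materials and fixes the texture layout
    library.Finalize();

    // Every material now maps to a pixel position in the material texture
    MaterialLibraryPointer ptr = library.GetTextureIndex(Color(glm::u8vec3(255, 0, 0)));

    // The packed texture can be uploaded to the GPU or written to disk
    std::vector<unsigned8> texture = library.GetTexture();
    std::ofstream file("materials.bin", std::ios::binary);   // illustrative file name
    library.Serialize(file);
}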
Research/scene/Material/MaterialLibraryPointer.h (Normal file, 55 lines)
@@ -0,0 +1,55 @@
#pragma once
#include "../../inc/glm/common.hpp"
#include "../../core/Defines.h"
#include "../../core/BitHelper.h"
#include <vector>

class MaterialLibraryPointer :
    public glm::u16vec2
{
public:
    MaterialLibraryPointer() : glm::u16vec2() {}
    MaterialLibraryPointer(const unsigned16 v) : glm::u16vec2(v) {}
    MaterialLibraryPointer(const unsigned16 x, const unsigned16 y) : glm::u16vec2(x, y) {}
    ~MaterialLibraryPointer() {}

    MaterialLibraryPointer& operator=(const unsigned& source)
    {
        x = (source & 0xFFFF0000) >> 16;
        y = source & 0x0000FFFF;
        return *this;
    }

    std::vector<unsigned8> Serialize()
    {
        return BitHelper::SplitInBytes(this->operator unsigned());
    }

    operator unsigned() const
    {
        return (((unsigned)x) << 16) | y;
    }

    bool operator==(const MaterialLibraryPointer& other) const
    {
        return x == other.x && y == other.y;
    }

    bool operator!=(const MaterialLibraryPointer& other) const
    {
        return !(*this == other);
    }
};

// Perfect hash
namespace std
{
    template<>
    struct hash<MaterialLibraryPointer>
    {
        size_t operator()(MaterialLibraryPointer const &value) const
        {
            return (unsigned) value;
        }
    };
}
Research/scene/Material/MaterialPair.h (Normal file, 169 lines)
@@ -0,0 +1,169 @@
#pragma once
#include "Color.h"
#include "../../core/ColorHelper.h"
#include "../../inc/glm/glm.hpp"
#include "../../core/Hashers.h"
#include "../../core/Comparers.h"
#include "../../core/CollectionHelper.h"
#include "../../core/Serializer.h"
#include "../../core/BitHelper.h"
#include <vector>
#include <algorithm>

template<typename U, typename V>
class MaterialPair
{
private:
    U mFirst;
    V mSecond;
public:
    static const unsigned8 BITS = U::BITS + V::BITS;
    static const unsigned8 CHANNELSPERPIXEL = BITS % 32 == 0 ? 4 : 3;

    MaterialPair() : mFirst(U()), mSecond(V()) {}
    MaterialPair(U first, V second) : mFirst(first), mSecond(second) {}
    MaterialPair(std::pair<U, V> pair) : mFirst(pair.first), mSecond(pair.second) {}
    template<typename UC, typename VC>
    MaterialPair(UC first, VC second) : MaterialPair(U(first), V(second)) {}

    const U& GetFirst() const { return mFirst; }
    void SetFirst(U value) { mFirst = value; }
    const V& GetSecond() const { return mSecond; }
    void SetSecond(V value) { mSecond = value; }

    static MaterialPair Average(const std::vector<MaterialPair>& values)
    {
        std::vector<U> firsts(values.size());
        std::vector<V> seconds(values.size());
        for (size_t i = 0; i < values.size(); i++)
        {
            firsts[i] = values[i].GetFirst();
            seconds[i] = values[i].GetSecond();
        }
        return MaterialPair(U::Average(firsts), V::Average(seconds));
    }
    static MaterialPair WeightedAverage(const std::vector<MaterialPair>& values, const std::vector<float>& weights)
    {
        std::vector<U> firsts(values.size());
        std::vector<V> seconds(values.size());
        for (size_t i = 0; i < values.size(); i++)
        {
            firsts[i] = values[i].GetFirst();
            seconds[i] = values[i].GetSecond();
        }
        return MaterialPair(U::WeightedAverage(firsts, weights), V::WeightedAverage(seconds, weights));
    }

    static float Distance(const MaterialPair& a, const MaterialPair& b)
    {
        return (U::Distance(a.GetFirst(), b.GetFirst()) + V::Distance(a.GetSecond(), b.GetSecond())) * 0.5f;
    }

    std::string GetTypeSuffix() const { return GetFirst().GetTypeSuffix() + GetSecond().GetTypeSuffix(); }

    std::vector<unsigned8> Serialize() const {
        std::vector<unsigned8> firstSerialized = GetFirst().Serialize();
        std::vector<unsigned8> secondSerialized = GetSecond().Serialize();
        firstSerialized.insert(firstSerialized.end(), secondSerialized.begin(), secondSerialized.end());
        return firstSerialized;
    }

    void Deserialize(const std::vector<unsigned8>& value)
    {
        mFirst.Deserialize(std::vector<unsigned8>(value.begin(), value.begin() + sizeof(mFirst)));
        mSecond.Deserialize(std::vector<unsigned8>(value.begin() + sizeof(mFirst), value.end()));
    }

    bool operator==(const MaterialPair& v) const { return v.mFirst == mFirst && v.mSecond == mSecond; }
    bool operator!=(const MaterialPair& n) const { return !(n == *this); }

    //// Assignment operator, used for easy access to different kinds of materials
    //ColorAndValue& operator=(const unsigned& source)
    //{
    //    mValue = source & 0x3FF;
    //    glm::u8vec3 color;
    //    color.r = (source >> (10 + 14 - 1)) & 0xFE;
    //    color.g = (source >> (10 + 7 - 1)) & 0xFE;
    //    color.b = (source >> (10 - 1)) & 0xFE;
    //    mColor = Color(color);
    //}
    //operator unsigned() const
    //{
    //    // Value is stored in the 10 least significant bits
    //    unsigned32 res = (unsigned32)mValue;
    //    // Colors are stored in the 21 most significant bits (7 bits per channel, doesn't fit otherwise...)
    //    res |= ((unsigned32)mColor.GetR() & 0xFE) << (10 + 14 - 1);
    //    res |= ((unsigned32)mColor.GetG() & 0xFE) << (10 + 7 - 1);
    //    res |= ((unsigned32)mColor.GetB() & 0xFE) << (10 - 1);
    //    return res;
    //}
};

template<typename U, typename V>
struct NearestFinder<MaterialPair<U, V>>
{
    MaterialPair<U, V> operator()(const MaterialPair<U, V>& source, const std::vector<MaterialPair<U, V>>& values)
    {
        std::vector<float> distances(values.size());
        // The distance is defined as the sum of the component distances.
        for (size_t i = 0; i < values.size(); i++)
            distances[i] = U::Distance(source.GetFirst(), values[i].GetFirst()) + V::Distance(source.GetSecond(), values[i].GetSecond());
        auto minIt = std::min_element(distances.begin(), distances.end());
        size_t minIndex = std::distance(distances.begin(), minIt);
        return values[minIndex];
    }
};
template<typename U, typename V>
struct ParallelNearestFinder<MaterialPair<U, V>>
{
    MaterialPair<U, V> operator()(const MaterialPair<U, V>& source, const std::vector<MaterialPair<U, V>>& values)
    {
        std::vector<float> distances(values.size());
        // The distance is defined as the sum of the component distances.
        for (size_t i = 0; i < values.size(); i++)
            distances[i] = U::Distance(source.GetFirst(), values[i].GetFirst()) + V::Distance(source.GetSecond(), values[i].GetSecond());
        auto minIt = std::min_element(distances.begin(), distances.end());
        size_t minIndex = std::distance(distances.begin(), minIt);
        return values[minIndex];
    }
};


namespace std {
    template<typename U, typename V>
    struct hash<MaterialPair<U, V>> {
        size_t operator()(const MaterialPair<U, V> &value) const {
#ifdef ENVIRONMENT64
            size_t cHash = std::hash<U>()(value.GetFirst());
            size_t nHash = std::hash<V>()(value.GetSecond());
            return cHash | BitHelper::CircularShiftLeft<size_t>(nHash, V::BITS);
#else
            return (unsigned32)value;
#endif
        }
    };
}

template<typename U, typename UCompare, typename V, typename VCompare>
struct MaterialPairCompare
{
    bool operator()(const MaterialPair<U, V>& a, const MaterialPair<U, V>& b) const
    {
        if (a.GetFirst() != b.GetFirst()) return UCompare()(a.GetFirst(), b.GetFirst());
        return VCompare()(a.GetSecond(), b.GetSecond());
    }
};

template<typename U, typename V>
struct Serializer<MaterialPair<U, V>>
{
    static void Serialize(const MaterialPair<U, V>& value, std::ostream& out)
    {
        Serializer<U>::Serialize(value.GetFirst(), out);
        Serializer<V>::Serialize(value.GetSecond(), out);
    }

    static void Deserialize(MaterialPair<U, V>& value, std::istream& in)
    {
        U first; Serializer<U>::Deserialize(first, in); value.SetFirst(first);
        V second; Serializer<V>::Deserialize(second, in); value.SetSecond(second);
    }
};
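Not part of this commit: a sketch pairing a Color with a 16-bit BitsMaterial through MaterialPair, the same combination the ColorAndOpacity typedef later in this commit provides. The local typedef name and values are illustrative.

// Hypothetical usage example for MaterialPair (not in this repository).
#include "MaterialPair.h"
#include "BitsMaterial.h"

void MaterialPairExample()
{
    typedef MaterialPair<Color, BitsMaterial<16>> ColorPlusOpacity;

    ColorPlusOpacity a(Color(glm::u8vec3(255, 0, 0)), BitsMaterial<16>(65535));
    ColorPlusOpacity b(Color(glm::u8vec3(0, 0, 255)), BitsMaterial<16>(0));

    // Component-wise access
    Color color = a.GetFirst();
    size_t opacity = a.GetSecond().GetValue();

    // Distance averages the per-component distances; Serialize concatenates the component bytes
    float d = ColorPlusOpacity::Distance(a, b);
    std::vector<unsigned8> bytes = a.Serialize();
}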
Research/scene/Material/MaterialQuantizer/BaseQuantizer.h (Normal file, 18 lines)
@@ -0,0 +1,18 @@
#pragma once
#include <map>
#include <vector>
#include <string>

template<typename T, typename Comparer>
class BaseQuantizer
{
public:
    virtual std::map<T, T, Comparer>* QuantizeMaterials(std::vector<T> materials) const = 0;
    virtual std::string GetQuantizerDescriptor() const = 0;
};

template<typename T>
class QuickQuantizer
{
public:
    virtual T Quantize(const T& material) const = 0;
};
@@ -0,0 +1,48 @@
#pragma once
#include <map>
#include <vector>
#include "BaseQuantizer.h"
#include "ColorQuantizer/BaseColorQuantizer.h"
#include "../ColorAndNormal.h"
#include "../ColorAndOpacity.h"
#include "../ColorAndNormalAndValue.h"
#include "../../../core/CollectionHelper.h"

// Basically quantizes the colors using the given color quantizer and leaves the values as-is (assuming they are quantized enough already)
// Deleting this object also deletes the associated color quantizer
template<typename T, typename TCompare>
class ColorAndValueQuantizer : public BaseQuantizer<ColorAndValue<T>, ColorAndValueCompare<T, TCompare>>
{
private:
    BaseColorQuantizer* mColorQuantizer;
public:
    ColorAndValueQuantizer(BaseColorQuantizer* colorQuantizer)
        : mColorQuantizer(colorQuantizer)
    {}

    ~ColorAndValueQuantizer()
    {
        delete mColorQuantizer;
    }

    virtual std::map<ColorAndValue<T>, ColorAndValue<T>, ColorAndValueCompare<T, TCompare>>* QuantizeMaterials(std::vector<ColorAndValue<T>> materials) const
    {
        std::vector<Color> colors(materials.size());
        for (size_t i = 0; i < materials.size(); i++) colors[i] = materials[i].GetFirst();
        CollectionHelper::Unique(colors, ColorCompare());
        auto quantizedColors = mColorQuantizer->QuantizeMaterials(colors);
        std::map<ColorAndValue<T>, ColorAndValue<T>, ColorAndValueCompare<T, TCompare>>* res = new std::map<ColorAndValue<T>, ColorAndValue<T>, ColorAndValueCompare<T, TCompare>>();
        for (auto mat : materials)
        {
            ColorAndValue<T> replacer(quantizedColors->at(mat.GetFirst()), mat.GetSecond());
            res->insert(std::make_pair(mat, replacer));
        }
        delete quantizedColors;
        return res;
    }
    virtual std::string GetQuantizerDescriptor() const { return mColorQuantizer->GetQuantizerDescriptor(); }
};

typedef ColorAndValueQuantizer<ColorAndNormal, ColorAndNormalCompare> ColorAndNormalQuantizer;
typedef ColorAndValueQuantizer<ColorAndOpacity, ColorAndOpacityCompare> ColorAndOpacityQuantizer;
typedef ColorAndValueQuantizer<ColorAndNormalAndValue, ColorAndNormalAndValueCompare> ColorAndNormalAndValueQuantizer;
@@ -0,0 +1,53 @@
#include "BaseColorQuantizer.h"
#include "../../../../inc/tbb/parallel_for.h"

std::map<Color, Color, ColorCompare>* BaseColorQuantizer::QuantizeMaterials(std::vector<Color> materials) const
{
    // Convert the "Colors" to u8vec3
    std::vector<glm::u8vec3> colors(materials.size());
    tbb::parallel_for(size_t(0), materials.size(), [&](size_t i)
    {
        colors[i] = materials[i].GetColor();
    });

    // Quantize the colors
    auto quantizedColors = QuantizeColors(colors);
    //PrintQuantizationStatistics(quantizedColors);

    // Find out what the quantized materials are
    std::map<Color, Color, ColorCompare>* quantizedMaterials = new std::map<Color, Color, ColorCompare>();
    for (auto quantizedColor : *quantizedColors)
    {
        quantizedMaterials->insert(std::pair<Color, Color>(Color(quantizedColor.first), Color(quantizedColor.second)));
    }
    delete quantizedColors;
    return quantizedMaterials;
}

void BaseColorQuantizer::PrintQuantizationStatistics(std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* quantizedColors) const
{
    glm::vec3 sumError(0);
    unsigned maxError = 0;
    glm::uvec3 maxErrorValue(0);
    float sumDeltaE = 0;
    float maxDeltaE = 0;
    for (auto quantizedColor : *quantizedColors)
    {
        glm::vec3 error = glm::abs(glm::vec3(quantizedColor.first) - glm::vec3(quantizedColor.second));
        sumError += error;
        unsigned errorU = error.r + error.g + error.b;
        if (errorU > maxError)
        {
            maxError = errorU;
            maxErrorValue = error;
        }
        float deltaE = ColorHelper::GetDeltaEFromRGB(quantizedColor.first, quantizedColor.second);
        if (deltaE == deltaE) // Only sum if it is not NaN...
            sumDeltaE += deltaE;
        if (deltaE > maxDeltaE) maxDeltaE = deltaE;
    }
    glm::vec3 meanError = sumError / float(quantizedColors->size());
    float meanDeltaE = sumDeltaE / float(quantizedColors->size());
    printf("Mean errors: (%f, %f, %f), Max errors: (%u, %u, %u), Mean delta-E: %f, Max delta-E: %f\n", meanError.x, meanError.y, meanError.z, maxErrorValue.x, maxErrorValue.y, maxErrorValue.z, meanDeltaE, maxDeltaE);
}
@@ -0,0 +1,13 @@
#pragma once
#include "../BaseQuantizer.h"
#include "../../Color.h"

class BaseColorQuantizer : public BaseQuantizer<Color, ColorCompare>
{
public:
    virtual ~BaseColorQuantizer() {}

    void PrintQuantizationStatistics(std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* quantizedColors) const;
    std::map<Color, Color, ColorCompare>* QuantizeMaterials(std::vector<Color> materials) const override;
    virtual std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* QuantizeColors(std::vector<glm::u8vec3> colors) const = 0;
};
@@ -0,0 +1,28 @@
#include "ColorBitCutter.h"

ColorBitCutter::ColorBitCutter(unsigned8 bitCount)
{
    assert(bitCount <= 8);
    mBitCount = bitCount;
}
ColorBitCutter::~ColorBitCutter() {}

std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* ColorBitCutter::QuantizeColors(std::vector<glm::u8vec3> colors) const
{
    std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* res = new std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>();
    unsigned8 mask = BitHelper::GetHSMask<unsigned8>(0, mBitCount);
    for (glm::u8vec3 color : colors)
    {
        glm::u8vec3 replacementColor = glm::u8vec3(
            color.x & mask,
            color.y & mask,
            color.z & mask);
        res->insert(std::pair<glm::u8vec3, glm::u8vec3>(color, replacementColor));
    }
    return res;
}
std::string ColorBitCutter::GetQuantizerDescriptor() const
{
    return "b" + std::to_string(mBitCount);
}
@@ -0,0 +1,19 @@
#pragma once
#include <map>
#include "../../../../inc/glm/common.hpp"
#include "../../../../core/Comparers.h"
#include "BaseColorQuantizer.h"

class ColorBitCutter : public BaseColorQuantizer
{
public:
    ColorBitCutter(unsigned8 bitCount);
    ~ColorBitCutter() override;

    std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* QuantizeColors(std::vector<glm::u8vec3> colors) const override;
    std::string GetQuantizerDescriptor() const override;

private:
    unsigned8 mBitCount;
};
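Not part of this commit: a sketch that runs the ColorBitCutter declared above over a couple of colors; the bit count and the sample colors are illustrative.

// Hypothetical usage example for ColorBitCutter (not in this repository).
#include "ColorBitCutter.h"
#include <vector>

void ColorBitCutterExample()
{
    // Keep only the 5 most significant bits of every channel (RGB555-style quantization)
    ColorBitCutter cutter(5);

    std::vector<glm::u8vec3> colors = { glm::u8vec3(200, 100, 37), glm::u8vec3(12, 255, 91) };
    auto* mapping = cutter.QuantizeColors(colors);

    // Every input color maps to its bit-truncated replacement; the caller owns the returned map
    glm::u8vec3 quantized = mapping->at(glm::u8vec3(200, 100, 37));
    delete mapping;
}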
@@ -0,0 +1,96 @@
|
||||
#include "MaxErrorClusterer.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <forward_list>
|
||||
|
||||
#include "../../../../inc/glm/geometric.hpp"
|
||||
|
||||
// Quantize the colors in CIELAB-space clusters that have a maximum size maxDistance.
|
||||
MaxErrorClusterer::MaxErrorClusterer(float maxDistance)
|
||||
{
|
||||
mMaxDistance = maxDistance;
|
||||
}
|
||||
MaxErrorClusterer::~MaxErrorClusterer() { }
|
||||
|
||||
std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* MaxErrorClusterer::QuantizeColors(std::vector<glm::u8vec3> colors) const
|
||||
{
|
||||
// Transform all colors to CIELAB space:
|
||||
ListNode<Color>* firstUnclusteredColor = new ListNode<Color>();
|
||||
ListNode<Color>* cur = firstUnclusteredColor;
|
||||
for (auto rgb : colors)
|
||||
{
|
||||
Color color(rgb);
|
||||
cur->color = color;
|
||||
cur->next = new ListNode<Color>();
|
||||
cur = cur->next;
|
||||
}
|
||||
|
||||
auto res = new std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>();
|
||||
while (firstUnclusteredColor != NULL)
|
||||
{
|
||||
Color curColor = firstUnclusteredColor->color;
|
||||
// Pop the head of the "list"
|
||||
{
|
||||
auto nextUnclusteredColor = firstUnclusteredColor->next;
|
||||
delete firstUnclusteredColor;
|
||||
firstUnclusteredColor = nextUnclusteredColor;
|
||||
}
|
||||
// Create a new cluster for this unclustered color
|
||||
auto cluster = std::vector<Color>();
|
||||
cluster.push_back(curColor);
|
||||
|
||||
// Loop through the unclustered colors
|
||||
if (firstUnclusteredColor != NULL)
|
||||
{
|
||||
cur = firstUnclusteredColor;
|
||||
ListNode<Color>* last = NULL;
|
||||
ListNode<Color>* next = cur->next;
|
||||
while (cur != NULL)
|
||||
{
|
||||
next = cur->next;
|
||||
if (ColorHelper::GetDeltaEFromLAB(cur->color.lab, curColor.lab) <= mMaxDistance)
|
||||
{
|
||||
// If this color is close enough to the cluster's seed color, add it to the cluster
|
||||
cluster.push_back(cur->color);
|
||||
|
||||
// Delete the current node from the list of unclustered nodes
|
||||
delete cur;
|
||||
|
||||
// If this isn't the first node in the list, make the previous surviving node skip over the deleted one
|
||||
if (last != NULL)
|
||||
{
|
||||
last->next = next;
|
||||
cur = last;
|
||||
}
|
||||
else
|
||||
{ // If this is the first node, we just deleted it, so replace the current first node by the next node
|
||||
firstUnclusteredColor = next;
|
||||
}
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
// If this node wasn't added to the cluster, the last node should be updated before advancing
|
||||
last = cur;
|
||||
}
|
||||
// Advance one position
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
|
||||
// Use the cluster's seed color as the replacement color for all of its members
|
||||
glm::u8vec3 clusterReplacementColor = curColor.rgb;
|
||||
for (auto color : cluster)
|
||||
res->insert(std::pair<glm::u8vec3, glm::u8vec3>(color.rgb, clusterReplacementColor));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string MaxErrorClusterer::GetQuantizerDescriptor() const
|
||||
{
|
||||
char str[16];
|
||||
snprintf(str, sizeof(str), "%.2f", mMaxDistance);
|
||||
auto formattedMaxDistance = std::string(str);
|
||||
return "de" + formattedMaxDistance;
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
#pragma once
|
||||
#include "BaseColorQuantizer.h"
|
||||
#include "../../../../core/ColorHelper.h"
|
||||
|
||||
class MaxErrorClusterer :
|
||||
public BaseColorQuantizer
|
||||
{
|
||||
public:
|
||||
// Quantize the colors into CIELAB-space clusters: every color within maxDistance (CIELAB delta E) of a cluster's seed color is mapped to that seed.
|
||||
// This is done with a greedy clustering algorithm: each still-unclustered color in turn becomes a seed and absorbs every remaining color whose delta E to it is at most maxDistance.
|
||||
MaxErrorClusterer(float maxDistance);
|
||||
~MaxErrorClusterer() override;
|
||||
|
||||
std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* QuantizeColors(std::vector<glm::u8vec3> colors) const override;
|
||||
std::string GetQuantizerDescriptor() const override;
|
||||
protected:
|
||||
float mMaxDistance;
|
||||
private:
|
||||
template<typename T>
|
||||
struct ListNode
|
||||
{
|
||||
public:
|
||||
T color;
|
||||
ListNode<T>* next = NULL;
|
||||
|
||||
ListNode(T value) { color = value; }
|
||||
ListNode() { }
|
||||
};
|
||||
struct Color
|
||||
{
|
||||
glm::u8vec3 rgb;
|
||||
glm::vec3 lab;
|
||||
|
||||
Color(glm::u8vec3 rgb)
|
||||
{
|
||||
this->rgb = rgb;
|
||||
lab = ColorHelper::RGBtoLAB(rgb);
|
||||
}
|
||||
|
||||
Color() : Color(glm::u8vec3(0)) {}
|
||||
};
|
||||
};
|
||||
|
||||
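
The greedy seed-based clustering described above is easier to see on scalars. The sketch below mirrors the loop structure (pop a seed, absorb everything within maxDistance, map each member to the seed) on plain floats standing in for CIELAB distances; it is an illustration written for this document, not the project's code.

    #include <cstdio>
    #include <cmath>
    #include <list>
    #include <map>

    // Greedy seed clustering: each unclustered value becomes a seed and absorbs every remaining
    // value within maxDistance of it; all cluster members map to the seed.
    static std::map<float, float> Cluster(std::list<float> values, float maxDistance)
    {
        std::map<float, float> replacement;
        while (!values.empty())
        {
            float seed = values.front();
            values.pop_front();
            replacement[seed] = seed;
            for (auto it = values.begin(); it != values.end();)
            {
                if (std::fabs(*it - seed) <= maxDistance) { replacement[*it] = seed; it = values.erase(it); }
                else ++it;
            }
        }
        return replacement;
    }

    int main()
    {
        for (auto p : Cluster({0.0f, 1.0f, 5.0f, 5.5f, 9.0f}, 2.0f))
            printf("%.1f -> %.1f\n", p.first, p.second); // 0,1 -> 0;  5,5.5 -> 5;  9 -> 9
        return 0;
    }
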
@@ -0,0 +1,20 @@
|
||||
#include "XiangCIELABClusterer.h"
|
||||
#include "../../../../core/ColorHelper.h"
|
||||
#include <string>
|
||||
|
||||
XiangCIELABClusterer::XiangCIELABClusterer(unsigned quantizedColorCount) : XiangClusterer(quantizedColorCount) {}
|
||||
XiangCIELABClusterer::~XiangCIELABClusterer() {}
|
||||
|
||||
glm::vec3 XiangCIELABClusterer::ScaleColor(glm::u8vec3 color) const
|
||||
{
|
||||
return ColorHelper::RGBtoLAB(color);
|
||||
}
|
||||
glm::u8vec3 XiangCIELABClusterer::ScaleBackColor(glm::vec3 color) const
|
||||
{
|
||||
return ColorHelper::LABtoRGB(color);
|
||||
}
|
||||
|
||||
std::string XiangCIELABClusterer::GetQuantizerDescriptor() const
|
||||
{
|
||||
return "lab" + std::to_string(mQuantizedColorCount);
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
#pragma once
|
||||
#include "XiangClusterer.h"
|
||||
|
||||
class XiangCIELABClusterer :
|
||||
public XiangClusterer
|
||||
{
|
||||
public:
|
||||
// Quantize the colors to the given number of colors, clustering in CIELAB space rather than scaled RGB.
|
||||
XiangCIELABClusterer(unsigned quantizedColorCount);
|
||||
virtual ~XiangCIELABClusterer() override;
|
||||
|
||||
std::string GetQuantizerDescriptor() const override;
|
||||
protected:
|
||||
glm::vec3 ScaleColor(glm::u8vec3 color) const override;
|
||||
glm::u8vec3 ScaleBackColor(glm::vec3 color) const override;
|
||||
};
|
||||
|
||||
@@ -0,0 +1,104 @@
|
||||
#include "XiangClusterer.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
#include "../../../../inc/glm/geometric.hpp"
|
||||
|
||||
XiangClusterer::XiangClusterer(unsigned quantizedColorCount, glm::vec3 colorScale)
|
||||
{
|
||||
mQuantizedColorCount = quantizedColorCount;
|
||||
mColorScale = colorScale;
|
||||
mInvColorScale = 1.0f / mColorScale;
|
||||
}
|
||||
|
||||
XiangClusterer::XiangClusterer(unsigned quantizedColorCount) : XiangClusterer(quantizedColorCount, glm::vec3(0.5, 1.0, 0.25)) {}
|
||||
|
||||
XiangClusterer::~XiangClusterer()
|
||||
{
|
||||
}
|
||||
|
||||
std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* XiangClusterer::QuantizeColors(std::vector<glm::u8vec3> colors) const
|
||||
{
|
||||
// Set the initial cluster head to the first color in the set
|
||||
std::vector<glm::vec3> h(mQuantizedColorCount);
|
||||
h[0] = ScaleColor(*colors.begin()); // seed cluster 0 with the (scaled) first color
|
||||
|
||||
// Initialize all colors to be in the same cluster (cluster 0)
|
||||
std::vector<ClusterColor*> B(colors.size());
|
||||
int i = 0;
|
||||
for (auto color : colors)
|
||||
{
|
||||
ClusterColor* cur = new ClusterColor();
|
||||
cur->originalColor = color;
|
||||
cur->color = ScaleColor(color);
|
||||
cur->centerDistance = glm::distance(cur->color, h[0]); // measure in the same scaled space as the later iterations
|
||||
cur->clusterID = 0;
|
||||
B[i++] = cur;
|
||||
}
|
||||
|
||||
ClusterColor* max = B[0];
|
||||
|
||||
// Start the algorithm: in each iteration create a new cluster
|
||||
for (unsigned x = 1; x < mQuantizedColorCount; x++)
|
||||
{
|
||||
// Find the point with the maximum distance to the cluster center
|
||||
for (auto color : B)
|
||||
if (color->centerDistance > max->centerDistance)
|
||||
max = color;
|
||||
// Define this point as the center of a new cluster
|
||||
max->clusterID = x;
|
||||
h[x] = max->color;
|
||||
|
||||
// Move all colors that are closer to this new cluster than to their current cluster center over to the new cluster.
|
||||
for (auto color : B)
|
||||
{
|
||||
float newDistance = glm::distance(color->color, h[x]);
|
||||
if (newDistance < color->centerDistance)
|
||||
{
|
||||
color->clusterID = x;
|
||||
color->centerDistance = newDistance;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find all cluster centers:
|
||||
std::vector<glm::vec3> clusterSums(mQuantizedColorCount);
|
||||
std::vector<float> invClusterCounts(mQuantizedColorCount);
|
||||
for (auto color : B)
|
||||
{
|
||||
clusterSums[color->clusterID] += color->color;
|
||||
invClusterCounts[color->clusterID]++;
|
||||
}
|
||||
// Inverse the cluster counts:
|
||||
for (size_t i = 0; i < invClusterCounts.size(); i++) invClusterCounts[i] = 1.0f / invClusterCounts[i];
|
||||
|
||||
// Calculate the color replacers
|
||||
std::vector<glm::u8vec3> clusterReplacers(mQuantizedColorCount);
|
||||
for (unsigned i = 0; i < mQuantizedColorCount; i++)
|
||||
clusterReplacers[i] = ScaleBackColor(clusterSums[i] * invClusterCounts[i]);
|
||||
|
||||
// Now that we have the clusters, build the replacement map
|
||||
auto replacements = new std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>();
|
||||
for (auto color : B)
|
||||
replacements->insert(std::pair<glm::u8vec3, glm::u8vec3>(color->originalColor, clusterReplacers[color->clusterID]));
|
||||
|
||||
for (auto color : B)
|
||||
delete color;
|
||||
|
||||
return replacements;
|
||||
}
|
||||
|
||||
glm::vec3 XiangClusterer::ScaleColor(glm::u8vec3 color) const
|
||||
{
|
||||
return glm::vec3(color) * mColorScale;
|
||||
}
|
||||
|
||||
glm::u8vec3 XiangClusterer::ScaleBackColor(glm::vec3 color) const
|
||||
{
|
||||
return glm::u8vec3(glm::round(color * mInvColorScale));
|
||||
}
|
||||
|
||||
std::string XiangClusterer::GetQuantizerDescriptor() const
|
||||
{
|
||||
return std::to_string(mQuantizedColorCount);
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
#pragma once
|
||||
#include "BaseColorQuantizer.h"
|
||||
|
||||
class XiangClusterer :
|
||||
public BaseColorQuantizer
|
||||
{
|
||||
public:
|
||||
XiangClusterer(unsigned quantizedColorCount, glm::vec3 colorScale);
|
||||
// Quantize the colors to the given number of colors, using the default RGB scaling 0.5:1.0:0.25.
|
||||
XiangClusterer(unsigned quantizedColorCount);
|
||||
~XiangClusterer() override;
|
||||
|
||||
std::map<glm::u8vec3, glm::u8vec3, u8vec3comparer>* QuantizeColors(std::vector<glm::u8vec3> colors) const override;
|
||||
std::string GetQuantizerDescriptor() const override;
|
||||
protected:
|
||||
virtual glm::vec3 ScaleColor(glm::u8vec3 color) const;
|
||||
virtual glm::u8vec3 ScaleBackColor(glm::vec3 color) const;
|
||||
unsigned mQuantizedColorCount;
|
||||
private:
|
||||
glm::vec3 mColorScale;
|
||||
glm::vec3 mInvColorScale;
|
||||
|
||||
struct ClusterColor
|
||||
{
|
||||
public:
|
||||
glm::u8vec3 originalColor;
|
||||
glm::vec3 color;
|
||||
float centerDistance;
|
||||
int clusterID;
|
||||
};
|
||||
};
|
||||
|
||||
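
The seeding strategy used by XiangClusterer is a farthest-point scheme: the point farthest from its current cluster center becomes the next seed, and points closer to the new seed are reassigned to it; afterwards each center is replaced by the mean of its members. A standalone scalar sketch of that loop (the scaling/unscaling steps are omitted, and all names are chosen for this example):

    #include <cstdio>
    #include <cmath>
    #include <vector>

    int main()
    {
        // Scalar stand-ins for the (scaled) colors; k clusters.
        std::vector<float> points = {0.1f, 0.2f, 4.9f, 5.0f, 9.8f};
        const unsigned k = 3;

        std::vector<float> centers(k, points[0]);             // first point seeds cluster 0
        std::vector<unsigned> cluster(points.size(), 0);
        std::vector<float> dist(points.size());
        for (size_t i = 0; i < points.size(); i++) dist[i] = std::fabs(points[i] - centers[0]);

        for (unsigned x = 1; x < k; x++)
        {
            // The point farthest from its current center seeds the next cluster.
            size_t far = 0;
            for (size_t i = 1; i < points.size(); i++) if (dist[i] > dist[far]) far = i;
            centers[x] = points[far];
            // Reassign points that are closer to the new center.
            for (size_t i = 0; i < points.size(); i++)
            {
                float d = std::fabs(points[i] - centers[x]);
                if (d < dist[i]) { dist[i] = d; cluster[i] = x; }
            }
        }

        // Replace each cluster center by the mean of its members.
        std::vector<float> sum(k, 0.0f), count(k, 0.0f);
        for (size_t i = 0; i < points.size(); i++) { sum[cluster[i]] += points[i]; count[cluster[i]]++; }
        for (size_t i = 0; i < points.size(); i++)
            printf("%.1f -> %.2f\n", points[i], sum[cluster[i]] / count[cluster[i]]);
        return 0;
    }
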
@@ -0,0 +1,52 @@
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include "NormalQuantizer.h"
|
||||
#include "../../../core/BitHelper.h"
|
||||
|
||||
|
||||
NormalQuantizer::NormalQuantizer(unsigned8 bits): mBits(bits)
|
||||
{
|
||||
// Bits must be an even number, as there are two channels for an ONV
|
||||
assert(bits % 2 == 0);
|
||||
|
||||
// Precalculate the channel mask
|
||||
unsigned32 channelStartHS = 32 - SmallNormal::BITS / 2;
|
||||
mChannelMask = BitHelper::GetHSMask<unsigned32>(channelStartHS, channelStartHS + mBits / 2);
|
||||
}
|
||||
|
||||
std::map<SmallNormal, SmallNormal, NormalCompare>* NormalQuantizer::QuantizeMaterials(std::vector<SmallNormal> normals) const
|
||||
{
|
||||
std::map<SmallNormal, SmallNormal, NormalCompare>* res = new std::map<SmallNormal, SmallNormal, NormalCompare>();
|
||||
for (const SmallNormal& normal : normals)
|
||||
{
|
||||
res->insert(std::make_pair(normal, Quantize(normal)));
|
||||
}
|
||||
if (mBits <= QUANTIZE_ALL_UP_TO)
|
||||
{
|
||||
unsigned32 maxPerChannel = 1 << (mBits / 2);
|
||||
unsigned8 shift = (SmallNormal::BITS - mBits) / 2;
|
||||
for (unsigned32 x = 0; x < maxPerChannel; x++)
|
||||
for (unsigned32 y = 0; y < maxPerChannel; y++)
|
||||
{
|
||||
SmallNormal v;
|
||||
v.SetXComponent(x << shift);
|
||||
v.SetYComponent(y << shift);
|
||||
res->insert(std::make_pair(v, v));
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
SmallNormal NormalQuantizer::Quantize(const SmallNormal& normal) const
|
||||
{
|
||||
SmallNormal quantized;
|
||||
quantized.SetXComponent(normal.GetXComponent() & mChannelMask);
|
||||
quantized.SetYComponent(normal.GetYComponent() & mChannelMask);
|
||||
return quantized;
|
||||
}
|
||||
|
||||
std::string NormalQuantizer::GetQuantizerDescriptor() const
|
||||
{
|
||||
return std::to_string(mBits);
|
||||
}
|
||||
20
Research/scene/Material/MaterialQuantizer/NormalQuantizer.h
Normal file
20
Research/scene/Material/MaterialQuantizer/NormalQuantizer.h
Normal file
@@ -0,0 +1,20 @@
|
||||
#pragma once
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include "../SmallNormal.h"
|
||||
#include "BaseQuantizer.h"
|
||||
|
||||
// Super simple quantizer: takes the original value, sets some bits to zero so that only the given number of bits remain
|
||||
class NormalQuantizer : public BaseQuantizer<SmallNormal, NormalCompare>, public QuickQuantizer<SmallNormal>
|
||||
{
|
||||
public:
|
||||
NormalQuantizer(unsigned8 bits);
|
||||
std::map<SmallNormal, SmallNormal, NormalCompare>* QuantizeMaterials(std::vector<SmallNormal> materials) const override;
|
||||
std::string GetQuantizerDescriptor() const override;
|
||||
SmallNormal Quantize(const SmallNormal& normal) const override;
|
||||
private:
|
||||
// if the number of bits is smaller than or equal to this number, all possible quantization values will be added.
|
||||
static const unsigned8 QUANTIZE_ALL_UP_TO = 14;
|
||||
unsigned8 mBits;
|
||||
unsigned32 mChannelMask;
|
||||
};
|
||||
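
As a reference for the masking above, here is a standalone sketch of quantizing a packed 12-bit octahedral normal by keeping only the top bits of each 6-bit channel. The channel layout (x in the high bits, y in the low bits) is taken from SmallNormal further down; the mask construction is a stand-in for the BitHelper call and is assumed, not copied, from the project.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // A 12-bit octahedral normal: x in the high 6 bits, y in the low 6 bits.
        const unsigned BITS = 12;
        uint16_t packed = (0x2Bu << 6) | 0x17u;   // x = 0b101011, y = 0b010111

        // Quantize to 8 bits total, i.e. keep the top 4 bits of each 6-bit channel.
        const unsigned keptBits = 8;
        uint16_t channelMask = (uint16_t)(((1u << (keptBits / 2)) - 1u) << (BITS / 2 - keptBits / 2)); // 0b111100

        uint16_t x = (uint16_t)((packed >> (BITS / 2)) & channelMask);
        uint16_t y = (uint16_t)( packed                & channelMask);
        uint16_t quantized = (uint16_t)((x << (BITS / 2)) | y);

        printf("x: %02X -> %02X, y: %02X -> %02X, packed: %03X -> %03X\n",
               (unsigned)(packed >> 6), (unsigned)x, (unsigned)(packed & 0x3F), (unsigned)y,
               (unsigned)packed, (unsigned)quantized);
        return 0;
    }
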
38
Research/scene/Material/NearestFinder.h
Normal file
38
Research/scene/Material/NearestFinder.h
Normal file
@@ -0,0 +1,38 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include <assert.h>
|
||||
#include "../../core/CollectionHelper.h"
|
||||
#include "../../inc/tbb/parallel_for.h"
|
||||
|
||||
template<typename T>
|
||||
struct NearestFinder
|
||||
{
|
||||
T operator()(const T& source, const std::vector<T>& values) const
|
||||
{
|
||||
assert(!values.empty());
|
||||
float min = T::Distance(source, values[0]);
|
||||
size_t minIndex = 0;
|
||||
for (size_t i = 0; i < values.size(); i++)
|
||||
{
|
||||
float distance = T::Distance(source, values[i]);
|
||||
if (distance < min)
|
||||
{
|
||||
min = distance;
|
||||
minIndex = i;
|
||||
}
|
||||
}
|
||||
return values[minIndex];
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct ParallelNearestFinder
|
||||
{
|
||||
T operator()(const T& source, const std::vector<T>& values) const
|
||||
{
|
||||
assert(!values.empty());
|
||||
std::vector<float> distances(values.size());
|
||||
tbb::parallel_for(size_t(0), values.size(), [&](size_t i) { distances[i] = T::Distance(source, values[i]); });
|
||||
return values[CollectionHelper::MinIndex(distances)];
|
||||
}
|
||||
};
|
||||
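
NearestFinder is a plain linear scan over T::Distance; ParallelNearestFinder computes the distances with tbb::parallel_for and then takes the index of the minimum. A sequential standalone sketch with a toy material type (both the type and helper are written for this example):

    #include <cstdio>
    #include <cmath>
    #include <vector>

    // Toy material type exposing the static Distance(a, b) that the finder expects.
    struct ToyMaterial
    {
        float v;
        static float Distance(const ToyMaterial& a, const ToyMaterial& b) { return std::fabs(a.v - b.v); }
    };

    // Sequential equivalent of NearestFinder<T>: linear scan keeping the index of the smallest distance.
    static ToyMaterial FindNearest(const ToyMaterial& source, const std::vector<ToyMaterial>& values)
    {
        size_t best = 0;
        for (size_t i = 1; i < values.size(); i++)
            if (ToyMaterial::Distance(source, values[i]) < ToyMaterial::Distance(source, values[best]))
                best = i;
        return values[best];
    }

    int main()
    {
        std::vector<ToyMaterial> palette = {{0.0f}, {2.5f}, {7.0f}};
        printf("nearest to 3.1: %.1f\n", FindNearest({3.1f}, palette).v); // 2.5
        return 0;
    }
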
88
Research/scene/Material/SignedIntMaterial.h
Normal file
88
Research/scene/Material/SignedIntMaterial.h
Normal file
@@ -0,0 +1,88 @@
|
||||
#pragma once
|
||||
#include "../../core/Defines.h"
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include "../../core/BitHelper.h"
|
||||
|
||||
// Sign-magnitude integer: the sign is stored in the most significant bit and the low 31 bits hold the magnitude, instead of the usual two's-complement representation
|
||||
struct SignedIntMaterial
|
||||
{
|
||||
private:
|
||||
const static unsigned VALUE_MASK = 0x7FFFFFFF;
|
||||
const static unsigned SIGN_MASK = 0x80000000;
|
||||
|
||||
unsigned mValue;
|
||||
|
||||
inline unsigned GetValue() const { return mValue & VALUE_MASK; }
|
||||
inline void SetValue(unsigned value)
|
||||
{
|
||||
mValue &= ~VALUE_MASK;
|
||||
mValue |= value & VALUE_MASK;
|
||||
}
|
||||
|
||||
inline bool IsNegative() const { return (mValue & SIGN_MASK) != 0; }
|
||||
inline void SetIsNegative(bool isNegative)
|
||||
{
|
||||
mValue &= ~SIGN_MASK;
|
||||
if (isNegative) mValue |= SIGN_MASK;
|
||||
}
|
||||
public:
|
||||
SignedIntMaterial() : mValue(0) {}
|
||||
|
||||
SignedIntMaterial(unsigned value, bool negative)
|
||||
{
|
||||
SetValue(value);
|
||||
SetIsNegative(negative);
|
||||
}
|
||||
SignedIntMaterial(int value) : SignedIntMaterial(abs(value), value < 0) {}
|
||||
~SignedIntMaterial() {}
|
||||
|
||||
SignedIntMaterial& operator=(const int& source)
|
||||
{
|
||||
// Store as sign-magnitude, matching the int constructor (copying the raw two's-complement pattern would corrupt negative values)
|
||||
SetValue((unsigned)abs(source));
|
||||
SetIsNegative(source < 0);
|
||||
return *this;
|
||||
}
|
||||
|
||||
std::vector<unsigned8> Serialize()
|
||||
{
|
||||
return BitHelper::SplitInBytes(this->operator unsigned());
|
||||
}
|
||||
|
||||
operator unsigned() const
|
||||
{
|
||||
return (unsigned)mValue;
|
||||
}
|
||||
|
||||
operator int() const
|
||||
{
|
||||
return (int)GetValue() * (IsNegative() ? -1 : 1);
|
||||
}
|
||||
|
||||
bool operator==(const SignedIntMaterial& other) const
|
||||
{
|
||||
return mValue == other.mValue;
|
||||
}
|
||||
|
||||
bool operator!=(const SignedIntMaterial& other) const
|
||||
{
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
bool operator<(const SignedIntMaterial& other) const
|
||||
{
|
||||
return this->operator int() < (int)other;
|
||||
}
|
||||
};
|
||||
|
||||
// Perfect hash
|
||||
namespace std
|
||||
{
|
||||
template<>
|
||||
struct hash<SignedIntMaterial>
|
||||
{
|
||||
size_t operator()(SignedIntMaterial const &value) const
|
||||
{
|
||||
return (unsigned)value;
|
||||
}
|
||||
};
|
||||
}
|
||||
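
The sign-magnitude layout used by SignedIntMaterial (top bit = sign, low 31 bits = magnitude) can be sketched independently of the class; the helper names below are chosen for this example only.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Sign-magnitude encoding: top bit = sign, low 31 bits = |value|.
    static uint32_t EncodeSignMagnitude(int value)
    {
        uint32_t magnitude = (uint32_t)std::abs(value) & 0x7FFFFFFFu;
        return magnitude | (value < 0 ? 0x80000000u : 0u);
    }

    static int DecodeSignMagnitude(uint32_t stored)
    {
        int magnitude = (int)(stored & 0x7FFFFFFFu);
        return (stored & 0x80000000u) ? -magnitude : magnitude;
    }

    int main()
    {
        uint32_t e = EncodeSignMagnitude(-42);
        printf("-42 -> 0x%08X -> %d\n", (unsigned)e, DecodeSignMagnitude(e)); // 0x8000002A -> -42
        return 0;
    }
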
185
Research/scene/Material/SmallNormal.h
Normal file
185
Research/scene/Material/SmallNormal.h
Normal file
@@ -0,0 +1,185 @@
|
||||
#pragma once
|
||||
#include "BaseMaterial.h"
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Hashers.h"
|
||||
#include "../../core/Comparers.h"
|
||||
#include "../../core/CollectionHelper.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
|
||||
// Compact 12-bit normal representation (see BITS) using octahedron normal vectors
|
||||
// https://knarkowicz.wordpress.com/2014/04/16/octahedron-normal-vector-encoding/
|
||||
// Technique proposed in:
|
||||
// Meyer, Q., Süßmuth, J., Sußner, G., Stamminger, M., & Greiner, G. (2010, June).
|
||||
// On Floating Point Normal Vectors.
|
||||
// In Computer Graphics Forum (Vol. 29, No. 4, pp. 1405-1409). Blackwell Publishing Ltd.
|
||||
class SmallNormal
|
||||
{
|
||||
public:
|
||||
static const unsigned8 BITS = 12;
|
||||
static const unsigned8 BYTES = BITS / 8 + (((BITS % 8) == 0) ? 0 : 1);
|
||||
static const unsigned8 CHANNELSPERPIXEL = BYTES;
|
||||
static const unsigned32 SINGLECOORDMASK = ((1 << (BITS / 2)) - 1);
|
||||
static const unsigned32 MAXCOORDVALUE = SINGLECOORDMASK;
|
||||
private:
|
||||
unsigned8 mData[BYTES];
|
||||
|
||||
inline unsigned32 normal() const
|
||||
{
|
||||
unsigned32 res = 0;
|
||||
BitHelper::JoinBytes(mData, res, 0, BYTES);
|
||||
return res;
|
||||
}
|
||||
inline void normal(unsigned32 value)
|
||||
{
|
||||
BitHelper::SplitInBytesAndMove(value, mData, 0, BYTES);
|
||||
}
|
||||
|
||||
inline static float DiscrMult() { return (float)MAXCOORDVALUE; }
|
||||
|
||||
// Wraps a vector between [-1, 1]
|
||||
glm::vec2 Wrap(glm::vec2 v) const
|
||||
{
|
||||
return (1.0f - glm::abs(glm::vec2(v.y, v.x))) * (glm::vec2(v.x >= 0 ? 1 : -1, v.y >= 0 ? 1 : -1));
|
||||
}
|
||||
public:
|
||||
|
||||
SmallNormal() { normal(0); }
|
||||
SmallNormal(glm::vec3 normal)
|
||||
{
|
||||
Set(normal);
|
||||
}
|
||||
|
||||
// Returns the x component of the octahedral normal vector as an unsigned integer between 0 and MAXCOORDVALUE
|
||||
unsigned32 GetXComponent() const { return normal() >> (BITS >> 1); }
|
||||
// Returns the y component of the octahedral normal vector as an unsigned integer between 0 and MAXCOORDVALUE
|
||||
unsigned32 GetYComponent() const { return normal() & SINGLECOORDMASK; }
|
||||
// Sets the x component of the octahedral normal vector as an unsigned integer. Should be between 0 and MAXCOORDVALUE
|
||||
void SetXComponent(unsigned32 value) { assert(value <= MAXCOORDVALUE); unsigned32 mNormal = normal(); mNormal &= SINGLECOORDMASK; mNormal |= value << (BITS / 2); normal(mNormal); }
|
||||
// Sets the y component of the octahedral normal vector as an unsigned integer. Should be between 0 and MAXCOORDVALUE
|
||||
void SetYComponent(unsigned32 value) { assert(value <= MAXCOORDVALUE); unsigned32 mNormal = normal(); mNormal &= ~SINGLECOORDMASK; mNormal |= value & SINGLECOORDMASK; normal(mNormal); }
|
||||
|
||||
void Set(glm::vec3 n)
|
||||
{
|
||||
n /= (glm::abs(n.x) + glm::abs(n.y) + glm::abs(n.z));
|
||||
glm::vec2 res(n.x, n.y);
|
||||
if (n.z < 0)
|
||||
res = Wrap(res);
|
||||
res = res * 0.5f + 0.5f;
|
||||
SetXComponent((unsigned32)(res.x * DiscrMult()));
|
||||
SetYComponent((unsigned32)(res.y * DiscrMult()));
|
||||
|
||||
//if (n.x == 0 && n.y == 0 && n.z == 0) mNormal = 0;
|
||||
//else
|
||||
//{
|
||||
// n = glm::normalize(n);
|
||||
// // Calculate spherical coordinates, normalized between 0 and 1
|
||||
// glm::vec2 sphericalCoords = ((glm::vec2(atan2(n.y, n.x) / mPi, n.z) + 1.0f) * 0.5f);
|
||||
// // Store them using 4 bits per channel
|
||||
// SetXComponent((unsigned8)(sphericalCoords.x * 16.0f));
|
||||
// SetYComponent((unsigned8)(sphericalCoords.y * 16.0f));
|
||||
//}
|
||||
}
|
||||
glm::vec3 Get() const
|
||||
{
|
||||
glm::vec2 enc(((float)GetXComponent()) / DiscrMult(), ((float)GetYComponent()) / DiscrMult());
|
||||
enc = enc * 2.0f - 1.0f;
|
||||
|
||||
glm::vec3 n(enc.x, enc.y, 1.0f - glm::abs(enc.x) - glm::abs(enc.y));
|
||||
if (n.z < 0)
|
||||
{
|
||||
glm::vec2 wrapped = Wrap(enc);
|
||||
n.x = wrapped.x;
|
||||
n.y = wrapped.y;
|
||||
}
|
||||
return glm::normalize(n);
|
||||
|
||||
//// Extract the spherical coordinates
|
||||
//glm::vec2 sphericalCoords(((float)GetXComponent()) / 16.0f, ((float)GetYComponent()) / 16.0f);
|
||||
//// Normalize back
|
||||
//sphericalCoords = (sphericalCoords * 2.0f) - 1.0f;
|
||||
//glm::vec2 scth(sin(sphericalCoords.x), cos(sphericalCoords.y));
|
||||
//glm::vec2 scphi(sqrt(1.0 - sphericalCoords.y*sphericalCoords.y), sphericalCoords.y);
|
||||
//return glm::normalize(glm::vec3(scth.y * scphi.x, scth.x * scphi.x, scphi.y));
|
||||
}
|
||||
|
||||
unsigned32 GetValue() const { return normal(); }
|
||||
void SetValue(unsigned32 value) { normal(value); }
|
||||
|
||||
glm::u16vec3 GetProperties() const { return glm::u16vec3(0, 0, normal()); }
|
||||
void SetProperties(glm::u16vec3 props) { normal(props.z); }
|
||||
|
||||
static SmallNormal Average(const std::vector<SmallNormal>& normals)
|
||||
{
|
||||
glm::vec3 sum(0);
|
||||
for (SmallNormal n : normals)
|
||||
sum += n.Get();
|
||||
if (sum.x == 0 && sum.y == 0 && sum.z == 0) return SmallNormal();
|
||||
return SmallNormal(glm::normalize(sum / (float)normals.size()));
|
||||
}
|
||||
static SmallNormal WeightedAverage(const std::vector<SmallNormal>& normals, const std::vector<float>& weights)
|
||||
{
|
||||
glm::vec3 sum(0);
|
||||
for (size_t i = 0; i < normals.size(); i++)
|
||||
sum += normals[i].Get() * weights[i];
|
||||
if (sum.x == 0 && sum.y == 0 && sum.z == 0) return SmallNormal();
|
||||
return SmallNormal(glm::normalize(sum / CollectionHelper::Sum(weights)));
|
||||
}
|
||||
|
||||
static float Distance(const SmallNormal a, const SmallNormal b)
|
||||
{
|
||||
return glm::distance(a.Get(), b.Get()) * 0.5f;
|
||||
}
|
||||
|
||||
std::string GetTypeSuffix() const { return "n"; }
|
||||
|
||||
std::vector<unsigned8> Serialize() const
|
||||
{
|
||||
std::vector<unsigned8> res(BYTES);
|
||||
for (unsigned8 i = 0; i < BYTES; i++) res[i] = mData[i];
|
||||
return res;
|
||||
}
|
||||
void Deserialize(const std::vector<unsigned8>& value) {
|
||||
assert(value.size() == BYTES);
|
||||
for (unsigned8 i = 0; i < BYTES; i++) mData[i] = value[i];
|
||||
}
|
||||
|
||||
void Serialize(std::ostream& stream) const { Serializer<unsigned8*>::Serialize(&mData[0], BYTES, stream); }
|
||||
void Deserialize(std::istream& stream) { Serializer<unsigned8*>::Deserialize(&mData[0], BYTES, stream); }
|
||||
|
||||
bool operator==(const SmallNormal& n) const { return n.normal() == normal(); }
|
||||
bool operator!=(const SmallNormal& n) const { return !(n == *this); }
|
||||
|
||||
// Assignment operator, used for easy access to different kinds of materials
|
||||
SmallNormal& operator=(const unsigned& source) { normal(source); return *this; }
|
||||
operator unsigned() const { return normal(); }
|
||||
|
||||
static std::vector<SmallNormal> GetAll()
|
||||
{
|
||||
std::vector<SmallNormal> all(((size_t)1) << BITS);
|
||||
for (unsigned32 i = 0; i < (unsigned32)all.size(); i++)
|
||||
{
|
||||
SmallNormal n;
|
||||
n.SetValue(i);
|
||||
all[i] = n;
|
||||
}
|
||||
return all;
|
||||
}
|
||||
};
|
||||
|
||||
namespace std {
|
||||
template<>
|
||||
struct hash<SmallNormal> {
|
||||
size_t operator()(const SmallNormal &value) const {
|
||||
return value.GetValue();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
struct NormalCompare
|
||||
{
|
||||
bool operator()(const SmallNormal& n1, const SmallNormal& n2) const
|
||||
{
|
||||
return n1.GetValue() < n2.GetValue();
|
||||
}
|
||||
};
|
||||
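
The octahedron normal vector mapping that SmallNormal discretizes above can be sketched with plain floats: L1-normalize the normal, fold the lower hemisphere with the wrap formula, and map to [0,1]^2; decoding reverses the steps and renormalizes. A standalone sketch (the discretization to BITS/2-bit channels is omitted, and the small structs are defined here only for the example):

    #include <cstdio>
    #include <cmath>

    struct V2 { float x, y; };
    struct V3 { float x, y, z; };

    // Fold used for the lower hemisphere (same formula as SmallNormal::Wrap).
    static V2 Wrap(V2 v)
    {
        return { (1.0f - std::fabs(v.y)) * (v.x >= 0 ? 1.0f : -1.0f),
                 (1.0f - std::fabs(v.x)) * (v.y >= 0 ? 1.0f : -1.0f) };
    }

    // Encode a unit normal to octahedral coordinates in [0,1]^2.
    static V2 Encode(V3 n)
    {
        float l1 = std::fabs(n.x) + std::fabs(n.y) + std::fabs(n.z);
        V2 p = { n.x / l1, n.y / l1 };
        if (n.z < 0) p = Wrap(p);
        return { p.x * 0.5f + 0.5f, p.y * 0.5f + 0.5f };
    }

    static V3 Decode(V2 e)
    {
        V2 p = { e.x * 2.0f - 1.0f, e.y * 2.0f - 1.0f };
        V3 n = { p.x, p.y, 1.0f - std::fabs(p.x) - std::fabs(p.y) };
        if (n.z < 0) { V2 w = Wrap(p); n.x = w.x; n.y = w.y; }
        float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
        return { n.x / len, n.y / len, n.z / len };
    }

    int main()
    {
        V3 n = { 0.267f, -0.535f, 0.802f };          // roughly normalize(1, -2, 3)
        V3 r = Decode(Encode(n));
        printf("%.3f %.3f %.3f\n", r.x, r.y, r.z);   // reproduces the input up to rounding
        return 0;
    }
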
335
Research/scene/ObjLoader.cpp
Normal file
335
Research/scene/ObjLoader.cpp
Normal file
@@ -0,0 +1,335 @@
|
||||
#include <fstream>
|
||||
#include "../inc/assimp/Importer.hpp"
|
||||
#include "../inc/assimp/scene.h"
|
||||
#include "../inc/assimp/postprocess.h"
|
||||
#include "../Renderer.h"
|
||||
#include "ObjLoader.h"
|
||||
|
||||
ObjLoader* ObjLoader::mInstance = NULL;
|
||||
|
||||
|
||||
void ObjLoader::Create() {
|
||||
if (mInstance == NULL)
|
||||
mInstance = new ObjLoader();
|
||||
}
|
||||
|
||||
void ObjLoader::Destroy() {
|
||||
if (mInstance != NULL)
|
||||
delete mInstance;
|
||||
mInstance = NULL;
|
||||
}
|
||||
|
||||
ObjLoader* ObjLoader::Instance() {
|
||||
return mInstance;
|
||||
}
|
||||
|
||||
|
||||
//************************************
|
||||
// Load OBJ file with ASSIMP library
|
||||
// Parse to scene, using binary file if possible
|
||||
//************************************
|
||||
bool ObjLoader::Load(const char* fileName, Scene &scene) {
|
||||
|
||||
mVertexOffset = 0;
|
||||
mIndexOffset = 0;
|
||||
mTextureOffset = 0;
|
||||
mBinaryFileName = "";
|
||||
|
||||
// Try to load binary scene, otherwise parse ASSIMP scene
|
||||
if (!Read(fileName, scene)) {
|
||||
// Load OBJ with ASSIMP
|
||||
Assimp::Importer importer;
|
||||
const aiScene* loadedScene = importer.ReadFile(fileName, aiProcess_Triangulate | aiProcess_JoinIdenticalVertices | aiProcess_FlipUVs | aiProcess_GenSmoothNormals);
|
||||
if (!loadedScene) {
|
||||
// If bad allocation, try loading without joining identical vertices
|
||||
const char* t = importer.GetErrorString();
|
||||
if (strcmp(t, "bad allocation") == 0)
|
||||
loadedScene = importer.ReadFile(fileName, aiProcess_Triangulate | aiProcess_FlipUVs | aiProcess_GenSmoothNormals);
|
||||
if (!loadedScene)
|
||||
return false;
|
||||
}
|
||||
|
||||
// Load textures, only uses diffuse texture
|
||||
auto s1 = std::string(fileName);
|
||||
auto path = s1.substr(0, s1.find_last_of("\\/")).append("/");
|
||||
std::vector<std::string> textures;
|
||||
std::vector<float> reflectivenesses(loadedScene->mNumMaterials);
|
||||
textures.resize(loadedScene->mNumMaterials, std::string());
|
||||
for (unsigned m = 0; m < loadedScene->mNumMaterials; ++m) {
|
||||
const aiMaterial* currentMat = loadedScene->mMaterials[m];
|
||||
aiString dummy;
|
||||
aiString* textureFile = &dummy;
|
||||
aiGetMaterialTexture(currentMat, aiTextureType::aiTextureType_DIFFUSE, 0, textureFile);
|
||||
if (textureFile->data[0] != '\0') {
|
||||
textures[m] = std::string(path).append(textureFile->C_Str());
|
||||
//aiGetMaterialInteger(currentMat, aiMaterialProperty::)
|
||||
/*textures[m] = Renderer::Load2DTexture();
|
||||
if (mTextureOffset < 0) mTextureOffset = textures[m];*/
|
||||
}
|
||||
float reflectivity = 1.f;
|
||||
aiGetMaterialFloat(currentMat, AI_MATKEY_OPACITY, &reflectivity);
|
||||
reflectivity = 1.f - reflectivity;
|
||||
reflectivenesses[m] = reflectivity;
|
||||
//printf("%f\n", reflectivity);
|
||||
}
|
||||
|
||||
for (unsigned m = 0; m < loadedScene->mNumMeshes; ++m) {
|
||||
const aiMesh* currentMesh = loadedScene->mMeshes[m];
|
||||
size_t currentVertex = scene.vertices.size();
|
||||
size_t currentIndex = scene.indices.size();
|
||||
|
||||
// Fill vertex positions
|
||||
for (unsigned i = 0; i < currentMesh->mNumVertices; i++) {
|
||||
aiVector3D pos = currentMesh->mVertices[i];
|
||||
scene.vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
|
||||
}
|
||||
|
||||
// Fill vertices texture coordinates
|
||||
// Assume 1 set of UV coordinates, AssImp supports 8 sets
|
||||
if (currentMesh->HasTextureCoords(0)) {
|
||||
// If this mesh has uv coordinates, then the whole scene should have them.
|
||||
scene.uvs.resize(scene.vertices.size() - currentMesh->mNumVertices);
|
||||
|
||||
// Read the uvs
|
||||
for (unsigned i = 0; i < currentMesh->mNumVertices; i++) {
|
||||
aiVector3D UVW = currentMesh->mTextureCoords[0][i];
|
||||
scene.uvs.push_back(glm::vec2(UVW.x, UVW.y));
|
||||
}
|
||||
}
|
||||
// Fill empty spots
|
||||
if (scene.uvs.size() != 0)
|
||||
scene.uvs.resize(scene.vertices.size(), glm::vec2(0));
|
||||
|
||||
// Fill vertices normals
|
||||
if (currentMesh->HasNormals()) {
|
||||
for (unsigned i = 0; i < currentMesh->mNumVertices; i++) {
|
||||
aiVector3D n = currentMesh->mNormals[i];
|
||||
scene.normals.push_back(glm::vec3(n.x, n.y, n.z));
|
||||
}
|
||||
}
|
||||
// Fill empty spots
|
||||
scene.normals.resize(scene.vertices.size(), glm::vec3(0));
|
||||
|
||||
// Fill vertex colors (if they are set)
|
||||
if (currentMesh->HasVertexColors(0))
|
||||
{
|
||||
// If this mesh has colored vertices, then the whole scene should have them.
|
||||
scene.colors.resize(scene.vertices.size() - currentMesh->mNumVertices);
|
||||
|
||||
// Read the vertex coordinates
|
||||
for (unsigned i = 0; i < currentMesh->mNumVertices; i++) {
|
||||
aiColor4D col = currentMesh->mColors[0][i];
|
||||
scene.colors.push_back(glm::vec3(col.r, col.g, col.b));
|
||||
}
|
||||
}
|
||||
// If the scene has colors, fill them with the color white for the whole scene
|
||||
if (scene.colors.size() != 0)
|
||||
scene.colors.resize(scene.vertices.size(), glm::vec3(1));
|
||||
|
||||
// Fill triangle indices
|
||||
for (unsigned i = 0; i < currentMesh->mNumFaces; i++) {
|
||||
scene.indices.push_back((unsigned)(currentVertex + currentMesh->mFaces[i].mIndices[0]));
|
||||
scene.indices.push_back((unsigned)(currentVertex + currentMesh->mFaces[i].mIndices[1]));
|
||||
scene.indices.push_back((unsigned)(currentVertex + currentMesh->mFaces[i].mIndices[2]));
|
||||
}
|
||||
|
||||
// Save mesh
|
||||
Mesh mesh;
|
||||
mesh.offset = (unsigned)currentIndex;
|
||||
mesh.size = (unsigned)(scene.indices.size() - currentIndex);
|
||||
mesh.texture = textures[currentMesh->mMaterialIndex];
|
||||
mesh.hasUVs = currentMesh->HasTextureCoords(0);
|
||||
mesh.hasVertexColors = currentMesh->HasVertexColors(0);
|
||||
mesh.reflectivity = reflectivenesses[currentMesh->mMaterialIndex];
|
||||
scene.meshes.push_back(mesh);
|
||||
|
||||
if (mVertexOffset <= 0) mVertexOffset = currentVertex;
|
||||
if (mIndexOffset <= 0) mIndexOffset = currentIndex;
|
||||
}
|
||||
if (!Write(fileName, scene))
|
||||
return false;
|
||||
textures.clear();
|
||||
}
|
||||
// The scene pointer is deleted automatically by importer
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
ObjLoader::ObjLoader() {
|
||||
mVertexOffset = 0;
|
||||
mIndexOffset = 0;
|
||||
mTextureOffset = 0;
|
||||
mBinaryFileName = "";
|
||||
}
|
||||
|
||||
|
||||
ObjLoader::~ObjLoader() {
|
||||
}
|
||||
|
||||
//************************************
|
||||
// Get binary file name
|
||||
//************************************
|
||||
void ObjLoader::GetBinaryFileName(const char* fileName) {
|
||||
if (mBinaryFileName.empty()) {
|
||||
mBinaryFileName = fileName;
|
||||
mBinaryFileName.append(".bin");
|
||||
}
|
||||
}
|
||||
|
||||
bool ReadFile(std::ifstream& sceneInFile, Scene &scene)
|
||||
{
|
||||
try
|
||||
{
|
||||
int readVertexOffset, readIndexOffset, readTextureOffset;
|
||||
sceneInFile.read((char*)&readVertexOffset, sizeof(int));
|
||||
sceneInFile.read((char*)&readIndexOffset, sizeof(int));
|
||||
sceneInFile.read((char*)&readTextureOffset, sizeof(int));
|
||||
|
||||
unsigned verticesSize, uvsSize, normalsSize, colorsSize, indicesSize, meshesSize;
|
||||
|
||||
sceneInFile.read((char*)&verticesSize, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&uvsSize, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&normalsSize, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&colorsSize, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&indicesSize, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&meshesSize, sizeof(unsigned));
|
||||
|
||||
// Check if the sizes are valid. If they are not, we're probably dealing with an old file
|
||||
if (meshesSize > verticesSize || (colorsSize != 0 && colorsSize != verticesSize) || normalsSize != verticesSize || (uvsSize != 0 && uvsSize != verticesSize))
|
||||
return false;
|
||||
|
||||
if (!scene.uvs.empty() || uvsSize != 0)
|
||||
scene.uvs.resize(scene.vertices.size() + verticesSize, glm::vec2(0));
|
||||
scene.normals.resize(scene.normals.size() + normalsSize);
|
||||
if (!scene.colors.empty() || colorsSize != 0)
|
||||
scene.colors.resize(scene.vertices.size() + verticesSize, glm::vec3(1));
|
||||
scene.indices.resize(scene.indices.size() + indicesSize);
|
||||
scene.meshes.resize(scene.meshes.size() + meshesSize);
|
||||
scene.vertices.resize(scene.vertices.size() + verticesSize);
|
||||
|
||||
if (verticesSize != 0) sceneInFile.read((char*)&scene.vertices[scene.vertices.size() - verticesSize], sizeof(glm::vec3) * verticesSize);
|
||||
if (sceneInFile.eof()) return false;
|
||||
if (uvsSize != 0) sceneInFile.read((char*)&scene.uvs[scene.uvs.size() - uvsSize], sizeof(glm::vec2) * uvsSize);
|
||||
if (sceneInFile.eof()) return false;
|
||||
if (normalsSize != 0) sceneInFile.read((char*)&scene.normals[scene.normals.size() - normalsSize], sizeof(glm::vec3) * normalsSize);
|
||||
if (sceneInFile.eof()) return false;
|
||||
if (colorsSize != 0) sceneInFile.read((char*)&scene.colors[scene.colors.size() - colorsSize], sizeof(glm::vec3) * colorsSize);
|
||||
if (sceneInFile.eof()) return false;
|
||||
if (indicesSize != 0) sceneInFile.read((char*)&scene.indices[scene.indices.size() - indicesSize], sizeof(unsigned) * indicesSize);
|
||||
if (sceneInFile.eof()) return false;
|
||||
|
||||
for (unsigned i = 0; i < meshesSize; i++) {
|
||||
if (sceneInFile.eof()) return false;
|
||||
Mesh mesh;
|
||||
sceneInFile.read((char*)&mesh.offset, sizeof(unsigned));
|
||||
sceneInFile.read((char*)&mesh.size, sizeof(unsigned));
|
||||
unsigned int textureSize;
|
||||
sceneInFile.read((char*)&textureSize, sizeof(unsigned int));
|
||||
// Read the texture path into a std::string so no manual delete[] is needed (the early returns below would otherwise leak)
|
||||
std::string texture(textureSize, '\0');
|
||||
if (textureSize != 0) sceneInFile.read(&texture[0], textureSize);
|
||||
mesh.texture = texture;
|
||||
if (sceneInFile.eof()) return false;
|
||||
sceneInFile.read((char*)&mesh.hasUVs, sizeof(bool));
|
||||
if (sceneInFile.eof()) return false;
|
||||
sceneInFile.read((char*)&mesh.hasVertexColors, sizeof(bool));
|
||||
sceneInFile.read((char*)&mesh.reflectivity, sizeof(float));
|
||||
|
||||
scene.meshes[scene.meshes.size() - meshesSize + i] = mesh; // append after any meshes the scene already had, like the other arrays above
|
||||
}
|
||||
return true;
|
||||
}
|
||||
catch (std::exception& ex)
|
||||
{
|
||||
printf("An exception occured while reading the mesh cache file: %s", ex.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
//************************************
|
||||
// Read scene from binary file
|
||||
//************************************
|
||||
bool ObjLoader::Read(const char* fileName, Scene &scene) {
|
||||
|
||||
GetBinaryFileName(fileName);
|
||||
std::ifstream sceneInFile(mBinaryFileName, std::ios::binary);
|
||||
|
||||
if (sceneInFile.good()) {
|
||||
mVertexOffset = (unsigned)scene.vertices.size();
|
||||
mIndexOffset = (unsigned)scene.indices.size();
|
||||
|
||||
unsigned verticesSize = (unsigned)scene.vertices.size();
|
||||
unsigned uvsSize = (unsigned)scene.uvs.size();
|
||||
unsigned normalsSize = (unsigned)scene.normals.size();
|
||||
unsigned colorsSize = (unsigned)scene.colors.size();
|
||||
unsigned indicesSize = (unsigned)scene.indices.size();
|
||||
unsigned meshesSize = (unsigned)scene.meshes.size();
|
||||
|
||||
bool readSuccess = ReadFile(sceneInFile, scene);
|
||||
if (!readSuccess)
|
||||
{
|
||||
// Revert the scene if reading failed
|
||||
scene.vertices.resize(verticesSize); scene.vertices.shrink_to_fit();
|
||||
scene.uvs.resize(uvsSize); scene.uvs.shrink_to_fit();
|
||||
scene.normals.resize(normalsSize); scene.normals.shrink_to_fit();
|
||||
scene.colors.resize(colorsSize); scene.colors.shrink_to_fit();
|
||||
scene.indices.resize(indicesSize); scene.indices.shrink_to_fit();
|
||||
scene.meshes.resize(meshesSize); scene.meshes.shrink_to_fit();
|
||||
|
||||
}
|
||||
sceneInFile.close();
|
||||
|
||||
return readSuccess;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
//************************************
|
||||
// Write scene to binary file
|
||||
//************************************
|
||||
bool ObjLoader::Write(const char* fileName, Scene &scene) {
|
||||
|
||||
GetBinaryFileName(fileName);
|
||||
std::ofstream sceneOutFile(mBinaryFileName, std::ios::binary);
|
||||
|
||||
unsigned verticesSize = (unsigned)scene.vertices.size();
|
||||
unsigned uvsSize = (unsigned)scene.uvs.size();
|
||||
unsigned normalsSize = (unsigned)scene.normals.size();
|
||||
unsigned colorsSize = (unsigned)scene.colors.size();
|
||||
unsigned indicesSize = (unsigned)scene.indices.size();
|
||||
unsigned meshesSize = (unsigned)scene.meshes.size();
|
||||
|
||||
sceneOutFile.write((char*)&mVertexOffset, sizeof(int));
|
||||
sceneOutFile.write((char*)&mIndexOffset, sizeof(int));
|
||||
sceneOutFile.write((char*)&mTextureOffset, sizeof(int));
|
||||
|
||||
sceneOutFile.write((char*)&verticesSize, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&uvsSize, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&normalsSize, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&colorsSize, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&indicesSize, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&meshesSize, sizeof(unsigned));
|
||||
|
||||
if (verticesSize > 0) sceneOutFile.write((char*)&scene.vertices[0], sizeof(glm::vec3) * verticesSize);
|
||||
if (uvsSize > 0) sceneOutFile.write((char*)&scene.uvs[0], sizeof(glm::vec2) * uvsSize);
|
||||
if (normalsSize > 0) sceneOutFile.write((char*)&scene.normals[0], sizeof(glm::vec3) * normalsSize);
|
||||
if (colorsSize > 0) sceneOutFile.write((char*)&scene.colors[0], sizeof(glm::vec3) * colorsSize);
|
||||
if (indicesSize > 0) sceneOutFile.write((char*)&scene.indices[0], sizeof(unsigned) * indicesSize);
|
||||
|
||||
unsigned int textureSize;
|
||||
for (Mesh mesh : scene.meshes) {
|
||||
sceneOutFile.write((char*)&mesh.offset, sizeof(unsigned));
|
||||
sceneOutFile.write((char*)&mesh.size, sizeof(unsigned));
|
||||
textureSize = (unsigned)mesh.texture.size();
|
||||
sceneOutFile.write((char*)&textureSize, sizeof(unsigned int));
|
||||
sceneOutFile.write(mesh.texture.c_str(), textureSize);
|
||||
sceneOutFile.write((char*)&mesh.hasUVs, sizeof(bool));
|
||||
sceneOutFile.write((char*)&mesh.hasVertexColors, sizeof(bool));
|
||||
sceneOutFile.write((char*)&mesh.reflectivity, sizeof(float));
|
||||
}
|
||||
sceneOutFile.close();
|
||||
|
||||
return true;
|
||||
}
|
||||
31
Research/scene/ObjLoader.h
Normal file
31
Research/scene/ObjLoader.h
Normal file
@@ -0,0 +1,31 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
#include "Scene.h"
|
||||
|
||||
class ObjLoader {
|
||||
|
||||
public:
|
||||
static void Create();
|
||||
static void Destroy();
|
||||
static ObjLoader* Instance();
|
||||
|
||||
bool Load(const char* fileName, Scene &scene);
|
||||
|
||||
protected:
|
||||
|
||||
private:
|
||||
ObjLoader();
|
||||
~ObjLoader();
|
||||
void GetBinaryFileName(const char* fileName);
|
||||
bool Read(const char* fileName, Scene &scene);
|
||||
bool Write(const char* fileName, Scene &scene);
|
||||
|
||||
static ObjLoader* mInstance;
|
||||
|
||||
size_t mVertexOffset;
|
||||
size_t mIndexOffset;
|
||||
size_t mTextureOffset;
|
||||
|
||||
std::string mBinaryFileName;
|
||||
};
|
||||
51
Research/scene/Octree/BaseTree.cpp
Normal file
51
Research/scene/Octree/BaseTree.cpp
Normal file
@@ -0,0 +1,51 @@
|
||||
|
||||
#include "BaseTree.h"
|
||||
|
||||
std::vector<BaseTree*> BaseTree::mTreePool = std::vector<BaseTree*>();
|
||||
|
||||
BaseTree::BaseTree()
|
||||
{
|
||||
mTreeIndex = AssignRootIndex();
|
||||
}
|
||||
BaseTree::~BaseTree()
|
||||
{
|
||||
mTreePool[mTreeIndex] = NULL;
|
||||
}
|
||||
|
||||
// Returns the number of bytes per pointer for each level as a vector
|
||||
std::vector<unsigned8> BaseTree::GetAdditionalBytesPerPointer() const
|
||||
{
|
||||
std::vector<unsigned8> res(GetMaxLevel() + 1);
|
||||
for (unsigned8 level = 0; level <= GetMaxLevel(); level++)
|
||||
res[level] = GetAdditionalBytesPerPointer(level);
|
||||
return res;
|
||||
}
|
||||
// Returns the number of bytes per node for each level as a vector.
|
||||
std::vector<unsigned8> BaseTree::GetAdditionalBytesPerNode() const
|
||||
{
|
||||
std::vector<unsigned8> res(GetMaxLevel() + 1);
|
||||
for (unsigned8 level = 0; level <= GetMaxLevel(); level++)
|
||||
res[level] = GetAdditionalBytesPerNode(level);
|
||||
return res;
|
||||
}
|
||||
|
||||
unsigned16 BaseTree::AssignRootIndex()
|
||||
{
|
||||
if (!mTreePool.empty())
|
||||
{
|
||||
// Try and find an empty spot in the RootPool:
|
||||
for (size_t i = 0; i < mTreePool.size(); i++)
|
||||
{
|
||||
if (mTreePool[i] == NULL)
|
||||
{
|
||||
mTreePool[i] = this;
|
||||
return (unsigned16)i;
|
||||
}
|
||||
if (mTreePool[i] == this) return (unsigned16)i; // Already assigned
|
||||
}
|
||||
}
|
||||
// If no empty spot was found, increase the size of the root
|
||||
mTreePool.push_back(this);
|
||||
assert(mTreePool.size() < BitHelper::Exp2(16));
|
||||
return (unsigned16)(mTreePool.size() - 1);
|
||||
}
|
||||
67
Research/scene/Octree/BaseTree.h
Normal file
67
Research/scene/Octree/BaseTree.h
Normal file
@@ -0,0 +1,67 @@
|
||||
#pragma once
|
||||
#include <deque>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include "../../core/Defines.h"
|
||||
#include "Node.h"
|
||||
|
||||
class BaseTree
|
||||
{
|
||||
public:
|
||||
BaseTree();
|
||||
virtual ~BaseTree();
|
||||
|
||||
virtual const Node* GetNode(const unsigned32 index) const = 0;
|
||||
virtual Node* GetNode(const unsigned32 index) = 0;
|
||||
virtual unsigned32 GetNodeCount() const = 0;
|
||||
virtual unsigned8 GetMaxLevel() const = 0;
|
||||
virtual bool IsEmpty() const = 0;
|
||||
virtual void Clear() = 0;
|
||||
virtual Node* Create(unsigned8 level) = 0;
|
||||
// Destroy the node at the given index. Note that this is not required to fix references to this node, so it is quite unsafe to call!
|
||||
// This is only to save memory when, for example, moving nodes from one tree to the other
|
||||
virtual void Destroy(unsigned32 index) = 0;
|
||||
|
||||
// Algorithms that can be done on any tree:
|
||||
virtual void Shave(unsigned8 depth) = 0;
|
||||
virtual std::vector<unsigned32> SortOnLevel() = 0;
|
||||
|
||||
// General information about the tree
|
||||
virtual std::vector<size_t> GetParentCounts() const = 0;
|
||||
virtual std::vector<size_t> GetNodesPerLevel() const = 0;
|
||||
virtual std::vector<size_t> GetOctreeNodesPerLevel() const = 0;
|
||||
virtual unsigned64 GetPointerCount() const = 0;
|
||||
virtual void PrintDebugInfo() const = 0;
|
||||
|
||||
// Reading and writing to/from files
|
||||
virtual bool ReadTree(const char* fileName) = 0;
|
||||
virtual bool WriteTree(const char* fileName) = 0;
|
||||
virtual bool VerifyTree(const char* fileName) = 0;
|
||||
virtual bool ReadAdditionalPool(const char* fileName) = 0;
|
||||
virtual bool WriteAdditionalPool(const char* fileName) = 0;
|
||||
|
||||
// Pool building information
|
||||
virtual unsigned8 GetAdditionalTreeInfoSize() const = 0;
|
||||
virtual std::vector<unsigned8> GetAdditionalTreeInfo(const std::vector<size_t>& nodePointers) const = 0;
|
||||
virtual unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const = 0;
|
||||
virtual std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const = 0;
|
||||
virtual bool LastChildHasAdditionalBytes() const = 0;
|
||||
virtual unsigned8 GetAdditionalBytesPerPointer(unsigned8 level) const = 0;
|
||||
virtual std::vector<unsigned8> GetAdditionalPointerBytes(const Node* node, ChildIndex child) const = 0;
|
||||
|
||||
// Returns the number of bytes per pointer for each level as a vector
|
||||
std::vector<unsigned8> GetAdditionalBytesPerPointer() const;
|
||||
// Returns the number of bytes per node for each level as a vector.
|
||||
std::vector<unsigned8> GetAdditionalBytesPerNode() const;
|
||||
|
||||
inline unsigned16 GetTreeIndex() const { return mTreeIndex; }
|
||||
inline static BaseTree* Get(unsigned16 rootIndex) { return mTreePool[rootIndex]; }
|
||||
|
||||
private:
|
||||
unsigned16 AssignRootIndex();
|
||||
|
||||
static std::vector<BaseTree*> mTreePool;
|
||||
unsigned16 mTreeIndex;
|
||||
};
|
||||
|
||||
52
Research/scene/Octree/ChildMask.h
Normal file
52
Research/scene/Octree/ChildMask.h
Normal file
@@ -0,0 +1,52 @@
|
||||
#pragma once
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
|
||||
struct ChildBits
|
||||
{
|
||||
unsigned8 c0 : 1, c1 : 1, c2 : 1, c3 : 1, c4 : 1, c5 : 1, c6 : 1, c7 : 1;
|
||||
};
|
||||
union ChildMask
|
||||
{
|
||||
ChildBits children;
|
||||
unsigned8 mask;
|
||||
|
||||
ChildMask() : ChildMask(0) {}
|
||||
ChildMask(unsigned8 mask) { this->mask = mask; }
|
||||
|
||||
inline bool Get(ChildIndex index) const {
|
||||
return BitHelper::GetLS(mask, index);
|
||||
//switch (index)
|
||||
//{
|
||||
//case 0: return children.c0;
|
||||
//case 1: return children.c1;
|
||||
//case 2: return children.c2;
|
||||
//case 3: return children.c3;
|
||||
//case 4: return children.c4;
|
||||
//case 5: return children.c5;
|
||||
//case 6: return children.c6;
|
||||
//case 7: return children.c7;
|
||||
//}
|
||||
//return false;
|
||||
}
|
||||
|
||||
inline void Set(ChildIndex index, bool value)
|
||||
{
|
||||
BitHelper::SetLS(mask, index, value);
|
||||
//switch (index)
|
||||
//{
|
||||
//case 0: children.c0 = value; break;
|
||||
//case 1: children.c1 = value; break;
|
||||
//case 2: children.c2 = value; break;
|
||||
//case 3: children.c3 = value; break;
|
||||
//case 4: children.c4 = value; break;
|
||||
//case 5: children.c5 = value; break;
|
||||
//case 6: children.c6 = value; break;
|
||||
//case 7: children.c7 = value; break;
|
||||
//}
|
||||
}
|
||||
|
||||
// Use bit-tricks to count the number of set bits before a certain positions
|
||||
inline unsigned8 GetSetBefore(ChildIndex pos) const { return BitHelper::GetHSSetBefore(mask, pos); }
|
||||
inline unsigned8 GetSet() const { return BitHelper::GetSet(mask); }
|
||||
};
|
||||
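
The child-offset trick above (a sparse node stores only the children that exist, and a child's slot is found by counting the set bits before its position) can be sketched with a portable popcount. Counting from the high-significant side is assumed here, matching the name of BitHelper::GetHSSetBefore; the helpers below are written for this example, not taken from the project.

    #include <cstdint>
    #include <cstdio>

    // Portable popcount for a byte.
    static unsigned PopCount8(uint8_t v)
    {
        unsigned c = 0;
        for (; v; v &= (uint8_t)(v - 1)) c++; // clear the lowest set bit each iteration
        return c;
    }

    // Number of set child bits at positions above `pos` (counting from the high-significant side,
    // which is what GetHSSetBefore is assumed to do given its name).
    static unsigned SetBeforeHS(uint8_t mask, unsigned pos)
    {
        return PopCount8((uint8_t)(mask & (uint8_t)~((1u << (pos + 1)) - 1u)));
    }

    int main()
    {
        uint8_t mask = 0xB2; // 0b10110010: children 1, 4, 5, 7 present
        printf("children present: %u\n", PopCount8(mask));                  // 4
        printf("set before child 4 (HS side): %u\n", SetBeforeHS(mask, 4)); // bits 7 and 5 -> 2
        return 0;
    }
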
5
Research/scene/Octree/ColorChannelMultiRootTree.h
Normal file
5
Research/scene/Octree/ColorChannelMultiRootTree.h
Normal file
@@ -0,0 +1,5 @@
|
||||
#pragma once
|
||||
#include "../Material/ColorChannel.h"
|
||||
#include "LeafMaterialMultiRootTree.h"
|
||||
|
||||
typedef LeafMaterialMultiRootTree<ColorChannel> ColorChannelMultiRootTree;
|
||||
95
Research/scene/Octree/EdgeMaterialNode.h
Normal file
95
Research/scene/Octree/EdgeMaterialNode.h
Normal file
@@ -0,0 +1,95 @@
|
||||
#pragma once
|
||||
#include <fstream>
|
||||
#include "Node.h"
|
||||
#include "BaseTree.h"
|
||||
#include "../../core/Util/SmallDynamicArray.h"
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>>
|
||||
class EdgeMaterialNode : public Node
|
||||
{
|
||||
private:
|
||||
SmallDynamicArray<T> mEdgeMaterials;
|
||||
public:
|
||||
EdgeMaterialNode(BaseTree* root, unsigned8 level = 0) : Node(root, level), mEdgeMaterials(SmallDynamicArray<T>()) {}
|
||||
//EdgeMaterialNode(const EdgeMaterialNode<T>& node) : Node(node) { SetEdgeMaterials(node->GetEdgeMaterials()); }
|
||||
EdgeMaterialNode(EdgeMaterialNode&& node) : Node(std::move(node))
|
||||
{
|
||||
mEdgeMaterials = std::move(node.mEdgeMaterials);
|
||||
}
|
||||
~EdgeMaterialNode() {}
|
||||
|
||||
EdgeMaterialNode& operator=(EdgeMaterialNode&& node)
|
||||
{
|
||||
mEdgeMaterials = std::move(node.mEdgeMaterials);
|
||||
Node::operator=(std::move(node));
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Takes a pointer to an array in memory containing at least as many entries as there are children for this node.
|
||||
// Copies that array as the edge materials for this node
|
||||
void SetEdgeMaterials(T* edgeMaterials)
|
||||
{
|
||||
mEdgeMaterials.Clear();
|
||||
unsigned8 childCount = GetChildCount();
|
||||
mEdgeMaterials.Resize(0, childCount);
|
||||
mEdgeMaterials.SetRange(edgeMaterials, 0, childCount);
|
||||
}
|
||||
T* GetEdgeMaterials() const { return &mEdgeMaterials[0]; }
|
||||
T GetEdgeMaterial(ChildIndex child) const { return mEdgeMaterials[GetChildmask().GetSetBefore(child)]; }
|
||||
void SetEdgeMaterial(ChildIndex child, T material) { assert(GetChildmask().Get(child)); mEdgeMaterials[GetChildmask().GetSetBefore(child)] = material; }
|
||||
|
||||
bool Compare(const EdgeMaterialNode& node) const
|
||||
{
|
||||
if (!mEdgeMaterials.IsEmpty() && !node.mEdgeMaterials.IsEmpty())
|
||||
{
|
||||
// DEBUG: Compare the shifts.
|
||||
// This should lead to the same results as comparing the pointers but you never know
|
||||
for (unsigned8 i = 0; i < GetChildCount(); i++)
|
||||
{
|
||||
if (node.mEdgeMaterials[i] != this->mEdgeMaterials[i])
|
||||
return node.mEdgeMaterials[i] < this->mEdgeMaterials[i];
|
||||
}
|
||||
}
|
||||
|
||||
return Node::Compare(node);
|
||||
}
|
||||
bool Equals(const EdgeMaterialNode& node) const
|
||||
{
|
||||
if (!Node::Equals(node))
|
||||
return false;
|
||||
|
||||
if (this->mEdgeMaterials.IsEmpty() || node.mEdgeMaterials.IsEmpty())
|
||||
return this->mEdgeMaterials.IsEmpty() && node.mEdgeMaterials.IsEmpty();
|
||||
else
|
||||
{
|
||||
// DEBUG: Check if the shifts are equal
|
||||
// This should lead to the same results as comparing the pointers but you never know
|
||||
for (unsigned8 i = 0; i < GetChildmask().GetSet(); i++)
|
||||
{
|
||||
if (node.mEdgeMaterials[i] != this->mEdgeMaterials[i])
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void WriteProperties(std::ostream& file)
|
||||
{
|
||||
assert(!mEdgeMaterials.IsEmpty() || GetChildCount() == 0);
|
||||
Serializer<T*>::Serialize(&mEdgeMaterials[0], GetChildCount(), file);
|
||||
Node::WriteProperties(file);
|
||||
}
|
||||
void ReadProperties(std::istream& file)
|
||||
{
|
||||
mEdgeMaterials.Clear();
|
||||
mEdgeMaterials.Resize(0, GetChildCount());
|
||||
Serializer<T*>::Deserialize(&mEdgeMaterials[0], GetChildCount(), file);
|
||||
Node::ReadProperties(file);
|
||||
}
|
||||
void CopyProperties(EdgeMaterialNode* source)
|
||||
{
|
||||
SetEdgeMaterials(source->GetEdgeMaterials());
|
||||
}
|
||||
};
|
||||
252
Research/scene/Octree/HierarchicalColorsOnlyTree.cpp
Normal file
252
Research/scene/Octree/HierarchicalColorsOnlyTree.cpp
Normal file
@@ -0,0 +1,252 @@
|
||||
#include "HierarchicalColorsOnlyTree.h"
|
||||
#include "MaterialLibraryTree.h"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../scene/Material/MaterialQuantizer/ColorQuantizer/BaseColorQuantizer.h"
|
||||
|
||||
HierarchicalColorsOnlyTree::HierarchicalColorsOnlyTree(unsigned8 maxLevel) : MaterialLibraryTree<Color, ColorCompare>(maxLevel)
|
||||
{
|
||||
}
|
||||
|
||||
void HierarchicalColorsOnlyTree::ToDAG(BaseColorQuantizer* quantizer)
|
||||
{
|
||||
if (this->GetNodeCount() == 1)
|
||||
return; // Empty tree = DAG
|
||||
|
||||
// Sort the nodes on level (for quick access of parent nodes)
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
this->mMaxLevel = (unsigned8)(levelIndices.size() - 2);
|
||||
|
||||
// Run through the layers in reverse order and compress them bottom up
|
||||
for (unsigned8 level = GetMaxLevel(); level > 0; --level)
|
||||
{
|
||||
printf(".");
|
||||
unsigned32 levelStart = levelIndices[level];
|
||||
unsigned32 levelEnd = levelIndices[level + 1];
|
||||
unsigned32 levelNodeCount = levelIndices[level + 1] - levelIndices[level];
|
||||
|
||||
//// Sort the nodes in the current level on material properties and child pointers
|
||||
//tbb::parallel_sort(levelStart, levelEnd, [](Node* a, Node* b) { return a->Compare(b); });
|
||||
SortBetween(levelStart, levelEnd, NodeComparer());
|
||||
|
||||
// Find all equal nodes
|
||||
MaterialLibraryNode* cur = NULL;
|
||||
std::unordered_map<unsigned, unsigned> replacements;
|
||||
std::vector<unsigned> uniqueNodes;
|
||||
for (unsigned32 i = levelStart; i < levelEnd; i++)
|
||||
{
|
||||
auto node = GetTypedNode(i);
|
||||
if (cur == NULL || !node->Equals(*cur))
|
||||
{
|
||||
cur = node;
|
||||
uniqueNodes.push_back(cur->GetIndex());
|
||||
}
|
||||
else
|
||||
{
|
||||
// Make sure that all nodes are replaced by their equals
|
||||
replacements.insert(std::pair<unsigned, unsigned>(node->GetIndex(), cur->GetIndex()));
|
||||
}
|
||||
}
|
||||
|
||||
// From the list of unique nodes, find out which ones can be merged.
|
||||
// Get all colors
|
||||
std::unordered_map<unsigned, glm::u8vec3> uniqueNodeColors;
|
||||
for (size_t i = 0; i < uniqueNodes.size(); i++)
|
||||
{
|
||||
unsigned node = uniqueNodes[i];
|
||||
uniqueNodeColors.insert(std::pair<unsigned, glm::u8vec3>(node, GetMaterial(GetTypedNode(node)).GetColor()));
|
||||
}
|
||||
|
||||
if (level != GetMaxLevel())
|
||||
{
|
||||
// Get the unique colors of the unique nodes
|
||||
std::vector<glm::u8vec3> colorsToCompress(uniqueNodeColors.size());
|
||||
size_t k = 0;
|
||||
for (auto uniqueColor : uniqueNodeColors)
|
||||
colorsToCompress[k++] = uniqueColor.second;
|
||||
|
||||
tbb::parallel_sort(colorsToCompress.begin(), colorsToCompress.end(), ColorCompare());
|
||||
colorsToCompress.erase(std::unique(colorsToCompress.begin(), colorsToCompress.end()), colorsToCompress.end());
|
||||
colorsToCompress.shrink_to_fit();
|
||||
// Quantize these colors
|
||||
if (quantizer != NULL)
|
||||
{
|
||||
auto quantizedColors = quantizer->QuantizeColors(colorsToCompress);
|
||||
|
||||
// Replace the unique node colors by their quantized counterparts
|
||||
for (size_t i = 0; i < uniqueNodes.size(); i++)
|
||||
{
|
||||
auto originalColor = uniqueNodeColors.find(uniqueNodes[i]);
|
||||
auto quantizedColor = quantizedColors->find(originalColor->second)->second;
|
||||
originalColor->second = quantizedColor;
|
||||
}
|
||||
}
|
||||
|
||||
// Find out which nodes can be merged:
|
||||
for (auto i = uniqueNodes.begin(); i != uniqueNodes.end(); i++)
|
||||
{
|
||||
Node* cur = GetTypedNode(*i);
|
||||
// Find the list of potential merges
|
||||
auto mergeScores = GetMergeScores(uniqueNodeColors, i, i + 1, uniqueNodes.end());
|
||||
// Keep merging as long as more nodes can be merged with the current one
|
||||
while (!mergeScores.empty())
|
||||
{
|
||||
// From the list of potential merges, find the best one (the one with the highest score)
|
||||
unsigned bestMerge = mergeScores.begin()->first;
|
||||
unsigned bestScore = 0;
|
||||
for (auto merge : mergeScores)
|
||||
{
|
||||
if (merge.second > bestScore)
|
||||
{
|
||||
bestScore = merge.second;
|
||||
bestMerge = merge.first;
|
||||
}
|
||||
}
|
||||
|
||||
Node* otherNode = GetTypedNode(bestMerge);
|
||||
// Merge the other node into this node
|
||||
ChildMask childrenToMerge((cur->GetChildmask().mask ^ otherNode->GetChildmask().mask) & otherNode->GetChildmask().mask);
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (childrenToMerge.Get(c))
|
||||
cur->SetChild(c, otherNode->GetChild(c));
|
||||
}
|
||||
// Update the replacers that pointed to other to point to cur
|
||||
for (auto replacement = replacements.begin(); replacement != replacements.end(); replacement++)
|
||||
{
|
||||
if (replacement->second == bestMerge)
|
||||
replacement->second = *i;
|
||||
}
|
||||
// Also replace the other node by the current in the replacements
|
||||
replacements.insert(std::pair<unsigned, unsigned>(bestMerge, *i));
|
||||
|
||||
// Remove other from uniqueNodes (since it is merged and will be removed completely)
|
||||
auto bestMergeIt = std::find(uniqueNodes.begin(), uniqueNodes.end(), bestMerge);
|
||||
uniqueNodes.erase(bestMergeIt);
|
||||
|
||||
// Calculate what merges might still be available after this one
|
||||
std::vector<unsigned> potentialMerges(mergeScores.size() - 1);
|
||||
// TODO: make this a parallel for?
|
||||
size_t k = 0;
|
||||
for (auto merge : mergeScores)
|
||||
{
|
||||
if (merge.first != bestMerge)
|
||||
{
|
||||
potentialMerges[k++] = merge.first;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if any other merges are still possible
|
||||
mergeScores = GetMergeScores(uniqueNodeColors, i, potentialMerges.begin(), potentialMerges.end());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mark nodes that are not in uniquenodes for deletion
|
||||
tbb::parallel_sort(uniqueNodes);
|
||||
unsigned32 uniqueNodesIndex = 0;
|
||||
for (unsigned32 i = levelStart; i < levelEnd; i++)
|
||||
{
|
||||
while (uniqueNodesIndex < uniqueNodes.size() && uniqueNodes[uniqueNodesIndex] < i) uniqueNodesIndex++;
|
||||
if (uniqueNodesIndex >= uniqueNodes.size() || uniqueNodes[uniqueNodesIndex] != i) Destroy(i);
|
||||
}
|
||||
|
||||
|
||||
printf(".");
|
||||
|
||||
auto parentLevelStart = levelIndices[level - 1];
|
||||
auto parentLevelEnd = levelIndices[level];
|
||||
// Point all parents of nodes to the new replacement node
|
||||
// Note that this is only necessary if some nodes are replaced by others
|
||||
if (uniqueNodes.size() < levelNodeCount)
|
||||
{
|
||||
|
||||
for (unsigned32 j = parentLevelStart; j < parentLevelEnd; j++)
|
||||
{
|
||||
Node* parent = GetTypedNode(j);
|
||||
for (ChildIndex child = FRONT_BOTTOM_LEFT; child <= BACK_TOP_RIGHT; ++child)
|
||||
{
|
||||
if (parent->HasChild(child))
|
||||
{
|
||||
unsigned childPtr = parent->GetChildIndex(child);
|
||||
auto replacer = replacements.find(childPtr);
|
||||
if (replacer != replacements.end())
|
||||
{
|
||||
auto newChild = GetNode((*replacer).second);
|
||||
if (newChild == NULL)
|
||||
printf("Empty child being set?");
|
||||
parent->SetChild(child, newChild);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
printf(".");
|
||||
|
||||
//// Remove the old (replaced) nodes
|
||||
//if (uniqueNodes.size() < levelNodeCount)
|
||||
//{
|
||||
// tbb::parallel_for_each(replacements.begin(), replacements.end(), [&](std::pair<unsigned, unsigned> node)
|
||||
// {
|
||||
// Destroy(mNodePool[node.first]);
|
||||
// mNodePool[node.first] = NULL;
|
||||
// });
|
||||
//}
|
||||
//replacements.clear();
|
||||
|
||||
printf(" ");
|
||||
printf("Layer %2u compressed, %7u out of %7u nodes left\n", level, (unsigned32)uniqueNodes.size(), levelNodeCount);
|
||||
}
|
||||
|
||||
Clean();
|
||||
}
|
||||
|
||||
std::vector<std::pair<unsigned, unsigned>> HierarchicalColorsOnlyTree::GetMergeScores(const std::unordered_map<unsigned, glm::u8vec3>& uniqueNodeColors,
|
||||
std::vector<unsigned>::iterator i, std::vector<unsigned>::iterator begin, std::vector<unsigned>::iterator end)
|
||||
{
|
||||
Node* cur = GetTypedNode(*i);
|
||||
glm::u8vec3 curColor = uniqueNodeColors.at(*i);
|
||||
ChildMask curChildMask = cur->GetChildmask();
|
||||
std::vector<std::pair<unsigned, unsigned>> mergeScores;
|
||||
for (auto j = begin; j != end; j++)
|
||||
{
|
||||
auto othColor = uniqueNodeColors.at(*j);
|
||||
// If the average colors are not the same, they can't be merged
|
||||
if (othColor != curColor)
|
||||
continue;
|
||||
// The nodes can be merged if all children that exist are the same.
|
||||
// Effectively this means that we need to check if the pointers to the children that both nodes have are the same
|
||||
auto other = GetTypedNode(*j);
|
||||
ChildMask otherChildMask = other->GetChildmask();
|
||||
// If the childmasks are the same and the children are also the same, these nodes would already have been merged.
|
||||
// Therefore, there is no need to check for merge possibilities
|
||||
if (curChildMask.mask == otherChildMask.mask)
|
||||
continue;
|
||||
|
||||
ChildMask bothNodesHave = ChildMask(curChildMask.mask & otherChildMask.mask);
|
||||
// Initialize the merge score as the number of children that are set in only one of the two masks (i.e. XOR(mask1, mask2).GetSet())
|
||||
unsigned mergeScore = ChildMask(curChildMask.mask ^ otherChildMask.mask).GetSet();
|
||||
bool potentialMerge = true;
|
||||
|
||||
// Check if merging is possible and calculate the merge score
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (bothNodesHave.Get(c))
|
||||
{
|
||||
if (cur->GetChild(c) == other->GetChild(c))
|
||||
{
|
||||
mergeScore += 2; // We can merge 2 nodes :D
|
||||
}
|
||||
else
|
||||
{
|
||||
potentialMerge = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if (potentialMerge)
|
||||
mergeScores.push_back(std::pair<unsigned, unsigned>(*j, mergeScore));
|
||||
}
|
||||
return mergeScores;
|
||||
}
|
||||
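// Worked example of the merge score computed above (illustrative only): take two nodes with
// the same average color, childmasks 0b00001111 and 0b00111100, and identical child pointers
// for the shared children 2 and 3. The XOR of the masks has 4 bits set, and each of the two
// shared, identical children adds 2, so the score is 4 + 2 + 2 = 8 and the pair is a merge candidate.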
21
Research/scene/Octree/HierarchicalColorsOnlyTree.h
Normal file
@@ -0,0 +1,21 @@
|
||||
#pragma once
|
||||
#include <fstream>
|
||||
#include "MaterialLibraryTree.h"
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../Material/Color.h"
|
||||
|
||||
class Root;
|
||||
class BaseColorQuantizer;
|
||||
|
||||
class HierarchicalColorsOnlyTree : public MaterialLibraryTree<Color, ColorCompare>
|
||||
{
|
||||
public:
|
||||
HierarchicalColorsOnlyTree(unsigned8 maxLevel);
|
||||
|
||||
// Special DAG conversion: also merges nodes that differ only in that one of them has a child with some color where the other has no child at all
|
||||
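// Hedged usage note: any BaseColorQuantizer implementation can be passed, or NULL to skip the
// quantization step and only perform the structural (lossy) merging, e.g.:
//   tree.ToDAG(quantizer /* may be NULL */);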
void ToDAG(BaseColorQuantizer* quantizer);
|
||||
private:
|
||||
std::vector<std::pair<unsigned, unsigned>> GetMergeScores(const std::unordered_map<unsigned, glm::u8vec3>& uniqueNodeColors,
|
||||
std::vector<unsigned>::iterator i, std::vector<unsigned>::iterator begin, std::vector<unsigned>::iterator end);
|
||||
};
|
||||
99
Research/scene/Octree/HierarchicalMaterialMultiRoot.h
Normal file
@@ -0,0 +1,99 @@
|
||||
#pragma once
|
||||
#include "MultiRootTree.h"
|
||||
#include "MaterialNode.h"
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>>
|
||||
class MultiRootMaterialNode : public MaterialNode<T, Comparer>
|
||||
{
|
||||
private:
|
||||
bool mIsGeometry = false; // Default-initialize: the two shorter constructors never set this flag
|
||||
|
||||
public:
|
||||
MultiRootMaterialNode(BaseTree* root, unsigned8 level = 0) : MaterialNode<T, Comparer>(root, level) {}
|
||||
MultiRootMaterialNode(BaseTree* root, T material, unsigned8 level = 0) : MaterialNode<T, Comparer>(root, material, level) {}
|
||||
MultiRootMaterialNode(BaseTree* root, T material, bool isGeometry, unsigned8 level = 0) : MultiRootMaterialNode(root, material, level) { mIsGeometry = isGeometry; }
|
||||
MultiRootMaterialNode(MultiRootMaterialNode&& node) : MaterialNode<T, Comparer>(std::move(node)) // Move ctor
|
||||
{
|
||||
mIsGeometry = std::move(node.mIsGeometry);
|
||||
}
|
||||
|
||||
~MultiRootMaterialNode() {}
|
||||
|
||||
MultiRootMaterialNode& operator=(MultiRootMaterialNode&& node) // Move assignment operator
|
||||
{
|
||||
mIsGeometry = std::move(node.mIsGeometry);
|
||||
MaterialNode<T, Comparer>::operator=(std::move(node));
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool GetIsGeometry() const { return mIsGeometry; }
|
||||
void SetIsGeometry(bool value) { mIsGeometry = value; }
|
||||
|
||||
bool Compare(const MultiRootMaterialNode<T, Comparer>& node) const
|
||||
{
|
||||
if (this->mIsGeometry != node.mIsGeometry) return !mIsGeometry;
|
||||
return MaterialNode<T, Comparer>::Compare(node);
|
||||
}
|
||||
|
||||
bool Equals(const MultiRootMaterialNode<T, Comparer>& node) const
|
||||
{
|
||||
return node.mIsGeometry == this->mIsGeometry && MaterialNode<T, Comparer>::Equals(node);
|
||||
}
|
||||
|
||||
void WriteProperties(std::ostream& file)
|
||||
{
|
||||
Serializer<bool>::Serialize(mIsGeometry, file);
|
||||
MaterialNode<T, Comparer>::WriteProperties(file);
|
||||
}
|
||||
void ReadProperties(std::istream& file)
|
||||
{
|
||||
Serializer<bool>::Deserialize(mIsGeometry, file);
|
||||
MaterialNode<T, Comparer>::ReadProperties(file);
|
||||
}
|
||||
|
||||
void CopyProperties(MultiRootMaterialNode* node)
|
||||
{
|
||||
this->SetIsGeometry(node->GetIsGeometry());
|
||||
MaterialNode<T, Comparer>::CopyProperties(node);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>>
|
||||
class HierarchicalMaterialMultiRoot : public MultiRootTree<MultiRootMaterialNode<T, Comparer>>
|
||||
{
|
||||
private:
|
||||
T mMaterial; // Material for the root
|
||||
public:
|
||||
HierarchicalMaterialMultiRoot(unsigned8 maxLevel, unsigned32 slaveRootCount) :
|
||||
MultiRootTree<MultiRootMaterialNode<T, Comparer>>(maxLevel, slaveRootCount)
|
||||
{
|
||||
mLeafsAreEqual = false;
|
||||
}
|
||||
|
||||
~HierarchicalMaterialMultiRoot() override {}
|
||||
|
||||
T GetMaterial(unsigned32 nodeIndex) const
|
||||
{
|
||||
return GetTypedNode(nodeIndex)->GetMaterial();
|
||||
}
|
||||
|
||||
virtual unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override { return sizeof(T); }
|
||||
virtual std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override { return GetMaterial(node->GetIndex()).Serialize(); }
|
||||
|
||||
MultiRootMaterialNode<T, Comparer>* AddLeafNode(glm::uvec3 coordinate)
|
||||
{
|
||||
MultiRootMaterialNode<T, Comparer>* node = MultiRootTree<MultiRootMaterialNode<T, Comparer>>::AddLeafNode(coordinate);
|
||||
node->SetIsGeometry(true);
|
||||
return node;
|
||||
// TODO: Mark all nodes along the path as "IsGeometry" = True
|
||||
}
|
||||
|
||||
MultiRootMaterialNode<T, Comparer>* AddLeafNode(glm::uvec3 coordinate, size_t slaveRootID, T material)
|
||||
{
|
||||
MultiRootMaterialNode<T, Comparer>* node = MultiRootTree<MultiRootMaterialNode<T, Comparer>>::AddLeafNode(coordinate, slaveRootID);
|
||||
node->SetMaterial(material);
|
||||
node->SetIsGeometry(false);
|
||||
return node;
|
||||
// TODO: Mark all nodes along the path as "IsGeometry" = False
|
||||
}
|
||||
};
|
||||
95
Research/scene/Octree/HierarchicalShiftingColoredTree.cpp
Normal file
@@ -0,0 +1,95 @@
|
||||
#include "HierarchicalShiftingColoredTree.h"
|
||||
|
||||
HierarchicalShiftingColoredTree::HierarchicalShiftingColoredTree(unsigned8 maxLevel) : MaterialLibraryTree<Color, ColorCompare>(maxLevel)
|
||||
{
|
||||
colorsReplaced = false;
|
||||
}
|
||||
|
||||
glm::i8vec3 GetShift(glm::u8vec3 source, glm::u8vec3 dest)
|
||||
{
|
||||
glm::i8vec3 shift(0);
|
||||
for (unsigned8 i = 0; i < 3; i++)
|
||||
{
|
||||
std::int16_t diff = (std::int16_t)dest[i] - (std::int16_t)source[i];
|
||||
shift[i] = diff >> 1;
|
||||
}
|
||||
return shift;
|
||||
}
|
||||
|
||||
glm::u8vec3 AccumulateShift(glm::u8vec3 source, glm::i8vec3 shift)
|
||||
{
|
||||
glm::u8vec3 dest(0);
|
||||
for (unsigned8 i = 0; i < 3; i++)
|
||||
{
|
||||
dest[i] = source[i] + shift[i] * 2;
|
||||
}
|
||||
return dest;
|
||||
}
|
||||
|
||||
Color GetShiftMaterial(glm::i8vec3 shift)
|
||||
{
|
||||
return Color(glm::u8vec3(
|
||||
127 + shift.x,
|
||||
127 + shift.y,
|
||||
127 + shift.z
|
||||
));
|
||||
}
|
||||
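// Illustrative round-trip check (a sketch added for clarity; not referenced anywhere in this
// file). GetShift stores half of the per-channel difference and AccumulateShift adds twice the
// shift back, so each channel is reconstructed up to an error of at most one.
static void ShiftRoundTripExample()
{
glm::u8vec3 source(10, 200, 64);
glm::u8vec3 dest(13, 100, 64);
glm::i8vec3 shift = GetShift(source, dest); // (1, -50, 0)
glm::u8vec3 reconstructed = AccumulateShift(source, shift); // (12, 100, 64): off by at most 1 per channel
(void)reconstructed;
}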
|
||||
void HierarchicalShiftingColoredTree::ReplaceColorsByShifts()
|
||||
{
|
||||
// Make sure we have an octree (not a DAG)
|
||||
this->ToOctree();
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
std::vector<glm::u8vec3> accumulatedShiftPerNode(GetNodeCount());
|
||||
std::vector<Color> shiftMaterials(GetNodeCount());
|
||||
|
||||
// Go through the tree top-down and, for each node, assign each child a shift and calculate the accumulated shift.
|
||||
// The accumulated shift per node is in turn used to calculate the children's shifts without accumulating rounding errors.
|
||||
glm::u8vec3 rootColor = GetMaterial(GetRoot()).GetColor();
|
||||
glm::i8vec3 rootShift = GetShift(glm::u8vec3(0), rootColor);
|
||||
accumulatedShiftPerNode[0] = AccumulateShift(glm::u8vec3(0), rootShift);
|
||||
shiftMaterials[0] = GetShiftMaterial(rootShift);
|
||||
|
||||
// Calculate the shifts, top-down.
|
||||
for (unsigned8 level = 0; level < GetMaxLevel(); level++)
|
||||
{
|
||||
auto levelStart = levelIndices[level];
|
||||
auto levelEnd = levelIndices[level + 1];
|
||||
// Calculate the shift per child
|
||||
tbb::parallel_for(levelStart, levelEnd, [&](const unsigned32 i)
|
||||
{
|
||||
glm::u8vec3 parentColor = accumulatedShiftPerNode[i];
|
||||
Node* node = GetTypedNode(i);
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c))
|
||||
{
|
||||
unsigned childIndex = node->GetChildIndex(c);
|
||||
glm::u8vec3 childColor = GetMaterial(GetTypedNode(childIndex)).GetColor();
|
||||
glm::i8vec3 shift = GetShift(parentColor, childColor);
|
||||
accumulatedShiftPerNode[childIndex] = AccumulateShift(parentColor, shift);
|
||||
shiftMaterials[childIndex] = GetShiftMaterial(shift);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Create a new material library containing the shifts
|
||||
if (mOwnsLibrary) delete mMaterialLibrary;
|
||||
mMaterialLibrary = new MaterialLibrary<Color, ColorCompare>();
|
||||
for (auto material : shiftMaterials)
|
||||
{
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
}
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
// Replace all current node materials by their representative shift materials
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
MaterialLibraryNode* node = GetTypedNode(i);
|
||||
node->SetMaterial(mMaterialLibrary->GetTextureIndex(shiftMaterials[i]));
|
||||
});
|
||||
|
||||
colorsReplaced = true;
|
||||
}
|
||||
17
Research/scene/Octree/HierarchicalShiftingColoredTree.h
Normal file
@@ -0,0 +1,17 @@
|
||||
#pragma once
|
||||
#include <fstream>
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../Material/Color.h"
|
||||
#include "MaterialLibraryTree.h"
|
||||
|
||||
class Root;
|
||||
|
||||
class HierarchicalShiftingColoredTree : public MaterialLibraryTree<Color, ColorCompare>
|
||||
{
|
||||
private:
|
||||
bool colorsReplaced;
|
||||
public:
|
||||
HierarchicalShiftingColoredTree(unsigned8 maxLevel);
|
||||
void ReplaceColorsByShifts();
|
||||
};
|
||||
10
Research/scene/Octree/IAdditionalProperties.h
Normal file
@@ -0,0 +1,10 @@
|
||||
#pragma once
|
||||
#include "../../core/Defines.h"
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
||||
class IAdditionalProperties
|
||||
{
|
||||
public:
|
||||
virtual std::map<std::string, std::string> GetAdditionalProperties() = 0;
|
||||
};
|
||||
12
Research/scene/Octree/IBlockTexture.h
Normal file
@@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
#include "../../core/Defines.h"
|
||||
|
||||
class IBlockTexture
|
||||
{
|
||||
public:
|
||||
virtual std::vector<unsigned8> GetBlockPointerPool() = 0;
|
||||
virtual size_t GetBlockPointerPoolSize() = 0;
|
||||
|
||||
virtual std::vector<unsigned8> GetBlockPool() = 0;
|
||||
virtual size_t GetBlockPoolSize() = 0;
|
||||
};
|
||||
10
Research/scene/Octree/IMaterialTexture.h
Normal file
@@ -0,0 +1,10 @@
|
||||
#pragma once
|
||||
#include "../../core/Defines.h"
|
||||
|
||||
class IMaterialTexture
|
||||
{
|
||||
public:
|
||||
virtual std::vector<unsigned8> GetMaterialTexture() = 0;
|
||||
virtual unsigned GetMaterialTextureSize() = 0;
|
||||
virtual unsigned8 GetMaterialTextureChannelsPerPixel() = 0;
|
||||
};
|
||||
41
Research/scene/Octree/LeafMaterialMultiRootTree.h
Normal file
@@ -0,0 +1,41 @@
|
||||
#pragma once
|
||||
#include "MultiRootTree.h"
|
||||
#include "MaterialNode.h"
|
||||
|
||||
template<typename T>
|
||||
class LeafMaterialMultiRootTree : public MultiRootTree<MaterialNode<T>>
|
||||
{
|
||||
public:
|
||||
LeafMaterialMultiRootTree(unsigned8 maxLevel, unsigned32 slaveRootCount)
|
||||
: MultiRootTree(maxLevel, slaveRootCount) { mLeafsAreEqual = false; }
|
||||
|
||||
~LeafMaterialMultiRootTree() override {}
|
||||
|
||||
unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override
|
||||
{
|
||||
if (level == GetMaxLevel()) return sizeof(T);
|
||||
else return Tree<MaterialNode<T>>::GetAdditionalBytesPerNode(level);
|
||||
}
|
||||
std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override
|
||||
{
|
||||
if (node->GetLevel() == GetMaxLevel())
|
||||
{
|
||||
auto matNode = (MaterialNode<T>*)node;
|
||||
return matNode->GetMaterial().Serialize();
|
||||
}
|
||||
else
|
||||
return Tree<MaterialNode<T>>::GetAdditionalNodeBytes(node);
|
||||
}
|
||||
|
||||
void AddLeafNode(glm::uvec3 coordinate, unsigned32 slaveRootID, T material)
|
||||
{
|
||||
MaterialNode<T>* node = MultiRootTree<MaterialNode<T>>::AddLeafNode(coordinate, slaveRootID);
|
||||
node->SetMaterial(material);
|
||||
}
|
||||
|
||||
void AddLeafNode(glm::uvec3 coordinate, T material)
|
||||
{
|
||||
MaterialNode<T>* node = MultiRootTree<MaterialNode<T>>::AddLeafNode(coordinate);
|
||||
node->SetMaterial(material);
|
||||
}
|
||||
};
|
||||
487
Research/scene/Octree/MaterialLibraryMultiRootTree.h
Normal file
@@ -0,0 +1,487 @@
|
||||
#pragma once
|
||||
#include "HierarchicalMaterialMultiRoot.h"
|
||||
#include "../Material/BitsMaterial.h"
|
||||
#include "IMaterialTexture.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../Material/BlockBasedMaterialLibrary.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include "../../core/CollectionHelper.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
#include "NodeReplacementFinder.h"
|
||||
#include "Tree.h"
|
||||
#include <queue>
|
||||
#include <unordered_set>
|
||||
|
||||
// Usage:
|
||||
// This tree can only be built correctly if it is based on some material tree that has materials throughout
|
||||
// (such as HierarchicalRoot<T> or MaterialRoot<T>). To build this tree, create this root object and then call
|
||||
// the "BaseOn(tree)" method with the material tree.
|
||||
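// A minimal usage sketch (hedged: "sourceTree" is a placeholder for such a material tree and is
// not a name from this codebase; Color/ColorCompare are used as elsewhere in the project):
//   MaterialLibraryMultiRoot<Color, ColorCompare> tree(sourceTree->GetMaxLevel());
//   tree.BaseOn(sourceTree, /*autoDAG=*/true);
//   tree.ShaveEquals();      // optional: drop subtrees whose bit values are all equal
//   tree.FillEmptySpace();   // optional: fill undefined space to enable further merging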
template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
|
||||
class MaterialLibraryMultiRoot : public HierarchicalMaterialMultiRoot<BitsMaterial<8>>, public IMaterialTexture
|
||||
{
|
||||
private:
|
||||
// After the tree is finalized, the material library will be used to contain the actual materials
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* mMaterialLibrary;
|
||||
std::vector<unsigned8> mBitMap;
|
||||
|
||||
std::vector<unsigned char> mMaterialTexture;
|
||||
unsigned short mMaterialTextureSize;
|
||||
MaterialLibraryPointer mMaxTextureIndex;
|
||||
|
||||
inline void WriteMaterialTexture(std::ostream& file)
|
||||
{
|
||||
if (mMaterialTexture.empty())
|
||||
GetMaterialTexture();
|
||||
// Pack the texture size and the biggest texture index in one 32 bit unsigned int (for historic reasons...)
|
||||
unsigned materialTextureSizeSummary = (mMaxTextureIndex.x << 20) | (mMaxTextureIndex.y << 10) | (mMaterialTextureSize - 1);
|
||||
Serializer<unsigned>::Serialize(materialTextureSizeSummary, file);
|
||||
Serializer<unsigned8*>::Serialize(&mMaterialTexture[0], (size_t)mMaterialTextureSize * (size_t)mMaterialTextureSize * (size_t)channelsPerPixel, file);
|
||||
}
|
||||
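// Worked example of the packing above: with mMaxTextureIndex = (3, 5) and a texture size of
// 1024, the summary is (3 << 20) | (5 << 10) | (1024 - 1) = 0x3017FF. This assumes, as the
// reader below does, that each of the three fields fits in 10 bits.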
|
||||
inline void ReadMaterialTexture(std::istream& file)
|
||||
{
|
||||
unsigned materialTextureSizeSummary;
|
||||
Serializer<unsigned>::Deserialize(materialTextureSizeSummary, file);
|
||||
unsigned mask1 = BitHelper::GetLSMask<unsigned32>(20, 30);
|
||||
unsigned mask2 = BitHelper::GetLSMask<unsigned32>(10, 20);
|
||||
unsigned mask3 = BitHelper::GetLSMask<unsigned32>(0, 10);
|
||||
unsigned short maxTextureIndexX = (mask1 & materialTextureSizeSummary) >> 20;
|
||||
unsigned short maxTextureIndexY = (mask2 & materialTextureSizeSummary) >> 10;
|
||||
unsigned materialTextureSize = mask3 & materialTextureSizeSummary;
|
||||
mMaterialTextureSize = materialTextureSize + 1;
|
||||
mMaxTextureIndex = MaterialLibraryPointer(maxTextureIndexX, maxTextureIndexY);
|
||||
|
||||
size_t textureArraySize = (size_t)mMaterialTextureSize * (size_t)mMaterialTextureSize * (size_t)channelsPerPixel;
|
||||
|
||||
mMaterialTexture.resize(textureArraySize);
|
||||
Serializer<unsigned8*>::Deserialize(&mMaterialTexture[0], textureArraySize, file);
|
||||
}
|
||||
|
||||
void AddMaterial(const T& material)
|
||||
{
|
||||
assert(!mMaterialLibrary->IsFinalized());
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
}
|
||||
|
||||
void FinalizeMaterials()
|
||||
{
|
||||
assert(!mMaterialLibrary->IsFinalized());
|
||||
mMaterialLibrary->Finalize();
|
||||
unsigned requiredXBits = BitHelper::Log2Ceil(mMaterialLibrary->GetTextureSize());
|
||||
unsigned requiredYBits = BitHelper::Log2Ceil(mMaterialLibrary->GetMaxTextureIndex().y);
|
||||
unsigned requiredBits = requiredXBits + requiredYBits;
|
||||
unsigned32 mask = BitHelper::GetLSMask<unsigned32>(16, 16 + requiredXBits) | BitHelper::GetLSMask<unsigned32>(0, requiredYBits);
|
||||
mBitMap = BitHelper::GetBitMapHS(mask);
|
||||
AddSlaveRoots(requiredBits); // The main root can be used for the first bit, the rest of the bits require slave roots
|
||||
}
|
||||
|
||||
bool CheckNodesToRemove(MultiRootMaterialNode<BitsMaterial<8>>* node, bool curValue, BoolArray& nodesCanBeShaved)
|
||||
{
|
||||
if (!node->HasChildren()) return true;
|
||||
auto mat = node->GetMaterial();
|
||||
bool childrenEqual = true;
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c))
|
||||
{
|
||||
bool nodeMat = mat.GetLS(c);
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* child = (MultiRootMaterialNode<BitsMaterial<8>>*)GetNode(node->GetChildIndex(c));
|
||||
bool nodeHasSameMat = CheckNodesToRemove(child, nodeMat, nodesCanBeShaved);
|
||||
if (nodeMat != curValue || !nodeHasSameMat) childrenEqual = false;
|
||||
}
|
||||
}
|
||||
if (!childrenEqual) nodesCanBeShaved.Set(node->GetIndex(), false);
|
||||
return childrenEqual;
|
||||
}
|
||||
|
||||
static std::vector<unsigned32> HashChildren(const MultiRootMaterialNode<BitsMaterial<8>>* node)
|
||||
{
|
||||
assert(!node->GetIsGeometry());
|
||||
std::vector<unsigned32> hash(8, 0);
|
||||
auto mat = node->GetMaterial();
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c))
|
||||
hash[c] = (node->GetChildIndex(c) << 1) | (mat.GetLS(c) ? 1 : 0);
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
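// Illustrative example of the hash above: a child stored at pool index 42 whose material bit is 1
// hashes to (42 << 1) | 1 = 85, so two nodes only collide in the replacement finder when both
// their child pointers and their per-child bits match.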
public:
|
||||
MaterialLibraryMultiRoot(unsigned8 maxLevel) : HierarchicalMaterialMultiRoot<BitsMaterial<8>>(maxLevel, 0)
|
||||
{
|
||||
mMaterialLibrary = new BlockBasedMaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
}
|
||||
|
||||
~MaterialLibraryMultiRoot() override {
|
||||
if (mMaterialLibrary != NULL)
|
||||
delete mMaterialLibrary;
|
||||
}
|
||||
|
||||
void CreateMaterialLibrary(const std::vector<T>& materials)
|
||||
{
|
||||
for (const T& material : materials) AddMaterial(material);
|
||||
FinalizeMaterials();
|
||||
}
|
||||
|
||||
// Copies the geometry and material information of the given tree to this tree
|
||||
template <typename MaterialTree>
|
||||
void BaseOn(MaterialTree* tree, bool autoDAG)
|
||||
{
|
||||
// Make sure the leaf nodes are the last nodes in the tree, so that we can skip them and only construct one.
|
||||
std::vector<unsigned32> levelIndices = tree->SortOnLevel();
|
||||
|
||||
// Clear the existing tree
|
||||
Clear();
|
||||
auto root = Create(0);
|
||||
root->SetIsGeometry(true);
|
||||
|
||||
// Create the single leaf node that can exist for the geometry tree (for memory efficiency)
|
||||
auto geometryLeaf = Create(GetMaxLevel());
|
||||
geometryLeaf->SetIsGeometry(true);
|
||||
|
||||
// Copy the geometry information to a geometry tree
|
||||
unsigned32 destChildrenCache[8];
|
||||
unsigned32 offset = GetNodeCount();
|
||||
for (unsigned32 i = 0; i < levelIndices[GetMaxLevel()]; i++)
|
||||
{
|
||||
Node* source = tree->GetNode(i);
|
||||
unsigned32* sourceChildren = source->GetChildren();
|
||||
if (source->GetLevel() == GetMaxLevel() - 1)
|
||||
{
|
||||
for (ChildIndex child = 0; child < source->GetChildCount(); child++)
|
||||
destChildrenCache[child] = geometryLeaf->GetIndex();
|
||||
}
|
||||
else
|
||||
{
|
||||
for (ChildIndex child = 0; child < source->GetChildCount(); child++)
|
||||
destChildrenCache[child] = offset + sourceChildren[child] - 1; // The root is reused, so that index shouldn't be counted towards the offset
|
||||
}
|
||||
auto dest = source->GetLevel() == 0 ? root : Create(source->GetLevel());
|
||||
dest->SetIsGeometry(true);
|
||||
dest->SetChildren(source->GetChildmask(), destChildrenCache);
|
||||
}
|
||||
|
||||
// Create and fill the material library (if this hasn't been done yet)
|
||||
if (!mMaterialLibrary->IsFinalized())
|
||||
{
|
||||
auto materials = tree->GetUniqueMaterials();
|
||||
CreateMaterialLibrary(materials);
|
||||
}
|
||||
else
|
||||
{ // Re-add the slaveroots
|
||||
AddSlaveRoots(mBitMap.size());
|
||||
}
|
||||
|
||||
unsigned32 bitCount = (unsigned32)mBitMap.size();
|
||||
|
||||
// Go through all nodes that aren't leaf nodes
|
||||
// and construct their (bit-based) material tree (i.e. the non-geometry part of the tree)
|
||||
auto leaf = Create(GetMaxLevel());
|
||||
leaf->SetIsGeometry(false);
|
||||
for (unsigned32 bit = 0; bit < bitCount; bit++)
|
||||
{
|
||||
offset = GetNodeCount();
|
||||
for (unsigned32 i = 0; i < levelIndices[GetMaxLevel()]; i++)
|
||||
{
|
||||
auto source = tree->GetTypedNode(i);
|
||||
|
||||
// Create the material the new node has to have
|
||||
BitsMaterial<8> destMaterial;
|
||||
for (ChildIndex childIdx = 0; childIdx < 8; childIdx++)
|
||||
{
|
||||
if (source->HasChild(childIdx))
|
||||
{
|
||||
auto child = tree->GetTypedNode(source->GetChildIndex(childIdx));
|
||||
T sourceMaterial = tree->GetMaterial(child);
|
||||
unsigned32 sourceMaterialPointer = (unsigned32)mMaterialLibrary->GetTextureIndex(sourceMaterial);
|
||||
destMaterial.SetLS(childIdx, BitHelper::GetHS(sourceMaterialPointer, mBitMap[bit]));
|
||||
}
|
||||
}
|
||||
|
||||
// Create the children pointers the new node has to have
|
||||
unsigned32* sourceChildren = source->GetChildren();
|
||||
if (source->GetLevel() == GetMaxLevel() - 1)
|
||||
{
|
||||
for (ChildIndex child = 0; child < source->GetChildCount(); child++)
|
||||
destChildrenCache[child] = leaf->GetIndex();
|
||||
}
|
||||
else
|
||||
{
|
||||
for (ChildIndex child = 0; child < source->GetChildCount(); child++)
|
||||
destChildrenCache[child] = offset + sourceChildren[child] - 1; // The root gets reused so it shouldn't be counted towards the offset
|
||||
}
|
||||
|
||||
// Create the new node
|
||||
auto dest = source->GetLevel() == 0 ? GetSlaveRoot(bit) : Create(source->GetLevel());
|
||||
dest->SetMaterial(destMaterial);
|
||||
dest->SetIsGeometry(false);
|
||||
dest->SetChildren(source->GetChildmask(), destChildrenCache);
|
||||
}
|
||||
|
||||
if (autoDAG)
|
||||
{
|
||||
ToDAG(1, false);
|
||||
printf(".");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
// Bottom-up, remove nodes whose entire subtree carries the same bit value (i.e. all 1 or all 0; this only applies to the non-geometry nodes).
|
||||
void ShaveEquals()
|
||||
{
|
||||
// For all the nodes on level 1, RemoveChildrenWithSameBit
|
||||
std::vector<unsigned32> levelIndices = SortOnLevel();
|
||||
BoolArray nodesToShave(GetNodeCount());
|
||||
// Assume that all nodes that aren't geometry (or roots) can be shaved until the opposite has been proven
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
Node* node = GetNode((unsigned32)i);
|
||||
if (node->GetLevel() > 0)
|
||||
{
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* matNode = (MultiRootMaterialNode<BitsMaterial<8>>*)node;
|
||||
nodesToShave.Set(matNode->GetIndex(), !matNode->GetIsGeometry());
|
||||
}
|
||||
}
|
||||
|
||||
// Now try to find proof not to shave certain nodes
|
||||
for (unsigned32 i = levelIndices[1]; i < levelIndices[2]; i++)
|
||||
{
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* node = GetTypedNode(i);
|
||||
if (!node->GetIsGeometry()) CheckNodesToRemove(node, false, nodesToShave); // Since nodes on the second level don't have material properties, assume the bit value is false
|
||||
}
|
||||
|
||||
// Shave all nodes of which no evidence is found not to shave them
|
||||
size_t nodesRemoved = 0;
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
if (nodesToShave.Get(i))
|
||||
{
|
||||
nodesRemoved++;
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* node = (MultiRootMaterialNode<BitsMaterial<8>>*)GetNode((unsigned32)i);
|
||||
node->SetMaterial(BitsMaterial<8>());
|
||||
node->SetChildren(0, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
printf("Connections removed from %llu nodes.\n", (unsigned64)nodesRemoved);
|
||||
ClearOrphans();
|
||||
}
|
||||
|
||||
// Since the bit values only need to be correct for the parts of the scene that are defined, the undefined parts can be filled in arbitrarily while still being correct
|
||||
void FillEmptySpace(bool full = true)
|
||||
{
|
||||
// TODO: For the layer above the leaf node, just add all 8 children for each node (all pointing to the leaf node), and make
|
||||
// the only difference the bit node. Then connect all nodes in the layer above it correctly.
|
||||
std::vector<unsigned32> levelOffsets = SortOnLevel();
|
||||
|
||||
// Find the leaf node that is not geometry
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* leaf = NULL;
|
||||
for (unsigned32 i = levelOffsets[GetMaxLevel()]; i < levelOffsets[GetMaxLevel() + 1]; i++)
|
||||
{
|
||||
auto node = GetTypedNode(i);
|
||||
if (!node->GetIsGeometry()) { leaf = node; break; }
|
||||
}
|
||||
assert(leaf != NULL);
|
||||
|
||||
// Create properties for pointers to this leaf node
|
||||
unsigned32 leafPointer = leaf->GetIndex();
|
||||
unsigned32 leafChildren[8];
|
||||
for (size_t i = 0; i < 8; i++) leafChildren[i] = leafPointer;
|
||||
ChildMask leafMask(255);
|
||||
|
||||
// Make sure all nodes above the leaf level have full geometry, allowing more merging
|
||||
for (unsigned32 i = levelOffsets[GetMaxLevel() - 1]; i < levelOffsets[GetMaxLevel()]; i++)
|
||||
{
|
||||
auto node = GetTypedNode(i);
|
||||
if (!node->GetIsGeometry() && node->HasChildren())
|
||||
node->SetChildren(leafMask, leafChildren);
|
||||
}
|
||||
|
||||
if (!full) return;
|
||||
|
||||
// TODO: After every layer is made smaller, call "ToDAG" up to the next level to process :P
|
||||
// Now use a lookup table to quickly find all feasible nodes for a merge.
|
||||
std::function<std::vector<unsigned32>(const MultiRootMaterialNode<BitsMaterial<8>>*)> childrenHasher = &MaterialLibraryMultiRoot::HashChildren;
|
||||
|
||||
for (auto level = GetMaxLevel() - 1; level-- > 1;)
|
||||
{
|
||||
ToDAG(level - 1);
|
||||
levelOffsets = SortOnLevel();
|
||||
std::vector<size_t> parentsPerNode = GetParentCounts();
|
||||
NodeReplacementFinder<unsigned32, MultiRootMaterialNode<BitsMaterial<8>>*> finder(childrenHasher);
|
||||
|
||||
auto levelStart = levelOffsets[level];
|
||||
auto levelEnd = levelOffsets[level + 1];
|
||||
std::vector<MultiRootMaterialNode<BitsMaterial<8>>*> nodesToTryVec;
|
||||
// Add all nodes to the finder:
|
||||
for (size_t i = levelStart; i < levelEnd; i++)
|
||||
{
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* node = (MultiRootMaterialNode<BitsMaterial<8>>*)GetNode((unsigned32)i);
|
||||
if (!node->GetIsGeometry() && node->HasChildren())
|
||||
{
|
||||
finder.Add(node);
|
||||
nodesToTryVec.push_back(node);
|
||||
}
|
||||
}
|
||||
// Sort nodes on number of parents. Merging nodes with many parents is preferred as it has a high probability of leading to more merges higher up in the tree
|
||||
std::sort(nodesToTryVec.begin(), nodesToTryVec.end(), [&](MultiRootMaterialNode<BitsMaterial<8>>* a, MultiRootMaterialNode<BitsMaterial<8>>* b)
|
||||
{
|
||||
return parentsPerNode[a->GetIndex()] > parentsPerNode[b->GetIndex()];
|
||||
});
|
||||
|
||||
std::queue<MultiRootMaterialNode<BitsMaterial<8>>*> nodesToTry;
|
||||
for (auto nodeToTry : nodesToTryVec) nodesToTry.push(nodeToTry);
|
||||
|
||||
std::unordered_set<MultiRootMaterialNode<BitsMaterial<8>>*> allMergedNodes;
|
||||
// Now replace as much as possible
|
||||
while (!nodesToTry.empty())
|
||||
{
|
||||
MultiRootMaterialNode<BitsMaterial<8>>* node = nodesToTry.front();
|
||||
if (allMergedNodes.find(node) == allMergedNodes.end())
|
||||
{
|
||||
std::vector<MultiRootMaterialNode<BitsMaterial<8>>*> mergeOptions = finder.Find(node);
|
||||
// Prefer the merge options with most children :)
|
||||
std::sort(mergeOptions.begin(), mergeOptions.end(), [&](Node* a, Node* b)
|
||||
{
|
||||
if (parentsPerNode[a->GetIndex()] != parentsPerNode[b->GetIndex()]) return parentsPerNode[a->GetIndex()] > parentsPerNode[b->GetIndex()];
|
||||
return a->GetChildCount() > b->GetChildCount();
|
||||
});
|
||||
// All merge options for this node will be explored, so remove it
|
||||
if (mergeOptions.size() > 1)
|
||||
{
|
||||
// Keep track of which nodes have been merged, so that we can set them to be equal to this node in the end
|
||||
ChildMask combinedMask = node->GetChildmask();
|
||||
unsigned32 combinedChildIndices[8]; // Fixed-size stack array; the original heap allocation here was never freed
|
||||
for (ChildIndex c = 0; c < 8; c++) if (node->HasChild(c)) combinedChildIndices[c] = node->GetChildIndex(c);
|
||||
unsigned8 combinedMaterial = (unsigned8)node->GetMaterial().GetValue();
|
||||
|
||||
std::vector<MultiRootMaterialNode<BitsMaterial<8>>*> mergedNodes(1, node);
|
||||
for (auto option : mergeOptions)
|
||||
{
|
||||
if (option != node)
|
||||
{
|
||||
// Check if the merge is still valid
|
||||
unsigned8 optionMaterial = (unsigned8)option->GetMaterial().GetValue();
|
||||
unsigned8 optionMask = (unsigned8)option->GetChildmask().mask;
|
||||
|
||||
// The material mask should be the same for children that both nodes have:
|
||||
bool valid = (optionMaterial & (optionMask & combinedMask.mask)) == (combinedMaterial & (optionMask & combinedMask.mask));
|
||||
if (valid)
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
if (combinedMask.Get(c) && option->HasChild(c) && combinedChildIndices[c] != option->GetChildIndex(c)) // Compare against the option's child: the combined set may already differ from the seed node
|
||||
{
|
||||
valid = false;
|
||||
break;
|
||||
}
|
||||
|
||||
// If the merge is still valid, update the combined mask, the combined material and combinedChildIndices
|
||||
if (valid)
|
||||
{
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (!combinedMask.Get(c) && option->HasChild(c))
|
||||
{
|
||||
combinedChildIndices[c] = option->GetChildIndex(c);
|
||||
combinedMask.Set(c, true);
|
||||
}
|
||||
}
|
||||
combinedMaterial |= optionMaterial;
|
||||
mergedNodes.push_back(option);
|
||||
}
|
||||
}
|
||||
}
|
||||
// If more nodes than the current node are merged,
|
||||
// Update all merged nodes to be equal. Also remove the (old) merged nodes from the finder and add the merged version
|
||||
if (mergedNodes.size() > 1)
|
||||
{
|
||||
unsigned8 i = 0;
|
||||
unsigned32* combinedChildren = new unsigned32[combinedMask.GetSet()];
|
||||
for (ChildIndex c = 0; c < 8; c++) if (combinedMask.Get(c)) combinedChildren[i++] = combinedChildIndices[c];
|
||||
BitsMaterial<8> combinedMaterialProp((size_t)combinedMaterial);
|
||||
|
||||
for (auto mergedNode : mergedNodes)
|
||||
{
|
||||
finder.Remove(mergedNode);
|
||||
mergedNode->SetChildren(combinedMask, combinedChildren);
|
||||
mergedNode->SetMaterial(combinedMaterialProp);
|
||||
allMergedNodes.insert(mergedNode);
|
||||
}
|
||||
delete[] combinedChildren; // Array form: combinedChildren was allocated with new[]
|
||||
finder.Add(node);
|
||||
}
|
||||
else
|
||||
{
|
||||
finder.Remove(node);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
finder.Remove(node);
|
||||
}
|
||||
}
|
||||
nodesToTry.pop();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetMaterialTexture() override
|
||||
{
|
||||
if (!mMaterialTexture.empty())
|
||||
return mMaterialTexture;
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
mMaterialTextureSize = mMaterialLibrary->GetTextureSize();
|
||||
mMaterialTexture = mMaterialLibrary->GetTexture();
|
||||
mMaxTextureIndex = mMaterialLibrary->GetMaxTextureIndex();
|
||||
return mMaterialTexture;
|
||||
}
|
||||
|
||||
unsigned GetMaterialTextureSize() override
|
||||
{
|
||||
GetMaterialTexture();
|
||||
return mMaterialTextureSize;
|
||||
}
|
||||
|
||||
unsigned8 GetMaterialTextureChannelsPerPixel() override { return channelsPerPixel; }
|
||||
|
||||
bool HasAdditionalPool() const override { return true; }
|
||||
protected:
|
||||
void AppendPostProcess(glm::uvec3 coordinates, unsigned8 level, Tree<MultiRootMaterialNode<BitsMaterial<8>>>* tree) override
|
||||
{
|
||||
MaterialLibraryMultiRoot<T, Comparer, channelsPerPixel>* other = (MaterialLibraryMultiRoot<T, Comparer, channelsPerPixel>*)tree;
|
||||
if (other->mMaterialLibrary != NULL)
|
||||
{
|
||||
// Copy the material library of the appended tree
|
||||
if ((!this->mMaterialLibrary->IsFinalized()) && (*(other->mMaterialLibrary)) != (*(this->mMaterialLibrary)))
|
||||
{
|
||||
// Use copy constructor to copy the library of the other tree
|
||||
delete mMaterialLibrary;
|
||||
this->mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>(*(other->mMaterialLibrary));
|
||||
}
|
||||
|
||||
assert((*(this->mMaterialLibrary)) == (*(other->mMaterialLibrary)));
|
||||
}
|
||||
}
|
||||
|
||||
void WriteProperties(std::ostream& file) override {
|
||||
WriteMaterialTexture(file);
|
||||
HierarchicalMaterialMultiRoot<BitsMaterial<8>>::WriteProperties(file);
|
||||
}
|
||||
void ReadProperties(std::istream& file) override {
|
||||
// Read the material texture
|
||||
ReadMaterialTexture(file);
|
||||
// Restore the material library from the texture
|
||||
if (mMaterialLibrary != NULL)
|
||||
delete mMaterialLibrary;
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>(mMaterialTexture, mMaterialTextureSize, mMaxTextureIndex);
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
HierarchicalMaterialMultiRoot<BitsMaterial<8>>::ReadProperties(file);
|
||||
}
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override { WriteMaterialTexture(file); HierarchicalMaterialMultiRoot<BitsMaterial<8>>::WriteAdditionalPoolProperties(file); }
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override { ReadMaterialTexture(file); HierarchicalMaterialMultiRoot<BitsMaterial<8>>::ReadAdditionalPoolProperties(file); }
|
||||
};
|
||||
385
Research/scene/Octree/MaterialLibraryTree.h
Normal file
@@ -0,0 +1,385 @@
|
||||
#pragma once
|
||||
#include "Tree.h"
|
||||
#include "MaterialNode.h"
|
||||
#include "IMaterialTexture.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../../inc/tbb/concurrent_queue.h"
|
||||
#include "../Material/MaterialLibrary.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include <set>
|
||||
|
||||
typedef MaterialNode<MaterialLibraryPointer> MaterialLibraryNode;
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
|
||||
class MaterialLibraryTree : public Tree<MaterialLibraryNode>, public IMaterialTexture
|
||||
{
|
||||
protected:
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* mMaterialLibrary;
|
||||
bool mOwnsLibrary;
|
||||
std::vector<unsigned char> mMaterialTexture;
|
||||
MaterialLibraryPointer mMaxTextureIndex;
|
||||
unsigned short mMaterialTextureSize;
|
||||
|
||||
std::unordered_map<T, MaterialLibraryNode*> leafMap;
|
||||
|
||||
inline void WriteMaterialTexture(std::ostream& file)
|
||||
{
|
||||
assert(mMaterialLibrary != NULL);
|
||||
mMaterialLibrary->Serialize(file);
|
||||
}
|
||||
|
||||
inline void ReadMaterialTexture(std::istream& file)
|
||||
{
|
||||
if (mMaterialLibrary == NULL)
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
mMaterialLibrary->Deserialize(file);
|
||||
GetMaterialTexture();
|
||||
}
|
||||
|
||||
inline void CreateAllMaterialLeafs()
|
||||
{
|
||||
std::vector<std::pair<T, MaterialLibraryPointer>> materialsAndIndices = mMaterialLibrary->GetMaterialTextureIndices();
|
||||
for (auto materialAndIndex : materialsAndIndices)
|
||||
{
|
||||
T material = materialAndIndex.first;
|
||||
MaterialLibraryPointer textureIndex = materialAndIndex.second;
|
||||
MaterialLibraryNode* leaf = (MaterialLibraryNode*)Create(GetMaxLevel());
|
||||
leaf->SetMaterial(textureIndex);
|
||||
leafMap.insert(std::pair<T, MaterialLibraryNode*>(material, leaf));
|
||||
}
|
||||
}
|
||||
public:
|
||||
// Creates a material library tree.
|
||||
MaterialLibraryTree(unsigned8 maxLevel) :
|
||||
Tree<MaterialLibraryNode>(maxLevel),
|
||||
mOwnsLibrary(true),
|
||||
mMaterialLibrary(new MaterialLibrary<T, Comparer, channelsPerPixel>())
|
||||
{
|
||||
mLeafsAreEqual = false;
|
||||
}
|
||||
|
||||
// Creates a material library tree with the given (finalized!) material library. The library is not owned by this tree and therefore will not
|
||||
// be deleted when this tree is deleted.
|
||||
MaterialLibraryTree(unsigned8 maxLevel, MaterialLibrary<T, Comparer, channelsPerPixel>* materialLibrary) :
|
||||
Tree<MaterialLibraryNode>(maxLevel),
|
||||
mOwnsLibrary(false),
|
||||
mMaterialLibrary(materialLibrary)
|
||||
{
|
||||
mLeafsAreEqual = false;
|
||||
CreateAllMaterialLeafs();
|
||||
}
|
||||
|
||||
~MaterialLibraryTree() override {
|
||||
if(mOwnsLibrary)
|
||||
delete mMaterialLibrary;
|
||||
}
|
||||
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* GetMaterialLibrary()
|
||||
{
|
||||
return mMaterialLibrary;
|
||||
}
|
||||
|
||||
static void PassLibraryOwnership(MaterialLibraryTree* tree1, MaterialLibraryTree* tree2)
|
||||
{
|
||||
tree1->mOwnsLibrary = false;
|
||||
tree2->mOwnsLibrary = true;
|
||||
}
|
||||
|
||||
void AddMaterial(T material)
|
||||
{
|
||||
if (mMaterialLibrary->IsFinalized())
|
||||
return;
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
}
|
||||
|
||||
void FinalizeMaterials()
|
||||
{
|
||||
mMaterialLibrary->Finalize();
|
||||
CreateAllMaterialLeafs();
|
||||
}
|
||||
|
||||
// Assuming all leaf nodes contain pointers to materials, propagates those materials up in the tree, leaving the average material everywhere.
|
||||
// It is advised to call this method after DAG conversion, as it is still valid at that point, and it will be cheaper.
|
||||
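// Hedged call-order sketch (assuming the base Tree exposes the DAG conversion the note above
// refers to):
//   tree->ToDAG();               // merge identical subtrees first, so fewer nodes need averaging
//   tree->PropagateMaterials();  // then push the weighted-average materials up to the inner nodes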
void PropagateMaterials()
|
||||
{
|
||||
// Bottom up go through the nodes to propagate the materials
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
// Set node weights for weighted average calculation
|
||||
std::vector<float> lastLevelNodeWeights;
|
||||
std::vector<T> perfectMaterialPerNode(GetNodeCount());
|
||||
std::vector<float> levelNodeWeights;
|
||||
|
||||
auto leafStart = levelIndices[GetMaxLevel()];
|
||||
auto leafEnd = levelIndices[GetMaxLevel() + 1];
|
||||
lastLevelNodeWeights.resize(leafEnd - leafStart, 1.f);
|
||||
// Initialize the vectors for leaf nodes, assuming leaf nodes have weight 1 and perfect materials
|
||||
for (auto i = leafStart; i != leafEnd; i++)
|
||||
perfectMaterialPerNode[i] = mMaterialLibrary->GetMaterial(GetTypedNode(i)->GetMaterial());
|
||||
|
||||
// Bottom-up calculate the weighted average material for each node in the tree, and store them in perfectMaterialPerNode
|
||||
for (unsigned8 level = GetMaxLevel(); level-- > 0;)
|
||||
{
|
||||
unsigned32 levelStart = levelIndices[level];
|
||||
unsigned32 levelEnd = levelIndices[level + 1];
|
||||
levelNodeWeights = std::vector<float>(levelEnd - levelStart);
|
||||
tbb::parallel_for(levelStart, levelEnd, [&](unsigned32 i)
|
||||
{
|
||||
// Get the current node
|
||||
MaterialLibraryNode* node = GetTypedNode(i);
|
||||
std::vector<T> childMaterials;
|
||||
std::vector<float> childWeights;
|
||||
float nodeWeight = 0;
|
||||
// Find all materials the children use
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
unsigned32 childIndex = children[c]; // Renamed from "i": it shadowed the node index of the enclosing lambda
|
||||
MaterialLibraryNode* child = GetTypedNode(childIndex);
|
||||
MaterialLibraryPointer matPtr = child->GetMaterial();
|
||||
float childWeight = lastLevelNodeWeights[childIndex - levelEnd];
|
||||
childMaterials.push_back(perfectMaterialPerNode[childIndex]);
|
||||
childWeights.push_back(childWeight);
|
||||
nodeWeight += childWeight;
|
||||
}
|
||||
// Calculate the average material and retrieve the closest material to that from the library
|
||||
T nodeMaterial = T::WeightedAverage(childMaterials, childWeights);
|
||||
// Store the weighted average in perfectMaterialPerNode
|
||||
perfectMaterialPerNode[i] = nodeMaterial;
|
||||
levelNodeWeights[i - levelStart] = nodeWeight;
|
||||
});
|
||||
// Update the last level node weights to the node weights of the current level
|
||||
lastLevelNodeWeights = std::move(levelNodeWeights);
|
||||
}
|
||||
|
||||
// Find all materials required for this scene
|
||||
std::vector<T> perfectMaterials(GetNodeCount());
|
||||
tbb::parallel_for((size_t)0, perfectMaterialPerNode.size(), [&](size_t i)
|
||||
{
|
||||
perfectMaterials[i] = perfectMaterialPerNode[i];
|
||||
});
|
||||
|
||||
// Reduce the materials by only using unique ones
|
||||
tbb::parallel_sort(perfectMaterials, Comparer());
|
||||
perfectMaterials.erase(std::unique(perfectMaterials.begin(), perfectMaterials.end()), perfectMaterials.end());
|
||||
perfectMaterials.shrink_to_fit();
|
||||
|
||||
// Create a new material library based on these materials
|
||||
if (mOwnsLibrary) delete mMaterialLibrary;
|
||||
mMaterialTexture.clear();
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (auto material : perfectMaterials)
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
// Update all nodes in the tree to point to the new material library
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
MaterialLibraryPointer materialPointer = mMaterialLibrary->GetTextureIndex(perfectMaterialPerNode[i]);
|
||||
auto node = GetTypedNode(i);
|
||||
node->SetMaterial(materialPointer);
|
||||
});
|
||||
|
||||
// Update the material texture
|
||||
GetMaterialTexture();
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetMaterialTexture() override
|
||||
{
|
||||
if (!mMaterialTexture.empty())
|
||||
return mMaterialTexture;
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
mMaterialTextureSize = mMaterialLibrary->GetTextureSize();
|
||||
mMaterialTexture = mMaterialLibrary->GetTexture();
|
||||
mMaxTextureIndex = mMaterialLibrary->GetMaxTextureIndex();
|
||||
return mMaterialTexture;
|
||||
}
|
||||
|
||||
unsigned GetMaterialTextureSize() override
|
||||
{
|
||||
GetMaterialTexture();
|
||||
return mMaterialTextureSize;
|
||||
}
|
||||
|
||||
unsigned8 GetMaterialTextureChannelsPerPixel() override { return channelsPerPixel; }
|
||||
|
||||
std::vector<T> GetLeafMaterials()
|
||||
{
|
||||
auto levelIndices = SortOnLevel();
|
||||
unsigned32 leafsStart = levelIndices[GetMaxLevel()];
|
||||
unsigned32 leafsEnd = levelIndices[GetMaxLevel() + 1];
|
||||
std::vector<T> leafMaterials(levelIndices[GetMaxLevel() + 1] - levelIndices[GetMaxLevel()]);
|
||||
tbb::parallel_for(leafsStart, leafsEnd, [&](const unsigned32 i)
|
||||
{
|
||||
leafMaterials[i - leafsStart] = GetMaterial(GetTypedNode(i));
|
||||
});
|
||||
return leafMaterials;
|
||||
}
|
||||
|
||||
// Replaces the current leaf materials (and thereby also the material library) by the given materials.
|
||||
// It assumes the leafMaterials list is ordered in the same way the leafs are ordered in the current tree.
|
||||
// It is advised to follow this call with "ToDAG()" and "PropagateMaterials()"
|
||||
void SetLeafMaterials(const std::vector<T>& leafMaterials) // Const reference: avoids copying the whole vector on every call
|
||||
{
|
||||
// Create the new material library
|
||||
delete mMaterialLibrary;
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (auto material : leafMaterials)
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
// Set the leaf node materials
|
||||
size_t leafIndex = 0;
for (unsigned32 nodeIndex = 0; nodeIndex < GetNodeCount(); nodeIndex++)
{
MaterialLibraryNode* node = GetTypedNode(nodeIndex);
if (node->GetLevel() == GetMaxLevel())
{
node->SetMaterial(mMaterialLibrary->GetTextureIndex(leafMaterials[leafIndex]));
leafIndex++;
}
}
|
||||
}
|
||||
|
||||
T GetMaterial(const MaterialLibraryNode* node) const
|
||||
{
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
return mMaterialLibrary->GetMaterial(node->GetMaterial());
|
||||
}
|
||||
|
||||
std::vector<T> GetMaterials() const
|
||||
{
|
||||
// Read all materials from all nodes
|
||||
std::vector<T> materials(GetNodeCount());
|
||||
//for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
{
|
||||
materials[i] = GetMaterial(GetTypedNode(i));
|
||||
});
|
||||
return materials;
|
||||
}
|
||||
|
||||
void SetMaterials(const std::vector<T>& materials)
|
||||
{
|
||||
// Not a valid material vector
|
||||
if (materials.size() != GetNodeCount())
|
||||
return;
|
||||
|
||||
// Create a new material library containing the new materials
|
||||
if (mOwnsLibrary) delete mMaterialLibrary;
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (auto material : materials)
|
||||
{
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
}
|
||||
mMaterialLibrary->Finalize();
|
||||
mMaterialTexture.clear();
|
||||
|
||||
// Update all nodes to point to this new library
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
MaterialLibraryNode* node = GetTypedNode(i);
|
||||
node->SetMaterial(mMaterialLibrary->GetTextureIndex(materials[i]));
|
||||
});
|
||||
}
|
||||
|
||||
std::vector<T> GetUniqueMaterials() const
|
||||
{
|
||||
return mMaterialLibrary->GetMaterials();
|
||||
}
|
||||
|
||||
// Clears all material pointers from non-leaf nodes
|
||||
void ClearPropagation()
|
||||
{
|
||||
auto levelIndices = SortOnLevel();
|
||||
tbb::parallel_for((unsigned32)0, levelIndices[GetMaxLevel()], [&](const unsigned32 i)
|
||||
{
|
||||
MaterialLibraryNode* node = GetTypedNode(i);
|
||||
node->SetMaterial(MaterialLibraryPointer(0));
|
||||
});
|
||||
}
|
||||
|
||||
// Replaces the leaf materials by the given replacement materials. It is advised to follow this call with "ToDAG()" and "PropagateMaterials()"
|
||||
void ReplaceLeafMaterials(const std::map<T, T, Comparer>& leafMaterialReplacers)
|
||||
{
|
||||
auto newMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (auto material : leafMaterialReplacers)
|
||||
newMaterialLibrary->AddMaterial(material.second);
|
||||
newMaterialLibrary->Finalize();
|
||||
std::unordered_map<MaterialLibraryPointer, MaterialLibraryPointer> materialLibraryPointerReplacers;
|
||||
for (auto material : leafMaterialReplacers)
|
||||
materialLibraryPointerReplacers.insert(std::make_pair(mMaterialLibrary->GetTextureIndex(material.first), newMaterialLibrary->GetTextureIndex(material.second)));
|
||||
|
||||
auto levelIndices = SortOnLevel();
|
||||
tbb::parallel_for(levelIndices[GetMaxLevel()], levelIndices[GetMaxLevel() + 1], [&](const unsigned32 i)
|
||||
{
|
||||
MaterialLibraryNode* node = GetTypedNode(i);
|
||||
node->SetMaterial(materialLibraryPointerReplacers[node->GetMaterial()]);
|
||||
});
|
||||
delete mMaterialLibrary;
|
||||
mMaterialLibrary = newMaterialLibrary;
|
||||
}
|
||||
|
||||
unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override
|
||||
{
|
||||
return 3; // For now, assume that material pointers are always 12+12 bits
|
||||
}
|
||||
std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override
|
||||
{
|
||||
MaterialLibraryPointer materialPointer = ((MaterialLibraryNode*)node)->GetMaterial();
|
||||
std::vector<unsigned8> res(3);
|
||||
res[0] = (unsigned8)(materialPointer.x >> 4);
|
||||
res[1] = (unsigned8)((materialPointer.x << 4) | ((materialPointer.y & (0x0FFF)) >> 8));
|
||||
res[2] = (unsigned8)materialPointer.y;
|
||||
return res;
|
||||
}
|
||||
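// Inverse of the 12+12-bit packing above (a hedged sketch of how a reader could reconstruct the
// pointer from the three serialized bytes; this helper is not part of the original class):
//   unsigned16 x = ((unsigned16)res[0] << 4) | (res[1] >> 4);
//   unsigned16 y = (((unsigned16)res[1] & 0x0F) << 8) | res[2];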
|
||||
void AddLeafNode(glm::uvec3 coordinate, T material)
|
||||
{
|
||||
// Get the material pointer
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
// Check if there is already a leaf for this material (for auto reuse)
|
||||
auto existingLeaf = leafMap.find(material);
|
||||
if (existingLeaf != leafMap.end())
|
||||
{
|
||||
// If the leaf node already exists, reuse it:
|
||||
// Create the parent node of the leaf
|
||||
MaterialLibraryNode* parentOfLeaf = Tree::AddNode(glm::uvec3(coordinate.x >> 1, coordinate.y >> 1, coordinate.z >> 1), GetMaxLevel() - 1);
|
||||
// The last bit of the coordinate can be used to find the childindex:
|
||||
ChildIndex index = (((coordinate.x & 1) == 1) ? 1 : 0)
|
||||
+ (((coordinate.y & 1) == 1) ? 2 : 0)
|
||||
+ (((coordinate.z & 1) == 1) ? 4 : 0);
|
||||
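// Example: coordinate (5, 2, 7) has low bits (x, y, z) = (1, 0, 1), giving child index 1 + 0 + 4 = 5.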
|
||||
parentOfLeaf->SetChild(index, existingLeaf->second);
|
||||
}
|
||||
else
|
||||
{
|
||||
MaterialLibraryPointer textureIndex = mMaterialLibrary->GetTextureIndex(material);
|
||||
MaterialLibraryNode* leaf = Tree::AddLeafNode(coordinate);
|
||||
leaf->SetMaterial(textureIndex);
|
||||
leafMap.insert(std::pair<T, MaterialLibraryNode*>(material, leaf));
|
||||
}
|
||||
}
|
||||
|
||||
bool HasAdditionalPool() const override { return true; }
|
||||
protected:
|
||||
void WriteProperties(std::ostream& file) override {
|
||||
// Write the material texture
|
||||
WriteMaterialTexture(file);
|
||||
}
|
||||
void ReadProperties(std::istream& file) override {
|
||||
// Read the material texture
|
||||
ReadMaterialTexture(file);
|
||||
// Restore the material library from the texture
|
||||
delete mMaterialLibrary;
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>(mMaterialTexture, mMaterialTextureSize, mMaxTextureIndex);
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
//// Refresh the material texture for debug purposes
|
||||
//mMaterialTexture.clear();
|
||||
//GetMaterialTexture();
|
||||
}
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override { WriteMaterialTexture(file); }
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override { ReadMaterialTexture(file); }
|
||||
};
|
||||
337
Research/scene/Octree/MaterialLibraryUniqueIndexTree.h
Normal file
@@ -0,0 +1,337 @@
|
||||
#pragma once
|
||||
#include "UniqueIndexTree.h"
|
||||
#include "MaterialTree.h"
|
||||
#include "IMaterialTexture.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../Material/MaterialLibrary.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
#include "../../inc/lodepng/lodepng.h"
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include <stack>
|
||||
|
||||
//#define PRINT_ERROR_DATA
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
|
||||
class MaterialLibraryUniqueIndexTree : public UniqueIndexTree<MaterialLibraryPointer>, public IMaterialTexture
|
||||
{
|
||||
private:
|
||||
// After the tree is finalized, the material library will be used to contain the actual materials
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* mMaterialLibrary;
|
||||
|
||||
std::vector<unsigned8> mMaterialTexture;
|
||||
unsigned16 mMaterialTextureSize;
|
||||
MaterialLibraryPointer mMaxTextureIndex;
|
||||
|
||||
inline void WriteMaterialTexture(std::ostream& file)
|
||||
{
|
||||
assert(mMaterialLibrary != NULL);
|
||||
mMaterialLibrary->Serialize(file);
|
||||
}
|
||||
|
||||
inline void ReadMaterialTexture(std::istream& file)
|
||||
{
|
||||
if (mMaterialLibrary == NULL)
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
mMaterialLibrary->Deserialize(file);
|
||||
GetMaterialTexture();
|
||||
}
|
||||
|
||||
public:
|
||||
MaterialLibraryUniqueIndexTree(unsigned8 maxLevel, CompressedTexture<MaterialLibraryPointer>* nodeMaterialsTexture, unsigned32 collapsedMaterialLevels) :
|
||||
UniqueIndexTree(maxLevel, nodeMaterialsTexture, collapsedMaterialLevels),
|
||||
mMaterialLibrary(NULL),
|
||||
mMaterialTexture(std::vector<unsigned8>()),
|
||||
mMaterialTextureSize(0),
|
||||
mMaxTextureIndex(MaterialLibraryPointer(0))
|
||||
{}
|
||||
|
||||
// Creates a UniqueIndexRoot with "maxLevel" levels.
|
||||
// Note that the nodeMaterialsTexture will be deleted if the tree is deleted.
|
||||
MaterialLibraryUniqueIndexTree(unsigned8 maxLevel, CompressedTexture<MaterialLibraryPointer>* nodeMaterialsTexture)
|
||||
: MaterialLibraryUniqueIndexTree(maxLevel, nodeMaterialsTexture, 0)
|
||||
{}
|
||||
|
||||
~MaterialLibraryUniqueIndexTree() override {
|
||||
if(mMaterialLibrary != NULL)
|
||||
delete mMaterialLibrary;
|
||||
}
|
||||
|
||||
void AppendPostProcess(glm::uvec3 coordinates, unsigned8 level, Tree* tree) override
|
||||
{
|
||||
MaterialLibraryUniqueIndexTree<T, Comparer, channelsPerPixel>* uniqueIndexTree = (MaterialLibraryUniqueIndexTree<T, Comparer, channelsPerPixel>*)tree;
|
||||
|
||||
// Copy the color information from the blocks of the other tree. Make sure that the blocks are inserted in the correct position.
|
||||
auto oldLibrary = mMaterialLibrary;
|
||||
auto existingMaterials = GetUniqueMaterials();
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (auto material : existingMaterials)
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
auto appendedLibrary = uniqueIndexTree->GetMaterialLibrary();
|
||||
auto appendedMaterials = uniqueIndexTree->GetUniqueMaterials();
|
||||
for (auto material : appendedMaterials)
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
mMaterialLibrary->Finalize();
|
||||
mMaterialTexture = std::vector<unsigned char>();
|
||||
mMaterialTextureSize = 0;
|
||||
mMaxTextureIndex = mMaterialLibrary->GetMaxTextureIndex();
|
||||
|
||||
// Replace the existing materials
|
||||
std::unordered_map<MaterialLibraryPointer, MaterialLibraryPointer> ownPointerReplacers;
|
||||
for (auto material : existingMaterials)
|
||||
ownPointerReplacers[oldLibrary->GetTextureIndex(material)] = mMaterialLibrary->GetTextureIndex(material);
|
||||
UniqueIndexTree::ReplaceMaterials(ownPointerReplacers);
|
||||
delete oldLibrary;
|
||||
|
||||
// Create a map from the old tree to the new tree replacers:
|
||||
std::unordered_map<MaterialLibraryPointer, MaterialLibraryPointer> appendedPointerReplacers;
|
||||
for (auto material : appendedMaterials)
|
||||
appendedPointerReplacers[appendedLibrary->GetTextureIndex(material)] = mMaterialLibrary->GetTextureIndex(material);
|
||||
|
||||
UniqueIndexTree::AppendPostProcess(coordinates, level, uniqueIndexTree, appendedPointerReplacers);
|
||||
}
|
||||
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacementMap)
|
||||
{
|
||||
assert(mMaterialLibrary != NULL);
|
||||
auto oldLibrary = mMaterialLibrary;
|
||||
std::vector<T> oldMaterials = oldLibrary->GetMaterials();
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
std::unordered_map<T, T> actualReplacers;
|
||||
for (auto material : oldMaterials)
|
||||
{
|
||||
auto replacerIt = replacementMap.find(material);
|
||||
T replacer = material;
|
||||
if (replacerIt == replacementMap.end())
|
||||
printf("Material not found in replacementMap");
|
||||
else
|
||||
replacer = replacerIt->second;
|
||||
mMaterialLibrary->AddMaterial(replacer);
|
||||
actualReplacers.insert(std::pair<T, T>(material, replacer));
|
||||
}
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
std::unordered_map<MaterialLibraryPointer, MaterialLibraryPointer> pointerReplacers;
|
||||
for (auto material : oldMaterials)
|
||||
{
|
||||
auto oldPointer = oldLibrary->GetTextureIndex(material);
|
||||
auto newPointer = mMaterialLibrary->GetTextureIndex(actualReplacers[material]);
|
||||
pointerReplacers.insert(std::pair<MaterialLibraryPointer, MaterialLibraryPointer>(oldPointer, newPointer));
|
||||
}
|
||||
|
||||
UniqueIndexTree::ReplaceMaterials(pointerReplacers);
|
||||
|
||||
delete oldLibrary;
|
||||
}
|
||||
|
||||
// Will build a unique index tree with the same material information as the given material tree.
|
||||
// This will consume (and delete) the given tree!
|
||||
template<typename MaterialTree>
|
||||
void BaseOn(MaterialTree* tree)
|
||||
{
|
||||
// Create a material library for all original materials
|
||||
assert(mMaterialLibrary == NULL);
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
std::vector<T> uniqueMaterials = tree->GetUniqueMaterials();
|
||||
for (auto material : uniqueMaterials)
|
||||
mMaterialLibrary->AddMaterial(material);
|
||||
mMaterialLibrary->Finalize();
|
||||
|
||||
// Create a new material tree containing the material pointers:
|
||||
size_t nodeCount = tree->GetNodeCount();
|
||||
std::vector<MaterialLibraryPointer> pointers(nodeCount);
|
||||
tbb::parallel_for(size_t(0), nodeCount, [&](size_t i)
|
||||
{
|
||||
pointers[i] = mMaterialLibrary->GetTextureIndex(tree->GetMaterial(tree->GetTypedNode((unsigned32)i)));
|
||||
});
|
||||
|
||||
UniqueIndexTree::BaseOn(tree, pointers);
|
||||
}
|
||||
|
||||
// Returns a list with all unique materials in the tree
|
||||
std::vector<T> GetUniqueMaterials() const
|
||||
{
|
||||
return mMaterialLibrary->GetMaterials();
|
||||
}
|
||||
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* GetMaterialLibrary() const
|
||||
{
|
||||
return mMaterialLibrary;
|
||||
}
|
||||
|
||||
// Returns the material of the node at index i
|
||||
T GetMaterial(const size_t& i) const
|
||||
{
|
||||
return mMaterialLibrary->GetMaterial(GetNodeValue(i));
|
||||
}
|
||||
|
||||
std::vector<T> GetMaterials(size_t fromIndex = 0) const
|
||||
{
|
||||
std::vector<MaterialLibraryPointer> pointersTexture = GetNodeValues(fromIndex);
|
||||
std::vector<T> res(pointersTexture.size());
|
||||
tbb::parallel_for(size_t(0), res.size(), [&](size_t i)
|
||||
{
|
||||
res[i] = mMaterialLibrary->GetMaterial(pointersTexture[i]);
|
||||
});
|
||||
return res;
|
||||
}
|
||||
|
||||
// Returns the texture containing each material once
|
||||
std::vector<unsigned8> GetMaterialTexture() override
|
||||
{
|
||||
if (!mMaterialTexture.empty())
|
||||
return mMaterialTexture;
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
mMaterialTextureSize = mMaterialLibrary->GetTextureSize();
|
||||
mMaterialTexture = mMaterialLibrary->GetTexture();
|
||||
mMaxTextureIndex = mMaterialLibrary->GetMaxTextureIndex();
|
||||
return mMaterialTexture;
|
||||
}
|
||||
|
||||
unsigned GetMaterialTextureSize() override
|
||||
{
|
||||
GetMaterialTexture();
|
||||
return mMaterialTextureSize;
|
||||
}
|
||||
|
||||
void PrintDebugInfo() const override
|
||||
{
|
||||
UniqueIndexTree::PrintDebugInfo();
|
||||
}
|
||||
|
||||
unsigned8 GetMaterialTextureChannelsPerPixel() override { return channelsPerPixel; }
|
||||
protected:
|
||||
void WriteAdditionalUniqueIndexTreeProperties(std::ostream& file) override {
|
||||
WriteMaterialTexture(file);
|
||||
}
|
||||
|
||||
void ReadAdditionalUniqueIndexTreeProperties(std::istream& file) override {
|
||||
ReadMaterialTexture(file);
|
||||
}
|
||||
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override { WriteMaterialTexture(file); UniqueIndexTree::WriteAdditionalPoolProperties(file); }
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override { ReadMaterialTexture(file); UniqueIndexTree::ReadAdditionalPoolProperties(file); }
|
||||
};
|
||||
|
||||
#ifdef PRINT_ERROR_DATA
|
||||
#include "../../core/OctreeBuilder/ColorQuantizerFactory.h"
|
||||
#include "../Material/MaterialQuantizer/ColorQuantizer/BaseColorQuantizer.h"
|
||||
|
||||
struct ColorErrorInfo
|
||||
{
|
||||
glm::u64vec3 sumError = glm::u64vec3(0);
|
||||
unsigned maxError = 0;
|
||||
glm::uvec3 maxErrorValue = glm::uvec3(0);
|
||||
long double sumDeltaE = 0;
|
||||
float maxDeltaE = 0;
|
||||
};
|
||||
|
||||
template<>
|
||||
void MaterialLibraryUniqueIndexTree<Color, ColorCompare>::PrintDebugInfo() const
|
||||
{
|
||||
// If the colors are not quantized, print some information
|
||||
std::vector<Color> uniqueMaterials = mMaterialLibrary->GetMaterials();
|
||||
printf("Calculating quantization errors over the full scene (%u unique colors, %llu values)...\n", (unsigned32)uniqueMaterials.size(), (unsigned64)this->GetMaterialCount());
|
||||
std::string quantizationTypes[4] = { "lab256", "lab1024", "lab4096", "lab16384" };
|
||||
for (std::string type : quantizationTypes)
|
||||
{
|
||||
printf("%s: ", type.c_str());
|
||||
BaseColorQuantizer* quantizer = ColorQuantizerFactory::Create(type);
|
||||
auto quantizationMap = quantizer->QuantizeMaterials(uniqueMaterials);
|
||||
|
||||
//glm::dvec3 sumError(0);
|
||||
//unsigned maxError = 0;
|
||||
//glm::uvec3 maxErrorValue(0);
|
||||
//long double sumDeltaE = 0;
|
||||
//float maxDeltaE = 0;
|
||||
ColorErrorInfo initial;
|
||||
//tbb::parallel_for((size_t)0, this->GetMaterialCount(), [&](const size_t& i)
|
||||
//for (size_t i = 0; i < this->GetMaterialCount(); i++)
|
||||
ColorErrorInfo error = tbb::parallel_reduce(tbb::blocked_range<size_t>(0, this->GetMaterialCount()), initial, [&](const tbb::blocked_range<size_t>& r, ColorErrorInfo errorInfo)
|
||||
{
|
||||
for (size_t i = r.begin(); i != r.end(); i++)
|
||||
{
|
||||
const Color& actual = GetMaterial(i);
|
||||
const Color& quantized = quantizationMap->at(actual);
|
||||
glm::ivec3 error = glm::abs(glm::ivec3(actual.GetColor()) - glm::ivec3(quantized.GetColor()));
|
||||
errorInfo.sumError += error;
|
||||
unsigned errorU = error.r + error.g + error.b;
|
||||
if (errorU > errorInfo.maxError)
|
||||
{
|
||||
errorInfo.maxError = errorU;
|
||||
errorInfo.maxErrorValue = error;
|
||||
}
|
||||
float deltaE = ColorHelper::GetDeltaEFromRGB(actual.GetColor(), quantized.GetColor());
|
||||
if (deltaE == deltaE) // Only sum if it is not NaN...
|
||||
errorInfo.sumDeltaE += deltaE;
|
||||
if (deltaE > errorInfo.maxDeltaE) errorInfo.maxDeltaE = deltaE;
|
||||
}
|
||||
return errorInfo;
|
||||
}, [&](ColorErrorInfo a, ColorErrorInfo b)
|
||||
{
|
||||
ColorErrorInfo res;
|
||||
res.sumError = a.sumError + b.sumError;
|
||||
res.maxError = a.maxError > b.maxError ? a.maxError : b.maxError;
|
||||
res.maxErrorValue = a.maxError > b.maxError ? a.maxErrorValue : b.maxErrorValue;
|
||||
res.sumDeltaE = a.sumDeltaE + b.sumDeltaE;
|
||||
res.maxDeltaE = a.maxDeltaE > b.maxDeltaE ? a.maxDeltaE : b.maxDeltaE;
|
||||
return res;
|
||||
});
|
||||
//);
|
||||
glm::dvec3 sumError = glm::dvec3(error.sumError);
|
||||
glm::dvec3 meanError = sumError / double(GetMaterialCount());
|
||||
double meanDeltaE = error.sumDeltaE / double(GetMaterialCount());
|
||||
printf("Mean errors: (%f, %f, %f), Max errors: (%u, %u, %u), Mean delta-E: %f, Max delta-E: %f\n", meanError.x, meanError.y, meanError.z, error.maxErrorValue.x, error.maxErrorValue.y, error.maxErrorValue.z, meanDeltaE, error.maxDeltaE);
|
||||
|
||||
delete quantizationMap;
|
||||
}
|
||||
}
|
||||
|
||||
#include "../Material/MaterialQuantizer/NormalQuantizer.h"
|
||||
#include "../../inc/glm/gtx/vector_angle.hpp"
|
||||
|
||||
struct NormalErrorInfo
|
||||
{
|
||||
float maxAngle = 0;
|
||||
long double sumAngle = 0;
|
||||
};
|
||||
|
||||
template<>
|
||||
void MaterialLibraryUniqueIndexTree<SmallNormal, NormalCompare, 4>::PrintDebugInfo() const
|
||||
{
|
||||
// If the normals are not quantized, print some information
|
||||
std::vector<SmallNormal> uniqueMaterials = mMaterialLibrary->GetMaterials();
|
||||
printf("Calculating normal quantization errors over the full scene (%u unique normals, %llu values)...\n", (unsigned32)uniqueMaterials.size(), (unsigned64)this->GetMaterialCount());
|
||||
unsigned8 bits[] = { 12 };
|
||||
for (unsigned8 quantizeToBits : bits)
|
||||
{
|
||||
printf("%u bits: ", quantizeToBits);
|
||||
NormalQuantizer quantizer(quantizeToBits);
|
||||
|
||||
NormalErrorInfo initial;
|
||||
NormalErrorInfo error = tbb::parallel_reduce(tbb::blocked_range<size_t>(0, this->GetMaterialCount()), initial, [&](const tbb::blocked_range<size_t>& r, NormalErrorInfo errorInfo)
|
||||
{
|
||||
for (size_t i = r.begin(); i != r.end(); i++)
|
||||
{
|
||||
const SmallNormal& actual = GetMaterial(i);
|
||||
SmallNormal quantized = quantizer.Quantize(actual);
|
||||
float angle = glm::angle(actual.Get(), quantized.Get());
|
||||
if (angle > errorInfo.maxAngle) errorInfo.maxAngle = angle;
|
||||
errorInfo.sumAngle += angle;
|
||||
}
|
||||
return errorInfo;
|
||||
}, [&](NormalErrorInfo a, NormalErrorInfo b)
|
||||
{
|
||||
NormalErrorInfo res;
|
||||
res.maxAngle = a.maxAngle > b.maxAngle ? a.maxAngle : b.maxAngle;
|
||||
res.sumAngle = a.sumAngle + b.sumAngle;
|
||||
return res;
|
||||
});
|
||||
//);
|
||||
long double meanError = error.sumAngle / (long double)GetMaterialCount();
|
||||
printf("Mean angle error: %f, Max angle error: %f\n", meanError * (360.0 / (2 * mPi)), error.maxAngle * (360.0 / (2 * mPi)));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
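Both AppendPostProcess and ReplaceMaterials above follow the same rebuild-and-remap pattern: a fresh material library is filled with the surviving materials, and every old texture index is mapped to its new one before the node pool is rewritten. A minimal self-contained sketch of that pattern, using hypothetical stand-ins (TinyLibrary, plain int materials) instead of the project classes:

#include <cstdio>
#include <unordered_map>
#include <vector>

// Hypothetical stand-ins for MaterialLibrary and MaterialLibraryPointer.
using Material = int;
using TextureIndex = unsigned;

struct TinyLibrary {
    std::vector<Material> materials;                      // finalized order
    std::unordered_map<Material, TextureIndex> indexOf;   // material -> texture index
    void Add(Material m) {
        if (indexOf.emplace(m, (TextureIndex)materials.size()).second)
            materials.push_back(m);
    }
};

int main() {
    TinyLibrary oldLib;
    for (Material m : {7, 3, 9}) oldLib.Add(m);

    // Rebuild: keep 7 and 9, replace 3 by 9 (as a ReplaceMaterials-style map would).
    std::unordered_map<Material, Material> replacement = {{7, 7}, {3, 9}, {9, 9}};
    TinyLibrary newLib;
    for (Material m : oldLib.materials) newLib.Add(replacement.at(m));

    // Remap old texture indices to new ones, which is exactly what the node pool needs.
    std::unordered_map<TextureIndex, TextureIndex> pointerReplacers;
    for (Material m : oldLib.materials)
        pointerReplacers[oldLib.indexOf.at(m)] = newLib.indexOf.at(replacement.at(m));

    for (auto& kv : pointerReplacers)
        std::printf("old index %u -> new index %u\n", kv.first, kv.second);
}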
67
Research/scene/Octree/MaterialNode.h
Normal file
@@ -0,0 +1,67 @@
|
||||
#pragma once
|
||||
#include <fstream>
|
||||
#include "Node.h"
|
||||
#include "BaseTree.h"
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../core/Serializer.h"
|
||||
|
||||
template <typename T, typename Comparer = std::less<T>>
|
||||
class MaterialNode : public Node
|
||||
{
|
||||
private:
|
||||
T mMaterial;
|
||||
public:
|
||||
MaterialNode(BaseTree* root, unsigned8 level = 0) : Node(root, level) {}
|
||||
MaterialNode(BaseTree* root, T material, unsigned8 level = 0) : MaterialNode(root, level) { mMaterial = material; }
|
||||
//MaterialNode(const MaterialNode& node) : mMaterial(node.mMaterial), Node(node) {} // Copy ctor
|
||||
MaterialNode(MaterialNode&& node) : Node(std::move(node))
|
||||
{
|
||||
mMaterial = std::move(node.mMaterial);
|
||||
}
|
||||
~MaterialNode() {}
|
||||
|
||||
//MaterialNode& operator=(const MaterialNode& node)
|
||||
//{
|
||||
// mMaterial = node.mMaterial;
|
||||
// Node::operator=(node);
|
||||
// return *this;
|
||||
//}
|
||||
MaterialNode& operator=(MaterialNode&& node)
|
||||
{
|
||||
mMaterial = std::move(node.mMaterial);
|
||||
Node::operator=(std::move(node));
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
T GetMaterial() const { return mMaterial; }
|
||||
void SetMaterial(T material) { mMaterial = material; }
|
||||
|
||||
bool Compare(const MaterialNode& node) const
|
||||
{
|
||||
if (this->mMaterial != node.mMaterial) return Comparer()(this->mMaterial, node.mMaterial);
|
||||
return Node::Compare(node);
|
||||
}
|
||||
|
||||
bool Equals(const MaterialNode& node) const
|
||||
{
|
||||
if (this == &node) return true;
|
||||
return node.mMaterial == this->mMaterial && Node::Equals(node);
|
||||
}
|
||||
|
||||
void WriteProperties(std::ostream& file)
|
||||
{
|
||||
Serializer<T>::Serialize(mMaterial, file);
|
||||
}
|
||||
void ReadProperties(std::istream& file)
|
||||
{
|
||||
Serializer<T>::Deserialize(mMaterial, file);
|
||||
}
|
||||
|
||||
void CopyProperties(MaterialNode* source)
|
||||
{
|
||||
auto node = (MaterialNode<T, Comparer>*)source;
|
||||
this->SetMaterial(node->GetMaterial());
|
||||
}
|
||||
};
|
||||
199
Research/scene/Octree/MaterialTree.h
Normal file
@@ -0,0 +1,199 @@
|
||||
#pragma once
|
||||
#include "Tree.h"
|
||||
#include "MaterialNode.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include "../../core/CollectionHelper.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include <unordered_map>
|
||||
//#include <map>
|
||||
#include <stack>
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>>
|
||||
class MaterialTree : public Tree<MaterialNode<T, Comparer>>
|
||||
{
|
||||
private:
|
||||
std::unordered_map<T, Node*> mLeafMap;
|
||||
bool mUseLeafMap;
|
||||
public:
|
||||
// Creates a Material Tree with "maxLevel" levels.
|
||||
MaterialTree(unsigned8 maxLevel) : Tree<MaterialNode<T, Comparer>>(maxLevel), mUseLeafMap(false)
|
||||
{
|
||||
mLeafsAreEqual = false;
|
||||
}
|
||||
|
||||
~MaterialTree() override { }
|
||||
|
||||
// When using the leaf map, leaf nodes with the same material will only be created once (i.e. the leaf level is already a DAG)
|
||||
void UseLeafMap(bool value)
|
||||
{
|
||||
mUseLeafMap = value;
|
||||
|
||||
// Build the leaf map for the nodes that already exist
|
||||
if (mUseLeafMap)
|
||||
{
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
MaterialNode<T, Comparer>* node = GetTypedNode(i);
|
||||
if (node->GetLevel() == GetMaxLevel())
|
||||
mLeafMap.insert(std::pair<T, Node*>(node->GetMaterial(), node));
|
||||
}
|
||||
// Convert the leaf nodes to a DAG
|
||||
ToDAG(GetMaxLevel() - 1);
|
||||
}
|
||||
}
|
||||
|
||||
// Assuming all leaf nodes contain materials, propagates those materials up the tree, storing a weighted average material in every inner node.
|
||||
// It is advised to call this method after DAG conversion: the result is still valid at that point, and the call is cheaper.
|
||||
void PropagateMaterials(T(*Average)(const std::vector<T>& materials, const std::vector<float>& weights))
|
||||
{
|
||||
// Bottom up go through the nodes to propagate the materials
|
||||
auto levelIndices = SortOnLevel();
|
||||
// Calculate how many levels actually contain data
|
||||
unsigned8 filledLevels = 0;
|
||||
for (unsigned8 level = 0; level <= GetMaxLevel(); level++)
|
||||
{
|
||||
if (levelIndices[level] != levelIndices[level + 1]) filledLevels++;
|
||||
else break;
|
||||
}
|
||||
|
||||
// If fewer than two levels contain data, only the root level exists, thus no propagation is needed
|
||||
if (filledLevels < 2) return;
|
||||
|
||||
// Set node weights for weighted average calculation
|
||||
std::vector<float> lastLevelNodeWeights;
|
||||
std::vector<float> levelNodeWeights;
|
||||
|
||||
// Bottom-up calculate the weighted average material for each node in the tree, and store them in perfectMaterialPerNode
|
||||
for (unsigned8 level = filledLevels - 1; level-- > 0;)
|
||||
{
|
||||
auto levelStart = levelIndices[level];
|
||||
auto levelEnd = levelIndices[level + 1];
|
||||
levelNodeWeights = std::vector<float>(levelEnd - levelStart);
|
||||
tbb::parallel_for(levelStart, levelEnd, [&](const unsigned32 i)
|
||||
{
|
||||
// Get the current node
|
||||
MaterialNode<T, Comparer>* node = GetTypedNode(i);
|
||||
std::vector<T> childMaterials;
|
||||
std::vector<float> childWeights;
|
||||
float nodeWeight = 0;
|
||||
// Find all materials the children use
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
const unsigned32& i = children[c];
|
||||
MaterialNode<T, Comparer>* child = GetTypedNode(i);
|
||||
float childWeight = (i - levelEnd < lastLevelNodeWeights.size()) ? lastLevelNodeWeights[i - levelEnd] : 1.f;
|
||||
childMaterials.push_back(child->GetMaterial());
|
||||
childWeights.push_back(childWeight);
|
||||
nodeWeight += childWeight;
|
||||
}
|
||||
// Calculate the average material and retrieve the closest material to that from the library
|
||||
node->SetMaterial(Average(childMaterials, childWeights));
|
||||
// Store the weighted average in perfectMaterialPerNode
|
||||
levelNodeWeights[i - levelStart] = nodeWeight;
|
||||
});
|
||||
// Update the last level node weights to the node weights of the current level
|
||||
lastLevelNodeWeights = std::move(levelNodeWeights);
|
||||
}
|
||||
}
|
||||
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacementMap)
|
||||
{
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
auto node = GetTypedNode(i);
|
||||
node->SetMaterial(replacementMap.at(node->GetMaterial()));
|
||||
});
|
||||
}
|
||||
|
||||
void SetMaterials(std::vector<T> materials)
|
||||
{
|
||||
assert(materials.size() == GetNodeCount());
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
{
|
||||
MaterialNode<T, Comparer>* node = GetTypedNode(i);
|
||||
node->SetMaterial(materials[i]);
|
||||
});
|
||||
}
|
||||
|
||||
T GetMaterial(MaterialNode<T, Comparer>* node)
|
||||
{
|
||||
return node->GetMaterial();
|
||||
}
|
||||
|
||||
std::vector<T> GetMaterials() const
|
||||
{
|
||||
std::vector<T> nodeMaterials(GetNodeCount());
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
{
|
||||
nodeMaterials[i] = GetTypedNode(i)->GetMaterial();
|
||||
});
|
||||
return nodeMaterials;
|
||||
}
|
||||
|
||||
// Returns a list with all unique materials in the tree
|
||||
std::vector<T> GetUniqueMaterials() const
|
||||
{
|
||||
std::vector<T> nodeMaterials = GetMaterials();
|
||||
CollectionHelper::Unique(nodeMaterials, Comparer());
|
||||
return nodeMaterials;
|
||||
}
|
||||
|
||||
// Sets the material of a node.
|
||||
void SetMaterial(glm::uvec3 coordinate, unsigned level, T material)
|
||||
{
|
||||
MaterialNode<T, Comparer>* node = AddNode(coordinate, level);
|
||||
node->SetMaterial(material);
|
||||
}
|
||||
|
||||
// Creates a leaf node and sets its material
|
||||
void AddLeafNode(glm::uvec3 coordinate, T material)
|
||||
{
|
||||
if (mUseLeafMap)
|
||||
{
|
||||
// Check if there is already a leaf for this material (for auto reuse)
|
||||
auto existingLeaf = mLeafMap.find(material);
|
||||
Node* leaf = NULL;
|
||||
if (existingLeaf != mLeafMap.end())
|
||||
{
|
||||
// If there is a leaf, use it
|
||||
assert(material == ((MaterialNode<T, Comparer>*)(existingLeaf->second))->GetMaterial());
|
||||
leaf = existingLeaf->second;
|
||||
}
|
||||
else
|
||||
{
|
||||
// If no leaf with this material exists yet, create it
|
||||
leaf = Create(GetMaxLevel());
|
||||
((MaterialNode<T, Comparer>*)leaf)->SetMaterial(material);
|
||||
mLeafMap.insert(std::pair<T, Node*>(material, leaf));
|
||||
}
|
||||
// Create the parent node of the leaf
|
||||
MaterialNode<T, Comparer>* parentOfLeaf = Tree<MaterialNode<T, Comparer>>::AddNode(glm::uvec3(coordinate.x >> 1, coordinate.y >> 1, coordinate.z >> 1), GetMaxLevel() - 1);
|
||||
// The last bit of the coordinate can be used to find the childindex in this parent:
|
||||
ChildIndex index = (((coordinate.x & 1) == 1) ? 1 : 0)
|
||||
+ (((coordinate.y & 1) == 1) ? 2 : 0)
|
||||
+ (((coordinate.z & 1) == 1) ? 4 : 0);
|
||||
|
||||
// Make sure the leaf points to this child
|
||||
parentOfLeaf->SetChild(index, leaf);
|
||||
}
|
||||
else
|
||||
{
|
||||
MaterialNode<T, Comparer>* leaf = Tree<MaterialNode<T, Comparer>>::AddLeafNode(coordinate);
|
||||
leaf->SetMaterial(material);
|
||||
}
|
||||
}
|
||||
|
||||
bool HasAdditionalPool() const override { return false; }
|
||||
protected:
|
||||
unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override
|
||||
{
|
||||
return sizeof(T);
|
||||
}
|
||||
std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override
|
||||
{
|
||||
return ((MaterialNode<T, Comparer>*)node)->GetMaterial().Serialize();
|
||||
}
|
||||
};
|
||||
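PropagateMaterials above computes, bottom-up, a weighted average material per inner node, where each child's weight is the number of leaves it covers. A minimal serial sketch of that weighting scheme with a hypothetical flat node layout and float-valued materials (no TBB, no project types):

#include <cstdio>
#include <vector>

// Hypothetical flat tree: node 0 is the root, children listed per node, leaves carry a value.
struct FlatNode { std::vector<int> children; float material = 0.f; float weight = 0.f; };

// Bottom-up weighted average: a node's weight is the number of leaves below it,
// its material is the leaf-count-weighted mean of its children's materials.
void Propagate(std::vector<FlatNode>& nodes, int i) {
    FlatNode& n = nodes[i];
    if (n.children.empty()) { n.weight = 1.f; return; }  // leaf: weight 1, keep its material
    float sum = 0.f, weight = 0.f;
    for (int c : n.children) {
        Propagate(nodes, c);
        sum += nodes[c].material * nodes[c].weight;
        weight += nodes[c].weight;
    }
    n.material = sum / weight;
    n.weight = weight;
}

int main() {
    // Root with two children; the left child has two leaves, the right child is a leaf.
    std::vector<FlatNode> nodes(5);
    nodes[0].children = {1, 2};
    nodes[1].children = {3, 4};
    nodes[2].material = 0.9f;   // leaf
    nodes[3].material = 0.1f;   // leaf
    nodes[4].material = 0.3f;   // leaf
    Propagate(nodes, 0);
    std::printf("root material %.3f (expected (0.1+0.3+0.9)/3)\n", nodes[0].material);
}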
11
Research/scene/Octree/MultiRootBitsTree.h
Normal file
@@ -0,0 +1,11 @@
|
||||
#pragma once
|
||||
#include "../Material/BitsMaterial.h"
|
||||
#include "LeafMaterialMultiRootTree.h"
|
||||
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<2> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<3> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<4> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<5> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<6> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<7> >;
|
||||
template class LeafMaterialMultiRootTree < BitsMaterial<8> >;
|
||||
187
Research/scene/Octree/MultiRootTree.h
Normal file
@@ -0,0 +1,187 @@
|
||||
#pragma once
|
||||
#include "Tree.h"
|
||||
#include "IAdditionalProperties.h"
|
||||
|
||||
template<typename NodeType = Node>
|
||||
class MultiRootTree : public Tree<NodeType>, public IAdditionalProperties
|
||||
{
|
||||
public:
|
||||
MultiRootTree(unsigned8 maxLevel, unsigned32 slaveRootCount) : Tree(maxLevel)
|
||||
{
|
||||
// Initialize the slaves
|
||||
AddSlaveRoots(slaveRootCount);
|
||||
}
|
||||
~MultiRootTree() override {}
|
||||
|
||||
void Clear() override
|
||||
{
|
||||
mSlaveRoots.clear();
|
||||
Tree<NodeType>::Clear();
|
||||
}
|
||||
|
||||
unsigned32 GetSlaveRootCount() const { return (unsigned32)mSlaveRoots.size(); }
|
||||
const NodeType* GetSlaveRoot(unsigned32 i) const { return GetTypedNode(mSlaveRoots[i]); }
|
||||
NodeType* GetSlaveRoot(unsigned32 i) { return GetTypedNode(mSlaveRoots[i]); }
|
||||
void AddSlaveRoots(size_t count)
|
||||
{
|
||||
size_t originalSlaveRootCount = GetSlaveRootCount();
|
||||
mSlaveRoots.resize(originalSlaveRootCount + count);
|
||||
for (size_t i = 0; i < count; i++)
|
||||
mSlaveRoots[originalSlaveRootCount + i] = Create(0)->GetIndex();
|
||||
}
|
||||
|
||||
void Append(glm::uvec3 coordinates, unsigned8 level, MultiRootTree* tree)
|
||||
{
|
||||
// Let a possible inheriting class do preprocessing on the current tree before the append
|
||||
AppendPreProcess(coordinates, level, tree);
|
||||
|
||||
// If the tree that is to be appended has more roots, add roots to this tree first:
|
||||
if (tree->GetSlaveRootCount() > this->GetSlaveRootCount())
|
||||
AddSlaveRoots(tree->GetSlaveRootCount() - this->GetSlaveRootCount());
|
||||
|
||||
std::vector<unsigned32> equivalents(tree->GetNodeCount());
|
||||
|
||||
// First create/get the node that acts as the root of the tree to append
|
||||
NodeType* treeRoot = this->AddNode(coordinates, level);
|
||||
treeRoot->CopyProperties(tree->GetRoot());
|
||||
equivalents[0] = treeRoot->GetIndex();
|
||||
|
||||
// Make copies of all nodes in tree, and add them to our own nodepool (with the correct level and root)
|
||||
NodeType* copy = NULL;
|
||||
for (unsigned32 nodeId = 1; nodeId < tree->GetNodeCount(); nodeId++)
|
||||
{
|
||||
NodeType* node = tree->GetTypedNode(nodeId);
|
||||
if (node->GetLevel() == 0)
|
||||
{
|
||||
// Find which slave this root belongs to
|
||||
for (unsigned i = 0; i < tree->GetSlaveRootCount(); i++)
|
||||
if (tree->GetSlaveRoot(i) == node)
|
||||
// And add it
|
||||
copy = (NodeType*)this->GetSlaveRoot(i)->AddNode(coordinates, level);
|
||||
}
|
||||
else
|
||||
copy = this->Create(node->GetLevel() + level);
|
||||
assert(copy != NULL);
|
||||
copy->CopyProperties(node);
|
||||
equivalents[nodeId] = copy->GetIndex();
|
||||
}
|
||||
// Restore all child pointers
|
||||
unsigned32* newChildren = new unsigned32[8];
|
||||
for (unsigned32 i = 0; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* copy = GetTypedNode(equivalents[i]);
|
||||
NodeType* node = tree->GetTypedNode(i);
|
||||
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
|
||||
for (ChildIndex c = 0; c < childCount; c++) newChildren[c] = equivalents[children[c]];
|
||||
|
||||
copy->SetChildren(node->GetChildmask(), newChildren);
|
||||
}
|
||||
delete[] newChildren;
|
||||
|
||||
// Let a possible inheriting class do postprocessing on the added nodes
|
||||
AppendPostProcess(coordinates, level, tree);
|
||||
}
|
||||
|
||||
// Adds a leaf node to the given slave root ID. Use AddLeafNode without additional arguments to add leaves to the main root.
|
||||
NodeType* AddLeafNode(glm::uvec3 coordinates)
|
||||
{
|
||||
return Tree<NodeType>::AddLeafNode(coordinates);
|
||||
}
|
||||
NodeType* AddLeafNode(glm::uvec3 coordinates, unsigned32 slaveRootID)
|
||||
{
|
||||
return (NodeType*)GetSlaveRoot(slaveRootID)->AddNode(coordinates, GetMaxLevel());
|
||||
}
|
||||
bool SlaveHasLeaf(glm::uvec3 coordinates, unsigned32 slaveRootID) const
|
||||
{
|
||||
return GetSlaveRoot(slaveRootID)->HasNode(coordinates, GetMaxLevel());
|
||||
}
|
||||
|
||||
std::vector<size_t> GetOctreeNodesPerLevel() const override
|
||||
{
|
||||
std::vector<size_t> octreeNodesPerLevel(GetMaxLevel() + 1);
|
||||
std::function<void(const Node*)> nodeCounter = [&octreeNodesPerLevel](const Node* node) -> void
|
||||
{
|
||||
octreeNodesPerLevel[node->GetLevel()]++;
|
||||
};
|
||||
this->Traverse(nodeCounter);
|
||||
for (unsigned32 i = 0; i < GetSlaveRootCount(); i++) GetSlaveRoot(i)->Traverse(nodeCounter);
|
||||
return octreeNodesPerLevel;
|
||||
}
|
||||
|
||||
//size_t GetMinimumNodePoolSize() const override;
|
||||
//std::vector<unsigned8>& GetNodePool() override;
|
||||
|
||||
void WriteProperties(std::ostream& file) override
|
||||
{
|
||||
// Write the number of slave roots, and their indexes
|
||||
Serializer<std::vector<unsigned32>, unsigned32>::Serialize(mSlaveRoots, file);
|
||||
}
|
||||
void ReadProperties(std::istream& file) override
|
||||
{
|
||||
// By this time all original slaves have been deleted, so we need to rebuild them
|
||||
Serializer<std::vector<unsigned32>, unsigned32>::Deserialize(mSlaveRoots, file);
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> GetAdditionalProperties() override
|
||||
{
|
||||
if (!mAdditionalProperties.empty()) return mAdditionalProperties;
|
||||
else
|
||||
{
|
||||
mAdditionalProperties.insert(std::pair<std::string, std::string>("RootCount", std::to_string(1 + mSlaveRoots.size())));
|
||||
return mAdditionalProperties;
|
||||
}
|
||||
}
|
||||
|
||||
bool HasAdditionalPool() const override { return true; }
|
||||
protected:
|
||||
unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override { return level == GetMaxLevel() ? 1 : 0; }
|
||||
virtual std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override
|
||||
{
|
||||
if (node->GetLevel() != GetMaxLevel()) return std::vector<unsigned8>();
|
||||
return std::vector<unsigned8>(1, 1);
|
||||
}
|
||||
|
||||
unsigned8 GetAdditionalTreeInfoSize() const override { return (unsigned8)(mSlaveRoots.size() + 1) * 2; }
|
||||
std::vector<unsigned8> GetAdditionalTreeInfo(const std::vector<size_t>& nodePointers) const override
|
||||
{
|
||||
std::vector<unsigned8> res(GetAdditionalTreeInfoSize());
|
||||
// Start with the main root:
|
||||
std::vector<unsigned8> rootPointer = BitHelper::SplitInBytes(nodePointers[0], 2);
|
||||
std::move(rootPointer.begin(), rootPointer.end(), res.begin());
|
||||
for (unsigned32 i = 0; i < GetSlaveRootCount(); i++)
|
||||
{
|
||||
size_t slavePointer = nodePointers[GetSlaveRoot(i)->GetIndex()];
|
||||
std::vector<unsigned8> slavePointerBytes = BitHelper::SplitInBytes(slavePointer, 2);
|
||||
std::move(slavePointerBytes.begin(), slavePointerBytes.end(), res.begin() + (1 + i) * 2);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void UpdateLocalReferences(const std::vector<unsigned32>& indexMap)
|
||||
{
|
||||
for (size_t i = 0; i < mSlaveRoots.size(); i++)
|
||||
mSlaveRoots[i] = indexMap[mSlaveRoots[i]];
|
||||
Tree<NodeType>::UpdateLocalReferences(indexMap);
|
||||
}
|
||||
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override
|
||||
{
|
||||
GetAdditionalProperties();
|
||||
Serializer<std::map<std::string, std::string>>::Serialize(mAdditionalProperties, file);
|
||||
}
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override
|
||||
{
|
||||
Serializer<std::map<std::string, std::string>>::Deserialize(mAdditionalProperties, file);
|
||||
}
|
||||
|
||||
std::vector<unsigned32> mSlaveRoots;
|
||||
std::map<std::string, std::string> mAdditionalProperties;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
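Append above copies every node of the appended tree into this tree's pool, recording in an `equivalents` table where each source node ended up, and then rewrites the child pointers through that table in a second pass. A self-contained sketch of the two-pass copy with hypothetical flat node pools:

#include <cstdio>
#include <vector>

// Hypothetical pool node: children are indices into the same pool.
struct PoolNode { std::vector<unsigned> children; };

// Copy every node of `src` into `dst` and rewrite child pointers via an index map.
void AppendPool(std::vector<PoolNode>& dst, const std::vector<PoolNode>& src) {
    std::vector<unsigned> equivalents(src.size());
    // Pass 1: allocate copies and remember where each source node ended up.
    for (unsigned i = 0; i < src.size(); i++) {
        equivalents[i] = (unsigned)dst.size();
        dst.push_back(PoolNode{});
    }
    // Pass 2: restore child pointers through the index map.
    for (unsigned i = 0; i < src.size(); i++)
        for (unsigned child : src[i].children)
            dst[equivalents[i]].children.push_back(equivalents[child]);
}

int main() {
    std::vector<PoolNode> dst(1);            // an existing root
    std::vector<PoolNode> src(3);
    src[0].children = {1, 2};                // source root with two children
    AppendPool(dst, src);
    dst[0].children.push_back(1);            // hang the copied root under our root
    std::printf("dst has %zu nodes, copied root's first child is node %u\n",
                dst.size(), dst[1].children[0]);
}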
234
Research/scene/Octree/Node.cpp
Normal file
@@ -0,0 +1,234 @@
|
||||
#pragma warning(disable:4996)
|
||||
#include "Node.h"
|
||||
|
||||
#include <fstream>
|
||||
#include <functional>
|
||||
//#include "../PropertyLoader.h"
|
||||
//#include "../Renderer.h"
|
||||
#include "../../core/IntersectTests.h"
|
||||
#include "BaseTree.h"
|
||||
|
||||
//************************************
|
||||
// Should only be called on a root node from outside this class
|
||||
//************************************
|
||||
Node::Node(const BaseTree* tree, unsigned8 level) :
|
||||
mLevel(level),
|
||||
mChildMask(ChildMask(0)),
|
||||
#ifdef USE_DYNAMIC_ARRAY
|
||||
mChildren(SmallDynamicArray<unsigned32>()),
|
||||
#else
|
||||
mChildren(std::vector<unsigned32>()),
|
||||
#endif
|
||||
mTree(tree != NULL ? tree->GetTreeIndex() : 0)
|
||||
{}
|
||||
|
||||
//Node::Node(const Node& node) :
|
||||
// mIndex(node.mIndex),
|
||||
// mLevel(node.mLevel),
|
||||
// mTree(node.mTree)
|
||||
//{
|
||||
// SetChildren(node.mChildMask, node.GetChildren());
|
||||
//}
|
||||
|
||||
//Node::Node(Node&& node) :
|
||||
// mChildren(std::move(node.mChildren)),
|
||||
// mIndex(std::move(node.mIndex)),
|
||||
// mChildMask(std::move(node.mChildMask)),
|
||||
// mLevel(std::move(node.mLevel)),
|
||||
// mTree(std::move(node.mTree))
|
||||
//{
|
||||
//}
|
||||
//
|
||||
//Node& Node::operator=(Node&& node)
|
||||
//{
|
||||
// mChildren = std::move(node.mChildren);
|
||||
// mIndex = std::move(node.mIndex);
|
||||
// mChildMask = std::move(node.mChildMask);
|
||||
// mLevel = std::move(node.mLevel);
|
||||
// mTree = std::move(node.mTree);
|
||||
//}
|
||||
|
||||
//************************************
|
||||
// Destroys the current node
|
||||
//************************************
|
||||
Node::~Node() {
|
||||
//printf("Help I'm being killed!\n");
|
||||
}
|
||||
|
||||
void Node::Traverse(const std::function<void(const Node*)>& f) const
|
||||
{
|
||||
f(this);
|
||||
|
||||
unsigned8 childCount = GetChildCount();
|
||||
for (unsigned8 i = 0; i < childCount; i++)
|
||||
BaseTree::Get(mTree)->GetNode(mChildren[i])->Traverse(f);
|
||||
}
|
||||
|
||||
unsigned64 Node::GetOctreeNodeCount(bool (*f)(const Node*)) const
|
||||
{
|
||||
unsigned8 childCount = GetChildCount();
|
||||
unsigned64 nodeCount = f(this) ? 1 : 0;
|
||||
for (unsigned i = 0; i < childCount; i++)
|
||||
nodeCount += BaseTree::Get(mTree)->GetNode(mChildren[i])->GetOctreeNodeCount(f);
|
||||
return nodeCount;
|
||||
}
|
||||
unsigned64 Node::GetLeafVoxelCount() const
|
||||
{
|
||||
if (mChildMask.mask == 0 || mLevel == BaseTree::Get(mTree)->GetMaxLevel()) // This is a leaf node!
|
||||
return 1;
|
||||
|
||||
unsigned long long res = 0;
|
||||
unsigned8 childCount = mChildMask.GetSetBefore(8);
|
||||
for (unsigned i = 0; i < childCount; i++)
|
||||
res += BaseTree::Get(mTree)->GetNode(mChildren[i])->GetLeafVoxelCount();
|
||||
return res;
|
||||
}
|
||||
|
||||
Node* Node::AddChild(const ChildIndex index) {
|
||||
Node* curChild = GetChild(index);
|
||||
if (curChild == NULL) {
|
||||
curChild = BaseTree::Get(mTree)->Create(mLevel + 1);
|
||||
SetChild(index, curChild);
|
||||
}
|
||||
return curChild;
|
||||
}
|
||||
|
||||
Node* Node::GetChild(const ChildIndex index) const
|
||||
{
|
||||
if (!HasChild(index))
|
||||
return NULL;
|
||||
ChildIndex vIndex = mChildMask.GetSetBefore(index);
|
||||
return BaseTree::Get(mTree)->GetNode(mChildren[vIndex]);
|
||||
}
|
||||
|
||||
void Node::SetChild(const ChildIndex index, Node* child)
|
||||
{
|
||||
SetChildIndex(index, child->GetIndex());
|
||||
}
|
||||
|
||||
void Node::SetChildIndex(const ChildIndex c, const unsigned32 index)
|
||||
{
|
||||
bool isNew = !HasChild(c);
|
||||
unsigned8 oldChildCount = mChildMask.GetSet();
|
||||
mChildMask.Set(c, true);
|
||||
ChildIndex vIndex = mChildMask.GetSetBefore(c);
|
||||
if (isNew)
|
||||
{
|
||||
#ifdef USE_DYNAMIC_ARRAY
|
||||
mChildren.Insert(vIndex, index, oldChildCount);
|
||||
#else
|
||||
mChildren.insert(mChildren.begin() + vIndex, index);
|
||||
#endif
|
||||
}
|
||||
else
|
||||
mChildren[vIndex] = index;
|
||||
}
|
||||
|
||||
void Node::SetChildren(ChildMask mask, const unsigned* children)
|
||||
{
|
||||
unsigned8 oldChildCount = mChildMask.GetSet();
|
||||
unsigned8 newChildCount = mask.GetSet();
|
||||
mChildMask = mask;
|
||||
#ifdef USE_DYNAMIC_ARRAY
|
||||
if (oldChildCount != newChildCount)
|
||||
mChildren.Resize(oldChildCount, newChildCount);
|
||||
mChildren.SetRange(children, 0, newChildCount);
|
||||
#else
|
||||
mChildren.resize(newChildCount);
|
||||
for (ChildIndex c = 0; c < newChildCount; c++)
|
||||
mChildren[c] = children[c];
|
||||
#endif
|
||||
}
|
||||
|
||||
void Node::MoveToTree(BaseTree* tree)
|
||||
{
|
||||
assert(typeid(*(BaseTree::Get(mTree))) == typeid(*tree));
|
||||
mTree = tree->GetTreeIndex();
|
||||
}
|
||||
|
||||
|
||||
//************************************
|
||||
// Adds the given node at the given coordinates: recursively adds parents in top-down fashion
|
||||
//************************************
|
||||
Node* Node::AddNode(glm::uvec3 coordinates, const unsigned8 level) {
|
||||
if (GetLevel() >= level)
|
||||
{
|
||||
if (coordinates.x != 0 || coordinates.y != 0 || coordinates.z != 0)
|
||||
printf("Unexpected root coordinate (%d, %d, %d)\n", coordinates.x, coordinates.y, coordinates.z);
|
||||
return this;
|
||||
}
|
||||
|
||||
ChildIndex child = GetChildIndex(coordinates, level);
|
||||
unsigned32 mask = BitHelper::GetLSMask<unsigned32>(0, level - GetLevel() - 1);
|
||||
coordinates.x &= mask;
|
||||
coordinates.y &= mask;
|
||||
coordinates.z &= mask;
|
||||
return AddChild(child)->AddNode(coordinates, level);
|
||||
}
|
||||
|
||||
bool Node::HasNode(glm::uvec3 coord, const unsigned8 level) const
|
||||
{
|
||||
// If we reached a node at the wanted level, return true
|
||||
if (mLevel >= level) return true;
|
||||
|
||||
// Get the wanted child
|
||||
ChildIndex child = GetChildIndex(coord, level);
|
||||
if (HasChild(child))
|
||||
{
|
||||
unsigned32 mask = BitHelper::GetLSMask<unsigned32>(0, level - this->GetLevel() - 1);
|
||||
coord.x &= mask;
|
||||
coord.y &= mask;
|
||||
coord.z &= mask;
|
||||
return GetChild(child)->HasNode(coord, level);
|
||||
}
|
||||
else return false;
|
||||
}
|
||||
|
||||
ChildIndex Node::GetChildIndex(const glm::uvec3 coord, const unsigned8 level) const
|
||||
{
|
||||
unsigned range = 1 << (level - this->GetLevel() - 1);
|
||||
return (coord.x < range ? 0 : 1)
|
||||
+ (coord.y < range ? 0 : 2)
|
||||
+ (coord.z < range ? 0 : 4);
|
||||
}
|
||||
|
||||
void Node::WriteProperties(std::ostream& file) {}
|
||||
void Node::ReadProperties(std::istream& file) {}
|
||||
void Node::CopyProperties(Node* source) {}
|
||||
|
||||
bool Node::Compare(const Node& node) const
|
||||
{
|
||||
// Then on childmask
|
||||
unsigned8 nodeMask = node.GetChildmask().mask;
|
||||
if (mChildMask.mask != nodeMask)
|
||||
// Numerically compare the childmasks: this returns true exactly when the highest differing bit is set in the other childmask and not in this one
|
||||
return mChildMask.mask < nodeMask;
|
||||
|
||||
// Then on child pointer values
|
||||
unsigned8 childCount = mChildMask.GetSet();
|
||||
for (unsigned8 i = 0; i < childCount; i++)
|
||||
{
|
||||
// Cast pointer to unsigned number
|
||||
auto aPtr = this->mChildren[i];
|
||||
auto bPtr = node.mChildren[i];
|
||||
if (aPtr != bPtr) return aPtr < bPtr;
|
||||
}
|
||||
|
||||
// Apparently the nodes are equal
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Node::Equals(const Node& node) const
|
||||
{
|
||||
if (this == &node) // Same address == same node
|
||||
return true;
|
||||
if (this->GetLevel() != node.GetLevel() || this->GetChildmask().mask != node.GetChildmask().mask)
|
||||
return false;
|
||||
unsigned8 childCount = mChildMask.GetSet();
|
||||
for (unsigned8 i = 0; i < childCount; i++)
|
||||
{
|
||||
if (this->mChildren[i] != node.mChildren[i])
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
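AddNode and HasNode above descend one octant per level: the top remaining bit of each coordinate picks the child (+1/+2/+4 for x/y/z), and a mask keeps only the lower bits for the next step. A self-contained sketch of that descent (hypothetical names, no project headers):

#include <cstdio>

// Select the octant of a coordinate at a given depth below the current node,
// mirroring Node::GetChildIndex: high bit set -> +1 / +2 / +4 for x / y / z.
unsigned ChildIndexFor(unsigned x, unsigned y, unsigned z,
                       unsigned currentLevel, unsigned targetLevel) {
    unsigned range = 1u << (targetLevel - currentLevel - 1);
    return (x < range ? 0u : 1u) + (y < range ? 0u : 2u) + (z < range ? 0u : 4u);
}

int main() {
    // Descend from the root (level 0) to level 3 for coordinate (5, 2, 7).
    unsigned x = 5, y = 2, z = 7, level = 0, target = 3;
    while (level < target) {
        unsigned child = ChildIndexFor(x, y, z, level, target);
        unsigned mask = (1u << (target - level - 1)) - 1u;  // keep the lower bits only
        x &= mask; y &= mask; z &= mask;
        std::printf("level %u -> child %u\n", level, child);
        level++;
    }
}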
114
Research/scene/Octree/Node.h
Normal file
@@ -0,0 +1,114 @@
|
||||
#pragma once
|
||||
|
||||
// Dynamic arrays are more memory efficient (as they don't require explicitly storing the capacity, size, etc)
|
||||
// But using them is less safe. On Windows it seems to work fine, but segmentation faults occur on Linux when using dynamic arrays.
|
||||
#define USE_DYNAMIC_ARRAY
|
||||
|
||||
#include <vector>
|
||||
#include <functional>
|
||||
#include <typeinfo>
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../core/Util/SmallDynamicArray.h"
|
||||
#include "ChildMask.h"
|
||||
|
||||
class BaseTree;
|
||||
|
||||
class Node {
|
||||
public:
|
||||
Node(const BaseTree* rootIndex, unsigned8 level = 0);
|
||||
//Node(const Node& node); // Copy ctor
|
||||
Node(Node&& node)
|
||||
{
|
||||
mChildren = std::move(node.mChildren);
|
||||
mIndex = std::move(node.mIndex);
|
||||
mChildMask = std::move(node.mChildMask);
|
||||
mLevel = std::move(node.mLevel);
|
||||
mTree = std::move(node.mTree);
|
||||
}// Move ctor
|
||||
|
||||
//Node(const Node& node) {
|
||||
// mChildren = node.mChildren);
|
||||
//}
|
||||
//Node& operator=(const Node&) = default;
|
||||
|
||||
~Node();
|
||||
|
||||
//Node& operator=(const Node& other); // Copy assignment
|
||||
Node& operator=(Node&& other) // Move assignment
|
||||
{
|
||||
mChildren = std::move(other.mChildren);
|
||||
mIndex = std::move(other.mIndex);
|
||||
mChildMask = std::move(other.mChildMask);
|
||||
mLevel = std::move(other.mLevel);
|
||||
mTree = std::move(other.mTree);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void SetChild(const ChildIndex index, Node* child);
|
||||
Node* GetChild(const ChildIndex index) const;
|
||||
inline bool HasChild(const ChildIndex index) const { return mChildMask.Get(index); }
|
||||
inline unsigned GetChildIndex(const ChildIndex index) const
|
||||
{
|
||||
if (!HasChild(index))
|
||||
return 0;
|
||||
ChildIndex vIndex = mChildMask.GetSetBefore(index);
|
||||
return mChildren[vIndex];
|
||||
}
|
||||
|
||||
inline bool HasChildren() const { return mChildMask.mask != 0; }
|
||||
inline unsigned* GetChildren() const { return (unsigned*)&mChildren[0]; }
|
||||
// Replaces the current children of this node by the children in the given array of children.
|
||||
// The given array should have at least as many members as the number of set bits in the new child mask.
|
||||
void SetChildren(ChildMask mask, const unsigned* children);
|
||||
|
||||
void MoveToTree(BaseTree* tree);
|
||||
|
||||
// Returns the number of direct children this node has (i.e. the number of set bits in the childmask)
|
||||
inline unsigned8 GetChildCount() const { return mChildMask.GetSet(); }
|
||||
|
||||
inline unsigned8 GetLevel() const { return mLevel; }
|
||||
inline void SetLevel(unsigned8 level) { mLevel = level; }
|
||||
inline unsigned GetIndex() const { return mIndex; }
|
||||
inline void SetIndex(unsigned32 index) { mIndex = index; }
|
||||
// If the tree type is equal to the current tree type, moves the node to the new tree.
|
||||
inline ChildMask GetChildmask() const { return mChildMask; }
|
||||
|
||||
// Returns the total number of octree nodes (including the current node) that can be reached from this node
|
||||
unsigned64 GetOctreeNodeCount(bool (*f)(const Node*)) const;
|
||||
unsigned64 GetOctreeNodeCount() const { return GetOctreeNodeCount([](const Node* node) { return true; }); }
|
||||
|
||||
// Returns the number of leaf voxels in this octree (if it wasn't compressed)
|
||||
unsigned64 GetLeafVoxelCount() const;
|
||||
|
||||
// Returns true if this node is smaller than the other node. Used for sorting
|
||||
bool Compare(const Node& node) const;
|
||||
// Returns true if this node is equal to the other node
|
||||
bool Equals(const Node& node) const;
|
||||
|
||||
// Adds a node to the tree at the given coordinate and level. If a node already exists at the given coordinate,
|
||||
// the existing node is returned
|
||||
Node* AddNode(glm::uvec3 coordinates, const unsigned8 level);
|
||||
bool HasNode(glm::uvec3 coord, const unsigned8 level) const;
|
||||
ChildIndex GetChildIndex(const glm::uvec3 coord, const unsigned8 level) const;
|
||||
void SetChildIndex(const ChildIndex index, const unsigned32 childIndex);
|
||||
|
||||
void Traverse(const std::function<void(const Node*)>& f) const;
|
||||
|
||||
void WriteProperties(std::ostream& file);
|
||||
void ReadProperties(std::istream& file);
|
||||
void CopyProperties(Node* source);
|
||||
protected:
|
||||
Node* AddChild(const ChildIndex index);
|
||||
|
||||
#ifdef USE_DYNAMIC_ARRAY
|
||||
SmallDynamicArray<unsigned32> mChildren; // Array (with length of the number of children), containing the indices of child nodes
|
||||
#else
|
||||
std::vector<unsigned32> mChildren;
|
||||
#endif
|
||||
|
||||
unsigned32 mIndex;
|
||||
ChildMask mChildMask;
|
||||
unsigned8 mLevel; // the level of the current node, where the root has level 0
|
||||
unsigned16 mTree; // the tree to which this node belongs
|
||||
};
|
||||
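A node stores an 8-bit childmask plus a compact array holding only the children that exist; the array slot of octant c is the number of mask bits set below c, which is what ChildMask::GetSetBefore computes. A self-contained sketch of that lookup with a plain bit count standing in for the ChildMask helper:

#include <cstdio>
#include <vector>

// Count the set bits of `mask` strictly below bit `index` (a ChildMask::GetSetBefore analogue).
unsigned SetBefore(unsigned char mask, unsigned index) {
    unsigned count = 0;
    for (unsigned b = 0; b < index; b++)
        if (mask & (1u << b)) count++;
    return count;
}

int main() {
    // Children exist at octants 1, 4 and 6 -> childmask 0b01010010, compact array of size 3.
    unsigned char childMask = 0b01010010;
    std::vector<unsigned> children = {100, 200, 300};  // pool indices of the three children

    for (unsigned c = 0; c < 8; c++) {
        if (childMask & (1u << c))
            std::printf("octant %u -> child pool index %u\n", c, children[SetBefore(childMask, c)]);
        else
            std::printf("octant %u -> no child\n", c);
    }
}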
205
Research/scene/Octree/NodeReplacementFinder.h
Normal file
@@ -0,0 +1,205 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
#include <assert.h>
|
||||
#include <functional>
|
||||
|
||||
#include "../../core/Defines.h"
|
||||
|
||||
// Searchtree based class that finds all fitting nodes for a certain value.
|
||||
// A function needs to be given that hashes a value into a set of hashes (all values should give a set with the same length!)
|
||||
// A hash value of 0 acts as a wildcard and matches anything; all other hashes have to match exactly.
|
||||
template<typename K, typename V>
|
||||
class NodeReplacementFinder
|
||||
{
|
||||
private:
|
||||
struct SearchTreeNode
|
||||
{
|
||||
private:
|
||||
std::unordered_map<K, SearchTreeNode*> children;
|
||||
union WildCardOrValue
|
||||
{
|
||||
SearchTreeNode* wildcard;
|
||||
V value;
|
||||
};
|
||||
WildCardOrValue wildCardOrValue;
|
||||
bool isLeaf;
|
||||
|
||||
V GetValue() const
|
||||
{
|
||||
assert(isLeaf);
|
||||
return wildCardOrValue.value;
|
||||
}
|
||||
|
||||
void SetValue(V value)
|
||||
{
|
||||
assert(isLeaf);
|
||||
wildCardOrValue.value = value;
|
||||
}
|
||||
|
||||
SearchTreeNode* GetWildCard() const
|
||||
{
|
||||
assert(!isLeaf);
|
||||
return wildCardOrValue.wildcard;
|
||||
}
|
||||
|
||||
void SetWildCard(SearchTreeNode* wildcard)
|
||||
{
|
||||
assert(!isLeaf);
|
||||
wildCardOrValue.wildcard = wildcard;
|
||||
}
|
||||
|
||||
|
||||
SearchTreeNode* GetChild(unsigned32 key) const
|
||||
{
|
||||
// If the key is a wildcard, return the wildcardnode
|
||||
if (key == 0) return GetWildCard();
|
||||
|
||||
// Otherwise, find the correct child
|
||||
auto it = children.find(key);
|
||||
if (it == children.end()) return NULL;
|
||||
else return (*it).second;
|
||||
}
|
||||
|
||||
SearchTreeNode* AddChild(unsigned32 key, bool isLeaf)
|
||||
{
|
||||
SearchTreeNode* child = GetChild(key);
|
||||
if (child != NULL) return child;
|
||||
|
||||
SearchTreeNode* newChild = new SearchTreeNode(isLeaf);
|
||||
if (key == 0) SetWildCard(newChild);
|
||||
else children.insert(std::make_pair(key, newChild));
|
||||
return newChild;
|
||||
}
|
||||
|
||||
// Recursively call add, updating the index to make sure the correct key is used
|
||||
void Add(const std::vector<unsigned32>& keys, const V& value, size_t index)
|
||||
{
|
||||
if (isLeaf) SetValue(value);
|
||||
else
|
||||
{
|
||||
unsigned32 curKey = keys[index];
|
||||
SearchTreeNode* child = AddChild(curKey, index == keys.size() - 1);
|
||||
child->Add(keys, value, index + 1);
|
||||
}
|
||||
}
|
||||
|
||||
void Find(const std::vector<unsigned32>& keys, size_t index, std::vector<V>& out) const
|
||||
{
|
||||
if (isLeaf) out.push_back(GetValue());
|
||||
else
|
||||
{
|
||||
unsigned32 curKey = keys[index];
|
||||
if (curKey == 0)
|
||||
{
|
||||
// if the current key is a wildcard, explore all possible paths:
|
||||
for (auto child : children)
|
||||
child.second->Find(keys, index + 1, out);
|
||||
}
|
||||
else
|
||||
{
|
||||
// If the current key isn't a wildcard, only explore the current key and the wildcard key
|
||||
auto curKeyChild = GetChild(curKey);
|
||||
if (curKeyChild != NULL) curKeyChild->Find(keys, index + 1, out);
|
||||
}
|
||||
SearchTreeNode* wildcard = GetWildCard();
|
||||
if (wildcard != NULL) wildcard->Find(keys, index + 1, out);
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the node can be removed safely
|
||||
bool Remove(const std::vector<unsigned32>& keys, size_t index)
|
||||
{
|
||||
if (isLeaf) return true;
|
||||
else
|
||||
{
|
||||
unsigned32 curKey = keys[index];
|
||||
auto child = GetChild(curKey);
|
||||
if (child != NULL)
|
||||
{
|
||||
bool canDelete = child->Remove(keys, index + 1);
|
||||
if (canDelete)
|
||||
{
|
||||
if (curKey == 0) SetWildCard(NULL);
|
||||
else children.erase(children.find(curKey));
|
||||
|
||||
delete child;
|
||||
}
|
||||
}
|
||||
}
|
||||
return children.empty();
|
||||
}
|
||||
|
||||
|
||||
public:
|
||||
SearchTreeNode(bool isLeaf) : isLeaf(isLeaf)
|
||||
{
|
||||
if (!isLeaf)
|
||||
{
|
||||
children = std::unordered_map<K, SearchTreeNode*>();
|
||||
SetWildCard(NULL);
|
||||
}
|
||||
else
|
||||
{
|
||||
SetValue(V());
|
||||
}
|
||||
}
|
||||
~SearchTreeNode()
|
||||
{
|
||||
if (!isLeaf)
|
||||
{
|
||||
for (auto child : children)
|
||||
delete child.second;
|
||||
}
|
||||
}
|
||||
|
||||
void Add(const std::vector<unsigned32>& keys, V value)
|
||||
{
|
||||
Add(keys, value, 0);
|
||||
}
|
||||
|
||||
void Remove(const std::vector<unsigned32>& keys)
|
||||
{
|
||||
Remove(keys, 0);
|
||||
}
|
||||
|
||||
std::vector<V> Find(std::vector<unsigned32> keys) const
|
||||
{
|
||||
std::vector<V> res;
|
||||
Find(keys, 0, res);
|
||||
return res;
|
||||
}
|
||||
};
|
||||
std::function<std::vector<K>(const V)> GetKeys;
|
||||
SearchTreeNode* root;
|
||||
public:
|
||||
NodeReplacementFinder(std::function<std::vector<K>(const V)> keyGetter)
|
||||
{
|
||||
GetKeys = keyGetter;
|
||||
root = new SearchTreeNode(false);
|
||||
}
|
||||
~NodeReplacementFinder()
|
||||
{
|
||||
delete root;
|
||||
}
|
||||
|
||||
void Add(const V value)
|
||||
{
|
||||
std::vector<K> keys = GetKeys(value);
|
||||
root->Add(keys, value);
|
||||
}
|
||||
|
||||
void Remove(const V value)
|
||||
{
|
||||
std::vector<K> keys = GetKeys(value);
|
||||
root->Remove(keys);
|
||||
}
|
||||
|
||||
std::vector<V> Find(const V value) const
|
||||
{
|
||||
std::vector<K> keys = GetKeys(value);
|
||||
return root->Find(keys);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
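NodeReplacementFinder above indexes values by a fixed-length vector of hashes in which the value 0 acts as a wildcard on both the stored side and the query side. A minimal sketch of the matching rule itself, using a linear scan over flat key vectors instead of the search tree:

#include <cstdio>
#include <vector>

// Two fixed-length key vectors match if, position by position, they are equal
// or either side is the wildcard value 0 — the same rule the search tree encodes.
bool Matches(const std::vector<unsigned>& a, const std::vector<unsigned>& b) {
    if (a.size() != b.size()) return false;
    for (size_t i = 0; i < a.size(); i++)
        if (a[i] != 0 && b[i] != 0 && a[i] != b[i]) return false;
    return true;
}

int main() {
    std::vector<std::vector<unsigned>> stored = {
        {5, 0, 7},   // wildcard in the middle
        {5, 2, 7},
        {1, 2, 3},
    };
    std::vector<unsigned> query = {5, 2, 0};  // wildcard in the last position
    for (size_t i = 0; i < stored.size(); i++)
        if (Matches(stored[i], query))
            std::printf("stored entry %zu fits the query\n", i);
}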
47
Research/scene/Octree/NodeSmall.h
Normal file
@@ -0,0 +1,47 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include "../../inc/glm/glm.hpp"
|
||||
#include "../../core/Defines.h"
|
||||
#include "Root.h"
|
||||
|
||||
class NodeSmall {
|
||||
public:
|
||||
NodeSmall(Root* root, unsigned8 level = 0) {}
|
||||
~NodeSmall() {}
|
||||
|
||||
//Node* GetChild(ChildIndex index);
|
||||
//bool HasChild(ChildIndex index);
|
||||
//void SetChild(ChildIndex index, Node* child);
|
||||
//// Returns the number of direct children this node has (e.g. the number of "1" in the childmask)
|
||||
//unsigned8 GetChildCount();
|
||||
//unsigned8 GetLevel();
|
||||
//// Returns the number of leaf voxels in this octree (if it wasn't compressed)
|
||||
//virtual unsigned long long GetLeafVoxelCount();
|
||||
|
||||
//// Returns true if this node is smaller than the other node. Used for sorting
|
||||
//virtual bool Compare(Node* node);
|
||||
//// Returns true if this node is equal to the other node
|
||||
//virtual bool Equals(Node* node);
|
||||
|
||||
//// Recursively set the level of the current node
|
||||
//void SetLevel(unsigned8 level);
|
||||
|
||||
//ChildMask GetChildmask();
|
||||
|
||||
//virtual void WriteProperties(std::ostream& file);
|
||||
//virtual void ReadProperties(std::istream& file);
|
||||
//virtual void CopyProperties(Node* source);
|
||||
protected:
|
||||
|
||||
//Node* AddChild(ChildIndex index);
|
||||
//// Adds the the given to the tree at the given coordinate. The node should specify at which level it needs to be added. If a node already exists at the given coordinate,
|
||||
//// false is returned
|
||||
//Node* AddNode(glm::uvec3 coordinates, unsigned8 level);
|
||||
|
||||
Root* mRoot; // the tree to which this node belongs
|
||||
|
||||
ChildMask mChildMask;
|
||||
std::vector<Node*> mChildren; // index of children, NULL if leaf node
|
||||
unsigned8 mLevel; // the level of the current node, where the root has level 0
|
||||
private:
|
||||
};
|
||||
965
Research/scene/Octree/Tree.h
Normal file
@@ -0,0 +1,965 @@
|
||||
#pragma once
|
||||
#include <deque>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
#include <unordered_set>
|
||||
#include <numeric>
|
||||
#include <functional>
|
||||
#include "BaseTree.h"
|
||||
#include "../../inc/tbb/parallel_sort.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../../inc/tbb/concurrent_queue.h"
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../core/CollectionHelper.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
#include "../../core/Util/ObjectPool.h"
|
||||
|
||||
template<typename NodeType = Node>
|
||||
class Tree : public BaseTree {
|
||||
public:
|
||||
Tree(unsigned8 maxLevel) :
|
||||
BaseTree(),
|
||||
mLeafNode(0),
|
||||
mLeafsAreEqual(true), // By default leaf nodes are equal. Override this in the constructor of trees that don't have this property
|
||||
mMaxLevel(maxLevel),
|
||||
mNodePool(ObjectPool<NodeType>(/*1000000*/)),
|
||||
mSortedOnLevel(true)
|
||||
{
|
||||
// Create the root
|
||||
Create(0);
|
||||
}
|
||||
|
||||
~Tree() override
|
||||
{
|
||||
Clear();
|
||||
}
|
||||
|
||||
const Node* GetNode(const unsigned32 index) const override { return mNodePool[index]; }
|
||||
Node* GetNode(const unsigned32 index) override { return mNodePool[index]; }
|
||||
inline const NodeType* GetTypedNode(const unsigned32 index) const { return mNodePool[index]; }
|
||||
inline NodeType* GetTypedNode(const unsigned32 index) { return mNodePool[index]; }
|
||||
inline const NodeType* GetRoot() const { return mNodePool[0]; /*The first node in the nodepool is ALWAYS the root.*/ }
|
||||
inline NodeType* GetRoot() { return mNodePool[0]; }
|
||||
|
||||
inline unsigned8 GetMaxLevel() const override { return mMaxLevel; }
|
||||
unsigned32 GetNodeCount() const override { return (unsigned32)mNodePool.Size(); }
|
||||
inline bool IsEmpty() const override { return !GetRoot()->HasChildren(); }
|
||||
|
||||
bool HasLeaf(const glm::uvec3& coord) const { return HasNode(coord, GetMaxLevel()); }
|
||||
NodeType* AddLeafNode(const glm::uvec3& coord) { return AddNode(coord, GetMaxLevel()); }
|
||||
bool HasNode(const glm::uvec3& coord, const unsigned8 level) const { return GetRoot()->HasNode(coord, level); }
|
||||
NodeType* AddNode(const glm::uvec3& coord, const unsigned8 level) {
|
||||
NodeType* root = GetRoot();
|
||||
return (NodeType*)(root->AddNode(coord, level));
|
||||
}
|
||||
|
||||
void Traverse(const std::function<void(const Node*)>& f) const { GetRoot()->Traverse(f); }
|
||||
|
||||
std::vector<size_t> GetNodesPerLevel() const override
|
||||
{
|
||||
std::vector<size_t> nodesPerLevel;
|
||||
nodesPerLevel.resize(GetMaxLevel() + 1, 0);
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
nodesPerLevel[GetTypedNode(i)->GetLevel()]++;
|
||||
return nodesPerLevel;
|
||||
}
|
||||
size_t GetNodesInLevel(const unsigned8 level) const
|
||||
{
|
||||
size_t res = 0;
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
if (GetTypedNode(i)->GetLevel() == level) res++;
|
||||
return res;
|
||||
}
|
||||
virtual std::vector<size_t> GetOctreeNodesPerLevel() const override
|
||||
{
|
||||
std::vector<size_t> octreeNodesPerLevel(GetMaxLevel() + 1);
|
||||
std::function<void(const Node*)> nodeCounter = [&octreeNodesPerLevel](const Node* node) -> void
|
||||
{
|
||||
octreeNodesPerLevel[node->GetLevel()]++;
|
||||
};
|
||||
this->Traverse(nodeCounter);
|
||||
return octreeNodesPerLevel;
|
||||
}
|
||||
|
||||
virtual unsigned64 GetPointerCount() const override
|
||||
{
|
||||
unsigned64 res = 0;
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
res += GetTypedNode(i)->GetChildCount();
|
||||
return res;
|
||||
}
|
||||
|
||||
// Returns, for each node, a list of its parents (O(N)). To find the parents of a node, use its index (node->GetIndex())
|
||||
std::vector<std::vector<std::pair<unsigned, ChildIndex>>> GetParentMap() const
|
||||
{
|
||||
std::vector<std::vector<std::pair<unsigned, ChildIndex>>> parentMap(GetNodeCount(), std::vector<std::pair<unsigned, ChildIndex>>());
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
if (node->HasChild(c))
|
||||
parentMap[node->GetChildIndex(c)].push_back(std::make_pair(i, c));
|
||||
}
|
||||
return parentMap;
|
||||
}
|
||||
|
||||
// Counts the number of parents each node has (i.e. how many pointers there are to each node). For a regular octree this is 1, but for a DAG it need not be.
|
||||
std::vector<size_t> GetParentCounts() const override
|
||||
{
|
||||
std::vector<size_t> parentCounts(GetNodeCount(), 0);
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
const NodeType* node = GetTypedNode(i);
|
||||
unsigned32* children = node->GetChildren();
|
||||
for (ChildIndex c = 0; c < node->GetChildCount(); c++)
|
||||
parentCounts[children[c]]++;
|
||||
}
|
||||
return parentCounts;
|
||||
}
|
||||
|
||||
unsigned64 GetLeafVoxelCount() const
|
||||
{
|
||||
std::vector<unsigned64> leafCountTable(GetNodeCount(), 1);
|
||||
for (unsigned8 level = GetMaxLevel(); level-- > 0;)
|
||||
{
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
const NodeType* node = GetTypedNode(i);
|
||||
if (node->GetLevel() == level)
|
||||
{
|
||||
unsigned64 sum = 0;
|
||||
unsigned32* children = node->GetChildren();
|
||||
for (ChildIndex c = 0; c < node->GetChildCount(); c++)
|
||||
sum += leafCountTable[children[c]];
|
||||
leafCountTable[i] = sum;
|
||||
}
|
||||
}
|
||||
}
|
||||
return leafCountTable[0];
|
||||
}
|
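// Note (added explanation, not in the original sources): the table above is filled bottom-up, so in a DAG a
// shared subtree contributes once to every parent that references it. The result is therefore the leaf-voxel
// count of the fully expanded octree, not the number of unique leaf nodes stored in the pool.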
||||
|
||||
bool ReadTree(const char* fileName) override
|
||||
{
|
||||
std::string binFileName = GetOctreeFileName(fileName);
|
||||
std::ifstream treeInFile(binFileName, std::ios::binary);
|
||||
|
||||
if (treeInFile.good()) {
|
||||
bool success = Deserialize(treeInFile);
|
||||
|
||||
// Finished reading
|
||||
treeInFile.close();
|
||||
return success;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
bool WriteTree(const char* fileName) override
|
||||
{
|
||||
std::string binFileName = GetOctreeFileName(fileName);
|
||||
std::ofstream treeOutFile(binFileName, std::ios::binary);
|
||||
|
||||
bool success = Serialize(treeOutFile);
|
||||
|
||||
treeOutFile.close();
|
||||
return success;
|
||||
}
|
||||
|
||||
bool VerifyTree(const char* fileName) override
|
||||
{
|
||||
std::string binFileName = GetOctreeFileName(fileName);
|
||||
std::ifstream treeInFile(binFileName, std::ios::binary);
|
||||
|
||||
if (treeInFile.good()) {
|
||||
// Read the maximum level in the tree
|
||||
unsigned8 maxLevel;
|
||||
Serializer<unsigned8>::Deserialize(maxLevel, treeInFile);
|
||||
this->mMaxLevel = maxLevel;
|
||||
|
||||
// Read the nodes per level
|
||||
std::vector<unsigned> nodesPerLevel(maxLevel + 1);
|
||||
Serializer<unsigned*>::Deserialize(&nodesPerLevel[0], mMaxLevel + 1, treeInFile);
|
||||
|
||||
// Sum the nodes per level to get the total number of nodes
|
||||
size_t nodeCount = std::accumulate(nodesPerLevel.begin(), nodesPerLevel.end(), 0);
|
||||
|
||||
printf(".");
|
||||
|
||||
// Read the childmasks for all nodes (needed to know which pointers exist)
|
||||
std::vector<ChildMask> childmasks(nodeCount);
|
||||
Serializer<ChildMask*>::Deserialize(&childmasks[0], nodeCount, treeInFile);
|
||||
printf(".");
|
||||
|
||||
// Read all pointers
|
||||
unsigned* newChildren = new unsigned[8];
|
||||
for (ChildMask mask : childmasks)
|
||||
{
|
||||
if (treeInFile.eof()) return false;
|
||||
ChildIndex childCount = mask.GetSet();
|
||||
Serializer<unsigned*>::Deserialize(newChildren, childCount, treeInFile);
|
||||
|
||||
for (ChildIndex i = 0; i < childCount; i++)
|
||||
{
|
||||
if (newChildren[i] >= nodeCount)
|
||||
{
|
||||
printf("Node index out of bounds: %u", newChildren[i]);
|
||||
delete[] newChildren;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
delete[] newChildren;
|
||||
printf("..");
|
||||
|
||||
// Finished reading
|
||||
treeInFile.close();
|
||||
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Serialize(std::ostream& treeOutFile)
|
||||
{
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
// Write the total number of levels in the tree
|
||||
unsigned8 maxLevel = GetMaxLevel();
|
||||
Serializer<unsigned8>::Serialize(maxLevel, treeOutFile);
|
||||
|
||||
// Write the nodes per level
|
||||
unsigned nodesThisLevel;
|
||||
for (unsigned8 level = 0; level < maxLevel + 1; level++)
|
||||
{
|
||||
nodesThisLevel = (unsigned)(levelIndices[level + 1] - levelIndices[level]);
|
||||
Serializer<unsigned32>::Serialize(nodesThisLevel, treeOutFile);
|
||||
}
|
||||
|
||||
// Gather the child masks for all nodes:
|
||||
std::vector<ChildMask> childMasks(GetNodeCount());
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
childMasks[i] = GetTypedNode(i)->GetChildmask();
|
||||
});
|
||||
|
||||
Serializer<ChildMask*>::Serialize(&childMasks[0], childMasks.size(), treeOutFile);
|
||||
|
||||
// Write child pointers of all nodes
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
ChildMask mask = node->GetChildmask();
|
||||
unsigned32* children = node->GetChildren();
|
||||
Serializer<unsigned32*>::Serialize(children, mask.GetSet(), treeOutFile);
|
||||
}
|
||||
|
||||
// Hack: first write the root properties, then the tree properties, then the rest of the nodes
|
||||
// This is done to remain compatible with files written by previous versions
|
||||
|
||||
GetRoot()->WriteProperties(treeOutFile);
|
||||
WriteProperties(treeOutFile);
|
||||
// Write extra properties per node
|
||||
for (unsigned32 i = 1; i < GetNodeCount(); i++)
|
||||
GetTypedNode(i)->WriteProperties(treeOutFile);
|
||||
|
||||
return true;
|
||||
}
|
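// Summary of the stream layout produced above (added for reference; field widths follow the Serializer calls in this method):
//
//   [unsigned8  maxLevel]
//   [unsigned32 nodesPerLevel[0] .. nodesPerLevel[maxLevel]]
//   [ChildMask  childmask of every node, in pool order]
//   [unsigned32 child pointers of every node, one per set bit in its childmask]
//   [root properties][tree properties][properties of nodes 1 .. nodeCount-1]
//
// Deserialize below reads the stream back in exactly this order.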
||||
|
||||
bool Deserialize(std::istream& treeInFile)
|
||||
{
|
||||
this->Clear();
|
||||
this->InitializeReadTree();
|
||||
|
||||
// Read the maximum level in the tree
|
||||
Serializer<unsigned8>::Deserialize(mMaxLevel, treeInFile);
|
||||
|
||||
// Read the nodes per level
|
||||
std::vector<unsigned> nodesPerLevel(mMaxLevel + 1);
|
||||
Serializer<unsigned*>::Deserialize(&nodesPerLevel[0], mMaxLevel + 1, treeInFile);
|
||||
|
||||
// Sum the nodes per level to get the total number of nodes
|
||||
unsigned32 nodeCount = std::accumulate(nodesPerLevel.begin(), nodesPerLevel.end(), 0);
|
||||
|
||||
printf(".");
|
||||
|
||||
// Read the childmasks for all nodes (needed to know which pointers exist)
|
||||
std::vector<ChildMask> childmasks(nodeCount);
|
||||
Serializer<ChildMask*>::Deserialize(&childmasks[0], nodeCount, treeInFile);
|
||||
|
||||
for (unsigned8 level = 0; level <= mMaxLevel; level++)
|
||||
for (unsigned i = 0; i < nodesPerLevel[level]; i++)
|
||||
// Create the node. Creating them in order also adds them to the node pool in order.
|
||||
Create(level);
|
||||
printf(".");
|
||||
|
||||
// Read all pointers
|
||||
unsigned* newChildren = new unsigned[8];
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
ChildMask mask = childmasks[i];
|
||||
Serializer<unsigned*>::Deserialize(newChildren, mask.GetSet(), treeInFile);
|
||||
GetTypedNode(i)->SetChildren(mask, newChildren);
|
||||
}
|
||||
delete[] newChildren;
|
||||
printf(".");
|
||||
|
||||
GetRoot()->ReadProperties(treeInFile);
|
||||
ReadProperties(treeInFile);
|
||||
// Read extra properties per node
|
||||
for (unsigned32 i = 1; i < GetNodeCount(); i++)
|
||||
{
|
||||
if (treeInFile.eof())
|
||||
{
|
||||
printf("Something is wrong...");
|
||||
return false;
|
||||
}
|
||||
GetTypedNode(i)->ReadProperties(treeInFile);
|
||||
}
|
||||
printf(".");
|
||||
|
||||
this->FinalizeReadTree();
|
||||
return true;
|
||||
}
|
||||
|
||||
static std::string GetOctreeFileName(const char* fileName) { return std::string(fileName) + ".oct"; }
|
||||
static std::string GetAdditionalPoolFileName(const char* fileName) { return std::string(fileName) + ".additional.pool"; }
|
||||
|
||||
virtual void ReadProperties(std::istream& file) {}
|
||||
virtual void WriteProperties(std::ostream& file) {}
|
||||
|
||||
unsigned8 GetAdditionalTreeInfoSize() const override { return 0; }
|
||||
std::vector<unsigned8> GetAdditionalTreeInfo(const std::vector<size_t>& nodePointers) const override { return std::vector<unsigned8>(); }
|
||||
unsigned8 GetAdditionalBytesPerNode(unsigned8 level) const override { return 0; }
|
||||
std::vector<unsigned8> GetAdditionalNodeBytes(const Node* node) const override { return std::vector<unsigned8>(); }
|
||||
bool LastChildHasAdditionalBytes() const override { return true; }
|
||||
unsigned8 GetAdditionalBytesPerPointer(unsigned8 level) const override { return 0; }
|
||||
std::vector<unsigned8> GetAdditionalPointerBytes(const Node* node, ChildIndex child) const override { return std::vector<unsigned8>(); }
|
||||
|
||||
// Reads the node pool and stores it in this tree
|
||||
bool ReadAdditionalPool(const char* fileName) override
|
||||
{
|
||||
if (!HasAdditionalPool()) return true;
|
||||
std::string binFileName = GetAdditionalPoolFileName(fileName);
|
||||
std::ifstream additionalPoolInFile(binFileName, std::ios::binary);
|
||||
|
||||
if (additionalPoolInFile.good()) {
|
||||
// Destroy whole current node pool
|
||||
ReadAdditionalPoolProperties(additionalPoolInFile);
|
||||
additionalPoolInFile.close();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// Writes the node pool
|
||||
bool WriteAdditionalPool(const char* fileName) override
|
||||
{
|
||||
if (!HasAdditionalPool()) return true;
|
||||
std::string binFileName = GetAdditionalPoolFileName(fileName);
|
||||
std::ofstream additionalPoolOutFile(binFileName, std::ios::binary);
|
||||
WriteAdditionalPoolProperties(additionalPoolOutFile);
|
||||
additionalPoolOutFile.close();
|
||||
return true;
|
||||
}
|
||||
virtual bool HasAdditionalPool() const { return false; }
|
||||
|
||||
// Clears the whole tree, including the root!
|
||||
virtual void Clear() override
|
||||
{
|
||||
mNodePool.Clear();
|
||||
}
|
||||
|
||||
// Removes all nodes marked for deletion and updates the indices.
|
||||
void Clean()
|
||||
{
|
||||
unsigned32 oldNodeCount = GetNodeCount();
|
||||
mNodePool.Clean();
|
||||
UpdateNodeIndices(oldNodeCount);
|
||||
}
|
||||
|
||||
NodeType* Create(unsigned8 level) override
|
||||
{
|
||||
// Make sure only one leaf node is created (memory, performance)
|
||||
if (mLeafsAreEqual && level == GetMaxLevel() && mLeafNode != 0)
|
||||
return GetTypedNode(mLeafNode);
|
||||
// Otherwise, create a new node
|
||||
NodeType* res = mNodePool.Create(this, level);
|
||||
mSortedOnLevel = false;
|
||||
|
||||
res->SetIndex(GetNodeCount() - 1);
|
||||
if (mLeafsAreEqual && level == GetMaxLevel() && mLeafNode == 0) mLeafNode = res->GetIndex();
|
||||
return res;
|
||||
}
|
||||
|
||||
void Append(glm::uvec3 coordinates, unsigned8 level, Tree* tree)
|
||||
{
|
||||
// Let a possible inheriting class do preprocessing on the current tree before appending
|
||||
AppendPreProcess(coordinates, level, tree);
|
||||
|
||||
// First create/get the node that acts as the root of the tree to append
|
||||
std::vector<unsigned32> equivalents(tree->GetNodeCount());
|
||||
NodeType* treeRoot = AddNode(coordinates, level);
|
||||
equivalents[0] = treeRoot->GetIndex();
|
||||
|
||||
// Make copies of all nodes in tree, and add them to our own nodepool (with the correct level and root)
|
||||
for (unsigned32 i = 1; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = tree->GetTypedNode(i);
|
||||
NodeType* copy = this->Create(node->GetLevel() + level);
|
||||
equivalents[i] = copy->GetIndex();
|
||||
}
|
||||
|
||||
unsigned32* newChildren = new unsigned32[8];
|
||||
// Restore all child pointers
|
||||
for (unsigned32 i = 0; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = tree->GetTypedNode(i);
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++) newChildren[c] = equivalents[children[c]];
|
||||
NodeType* copy = this->GetTypedNode(equivalents[i]);
|
||||
copy->SetChildren(node->GetChildmask(), newChildren);
|
||||
}
|
||||
delete[] newChildren;
|
||||
|
||||
// Copy properties
|
||||
tbb::parallel_for((unsigned32)0, tree->GetNodeCount(), [&](unsigned i)
|
||||
{
|
||||
NodeType* source = tree->GetTypedNode(i);
|
||||
GetTypedNode(equivalents[i])->CopyProperties(source);
|
||||
});
|
||||
|
||||
// Let a possible inheriting class do postprocessing on the added nodes
|
||||
AppendPostProcess(coordinates, level, tree);
|
||||
}
|
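// Usage sketch (illustrative, not from the original sources; assumes a concrete Tree<NodeType> specialization):
// grafting a separately built subtree into a larger tree. The append level plus the subtree's depth
// should not exceed this tree's maximum level.
//
//   Tree<NodeType> world(10);
//   Tree<NodeType> chunk(4);
//   // ... fill 'chunk' with AddNode / AddLeafNode calls ...
//   world.Append(glm::uvec3(2, 0, 5), 6, &chunk);   // chunk's root becomes a level-6 node of 'world'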
||||
|
||||
// Appends the given tree to this tree. The nodes in the original tree will be moved to this tree.
|
||||
// During the appending, nodes are automatically merged when possible to keep memory usage low.
|
||||
// Note that this DOES NOT call AppendPostProcess, so trees that depend on this cannot be built using this method
|
||||
void AppendAndMerge(glm::uvec3 coordinates, unsigned8 appendLevel, Tree* tree)
|
||||
{
|
||||
// Let a possible inheriting class do preprocessing on the current tree before appending
|
||||
AppendPreProcess(coordinates, appendLevel, tree);
|
||||
|
||||
this->SortNodes();
|
||||
std::vector<unsigned32> levelIndices = this->SortOnLevel();
|
||||
std::vector<unsigned32> appendedTreeLevelIndices = tree->SortOnLevel();
|
||||
|
||||
// First create/get the node that acts as the root of the tree to append
|
||||
NodeType* treeRoot = this->AddNode(coordinates, appendLevel);
|
||||
|
||||
// Maps the index of each node in the tree that is to be appended to the index of its replacement node in this tree's pool
|
||||
std::vector<unsigned32> replacementMap(tree->GetNodeCount(), 0);
|
||||
replacementMap[0] = treeRoot->GetIndex();
|
||||
for (unsigned8 level = GetMaxLevel(); level > appendLevel; level--)
|
||||
{
|
||||
// Sort all the nodes in the current level of the other tree
|
||||
unsigned32 levelStart = levelIndices[level];
|
||||
unsigned32 levelEnd = levelIndices[level + 1];
|
||||
unsigned32 appendedLevelStart = appendedTreeLevelIndices[level - appendLevel];
|
||||
unsigned32 appendedLevelEnd = appendedTreeLevelIndices[level - appendLevel + 1];
|
||||
|
||||
//std::vector<NodeType&> existingNodes(levelEnd - levelStart);
|
||||
//for (unsigned32 i = levelStart; i < levelEnd; i++) existingNodes[i - levelStart] = GetTypedNode(i);
|
||||
|
||||
//tbb::parallel_sort(existingNodes.begin(), existingNodes.end(), NodeComparer());
|
||||
|
||||
// Vector of all nodes that need to be appended
|
||||
std::vector<unsigned32> toAppend(appendedLevelEnd - appendedLevelStart);
|
||||
for (unsigned32 i = appendedLevelStart; i < appendedLevelEnd; i++) toAppend[i - appendedLevelStart] = i;
|
||||
|
||||
tbb::parallel_for_each(toAppend.begin(), toAppend.end(), [&](const unsigned32 i)
|
||||
{
|
||||
NodeType* node = tree->GetTypedNode(i);
|
||||
unsigned32* children = node->GetChildren();
|
||||
ChildIndex childCount = node->GetChildCount();
|
||||
// Make sure the node looks exactly like how it would look if it was in the current tree:
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
children[c] = replacementMap[children[c]];
|
||||
node->SetLevel(level);
|
||||
node->MoveToTree(this);
|
||||
});
|
||||
|
||||
// Sort the nodes in the same way the existing nodes are sorted
|
||||
//NodeComparer comparer();
|
||||
tbb::parallel_sort(toAppend.begin(), toAppend.end(), [&](const unsigned32 i1, const unsigned32 i2)
|
||||
{
|
||||
NodeType* node1 = tree->GetTypedNode(i1);
|
||||
NodeType* node2 = tree->GetTypedNode(i2);
|
||||
return node1->Compare(*node2);
|
||||
});
|
||||
|
||||
// Go through the nodes in this order.
|
||||
unsigned32 existingIndex = 0;
|
||||
for (const unsigned32 i : toAppend)
|
||||
{
|
||||
NodeType* node = tree->GetTypedNode(i);
|
||||
// Scan the existing nodes until they are no longer smaller than the current node
|
||||
while (existingIndex < (levelEnd - levelStart) && GetTypedNode(levelStart + existingIndex)->Compare(*node))
|
||||
existingIndex++;
|
||||
// If the node at that position is equal to the current node, then delete the current node
|
||||
if (existingIndex < (levelEnd - levelStart) && GetTypedNode(levelStart + existingIndex)->Equals(*node))
|
||||
{
|
||||
replacementMap[node->GetIndex()] = levelStart + existingIndex;
|
||||
tree->Destroy(node);
|
||||
}
|
||||
else // Otherwise, add this node to the pool
|
||||
{
|
||||
// Create a copy of the node
|
||||
node = mNodePool.Add(node);
|
||||
tree->Destroy(node->GetIndex());
|
||||
unsigned32 newNodeIndex = GetNodeCount() - 1;
|
||||
replacementMap[node->GetIndex()] = newNodeIndex;
|
||||
node->SetIndex(newNodeIndex);
|
||||
mSortedOnLevel = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reconnect the children of the replacement root
|
||||
unsigned8 childCount = tree->GetRoot()->GetChildCount();
|
||||
unsigned32* children = tree->GetRoot()->GetChildren();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
children[c] = replacementMap[children[c]];
|
||||
treeRoot->SetChildren(tree->GetRoot()->GetChildmask(), children);
|
||||
|
||||
tree->Clear();
|
||||
}
|
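// Note (added): compared to Append above, AppendAndMerge deduplicates nodes per level while copying, so
// peak memory stays close to the size of the merged result; Append followed by ToDAG reaches roughly the
// same structure but temporarily holds all uncompressed nodes. Because AppendPostProcess is not called,
// per-node properties of inheriting trees are not updated by this method.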
||||
|
||||
// Moves the given tree into this tree, replacing the contents of this tree by those of the given tree.
|
||||
// Note that all nodes are recreated, because a tree with a different subclass of node may be moved to this one.
|
||||
// This means that all properties of the original tree will be lost!
|
||||
void MoveShallow(BaseTree* tree)
|
||||
{
|
||||
// Update the current tree to be similar to the source tree
|
||||
Clear();
|
||||
this->mMaxLevel = tree->GetMaxLevel();
|
||||
|
||||
// Create copies of the old tree and put them in the new tree, with correct childpointers
|
||||
for (unsigned32 i = 0; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
Node* original = tree->GetNode(i);
|
||||
NodeType* replacer = mNodePool.Create(this, original->GetLevel());
|
||||
replacer->SetChildren(original->GetChildmask(), original->GetChildren());
|
||||
//tree->Destroy(i);
|
||||
}
|
||||
|
||||
// Clear the original tree
|
||||
tree->Clear();
|
||||
}
|
||||
|
||||
// Sorts the nodes on their level, and returns a list of start indices per level
|
||||
std::vector<unsigned32> SortOnLevel() override
|
||||
{
|
||||
// Sort nodes on level (ignore the root = first node in pool)
|
||||
if (!mSortedOnLevel)
|
||||
{
|
||||
//mNodePool.Sort(NodeComparer());
|
||||
mNodePool.Sort(1, GetNodeCount(), [](const NodeType& a, const NodeType& b) { return a.GetLevel() < b.GetLevel(); });
|
||||
//tbb::parallel_sort(mNodePool.begin() + 1, mNodePool.end(), [](NodeType* a, NodeType* b) { return a->GetLevel() < b->GetLevel(); });
|
||||
|
||||
UpdateNodeIndices(GetNodeCount());
|
||||
|
||||
mSortedOnLevel = true;
|
||||
}
|
||||
|
||||
// Find the start indices of all levels in the node pool
|
||||
std::vector<unsigned32> levelIndices(GetMaxLevel() + 1);
|
||||
unsigned8 curLevel = 255;
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (node->GetLevel() != curLevel) {
|
||||
curLevel = node->GetLevel();
|
||||
levelIndices[curLevel] = node->GetIndex();
|
||||
}
|
||||
}
|
||||
// If for some reason not every level contains nodes, fill the remaining level offsets with the node count so those levels appear as empty ranges
|
||||
for (unsigned8 level = curLevel + 1; level <= GetMaxLevel(); level++) levelIndices[level] = GetNodeCount();
|
||||
|
||||
// Add a dummy value at the end
|
||||
levelIndices.push_back(GetNodeCount());
|
||||
|
||||
return levelIndices;
|
||||
}
|
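// Usage sketch (illustrative): iterating the nodes of one level via the returned offsets. The vector holds
// one start index per level plus a trailing dummy equal to GetNodeCount(), so levelIndices[level + 1] is
// always a valid end bound.
//
//   auto levelIndices = SortOnLevel();
//   for (unsigned32 i = levelIndices[level]; i < levelIndices[level + 1]; i++)
//       GetTypedNode(i);   // every node here has GetLevel() == level; per-node work is hypothetical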
||||
|
||||
void SortNodes()
|
||||
{
|
||||
std::vector<unsigned32> levelIndices = SortOnLevel();
|
||||
for (unsigned8 level = 1; level <= GetMaxLevel(); level++) this->SortBetween(levelIndices[level], levelIndices[level + 1], NodeComparer());
|
||||
UpdateNodeIndices(GetNodeCount());
|
||||
}
|
||||
|
||||
void ToDAG(unsigned8 fromLevel = 0, bool verbose = true)
|
||||
{
|
||||
if (this->GetNodeCount() == 1)
|
||||
return; // Empty tree = DAG
|
||||
|
||||
// Sort the nodes on level (for quick access of nodes within a level).
|
||||
// Note that we can't sort on the node comparer just yet, as the nodes will change during the ToDAG process.
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
mMaxLevel = (unsigned8)(levelIndices.size() - 2);
|
||||
if (fromLevel > GetMaxLevel()) return;
|
||||
|
||||
unsigned32 maxOldIndex = GetNodeCount() - 1;
|
||||
|
||||
// Run through the layers in reverse order and compress them bottom up
|
||||
for (unsigned8 level = GetMaxLevel(); level > fromLevel; --level)
|
||||
{
|
||||
if (verbose) printf(".");
|
||||
// Initialize some variables needed
|
||||
unsigned32 levelStartIndex = levelIndices[level];
|
||||
unsigned32 levelEndIndex = levelIndices[level + 1];
|
||||
unsigned32 parentLevelStartIndex = levelIndices[level - 1];
|
||||
unsigned32 levelNodeCount = levelEndIndex - levelStartIndex;
|
||||
unsigned32 deletedCount = 0;
|
||||
|
||||
bool fullRead = !mLeafsAreEqual || level != GetMaxLevel();
|
||||
|
||||
if (fullRead)
|
||||
{
|
||||
// Sort nodes using the node comparer
|
||||
mNodePool.Sort(levelStartIndex, levelEndIndex, NodeComparer());
|
||||
if (verbose) printf(".");
|
||||
|
||||
// Find unique nodes and store which node duplicates which
|
||||
NodeType* cur = NULL;
|
||||
std::vector<unsigned32> replacements(levelNodeCount);
|
||||
size_t uniqueNodes = 0;
|
||||
for (unsigned32 i = levelStartIndex; i < levelEndIndex; i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (cur == NULL || !node->Equals(*cur))
|
||||
{
|
||||
uniqueNodes++;
|
||||
cur = node;
|
||||
}
|
||||
replacements[node->GetIndex() - levelStartIndex] = cur->GetIndex();
|
||||
}
|
||||
if (verbose) printf(".");
|
||||
|
||||
// Point all parents of nodes to the new replacement node
|
||||
// Note that this is only necessary if some nodes are replaced by others
|
||||
if (uniqueNodes < levelNodeCount)
|
||||
{
|
||||
tbb::parallel_for(parentLevelStartIndex, levelStartIndex, [&](const unsigned32 i)
|
||||
{
|
||||
Node* parent = GetTypedNode(i);
|
||||
unsigned32* children = parent->GetChildren();
|
||||
unsigned8 childCount = parent->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
children[c] = replacements[children[c] - levelStartIndex];
|
||||
});
|
||||
if (verbose) printf(".");
|
||||
for (unsigned32 i = levelStartIndex; i < levelEndIndex; i++)
|
||||
{
|
||||
unsigned32 nodeIndex = GetTypedNode(i)->GetIndex();
|
||||
// If the node is not replaced by its own index, then delete it
|
||||
if (replacements[nodeIndex - levelStartIndex] != nodeIndex)
|
||||
{
|
||||
Destroy(i);
|
||||
deletedCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (verbose) printf(".");
|
||||
}
|
||||
else
|
||||
{
|
||||
if (verbose) printf(".");
|
||||
// For the leaf nodes (i.e. !fullRead), just add the first leaf node in the DAGNodePool, and replace the rest by it.
|
||||
NodeType* replacer = GetTypedNode(levelStartIndex);
|
||||
if (verbose) printf(".");
|
||||
if (levelNodeCount > 1)
|
||||
{
|
||||
tbb::parallel_for(parentLevelStartIndex, levelStartIndex, [&](const unsigned32 i)
|
||||
{
|
||||
NodeType* parent = GetTypedNode(i);
|
||||
unsigned32* children = parent->GetChildren();
|
||||
unsigned8 childCount = parent->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
children[c] = levelStartIndex;
|
||||
});
|
||||
if (verbose) printf(".");
|
||||
for (unsigned32 i = levelStartIndex + 1; i < levelEndIndex; i++)
|
||||
{
|
||||
Destroy(i);
|
||||
deletedCount++;
|
||||
}
|
||||
}
|
||||
else if (verbose) printf(".");
|
||||
}
|
||||
if (verbose) printf(" Layer %2u compressed, %7u out of %7u nodes left\n", level, levelNodeCount - deletedCount, levelNodeCount);
|
||||
}
|
||||
Clean();
|
||||
}
|
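// Typical usage (sketch, mirrors how the method is intended to be called): build or append an ordinary
// octree first, then collapse identical subtrees bottom-up into a DAG.
//
//   tree.AddLeafNode(glm::uvec3(x, y, z));   // ... repeated for every filled voxel ...
//   tree.ToDAG();                            // per level: sort, keep unique nodes, repoint parents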
||||
|
||||
void ToOctree(unsigned8 fromLevel = 0)
|
||||
{
|
||||
auto levelIndices = SortOnLevel();
|
||||
BoolArray usedNodes(GetNodeCount(), false);
|
||||
unsigned32 newLevelStart = GetNodeCount();
|
||||
unsigned32 newLevelEnd = GetNodeCount();
|
||||
for (auto level = fromLevel; level < GetMaxLevel(); level++)
|
||||
{
|
||||
// Update start and end index of the nodes that were created during the last iteration
|
||||
newLevelStart = newLevelEnd;
|
||||
newLevelEnd = GetNodeCount();
|
||||
|
||||
// Find indices of existing nodes for each level
|
||||
unsigned32 levelStart = levelIndices[level];
|
||||
unsigned32 levelEnd = levelIndices[level + 1];
|
||||
|
||||
// Process all nodes
|
||||
for (unsigned32 i = levelStart; i < newLevelEnd; i++)
|
||||
{
|
||||
// Skip nodes of other levels
|
||||
if (i == levelEnd) i = newLevelStart;
|
||||
|
||||
NodeType* node = GetTypedNode(i);
|
||||
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
if (!usedNodes[children[c]])
|
||||
{
|
||||
// If this node wasn't used yet, we can keep the same child.
|
||||
usedNodes.Set(children[c], true);
|
||||
}
|
||||
else
|
||||
{
|
||||
// If this node was used before, we need to create a new one
|
||||
auto oldNode = GetTypedNode(children[c]);
|
||||
auto newNode = Create(level + 1);
|
||||
newNode->SetChildren(oldNode->GetChildmask(), oldNode->GetChildren());
|
||||
newNode->CopyProperties(oldNode);
|
||||
children[c] = newNode->GetIndex();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
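// Note (added): ToOctree is the inverse direction of ToDAG; every node referenced by more than one parent
// is duplicated so that each node ends up with a single parent again. This is needed when per-node data
// depends on the path from the root, as in UniqueIndexShiftTree::BaseOn further down, which calls
// tree->ToOctree() before computing shifts.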
||||
|
||||
void ClearOrphans()
|
||||
{
|
||||
BoolArray orphans(GetNodeCount(), true);
|
||||
orphans.Set(0, false);
|
||||
size_t orphansCleared = 0;
|
||||
bool firstPass = true;
|
||||
while (orphans.Any())
|
||||
{
|
||||
// Assume all nodes (except the root) are orphans
|
||||
orphans.SetRange(1, GetNodeCount(), true);
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (node == NULL) // Empty nodes are not orphans
|
||||
orphans.Set(i, false);
|
||||
else
|
||||
{
|
||||
// Roots are not orphans (by definition, they're just roots)
|
||||
if (node->GetLevel() == 0)
|
||||
orphans.Set(i, false);
|
||||
|
||||
// All the nodes that are children of this node can not be orphans (since this node still lives)
|
||||
for (ChildIndex child = 0; child < node->GetChildCount(); child++)
|
||||
orphans.Set(node->GetChildren()[child], false);
|
||||
}
|
||||
}
|
||||
|
||||
// Delete nodes that are orphans. Note that this might cause other nodes to be orphaned as well
|
||||
// In other words: if you're Batman's child, and Batman gets killed, then you are an orphan as well ;)
|
||||
for (unsigned32 i = 1; i < GetNodeCount(); i++)
|
||||
if (orphans.Get(i))
|
||||
{
|
||||
orphansCleared++;
|
||||
Destroy(i);
|
||||
}
|
||||
}
|
||||
|
||||
printf("%llu orphans have been removed.\n", (unsigned64)orphansCleared);
|
||||
|
||||
// Clean the nodepool to really remove all orphans
|
||||
Clean();
|
||||
}
|
||||
|
||||
// Finds all parents of a node (O(N)).
|
||||
std::vector<std::pair<const NodeType*, ChildIndex>> FindParents(const NodeType* node) const
|
||||
{
|
||||
tbb::concurrent_queue<std::pair<const NodeType*, ChildIndex>> parents;
|
||||
unsigned32 childToFind = node->GetIndex();
|
||||
unsigned8 parentLevel = node->GetLevel() - 1;
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (node->GetLevel() == parentLevel)
|
||||
{
|
||||
for (ChildIndex child = 0; child < 8; child++)
|
||||
if (node->HasChild(child) && node->GetChildIndex(child) == childToFind)
|
||||
parents.push(std::pair<const NodeType*, ChildIndex>(node, child));
|
||||
}
|
||||
});
|
||||
std::vector<std::pair<const NodeType*, ChildIndex>> res;
|
||||
for (auto parent = parents.unsafe_begin(); parent != parents.unsafe_end(); parent++)
|
||||
res.push_back(*parent);
|
||||
return res;
|
||||
}
|
||||
|
||||
// Cuts off all nodes that have a level higher than depth.
|
||||
void Shave(unsigned8 depth) override
|
||||
{
|
||||
if (depth >= GetMaxLevel()) return;
|
||||
|
||||
auto levelIndices = SortOnLevel();
|
||||
|
||||
// Make sure all nodes at level "depth" have no children (since we're gonna shave them off)
|
||||
const unsigned* emptyChildren = new unsigned[0];
|
||||
tbb::parallel_for(levelIndices[depth], levelIndices[depth + 1], [&](const unsigned32 i)
|
||||
{
|
||||
GetTypedNode(i)->SetChildren(ChildMask(0), emptyChildren);
|
||||
});
|
||||
delete[] emptyChildren;
|
||||
|
||||
// Delete all nodes that have a higher level than depth
|
||||
for (unsigned32 i = levelIndices[depth + 1]; i < GetNodeCount(); i++)
|
||||
Destroy(i);
|
||||
|
||||
// Clean the node pool so that no references to the destroyed nodes remain
|
||||
mNodePool.Clean();
|
||||
mMaxLevel = depth;
|
||||
}
|
||||
|
||||
virtual void PrintDebugInfo() const
|
||||
{
|
||||
printf("NodeCount: %u\n", GetNodeCount());
|
||||
unsigned64 leafVoxelCount = GetLeafVoxelCount();
|
||||
printf("Leaf voxel count: %llu\n", leafVoxelCount);
|
||||
}
|
||||
|
||||
|
||||
protected:
|
||||
void Destroy(unsigned32 index) override { mNodePool.Delete(index); }
|
||||
virtual void Destroy(NodeType* node)
|
||||
{
|
||||
// Assert that we are actually deleting the correct node
|
||||
if (node->GetIndex() < GetNodeCount() && GetTypedNode(node->GetIndex()) == node)
|
||||
mNodePool.Delete(node->GetIndex());
|
||||
else
|
||||
printf("Error: Can't delete, node with ID %u not located at that position", node->GetIndex());
|
||||
}
|
||||
|
||||
virtual void InitializeReadTree() {}
|
||||
virtual void FinalizeReadTree() {}
|
||||
|
||||
virtual void AppendPreProcess(glm::uvec3 coordinates, unsigned8 level, Tree* tree) {}
|
||||
virtual void AppendPostProcess(glm::uvec3 coordinates, unsigned8 level, Tree* tree) {}
|
||||
|
||||
// Sorts a subset of the nodes, but DOES NOT update the indices! Make sure to call UpdateNodeIndices at some point!
|
||||
template<typename Comparer>
|
||||
void SortBetween(unsigned32 startIndex, unsigned32 endIndex, const Comparer& comparer = NodeComparer())
|
||||
{
|
||||
if (startIndex >= endIndex || startIndex >= GetNodeCount()) return; // Nothing to sort
|
||||
|
||||
mNodePool.Sort(startIndex, endIndex, comparer);
|
||||
|
||||
// Check if the given range is monotonically increasing, to make sure the nodes are still sorted on level (if they were before)
|
||||
if (mSortedOnLevel)
|
||||
{
|
||||
unsigned8 wantedLevel = GetTypedNode(startIndex)->GetLevel();
|
||||
for (unsigned32 i = startIndex; i < endIndex; i++)
|
||||
{
|
||||
unsigned8 level = GetTypedNode(i)->GetLevel();
|
||||
if (level != wantedLevel)
|
||||
{
|
||||
if (level > wantedLevel) wantedLevel = level;
|
||||
else
|
||||
{
|
||||
mSortedOnLevel = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//std::vector<NodeType*> GetNodePoolCopy() const
|
||||
//{
|
||||
// std::vector<NodeType*> nodePoolCopy(mNodePool.size());
|
||||
// std::copy(mNodePool.begin(), mNodePool.end(), nodePoolCopy.begin());
|
||||
// return nodePoolCopy;
|
||||
//}
|
||||
|
||||
// Updates the current index value of all nodes to the value prescribed by the current nodepool.
|
||||
// oldMaxIndex is needed to know the size of the map that maps old to new indices.
|
||||
// If oldMaxIndex is not provided or 0, it will be calculated, requiring an additional pass
|
||||
// through the nodes
|
||||
void UpdateNodeIndices(unsigned32 oldMaxIndex = 0)
|
||||
{
|
||||
if (oldMaxIndex == 0)
|
||||
{
|
||||
// oldMaxIndex = max(oldIndices)
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
if (GetTypedNode(i)->GetIndex() > oldMaxIndex)
|
||||
oldMaxIndex = GetTypedNode(i)->GetIndex();
|
||||
oldMaxIndex++;
|
||||
}
|
||||
// Update the indices in all nodes, store a map of where nodes have been moved
|
||||
std::vector<unsigned32> indexMap(oldMaxIndex + 1);
|
||||
tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (node != NULL)
|
||||
{
|
||||
indexMap[node->GetIndex()] = i;
|
||||
node->SetIndex(i);
|
||||
}
|
||||
});
|
||||
// Update the child indices based on the node movement map
|
||||
//tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
NodeType* node = GetTypedNode(i);
|
||||
if (node != NULL)
|
||||
{
|
||||
const unsigned8 nodeChildCount = node->GetChildCount();
|
||||
unsigned32* children = node->GetChildren();
|
||||
for (ChildIndex c = 0; c < nodeChildCount; c++)
|
||||
children[c] = indexMap[children[c]];
|
||||
}
|
||||
}//);
|
||||
|
||||
UpdateLocalReferences(indexMap);
|
||||
}
|
||||
|
||||
virtual void UpdateLocalReferences(const std::vector<unsigned32>& indexMap)
|
||||
{
|
||||
if (mLeafsAreEqual)
|
||||
mLeafNode = indexMap[mLeafNode];
|
||||
}
|
||||
|
||||
struct NodeComparer
|
||||
{
|
||||
bool operator()(const NodeType& a, const NodeType& b) const
|
||||
{
|
||||
return a.Compare(b);
|
||||
}
|
||||
};
|
||||
|
||||
virtual void WriteAdditionalPoolProperties(std::ostream& file) {}
|
||||
virtual void ReadAdditionalPoolProperties(std::istream& file) {}
|
||||
|
||||
unsigned32 mLeafNode;
|
||||
bool mLeafsAreEqual;
|
||||
|
||||
unsigned8 mMaxLevel; // the deepest level (depth) of the tree
|
||||
private:
|
||||
ObjectPool<NodeType> mNodePool; // the node pool containing all nodes
|
||||
bool mSortedOnLevel;
|
||||
//ObjectPool<NodeType>* mNodeObjectPool; // The Node ObjectPool, used to reuse nodes instead of new and delete all the time
|
||||
};
|
||||
|
||||
185
Research/scene/Octree/UniqueIndexShiftTree.h
Normal file
@@ -0,0 +1,185 @@
|
||||
#pragma once
|
||||
#include "UniqueIndexTree.h"
|
||||
#include "../Material/SignedIntMaterial.h"
|
||||
#include "MaterialTree.h"
|
||||
#include "../Material/MaterialLibrary.h"
|
||||
#include "../../inc/tbb/parallel_for.h"
|
||||
#include "IMaterialTexture.h"
|
||||
#include <vector>
|
||||
|
||||
template<typename T, typename Comparer = std::less<T>, unsigned8 channelsPerPixel = 3>
|
||||
class UniqueIndexShiftTree : public UniqueIndexTree<SignedIntMaterial>, public IMaterialTexture
|
||||
{
|
||||
private:
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* mMaterialLibrary;
|
||||
std::vector<unsigned8> mMaterialTexture;
|
||||
unsigned16 mMaterialTextureSize;
|
||||
MaterialLibraryPointer mMaxTextureIndex;
|
||||
|
||||
void ClearMaterialLibrary()
|
||||
{
|
||||
if (mMaterialLibrary != NULL)
|
||||
{
|
||||
delete mMaterialLibrary;
|
||||
mMaterialLibrary = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
inline void WriteMaterialTexture(std::ostream& file)
|
||||
{
|
||||
assert(mMaterialLibrary != NULL);
|
||||
mMaterialLibrary->Serialize(file);
|
||||
}
|
||||
|
||||
inline void ReadMaterialTexture(std::istream& file)
|
||||
{
|
||||
if (mMaterialLibrary == NULL)
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
mMaterialLibrary->Deserialize(file);
|
||||
GetMaterialTexture();
|
||||
}
|
||||
|
||||
void BuildMaterialLibrary(const std::vector<T>& materials)
|
||||
{
|
||||
ClearMaterialLibrary();
|
||||
mMaterialLibrary = new MaterialLibrary<T, Comparer, channelsPerPixel>();
|
||||
for (T m : materials) mMaterialLibrary->AddMaterial(m);
|
||||
mMaterialLibrary->Finalize();
|
||||
}
|
||||
|
||||
bool CheckMaterialLibrary(const std::vector<T>& materials)
|
||||
{
|
||||
for (T m : materials) if (!mMaterialLibrary->Contains(m)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
public:
|
||||
UniqueIndexShiftTree(unsigned8 maxLevel, CompressedTexture<SignedIntMaterial>* nodeMaterialsTexture, unsigned32 collapsedMaterialLevels) :
|
||||
UniqueIndexTree(maxLevel, nodeMaterialsTexture, collapsedMaterialLevels),
|
||||
mMaterialLibrary(NULL),
|
||||
mMaterialTexture(std::vector<unsigned8>()),
|
||||
mMaterialTextureSize(0),
|
||||
mMaxTextureIndex(MaterialLibraryPointer(0))
|
||||
{}
|
||||
|
||||
UniqueIndexShiftTree(unsigned8 maxLevel, CompressedTexture<SignedIntMaterial>* nodeMaterialsTexture)
|
||||
: UniqueIndexShiftTree(maxLevel, nodeMaterialsTexture, 0)
|
||||
{}
|
||||
|
||||
~UniqueIndexShiftTree() override {
|
||||
ClearMaterialLibrary();
|
||||
}
|
||||
|
||||
void AppendPostProcess(glm::uvec3 coordinates, unsigned8 level, Tree<UniqueIndexNode>* tree) override
|
||||
{
|
||||
UniqueIndexShiftTree<T, Comparer, channelsPerPixel>* typedTree = (UniqueIndexShiftTree<T, Comparer, channelsPerPixel>*)tree;
|
||||
// If the material libraries are equal, appending should work correctly
|
||||
assert(*typedTree->GetMaterialLibrary() == *mMaterialLibrary);
|
||||
UniqueIndexTree::AppendPostProcess(coordinates, level, typedTree, std::unordered_map<SignedIntMaterial, SignedIntMaterial>());
|
||||
}
|
||||
|
||||
// Prepare the tree for a certain set of materials. This should be done before calling "BaseOn" or otherwise adding nodes to the tree.
|
||||
void PrepareForMaterials(const std::vector<T>& materials)
|
||||
{
|
||||
assert(GetNodeCount() == 1);
|
||||
BuildMaterialLibrary(materials);
|
||||
}
|
||||
|
||||
template<typename MaterialTree>
|
||||
void BaseOn(MaterialTree* tree)
|
||||
{
|
||||
// First we build a new material tree that contains, for each node, the difference between its material index and that of its parent (which is what a shift encodes)
|
||||
// To do this, we need an octree (as the same color doesn't mean it will have the same difference to its parent)
|
||||
tree->ToOctree();
|
||||
std::vector<T> uniqueMaterials = tree->GetUniqueMaterials();
|
||||
|
||||
// Create the actual material library
|
||||
if (mMaterialLibrary == NULL)
|
||||
BuildMaterialLibrary(uniqueMaterials);
|
||||
else
|
||||
assert(CheckMaterialLibrary(uniqueMaterials));
|
||||
|
||||
// Now we calculate the shift for each node in the tree
|
||||
std::vector<int> intMaterialPointer(tree->GetNodeCount());
|
||||
tbb::parallel_for((unsigned32)0, tree->GetNodeCount(), [&](unsigned32 i)
|
||||
{
|
||||
auto node = tree->GetTypedNode(i);
|
||||
MaterialLibraryPointer matPointer = mMaterialLibrary->GetTextureIndex(tree->GetMaterial(node));
|
||||
intMaterialPointer[i] = (matPointer.y * mMaterialLibrary->GetTextureSize()) + matPointer.x;
|
||||
});
|
||||
std::vector<SignedIntMaterial> shiftMaterials(tree->GetNodeCount());
|
||||
shiftMaterials[0] = intMaterialPointer[0];
|
||||
tbb::parallel_for((unsigned32)0, tree->GetNodeCount(), [&](unsigned32 i)
|
||||
//for (size_t i = 0; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
Node* node = tree->GetNode(i);
|
||||
int curIntIndex = intMaterialPointer[i];
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
int childIntIndex = intMaterialPointer[children[c]];
|
||||
shiftMaterials[children[c]] = SignedIntMaterial(childIntIndex - curIntIndex);
|
||||
}
|
||||
});
|
||||
|
||||
std::vector<int> shiftsCopy(shiftMaterials.size());
|
||||
for (size_t i = 0; i < shiftMaterials.size(); i++) shiftsCopy[i] = shiftMaterials[i];
|
||||
|
||||
CollectionHelper::PrintStats(shiftsCopy);
|
||||
for (size_t i = 0; i < shiftsCopy.size(); i++) shiftsCopy[i] = abs(shiftsCopy[i]);
|
||||
CollectionHelper::PrintStats(shiftsCopy);
|
||||
|
||||
UniqueIndexTree::BaseOn(tree, shiftMaterials);
|
||||
}
|
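// Worked example (illustrative numbers, not taken from the code): with a 4x4 material texture, a parent
// whose material sits at texture position (x=1, y=2) has flat index 2*4 + 1 = 9. A child whose material
// sits at (x=3, y=2) has flat index 2*4 + 3 = 11, so the SignedIntMaterial stored for the edge to that
// child is 11 - 9 = +2; a child with a lower flat index would get a negative shift.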
||||
|
||||
std::vector<T> GetUniqueMaterials() const
|
||||
{
|
||||
return mMaterialLibrary->GetMaterials();
|
||||
}
|
||||
|
||||
MaterialLibrary<T, Comparer, channelsPerPixel>* GetMaterialLibrary() const
|
||||
{
|
||||
return mMaterialLibrary;
|
||||
}
|
||||
|
||||
// Returns the texture containing all materials once
|
||||
std::vector<unsigned8> GetMaterialTexture() override
|
||||
{
|
||||
if (!mMaterialTexture.empty())
|
||||
return mMaterialTexture;
|
||||
assert(mMaterialLibrary->IsFinalized());
|
||||
mMaterialTextureSize = mMaterialLibrary->GetTextureSize();
|
||||
mMaterialTexture = mMaterialLibrary->GetTexture();
|
||||
mMaxTextureIndex = mMaterialLibrary->GetMaxTextureIndex();
|
||||
return mMaterialTexture;
|
||||
}
|
||||
|
||||
unsigned GetMaterialTextureSize() override
|
||||
{
|
||||
GetMaterialTexture();
|
||||
return mMaterialTextureSize;
|
||||
}
|
||||
|
||||
unsigned8 GetMaterialTextureChannelsPerPixel() override { return channelsPerPixel; }
|
||||
|
||||
protected:
|
||||
void WriteAdditionalUniqueIndexTreeProperties(std::ostream& file) override {
|
||||
WriteMaterialTexture(file);
|
||||
}
|
||||
|
||||
void ReadAdditionalUniqueIndexTreeProperties(std::istream& file) override {
|
||||
ReadMaterialTexture(file);
|
||||
}
|
||||
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override { WriteMaterialTexture(file); UniqueIndexTree::WriteAdditionalPoolProperties(file); }
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override { ReadMaterialTexture(file); UniqueIndexTree::ReadAdditionalPoolProperties(file); }
|
||||
|
||||
void PrintDebugInfo() const override
|
||||
{
|
||||
std::vector<SignedIntMaterial> shifts = GetNodeValues();
|
||||
std::vector<int> shiftValues(shifts.size());
|
||||
for (size_t i = 0; i < shifts.size(); i++) shiftValues[i] = (int)shifts[i];
|
||||
CollectionHelper::PrintStats(shiftValues);
|
||||
}
|
||||
};
|
||||
559
Research/scene/Octree/UniqueIndexTree.h
Normal file
@@ -0,0 +1,559 @@
|
||||
#pragma once
|
||||
#include "Tree.h"
|
||||
#include "EdgeMaterialNode.h"
|
||||
#include "IBlockTexture.h"
|
||||
#include "IMaterialTexture.h"
|
||||
#include "IAdditionalProperties.h"
|
||||
#include "../../inc/tbb/parallel_for_each.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
#include "../TextureCompressor/CompressedTexture.h"
|
||||
#include "../../inc/lodepng/lodepng.h"
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include <stack>
|
||||
|
||||
typedef EdgeMaterialNode<unsigned64> UniqueIndexNode;
|
||||
|
||||
// Comment or uncomment these defines to enable or disable offset/shift compression
|
||||
#define offset_sizes_per_level
|
||||
#define implicit_store_first_offset
|
||||
|
||||
template<typename T>
|
||||
class UniqueIndexTree : public Tree<UniqueIndexNode>, public IBlockTexture, public IAdditionalProperties
|
||||
{
|
||||
private:
|
||||
std::vector<unsigned8>* mShiftBytesPerLevel;
|
||||
|
||||
// After the tree is finalized, this vector contains the materialPointers for all nodes in the original octree
|
||||
CompressedTexture<T>* mNodeMaterials;
|
||||
unsigned8 mUniqueShiftLevel;
|
||||
|
||||
// Textures and pools
|
||||
std::vector<unsigned8> mBlockPointerPool;
|
||||
bool mBlockPointerPoolLoaded;
|
||||
std::vector<unsigned8> mBlockPool;
|
||||
bool mBlockPoolLoaded;
|
||||
std::map<std::string, std::string> mAdditionalProperties;
|
||||
bool mAdditionalPropertiesLoaded;
|
||||
|
||||
inline void WriteBlockPointerPool(std::ostream& file)
|
||||
{
|
||||
// Make sure the block pointer pool exists
|
||||
GetBlockPointerPool();
|
||||
|
||||
// Write it to the filestream
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Serialize(mBlockPointerPool, file);
|
||||
}
|
||||
|
||||
inline void ReadBlockPointerPool(std::istream& file)
|
||||
{
|
||||
// Read from filestream
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Deserialize(mBlockPointerPool, file);
|
||||
mBlockPointerPoolLoaded = true;
|
||||
}
|
||||
|
||||
inline void WriteBlockPool(std::ostream& file)
|
||||
{
|
||||
// Make sure the block pool exists
|
||||
GetBlockPool();
|
||||
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Serialize(mBlockPool, file);
|
||||
}
|
||||
|
||||
inline void ReadBlockPool(std::istream& file)
|
||||
{
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Deserialize(mBlockPool, file);
|
||||
mBlockPoolLoaded = true;
|
||||
}
|
||||
|
||||
inline void WriteAdditionalProperties(std::ostream& file)
|
||||
{
|
||||
// Make sure the additional properties are set.
|
||||
GetAdditionalProperties();
|
||||
|
||||
// Write them to a file
|
||||
Serializer<std::map<std::string, std::string>>::Serialize(mAdditionalProperties, file);
|
||||
}
|
||||
|
||||
inline void ReadAdditionalProperties(std::istream& file)
|
||||
{
|
||||
Serializer<std::map<std::string, std::string>>::Deserialize(mAdditionalProperties, file);
|
||||
mAdditionalPropertiesLoaded = true;
|
||||
}
|
||||
|
||||
void ClearPooledNodeMaterials()
|
||||
{
|
||||
if (mBlockPoolLoaded && mBlockPointerPoolLoaded && mAdditionalPropertiesLoaded) ClearNodeMaterials();
|
||||
}
|
||||
|
||||
void ClearNodeMaterials()
|
||||
{
|
||||
if (mNodeMaterials != NULL)
|
||||
{
|
||||
delete mNodeMaterials;
|
||||
mNodeMaterials = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned64 GetShift(const UniqueIndexNode* node, const ChildIndex& child) const
|
||||
{
|
||||
return node->GetEdgeMaterial(child);
|
||||
}
|
||||
|
||||
// Calculates for each node whether the edge pointing to it has a shift of zero on it
|
||||
// (incorrect if the tree is a DAG, as the results might be ambiguous if nodes have multiple parents).
|
||||
BoolArray GetNodesWithZeroShiftParents()
|
||||
{
|
||||
BoolArray hasZeroShiftParent(GetNodeCount());
|
||||
for (unsigned32 i = 1; i < GetNodeCount(); i++)
|
||||
{
|
||||
UniqueIndexNode* cur = (UniqueIndexNode*)GetNode(i);
|
||||
unsigned64* childShifts = cur->GetEdgeMaterials();
|
||||
unsigned32* children = cur->GetChildren();
|
||||
ChildIndex childCount = cur->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
if (childShifts[c] == 0) hasZeroShiftParent.Set(children[c], true);
|
||||
}
|
||||
for (ChildIndex c = 0; c < GetChildCount(); c++)
|
||||
if (mChildShifts[c] == 0) hasZeroShiftParent.Set(mChildren[c], true);
|
||||
return hasZeroShiftParent;
|
||||
}
|
||||
|
||||
size_t GetHighestShiftSum(const UniqueIndexNode* node) const
|
||||
{
|
||||
// The first child is always the child with the highest shift
|
||||
if (node->HasChildren()) return node->GetEdgeMaterials()[0] + GetHighestShiftSum(GetTypedNode(node->GetChildren()[0]));
|
||||
else return 0;
|
||||
}
|
||||
|
||||
// Create a vector containing all materials from the original tree
|
||||
std::vector<T> GetMappedMaterialTexture(const BaseTree* tree, std::vector<T>& materialPerNode)
|
||||
{
|
||||
std::vector<T> materialTexture;
|
||||
// Go depth-first through all nodes in the tree and copy the materials as they are reached.
|
||||
std::stack<unsigned32> curStack;
|
||||
curStack.push(0); // Push the root index to the stack
|
||||
bool skipNext = false;
|
||||
while (!curStack.empty())
|
||||
{
|
||||
// Go to the next child
|
||||
unsigned node = curStack.top();
|
||||
const Node* cur = tree->GetNode(node);
|
||||
curStack.pop();
|
||||
if (!skipNext)
|
||||
materialTexture.push_back(materialPerNode[node]);
|
||||
else
|
||||
skipNext = false;
|
||||
|
||||
if (cur->GetLevel() < mUniqueShiftLevel)
|
||||
{
|
||||
// if a node has one child, don't update the index of the next node to process (as it will have the same color)
|
||||
//if (cur->GetChildCount() == 1)
|
||||
// skipNext = true;
|
||||
|
||||
// Push the children to the stack
|
||||
unsigned32* children = cur->GetChildren();
|
||||
unsigned8 childCount = cur->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
curStack.push(children[c]);
|
||||
}
|
||||
}
|
||||
return materialTexture;
|
||||
}
|
||||
|
||||
// This method will move the layout of the given (material) tree to this tree.
|
||||
// Since UniqueIndexTrees only need one leaf, all other leafs will be discarded.
|
||||
// The tree will be deleted by this method (as it will be left in an undefined state otherwise)
|
||||
void MoveShallowWithOneLeaf(BaseTree* tree)
|
||||
{
|
||||
Clear();
|
||||
|
||||
std::vector<unsigned32> levelOffsets = tree->SortOnLevel();
|
||||
// Find the first leaf, the one that will replace all other leafs
|
||||
unsigned32 leafsStart = levelOffsets[GetMaxLevel()];
|
||||
if (leafsStart < tree->GetNodeCount())
|
||||
{
|
||||
// Delete all leafs except the first
|
||||
for (unsigned32 i = leafsStart + 1; i < tree->GetNodeCount(); i++) tree->Destroy(i);
|
||||
|
||||
// Now replace all pointers to leaf nodes with pointers to the first leaf.
|
||||
tbb::parallel_for(levelOffsets[GetMaxLevel() - 1], levelOffsets[GetMaxLevel()], [&](unsigned32 i)
|
||||
{
|
||||
Node* node = tree->GetNode(i);
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
children[c] = (unsigned32)leafsStart;
|
||||
});
|
||||
}
|
||||
|
||||
// Create new nodes to replace all nodes in the tree. Since we're doing this in order, the pointers will always be correct
|
||||
for (unsigned32 i = 0; i < std::min(leafsStart + 1, tree->GetNodeCount()); i++)
|
||||
{
|
||||
Node* node = tree->GetNode(i);
|
||||
UniqueIndexNode* replacement = Create(node->GetLevel());
|
||||
replacement->SetChildren(node->GetChildmask(), node->GetChildren());
|
||||
tree->Destroy(node->GetIndex());
|
||||
}
|
||||
|
||||
// Delete the given tree as it is now obsolete
|
||||
delete tree;
|
||||
}
|
||||
|
||||
void UpdateShifts(const unsigned8& untilLevel)
|
||||
{
|
||||
// Go bottom up through the tree, calculate the shifts in each node
|
||||
std::vector<unsigned64> highestShiftSums(GetNodeCount(), 0);
|
||||
if (untilLevel != GetMaxLevel()) // If the until level is not the max level, calculate the highest shift sums in the until level before calculating the shifts
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
UniqueIndexNode* cur = GetTypedNode(i);
|
||||
if (cur->GetLevel() == untilLevel)
|
||||
highestShiftSums[i] = GetHighestShiftSum(cur);
|
||||
}
|
||||
|
||||
for (unsigned8 level = untilLevel; level-- > 0;)
|
||||
{
|
||||
//tbb::parallel_for((unsigned32)0, GetNodeCount(), [&](const unsigned32 i)
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
UniqueIndexNode* cur = GetTypedNode(i);
|
||||
if (cur->GetLevel() == level)
|
||||
{
|
||||
unsigned8 childCount = cur->GetChildCount();
|
||||
unsigned64 curShift = 1;
|
||||
unsigned64* childShifts = new unsigned64[childCount];
|
||||
unsigned* curChildren = cur->GetChildren();
|
||||
// Only calculate non-zero shifts for the nodes that need this
|
||||
if (/*childCount == 1 ||*/ level >= mUniqueShiftLevel) {
|
||||
for (ChildIndex c = childCount; c-- > 0;)
|
||||
{
|
||||
childShifts[c] = 0;
|
||||
curShift += highestShiftSums[curChildren[c]];
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (ChildIndex c = childCount; c-- > 0;)
|
||||
{
|
||||
childShifts[c] = curShift;
|
||||
curShift += highestShiftSums[curChildren[c]] + 1;
|
||||
}
|
||||
}
|
||||
cur->SetEdgeMaterials(childShifts);
|
||||
delete[] childShifts;
|
||||
highestShiftSums[i] = curShift - 1;
|
||||
}
|
||||
}//);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<unsigned64> GetUniqueNodeIndices(const unsigned8& untilLevel) const
|
||||
{
|
||||
// Now go top down through the tree and calculate the indices of each node
|
||||
std::vector<unsigned64> uniqueNodeIndices(GetNodeCount(), 0);
|
||||
for (unsigned8 level = 0; level <= untilLevel; level++)
|
||||
{
|
||||
//tbb::parallel_for(unsigned32(1), GetNodeCount(), [&](const unsigned32& i)
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
const UniqueIndexNode* cur = GetTypedNode(i);
|
||||
if (cur->GetLevel() == level)
|
||||
{
|
||||
unsigned8 childCount = cur->GetChildCount();
|
||||
unsigned64* childShifts = cur->GetEdgeMaterials();
|
||||
unsigned32* curChildren = cur->GetChildren();
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
unsigned64 childIndex = uniqueNodeIndices[i] + childShifts[c];
|
||||
uniqueNodeIndices[curChildren[c]] = childIndex;
|
||||
}
|
||||
}
|
||||
}//);
|
||||
}
|
||||
|
||||
return uniqueNodeIndices;
|
||||
}
|
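// Worked example (illustrative, ignoring the collapsed levels where all shifts are 0): the root has unique
// index 0. Take a node with two children whose subtrees span 3 and 2 indices respectively; UpdateShifts
// walks the children backwards, so the second child gets shift 1 (indices 1..2) and the first child gets
// shift 3 (indices 3..5). Adding the parent's unique index to each edge shift, as done above, therefore
// gives every node its own contiguous, non-overlapping index range.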
||||
|
||||
public:
|
||||
// Creates a UniqueIndexTree with "maxLevel" levels.
|
||||
// collapsedMaterialLevels are used to decide how many levels of the tree will have the same materials as their parents.
|
||||
// Note that the nodeMaterialsTexture will be deleted if the tree is deleted.
|
||||
UniqueIndexTree(unsigned8 maxLevel, CompressedTexture<T>* nodeMaterialsTexture, unsigned32 collapsedMaterialLevels) :
|
||||
Tree<UniqueIndexNode>(maxLevel),
|
||||
mShiftBytesPerLevel(NULL),
|
||||
mNodeMaterials(nodeMaterialsTexture),
|
||||
mUniqueShiftLevel(maxLevel - collapsedMaterialLevels),
|
||||
mBlockPointerPool(std::vector<unsigned8>()),
|
||||
mBlockPointerPoolLoaded(false),
|
||||
mBlockPool(std::vector<unsigned8>()),
|
||||
mBlockPoolLoaded(false),
|
||||
mAdditionalProperties(std::map<std::string, std::string>()),
|
||||
mAdditionalPropertiesLoaded(false)
|
||||
{
|
||||
mLeafsAreEqual = true;
|
||||
}
|
||||
|
||||
// Creates a UniqueIndexTree with "maxLevel" levels.
|
||||
// Note that the nodeMaterialsTexture will be deleted if the tree is deleted.
|
||||
UniqueIndexTree(unsigned8 maxLevel, CompressedTexture<T>* nodeMaterialsTextureL)
|
||||
: UniqueIndexTree(maxLevel, nodeMaterialsTexture, 0)
|
||||
{}
|
||||
|
||||
~UniqueIndexTree() override {
|
||||
if (mShiftBytesPerLevel != NULL)
|
||||
delete mShiftBytesPerLevel;
|
||||
ClearNodeMaterials();
|
||||
}
|
||||
|
||||
void SetNodeValues(const std::vector<T>& materialPointers, const size_t& fromIndex = 0)
|
||||
{
|
||||
mNodeMaterials->SetTexture(materialPointers, fromIndex);
|
||||
}
|
||||
|
||||
std::vector<T> GetNodeValues(size_t fromIndex = 0) const
|
||||
{
|
||||
return mNodeMaterials->GetTexture(fromIndex);
|
||||
}
|
||||
|
||||
T GetNodeValue(size_t i) const
|
||||
{
|
||||
return mNodeMaterials->operator[](i);
|
||||
}
|
||||
|
||||
size_t GetMaterialCount() const
|
||||
{
|
||||
return GetHighestShiftSum(GetRoot());
|
||||
}
|
||||
|
||||
// Call this method after appending.
|
||||
// Replacers should be a map from materials in the appended tree to materials in this tree.
|
||||
// If a certain item is not found in the replacer map, it will be ignored
|
||||
void AppendPostProcess(glm::uvec3 coordinates, unsigned8 level, UniqueIndexTree* tree, std::unordered_map<T, T> replacers)
|
||||
{
|
||||
// Unpack all the blocks after where the new blocks should be inserted
|
||||
std::vector<T> newNodeMaterials = tree->GetNodeValues(1);
|
||||
// Replace the material pointers from the other tree to pointers to the same materials in this tree
|
||||
tbb::parallel_for(size_t(0), newNodeMaterials.size(), [&](size_t i)
|
||||
{
|
||||
auto replacer = replacers.find(newNodeMaterials[i]);
|
||||
if (replacer != replacers.end())
|
||||
newNodeMaterials[i] = replacer->second;
|
||||
});
|
||||
|
||||
|
||||
// Copy the shifts from the root of the appended tree to the node in the new tree
|
||||
UniqueIndexNode* appendedRootCopy = (UniqueIndexNode*)this->AddNode(coordinates, level);
|
||||
|
||||
// Update the shifts in the current tree on the levels above the appended tree
|
||||
UpdateShifts(level + 1);
|
||||
auto uniqueIndices = GetUniqueNodeIndices(level + 1);
|
||||
|
||||
unsigned64 pointerIndex = uniqueIndices[appendedRootCopy->GetIndex()] + 1;
|
||||
std::vector<T> existingPointers = this->GetNodeValues(pointerIndex);
|
||||
assert(existingPointers.size() <= (size_t)((1 << level) * (1 << level) * (1 << level)));
|
||||
newNodeMaterials.insert(newNodeMaterials.end(), existingPointers.begin(), existingPointers.end());
|
||||
mNodeMaterials->SetTexture(newNodeMaterials, pointerIndex);
|
||||
|
||||
// Update the shift sizes per level to be the maximum of the current and the new tree
|
||||
for (unsigned8 l = 0; l < tree->GetMaxLevel(); l++)
|
||||
this->mShiftBytesPerLevel->at(l + level) = std::max(tree->mShiftBytesPerLevel->at(l), this->mShiftBytesPerLevel->at(l + level));
|
||||
}
|
||||
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacers)
|
||||
{
|
||||
mNodeMaterials->ReplaceMaterials(replacers);
|
||||
}
|
||||
|
||||
// Takes the current material texture and compresses it again
|
||||
void Recompress()
|
||||
{
|
||||
mNodeMaterials->Recompress();
|
||||
}
|
||||
|
||||
// Will build a unique index tree with the structure taken from the given tree. The materials
|
||||
// should be given as an additional array that contains, at the index of each node,
|
||||
// its material.
|
||||
// This will consume (and delete) both the given tree and the given material array!
|
||||
void BaseOn(BaseTree* tree, std::vector<T>& materialPerNode)
|
||||
{
|
||||
std::vector<T> nodeValues = GetMappedMaterialTexture(tree, materialPerNode);
|
||||
// Free some memory
|
||||
materialPerNode.clear();
|
||||
materialPerNode.shrink_to_fit();
|
||||
|
||||
MoveShallowWithOneLeaf(tree);
|
||||
UpdateShifts(GetMaxLevel());
|
||||
|
||||
// Assert that the maximum index that can be reached using the shifts is equal to the maximum index in the material texture
|
||||
assert(GetHighestShiftSum(GetRoot()) == nodeValues.size() - 1);
|
||||
|
||||
// Add all materials to the material table (BIG texture)
|
||||
SetNodeValues(nodeValues);
|
||||
|
||||
|
||||
// Let's check if the compression works correctly
|
||||
// Method 1: Unpack the whole material at once and check that:
|
||||
//auto check = GetNodeValues();
|
||||
//assert(check.size() == nodeMaterialPointers.size());
|
||||
//for (size_t i = 0; i < check.size(); i++)
|
||||
// assert(nodeMaterialPointers[i] == check[i]);
|
||||
|
||||
// Method 2: Unpack per value
|
||||
assert(mNodeMaterials->size() == nodeValues.size());
|
||||
for (size_t i = 0; i < mNodeMaterials->size(); i++)
|
||||
{
|
||||
T value = mNodeMaterials->operator[](i);
|
||||
assert(nodeValues[i] == value);
|
||||
}
|
||||
|
||||
CalculateShiftBytesPerLevel();
|
||||
}
|
||||
|
||||
void ReplaceCompressedTexture(CompressedTexture<T>* replacementCompressor)
|
||||
{
|
||||
std::vector<T> materials = GetNodeValues();
|
||||
delete mNodeMaterials;
|
||||
mNodeMaterials = replacementCompressor;
|
||||
SetNodeValues(materials);
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetBlockPointerPool() override
|
||||
{
|
||||
if (mBlockPointerPoolLoaded) return mBlockPointerPool;
|
||||
|
||||
GetBlockPointerPoolSize();
|
||||
mBlockPointerPool = mNodeMaterials->GetAdditionalTexturePool();
|
||||
mBlockPointerPoolLoaded = true;
|
||||
ClearPooledNodeMaterials();
|
||||
return mBlockPointerPool;
|
||||
}
|
||||
|
||||
// Assuming 3D texture with one pointer per node (for example GL_R32UI)
|
||||
size_t GetBlockPointerPoolSize() override
|
||||
{
|
||||
if (mBlockPointerPoolLoaded) return mBlockPointerPool.size();
|
||||
return mNodeMaterials->GetAdditionalTexturePoolSize();
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetBlockPool() override
|
||||
{
|
||||
if (mBlockPoolLoaded) return mBlockPool;
|
||||
|
||||
GetBlockPoolSize();
|
||||
mBlockPool = mNodeMaterials->GetTexturePool();
|
||||
mBlockPoolLoaded = true;
|
||||
ClearPooledNodeMaterials();
|
||||
return mBlockPool;
|
||||
}
|
||||
|
||||
size_t GetBlockPoolSize() override
|
||||
{
|
||||
if (mBlockPoolLoaded) return mBlockPool.size();
|
||||
return mNodeMaterials->GetTexturePoolSize();
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> GetAdditionalProperties() override
|
||||
{
|
||||
if (mAdditionalPropertiesLoaded) return mAdditionalProperties;
|
||||
mAdditionalProperties = mNodeMaterials->GetAdditionalProperties();
|
||||
mAdditionalPropertiesLoaded = true;
|
||||
ClearPooledNodeMaterials();
|
||||
return mAdditionalProperties;
|
||||
}
|
||||
|
||||
bool HasAdditionalPool() const override{ return true; }
|
||||
protected:
|
||||
void CalculateShiftBytesPerLevel()
|
||||
{
|
||||
if (mShiftBytesPerLevel != NULL) delete mShiftBytesPerLevel;
|
||||
#ifdef offset_sizes_per_level
|
||||
mShiftBytesPerLevel = new std::vector<unsigned8>(GetMaxLevel() + 1);
|
||||
std::vector<unsigned64> maxShiftPerLevel(GetMaxLevel() + 1);
|
||||
for (unsigned32 i = 0; i < GetNodeCount(); i++)
|
||||
{
|
||||
UniqueIndexNode* node = GetTypedNode(i);
|
||||
for (ChildIndex child = 0; child < 8; child++)
|
||||
{
|
||||
if (node->HasChild(child))
|
||||
{
|
||||
unsigned64 shift = GetShift(node, child);
|
||||
unsigned8 level = node->GetLevel();
|
||||
if (shift > maxShiftPerLevel[level]) maxShiftPerLevel[level] = shift;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned8 level = 0; level <= GetMaxLevel(); level++)
|
||||
(*mShiftBytesPerLevel)[level] = BitHelper::RoundToBytes(BitHelper::Log2Ceil(maxShiftPerLevel[level] + 1)) / 8;
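// Illustration (assuming RoundToBytes rounds a bit count up to a multiple of 8): if the largest
// shift on a level is 1000, Log2Ceil(1001) = 10 bits, rounded up to 16 bits, i.e. 2 bytes per shift.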
|
||||
#else
|
||||
mShiftBytesPerLevel = new std::vector<unsigned8>(GetMaxLevel() + 1, 4);
|
||||
#endif
|
||||
}
|
||||
|
||||
unsigned8 GetAdditionalBytesPerPointer(unsigned8 level) const override
|
||||
{
|
||||
assert(mShiftBytesPerLevel != NULL);
|
||||
return mShiftBytesPerLevel->operator[](level);
|
||||
}
|
||||
|
||||
bool LastChildHasAdditionalBytes() const override {
|
||||
#ifdef implicit_store_first_offset
|
||||
return false;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetAdditionalPointerBytes(const Node* node, ChildIndex child) const override
|
||||
{
|
||||
return BitHelper::SplitInBytes(GetShift((UniqueIndexNode*)node, child), GetAdditionalBytesPerPointer(node->GetLevel()));
|
||||
}
|
||||
|
||||
virtual void WriteAdditionalUniqueIndexTreeProperties(std::ostream& file) {}
|
||||
virtual void ReadAdditionalUniqueIndexTreeProperties(std::istream& file) {}
|
||||
|
||||
void WriteProperties(std::ostream& file) override {
|
||||
WriteAdditionalUniqueIndexTreeProperties(file);
|
||||
mNodeMaterials->WriteToFile(file);
|
||||
|
||||
}
|
||||
void ReadProperties(std::istream& file) override {
|
||||
ReadAdditionalUniqueIndexTreeProperties(file);
|
||||
mNodeMaterials->ReadFromFile(file);
|
||||
}
|
||||
|
||||
void FinalizeReadTree() override
|
||||
{
|
||||
CalculateShiftBytesPerLevel();
|
||||
}
|
||||
|
||||
void WriteAdditionalPoolProperties(std::ostream& file) override { WriteBlockPointerPool(file); WriteBlockPool(file); WriteAdditionalProperties(file); }
|
||||
void ReadAdditionalPoolProperties(std::istream& file) override { ReadBlockPointerPool(file); ReadBlockPool(file); ReadAdditionalProperties(file); }
|
||||
|
||||
void PrintDebugInfo() const override
|
||||
{
|
||||
printf("Leaf voxel count: %llu\n", GetLeafVoxelCount());
|
||||
//printf("Total voxel count: %llu\n", GetOctreeNodeCount());
|
||||
printf("Max node index: %llu\n", GetHighestShiftSum(GetRoot()));
|
||||
auto nodesPerLevel = GetNodesPerLevel();
|
||||
for (unsigned8 level = 0; level <= GetMaxLevel(); level++)
|
||||
printf("Shifts in level %u with %llu nodes: %u bytes\n", level, (unsigned64)nodesPerLevel[level], GetAdditionalBytesPerPointer(level));
|
||||
|
||||
// Print the childshifts of the root for debug purposes
|
||||
unsigned32* rootChildren = GetRoot()->GetChildren();
|
||||
unsigned64* rootEdgeMaterials = GetRoot()->GetEdgeMaterials();
|
||||
for (ChildIndex c = 0; c < GetRoot()->GetChildCount(); c++)
|
||||
{
|
||||
printf("mChildShifts[%u] = %9llu; ->", c, rootEdgeMaterials[c]);
|
||||
const UniqueIndexNode* child = GetTypedNode(rootChildren[c]);
|
||||
for (unsigned j = 0; j < child->GetChildCount(); j++)
|
||||
printf(" %9llu", child->GetEdgeMaterials()[j]);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
//printf("Node count = %u\nOctreeNodeCount = %u\n", GetNodeCount(), GetOctreeNodeCount());
|
||||
//printf("Leaf node count = %u\n", GetLeafVoxelCount());
|
||||
}
|
||||
};
|
||||
33
Research/scene/PNG.cpp
Normal file
@@ -0,0 +1,33 @@
|
||||
#include "../inc/lodepng/lodepng.h"
|
||||
#include "PNG.h"
|
||||
|
||||
PNG::PNG(const char* png_file_path) {
|
||||
|
||||
mWidth = 0;
|
||||
mHeight = 0;
|
||||
|
||||
unsigned error = lodepng::decode(mImage, mWidth, mHeight, png_file_path);
|
||||
|
||||
if (error)
|
||||
printf("lodepng decode error %u: %s\n", error, lodepng_error_text(error));
|
||||
}
|
||||
|
||||
PNG::~PNG() {
|
||||
mImage.clear();
|
||||
}
|
||||
|
||||
bool PNG::Initialized() {
|
||||
return mWidth > 0 && mHeight > 0;
|
||||
}
|
||||
|
||||
unsigned char* PNG::Data() {
|
||||
return &mImage[0];
|
||||
}
|
||||
|
||||
unsigned PNG::W() {
|
||||
return mWidth;
|
||||
}
|
||||
|
||||
unsigned PNG::H() {
|
||||
return mHeight;
|
||||
}
|
||||
20
Research/scene/PNG.h
Normal file
@@ -0,0 +1,20 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
|
||||
class PNG {
|
||||
|
||||
public:
|
||||
PNG(const char* png_file_path);
|
||||
~PNG();
|
||||
unsigned char* Data();
|
||||
unsigned W();
|
||||
unsigned H();
|
||||
bool Initialized();
|
||||
|
||||
protected:
|
||||
|
||||
private:
|
||||
std::vector<unsigned char> mImage;
|
||||
unsigned mWidth;
|
||||
unsigned mHeight;
|
||||
};
|
||||
32
Research/scene/PointLight.cpp
Normal file
@@ -0,0 +1,32 @@
|
||||
#include "../core/Defines.h"
|
||||
#include "PointLight.h"
|
||||
|
||||
PointLight::PointLight() {
|
||||
SetPosition(0.0);
|
||||
}
|
||||
|
||||
PointLight::~PointLight() {
|
||||
|
||||
}
|
||||
|
||||
const glm::vec3& PointLight::GetPosition() const
|
||||
{
|
||||
return mPosition;
|
||||
}
|
||||
|
||||
void PointLight::SetPosition(const glm::vec3& pos)
|
||||
{
|
||||
mPosition = pos;
|
||||
}
|
||||
|
||||
void PointLight::SetPosition(double seed) {
|
||||
double theta = mPi * (.65 + .25 * sin(.9*seed));
|
||||
double phi = m2Pi * (.4*seed - int(.4*seed));
|
||||
|
||||
double x = 500.0 * sin(theta) * cos(phi);
|
||||
double y = 500.0 * cos(theta);
|
||||
double z = 500.0 * sin(theta) * sin(phi);
|
||||
|
||||
mPosition = glm::vec3(x, y, z);
|
||||
}
|
||||
|
||||
15
Research/scene/PointLight.h
Normal file
@@ -0,0 +1,15 @@
|
||||
#pragma once
|
||||
#include "../inc/glm/glm.hpp"
|
||||
|
||||
class PointLight {
|
||||
public:
|
||||
PointLight();
|
||||
~PointLight();
|
||||
const glm::vec3& GetPosition() const;
|
||||
void SetPosition(const glm::vec3& pos);
|
||||
void SetPosition(double seed);
|
||||
protected:
|
||||
|
||||
private:
|
||||
glm::vec3 mPosition;
|
||||
};
|
||||
337
Research/scene/PoolBuilder/AdaptivePointerPoolBuilder.cpp
Normal file
@@ -0,0 +1,337 @@
|
||||
#include "AdaptivePointerPoolBuilder.h"
|
||||
#include <algorithm>
|
||||
#include "../../inc/tbb/parallel_sort.h"
|
||||
|
||||
std::string AdaptivePointerPoolBuilder::GetFullFileName(const std::string& fileName) const
|
||||
{
|
||||
std::string maskBitSizes;
|
||||
for (unsigned8 maskBitSize : mMaskBits)
|
||||
maskBitSizes += std::to_string(maskBitSize);
|
||||
return fileName + ".a" + (mUseLookupTable ? "l" : "d") + std::to_string(mMaskSize) + maskBitSizes + ".pool";
|
||||
}
|
||||
|
||||
size_t GetIndexibleNodes(unsigned8 byteCount, unsigned8 maskSize)
|
||||
{
|
||||
return BitHelper::Exp2(byteCount * 8 - maskSize);
|
||||
}
|
||||
|
||||
size_t AdaptivePointerPoolBuilder::GetMaxLookupTableSize() const
|
||||
{
|
||||
size_t maxSize = 0;
|
||||
// The max lookup table size is the maximum size we can index using the consecutive pointer sizes (based on pointer bits):
|
||||
for (unsigned8 i = 0; i < BitHelper::Exp2(mMaskSize) - 1; i++)
|
||||
maxSize += GetIndexibleNodes(mMaskBits[i], mMaskSize);
|
||||
return maxSize;
|
||||
}
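// Worked example (with the default maskSize = 2 and mMaskBits = {1, 2, 3}): the loop adds
// 2^(8-2) + 2^(16-2) + 2^(24-2) = 64 + 16384 + 4194304 = 4210752 indexable lookup table entries;
// the remaining mask value (11) is used for full, direct pointers.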
|
||||
|
||||
// Given an index in the lookup table, calculates the size of a pointer to that index (taking the mask size into account)
|
||||
unsigned8 AdaptivePointerPoolBuilder::GetMinimumSizeOfPointer(const unsigned32& pointer) const {
|
||||
size_t rangeStart = 0;
|
||||
size_t rangeEnd = 0;
|
||||
for (unsigned8 i = 0; i < BitHelper::Exp2(mMaskSize) - 1; i++)
|
||||
{
|
||||
unsigned8 size = mMaskBits[i];
|
||||
rangeEnd = rangeStart + GetIndexibleNodes(size, mMaskSize);
|
||||
if (pointer >= rangeStart && pointer < rangeEnd)
|
||||
return size;
|
||||
rangeStart = rangeEnd;
|
||||
}
|
||||
return 4;
|
||||
}
|
||||
|
||||
unsigned32 AdaptivePointerPoolBuilder::GetShortenedPointerTo(const unsigned32& pointer, unsigned8& mask) const
|
||||
{
|
||||
size_t rangeStart = 0;
|
||||
size_t rangeEnd = 0;
|
||||
for (unsigned8 i = 0; i < BitHelper::Exp2(mMaskSize) - 1; i++)
|
||||
{
|
||||
mask = i;
|
||||
unsigned8 size = mMaskBits[i];
|
||||
rangeEnd = rangeStart + GetIndexibleNodes(size, mMaskSize);
|
||||
if (pointer >= rangeStart && pointer < rangeEnd) break;
|
||||
rangeStart = rangeEnd;
|
||||
}
|
||||
if (pointer >= rangeEnd)
|
||||
mask = BitHelper::GetLSMask<unsigned8>(0, mMaskSize);
|
||||
return pointer - (unsigned32)rangeStart;
|
||||
}
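// Worked example (maskSize = 2, mMaskBits = {1, 2, 3}): the sections cover [0, 64), [64, 16448)
// and [16448, 4210752). A pointer of 70 falls in the second section, so the mask becomes 1 and
// the returned shortened pointer is 70 - 64 = 6.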
|
||||
|
||||
void AdaptivePointerPoolBuilder::InitBuild(const BaseTree* tree)
|
||||
{
|
||||
if (!mBuildInitiated)
|
||||
CalculateEverything(tree, mPointerSizes, mPointerSizePerLevel, mNodeLevelOffsets, mParentCounts, mLookupTableNodesPerLevel, mNodeWithinLookupTable);
|
||||
mBuildInitiated = true;
|
||||
}
|
||||
|
||||
void AdaptivePointerPoolBuilder::CalculateEverything(const BaseTree* tree, std::vector<unsigned8>& pointerSizes, std::vector<unsigned8>& pointerSizesPerLevel, std::vector<unsigned32>& levelOffsets, std::vector<size_t>& parentCounts, std::vector<std::vector<unsigned32>>& lookupTableNodesPerLevel, BoolArray& nodeWithinLookupTable) const
|
||||
{
|
||||
// Calculate the pointer sizes and which nodes are in the lookup table.
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
|
||||
// Find out how many parents all nodes have
|
||||
parentCounts = tree->GetParentCounts();
|
||||
|
||||
// Sort the nodes on parent counts per level.
|
||||
// Do this by sorting a map. This maps indices in the GPU pool to indices in the tree.
|
||||
// e.g. to find the node at position 6 in the GPU pool, use tree->GetNode(nodeMap[6]).
|
||||
std::vector<unsigned32> nodeMap(nodeCount);
|
||||
tbb::parallel_for((unsigned32)0, nodeCount, [&](const unsigned32& i){ nodeMap[i] = i; });
|
||||
OrderNodes(tree, nodeMap, parentCounts);
|
||||
|
||||
// Now find the indices where the levels start
|
||||
levelOffsets = std::vector<unsigned32>(depth + 2);
|
||||
unsigned8 curLevel = 255;
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeMap[i]);
|
||||
if (node->GetLevel() != curLevel) levelOffsets[++curLevel] = i;
|
||||
}
|
||||
levelOffsets[depth + 1] = (unsigned32)tree->GetNodeCount();
|
||||
|
||||
// Go bottom-up through the tree. For each level, calculate the size of the level and the size of normal (byte) pointers.
|
||||
// Also calculate the size of the lookup table and the index until which nodes are put in the lookup table.
|
||||
// Store the index of the last lookup table node to decide the sizes of the lookup table pointers
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
lookupTableNodesPerLevel = std::vector<std::vector<unsigned32>>(depth + 1);
|
||||
nodeWithinLookupTable = BoolArray(nodeCount);
|
||||
}
|
||||
std::vector<unsigned32> nodePointerWithinLevel;
|
||||
if (!mUseLookupTable) nodePointerWithinLevel.resize(nodeCount);
|
||||
|
||||
pointerSizesPerLevel = std::vector<unsigned8>(depth + 1);
|
||||
pointerSizes = std::vector<unsigned8>(nodeCount);
|
||||
|
||||
size_t maxLookupTableSize = GetMaxLookupTableSize();
|
||||
|
||||
for (unsigned8 level = depth + 1; level-- > 0;)
|
||||
{
|
||||
unsigned32 levelStart = levelOffsets[level];
|
||||
unsigned32 levelEnd = levelOffsets[level + 1];
|
||||
size_t levelSize = 0;
|
||||
std::vector<unsigned32> lookupTable;
|
||||
// Calculate the size of this level and which nodes are put in the lookup table
|
||||
for (unsigned32 i = levelStart; i < levelEnd; i++)
|
||||
{
|
||||
unsigned32 nodeId = nodeMap[i];
|
||||
if (!mUseLookupTable) nodePointerWithinLevel[nodeId] = (unsigned32)levelSize;
|
||||
// If the node has more than 2 parents, it's worth putting in the lookup table
|
||||
if (mUseLookupTable && parentCounts[nodeId] > 2 && i - levelStart < maxLookupTableSize)
|
||||
{
|
||||
lookupTable.push_back(nodeId);
|
||||
nodeWithinLookupTable.Set(nodeId, true);
|
||||
}
|
||||
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
unsigned32* children = node->GetChildren();
|
||||
size_t nodeSize = GetBaseNodeSize(tree, nodeId);
|
||||
for (ChildIndex c = 0; c < node->GetChildCount(); c++)
|
||||
nodeSize += pointerSizes[children[c]];
|
||||
levelSize += nodeSize;
|
||||
}
|
||||
// Now that we know the level size and the lookup table layout, we can calculate the size of pointers to nodes in this level
|
||||
unsigned8 levelPointerSize = BitHelper::RoundToBytes(BitHelper::Log2Ceil(levelSize) + mMaskSize) / 8;
|
||||
if (levelPointerSize == 0) levelPointerSize = 1; // Pointers should be at least 1 byte
|
||||
pointerSizesPerLevel[level] = levelPointerSize;
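// Illustration of the calculation above (assuming a 2-bit mask): for a level of 100000 bytes,
// Log2Ceil gives 17 bits; adding the 2 mask bits gives 19, which rounds up to 24 bits = 3 bytes.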
|
||||
|
||||
if (mUseLookupTable && levelPointerSize <= 1) // We can't save space with a lookup table
|
||||
{
|
||||
lookupTable.clear();
|
||||
for (unsigned32 i = levelStart; i < levelEnd; i++) nodeWithinLookupTable.Set(nodeMap[i], false);
|
||||
}
|
||||
|
||||
//// Hack: put everything in the lookup table.
|
||||
//nodeWithinLookupTable.SetRange(levelStart, levelEnd, true);
|
||||
//lookupTable.clear();
|
||||
//for (unsigned32 i = levelStart; i < levelEnd; i++) lookupTable.push_back(nodeMap[i]);
|
||||
|
||||
for (unsigned32 i = levelStart; i < levelEnd; i++)
|
||||
{
|
||||
unsigned32 nodeId = nodeMap[i];
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
if (nodeWithinLookupTable[nodeId])
|
||||
// Since the nodes are inserted in the lookup table in order, we can calculate a node's index in the lookup table
|
||||
// using i - levelStart.
|
||||
pointerSizes[nodeId] = GetMinimumSizeOfPointer(i - levelStart);
|
||||
else
|
||||
pointerSizes[nodeId] = levelPointerSize;
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned8 minimumPointerSize = GetMinimumSizeOfPointer(nodePointerWithinLevel[nodeId]);
|
||||
if (minimumPointerSize < BitHelper::Exp2(mMaskSize)) // Only use a smaller pointer if the size of the pointer can be indicated by the mask
|
||||
pointerSizes[nodeId] = minimumPointerSize;
|
||||
else
|
||||
pointerSizes[nodeId] = levelPointerSize;
|
||||
}
|
||||
}
|
||||
if (mUseLookupTable)
|
||||
lookupTableNodesPerLevel[level] = lookupTable;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void AdaptivePointerPoolBuilder::OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeMap) const
|
||||
{
|
||||
OrderNodes(tree, nodeMap, mParentCounts);
|
||||
}
|
||||
|
||||
void AdaptivePointerPoolBuilder::OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeMap, const std::vector<size_t>& parentCounts)
|
||||
{
|
||||
// First order on level (asc), then on number of parents (desc), so that the most used nodes have the smallest pointers
|
||||
tbb::parallel_sort(nodeMap.begin(), nodeMap.end(), [&](const unsigned32& i1, const unsigned32& i2)
|
||||
{
|
||||
bool res = false;
|
||||
unsigned8 lvl1 = tree->GetNode(i1)->GetLevel();
|
||||
unsigned8 lvl2 = tree->GetNode(i2)->GetLevel();
|
||||
if (lvl1 != lvl2) res = lvl1 < lvl2;
|
||||
else if (parentCounts[i1] != parentCounts[i2]) res = (parentCounts[i1] > parentCounts[i2]);
|
||||
// If the level and number of parents is the same, then, for consistency, order on nodeID.
|
||||
else res = i1 < i2;
|
||||
return res;
|
||||
});
|
||||
}
|
||||
|
||||
void AdaptivePointerPoolBuilder::FinishBuild(const BaseTree* tree)
|
||||
{
|
||||
ClearVariables();
|
||||
mBuildInitiated = false;
|
||||
}
|
||||
|
||||
unsigned8 AdaptivePointerPoolBuilder::GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeIndex) const
|
||||
{
|
||||
return mPointerSizes[nodeIndex];
|
||||
}
|
||||
|
||||
size_t AdaptivePointerPoolBuilder::GetPoolInfoSize(const BaseTree* tree) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
// The pool info consists of the level offsets, the full-pointer sizes per level, one mask-description byte and (optionally) pointers to the starts of the lookup tables:
|
||||
return 4 * (depth + 1) // Size of the level offsets
|
||||
+ (depth + 1) // And the sizes of full pointers per level
|
||||
+ 1 // 1 Byte to indicate the pointer sizes that belong to each mask (e.g. 00 -> 1, 01 -> 2, 10 -> 3)
|
||||
+ (mUseLookupTable ? (4 * depth) : 0); // Size of the pointers to the starts of the lookup tables.
|
||||
// Note that the first level doesn't have a lookup table (as there are no pointers to the root).
|
||||
}
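// Worked example: for a depth-10 tree with the lookup table enabled this is
// 4 * 11 + 11 + 1 + 4 * 10 = 96 bytes of pool info.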
|
||||
|
||||
std::vector<unsigned8> AdaptivePointerPoolBuilder::GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder)
|
||||
{
|
||||
std::vector<unsigned8> res(GetPoolInfoSize(tree));
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
|
||||
// Calculate the pointer level offsets:
|
||||
mPointerLevelOffsets = std::vector<unsigned32>(depth + 1);
|
||||
for (unsigned8 level = 0; level <= depth; level++) mPointerLevelOffsets[level] = (unsigned32)nodePointers[nodeOrder[mNodeLevelOffsets[level]]];
|
||||
|
||||
// Write the level offsets
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
BitHelper::SplitInBytesAndMove(mPointerLevelOffsets[level], res, level * 4, 4);
|
||||
|
||||
// Write the pointer sizes per level (for full pointers)
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
res[(depth + 1) * 4 + level] = mPointerSizePerLevel[level];
|
||||
|
||||
// Write the byte indicating the pointer sizes per mask value
|
||||
unsigned8 maskBitsDescr = 0;
|
||||
for (unsigned8 i = 0; i < BitHelper::Exp2(mMaskSize) - 1; i++)
|
||||
maskBitsDescr |= (mMaskBits[i] - 1) << (i * 2);
|
||||
res[(depth + 1) * (4 + 1)] = maskBitsDescr;
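// Example: with mMaskBits = {1, 2, 3} this byte becomes (1-1) | ((2-1) << 2) | ((3-1) << 4) = 36 (0b100100).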
|
||||
|
||||
// Write the pointers to the lookup table starts per level
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
size_t curIndex = GetAdditionalPoolInfoStart(tree); // curIndex is the start of the lookup tables
|
||||
for (unsigned8 level = 1; level <= depth; level++)
|
||||
{
|
||||
BitHelper::SplitInBytesAndMove(curIndex, res, (depth + 1) * (4 + 1) + 1 + (level - 1) * 4, 4);
|
||||
curIndex += mLookupTableNodesPerLevel[level].size() * mPointerSizePerLevel[level];
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
size_t AdaptivePointerPoolBuilder::GetAdditionalPoolInfoSize(const BaseTree* tree) const
|
||||
{
|
||||
// Calculate the size of the lookup tables
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
size_t res = 0;
|
||||
for (unsigned8 level = 1; level <= tree->GetMaxLevel(); level++)
|
||||
res += mLookupTableNodesPerLevel[level].size() * mPointerSizePerLevel[level];
|
||||
return res;
|
||||
}
|
||||
else return 0;
|
||||
}
|
||||
|
||||
std::vector<unsigned8> AdaptivePointerPoolBuilder::GetAdditionalPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder)
|
||||
{
|
||||
std::vector<unsigned8> res(GetAdditionalPoolInfoSize(tree));
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
|
||||
// Write the lookup tables
|
||||
size_t curIndex = 0;
|
||||
for (unsigned8 level = 1; level <= depth; level++)
|
||||
{
|
||||
unsigned8 levelPointerSize = mPointerSizePerLevel[level];
|
||||
unsigned32 levelOffset = mPointerLevelOffsets[level];
|
||||
std::vector<unsigned32>& levelLookupTableNodes = mLookupTableNodesPerLevel[level];
|
||||
for (unsigned32 lookupTableNode : levelLookupTableNodes)
|
||||
{
|
||||
unsigned32 actualPointer = (unsigned32)nodePointers[lookupTableNode] - levelOffset;
|
||||
BitHelper::SplitInBytesAndMove(actualPointer, res, curIndex, levelPointerSize);
|
||||
curIndex += levelPointerSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<unsigned8> AdaptivePointerPoolBuilder::WrapPointer(const BaseTree* tree, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeIndex);
|
||||
unsigned8 level = node->GetLevel();
|
||||
unsigned8 pointerSize = mPointerSizes[nodeIndex];
|
||||
unsigned8 bitPointerSize = pointerSize * 8;
|
||||
unsigned32 mask = (unsigned32)BitHelper::Exp2(mMaskSize) - 1;
|
||||
unsigned32 actualPointer = pointer - mPointerLevelOffsets[level];
|
||||
if (mUseLookupTable)
|
||||
{
|
||||
if (mNodeWithinLookupTable[nodeIndex])
|
||||
{
|
||||
// Find the index of the node within the lookup table
|
||||
size_t lookupTablePointer = indexInPool - mNodeLevelOffsets[level];
|
||||
assert(mLookupTableNodesPerLevel[level][lookupTablePointer] == nodeIndex);
|
||||
unsigned8 sectionMask;
|
||||
actualPointer = GetShortenedPointerTo((unsigned32)lookupTablePointer, sectionMask);
|
||||
mask = sectionMask;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned8 sectionMask;
|
||||
actualPointer = GetShortenedPointerTo(actualPointer, sectionMask);
|
||||
mask = sectionMask;
|
||||
}
|
||||
assert(actualPointer < BitHelper::Exp2(bitPointerSize - mMaskSize));
|
||||
assert(mask < BitHelper::Exp2(mMaskSize));
|
||||
unsigned32 pointerWithMask = actualPointer | (mask << (bitPointerSize - mMaskSize));
|
||||
return BitHelper::SplitInBytes(pointerWithMask, pointerSize);
|
||||
}
|
||||
|
||||
|
||||
void AdaptivePointerPoolBuilder::ClearVariables()
|
||||
{
|
||||
mPointerSizes.clear(); mPointerSizes.shrink_to_fit();
|
||||
mPointerSizePerLevel.clear(); mPointerSizePerLevel.shrink_to_fit();
|
||||
mPointerLevelOffsets.clear(); mPointerLevelOffsets.shrink_to_fit();
|
||||
mNodeLevelOffsets.clear(); mNodeLevelOffsets.shrink_to_fit();
|
||||
mParentCounts.clear(); mParentCounts.shrink_to_fit();
|
||||
mLookupTableNodesPerLevel.clear(); mLookupTableNodesPerLevel.shrink_to_fit();
|
||||
mNodeWithinLookupTable.Resize(0);
|
||||
}
|
||||
65
Research/scene/PoolBuilder/AdaptivePointerPoolBuilder.h
Normal file
@@ -0,0 +1,65 @@
|
||||
#pragma once
|
||||
#include "BaseTreePoolBuilder.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
|
||||
class AdaptivePointerPoolBuilder : public BaseTreePoolBuilder
|
||||
{
|
||||
public:
|
||||
AdaptivePointerPoolBuilder(bool useLookupTable, unsigned8 maskSize, unsigned8 size1 = 1, unsigned8 size2 = 2, unsigned8 size3 = 3) : BaseTreePoolBuilder(), mMaskSize(maskSize), mMaskBits(std::vector<unsigned8>{size1, size2, size3}), mUseLookupTable(useLookupTable)
|
||||
{
|
||||
assert(maskSize == 1 || maskSize == 2);
|
||||
assert(size1 <= 4 && size2 <= 4 && size3 <= 4);
|
||||
}
|
||||
|
||||
virtual ~AdaptivePointerPoolBuilder() override {}
|
||||
|
||||
std::string GetFullFileName(const std::string& fileName) const override;
|
||||
protected:
|
||||
void InitBuild(const BaseTree* tree) override;
|
||||
void FinishBuild(const BaseTree* tree) override;
|
||||
bool WordAligned() const override { return false; }
|
||||
|
||||
unsigned8 GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeIndex) const override;
|
||||
std::vector<unsigned8> WrapPointer(const BaseTree* tree, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const override;
|
||||
|
||||
size_t GetPoolInfoSize(const BaseTree* tree) const override;
|
||||
std::vector<unsigned8> GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) override;
|
||||
|
||||
size_t GetAdditionalPoolInfoSize(const BaseTree* tree) const override;
|
||||
std::vector<unsigned8> GetAdditionalPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) override;
|
||||
|
||||
void OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder) const override;
|
||||
static void OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder, const std::vector<size_t>& parentCounts);
|
||||
|
||||
void ClearVariables();
|
||||
|
||||
size_t GetMaxLookupTableSize() const;
|
||||
unsigned8 GetMinimumSizeOfPointer(const unsigned32& index) const;
|
||||
// Calculates in which "section" this pointer falls, wraps the pointer to an offset within that section, and returns the mask.
|
||||
unsigned32 GetShortenedPointerTo(const unsigned32& pointer, unsigned8& mask) const;
|
||||
void CalculateEverything(const BaseTree* tree, std::vector<unsigned8>& pointerSizes, std::vector<unsigned8>& pointerSizesPerLevel, std::vector<unsigned32>& levelOffsets, std::vector<size_t>& parentCounts, std::vector<std::vector<unsigned32>>& lookupTableNodesPerLevel, BoolArray& nodeWithinLookupTable) const;
|
||||
|
||||
// Size of the mask used to indicate the size of pointers
|
||||
const unsigned8 mMaskSize;
|
||||
// Indicates how many bytes each mask value stands for. The default is "1, 2, 3", but
|
||||
// one could imagine configurations such as "1, 1, 2", where 00 indicates the first 64 nodes, 01 the next 64 nodes, and 10 the next 16K nodes.
|
||||
const std::vector<unsigned8> mMaskBits;
|
||||
const bool mUseLookupTable;
|
||||
|
||||
// Variables used during the current build
|
||||
|
||||
// Pointer sizes for each individual node within the tree
|
||||
std::vector<unsigned8> mPointerSizes;
|
||||
// Pointer sizes for direct (byte precision) pointers per level
|
||||
std::vector<unsigned8> mPointerSizePerLevel;
|
||||
// Level offsets as the index of the first node in each level
|
||||
std::vector<unsigned32> mNodeLevelOffsets;
|
||||
std::vector<unsigned32> mPointerLevelOffsets;
|
||||
//std::vector<unsigned32> mLevelOffsets;
|
||||
std::vector<size_t> mParentCounts;
|
||||
std::vector<std::vector<unsigned32>> mLookupTableNodesPerLevel;
|
||||
BoolArray mNodeWithinLookupTable;
|
||||
|
||||
bool mBuildInitiated = false;
|
||||
};
|
||||
|
||||
66
Research/scene/PoolBuilder/BasePoolBuilder.h
Normal file
@@ -0,0 +1,66 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include <stdio.h>
|
||||
#include <fstream>
|
||||
|
||||
#include "../../core/Defines.h"
|
||||
|
||||
#include "../../core/Serializer.h"
|
||||
|
||||
// Pool builder base class that can be used to build a pool for a specific kind of tree. The template parameter indicates the tree type.
|
||||
template<class T>
|
||||
class BasePoolBuilder
|
||||
{
|
||||
public:
|
||||
virtual ~BasePoolBuilder() {}
|
||||
|
||||
virtual size_t GetPoolSize(const T* tree) = 0;
|
||||
virtual bool BuildPool(const T* tree, std::vector<unsigned8>& pool) = 0;
|
||||
virtual bool VerifyPool(std::vector<unsigned8>& pool, const unsigned8& depth) const = 0;
|
||||
virtual std::string GetFullFileName(const std::string& fileName) const = 0;
|
||||
|
||||
virtual bool ReadPool(const std::string& fileName, std::vector<unsigned8>& pool) const
|
||||
{
|
||||
std::string binFileName = GetFullFileName(fileName);
|
||||
std::ifstream nodePoolInFile(binFileName, std::ios::binary);
|
||||
|
||||
if (nodePoolInFile.good()) {
|
||||
|
||||
// Overwrite the whole current node pool with the cached file contents
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Deserialize(pool, nodePoolInFile);
|
||||
nodePoolInFile.close();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool WritePool(const std::string& fileName, const std::vector<unsigned8>& pool) const
|
||||
{
|
||||
std::string binFileName = GetFullFileName(fileName);
|
||||
|
||||
if (!pool.empty()) {
|
||||
std::ofstream nodePoolOutFile(binFileName, std::ios::binary);
|
||||
Serializer<std::vector<unsigned8>, unsigned64>::Serialize(pool, nodePoolOutFile);
|
||||
nodePoolOutFile.close();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool BuildOrReadPool(const T* tree, const std::string& fileName, std::vector<unsigned8>& pool)
|
||||
{
|
||||
if (ReadPool(fileName, pool)) return true;
|
||||
bool res = BuildPool(tree, pool);
|
||||
if (res) WritePool(fileName, pool);
|
||||
return res;
|
||||
}
|
||||
|
||||
bool VerifyCachedPool(const std::string& fileName, const unsigned8& depth) const
|
||||
{
|
||||
std::vector<unsigned8> pool;
|
||||
if (ReadPool(fileName, pool))
|
||||
return VerifyPool(pool, depth);
|
||||
return false;
|
||||
}
|
||||
};
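// A minimal usage sketch (the tree variable and file name are placeholders):
//   StandardPoolBuilder builder;
//   std::vector<unsigned8> pool;
//   builder.BuildOrReadPool(tree, "scene", pool); // loads "scene.s.pool" if cached, otherwise builds and writes it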
|
||||
|
||||
262
Research/scene/PoolBuilder/BaseTreePoolBuilder.cpp
Normal file
@@ -0,0 +1,262 @@
|
||||
#include "BaseTreePoolBuilder.h"
|
||||
#include <algorithm>
|
||||
#include "../../inc/tbb/parallel_sort.h"
|
||||
|
||||
//************************************
|
||||
// Calculates the minimum size of the node pool texture, so that all nodes + pointers fit
|
||||
//************************************
|
||||
size_t BaseTreePoolBuilder::GetPoolSize(const BaseTree* tree)
|
||||
{
|
||||
return GetMinimumNodePoolTexelCount(tree);
|
||||
}
|
||||
|
||||
size_t BaseTreePoolBuilder::GetMinimumNodePoolTexelCount(const BaseTree* tree)
|
||||
{
|
||||
assert(tree != NULL);
|
||||
if (!mIsBuilding)
|
||||
InitBuild(tree);
|
||||
size_t texelCount = GetTreeInfoBytesSize(tree);
|
||||
for (unsigned32 i = 0; i < (unsigned32)tree->GetNodeCount(); i++)
|
||||
texelCount += GetNodeSize(tree, i);
|
||||
if (!mIsBuilding)
|
||||
FinishBuild(tree);
|
||||
return texelCount;
|
||||
}
|
||||
|
||||
unsigned32 BaseTreePoolBuilder::GetTreeInfoBytesSize(const BaseTree* tree) const
|
||||
{
|
||||
assert(tree != NULL);
|
||||
bool hasAdditionalPointerBytes = HasAdditionalBytesPerPointer(tree);
|
||||
bool hasAdditionalBytesPerNode = HasAdditionalBytesPerNode(tree);
|
||||
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
size_t treeInfoSize = (hasAdditionalPointerBytes ? (depth + 1) : 0) // additional pointer sizes(if not 0, 8 bits = 1 byte per level)
|
||||
+ (hasAdditionalBytesPerNode ? (depth + 1) : 0) //and additional node sizes(if not 0, 8 bits = 1 byte per level)
|
||||
+ tree->GetAdditionalTreeInfoSize()
|
||||
+ (unsigned32)GetPoolInfoSize(tree)
|
||||
+ (unsigned32)GetAdditionalPoolInfoSize(tree);
|
||||
if (WordAligned()) RoundToWords(treeInfoSize);
|
||||
return (unsigned32)treeInfoSize;
|
||||
}
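// Illustrative example: a depth-10 tree with additional pointer bytes but no additional node
// bytes, no extra tree info, the StandardPoolBuilder's 55-byte pool info and no additional pool
// info gives 11 + 55 = 66 bytes (that builder is not word aligned, so no rounding is applied).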
|
||||
|
||||
//************************************
|
||||
// Insert all nodes into final node pool and updates pointers
|
||||
//************************************
|
||||
bool BaseTreePoolBuilder::BuildPool(const BaseTree* tree, std::vector<unsigned8>& pool) {
|
||||
if (tree == NULL) return false;
|
||||
|
||||
// Notify subclasses that the build is initiating (so they can do pre-calculations)
|
||||
InitBuild(tree);
|
||||
mIsBuilding = true;
|
||||
|
||||
// Initialize the final node pool
|
||||
size_t poolSize = GetMinimumNodePoolTexelCount(tree);
|
||||
pool.resize(poolSize, 0);
|
||||
|
||||
// Get information about the node sizes per level
|
||||
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
std::vector<unsigned8> additionalBytesPerNode = tree->GetAdditionalBytesPerNode();
|
||||
bool hasAdditionalPointerBytes = HasAdditionalBytesPerPointer(tree);
|
||||
bool hasAdditionalBytesPerNode = HasAdditionalBytesPerNode(tree);
|
||||
bool lastChildHasAdditionalBytesPerPointer = LastChildHasAdditionalBytesPerPointer(tree);
|
||||
|
||||
// Find an ordering of the nodes such that all children of a node appear after that node in memory.
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
|
||||
std::vector<unsigned32> nodeMap(nodeCount);
|
||||
for (unsigned32 i = 0; i < nodeCount; i++) nodeMap[i] = i;
|
||||
OrderNodes(tree, nodeMap);
|
||||
std::vector<unsigned32> reverseNodeMap(nodeCount);
|
||||
for (unsigned32 i = 0; i < nodeCount; i++) reverseNodeMap[nodeMap[i]] = i;
|
||||
|
||||
// Calculate all node indices beforehand (also makes calculating the level offsets easy)
|
||||
std::vector<size_t> nodePointers(nodeCount);
|
||||
size_t treeInfoSize = GetTreeInfoBytesSize(tree);
|
||||
size_t curIndex = treeInfoSize;
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
unsigned32 nodeId = nodeMap[i];
|
||||
nodePointers[nodeId] = curIndex;
|
||||
curIndex += GetNodeSize(tree, nodeId);
|
||||
}
|
||||
|
||||
// Start building the pool.
|
||||
curIndex = 0;
|
||||
// First the (subclass specific) tree/pool information
|
||||
size_t poolInfoSize = GetPoolInfoSize(tree);
|
||||
if (poolInfoSize != 0)
|
||||
{
|
||||
std::vector<unsigned8> poolInfo = GetPoolInfo(tree, nodePointers, nodeMap);
|
||||
assert(poolInfo.size() == poolInfoSize);
|
||||
std::move(poolInfo.begin(), poolInfo.end(), pool.begin());
|
||||
curIndex += poolInfoSize;
|
||||
}
|
||||
|
||||
// Write the additional node bytes (size per level)
|
||||
if (hasAdditionalBytesPerNode)
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
pool[curIndex++] = additionalBytesPerNode[level];
|
||||
|
||||
// Write the additional pointer bytes (size per level)
|
||||
if (hasAdditionalPointerBytes)
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
pool[curIndex++] = additionalBytesPerPointer[level];
|
||||
|
||||
// Write the additional tree info from the tree itself (MultiRoot uses this for root pointers)
|
||||
// TODO: Find a better way to do this; don't let the tree itself determine part of how the pool looks
|
||||
assert(GetAdditionalTreeInfoStart(tree) == curIndex);
|
||||
std::vector<unsigned8> additionalTreeInfo = tree->GetAdditionalTreeInfo(nodePointers);
|
||||
assert(additionalTreeInfo.size() == tree->GetAdditionalTreeInfoSize());
|
||||
std::move(additionalTreeInfo.begin(), additionalTreeInfo.end(), pool.begin() + curIndex);
|
||||
curIndex += additionalTreeInfo.size();
|
||||
|
||||
// Write additional pool info (e.g. lookup tables)
|
||||
assert(GetAdditionalPoolInfoStart(tree) == curIndex);
|
||||
size_t additionalPoolInfoSize = GetAdditionalPoolInfoSize(tree);
|
||||
if (additionalPoolInfoSize != 0)
|
||||
{
|
||||
std::vector<unsigned8> additionalPoolInfo = GetAdditionalPoolInfo(tree, nodePointers, nodeMap);
|
||||
assert(additionalPoolInfo.size() == additionalPoolInfoSize);
|
||||
std::move(additionalPoolInfo.begin(), additionalPoolInfo.end(), pool.begin() + curIndex);
|
||||
curIndex += additionalPoolInfoSize;
|
||||
}
|
||||
|
||||
// Assert that the nodes start at the expected position
|
||||
if (WordAligned()) RoundToWords(curIndex);
|
||||
assert(curIndex == nodePointers[nodeMap[0]]);
|
||||
|
||||
// Write the nodes
|
||||
#ifdef _DEBUG
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
#else
|
||||
tbb::parallel_for((unsigned32)0, nodeCount, [&](const unsigned32& i)
|
||||
#endif
|
||||
|
||||
{
|
||||
unsigned32 nodeId = nodeMap[i];
|
||||
std::vector<unsigned8> bytesForNode = GetBytesForNode(tree, nodeId, nodePointers, reverseNodeMap, additionalBytesPerNode, additionalBytesPerPointer);
|
||||
assert(bytesForNode.size() == GetNodeSize(tree, nodeId));
|
||||
size_t nodeIndex = nodePointers[nodeId];
|
||||
std::move(bytesForNode.begin(), bytesForNode.end(), pool.begin() + nodeIndex);
|
||||
#ifdef _DEBUG
|
||||
}
|
||||
#else
|
||||
});
|
||||
#endif
|
||||
FinishBuild(tree);
|
||||
mIsBuilding = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t BaseTreePoolBuilder::GetAdditionalTreeInfoStart(const BaseTree* tree) const
|
||||
{
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
std::vector<unsigned8> additionalBytesPerNode = tree->GetAdditionalBytesPerNode();
|
||||
bool hasAdditionalPointerBytes = false; for (auto abpp : additionalBytesPerPointer) if (abpp != 0) hasAdditionalPointerBytes = true;
|
||||
bool hasAdditionalBytesPerNode = false; for (auto abpn : additionalBytesPerNode) if (abpn != 0) hasAdditionalBytesPerNode = true;
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
size_t poolInfoSize = GetPoolInfoSize(tree);
|
||||
size_t additionalInfoSize = ((hasAdditionalBytesPerNode ? 1 : 0) + (hasAdditionalPointerBytes ? 1 : 0)) * (depth + 1);
|
||||
return poolInfoSize + additionalInfoSize;
|
||||
}
|
||||
|
||||
size_t BaseTreePoolBuilder::GetAdditionalPoolInfoStart(const BaseTree* tree) const
|
||||
{
|
||||
return GetAdditionalTreeInfoStart(tree) + tree->GetAdditionalTreeInfoSize();
|
||||
}
|
||||
|
||||
void BaseTreePoolBuilder::OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder) const
|
||||
{
|
||||
tbb::parallel_sort(nodeOrder.begin(), nodeOrder.end(), [&](const unsigned32& i1, const unsigned32& i2)
|
||||
{
|
||||
const Node* a = tree->GetNode(i1);
|
||||
const Node* b = tree->GetNode(i2);
|
||||
return a->GetLevel() < b->GetLevel();
|
||||
});
|
||||
}
|
||||
|
||||
std::vector<unsigned8> BaseTreePoolBuilder::GetBytesForNode(const BaseTree* tree, const unsigned32& nodeId, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& reverseNodeMap,
|
||||
const std::vector<unsigned8>& fullAdditionalBytesPerNode, const std::vector<unsigned8>& fullAdditionalBytesPerPointer) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
unsigned8 childCount = node->GetChildCount();
|
||||
unsigned32* children = node->GetChildren();
|
||||
unsigned8 level = node->GetLevel();
|
||||
unsigned8 additionalBytesPerNode = fullAdditionalBytesPerNode[level];
|
||||
unsigned8 additionalBytesPerPointer = fullAdditionalBytesPerPointer[level];
|
||||
|
||||
size_t nodeSize = GetNodeSize(tree, nodeId);
|
||||
std::vector<unsigned8> bytes(nodeSize);
|
||||
size_t curIndex = 0;
|
||||
|
||||
// Add the childmask
|
||||
bytes[0] = node->GetChildmask().mask;
|
||||
curIndex++;
|
||||
|
||||
if (GetAdditionalPoolInfoForNodeSize(tree, nodeId) != 0)
|
||||
{
|
||||
auto additionalPoolInfoForNode = GetAdditionalPoolInfoForNode(tree, nodeId, reverseNodeMap[nodeId]);
|
||||
std::move(additionalPoolInfoForNode.begin(), additionalPoolInfoForNode.end(), bytes.begin() + curIndex);
|
||||
assert(additionalPoolInfoForNode.size() == GetAdditionalPoolInfoForNodeSize(tree, nodeId));
|
||||
curIndex += additionalPoolInfoForNode.size();
|
||||
}
|
||||
|
||||
// Add the additional node bytes
|
||||
if (additionalBytesPerNode != 0)
|
||||
{
|
||||
std::vector<unsigned8> additionalNodeBytes = tree->GetAdditionalNodeBytes(node);
|
||||
assert(additionalNodeBytes.size() == additionalBytesPerNode);
|
||||
std::move(additionalNodeBytes.begin(), additionalNodeBytes.end(), bytes.begin() + curIndex);
|
||||
curIndex += additionalNodeBytes.size();
|
||||
}
|
||||
if (WordAligned()) RoundToWords(curIndex);
|
||||
|
||||
// Write the node pointers
|
||||
for (ChildIndex c = 0; c < childCount; c++)
|
||||
{
|
||||
unsigned32 childNodeIndex = children[c];
|
||||
size_t pointer = nodePointers[childNodeIndex];
|
||||
std::vector<unsigned8> pointerBytes = WrapPointer(tree, childNodeIndex, reverseNodeMap[childNodeIndex], (unsigned32)pointer);
|
||||
std::move(pointerBytes.begin(), pointerBytes.end(), bytes.begin() + curIndex);
|
||||
curIndex += pointerBytes.size();
|
||||
if (WordAligned()) RoundToWords(curIndex);
|
||||
}
|
||||
|
||||
// Followed by the additional pointer info
|
||||
if (additionalBytesPerPointer != 0)
|
||||
{
|
||||
ChildIndex i = 0;
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c) && (i < childCount - 1 || LastChildHasAdditionalBytesPerPointer(tree)))
|
||||
{
|
||||
i++;
|
||||
std::vector<unsigned8> additionalBytesForPointer = tree->GetAdditionalPointerBytes(node, c);
|
||||
if (WordAligned())
|
||||
{
|
||||
curIndex += additionalBytesForPointer.size();
|
||||
RoundToWords(curIndex);
|
||||
curIndex -= additionalBytesForPointer.size();
|
||||
}
|
||||
std::move(additionalBytesForPointer.begin(), additionalBytesForPointer.end(), bytes.begin() + curIndex);
|
||||
curIndex += additionalBytesForPointer.size();
|
||||
if (WordAligned()) RoundToWords(curIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
bool BaseTreePoolBuilder::VerifyPool(std::vector<unsigned8>& pool, const unsigned8& treeDepth) const
|
||||
{
|
||||
// Verify that the last level offset is not bigger than the size of the pool
|
||||
for (unsigned8 level = 0; level <= treeDepth; level++)
|
||||
{
|
||||
unsigned32 levelOffset = 0;
|
||||
BitHelper::JoinBytes(pool, levelOffset, level * 4);
|
||||
if (levelOffset > pool.size()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
112
Research/scene/PoolBuilder/BaseTreePoolBuilder.h
Normal file
@@ -0,0 +1,112 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include "BasePoolBuilder.h"
|
||||
#include "../Octree/BaseTree.h"
|
||||
|
||||
// Pool builder base class for building a pool from a BaseTree.
|
||||
class BaseTreePoolBuilder : public BasePoolBuilder<BaseTree>
|
||||
{
|
||||
public:
|
||||
BaseTreePoolBuilder() { mIsBuilding = false; }
|
||||
virtual ~BaseTreePoolBuilder() override {}
|
||||
|
||||
size_t GetPoolSize(const BaseTree* tree) override;
|
||||
bool BuildPool(const BaseTree* tree, std::vector<unsigned8>& pool) override;
|
||||
bool VerifyPool(std::vector<unsigned8>& pool, const unsigned8& depth) const override;
|
||||
|
||||
size_t GetMinimumNodePoolTexelCount(const BaseTree* tree);
|
||||
unsigned32 GetTreeInfoBytesSize(const BaseTree* tree) const;
|
||||
|
||||
static const size_t WORD_SIZE = 4;
|
||||
protected:
|
||||
inline static void RoundToWords(size_t& value) { value += (WORD_SIZE - (value % WORD_SIZE)) % WORD_SIZE; }
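// e.g. with WORD_SIZE = 4: 0 stays 0, 5 becomes 8, 8 stays 8 (round up to the next multiple of 4).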
|
||||
|
||||
// Returns the size of a node without the pointers (but with the additional pointer, node and nodepool information if needed)
|
||||
inline size_t GetBaseNodeSize(const BaseTree* tree, const unsigned32& nodeId) const
|
||||
{
|
||||
auto node = tree->GetNode(nodeId);
|
||||
unsigned8 level = node->GetLevel();
|
||||
size_t additionalPointerBytes = 0;
|
||||
if (node->GetChildCount() > 0)
|
||||
{
|
||||
size_t additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer(level);
|
||||
// Word-align additional bytes per pointer
|
||||
if (WordAligned()) RoundToWords(additionalBytesPerPointer);
|
||||
additionalPointerBytes = (node->GetChildCount() - (LastChildHasAdditionalBytesPerPointer(tree) ? 0 : 1)) * additionalBytesPerPointer;
|
||||
}
|
||||
size_t basicNodeSize = 1 + tree->GetAdditionalBytesPerNode(level) + GetAdditionalPoolInfoForNodeSize(tree, nodeId);
|
||||
if (WordAligned()) RoundToWords(basicNodeSize);
|
||||
return additionalPointerBytes + basicNodeSize;
|
||||
}
|
||||
|
||||
inline size_t GetNodeSize(const BaseTree* tree, const unsigned32& nodeId) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
size_t nodeSize = GetBaseNodeSize(tree, nodeId);
|
||||
unsigned32* children = node->GetChildren();
|
||||
for (ChildIndex c = 0; c < node->GetChildCount(); c++)
|
||||
{
|
||||
size_t pointerSize = GetBytesPerPointer(tree, children[c]);
|
||||
if (WordAligned()) RoundToWords(pointerSize);
|
||||
nodeSize += pointerSize;
|
||||
}
|
||||
return nodeSize;
|
||||
}
|
||||
|
||||
inline bool HasAdditionalBytesPerNode(const BaseTree* tree) const
|
||||
{
|
||||
std::vector<unsigned8> additionalBytesPerNode = tree->GetAdditionalBytesPerNode();
|
||||
bool hasAdditionalBytesPerNode = false; for (auto abpn : additionalBytesPerNode) if (abpn != 0) hasAdditionalBytesPerNode = true;
|
||||
return hasAdditionalBytesPerNode;
|
||||
}
|
||||
|
||||
inline bool HasAdditionalBytesPerPointer(const BaseTree* tree) const
|
||||
{
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
bool hasAdditionalBytesPerPointer = false; for (auto abpp : additionalBytesPerPointer) if (abpp != 0) hasAdditionalBytesPerPointer = true;
|
||||
return hasAdditionalBytesPerPointer;
|
||||
}
|
||||
|
||||
inline bool LastChildHasAdditionalBytesPerPointer(const BaseTree* tree) const
|
||||
{
|
||||
return tree->LastChildHasAdditionalBytes();
|
||||
}
|
||||
|
||||
// Use this method to pre-calculate information needed to build the tree
|
||||
virtual void InitBuild(const BaseTree* tree) = 0;
|
||||
// Use this method to finalize the build process and clear resources
|
||||
virtual void FinishBuild(const BaseTree* tree) = 0;
|
||||
|
||||
// Subclasses can use this to ask for the pool to be word aligned. If a subclass returns true, additional pointer and pool sizes will be rounded to word sizes (i.e. 4 bytes).
|
||||
virtual bool WordAligned() const = 0;
|
||||
|
||||
// Should return the number of bytes required for a pointer to the node with the given nodeIndex.
|
||||
virtual unsigned8 GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeIndex) const = 0;
|
||||
|
||||
// Should return the bytes containing a pointer to the node with the given index.
|
||||
virtual std::vector<unsigned8> WrapPointer(const BaseTree* root, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const = 0;
|
||||
|
||||
// Pool info required for the tree. The size of this should be determined only by the depth of the tree.
|
||||
virtual size_t GetPoolInfoSize(const BaseTree* tree) const = 0;
|
||||
// Pool info required for the tree (e.g. level offsets in memory, pointer sizes per level).
|
||||
virtual std::vector<unsigned8> GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) = 0;
|
||||
|
||||
// Additional pool info of variable size, such as lookup tables.
|
||||
virtual size_t GetAdditionalPoolInfoSize(const BaseTree* tree) const { return 0; }
|
||||
// Additional pool info of variable size, such as lookup tables.
|
||||
virtual std::vector<unsigned8> GetAdditionalPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) { return std::vector<unsigned8>(); }
|
||||
|
||||
virtual unsigned8 GetAdditionalPoolInfoForNodeSize(const BaseTree* tree, const unsigned32& nodeIndex) const { return 0; }
|
||||
virtual std::vector<unsigned8> GetAdditionalPoolInfoForNode(const BaseTree* tree, const unsigned32& nodeIndex, const unsigned32& indexInPool) const { return std::vector<unsigned8>(); }
|
||||
|
||||
// Can be used to change the order of the nodes in memory.
|
||||
virtual void OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder) const;
|
||||
|
||||
std::vector<unsigned8> GetBytesForNode(const BaseTree* tree,const unsigned32& nodeId, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& reverseNodeMap,
|
||||
const std::vector<unsigned8>& additionalBytesPerNode, const std::vector<unsigned8>& additionalBytesPerPointer) const;
|
||||
|
||||
size_t GetAdditionalTreeInfoStart(const BaseTree* tree) const;
|
||||
size_t GetAdditionalPoolInfoStart(const BaseTree* tree) const;
|
||||
|
||||
bool mIsBuilding;
|
||||
};
|
||||
50
Research/scene/PoolBuilder/OriginalPoolBuilder.h
Normal file
@@ -0,0 +1,50 @@
|
||||
#pragma once
|
||||
#include "BaseTreePoolBuilder.h"
|
||||
|
||||
class OriginalPoolBuilder : public BaseTreePoolBuilder
|
||||
{
|
||||
|
||||
public:
|
||||
using BaseTreePoolBuilder::BaseTreePoolBuilder;
|
||||
virtual ~OriginalPoolBuilder() override {}
|
||||
|
||||
std::string GetFullFileName(const std::string& fileName) const override
|
||||
{
|
||||
return fileName + ".o.pool";
|
||||
}
|
||||
protected:
|
||||
void InitBuild(const BaseTree* tree) override {}
|
||||
void FinishBuild(const BaseTree* tree) override {}
|
||||
bool WordAligned() const override { return true; }
|
||||
|
||||
unsigned8 GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeId) const override
|
||||
{
|
||||
// All pointers are 4 bytes
|
||||
return 4;
|
||||
}
|
||||
std::vector<unsigned8> WrapPointer(const BaseTree* root, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const override
|
||||
{
|
||||
return BitHelper::SplitInBytes(pointer);
|
||||
}
|
||||
|
||||
size_t GetPoolInfoSize(const BaseTree* tree) const override { return 0; }
|
||||
std::vector<unsigned8> GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) override { return std::vector<unsigned8>(); }
|
||||
|
||||
unsigned8 GetAdditionalPoolInfoForNodeSize(const BaseTree* tree, const unsigned32& nodeIndex) const override
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeIndex);
|
||||
unsigned8 level = node->GetLevel();
|
||||
unsigned8 additionalBytes = tree->GetAdditionalBytesPerNode(level);
|
||||
// The original paper uses 32 bits as the atomic unit, so there are 24 unused bits after each childmask. We add these padding bytes here for correctness.
|
||||
// However, if they are used to store, for example, color information (i.e., additionalBytes != 0), we use them for that.
|
||||
if (additionalBytes <= 3) return 3 - additionalBytes;
|
||||
else return 0;
|
||||
}
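// For example: with no additional node bytes this yields 3 padding bytes (childmask + 3 = one
// 32-bit word); with 2 additional bytes it yields 1; with 3 or more it yields 0.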
|
||||
|
||||
std::vector<unsigned8> GetAdditionalPoolInfoForNode(const BaseTree* tree, const unsigned32& nodeIndex, const unsigned32& indexInPool) const override
|
||||
{
|
||||
return std::vector<unsigned8>(GetAdditionalPoolInfoForNodeSize(tree, nodeIndex), 0);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
135
Research/scene/PoolBuilder/StandardPoolBuilder.cpp
Normal file
@@ -0,0 +1,135 @@
|
||||
#include "StandardPoolBuilder.h"
|
||||
#include <algorithm>
|
||||
#include "../../inc/tbb/parallel_sort.h"
|
||||
|
||||
std::string StandardPoolBuilder::GetFullFileName(const std::string& fileName) const
|
||||
{
|
||||
return fileName + ".s.pool";
|
||||
}
|
||||
|
||||
std::vector<unsigned8> StandardPoolBuilder::GetPointerSizesPerLevel(const BaseTree* tree) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
std::vector<unsigned8> res(depth + 1);
|
||||
res[depth] = 0; // Pointers in the leaves have size 0
|
||||
for (unsigned8 level = depth; level > 0; level--)
|
||||
{
|
||||
unsigned8 bytesPerPointer = res[level];
|
||||
size_t requiredBytesNextLevel = 0;
|
||||
for (unsigned32 i = 0; i < (unsigned32)tree->GetNodeCount(); i++)
|
||||
{
|
||||
auto node = tree->GetNode(i);
|
||||
if (node->GetLevel() == level)
|
||||
requiredBytesNextLevel += GetBaseNodeSize(tree, i) + node->GetChildCount() * bytesPerPointer;
|
||||
}
|
||||
res[level - 1] = BitHelper::RoundToBytes(std::max<unsigned8>(1, BitHelper::Log2Ceil(requiredBytesNextLevel))) / 8;
|
||||
}
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
printf("Pointers in level %u: %u bytes\n", level, res[level]);
|
||||
return res;
|
||||
}
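// Illustration: if the nodes of some level together occupy 300 bytes, Log2Ceil(300) = 9 bits,
// which rounds up to 16 bits, so pointers stored in the level above take 2 bytes.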
|
||||
|
||||
void StandardPoolBuilder::CalculatePointerSizesPerLevel(const BaseTree* tree)
|
||||
{
|
||||
assert(tree != NULL);
|
||||
mPointerSizesPerLevel = GetPointerSizesPerLevel(tree);
|
||||
}
|
||||
|
||||
void StandardPoolBuilder::InitBuild(const BaseTree* tree)
|
||||
{
|
||||
CalculatePointerSizesPerLevel(tree);
|
||||
mIsBuildingTree = true;
|
||||
}
|
||||
void StandardPoolBuilder::FinishBuild(const BaseTree* tree)
|
||||
{
|
||||
ClearVariables();
|
||||
mIsBuildingTree = false;
|
||||
}
|
||||
|
||||
unsigned8 StandardPoolBuilder::GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeIndex) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeIndex);
|
||||
if (node->GetLevel() == 0) return 0;
|
||||
return mPointerSizesPerLevel[node->GetLevel() - 1];
|
||||
}
|
||||
|
||||
size_t StandardPoolBuilder::GetPoolInfoSize(const BaseTree* tree) const
|
||||
{
|
||||
assert(tree != NULL);
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
return (depth + 1) * 4 // Leave some space for the level offsets (32 bits = 4 bytes per level),
|
||||
+ (depth + 1); // Space for the size of the node pointers
|
||||
}
|
||||
|
||||
std::vector<unsigned8> StandardPoolBuilder::GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder)
|
||||
{
|
||||
//// Go through the level (in order), keeping track of the indices
|
||||
//for (unsigned8 level = 0; level <= depth; level++)
|
||||
//{
|
||||
// auto levelStart = levelIndices[level];
|
||||
// auto levelEnd = levelIndices[level + 1];
|
||||
// levelOffsets[level] = (unsigned32)curIndex;
|
||||
// for (auto i = levelStart; i < levelEnd; i++)
|
||||
// {
|
||||
// Node* node = tree->GetNode(nodeOrder[i]);
|
||||
// nodePointers[node->GetIndex()] = curIndex - levelOffsets[level];
|
||||
// assert(level == 0 || nodePointers[node->GetIndex()] < BitHelper::Exp2(bytesPerPointer[level - 1] * 8)); // Assert the index fits
|
||||
// curIndex += 1 + additionalBytesPerNode[level] + node->GetChildCount() * (bytesPerPointer[level] + additionalBytesPerPointer[level]);
|
||||
// }
|
||||
//}
|
||||
mLevelOffsets = std::vector<unsigned32>(tree->GetMaxLevel() + 1);
|
||||
unsigned8 curLevel = 255;
|
||||
for (size_t i = 0; i < tree->GetNodeCount(); i++)
|
||||
{
|
||||
unsigned32 nodeId = nodeOrder[i];
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
if (node->GetLevel() != curLevel)
|
||||
{
|
||||
curLevel++;
|
||||
mLevelOffsets[curLevel] = (unsigned32)nodePointers[nodeId];
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<unsigned8> res(GetPoolInfoSize(tree));
|
||||
size_t curIndex = 0;
|
||||
// Write the level offsets
|
||||
for (unsigned8 level = 0; level <= tree->GetMaxLevel(); level++)
|
||||
{
|
||||
BitHelper::SplitInBytesAndMove(mLevelOffsets[level], res, curIndex);
|
||||
curIndex += 4;
|
||||
}
|
||||
|
||||
// Write the number of bytes per pointer
|
||||
for (unsigned8 level = 0; level <= tree->GetMaxLevel(); level++)
|
||||
res[curIndex++] = mPointerSizesPerLevel[level];
|
||||
return res;
|
||||
}
|
||||
|
||||
std::vector<unsigned8> StandardPoolBuilder::WrapPointer(const BaseTree* tree, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeIndex);
|
||||
unsigned8 nodeLevel = node->GetLevel();
|
||||
unsigned32 withinLevelPointer = pointer - mLevelOffsets[nodeLevel];
|
||||
return BitHelper::SplitInBytes(withinLevelPointer, mPointerSizesPerLevel[nodeLevel - 1]);
|
||||
}
|
||||
|
||||
void StandardPoolBuilder::ClearVariables()
|
||||
{
|
||||
mPointerSizesPerLevel.clear();
|
||||
mLevelOffsets.clear();
|
||||
}
|
||||
|
||||
//void StandardPoolBuilder::OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder) const
|
||||
//{
|
||||
// std::vector<size_t> parentCounts = tree->GetParentCounts();
|
||||
// // First order on level (asc), then on number of parents (desc), so that the most used nodes have the smallest pointers
|
||||
// tbb::parallel_sort(nodeOrder.begin(), nodeOrder.end(), [tree, parentCounts](const unsigned32& i1, const unsigned32& i2)
|
||||
// {
|
||||
// Node* a = tree->GetNode(i1);
|
||||
// Node* b = tree->GetNode(i2);
|
||||
// if (a->GetLevel() != b->GetLevel()) return a->GetLevel() < b->GetLevel();
|
||||
// if (parentCounts[i1] != parentCounts[i2]) return parentCounts[i1] > parentCounts[i2];
|
||||
// // If the level and number of parents is the same, then, for consistency, order on nodeID.
|
||||
// return i1 < i2;
|
||||
// });
|
||||
//}
|
||||
35
Research/scene/PoolBuilder/StandardPoolBuilder.h
Normal file
@@ -0,0 +1,35 @@
|
||||
#pragma once
|
||||
#include "BaseTreePoolBuilder.h"
|
||||
|
||||
class StandardPoolBuilder : public BaseTreePoolBuilder
|
||||
{
|
||||
|
||||
public:
|
||||
using BaseTreePoolBuilder::BaseTreePoolBuilder;
|
||||
virtual ~StandardPoolBuilder() override {}
|
||||
|
||||
std::string GetFullFileName(const std::string& fileName) const override;
|
||||
std::vector<unsigned8> GetPointerSizesPerLevel(const BaseTree* tree) const;
|
||||
protected:
|
||||
void InitBuild(const BaseTree* tree) override;
|
||||
void FinishBuild(const BaseTree* tree) override;
|
||||
bool WordAligned() const override { return false; }
|
||||
|
||||
unsigned8 GetBytesPerPointer(const BaseTree* tree, const unsigned32& nodeId) const override;
|
||||
std::vector<unsigned8> WrapPointer(const BaseTree* root, const unsigned32& nodeIndex, const unsigned32& indexInPool, const unsigned32& pointer) const override;
|
||||
|
||||
size_t GetPoolInfoSize(const BaseTree* tree) const override;
|
||||
std::vector<unsigned8> GetPoolInfo(const BaseTree* tree, const std::vector<size_t>& nodePointers, const std::vector<unsigned32>& nodeOrder) override;
|
||||
|
||||
//void StandardPoolBuilder::OrderNodes(const BaseTree* tree, std::vector<unsigned32>& nodeOrder) const override;
|
||||
|
||||
void CalculatePointerSizesPerLevel(const BaseTree* tree);
|
||||
void ClearVariables();
|
||||
// Variables used during the current build
|
||||
bool mIsBuildingTree = false;
|
||||
std::vector<unsigned8> mPointerSizesPerLevel;
|
||||
std::vector<unsigned32> mLevelOffsets;
|
||||
|
||||
|
||||
};
|
||||
|
||||
507
Research/scene/PoolBuilder/VirtualNodePoolBuilder.cpp
Normal file
@@ -0,0 +1,507 @@
|
||||
#include "VirtualNodePoolBuilder.h"
|
||||
#include <algorithm>
|
||||
#include "../../inc/tbb/parallel_sort.h"
|
||||
#include "../../core/Util/BoolArray.h"
|
||||
#include <numeric>
|
||||
|
||||
std::string VirtualNodePoolBuilder::GetFullFileName(const std::string& filename) const
|
||||
{
|
||||
return filename + ".v.pool";
|
||||
}
|
||||
|
||||
size_t VirtualNodePoolBuilder::GetFullNodeSize(const BaseTree* tree, const unsigned8& level, const unsigned8& pointerSize) const
|
||||
{
|
||||
// 1 byte for childmask
|
||||
// 1 byte for "virtual mask" (indicating which nodes are virtual)
|
||||
// pointerSize bytes for pointer to the first child
|
||||
// + Additional node info
|
||||
if (level > tree->GetMaxLevel()) return 0;
|
||||
return 1 + 1 + pointerSize + tree->GetAdditionalBytesPerNode(level);
|
||||
}
|
||||
size_t VirtualNodePoolBuilder::GetVirtualNodeSize(const BaseTree* tree, const unsigned8& level, const unsigned8& pointerSize) const
|
||||
{
|
||||
if (level == 0) return 0;
|
||||
return pointerSize;
|
||||
}
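// Worked example (illustrative, assumptions not taken from the original source): for a level
// that is reached with 2-byte pointers, addresses its children with 2-byte pointers and
// carries no additional node bytes:
//   full node    : 1 (childmask) + 1 (virtual mask) + 2 (first-child pointer) = 4 bytes
//   virtual node : 2 bytes, i.e. just the pointer into its level (level 0 has no virtual nodes)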
|
||||
|
||||
size_t VirtualNodePoolBuilder::GetFullNodeSize(const BaseTree* tree, const unsigned8& level, const std::vector<unsigned8>& pointerSizesPerLevel) const
|
||||
{
|
||||
if (level > tree->GetMaxLevel()) return 0;
|
||||
return GetFullNodeSize(tree, level, pointerSizesPerLevel[level]);
|
||||
}
|
||||
|
||||
size_t VirtualNodePoolBuilder::GetVirtualNodeSize(const BaseTree* tree, const unsigned8& level, const std::vector<unsigned8>& pointerSizesPerLevel) const
|
||||
{
|
||||
// pointerSize bytes for pointer to the first child
|
||||
return GetVirtualNodeSize(tree, level, pointerSizesPerLevel[level - 1]);
|
||||
}
|
||||
|
||||
size_t VirtualNodePoolBuilder::GetNormalNodeSize(const BaseTree* tree, const unsigned32& nodeId, const std::vector<unsigned8>& pointerSizesPerLevel, std::vector<unsigned8>& additionalPointerInfoSizesPerLevel, const bool& includingAdditionalPointerInfo) const
|
||||
{
|
||||
// 1 byte for childmask
|
||||
// Additional node info
|
||||
// pointerSize bytes for each pointer needed.
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
unsigned8 level = node->GetLevel();
|
||||
return 1 + tree->GetAdditionalBytesPerNode(level) + node->GetChildCount() * pointerSizesPerLevel[level] +
|
||||
(includingAdditionalPointerInfo ? (additionalPointerInfoSizesPerLevel[level] * node->GetChildCount()) : 0);
|
||||
}
|
||||
|
||||
std::vector<unsigned8> VirtualNodePoolBuilder::CalculatePointerSizesPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode, const std::vector<bool>& useVirtualNodes) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
|
||||
// Calculate some counts per level (needed to calculate the size of each level)
|
||||
std::vector<size_t> virtualNodesPerLevel = CalculateVirtualNodesPerLevel(tree, parentsPerNode);
|
||||
std::vector<size_t> fullNodesPerLevel = CalculateFullNodesPerLevel(tree);
|
||||
std::vector<size_t> pointersToLevel = CalculatePointersToPerLevel(tree);
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
|
||||
// Now calculate, bottom-up, the pointer sizes required to point to each level
|
||||
std::vector<unsigned8> res(depth + 1, 0);
|
||||
for (unsigned8 level = depth; level > 0; level--)
|
||||
{
|
||||
// Keep increasing the pointersize until we can point to all nodes within a level
|
||||
bool fits = false;
|
||||
while (!fits)
|
||||
{
|
||||
res[level - 1]++;
|
||||
size_t requiredSize = CalculateSizeOfLevel(tree, level, virtualNodesPerLevel[level], fullNodesPerLevel[level],
|
||||
pointersToLevel[level], level == depth ? 0 : pointersToLevel[level + 1],
|
||||
level == 0 ? 0 : res[level - 1], res[level],
|
||||
level == 0 ? 0 : additionalBytesPerPointer[level - 1], additionalBytesPerPointer[level],
|
||||
useVirtualNodes[level], level == 0 ? 0 : useVirtualNodes[level - 1]);
|
||||
size_t availableSize = BitHelper::Exp2(res[level - 1] * 8); // Available size is how many bytes we can address with a pointer of this size
|
||||
fits = requiredSize < availableSize;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
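// Minimal numeric sketch of the loop above (illustrative, not taken from the original source):
// if some level ends up needing 70,000 bytes, a 2-byte pointer only addresses
// BitHelper::Exp2(16) == 65,536 bytes, so the loop bumps the pointer size to 3, which
// addresses 16,777,216 bytes and fits. Growing the pointer size can also grow the level
// itself (virtual nodes inside it store that pointer), which is why the required size is
// re-evaluated on every iteration.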
|
||||
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculateVirtualNodesPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
std::vector<size_t> virtualNodesPerLevel(depth + 1);
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(i);
|
||||
unsigned8 level = node->GetLevel();
|
||||
if (parentsPerNode[i] > 1)
|
||||
virtualNodesPerLevel[level] += parentsPerNode[i] - 1;
|
||||
}
|
||||
return virtualNodesPerLevel;
|
||||
}
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculateFullNodesPerLevel(const BaseTree* tree) const
|
||||
{
|
||||
// Every node appears exactly once in full
|
||||
return tree->GetNodesPerLevel();
|
||||
}
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculatePointersToPerLevel(const BaseTree* tree) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
std::vector<size_t> pointersToPerLevel(depth + 1);
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(i);
|
||||
unsigned8 level = node->GetLevel();
|
||||
if (level < depth)
|
||||
pointersToPerLevel[level + 1] += node->GetChildCount();
|
||||
}
|
||||
return pointersToPerLevel;
|
||||
}
|
||||
size_t VirtualNodePoolBuilder::CalculateSizeOfLevel(const BaseTree* tree, const unsigned8& level,
|
||||
const size_t& virtualNodesThisLevel, const size_t& fullNodesThisLevel, const size_t& pointersToThisLevel, const size_t& pointersFromThisLevel,
|
||||
const unsigned8& pointerSizeToThisLevel, const unsigned8& pointerSizeFromThisLevel,
|
||||
const unsigned8& additionalBytesPointersToThisLevel, const unsigned8& additionalBytesPointersFromThisLevel,
|
||||
const bool& useVirtualNodesThisLevel, const bool& useVirtualNodesPreviousLevel) const
|
||||
{
|
||||
size_t requiredSize = 0;
|
||||
// Calculate size of virtual nodes placed in this level by the previous level
|
||||
if (useVirtualNodesPreviousLevel)
|
||||
requiredSize += virtualNodesThisLevel * GetVirtualNodeSize(tree, level, pointerSizeToThisLevel) + pointersToThisLevel * additionalBytesPointersToThisLevel;
|
||||
// Calculate the size of full nodes (or normal nodes) occupying this level
|
||||
if (useVirtualNodesThisLevel)
|
||||
requiredSize += fullNodesThisLevel * GetFullNodeSize(tree, level, pointerSizeFromThisLevel);
|
||||
else
|
||||
requiredSize += (1 + tree->GetAdditionalBytesPerNode(level)) * fullNodesThisLevel + pointersFromThisLevel * pointerSizeFromThisLevel + pointersFromThisLevel * additionalBytesPointersFromThisLevel;
|
||||
return requiredSize;
|
||||
}
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculateSizePerLevel(const BaseTree* tree, const std::vector<unsigned8> pointerSizesPerLevel, const std::vector<size_t>& parentsPerNode, const std::vector<bool>& useVirtualNodes) const
|
||||
{
|
||||
// Calculate some statistics needed to find the size of each level in memory
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
std::vector<size_t> virtualNodesPerLevel = CalculateVirtualNodesPerLevel(tree, parentsPerNode);
|
||||
std::vector<size_t> fullNodesPerLevel = CalculateFullNodesPerLevel(tree);
|
||||
std::vector<size_t> pointersToLevel = CalculatePointersToPerLevel(tree);
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
|
||||
// Calculate the actual size per level
|
||||
std::vector<size_t> sizePerLevel(depth + 1);
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
sizePerLevel[level] = CalculateSizeOfLevel(tree, level, virtualNodesPerLevel[level], fullNodesPerLevel[level],
|
||||
pointersToLevel[level], level == depth ? 0 : pointersToLevel[level + 1],
|
||||
level == 0 ? 0 : pointerSizesPerLevel[level - 1], pointerSizesPerLevel[level],
|
||||
level == 0 ? 0 : additionalBytesPerPointer[level - 1], additionalBytesPerPointer[level],
|
||||
useVirtualNodes[level], level == 0 ? 0 : useVirtualNodes[level - 1]);
|
||||
return sizePerLevel;
|
||||
}
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculateApproximateSizePerLevelVirtualNodes(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const
|
||||
{
|
||||
// Calculate some statistics needed to find the size of each level in memory
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
std::vector<size_t> virtualNodesPerLevel = CalculateVirtualNodesPerLevel(tree, parentsPerNode);
|
||||
std::vector<size_t> fullNodesPerLevel = CalculateFullNodesPerLevel(tree);
|
||||
std::vector<size_t> pointersToLevel = CalculatePointersToPerLevel(tree);
|
||||
std::vector<unsigned8> pointerSizesPerLevel(depth + 1, 4); // Assume 4 bytes pointers per level
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
|
||||
// Calculate the actual size per level
|
||||
std::vector<size_t> sizePerLevel(depth + 1);
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
sizePerLevel[level] =
|
||||
fullNodesPerLevel[level] * GetFullNodeSize(tree, level, pointerSizesPerLevel) + // Full nodes size
|
||||
(level == depth ? 0 : (virtualNodesPerLevel[level + 1] * GetVirtualNodeSize(tree, level + 1, pointerSizesPerLevel))) + // Virtual nodes size
|
||||
(level == depth ? 0 : (pointersToLevel[level + 1] * additionalBytesPerPointer[level])); // additional pointer bytes size
|
||||
return sizePerLevel;
|
||||
}
|
||||
std::vector<size_t> VirtualNodePoolBuilder::CalculateApproximateSizePerLevelStandardNodes(const BaseTree* tree) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
std::vector<unsigned8> pointerSizesPerLevel(depth + 1, 4); // Assume 4 bytes pointers per level
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
std::vector<size_t> sizePerLevel(depth + 1);
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(i);
|
||||
unsigned8 level = node->GetLevel();
|
||||
sizePerLevel[level] += GetNormalNodeSize(tree, i, pointerSizesPerLevel, additionalBytesPerPointer, true);
|
||||
}
|
||||
return sizePerLevel;
|
||||
}
|
||||
std::vector<bool> VirtualNodePoolBuilder::DecideVirtualPointersPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
std::vector<size_t> sizePerStandardLevel = CalculateApproximateSizePerLevelStandardNodes(tree);
|
||||
std::vector<size_t> sizePerVirtualNodesLevel = CalculateApproximateSizePerLevelVirtualNodes(tree, parentsPerNode);
|
||||
std::vector<bool> useVirtualNodes(depth + 1);
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
useVirtualNodes[level] = sizePerVirtualNodesLevel[level] < sizePerStandardLevel[level];
|
||||
//useVirtualNodes = std::vector<bool>(depth + 1, false);
|
||||
//useVirtualNodes[0] = true;
|
||||
//useVirtualNodes[1] = true;
|
||||
//useVirtualNodes[2] = true;
|
||||
return useVirtualNodes;
|
||||
}
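// Illustrative outcome (all numbers are made up, not taken from the original source):
//   sizePerVirtualNodesLevel = {   40,  900, 30000, 950000 }
//   sizePerStandardLevel     = {   64, 1200, 28000, 700000 }
//   useVirtualNodes          = { true, true, false, false  }
// i.e. each level independently picks whichever estimated layout is smaller.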
|
||||
size_t VirtualNodePoolBuilder::CalculatePoolInfoSize(const BaseTree* tree)
|
||||
{
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
// Each tree contains at least the level offsets (4 bytes per level) and pointer sizes per level (1 byte per level)
|
||||
// 1 byte per level for sizes of additional information per level
|
||||
// 4 bytes are used to indicate which levels use virtual nodes
|
||||
size_t poolInfoSize = (depth + 1) * 5 + 4;
|
||||
if (HasAdditionalBytesPerNode(tree)) poolInfoSize += depth + 1;
|
||||
if (HasAdditionalBytesPerPointer(tree)) poolInfoSize += depth + 1;
|
||||
// Additional pool info from the tree
|
||||
poolInfoSize += tree->GetAdditionalTreeInfoSize();
|
||||
return poolInfoSize;
|
||||
}
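// Worked example (illustrative, not taken from the original source): for a tree with depth 3,
// no additional bytes per node or per pointer, and GetAdditionalTreeInfoSize() == 0:
//   (3 + 1) * 5 = 20 bytes (4-byte level offset + 1-byte pointer size per level)
// +           4 =  4 bytes (bitmask of the levels that use virtual nodes)
// = 24 bytes of pool info.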
|
||||
size_t VirtualNodePoolBuilder::GetPoolSize(const BaseTree* tree)
|
||||
{
|
||||
// Calculate the pool info size
|
||||
size_t minSize = CalculatePoolInfoSize(tree);
|
||||
|
||||
// Calculate the main pool size
|
||||
std::vector<size_t> parentsPerNode = tree->GetParentCounts();
|
||||
std::vector<bool> useVirtualNodes = DecideVirtualPointersPerLevel(tree, parentsPerNode);
|
||||
std::vector<unsigned8> pointerSizesPerLevel = CalculatePointerSizesPerLevel(tree, parentsPerNode, useVirtualNodes);
|
||||
std::vector<size_t> sizesPerLevel = CalculateSizePerLevel(tree, pointerSizesPerLevel, parentsPerNode, useVirtualNodes);
|
||||
minSize += std::accumulate(sizesPerLevel.begin(), sizesPerLevel.end(), size_t(0));
|
||||
|
||||
std::vector<size_t> virtualNodesPerLevel = CalculateVirtualNodesPerLevel(tree, parentsPerNode);
|
||||
std::vector<size_t> fullNodesPerLevel = CalculateFullNodesPerLevel(tree);
|
||||
size_t virtualNodesSum = 0;
|
||||
size_t fullNodesSum = 0;
|
||||
size_t normalNodesSum = 0;
|
||||
for (unsigned8 level = 0; level < tree->GetMaxLevel(); level++)
|
||||
{
|
||||
if (useVirtualNodes[level]) fullNodesSum += fullNodesPerLevel[level];
|
||||
else normalNodesSum += fullNodesPerLevel[level];
|
||||
if (level > 0 && useVirtualNodes[level - 1]) virtualNodesSum += virtualNodesPerLevel[level - 1];
|
||||
}
|
||||
printf("Virtual nodes: %llu, Complete nodes: %llu, Normal Nodes: %llu, Percentage virtual: %f\n", (unsigned64)virtualNodesSum, (unsigned64)fullNodesSum, (unsigned64)normalNodesSum, (double(virtualNodesSum) / double(virtualNodesSum + fullNodesSum)) * 100.0);
|
||||
return minSize;
|
||||
}
|
||||
|
||||
//************************************
|
||||
// Inserts all nodes into the final node pool and updates the pointers
|
||||
//************************************
|
||||
bool VirtualNodePoolBuilder::BuildPool(const BaseTree* tree, std::vector<unsigned8>& pool) {
|
||||
if (tree == NULL) return false;
|
||||
mIsBuilding = true;
|
||||
|
||||
unsigned32 nodeCount = (unsigned32)tree->GetNodeCount();
|
||||
unsigned8 depth = tree->GetMaxLevel();
|
||||
|
||||
// Initialize the pool
|
||||
pool = std::vector<unsigned8>(GetPoolSize(tree));
|
||||
|
||||
// Acquire some information about the pool
|
||||
std::vector<size_t> parentsPerNode = tree->GetParentCounts();
|
||||
std::vector<bool> useVirtualNodes = DecideVirtualPointersPerLevel(tree, parentsPerNode);
|
||||
std::vector<unsigned8> pointerSizesPerLevel = CalculatePointerSizesPerLevel(tree, parentsPerNode, useVirtualNodes);
|
||||
std::vector<size_t> sizePerLevel = CalculateSizePerLevel(tree, pointerSizesPerLevel, parentsPerNode, useVirtualNodes);
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
std::vector<unsigned8> additionalBytesPerNode = tree->GetAdditionalBytesPerNode();
|
||||
std::vector<size_t> nodePointers(nodeCount);
|
||||
|
||||
// Calculate the level offsets
|
||||
std::vector<size_t> levelOffsets(depth + 1);
|
||||
size_t curIndex = CalculatePoolInfoSize(tree);
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
{
|
||||
levelOffsets[level] = curIndex;
|
||||
curIndex += sizePerLevel[level];
|
||||
}
|
||||
|
||||
// Calculate the node pointers for nodes in non-switch levels not using virtual nodes
|
||||
bool switchlevel = true;
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
{
|
||||
if (!switchlevel)
|
||||
{
|
||||
curIndex = levelOffsets[level];
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(i);
|
||||
if (node->GetLevel() == level)
|
||||
{
|
||||
nodePointers[i] = curIndex;
|
||||
curIndex += GetNormalNodeSize(tree, i, pointerSizesPerLevel, additionalBytesPerPointer, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (useVirtualNodes[level]) switchlevel = true;
|
||||
else if (switchlevel == true) switchlevel = false;
|
||||
}
|
||||
|
||||
|
||||
curIndex = 0;
|
||||
// Write the level offsets
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
BitHelper::SplitInBytesAndMove(levelOffsets[level], pool, level * 4, 4);
|
||||
curIndex += 4 * (depth + 1);
|
||||
|
||||
// Write the pointer sizes per level
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
pool[curIndex++] = pointerSizesPerLevel[level];
|
||||
|
||||
// Write 4 bytes indicating which levels use virtual nodes
|
||||
unsigned32 levelsUsingVirtualNodesMask = 0;
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
BitHelper::SetLS(levelsUsingVirtualNodesMask, level, useVirtualNodes[level]);
|
||||
BitHelper::SplitInBytesAndMove(levelsUsingVirtualNodesMask, pool, curIndex);
|
||||
curIndex += 4;
|
||||
|
||||
// Write additional bytes per node
|
||||
if (HasAdditionalBytesPerNode(tree))
|
||||
{
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
pool[curIndex++] = additionalBytesPerNode[level];
|
||||
}
|
||||
|
||||
// Write additional bytes per pointer
|
||||
if (HasAdditionalBytesPerPointer(tree))
|
||||
{
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
pool[curIndex++] = additionalBytesPerPointer[level];
|
||||
}
|
||||
|
||||
// Leave some space for the additional pool info.
|
||||
// As the actual node pointers are not yet known, we write them later
|
||||
size_t additionalTreeInfoStart = curIndex;
|
||||
curIndex += tree->GetAdditionalTreeInfoSize();
|
||||
|
||||
// Find all roots (to make sure we write all reachable nodes from any root).
|
||||
std::vector<NodeToWrite> nextLevelNodes;
|
||||
std::vector<NodeToWrite> thisLevelNodes;
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
if (tree->GetNode(i)->GetLevel() == 0) thisLevelNodes.push_back(NodeToWrite(i, 0, useVirtualNodes[0] ? FULL : NORMAL, 0, 0));
|
||||
size_t nextLevelIndex;
|
||||
BoolArray writtenNodes(nodeCount);
|
||||
|
||||
// Write the full node pool
|
||||
for (unsigned8 level = 0; level <= depth; level++)
|
||||
{
|
||||
assert(curIndex == levelOffsets[level]);
|
||||
nextLevelNodes.clear();
|
||||
nextLevelIndex = 0;
|
||||
unsigned8 additionalNodeBytes = additionalBytesPerNode[level];
|
||||
unsigned8 additionalBytesForPointersToThisLevel = level > 0 ? additionalBytesPerPointer[level - 1] : 0;
|
||||
unsigned8 additionalBytesForPointersFromThisLevel = additionalBytesPerPointer[level];
|
||||
size_t childFullNodeSize = GetFullNodeSize(tree, level + 1, pointerSizesPerLevel);
|
||||
size_t childVirtualNodeSize = GetVirtualNodeSize(tree, level + 1, pointerSizesPerLevel);
|
||||
for (NodeToWrite nodeInfo : thisLevelNodes)
|
||||
{
|
||||
const Node* node = nodeInfo.GetNode(tree);
|
||||
unsigned32 nodeId = nodeInfo.nodeId;
|
||||
assert(level == node->GetLevel());
|
||||
if (nodeInfo.type == VIRTUAL)
|
||||
{ // Write a virtual node
|
||||
size_t virtualNodeSize = GetVirtualNodeSize(tree, level, pointerSizesPerLevel);
|
||||
BitHelper::SplitInBytesAndMove(nodePointers[nodeId] - levelOffsets[level], pool, curIndex, virtualNodeSize);
|
||||
curIndex += virtualNodeSize;
|
||||
}
|
||||
else if (nodeInfo.type == FULL)
|
||||
{ // Write a full node
|
||||
assert(useVirtualNodes[level]);
|
||||
nodePointers[nodeId] = curIndex;
|
||||
|
||||
WriteFullNode(tree, nodeId, (unsigned32)nextLevelIndex, pointerSizesPerLevel[level], writtenNodes, additionalNodeBytes, pool, curIndex);
|
||||
// Tell the next level which nodes should be written and in what order
|
||||
unsigned8 vMask = pool[curIndex + 1 + additionalNodeBytes];
|
||||
size_t nextLevelNodesOffset = nextLevelNodes.size();
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
if (node->HasChild(c))
|
||||
{
|
||||
NodeType type;
|
||||
if (!useVirtualNodes[level + 1]) type = BitHelper::GetLS(vMask, c) ? VIRTUAL : NORMAL;
|
||||
else type = BitHelper::GetLS(vMask, c) ? VIRTUAL : FULL;
|
||||
nextLevelNodes.push_back(NodeToWrite(node->GetChildIndex(c), level + 1, type, nodeId, c));
|
||||
}
|
||||
// Calculate the size of those nodes in the next layer.
|
||||
for (auto c = nextLevelNodes.begin() + nextLevelNodesOffset; c != nextLevelNodes.end(); c++)
|
||||
{
|
||||
switch (c->type)
|
||||
{
|
||||
case NORMAL: nextLevelIndex += GetNormalNodeSize(tree, c->nodeId, pointerSizesPerLevel, additionalBytesPerPointer, true); break;
|
||||
case FULL: nextLevelIndex += childFullNodeSize; break;
|
||||
case VIRTUAL: nextLevelIndex += childVirtualNodeSize; break;
|
||||
}
|
||||
}
|
||||
nextLevelIndex += node->GetChildCount() * additionalBytesForPointersFromThisLevel;
|
||||
curIndex += GetFullNodeSize(tree, level, pointerSizesPerLevel);
|
||||
}
|
||||
else if (nodeInfo.type == NORMAL)
|
||||
{
|
||||
assert(nodePointers[nodeId] == 0 || nodePointers[nodeId] == curIndex);
|
||||
nodePointers[nodeId] = curIndex;
|
||||
WriteNormalNode(tree, nodeId, additionalNodeBytes, pointerSizesPerLevel[level], nodePointers, level == depth ? 0 : levelOffsets[level + 1], pool, curIndex);
|
||||
curIndex += GetNormalNodeSize(tree, nodeId, pointerSizesPerLevel, additionalBytesPerPointer, false);
|
||||
// Write additional bytes per pointer
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c))
|
||||
{
|
||||
WriteAdditionalPointerInfo(tree, nodeId, c, additionalBytesForPointersFromThisLevel, pool, curIndex);
|
||||
curIndex += additionalBytesForPointersFromThisLevel;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (level > 0 && additionalBytesForPointersToThisLevel != 0 && useVirtualNodes[level - 1])
|
||||
{
|
||||
WriteAdditionalPointerInfo(tree, nodeInfo.parentId, nodeInfo.childIndexOfParent, additionalBytesForPointersToThisLevel, pool, curIndex);
|
||||
curIndex += additionalBytesForPointersToThisLevel;
|
||||
}
|
||||
}
|
||||
if (useVirtualNodes[level])
|
||||
thisLevelNodes = nextLevelNodes;
|
||||
else
|
||||
{
|
||||
thisLevelNodes.clear();
|
||||
for (unsigned32 i = 0; i < nodeCount; i++)
|
||||
{
|
||||
const Node* node = tree->GetNode(i);
|
||||
if (node->GetLevel() == level + 1)
|
||||
thisLevelNodes.push_back(NodeToWrite(i, level + 1, NORMAL, 0, 0));
|
||||
}
|
||||
}
|
||||
}
|
||||
std::vector<unsigned8> additionalTreeInfo = tree->GetAdditionalTreeInfo(nodePointers);
|
||||
std::move(additionalTreeInfo.begin(), additionalTreeInfo.end(), pool.begin() + additionalTreeInfoStart);
|
||||
mIsBuilding = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
void VirtualNodePoolBuilder::WriteFullNode(const BaseTree* tree, const unsigned32& nodeId, const unsigned32& childPointer, const unsigned8& childPointerSize, BoolArray& writtenNodes,
|
||||
const unsigned8& additionalNodeBytes, std::vector<unsigned8>& pool, const size_t& offset) const
|
||||
{
|
||||
size_t curIndex = offset;
|
||||
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
pool[curIndex++] = node->GetChildmask().mask;
|
||||
|
||||
// Write additional node info (if any)
|
||||
if (additionalNodeBytes != 0)
|
||||
{
|
||||
auto nodeBytes = tree->GetAdditionalNodeBytes(node);
|
||||
std::move(nodeBytes.begin(), nodeBytes.end(), pool.begin() + curIndex);
|
||||
assert(nodeBytes.size() == additionalNodeBytes);
|
||||
curIndex += additionalNodeBytes;
|
||||
}
|
||||
|
||||
// Build the "virtual mask" indicating which nodes have already been written in the next level and are virtual in this level
|
||||
pool[curIndex++] = GetVMask(tree, nodeId, writtenNodes);
|
||||
|
||||
// Write the pointer to the first child
|
||||
BitHelper::SplitInBytesAndMove(childPointer, pool, curIndex, childPointerSize);
|
||||
curIndex += childPointerSize;
|
||||
}
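// Byte layout written above for one full node (sketch; the sizes assume
// additionalNodeBytes == 2 and childPointerSize == 3, values chosen purely for illustration):
//   pool[offset + 0]    : childmask
//   pool[offset + 1..2] : additional node bytes
//   pool[offset + 3]    : virtual mask (children that were already written elsewhere)
//   pool[offset + 4..6] : pointer to the first child, an offset within the next level as passed in by BuildPool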
|
||||
|
||||
void VirtualNodePoolBuilder::WriteNormalNode(const BaseTree* tree, const unsigned32& nodeId, const unsigned8& additionalNodeBytes, const unsigned8& pointerSize, const std::vector<size_t>& nodePointers, const size_t& nextLevelOffset, std::vector<unsigned8>& pool, const size_t& offset) const
|
||||
{
|
||||
size_t curIndex = offset;
|
||||
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
pool[curIndex++] = node->GetChildmask().mask;
|
||||
|
||||
// Write additional node info (if any)
|
||||
if (additionalNodeBytes != 0)
|
||||
{
|
||||
auto nodeBytes = tree->GetAdditionalNodeBytes(node);
|
||||
std::move(nodeBytes.begin(), nodeBytes.end(), pool.begin() + curIndex);
|
||||
assert(nodeBytes.size() == additionalNodeBytes);
|
||||
curIndex += additionalNodeBytes;
|
||||
}
|
||||
|
||||
// Write the child pointers
|
||||
unsigned32* children = node->GetChildren();
|
||||
for (ChildIndex c = 0; c < node->GetChildCount(); c++)
|
||||
{
|
||||
unsigned32 child = children[c];
|
||||
size_t pointer = nodePointers[child] - nextLevelOffset;
|
||||
BitHelper::SplitInBytesAndMove(pointer, pool, curIndex, pointerSize);
|
||||
curIndex += pointerSize;
|
||||
}
|
||||
}
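// Byte layout written above for one normal node (sketch; assumes additionalNodeBytes == 0
// and a 2-byte pointer size, purely for illustration):
//   pool[offset + 0]          : childmask
//   pool[offset + 1 + 2 * c]  : 2-byte pointer to the c-th existing child, relative to the
//                               start of the next level (nodePointers[child] - nextLevelOffset)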
|
||||
|
||||
void VirtualNodePoolBuilder::WriteAdditionalPointerInfo(const BaseTree* tree, const unsigned32& nodeId, const ChildIndex& childId, const unsigned8& additionalPointerBytes, std::vector<unsigned8>& pool, const size_t& offset) const
|
||||
{
|
||||
auto pointerInfo = tree->GetAdditionalPointerBytes(tree->GetNode(nodeId), childId);
|
||||
std::move(pointerInfo.begin(), pointerInfo.end(), pool.begin() + offset);
|
||||
}
|
||||
|
||||
unsigned8 VirtualNodePoolBuilder::GetVMask(const BaseTree* tree, const unsigned32& nodeId, BoolArray& writtenNodes) const
|
||||
{
|
||||
const Node* node = tree->GetNode(nodeId);
|
||||
unsigned8 vMask = 0;
|
||||
for (ChildIndex c = 0; c < 8; c++)
|
||||
{
|
||||
if (node->HasChild(c))
|
||||
{
|
||||
unsigned32 childIndex = node->GetChildIndex(c);
|
||||
// A node is virtual if it has been written before
|
||||
BitHelper::SetLS(vMask, c, writtenNodes[childIndex]);
|
||||
writtenNodes.Set(childIndex, true);
|
||||
}
|
||||
}
|
||||
return vMask;
|
||||
}
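// Example (illustrative, assuming SetLS/GetLS index bits from the least-significant end):
// if children 0, 3 and 5 exist and child 3 was already written by an earlier parent,
// GetVMask returns 0b00001000 (only bit 3 set), and children 0, 3 and 5 are all marked as
// written for any later parents that share them.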
|
||||
|
||||
bool VirtualNodePoolBuilder::VerifyPool(std::vector<unsigned8>& pool, const unsigned8& treeDepth) const
|
||||
{
|
||||
// TODO: Do some verification here.
|
||||
return true;
|
||||
}
|
||||
88
Research/scene/PoolBuilder/VirtualNodePoolBuilder.h
Normal file
@@ -0,0 +1,88 @@
|
||||
#pragma once
|
||||
#include <vector>
|
||||
#include "BasePoolBuilder.h"
|
||||
#include "../Octree/BaseTree.h"
|
||||
|
||||
class BoolArray;
|
||||
|
||||
// A virtual node pool is a node pool in which the children of a node are stored consecutively in memory.
|
||||
// Nodes that are reused (because of the DAG structure) will appear as so-called "virtual nodes".
|
||||
// These virtual nodes are essentially just a pointer to the actual node and are stored in-line with the ordinary nodes.
|
||||
// This has the advantage that only 1 pointer is needed per node (a pointer to the first child).
|
||||
class VirtualNodePoolBuilder : public BasePoolBuilder<BaseTree>
|
||||
{
|
||||
public:
|
||||
VirtualNodePoolBuilder() { mIsBuilding = false; }
|
||||
virtual ~VirtualNodePoolBuilder() override {}
|
||||
|
||||
std::string GetFullFileName(const std::string& fileName) const override;
|
||||
size_t GetPoolSize(const BaseTree* tree) override;
|
||||
bool BuildPool(const BaseTree* tree, std::vector<unsigned8>& pool) override;
|
||||
bool VerifyPool(std::vector<unsigned8>& pool, const unsigned8& depth) const override;
|
||||
private:
|
||||
// Returns the size of a node without the pointers (but with the additional pointer information if needed)
|
||||
std::vector<unsigned8> CalculatePointerSizesPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode, const std::vector<bool>& useVirtualNodes) const;
|
||||
std::vector<size_t> CalculateVirtualNodesPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const;
|
||||
std::vector<size_t> CalculateFullNodesPerLevel(const BaseTree* tree) const;
|
||||
std::vector<size_t> CalculatePointersToPerLevel(const BaseTree* tree) const;
|
||||
size_t CalculateSizeOfLevel(const BaseTree* tree, const unsigned8& level,
|
||||
const size_t& virtualNodesThisLevel, const size_t& fullNodesThisLevel, const size_t& pointersToThisLevel, const size_t& pointersFromThisLevel,
|
||||
const unsigned8& pointerSizeToThisLevel, const unsigned8& pointerSizeFromThisLevel,
|
||||
const unsigned8& additionalBytesPointersToThisLevel, const unsigned8& additionalBytesPointersFromThisLevel,
|
||||
const bool& useVirtualNodesThisLevel, const bool& useVirtualNodesPreviousLevel) const;
|
||||
std::vector<size_t> CalculateSizePerLevel(const BaseTree* tree, const std::vector<unsigned8> pointerSizesPerLevel, const std::vector<size_t>& parentsPerNode, const std::vector<bool>& usesVirtualNodes) const;
|
||||
std::vector<size_t> CalculateApproximateSizePerLevelVirtualNodes(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const;
|
||||
std::vector<size_t> CalculateApproximateSizePerLevelStandardNodes(const BaseTree* tree) const;
|
||||
std::vector<bool> DecideVirtualPointersPerLevel(const BaseTree* tree, const std::vector<size_t>& parentsPerNode) const;
|
||||
size_t CalculatePoolInfoSize(const BaseTree* tree);
|
||||
|
||||
size_t GetFullNodeSize(const BaseTree* tree, const unsigned8& level, const unsigned8& pointerSize) const;
|
||||
size_t GetVirtualNodeSize(const BaseTree* tree, const unsigned8& level, const unsigned8& pointerSize) const;
|
||||
size_t GetFullNodeSize(const BaseTree* tree, const unsigned8& level, const std::vector<unsigned8>& pointerSizesPerLevel) const;
|
||||
size_t GetVirtualNodeSize(const BaseTree* tree, const unsigned8& level, const std::vector<unsigned8>& pointerSizesPerLevel) const;
|
||||
size_t GetNormalNodeSize(const BaseTree* tree, const unsigned32& nodeId, const std::vector<unsigned8>& pointerSizesPerLevel, std::vector<unsigned8>& additionalPointerInfoSizesPerLevel, const bool& includingAdditionalPointerInfo) const;
|
||||
|
||||
inline bool HasAdditionalBytesPerNode(const BaseTree* tree) const
|
||||
{
|
||||
std::vector<unsigned8> additionalBytesPerNode = tree->GetAdditionalBytesPerNode();
|
||||
for (auto abpn : additionalBytesPerNode)
if (abpn != 0) return true;
return false;
|
||||
}
|
||||
|
||||
inline bool HasAdditionalBytesPerPointer(const BaseTree* tree) const
|
||||
{
|
||||
std::vector<unsigned8> additionalBytesPerPointer = tree->GetAdditionalBytesPerPointer();
|
||||
for (auto abpp : additionalBytesPerPointer)
if (abpp != 0) return true;
return false;
|
||||
}
|
||||
|
||||
void WriteFullNode(const BaseTree* tree, const unsigned32& nodeId, const unsigned32& childPointer, const unsigned8& childPointerSize, BoolArray& writtenNodes,
|
||||
const unsigned8& additionalNodeBytes, std::vector<unsigned8>& pool, const size_t& offset) const;
|
||||
|
||||
void WriteNormalNode(const BaseTree* tree, const unsigned32& nodeId, const unsigned8& additionalNodeBytes, const unsigned8& pointerSize, const std::vector<size_t>& nodePointers, const size_t& nextLevelOffset, std::vector<unsigned8>& pool, const size_t& offset) const;
|
||||
|
||||
void WriteAdditionalPointerInfo(const BaseTree* tree, const unsigned32& nodeId, const ChildIndex& childId, const unsigned8& additionalPointerBytes, std::vector<unsigned8>& pool, const size_t& offset) const;
|
||||
|
||||
unsigned8 GetVMask(const BaseTree* tree, const unsigned32& nodeId, BoolArray& writtenNodes) const;
|
||||
|
||||
bool mIsBuilding;
|
||||
|
||||
enum NodeType
|
||||
{
|
||||
VIRTUAL, FULL, NORMAL
|
||||
};
|
||||
|
||||
struct NodeToWrite
|
||||
{
|
||||
unsigned32 nodeId;
|
||||
unsigned8 level;
|
||||
NodeType type;
|
||||
ChildIndex childIndexOfParent;
|
||||
unsigned32 parentId;
|
||||
|
||||
NodeToWrite(const unsigned32& nodeId, const unsigned8& level, const NodeType& type, const unsigned32& parentId, const ChildIndex childOfParent)
|
||||
: nodeId(nodeId), level(level), type(type), childIndexOfParent(childOfParent), parentId(parentId) {}
|
||||
|
||||
const Node* GetNode(const BaseTree* tree) { return tree->GetNode(nodeId); }
|
||||
};
|
||||
};
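// Hypothetical usage sketch (not part of the original source; assumes a BaseTree* named
// "tree" that has already been built):
//
//   VirtualNodePoolBuilder builder;
//   std::vector<unsigned8> pool;
//   if (builder.BuildPool(tree, pool))
//       printf("pool of %llu bytes written\n", (unsigned64)pool.size());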
|
||||
71
Research/scene/Scene.h
Normal file
@@ -0,0 +1,71 @@
|
||||
#pragma once
|
||||
#include <vector>
#include <string>
|
||||
#include "../inc/glm/glm.hpp"
|
||||
|
||||
struct Mesh {
|
||||
unsigned offset; // offset of mesh in indices
|
||||
unsigned size; // number of vertices in mesh
|
||||
bool hasUVs = false;
|
||||
bool hasVertexColors = false;
|
||||
float reflectivity = 0.f;
|
||||
std::string texture;
|
||||
};
|
||||
|
||||
struct Scene {
|
||||
std::vector<unsigned> indices; // triangle has three indices pointing to correct vertex
|
||||
std::vector<glm::vec3> vertices; // vertex positions
|
||||
std::vector<glm::vec2> uvs; // vertex texture coordinates
|
||||
std::vector<glm::vec3> normals; // vertex normals
|
||||
std::vector<glm::vec3> colors; // vertex colors (used for example in .ply files)
|
||||
std::vector<Mesh> meshes;
|
||||
|
||||
static Scene* merge(const Scene& scene1, const Scene& scene2)
|
||||
{
|
||||
// Simple merge: append the data of the second scene to that of the first scene
|
||||
Scene* merged = new Scene;
|
||||
merged->indices.resize(scene1.indices.size() + scene2.indices.size());
|
||||
std::copy(scene1.indices.begin(), scene1.indices.end(), merged->indices.begin());
|
||||
std::copy(scene2.indices.begin(), scene2.indices.end(), merged->indices.begin() + scene1.indices.size());
|
||||
|
||||
merged->vertices.resize(scene1.vertices.size() + scene2.vertices.size());
|
||||
std::copy(scene1.vertices.begin(), scene1.vertices.end(), merged->vertices.begin());
|
||||
std::copy(scene2.vertices.begin(), scene2.vertices.end(), merged->vertices.begin() + scene1.vertices.size());
|
||||
|
||||
// If one of the scenes has UVs, merge the UVs for both scenes (mirrors the colors handling below)
if (!scene1.uvs.empty() || !scene2.uvs.empty())
{
merged->uvs.resize(scene1.vertices.size() + scene2.vertices.size(), glm::vec2(0));
if (!scene1.uvs.empty()) std::copy(scene1.uvs.begin(), scene1.uvs.end(), merged->uvs.begin());
if (!scene2.uvs.empty()) std::copy(scene2.uvs.begin(), scene2.uvs.end(), merged->uvs.begin() + scene1.vertices.size());
}
|
||||
|
||||
merged->normals.resize(scene1.normals.size() + scene2.normals.size());
|
||||
std::copy(scene1.normals.begin(), scene1.normals.end(), merged->normals.begin());
|
||||
std::copy(scene2.normals.begin(), scene2.normals.end(), merged->normals.begin() + scene1.normals.size());
|
||||
|
||||
// If one of the scenes has colors, load the colors for both scenes
|
||||
if (!scene1.colors.empty() || !scene2.colors.empty())
|
||||
{
|
||||
merged->colors.resize(scene1.vertices.size() + scene2.vertices.size(), glm::vec3(1));
|
||||
if (!scene1.colors.empty()) std::copy(scene1.colors.begin(), scene1.colors.end(), merged->colors.begin());
|
||||
if (!scene2.colors.empty()) std::copy(scene2.colors.begin(), scene2.colors.end(), merged->colors.begin() + scene1.vertices.size());
|
||||
}
|
||||
|
||||
merged->meshes.resize(scene1.meshes.size() + scene2.meshes.size());
|
||||
for (unsigned i = 0; i < (unsigned)scene1.meshes.size(); i++)
|
||||
merged->meshes[i] = scene1.meshes[i];
|
||||
for (unsigned i = 0; i < (unsigned)scene2.meshes.size(); i++) {
|
||||
merged->meshes[i + scene1.meshes.size()] = scene2.meshes[i];
|
||||
merged->meshes[i + scene1.meshes.size()].offset += (unsigned)scene1.indices.size();
|
||||
}
|
||||
return merged;
|
||||
}
|
||||
|
||||
~Scene()
|
||||
{
|
||||
vertices.clear();
|
||||
uvs.clear();
|
||||
normals.clear();
|
||||
colors.clear();
|
||||
indices.clear();
|
||||
meshes.clear();
|
||||
}
|
||||
};
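// Hypothetical usage sketch (not part of the original source; sceneA and sceneB are assumed
// to be already-loaded Scene objects). Note that merge() allocates the result with new, so
// the caller owns the returned pointer:
//
//   Scene* combined = Scene::merge(sceneA, sceneB);
//   // ... voxelize / render the combined scene ...
//   delete combined;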
|
||||
|
||||
79
Research/scene/TextureCompressor/BasicTexture.h
Normal file
@@ -0,0 +1,79 @@
|
||||
#pragma once
|
||||
|
||||
#include "CompressedTexture.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../inc/tbb/parallel_for.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
|
||||
template<typename T>
|
||||
class BasicTexture : public CompressedTexture<T>
|
||||
{
|
||||
private:
|
||||
std::vector<T> mData;
|
||||
public:
|
||||
BasicTexture() : mData(std::vector<T>()) {}
|
||||
~BasicTexture() {}
|
||||
|
||||
// Uncompress and retrieve the texture (possibly from a certain index)
|
||||
T operator[](size_t i) const override { return mData[i]; }
|
||||
unsigned64 size() const override { return mData.size(); }
|
||||
|
||||
std::vector<T> GetTexture(size_t fromIndex = 0) override {
|
||||
std::vector<T> res(size() - fromIndex);
|
||||
std::copy(mData.begin() + fromIndex, mData.end(), res.begin());
|
||||
return res;
|
||||
}
|
||||
// Compresses the texture, replacing everything after fromIndex by whatever is in texture
|
||||
void SetTexture(const std::vector<T>& texture, size_t fromIndex = 0) override
|
||||
{
|
||||
mData.resize(fromIndex + texture.size());
|
||||
std::copy(texture.begin(), texture.end(), mData.begin() + fromIndex);
|
||||
}
|
||||
// Replace materials by other materials. This can be useful because some implementations don't need to fully decompress and recompress the texture to do it.
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacers) override
|
||||
{
|
||||
bool replacersEqual = true;
|
||||
for (auto replacer : replacers)
|
||||
if (replacer.first != replacer.second)
|
||||
{
|
||||
replacersEqual = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!replacersEqual)
|
||||
{
|
||||
tbb::parallel_for(size_t(0), mData.size(), [&](size_t i)
|
||||
{
|
||||
auto replacer = replacers.find(mData[i]);
|
||||
if (replacer != replacers.end())
|
||||
mData[i] = replacer->second;
|
||||
});
|
||||
}
|
||||
}
|
||||
void Recompress() override { return; }
|
||||
|
||||
void ReadFromFile(std::istream& file) override
|
||||
{
|
||||
Serializer<std::vector<T>, unsigned64>::Deserialize(mData, file);
|
||||
}
|
||||
virtual void WriteToFile(std::ostream& file) const override
|
||||
{
|
||||
Serializer<std::vector<T>, unsigned64>::Serialize(mData, file);
|
||||
}
|
||||
|
||||
std::vector<unsigned8> GetTexturePool() const override
|
||||
{
|
||||
size_t poolSize = GetTexturePoolSize();
|
||||
std::vector<unsigned8> pool = std::vector<unsigned8>(poolSize);
|
||||
tbb::parallel_for(size_t(0), mData.size(), [&](size_t i)
|
||||
{
|
||||
// Convert all values to 32 bit integers and split them into bytes
|
||||
unsigned32 value = (unsigned32)mData[i];
|
||||
BitHelper::SplitInBytesAndMove(value, pool, i * 4);
|
||||
});
|
||||
return pool;
|
||||
}
|
||||
size_t GetTexturePoolSize() const override { return mData.size() * 4; }
|
||||
|
||||
virtual std::map<std::string, std::string> GetAdditionalProperties() const override { return std::map<std::string, std::string>(); }
|
||||
};
|
||||
202
Research/scene/TextureCompressor/BlockCompressedTexture.h
Normal file
@@ -0,0 +1,202 @@
|
||||
#pragma once
|
||||
|
||||
#include "CompressedTexture.h"
|
||||
#include "BasicTexture.h"
|
||||
#include "../Material/Block.h"
|
||||
#include "../../inc/tbb/parallel_for.h"
|
||||
#include "BlockHashers.h"
|
||||
#include <algorithm>
|
||||
#include "../../core/Serializer.h"
|
||||
|
||||
template<typename T, typename BlockTextureType = BasicTexture<T>>
|
||||
class BlockCompressedTexture : public CompressedTexture<T>
|
||||
{
|
||||
private:
|
||||
size_t mActualSize;
|
||||
std::vector<unsigned> mBlockPointers;
|
||||
BlockTextureType mBlocks;
|
||||
unsigned mBlockSize;
|
||||
public:
|
||||
BlockCompressedTexture(unsigned blockSize) :
|
||||
mActualSize(0),
|
||||
mBlockPointers(std::vector<unsigned>()),
|
||||
mBlocks(BlockTextureType()),
|
||||
mBlockSize(blockSize)
|
||||
{}
|
||||
|
||||
~BlockCompressedTexture() override {}
|
||||
|
||||
T operator[](size_t i) const override
|
||||
{
|
||||
size_t blockPointerIndex = i / mBlockSize;
|
||||
size_t blockIndex = mBlockPointers[blockPointerIndex] * mBlockSize;
|
||||
blockIndex += i % mBlockSize;
|
||||
return mBlocks[blockIndex];
|
||||
}
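// Worked example (illustrative, numbers not taken from the original source): with
// mBlockSize == 4, reading texel i == 10 uses block pointer 10 / 4 == 2; if
// mBlockPointers[2] == 7 the texel is fetched from mBlocks[7 * 4 + 10 % 4] == mBlocks[30].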
|
||||
|
||||
unsigned64 size() const override { return mActualSize; }
|
||||
|
||||
// Returns the (unpacked) material indices from the given index
|
||||
std::vector<T> GetTexture(size_t fromIndex = 0) override
|
||||
{
|
||||
assert(fromIndex <= mActualSize);
|
||||
std::vector<T> res(mActualSize - fromIndex);
|
||||
if (res.size() == 0) return res;
|
||||
size_t startBlockIndex = fromIndex / mBlockSize;
|
||||
size_t indexInsideStartBlock = fromIndex % mBlockSize;
|
||||
size_t indexFromStartBlock = mBlockSize - indexInsideStartBlock;
|
||||
// Unpack the part of the first block from the "fromIndex"
|
||||
for (size_t j = indexInsideStartBlock; j < mBlockSize && j < res.size(); j++)
|
||||
res[j - indexInsideStartBlock] = mBlocks[mBlockPointers[startBlockIndex] * mBlockSize + j];
|
||||
// Unpack the rest of the blocks
|
||||
for (size_t i = startBlockIndex + 1; i < mBlockPointers.size(); i++)
|
||||
{
|
||||
size_t blockResIndex = indexFromStartBlock + ((i - startBlockIndex - 1) * mBlockSize);
|
||||
size_t blocksIndex = mBlockPointers[i] * mBlockSize;
|
||||
for (size_t j = 0; j < mBlockSize; j++)
|
||||
{
|
||||
if (blockResIndex + j >= res.size()) break;
|
||||
res[blockResIndex + j] = mBlocks[blocksIndex + j];
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void SetTexture(const std::vector<T>& materialPointers, size_t fromIndex = 0) override
|
||||
{
|
||||
// TODO: only copy if necessary
|
||||
std::vector<T> nodeMaterialPointers;
|
||||
|
||||
mActualSize = fromIndex + materialPointers.size();
|
||||
unsigned fromBlockIndex = (unsigned)fromIndex / mBlockSize;
|
||||
unsigned highestIndex = mBlockPointers.empty() ? 0 : *std::max_element(mBlockPointers.begin(), mBlockPointers.begin() + fromBlockIndex);
|
||||
|
||||
// Build a map from blocks to indices that already exist (only necessary if fromIndex > 0)
|
||||
std::unordered_map<Block<T>, unsigned> blockIndices;
|
||||
if (fromIndex >= mBlockSize)
|
||||
{
|
||||
std::vector<T> uncompressedBlocks = mBlocks.GetTexture();
|
||||
for (size_t i = 0; i < highestIndex; i++)
|
||||
{
|
||||
Block<T> cur(uncompressedBlocks, i * mBlockSize, i * mBlockSize + mBlockSize);
|
||||
blockIndices.insert(std::pair<Block<T>, unsigned>(cur, (unsigned)i));
|
||||
}
|
||||
}
|
||||
// Prepend whatever was already stored in the block where the new material pointers start, so that block can be rebuilt in full
|
||||
unsigned switchIndex = (unsigned)fromIndex % mBlockSize;
|
||||
if (switchIndex != 0)
|
||||
{
|
||||
std::vector<T> switchBlockStart(switchIndex);
|
||||
auto switchBlockIndex = mBlockPointers[fromBlockIndex] * mBlockSize;
|
||||
for (unsigned j = 0; j < switchIndex; j++)
|
||||
switchBlockStart[j] = mBlocks[switchBlockIndex + j];
|
||||
nodeMaterialPointers.resize(switchIndex + materialPointers.size());
|
||||
std::copy(switchBlockStart.begin(), switchBlockStart.end(), nodeMaterialPointers.begin());
|
||||
std::copy(materialPointers.begin(), materialPointers.end(), nodeMaterialPointers.begin() + switchIndex);
|
||||
}
|
||||
else
|
||||
{
|
||||
nodeMaterialPointers.resize(materialPointers.size());
|
||||
std::copy(materialPointers.begin(), materialPointers.end(), nodeMaterialPointers.begin());
|
||||
}
|
||||
if (nodeMaterialPointers.size() % mBlockSize != 0)
|
||||
{
|
||||
unsigned emptyNodesToAdd = mBlockSize - nodeMaterialPointers.size() % mBlockSize;
|
||||
nodeMaterialPointers.resize(nodeMaterialPointers.size() + emptyNodesToAdd);
|
||||
}
|
||||
std::vector<T> newBlocks;
|
||||
mBlockPointers.resize(mActualSize / mBlockSize + (((mActualSize % mBlockSize) == 0) ? 0 : 1));
|
||||
|
||||
// Compress the material pointers so that identical blocks are stored only once and reused
|
||||
size_t reuseCount = 0;
|
||||
unsigned newBlockPointer = highestIndex;
|
||||
unsigned blockId = fromBlockIndex;
|
||||
for (size_t i = 0; i < nodeMaterialPointers.size(); i += mBlockSize)
|
||||
{
|
||||
// Build the current block
|
||||
Block<T> current(nodeMaterialPointers, i, i + mBlockSize);
|
||||
// Check if the current block is already in the texture
|
||||
auto existingCurrent = blockIndices.find(current);
|
||||
unsigned curBlockPointer;
|
||||
if (existingCurrent == blockIndices.end())
|
||||
{
|
||||
// If it isn't, copy the current block to the block texture.
|
||||
for (size_t j = 0; j < mBlockSize; j++)
|
||||
newBlocks.push_back(current.Get(j));
|
||||
curBlockPointer = newBlockPointer;
|
||||
blockIndices.insert(std::pair<Block<T>, unsigned>(current, newBlockPointer));
|
||||
newBlockPointer++;
|
||||
}
|
||||
else
|
||||
{
|
||||
reuseCount++;
|
||||
curBlockPointer = existingCurrent->second;
|
||||
}
|
||||
mBlockPointers[blockId++] = curBlockPointer;
|
||||
}
|
||||
mBlocks.SetTexture(newBlocks, highestIndex * mBlockSize);
|
||||
printf("%llu Blocks were reused during compression.\n", (unsigned64)reuseCount);
|
||||
}
|
||||
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacementMap) override
|
||||
{
|
||||
mBlocks.ReplaceMaterials(replacementMap);
|
||||
}
|
||||
|
||||
void Recompress() override
|
||||
{
|
||||
// TODO: make this more efficient
|
||||
SetTexture(GetTexture());
|
||||
}
|
||||
|
||||
void ReadFromFile(std::istream& file) override
|
||||
{
|
||||
// Read the block pointers
|
||||
Serializer<std::vector<unsigned32>, unsigned64>::Deserialize(mBlockPointers, file);
|
||||
|
||||
// Read the blocks
|
||||
mBlocks.ReadFromFile(file);
|
||||
}
|
||||
|
||||
void WriteToFile(std::ostream& file) const override
|
||||
{
|
||||
// Write the block pointers
|
||||
Serializer<std::vector<unsigned32>, unsigned64>::Serialize(mBlockPointers, file);
|
||||
|
||||
// Write the blocks
|
||||
mBlocks.WriteToFile(file);
|
||||
}
|
||||
|
||||
// Texture pool is taken from how the blocks are stored
|
||||
std::vector<unsigned8> GetTexturePool() const override { return mBlocks.GetTexturePool(); }
|
||||
size_t GetTexturePoolSize() const override { return mBlocks.GetTexturePoolSize(); }
|
||||
|
||||
// The additional texture pool stores, for every run of mBlockSize texels, the index of the block in the block texture that it maps to.
|
||||
std::vector<unsigned8> GetAdditionalTexturePool() const
|
||||
{
|
||||
size_t textureSize = GetAdditionalTexturePoolSize();
|
||||
std::vector<unsigned8> additionalPool(textureSize);
|
||||
tbb::parallel_for(size_t(0), mBlockPointers.size(), [&](const size_t& i)
|
||||
{
|
||||
BitHelper::SplitInBytesAndMove(mBlockPointers[i], additionalPool, i * 4);
|
||||
});
|
||||
return additionalPool;
|
||||
}
|
||||
size_t GetAdditionalTexturePoolSize() const
|
||||
{
|
||||
return mBlockPointers.size() * 4;
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> GetAdditionalProperties() const override
|
||||
{
|
||||
std::map<std::string, std::string> res;
|
||||
res.insert(std::pair<std::string, std::string>("blockSize", std::to_string(mBlockSize)));
|
||||
|
||||
// Get the additional properties from the block texture
|
||||
std::map<std::string, std::string> blockProperties = mBlocks.GetAdditionalProperties();
|
||||
for (auto prop : blockProperties)
|
||||
res.insert(prop);
|
||||
|
||||
return res;
|
||||
};
|
||||
};
|
||||
34
Research/scene/TextureCompressor/BlockHashers.h
Normal file
@@ -0,0 +1,34 @@
|
||||
#pragma once
|
||||
|
||||
#include <unordered_map>
|
||||
#include "../../core/Defines.h"
|
||||
#include "../../core/BitHelper.h"
|
||||
#include "../Material/Block.h"
|
||||
#include "../Material/MaterialLibraryPointer.h"
|
||||
|
||||
namespace std
|
||||
{
|
||||
//template<>
|
||||
//struct hash<Block<MaterialLibraryPointer>>
|
||||
//{
|
||||
// size_t operator()(const Block<MaterialLibraryPointer> &value) const
|
||||
// {
|
||||
// size_t res = 0;
|
||||
// for (size_t i = 0; i < value.size(); i++)
|
||||
// res ^= BitHelper::CircularShiftLeft<size_t>(hash<MaterialLibraryPointer>()(value.Get(i)), i);
|
||||
// return res;
|
||||
// }
|
||||
//};
|
||||
|
||||
template<typename T>
|
||||
struct hash<Block<T>>
|
||||
{
|
||||
size_t operator()(const Block<T> &value) const
|
||||
{
|
||||
size_t res = 0;
|
||||
for (size_t i = 0; i < value.size(); i++)
|
||||
res ^= BitHelper::CircularShiftLeft<size_t>(hash<T>()(value.Get(i)), i);
|
||||
return res;
|
||||
}
|
||||
};
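// Example (illustrative): for a block of 4 texels this combines the per-texel hashes as
//   hash(v0) ^ rotl(hash(v1), 1) ^ rotl(hash(v2), 2) ^ rotl(hash(v3), 3)
// so, unlike a plain XOR, permuting the texels inside a block generally changes the hash.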
|
||||
}
|
||||
39
Research/scene/TextureCompressor/CompressedTexture.h
Normal file
@@ -0,0 +1,39 @@
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <fstream>
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include "../../inc/gl/glew.h"
|
||||
|
||||
template<typename T>
|
||||
class CompressedTexture
|
||||
{
|
||||
public:
|
||||
virtual ~CompressedTexture() {}
|
||||
|
||||
// Uncompress and retrieve the texture (possibly from a certain index)
|
||||
virtual T operator[](size_t i) const = 0;
|
||||
virtual unsigned64 size() const = 0;
|
||||
|
||||
virtual std::vector<T> GetTexture(size_t fromIndex = 0) = 0;
|
||||
// Compresses the texture, replacing everything after fromIndex by whatever is in texture
|
||||
virtual void SetTexture(const std::vector<T>& texture, size_t fromIndex = 0) = 0;
|
||||
// Replace materials by other materials. This can be useful because some implementations don't need to fully decompress and recompress the texture to do it.
|
||||
virtual void ReplaceMaterials(const std::unordered_map<T, T>& replacers) = 0;
|
||||
virtual void Recompress() { SetTexture(GetTexture()); }
|
||||
|
||||
virtual void ReadFromFile(std::istream& file) = 0;
|
||||
virtual void WriteToFile(std::ostream& file) const = 0;
|
||||
|
||||
// Use this method to output the compressed texture as a vector of one byte characters
|
||||
virtual std::vector<unsigned8> GetTexturePool() const = 0;
|
||||
virtual size_t GetTexturePoolSize() const = 0;
|
||||
|
||||
// Use this method to output any addition information that might be needed.
|
||||
virtual std::vector<unsigned8> GetAdditionalTexturePool() const { return std::vector<unsigned8>(); };
|
||||
virtual size_t GetAdditionalTexturePoolSize() const { return 0; };
|
||||
|
||||
virtual std::map<std::string, std::string> GetAdditionalProperties() const = 0;
|
||||
};
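// Hypothetical usage sketch (not part of the original source; BasicTexture is the
// uncompressed reference implementation declared in BasicTexture.h):
//
//   CompressedTexture<unsigned32>* tex = new BasicTexture<unsigned32>();
//   tex->SetTexture({ 1, 2, 3, 4 });
//   unsigned32 third = (*tex)[2]; // == 3
//   delete tex;                   // safe: the base class has a virtual destructor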
|
||||
75
Research/scene/TextureCompressor/DagBasedTexture.h
Normal file
@@ -0,0 +1,75 @@
|
||||
#pragma once
|
||||
|
||||
#include "CompressedTexture.h"
|
||||
#include "../../core/Util/BinaryTree.h"
|
||||
#include "../../core/Serializer.h"
|
||||
|
||||
template<typename T>
|
||||
class DagBasedTexture : public CompressedTexture<T>
|
||||
{
|
||||
private:
|
||||
BinaryTree<T> mData;
|
||||
unsigned64 mOriginalSize;
|
||||
public:
|
||||
DagBasedTexture() :
|
||||
mData(BinaryTree<T>()),
|
||||
mOriginalSize(0)
|
||||
{}
|
||||
~DagBasedTexture() {}
|
||||
|
||||
// Uncompress and retrieve the texture (possibly from a certain index)
|
||||
T operator[](size_t i) const override { return mData.GetValueAtLeaf(i); }
|
||||
unsigned64 size() const override { return mOriginalSize; }
|
||||
|
||||
std::vector<T> GetTexture(size_t fromIndex = 0) override {
|
||||
std::vector<T> data(mOriginalSize);
|
||||
for (size_t i = 0; i < mOriginalSize; i++) { data[i] = operator[](i); }
|
||||
return data;
|
||||
}
|
||||
// Compresses the texture, replacing everything after fromIndex by whatever is in texture
|
||||
void SetTexture(const std::vector<T>& texture, size_t fromIndex = 0) override
|
||||
{
|
||||
// Calculate the required tree depth
|
||||
unsigned8 requiredDepth = BitHelper::Log2Ceil(fromIndex + texture.size());
|
||||
mData.SetDepth(requiredDepth, true);
|
||||
// Set all leaf nodes correctly:
|
||||
for (size_t i = 0; i < texture.size(); i++)
|
||||
mData.SetValueAtLeaf(fromIndex + i, texture[i]);
|
||||
mData.ToDAG();
|
||||
|
||||
mOriginalSize = fromIndex + texture.size();
|
||||
}
|
||||
|
||||
// Replace materials by other materials. This can be useful because some implementations don't need to fully decompress and recompress the texture to do it.
|
||||
void ReplaceMaterials(const std::unordered_map<T, T>& replacers) override
|
||||
{
|
||||
bool replacersEqual = true;
|
||||
for (auto replacer : replacers)
|
||||
if (replacer.first != replacer.second)
|
||||
{
|
||||
replacersEqual = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (replacersEqual) return;
|
||||
mData.ReplaceValues(replacers);
|
||||
}
|
||||
void Recompress() override { return; }
|
||||
|
||||
void ReadFromFile(std::istream& file) override
|
||||
{
|
||||
Serializer<unsigned64>::Deserialize(mOriginalSize, file);
|
||||
mData.Deserialize(file);
|
||||
}
|
||||
virtual void WriteToFile(std::ostream& file) const override
|
||||
{
|
||||
Serializer<unsigned64>::Serialize(mOriginalSize, file);
|
||||
mData.Serialize(file);
|
||||
}
|
||||
|
||||
// Use this method to output the compressed texture as a vector of one byte characters
|
||||
std::vector<unsigned char> GetTexturePool() const override { return mData.Serialize(true); }
|
||||
size_t GetTexturePoolSize() const override { return mData.GetSerializedByteCount(true); }
|
||||
|
||||
virtual std::map<std::string, std::string> GetAdditionalProperties() const override { return std::map<std::string, std::string>(); }
|
||||
};
|
||||
175
Research/scene/TextureCompressor/MultiRootBasedTexture.h
Normal file
@@ -0,0 +1,175 @@
|
||||
#pragma once
|
||||
|
||||
#include "CompressedTexture.h"
|
||||
#include "../Octree/MultiRootTree.h"
|
||||
#include "../../core/Serializer.h"
|
||||
#include "../../core/MathHelper.h"
|
||||
#include "../PoolBuilder/StandardPoolBuilder.h"
|
||||
|
||||
template<typename T>
|
||||
class MultiRootBasedTexture : public CompressedTexture<T>
|
||||
{
|
||||
private:
|
||||
MultiRootTree<>* mData;
|
||||
std::vector<unsigned8> mBitMap;
|
||||
unsigned32 mMask;
|
||||
unsigned64 mOriginalSize;
|
||||
|
||||
glm::uvec3 GetTextureCoord(size_t i) const
|
||||
{
|
||||
size_t nodeIndex = i / 8;
|
||||
unsigned8 depth = mData->GetMaxLevel();
|
||||
size_t mask = BitHelper::GetLSMask<size_t>(0, depth - 1);
|
||||
glm::uvec3 leafIndex(
|
||||
(nodeIndex & mask) << 1,
|
||||
((nodeIndex >> (depth - 1)) & mask) << 1,
|
||||
((nodeIndex >> ((depth - 1) * 2)) & mask) << 1);
|
||||
if (BitHelper::GetLS(i, 2)) leafIndex.z++;
|
||||
if (BitHelper::GetLS(i, 1)) leafIndex.y++;
|
||||
if (BitHelper::GetLS(i, 0)) leafIndex.x++;
|
||||
return leafIndex;
|
||||
}
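// Sketch of the mapping above (illustrative): the low 3 bits of i select the child inside a
// 2x2x2 leaf node (bit 0 -> x, bit 1 -> y, bit 2 -> z), while i / 8 is unpacked into the
// node's x, y and z coordinates, (depth - 1) bits per axis, each shifted left once to make
// room for that child offset.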
|
||||
|
||||
static unsigned8 GetRequiredDepth(size_t nodeCount)
|
||||
{
|
||||
unsigned8 indexBits = BitHelper::Log2Ceil(nodeCount);
|
||||
return indexBits / 3 + ((indexBits % 3 == 0) ? 0 : 1);
|
||||
}
|
||||
public:
    MultiRootBasedTexture() :
        mData(NULL),
        mBitMap(std::vector<unsigned8>()),
        mMask(0),
        mOriginalSize(0)
    {}

    ~MultiRootBasedTexture() { delete mData; }

    T operator[](size_t i) const override {
        assert(i < mOriginalSize);
        glm::uvec3 textureCoord = GetTextureCoord(i);
        unsigned32 res = 0;
        for (unsigned32 bit = 0; bit < (unsigned32)mBitMap.size(); bit++)
        {
            bool bitSet;
            if (bit == 0)
                bitSet = mData->HasLeaf(textureCoord);
            else
                bitSet = mData->SlaveHasLeaf(textureCoord, bit - 1);
            if (bitSet) BitHelper::SetHS(res, mBitMap[bit]);
        }
        T resVal;
        resVal = res;
        return resVal;
    }
    unsigned64 size() const override { return mOriginalSize; }

    std::vector<T> GetTexture(size_t fromIndex = 0) override {
        std::vector<T> data(mOriginalSize - fromIndex);
        for (size_t i = fromIndex; i < mOriginalSize; i++) { data[i - fromIndex] = operator[](i); }
        return data;
    }
    // Compresses the texture, replacing everything after fromIndex with the contents of texture
    void SetTexture(const std::vector<T>& texture, size_t fromIndex = 0) override
    {
        // TODO: correctly handle the fromIndex
        if (fromIndex != 0)
        {
            // Hack to handle fromIndex: just unpack and repack the whole texture
            std::vector<T> oldTexture = GetTexture();
            // Copy the new texture
            oldTexture.resize(fromIndex + texture.size());
            std::copy(texture.begin(), texture.end(), oldTexture.begin() + fromIndex);
            // Build the DAG
            SetTexture(oldTexture);
            return;
        }
        else if (mData != NULL)
        {
            delete mData;
            mData = NULL;
        }

        assert(mData == NULL); // Make sure we are working with a fresh tree, since appending does not work correctly yet

        // Figure out which bits are set:
        unsigned32 mask = 0;
        for (auto mat : texture)
            mask |= (unsigned32)mat;

        mMask = mask;
        unsigned8 bitCount = BitHelper::GetSet(mask);
        mBitMap = BitHelper::GetBitMapHS(mask);

        unsigned8 depth = GetRequiredDepth(fromIndex + texture.size());

        if (mData == NULL)
        {
            mData = new MultiRootTree<>(depth, bitCount - 1);
        }

        for (size_t i = 0; i < texture.size(); i++)
        {
            glm::uvec3 texCoord = GetTextureCoord(i);
            unsigned32 texel = (unsigned32)texture[i];
            for (unsigned32 bit = 0; bit < (unsigned32)mBitMap.size(); bit++)
            {
                if (BitHelper::GetHS(texel, mBitMap[bit]))
                {
                    if (bit == 0) mData->AddLeafNode(texCoord);
                    else mData->AddLeafNode(texCoord, bit - 1);
                }
            }
        }
        mData->ToDAG();

        mOriginalSize = fromIndex + texture.size();
    }
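    // Worked example of the bit-plane split above (assuming GetBitMapHS lists the set bit
    // positions of the mask, most significant first): if the texels OR together to
    // mask = 0b1011, then bitCount = 3, so one master tree plus two slave trees are built.
    // Each texel adds a leaf to the tree of every bit that is set in its value, and
    // operator[] reassembles the value by querying those trees bit by bit.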

    // Replace all materials by other colors. This can be useful because some implementations
    // do not need to fully decompress and recompress the whole texture to do this.
    void ReplaceMaterials(const std::unordered_map<T, T>& replacers) override
    {
        // TODO: implement this without a full decompress/recompress round trip
        std::vector<T> texture = GetTexture();
        delete mData;
        mData = NULL;
        mOriginalSize = 0;
        mMask = 0;
        mBitMap.clear();
        SetTexture(texture);
    }
    void Recompress() override { return; }

    void ReadFromFile(std::istream& file) override
    {
        Serializer<unsigned64>::Deserialize(mOriginalSize, file);
        Serializer<unsigned32>::Deserialize(mMask, file);
        mBitMap = BitHelper::GetBitMapHS(mMask); // Rebuild the bit map so operator[] can decode values again
        unsigned8 depth = GetRequiredDepth(mOriginalSize);
        unsigned8 slaveRootCount = BitHelper::GetSet(mMask) - 1;
        if (mData != NULL) delete mData;
        mData = new MultiRootTree<>(depth, slaveRootCount);
        mData->Deserialize(file);
    }
    virtual void WriteToFile(std::ostream& file) const override
    {
        Serializer<unsigned64>::Serialize(mOriginalSize, file);
        Serializer<unsigned32>::Serialize(mMask, file);
        mData->Serialize(file);
    }

    // Use this method to output the compressed texture as a vector of one-byte characters
    std::vector<unsigned8> GetTexturePool() const override
    {
        StandardPoolBuilder builder;
        std::vector<unsigned8> res;
        builder.BuildPool(mData, res);
        return res;
    }
    size_t GetTexturePoolSize() const override
    {
        StandardPoolBuilder builder;
        return builder.GetPoolSize(mData);
    }

    virtual std::map<std::string, std::string> GetAdditionalProperties() const override { return std::map<std::string, std::string>(); }
};
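// Standalone sketch (added for illustration, not used by the class above): the same
// decompose/reassemble idea as MultiRootBasedTexture, written with plain shifts instead of
// the project's BitHelper and MultiRootTree so that no helper conventions are assumed. Each
// set bit of the mask becomes one boolean plane; a value is rebuilt by OR-ing the planes.
#include <cstddef>
#include <cstdint>
#include <vector>

// Split 'values' into one boolean plane per set bit of 'mask', most significant bit first.
inline std::vector<std::vector<bool>> SplitIntoBitPlanes(const std::vector<uint32_t>& values, uint32_t mask)
{
    std::vector<std::vector<bool>> planes;
    for (int bit = 31; bit >= 0; --bit)
    {
        if (!((mask >> bit) & 1u)) continue;
        std::vector<bool> plane(values.size());
        for (size_t i = 0; i < values.size(); ++i)
            plane[i] = ((values[i] >> bit) & 1u) != 0;
        planes.push_back(plane);
    }
    return planes;
}

// Rebuild element i from the planes, given the same mask and bit order.
inline uint32_t ReassembleFromBitPlanes(const std::vector<std::vector<bool>>& planes, uint32_t mask, size_t i)
{
    uint32_t value = 0;
    size_t planeIndex = 0;
    for (int bit = 31; bit >= 0; --bit)
    {
        if (!((mask >> bit) & 1u)) continue;
        if (planes[planeIndex][i]) value |= (1u << bit);
        ++planeIndex;
    }
    return value;
}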
1002
Research/scene/TextureCompressor/PaletteBlockTexture.h
Normal file
File diff suppressed because it is too large
265
Research/scene/TextureCompressor/TightlyPackedTexture.h
Normal file
@@ -0,0 +1,265 @@
#pragma once
#include "CompressedTexture.h"
#include "../../inc/tbb/parallel_reduce.h"
#include "../../inc/tbb/parallel_for.h"
#include "../../core/BitHelper.h"
#include <algorithm>
#include <cassert>
#include <cstdio>

template<typename T>
class TightlyPackedTexture : public CompressedTexture<T>
{
private:
    std::vector<unsigned8> mData;
    size_t mOriginalSize;
    // Mask containing the bits that are set in any of the stored items
    unsigned mMask;
    std::vector<unsigned8> mBitMap;

    void SetMask(unsigned32 mask)
    {
        mMask = mask;
        mBitMap = BitHelper::GetBitMapHS(mask);
    }

    inline unsigned32 GetValueAt(size_t i) const { return BitHelper::UnpackTightAt<unsigned8, unsigned32>(mData, i, mBitMap); }
public:
    TightlyPackedTexture() :
        mData(std::vector<unsigned8>()),
        mOriginalSize(0),
        mMask(0),
        mBitMap(std::vector<unsigned8>())
    {
        static_assert(sizeof(T) == 4, "Only types of size 4 can be tightly packed");
    }

    ~TightlyPackedTexture() override {}

    T operator[](size_t i) const override
    {
        T value;
        value = GetValueAt(i);
        return value;
    }

    unsigned64 size() const override { return mOriginalSize; }

    std::vector<T> GetTexture(size_t fromIndex = 0) override
    {
        assert(fromIndex <= size());
        //fromIndex = std::min(fromIndex, size());
        std::vector<T> res(mOriginalSize - fromIndex);
        //for (size_t i = fromIndex; i < mOriginalSize; i++)
        tbb::parallel_for(fromIndex, mOriginalSize, [&](size_t i)
        {
            unsigned value = GetValueAt(i);
            res[i - fromIndex] = value;
        });
        return res;
    }
    void SetTexture(const std::vector<T>& nodeMaterialPointers, size_t fromIndex = 0) override
    {
        if (nodeMaterialPointers.empty()) return;
        if (fromIndex != 0)
        {
            auto curTexture = GetTexture();
            curTexture.resize(fromIndex);
            curTexture.insert(curTexture.end(), nodeMaterialPointers.begin(), nodeMaterialPointers.end());
            SetTexture(curTexture, 0);
            return;
        }

        // Find out which bits are set in both the existing and the new texture:
        unsigned mask = 0;
        size_t setBits = BitHelper::GetSet(mMask);
        for (T value : nodeMaterialPointers)
            mask |= static_cast<unsigned32>(value);
        for (size_t i = 0; i < fromIndex; i++)
            mask |= GetValueAt(i);
        // Make sure at least one bit is set (preventing divide by zero and similar problems)
        if (mask == 0) mask = 1;

        // If the mask changed, we need to repack the whole texture
        if (mask != mMask && !mData.empty() && fromIndex != 0)
        {
            auto curTexture = GetTexture();
            curTexture.resize(fromIndex);
            curTexture.insert(curTexture.end(), nodeMaterialPointers.begin(), nodeMaterialPointers.end());
            SetTexture(curTexture, 0);
            return;
        }

        // Update the current mask
        SetMask(mask);
        setBits = BitHelper::GetSet(mMask);
        printf("Current texture requires %u bits per pointer\n", (unsigned32)setBits);

        // Calculate how many bits are needed for each part
        size_t existingBits = fromIndex * setBits;
        size_t newBits = nodeMaterialPointers.size() * setBits;
        size_t totalBits = existingBits + newBits;

        // If the existing bits don't end exactly on a byte boundary, the first byte of the new data
        // has to be merged into the last existing byte
        unsigned8 bitOffset = (unsigned8)(existingBits & 0x07);
        unsigned8 switchBytes = (bitOffset == 0) ? 0 : 1;

        // The existing and new byte counts don't include the switch byte
        size_t existingBytes = existingBits >> 3;
        size_t totalBytes = (totalBits >> 3) + (((totalBits & 0x07) == 0) ? 0 : 1);

        size_t switchByteIndex = existingBytes;

        // Compress the new data
        std::vector<unsigned8> packed;
        {
            std::vector<unsigned32> unpacked(nodeMaterialPointers.size());
            tbb::parallel_for(size_t(0), nodeMaterialPointers.size(), [&](size_t i) { unpacked[i] = (unsigned32)nodeMaterialPointers[i]; });
            packed = BitHelper::PackTight<unsigned8, unsigned32>(unpacked, mBitMap, bitOffset);
        }

        mData.resize(totalBytes);
        // If the boundary between the existing and new materials does not fall on a byte boundary,
        // we need to construct a "switch byte", which contains part of the last existing material
        // and part of the first new material:
        if (switchBytes != 0)
            mData[switchByteIndex] = (mData[switchByteIndex] & BitHelper::GetHSMask<unsigned8>(0, bitOffset)) | packed[0];
        // Move the packed data over to mData
        std::move(packed.begin() + switchBytes, packed.end(), mData.begin() + existingBytes + ((size_t)switchBytes));
        mOriginalSize = fromIndex + nodeMaterialPointers.size();
    }
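    // Worked example of the packing arithmetic above (illustration only): with 3 bits set in
    // the mask (setBits = 3) and 10 materials packed from fromIndex = 0, totalBits = 30 and
    // totalBytes = 4 (30 / 8 rounded up). If data were instead appended at fromIndex = 5,
    // existingBits would be 15, so bitOffset = 7 and the "switch byte" at index
    // existingBytes = 1 would combine the last 7 existing bits with the first new bit.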
    // Replace all materials. Note that for this to work, every material currently in the texture
    // needs to have a key in the dictionary
    void ReplaceMaterials(const std::unordered_map<T, T>& replacementMap) override
    {
        // Check whether the replacements differ from the originals:
        bool replacersEqual = true;
        for (auto replacer : replacementMap)
            if (replacer.first != replacer.second)
            {
                replacersEqual = false;
                break;
            }
        if (replacersEqual) return;

        // TODO: also check whether the same number of bits is set.
        // If that is the case, we can just replace the data in place. Otherwise, a full repacking is needed.
        unsigned newMask = 0;
        for (auto replacer : replacementMap)
            newMask |= (unsigned)replacer.second;
        if (newMask == mMask)
        {
            // If the mask doesn't change, we can keep using the same bits, only replacing their values
            unsigned8 setBits = BitHelper::GetSet(mMask);
            std::unordered_map<unsigned, unsigned> bitReplacementMap;
            for (auto value : replacementMap)
            {
                unsigned source = 0;
                unsigned dest = 0;
                for (unsigned8 j = 0; j < setBits; j++)
                {
                    if (BitHelper::GetHS((unsigned32)value.first, mBitMap[j])) BitHelper::SetLS(source, setBits - j - 1);
                    if (BitHelper::GetHS((unsigned32)value.second, mBitMap[j])) BitHelper::SetLS(dest, setBits - j - 1);
                }
                bitReplacementMap.insert(std::pair<unsigned, unsigned>(source, dest));
            }
            for (size_t i = 0; i < mOriginalSize; i++)
            {
                unsigned source = 0;
                // Find the current value of material i, and replace it with its replacement-map counterpart.
                unsigned8 j = 0;
                while (j < setBits)
                {
                    size_t bit = i * setBits + j;
                    size_t byte = bit >> 3;
                    unsigned8 startBitInByte = bit & 0x07;
                    unsigned8 endBitInByte = std::min(startBitInByte + setBits - j, 8);
                    unsigned8 bitsInByte = endBitInByte - startBitInByte;
                    source |= (unsigned)((mData[byte] & BitHelper::GetHSMask<unsigned8>(startBitInByte, endBitInByte)) >> (8 - endBitInByte)) << (setBits - bitsInByte - j);
                    j += bitsInByte;
                }
                // Find the value to replace it with:
                auto replacer = bitReplacementMap.find(source);
                //assert(replacer != bitReplacementMap.end());
                if (replacer != bitReplacementMap.end())
                {
                    unsigned dest = replacer->second;
                    j = 0;
                    while (j < setBits)
                    {
                        size_t startBit = i * setBits + j;
                        size_t byte = startBit >> 3;
                        unsigned8 startBitInByte = startBit & 0x07;
                        unsigned8 endBitInByte = std::min(startBitInByte + setBits - j, 8);
                        unsigned8 bitsInByte = endBitInByte - startBitInByte;

                        unsigned8 bitsToCopy = (unsigned8)(dest >> (setBits - (j + bitsInByte)));
                        unsigned8 inPlaceBitsToCopy = bitsToCopy << (8 - endBitInByte);

                        unsigned8 mask1 = BitHelper::GetHSMask<unsigned8>(0, startBitInByte) | BitHelper::GetHSMask<unsigned8>(endBitInByte, 8);
                        unsigned8 mask2 = BitHelper::GetHSMask<unsigned8>(startBitInByte, endBitInByte);
                        mData[byte] =
                            (mask1 & mData[byte]) | // Copy the original bits
                            (mask2 & inPlaceBitsToCopy); // Insert the new bits

                        j += bitsInByte;
                    }
                }
            }
        }
        else
        {
            // If the mask changes, we need to replace the whole texture
            std::vector<T> cur = GetTexture();
            //for (size_t i = 0; i < cur.size(); i++)
            tbb::parallel_for(size_t(0), cur.size(), [&](size_t i)
            {
                auto item = replacementMap.find(cur[i]);
                //assert(item != replacementMap.end());
                if (item != replacementMap.end())
                    cur[i] = item->second;
            });
            SetTexture(cur);
        }
    }
    void Recompress() override { return; }

    void ReadFromFile(std::istream& file) override
    {
        mData.clear();
        unsigned64 originalSize;
        Serializer<unsigned64>::Deserialize(originalSize, file);
        mOriginalSize = originalSize;
        unsigned32 mask;
        Serializer<unsigned32>::Deserialize(mask, file);
        SetMask(mask);
        unsigned64 bitCount = ((unsigned64)BitHelper::GetSet(mMask)) * mOriginalSize;
        unsigned64 byteCount = BitHelper::RoundToBytes(bitCount) >> 3;
        mData.resize(byteCount);
        Serializer<unsigned8*>::Deserialize(&mData[0], mData.size(), file);
    }
    void WriteToFile(std::ostream& file) const override
    {
        unsigned64 originalSize = mOriginalSize;
        Serializer<unsigned64>::Serialize(originalSize, file);
        Serializer<unsigned32>::Serialize(mMask, file);
        Serializer<unsigned8*>::Serialize(&mData[0], mData.size(), file);
    }

    // Use this method to output the compressed texture as a vector of one-byte characters
    std::vector<unsigned8> GetTexturePool() const override {
        size_t poolSize = GetTexturePoolSize();
        std::vector<unsigned8> res(poolSize);
        std::copy(mData.begin(), mData.end(), res.begin());
        return res;
    }
    size_t GetTexturePoolSize() const override
    {
        return mData.size();
    }

    std::map<std::string, std::string> GetAdditionalProperties() const override
    {
        std::map<std::string, std::string> res;
        res.insert(std::pair<std::string, std::string>("mask", std::to_string(mMask)));
        return res;
    }
};
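// Standalone sketch (added for illustration): the byte/bit bookkeeping behind
// BitHelper::PackTight / UnpackTightAt, written with plain shifts. Unlike the real helpers it
// packs the low 'bitsPerValue' bits of each value rather than mask-selected bits, which is
// enough to show how fixed-width fields are stored across byte boundaries.
#include <cstddef>
#include <cstdint>
#include <vector>

// Pack each value into 'bitsPerValue' bits, most significant bit of each field first.
inline std::vector<uint8_t> PackFieldsExample(const std::vector<uint32_t>& values, unsigned bitsPerValue)
{
    std::vector<uint8_t> bytes((values.size() * bitsPerValue + 7) / 8, 0);
    for (size_t i = 0; i < values.size(); ++i)
        for (unsigned j = 0; j < bitsPerValue; ++j)
        {
            size_t bit = i * bitsPerValue + j;
            if ((values[i] >> (bitsPerValue - 1 - j)) & 1u)
                bytes[bit >> 3] |= (uint8_t)(0x80u >> (bit & 7));
        }
    return bytes;
}

// Read field i back out of the packed byte vector.
inline uint32_t UnpackFieldExample(const std::vector<uint8_t>& bytes, size_t i, unsigned bitsPerValue)
{
    uint32_t value = 0;
    for (unsigned j = 0; j < bitsPerValue; ++j)
    {
        size_t bit = i * bitsPerValue + j;
        uint32_t b = (bytes[bit >> 3] >> (7 - (bit & 7))) & 1u;
        value = (value << 1) | b;
    }
    return value;
}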