
The program takes an .obj model and a reference texture, tiles the texture procedurally and seamlessly over the model, then bakes the result into the model's UV map as a new texture and saves it in the project directory. Edges are still visible on the UV islands, but I couldn't correct it any further. It only loads .obj files, though it could be modified to use the Irrlicht loaders or others...
A file-selection dialog could be added, but Windows and Linux differ in that regard; another option would be to use the one Irrlicht provides, but I was too lazy to keep going, I spent days struggling with the algorithm...
Code: Select all
// File: bake_halton.cpp
// Your original code with Halton+jitters rasterization (complete)
#include <irrlicht.h>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <vector>
#include <map>
#include <unordered_map>
#include <cstdint>
#include <fstream>
#include <functional>
#include <sstream>
#include <queue>
#include <cfloat>   // FLT_MAX
#include <climits>  // INT_MAX
using namespace std;
using namespace irr;
using namespace core;
using namespace scene;
using namespace video;
using namespace io;
#ifdef _IRR_WINDOWS_
#pragma comment(lib, "Irrlicht.lib")
#pragma comment(linker, "/subsystem:console")
#endif
// --- PARAMETERS ---
const int OUTPUT_TEXTURE_SIZE = 1024;
const int BLEND_FACTOR_NUM = 4;
const int BLEND_FACTOR_DEN = 3;
const core::stringc MODEL_PATH = "../../media/test1.obj";
const core::stringc TEXTURE_PATH = "../../media/whitewashed_brick_nor_gl_2k.jpg";
const core::stringc OUTPUT_FILENAME = "fixed_seams_bake.png";
// Change this as needed: 256 / 512 / 1024
const int SUBPIXEL_SAMPLES = 32; // total samples per pixel (Halton), more samples = higher baking quality
const float WORLD_TEXTURE_SCALE = 0.5f;
// --- STRUCTURES ---
struct SurfaceSample {
vector3df position;
vector3df normal;
vector3df barycentric;
int depth;
int triangleID;
bool hasData;
vector3df worldPos;
int pixelX;
int pixelY;
vector2df uv;
int sampleCount; // Number of subpixels that tested positive
SurfaceSample() : position(0,0,0), normal(0,1,0), barycentric(0,0,0),
depth(1000000000), triangleID(-1), hasData(false), worldPos(0,0,0),
pixelX(-1), pixelY(-1), uv(0,0), sampleCount(0) {}
};
struct TriangleData {
vector3df positions[3];
vector3df normals[3];
vector2df uvs[3];
};
// --- Sutherland-Hodgman (keeping your implementation) ---
struct Point2D {
float x, y;
Point2D(float x = 0, float y = 0) : x(x), y(y) {}
bool operator==(const Point2D& other) const {
return fabs(x - other.x) < 1e-10f && fabs(y - other.y) < 1e-10f;
}
};
bool insideEdge(const Point2D& p, int edge, const Point2D& pixelMin, const Point2D& pixelMax) {
switch(edge) {
case 0: return p.x >= pixelMin.x - 1e-10f; // left
case 1: return p.x <= pixelMax.x + 1e-10f; // right
case 2: return p.y >= pixelMin.y - 1e-10f; // bottom
case 3: return p.y <= pixelMax.y + 1e-10f; // top
}
return false;
}
Point2D computeIntersection(const Point2D& p1, const Point2D& p2, int edge,
const Point2D& pixelMin, const Point2D& pixelMax) {
Point2D intersection;
double x1 = p1.x, y1 = p1.y;
double x2 = p2.x, y2 = p2.y;
double minX = pixelMin.x, minY = pixelMin.y;
double maxX = pixelMax.x, maxY = pixelMax.y;
double t = 0.0;
switch(edge) {
case 0:
if (fabs(x2 - x1) < 1e-15) t = 0.0;
else t = (minX - x1) / (x2 - x1);
intersection.x = (float)minX;
intersection.y = (float)(y1 + t * (y2 - y1));
break;
case 1:
if (fabs(x2 - x1) < 1e-15) t = 0.0;
else t = (maxX - x1) / (x2 - x1);
intersection.x = (float)maxX;
intersection.y = (float)(y1 + t * (y2 - y1));
break;
case 2:
if (fabs(y2 - y1) < 1e-15) t = 0.0;
else t = (minY - y1) / (y2 - y1);
intersection.x = (float)(x1 + t * (x2 - x1));
intersection.y = (float)minY;
break;
case 3:
if (fabs(y2 - y1) < 1e-15) t = 0.0;
else t = (maxY - y1) / (y2 - y1);
intersection.x = (float)(x1 + t * (x2 - x1));
intersection.y = (float)maxY;
break;
}
return intersection;
}
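// Clips a polygon (here: a UV-space triangle) against the axis-aligned pixel box,
// one box edge at a time; the result is the part of the triangle that covers the pixel.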
vector<Point2D> sutherlandHodgmanClip(const vector<Point2D>& inputPolygon,
const Point2D& pixelMin, const Point2D& pixelMax) {
vector<Point2D> output = inputPolygon;
if (output.empty()) return output;
for(int edge = 0; edge < 4; edge++) {
vector<Point2D> input = output;
output.clear();
if (input.empty()) break;
Point2D s = input.back();
for(size_t i = 0; i < input.size(); i++) {
Point2D p = input[i];
bool sInside = insideEdge(s, edge, pixelMin, pixelMax);
bool pInside = insideEdge(p, edge, pixelMin, pixelMax);
if (pInside) {
if (!sInside) {
Point2D intersection = computeIntersection(s, p, edge, pixelMin, pixelMax);
if (output.empty() || !(output.back() == intersection)) output.push_back(intersection);
}
if (output.empty() || !(output.back() == p)) output.push_back(p);
} else if (sInside) {
Point2D intersection = computeIntersection(s, p, edge, pixelMin, pixelMax);
if (output.empty() || !(output.back() == intersection)) output.push_back(intersection);
}
s = p;
}
if (output.empty()) break;
}
return output;
}
float polygonArea(const vector<Point2D>& polygon) {
if(polygon.size() < 3) return 0.0f;
double area = 0.0;
for(size_t i = 0; i < polygon.size(); i++) {
size_t j = (i + 1) % polygon.size();
area += (double)polygon[i].x * (double)polygon[j].y - (double)polygon[j].x * (double)polygon[i].y;
}
return (float)(fabs(area) * 0.5);
}
Point2D polygonCentroid(const vector<Point2D>& poly) {
Point2D c(0,0);
if (poly.empty()) return c;
double signedArea = 0.0;
double cx = 0.0, cy = 0.0;
for (size_t i = 0; i < poly.size(); ++i) {
size_t j = (i + 1) % poly.size();
double a = (double)poly[i].x * (double)poly[j].y - (double)poly[j].x * (double)poly[i].y;
signedArea += a;
cx += (poly[i].x + poly[j].x) * a;
cy += (poly[i].y + poly[j].y) * a;
}
if (fabs(signedArea) < 1e-20) {
for (const auto &p : poly) { cx += p.x; cy += p.y; }
cx /= (double)poly.size();
cy /= (double)poly.size();
return Point2D((float)cx, (float)cy);
}
signedArea *= 0.5;
cx /= (6.0 * signedArea);
cy /= (6.0 * signedArea);
return Point2D((float)cx, (float)cy);
}
struct Barycentric {
float u, v, w;
Barycentric(float u = 0, float v = 0, float w = 0) : u(u), v(v), w(w) {}
};
Barycentric calculateBarycentric(const Point2D& p, const Point2D& a, const Point2D& b, const Point2D& c) {
double x = p.x, y = p.y;
double x1 = a.x, y1 = a.y;
double x2 = b.x, y2 = b.y;
double x3 = c.x, y3 = c.y;
double denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
if (fabs(denom) < 1e-20) return Barycentric(-1, -1, -1);
double u = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom;
double v = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom;
double w = 1.0 - u - v;
return Barycentric((float)u, (float)v, (float)w);
}
bool pointInTriangle(const Point2D& p, const Point2D& a, const Point2D& b, const Point2D& c, float tolerance = 1e-8f) {
Barycentric bar = calculateBarycentric(p, a, b, c);
return (bar.u >= -tolerance && bar.v >= -tolerance && bar.w >= -tolerance);
}
// --- CUSTOM OBJ MODEL LOADER (same as yours) ---
bool loadOBJModel(const core::stringc& filename, std::vector<TriangleData>& triangles, aabbox3d<f32>& bbox) {
std::ifstream file(filename.c_str());
if (!file.is_open()) {
std::cerr << "Error: Could not open OBJ file: " << filename.c_str() << std::endl;
return false;
}
std::vector<vector3df> vertices;
std::vector<vector3df> normals;
std::vector<vector2df> texcoords;
std::string line;
bbox.reset(0,0,0);
while (std::getline(file, line)) {
std::istringstream iss(line);
std::string prefix;
iss >> prefix;
if (prefix == "v") {
float x, y, z;
iss >> x >> y >> z;
vertices.push_back(vector3df(x, y, z));
bbox.addInternalPoint(x, y, z);
}
else if (prefix == "vn") {
float x, y, z;
iss >> x >> y >> z;
normals.push_back(vector3df(x, y, z));
}
else if (prefix == "vt") {
float u, v;
iss >> u >> v;
texcoords.push_back(vector2df(u, 1.0f - v)); // invert V
}
else if (prefix == "f") {
std::string v1, v2, v3;
iss >> v1 >> v2 >> v3;
TriangleData tri;
int vertexIndices[3] = {0, 0, 0};
int texcoordIndices[3] = {0, 0, 0}; // initialized: faces without vt/vn indices would otherwise leave these undefined
int normalIndices[3] = {0, 0, 0};
std::string* verticesStr[3] = {&v1, &v2, &v3};
for (int i = 0; i < 3; i++) {
std::string& vertexStr = *verticesStr[i];
std::replace(vertexStr.begin(), vertexStr.end(), '/', ' ');
std::istringstream viss(vertexStr);
viss >> vertexIndices[i];
if (viss.peek() != EOF) viss >> texcoordIndices[i];
if (viss.peek() != EOF) viss >> normalIndices[i];
vertexIndices[i] = abs(vertexIndices[i]) - 1;
if (texcoords.size() > 0) texcoordIndices[i] = abs(texcoordIndices[i]) - 1;
if (normals.size() > 0) normalIndices[i] = abs(normalIndices[i]) - 1;
}
for (int i = 0; i < 3; i++) {
if (vertexIndices[i] < vertices.size()) tri.positions[i] = vertices[vertexIndices[i]];
if (normals.size() > 0 && normalIndices[i] < normals.size()) tri.normals[i] = normals[normalIndices[i]];
else tri.normals[i] = vector3df(0, 1, 0);
if (texcoords.size() > 0 && texcoordIndices[i] < texcoords.size()) tri.uvs[i] = texcoords[texcoordIndices[i]];
else tri.uvs[i] = vector2df(0, 0);
}
if (normals.empty()) {
vector3df edge1 = tri.positions[1] - tri.positions[0];
vector3df edge2 = tri.positions[2] - tri.positions[0];
vector3df normal = edge1.crossProduct(edge2);
normal.normalize();
for (int i = 0; i < 3; i++) tri.normals[i] = normal;
}
triangles.push_back(tri);
}
}
file.close();
std::cout << "OBJ model loaded: " << vertices.size() << " vertices, "
<< triangles.size() << " triangles" << std::endl;
return true;
}
// --- TEXTURE UTILITIES (keeping yours) ---
SColor getPixelTiled(IImage* src, int x, int y) {
if (!src) return SColor(255, 255, 0, 255);
int w = src->getDimension().Width;
int h = src->getDimension().Height;
int wrappedX = ((x % w) + w) % w;
int wrappedY = ((y % h) + h) % h;
return src->getPixel(wrappedX, wrappedY);
}
SColor sampleTexture(IImage* src, float u, float v) {
if (!src) return SColor(255, 0, 0, 0);
int w = src->getDimension().Width;
int h = src->getDimension().Height;
int x = (int)(u * w) % w;
int y = (int)(v * h) % h;
if (x < 0) x += w;
if (y < 0) y += h;
return getPixelTiled(src, x, y);
}
SColor sampleTextureBilinear(IImage* src, float u, float v) {
if (!src) return SColor(255, 0, 0, 0);
int w = src->getDimension().Width;
int h = src->getDimension().Height;
float fu = u - floorf(u);
float fv = v - floorf(v);
if (fu < 0.0f) fu += 1.0f;
if (fv < 0.0f) fv += 1.0f;
float x = fu * w - 0.5f;
float y = fv * h - 0.5f;
int x0 = (int)floorf(x);
int y0 = (int)floorf(y);
float sx = x - x0;
float sy = y - y0;
SColor c00 = getPixelTiled(src, x0 + 0, y0 + 0);
SColor c10 = getPixelTiled(src, x0 + 1, y0 + 0);
SColor c01 = getPixelTiled(src, x0 + 0, y0 + 1);
SColor c11 = getPixelTiled(src, x0 + 1, y0 + 1);
auto lerp8 = [&](u32 a, u32 b, float t)->u32 {
return (u32)( (1.0f - t) * (float)a + t * (float)b );
};
u32 a00 = c00.getAlpha(), r00 = c00.getRed(), g00 = c00.getGreen(), b00 = c00.getBlue();
u32 a10 = c10.getAlpha(), r10 = c10.getRed(), g10 = c10.getGreen(), b10 = c10.getBlue();
u32 a01 = c01.getAlpha(), r01 = c01.getRed(), g01 = c01.getGreen(), b01 = c01.getBlue();
u32 a11 = c11.getAlpha(), r11 = c11.getRed(), g11 = c11.getGreen(), b11 = c11.getBlue();
u32 a0 = lerp8(a00, a10, sx), a1 = lerp8(a01, a11, sx);
u32 r0 = lerp8(r00, r10, sx), r1 = lerp8(r01, r11, sx);
u32 g0 = lerp8(g00, g10, sx), g1 = lerp8(g01, g11, sx);
u32 b0 = lerp8(b00, b10, sx), b1 = lerp8(b01, b11, sx);
u32 af = lerp8(a0, a1, sy);
u32 rf = lerp8(r0, r1, sy);
u32 gf = lerp8(g0, g1, sy);
u32 bf = lerp8(b0, b1, sy);
return SColor((u32)af, (u32)rf, (u32)gf, (u32)bf);
}
inline SColor lerpColor(const SColor& a, const SColor& b, float t) {
t = core::clamp(t, 0.0f, 1.0f);
u32 ra = a.getRed(); u32 ga = a.getGreen(); u32 ba = a.getBlue();
u32 rb = b.getRed(); u32 gb = b.getGreen(); u32 bb = b.getBlue();
u32 r = (u32)(ra + (rb - ra) * t);
u32 g = (u32)(ga + (gb - ga) * t);
u32 blu = (u32)(ba + (bb - ba) * t);
return SColor(255, r, g, blu);
}
// MAIN FIX: Use world coordinates instead of normalized bbox
vector3df getGeneratedCoords(const vector3df& pos, const aabbox3d<f32>& bbox) {
return pos * WORLD_TEXTURE_SCALE;
}
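// Triplanar ("box") projection: sample the texture on the three world-axis planes
// (YZ, XZ, XY) and blend the three samples by the normal's axis weights, sharpened
// by 1/blend; intended to mimic Blender's Box mapping with a blend factor.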
void blenderBoxProjection(IImage* tex, const vector3df& co, const vector3df& nor, float blend, SColor& outColor) {
vector3df n = vector3df(fabs(nor.X), fabs(nor.Y), fabs(nor.Z));
float effective_blend = max_(blend, 0.00001f);
float sharpness = 1.0f / effective_blend;
n.X = pow(n.X, sharpness);
n.Y = pow(n.Y, sharpness);
n.Z = pow(n.Z, sharpness);
float total = n.X + n.Y + n.Z;
if (total > 0.0f) n /= total;
SColor colX = sampleTexture(tex, co.Y, co.Z);
SColor colY = sampleTexture(tex, co.X, co.Z);
SColor colZ = sampleTexture(tex, co.X, co.Y);
float r = colX.getRed() * n.X + colY.getRed() * n.Y + colZ.getRed() * n.Z;
float g = colX.getGreen() * n.X + colY.getGreen() * n.Y + colZ.getGreen() * n.Z;
float b = colX.getBlue() * n.X + colY.getBlue() * n.Y + colZ.getBlue() * n.Z;
outColor = SColor(255, (u32)r, (u32)g, (u32)b);
}
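// Produces a deterministic "depth" value from the world position relative to the
// model's bounding box; it is only used as a tie-breaker so that when several
// triangles claim the same texel, the same one always wins.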
int calculatePositionBasedDepth(const vector3df& position, const aabbox3d<f32>& bbox) {
vector3df center = bbox.getCenter();
vector3df extents = bbox.getExtent();
vector3df relPos = (position - center) / extents;
return (int)((relPos.X * 0.333f + relPos.Y * 0.333f + relPos.Z * 0.334f) * 1000000.0f);
}
// ---------------- Halton & helpers ----------------
// Radical inverse / Halton (base b). index should be >= 1 for best properties.
static inline double halton(int index, int base) {
double result = 0.0;
double f = 1.0 / (double)base;
int i = index;
while (i > 0) {
int digit = i % base;
result += digit * f;
i /= base;
f /= (double)base;
}
return result;
}
// Hash / PRNG deterministic per pixel/tri: 32-bit integer hash (Wang/Jenkins-like)
static inline uint32_t wangHash(uint32_t x) {
x = (x ^ 61) ^ (x >> 16);
x = x + (x << 3);
x = x ^ (x >> 4);
x = x * 0x27d4eb2d;
x = x ^ (x >> 15);
return x;
}
// Float in [0,1) from uint32
static inline double hashToUnit(uint32_t h) {
// use 24 bits of precision
uint32_t v = h & 0x00FFFFFFu;
return (double)v / (double)0x01000000u;
}
// ------------------ HALTON + STRATIFIED JITTER RASTERIZATION ------------------
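// For every texel inside a triangle's UV bounding box, a low-discrepancy Halton (2,3)
// point set plus a small deterministic jitter is thrown into the texel; any sample that
// lands inside the triangle proves coverage, which also catches sliver triangles that a
// single center sample would miss.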
void hyperPreciseRasterization(const std::vector<TriangleData>& triangles,
std::vector<SurfaceSample>& surfaceBuffer,
u32 width, u32 height, const aabbox3d<f32>& bbox) {
printf("Processing %d triangles with Halton+Jitter sampling (%d samples/pixel)...\n", (int)triangles.size(), SUBPIXEL_SAMPLES);
// Precompute triangle bboxes in pixel space (speeds up checks)
struct TriBBox { int minX, maxX, minY, maxY; };
std::vector<TriBBox> triBBoxes(triangles.size());
for (size_t i = 0; i < triangles.size(); ++i) {
const TriangleData& tri = triangles[i];
vector2df uv1 = vector2df(tri.uvs[0].X * width, tri.uvs[0].Y * height);
vector2df uv2 = vector2df(tri.uvs[1].X * width, tri.uvs[1].Y * height);
vector2df uv3 = vector2df(tri.uvs[2].X * width, tri.uvs[2].Y * height);
float minU = min_(uv1.X, min_(uv2.X, uv3.X));
float maxU = max_(uv1.X, max_(uv2.X, uv3.X));
float minV = min_(uv1.Y, min_(uv2.Y, uv3.Y));
float maxV = max_(uv1.Y, max_(uv2.Y, uv3.Y));
int minX = max_((int)floor(minU), 0);
int maxX = min_((int)ceil(maxU), (int)width - 1);
int minY = max_((int)floor(minV), 0);
int maxY = min_((int)ceil(maxV), (int)height - 1);
triBBoxes[i] = { minX, maxX, minY, maxY };
}
// For each triangle
for (size_t triIdx = 0; triIdx < triangles.size(); ++triIdx) {
const TriangleData& tri = triangles[triIdx];
if (triIdx % 100 == 0) {
printf("Processing triangle %zu/%zu\n", triIdx + 1, triangles.size());
}
// Triangle in pixel space
Point2D triPts[3] = {
Point2D(tri.uvs[0].X * width, tri.uvs[0].Y * height),
Point2D(tri.uvs[1].X * width, tri.uvs[1].Y * height),
Point2D(tri.uvs[2].X * width, tri.uvs[2].Y * height)
};
// Integer bbox for pixel iteration
TriBBox tb = triBBoxes[triIdx];
tb.minX = max(tb.minX, 0); tb.maxX = min(tb.maxX, (int)width - 1);
tb.minY = max(tb.minY, 0); tb.maxY = min(tb.maxY, (int)height - 1);
// For each pixel in bbox
for (int y = tb.minY; y <= tb.maxY; ++y) {
for (int x = tb.minX; x <= tb.maxX; ++x) {
int index = y * width + x;
// Fast-test: if all 4 corners are outside and tri bbox doesn't cover, skip (simple optimization)
Point2D c00((float)x + 0.0f, (float)y + 0.0f);
Point2D c10((float)x + 1.0f, (float)y + 0.0f);
Point2D c01((float)x + 0.0f, (float)y + 1.0f);
Point2D c11((float)x + 1.0f, (float)y + 1.0f);
// If none of the corners are in the triangle and there's no simple intersection,
// we can still continue because Halton will find slivers, so we DON'T do aggressive skipping.
int samplesInside = 0;
Point2D bestSample(0,0);
float bestDistance = FLT_MAX;
int bestDepth = INT_MAX;
Barycentric bestBar;
// Generate seed per pixel/tri for deterministic scramble
uint32_t seed = (uint32_t)x * 73856093u ^ (uint32_t)y * 19349663u ^ (uint32_t)(triIdx + 1) * 83492791u;
seed = wangHash(seed);
// Offset for Halton series per pixel: avoids all pixels using the same initial subsequence
// (multiply and modulo to distribute)
int haltonOffset = (int)(seed % 9973u); // 9973 is prime; just an offset to shift the sequence
// Small jitter amplitude: 1/(2*N) to not break low-discrepancy
double jitterAmp = 1.0 / (2.0 * (double)SUBPIXEL_SAMPLES);
// Halton sampling
for (int s = 0; s < SUBPIXEL_SAMPLES; ++s) {
int idx = s + haltonOffset + 1; // +1 to avoid index 0 in Halton
double hx = halton(idx, 2); // base 2
double hy = halton(idx, 3); // base 3
// Small deterministic stratified jitter using mixed hash
uint32_t h2 = wangHash(seed + (uint32_t)s * 2654435761u);
double jitter = (hashToUnit(h2) - 0.5) * jitterAmp;
double sampleX = (double)x + hx + jitter;
double sampleY = (double)y + hy + jitter;
// clamp [x, x+1)
if (sampleX < (double)x) sampleX = (double)x + 1e-12;
if (sampleX >= (double)(x + 1)) sampleX = (double)(x + 1) - 1e-12;
if (sampleY < (double)y) sampleY = (double)y + 1e-12;
if (sampleY >= (double)(y + 1)) sampleY = (double)(y + 1) - 1e-12;
Point2D samplePoint((float)sampleX, (float)sampleY);
if (pointInTriangle(samplePoint, triPts[0], triPts[1], triPts[2])) {
samplesInside++;
// barycentric and depth
Barycentric bar = calculateBarycentric(samplePoint, triPts[0], triPts[1], triPts[2]);
if (bar.u < -1e-6f || bar.v < -1e-6f || bar.w < -1e-6f) continue;
float sum = bar.u + bar.v + bar.w;
if (sum > 0.0f) { bar.u /= sum; bar.v /= sum; bar.w /= sum; }
vector3df pos = tri.positions[0] * bar.u +
tri.positions[1] * bar.v +
tri.positions[2] * bar.w;
vector3df normal = tri.normals[0] * bar.u +
tri.normals[1] * bar.v +
tri.normals[2] * bar.w;
normal.normalize();
int depth = calculatePositionBasedDepth(pos, bbox);
// Distance to pixel center (secondary criterion to choose best sample)
float distToCenter = fabs((float)sampleX - (x + 0.5f)) + fabs((float)sampleY - (y + 0.5f));
// Prioritize lower depth (closer triangle). If equal, lower distToCenter.
if (depth < bestDepth || (depth == bestDepth && distToCenter < bestDistance)) {
bestDepth = depth;
bestDistance = distToCenter;
bestSample = samplePoint;
bestBar = bar;
}
}
} // end samples loop
if (samplesInside > 0) {
// Update surfaceBuffer only if better
int new_depth = bestDepth;
bool shouldUpdate = !surfaceBuffer[index].hasData ||
new_depth < surfaceBuffer[index].depth ||
(new_depth == surfaceBuffer[index].depth && samplesInside > surfaceBuffer[index].sampleCount);
if (shouldUpdate) {
// Interpolate position/normal using bestBar (best sample)
vector3df pos = tri.positions[0] * bestBar.u +
tri.positions[1] * bestBar.v +
tri.positions[2] * bestBar.w;
vector3df normal = tri.normals[0] * bestBar.u +
tri.normals[1] * bestBar.v +
tri.normals[2] * bestBar.w;
normal.normalize();
surfaceBuffer[index].position = pos;
surfaceBuffer[index].normal = normal;
surfaceBuffer[index].barycentric = vector3df(bestBar.u, bestBar.v, bestBar.w);
surfaceBuffer[index].depth = new_depth;
surfaceBuffer[index].triangleID = (int)triIdx;
surfaceBuffer[index].hasData = true;
surfaceBuffer[index].worldPos = pos;
surfaceBuffer[index].pixelX = x;
surfaceBuffer[index].pixelY = y;
surfaceBuffer[index].uv = vector2df(bestSample.x / width, bestSample.y / height);
surfaceBuffer[index].sampleCount = samplesInside;
}
}
}
}
}
}
// --- Fill missing pixels due to geometric clipping (same as your previous version) ---
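// Second pass: for texels still empty after sampling, clip each candidate triangle
// against the texel with Sutherland-Hodgman; if the clipped polygon has non-zero
// area, the texel is filled from the polygon's centroid.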
void fillMissingPixelsWithClipping(const std::vector<TriangleData>& triangles,
std::vector<SurfaceSample>& surfaceBuffer,
u32 width, u32 height, const aabbox3d<f32>& bbox) {
printf("\n--- Extra Phase: Filling missing pixels via geometric clipping ---\n");
const float areaEps = 1e-4f;
struct TriBBox { int minX, maxX, minY, maxY; };
std::vector<TriBBox> triBBoxes(triangles.size());
for (size_t i = 0; i < triangles.size(); ++i) {
const TriangleData &tri = triangles[i];
vector2df uv1 = vector2df(tri.uvs[0].X * width, tri.uvs[0].Y * height);
vector2df uv2 = vector2df(tri.uvs[1].X * width, tri.uvs[1].Y * height);
vector2df uv3 = vector2df(tri.uvs[2].X * width, tri.uvs[2].Y * height);
float minU = min_(uv1.X, min_(uv2.X, uv3.X));
float maxU = max_(uv1.X, max_(uv2.X, uv3.X));
float minV = min_(uv1.Y, min_(uv2.Y, uv3.Y));
float maxV = max_(uv1.Y, max_(uv2.Y, uv3.Y));
int minX = max_((int)floor(minU), 0);
int maxX = min_((int)ceil(maxU), (int)width - 1);
int minY = max_((int)floor(minV), 0);
int maxY = min_((int)ceil(maxV), (int)height - 1);
triBBoxes[i] = { minX, maxX, minY, maxY };
}
int filled = 0;
for (u32 y = 0; y < height; ++y) {
for (u32 x = 0; x < width; ++x) {
u32 idx = y * width + x;
if (surfaceBuffer[idx].hasData) continue;
Point2D pixelMin((float)x, (float)y);
Point2D pixelMax((float)(x + 1.0f), (float)(y + 1.0f));
Point2D pA1(pixelMin.x, pixelMin.y);
Point2D pA2(pixelMax.x, pixelMin.y);
Point2D pA3(pixelMin.x, pixelMax.y);
Point2D pB1(pixelMax.x, pixelMax.y);
Point2D pB2 = pA2;
Point2D pB3 = pA3;
for (size_t triIdx = 0; triIdx < triangles.size(); ++triIdx) {
const TriBBox &tb = triBBoxes[triIdx];
if (x < tb.minX || x > tb.maxX || y < tb.minY || y > tb.maxY) continue;
const TriangleData &tri = triangles[triIdx];
Point2D triPts[3] = {
Point2D(tri.uvs[0].X * width, tri.uvs[0].Y * height),
Point2D(tri.uvs[1].X * width, tri.uvs[1].Y * height),
Point2D(tri.uvs[2].X * width, tri.uvs[2].Y * height)
};
vector<Point2D> inputPoly;
inputPoly.push_back(triPts[0]);
inputPoly.push_back(triPts[1]);
inputPoly.push_back(triPts[2]);
vector<Point2D> clipped = sutherlandHodgmanClip(inputPoly, pixelMin, pixelMax);
float a = polygonArea(clipped);
if (a <= areaEps) continue;
Point2D cent = polygonCentroid(clipped);
bool insideHalf = pointInTriangle(cent, pA1, pA2, pA3) || pointInTriangle(cent, pB1, pB2, pB3);
if (!insideHalf) {
for (const auto &pp : clipped) {
if (pointInTriangle(pp, pA1, pA2, pA3) || pointInTriangle(pp, pB1, pB2, pB3)) { insideHalf = true; break; }
}
}
if (!insideHalf) continue;
Barycentric bar = calculateBarycentric(cent, triPts[0], triPts[1], triPts[2]);
if (bar.u < -1e-6f || bar.v < -1e-6f || bar.w < -1e-6f) continue;
float sum = bar.u + bar.v + bar.w;
if (sum > 0.0f) { bar.u /= sum; bar.v /= sum; bar.w /= sum; }
vector3df pos = tri.positions[0] * bar.u + tri.positions[1] * bar.v + tri.positions[2] * bar.w;
vector3df normal = tri.normals[0] * bar.u + tri.normals[1] * bar.v + tri.normals[2] * bar.w;
normal.normalize();
int new_depth = calculatePositionBasedDepth(pos, bbox);
bool shouldUpdate = !surfaceBuffer[idx].hasData || new_depth < surfaceBuffer[idx].depth;
if (shouldUpdate) {
surfaceBuffer[idx].position = pos;
surfaceBuffer[idx].normal = normal;
surfaceBuffer[idx].barycentric = vector3df(bar.u, bar.v, bar.w);
surfaceBuffer[idx].depth = new_depth;
surfaceBuffer[idx].triangleID = (int)triIdx;
surfaceBuffer[idx].hasData = true;
surfaceBuffer[idx].worldPos = pos;
surfaceBuffer[idx].pixelX = x;
surfaceBuffer[idx].pixelY = y;
surfaceBuffer[idx].uv = vector2df(cent.x / width, cent.y / height);
surfaceBuffer[idx].sampleCount = 1;
filled++;
}
if (surfaceBuffer[idx].hasData) break;
}
}
}
printf("Pixels filled by extra pass: %d\n", filled);
}
// --- ISLANDS + TRIPLANAR (fixed version) ---
struct Island {
std::vector<std::pair<int, int>> pixels;
std::vector<std::pair<int, int>> borderPixels;
};
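// Flood fill (BFS, 4-connected) over the filled texels to group them into UV islands
// and record each island's border texels (filled texels with at least one empty or
// out-of-bounds neighbour).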
std::vector<Island> findIslands(const std::vector<SurfaceSample>& surfaceBuffer, u32 width, u32 height) {
std::vector<Island> islands;
std::vector<bool> visited(width * height, false);
int dx[] = {-1, 1, 0, 0};
int dy[] = {0, 0, -1, 1};
for (u32 y = 0; y < height; ++y) {
for (u32 x = 0; x < width; ++x) {
u32 idx = y * width + x;
if (surfaceBuffer[idx].hasData && !visited[idx]) {
Island newIsland;
std::queue<std::pair<int, int>> q;
q.push({x, y});
visited[idx] = true;
newIsland.pixels.push_back({x, y});
while (!q.empty()) {
auto current = q.front(); q.pop();
int cx = current.first; int cy = current.second;
u32 currentIdx = cy * width + cx;
bool isBorder = false;
for (int i = 0; i < 4; i++) {
int nx = cx + dx[i]; int ny = cy + dy[i];
if (nx >= 0 && nx < (int)width && ny >= 0 && ny < (int)height) {
u32 neighborIdx = ny * width + nx;
if (!surfaceBuffer[neighborIdx].hasData) { isBorder = true; break; }
} else { isBorder = true; }
}
if (isBorder) newIsland.borderPixels.push_back({cx, cy});
for (int i = 0; i < 4; i++) {
int nx = cx + dx[i]; int ny = cy + dy[i];
if (nx >= 0 && nx < (int)width && ny >= 0 && ny < (int)height) {
u32 neighborIdx = ny * width + nx;
if (surfaceBuffer[neighborIdx].hasData && !visited[neighborIdx]) {
visited[neighborIdx] = true;
q.push({nx, ny});
newIsland.pixels.push_back({nx, ny});
}
}
}
}
if (!newIsland.pixels.empty()) islands.push_back(newIsland);
}
}
}
return islands;
}
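// Derives the triangle's tangent/bitangent from its UV edges, plus an approximate
// world-units-per-UV scale (sqrt of world area over UV area); these are used to turn
// a UV-space offset into a world-space displacement when extending border colors.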
void computeTriangleTangentBitangent(const TriangleData& tri, vector3df& outTangent, vector3df& outBitangent, float& outWorldPerUV) {
vector3df p0 = tri.positions[0];
vector3df p1 = tri.positions[1];
vector3df p2 = tri.positions[2];
vector2df uv0 = tri.uvs[0];
vector2df uv1 = tri.uvs[1];
vector2df uv2 = tri.uvs[2];
vector3df edge1 = p1 - p0;
vector3df edge2 = p2 - p0;
float du1 = uv1.X - uv0.X;
float dv1 = uv1.Y - uv0.Y;
float du2 = uv2.X - uv0.X;
float dv2 = uv2.Y - uv0.Y;
float denom = du1 * dv2 - du2 * dv1;
if (fabs(denom) > 1e-8f) {
float r = 1.0f / denom;
vector3df tangent = (edge1 * dv2 - edge2 * dv1) * r;
vector3df bitangent = (edge2 * du1 - edge1 * du2) * r;
tangent.normalize();
bitangent.normalize();
outTangent = tangent;
outBitangent = bitangent;
} else {
vector3df n = (tri.normals[0] + tri.normals[1] + tri.normals[2]) / 3.0f;
n.normalize();
vector3df arbitrary = fabs(n.X) < 0.9f ? vector3df(1,0,0) : vector3df(0,1,0);
vector3df tangent = n.crossProduct(arbitrary);
tangent.normalize();
vector3df bitangent = n.crossProduct(tangent);
bitangent.normalize();
outTangent = tangent;
outBitangent = bitangent;
}
double worldArea = 0.5 * sqrt( (double)edge1.getLengthSQ() * (double)edge2.getLengthSQ() - pow((double)edge1.dotProduct(edge2), 2) );
double uvEdge1x = du1, uvEdge1y = dv1;
double uvEdge2x = du2, uvEdge2y = dv2;
double uvArea = 0.5 * fabs(uvEdge1x * uvEdge2y - uvEdge2x * uvEdge1y);
if (uvArea <= 1e-12 || worldArea <= 1e-12) outWorldPerUV = 1.0f;
else outWorldPerUV = (float)sqrt(worldArea / uvArea);
if (!(outWorldPerUV > 0.0f)) outWorldPerUV = 1.0f;
}
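// Padding / dilation pass: from every island border texel, march outward into empty
// texels and fill them by re-projecting the texture at a world position displaced
// along the triangle's tangent/bitangent, so the padding continues the pattern
// instead of just smearing the border color.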
void fixIslandSeams(std::vector<SColor>& colorBuffer,
const std::vector<SurfaceSample>& surfaceBuffer,
const std::vector<TriangleData>& triangles,
IImage* srcTex,
u32 width, u32 height,
const aabbox3d<f32>& bbox) {
printf("\n--- Phase 3: Fixing seams between islands (triplanar/world-space) ---\n");
// FIXED: Pass surfaceBuffer instead of colorBuffer to findIslands
std::vector<Island> islands = findIslands(surfaceBuffer, width, height);
printf("Islands found: %zu\n", islands.size());
const int MAX_EXTENSION_RADIUS = OUTPUT_TEXTURE_SIZE / 128; // 8 for 1024
int directions[] = {-1, 0, 1, 0, 0, -1, 0, 1};
int totalFixedPixels = 0;
std::vector<vector3df> triTangents(triangles.size());
std::vector<vector3df> triBitangents(triangles.size());
std::vector<float> triWorldPerUV(triangles.size());
std::vector<char> triComputed(triangles.size(), 0);
for (size_t it = 0; it < islands.size(); ++it) {
const auto& island = islands[it];
printf("Processing island with %zu pixels (%zu borders)\n",
island.pixels.size(), island.borderPixels.size());
for (const auto& borderPixel : island.borderPixels) {
int x = borderPixel.first;
int y = borderPixel.second;
u32 borderIdx = y * width + x;
if (!surfaceBuffer[borderIdx].hasData) continue;
const SurfaceSample& borderSample = surfaceBuffer[borderIdx];
int triID = borderSample.triangleID;
if (triID < 0 || triID >= (int)triangles.size()) continue;
if (!triComputed[triID]) {
computeTriangleTangentBitangent(triangles[triID], triTangents[triID], triBitangents[triID], triWorldPerUV[triID]);
triComputed[triID] = 1;
}
vector3df tangent = triTangents[triID];
vector3df bitangent = triBitangents[triID];
float worldPerUV = triWorldPerUV[triID];
vector2df borderUV = borderSample.uv;
if (!(borderUV.X > 0.0f || borderUV.Y > 0.0f)) {
borderUV.X = (x + 0.5f) / (float)width;
borderUV.Y = (y + 0.5f) / (float)height;
}
vector3df baseGenerated = getGeneratedCoords(borderSample.position, bbox);
for (int d = 0; d < 8; d += 2) {
int ddx = directions[d];
int ddy = directions[d + 1];
for (int radius = 1; radius <= MAX_EXTENSION_RADIUS; ++radius) {
int targetX = x + ddx * radius;
int targetY = y + ddy * radius;
if (targetX < 0 || targetX >= (int)width || targetY < 0 || targetY >= (int)height) {
break;
}
u32 targetIdx = targetY * width + targetX;
if (surfaceBuffer[targetIdx].hasData) {
break;
}
if (colorBuffer[targetIdx].getAlpha() == 0) {
float deltaU = ddx * (1.0f / (float)width) * (float)radius;
float deltaV = ddy * (1.0f / (float)height) * (float)radius;
vector3df dispWorld = tangent * deltaU + bitangent * deltaV;
dispWorld *= worldPerUV;
vector3df displacedGeneratedCoords = baseGenerated + dispWorld * WORLD_TEXTURE_SCALE;
SColor extendedColor;
blenderBoxProjection(srcTex, displacedGeneratedCoords, borderSample.normal,
(float)BLEND_FACTOR_NUM / BLEND_FACTOR_DEN, extendedColor);
colorBuffer[targetIdx] = extendedColor;
totalFixedPixels++;
} else {
break;
}
}
}
}
}
printf("Pixels fixed in seams (triplanar/world-space): %d\n", totalFixedPixels);
}
// --- MAIN BAKING ---
void bakeTexture(const std::vector<TriangleData>& triangles, IImage* srcTex, IImage* dstTex, IVideoDriver* driver, const aabbox3d<f32>& bbox) {
u32 width = dstTex->getDimension().Width;
u32 height = dstTex->getDimension().Height;
std::vector<SurfaceSample> surfaceBuffer(width * height);
std::vector<SColor> colorBuffer(width * height);
printf("=== HYPER-PRECISE HALTON BAKING ===\n");
printf("Resolution: %dx%d\n", width, height);
printf("World texture scale: %.2f\n", WORLD_TEXTURE_SCALE);
printf("Samples per pixel (Halton): %d\n", SUBPIXEL_SAMPLES);
// 1. RASTERIZATION (Halton)
printf("\n--- Phase 1: Halton+Jitter Rasterization ---\n");
hyperPreciseRasterization(triangles, surfaceBuffer, width, height, bbox);
// 1.b extra pass to fill pixels that escaped Halton sampling
fillMissingPixelsWithClipping(triangles, surfaceBuffer, width, height, bbox);
// 2. BAKING
printf("\n--- Phase 2: Baking ---\n");
int validPixels = 0;
int totalSamples = 0;
for (u32 i = 0; i < width * height; ++i) {
if (surfaceBuffer[i].hasData) {
vector3df genCoords = getGeneratedCoords(surfaceBuffer[i].position, bbox);
SColor finalColor;
blenderBoxProjection(srcTex, genCoords, surfaceBuffer[i].normal,
(float)BLEND_FACTOR_NUM / BLEND_FACTOR_DEN, finalColor);
colorBuffer[i] = finalColor;
validPixels++;
totalSamples += surfaceBuffer[i].sampleCount;
} else {
colorBuffer[i] = SColor(0, 0, 0, 0);
}
}
printf("Valid pixels: %d/%d (%.1f%%)\n",
validPixels, width * height, (validPixels * 100.0f) / (width * height));
printf("Total subpixels detected: %d\n", totalSamples);
printf("Average subpixels per valid pixel: %.2f\n",
validPixels > 0 ? (float)totalSamples / validPixels : 0.0f);
// 3. FIX SEAMS BETWEEN ISLANDS (triplanar/world-space)
fixIslandSeams(colorBuffer, surfaceBuffer, triangles, srcTex, width, height, bbox);
// 4. SAVE RESULTS
printf("\n--- Phase 4: Saving results ---\n");
for (u32 y = 0; y < height; ++y) {
for (u32 x = 0; x < width; ++x) {
u32 srcIdx = y * width + x;
dstTex->setPixel(x, y, colorBuffer[srcIdx]);
}
}
printf("Process completed!\n");
}
int main() {
IrrlichtDevice *device = createDevice(video::EDT_SOFTWARE, dimension2d<u32>(640, 480), 16, false, false, false, 0);
if (!device) {
std::cerr << "Error: Could not create Irrlicht device." << std::endl;
return 1;
}
IVideoDriver* driver = device->getVideoDriver();
std::vector<TriangleData> triangles;
aabbox3d<f32> bbox;
if (!loadOBJModel(MODEL_PATH, triangles, bbox)) {
std::cerr << "Error: Could not load model: " << MODEL_PATH.c_str() << std::endl;
device->drop();
return 1;
}
IImage* src = driver->createImageFromFile(TEXTURE_PATH);
if (!src) {
std::cerr << "Error: Could not load texture: " << TEXTURE_PATH.c_str() << std::endl;
device->drop();
return 1;
}
IImage* dst = driver->createImage(ECF_A8R8G8B8, dimension2d<u32>(OUTPUT_TEXTURE_SIZE, OUTPUT_TEXTURE_SIZE));
if (!dst) {
std::cerr << "Error: Could not create destination image." << std::endl;
src->drop();
device->drop();
return 1;
}
bakeTexture(triangles, src, dst, driver, bbox);
if (driver->writeImageToFile(dst, OUTPUT_FILENAME)) {
printf("Baked texture saved to: %s\n", OUTPUT_FILENAME.c_str());
} else {
std::cerr << "Error: Could not save baked texture." << std::endl;
}
src->drop();
dst->drop();
device->drop();
return 0;
}
WORLD_TEXTURE_SCALE = how many times the texture tiles across the 3D model; higher = more tiles.
I have only tested it with closed models that have a correct UV map.
"When the camera zooms out (increases the distance), the GPU uses smaller versions of the texture (Mipmaps). If your UV "islands" are close together or have little border, as the camera zooms out, the GPU will average island pixels with empty (black/transparent) pixels, creating visible seams."
To solve this "based on distance", we would need to calculate the mipmap level that kicks in at that distance and make sure the padding radius covers exactly that level.
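Something like this is what I have in mind, a minimal sketch that assumes a hypothetical referenceDistance (the distance at which one baked texel covers roughly one screen pixel; that value is not in the code above and would have to be measured per scene):
Code: Select all
// Minimal sketch, not part of the bake above: estimate the mip level the GPU would
// select at a given camera distance and the padding radius (in texels of the baked
// texture) needed so that mip averaging never mixes island texels with empty ones.
// "referenceDistance" is a hypothetical tuning value: the distance at which one
// baked texel maps to roughly one screen pixel (mip 0).
#include <cmath>
#include <algorithm>
int requiredPaddingTexels(float cameraDistance, float referenceDistance, int maxMipLevel)
{
    // Every time the distance doubles, the GPU drops roughly one mip level.
    float mip = std::log2(std::max(cameraDistance / referenceDistance, 1.0f));
    int mipLevel = std::min((int)std::ceil(mip), maxMipLevel);
    // A texel at mip level L averages a 2^L x 2^L block of mip-0 texels,
    // so the island borders need at least 2^L texels of padding.
    return 1 << mipLevel;
}
// Example: at 8x the reference distance the GPU is around mip 3,
// so the islands would need about 8 texels of padding.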
But it is difficult: a bake that is correct at 800x600 may be incorrect at a higher resolution, because the result depends not only on the camera distance but also on how many pixels the camera actually renders, so it is hard to make a "global baking algorithm"...
Even the same distance can look bad if the object is scaled much larger or smaller than what the algorithm expects, so the only solution is to add more variables to control the bake... I'm working on this...
