Added a deprecation sniffer so I can find problematic areas later

Revert tjunc fixes
Add -notjunc to match other editors
Remove Light_Add since it's too simple
This commit is contained in:
Jonathan 2021-10-20 04:27:22 -04:00
parent 1bf6261826
commit 84b4388f4b
7 changed files with 250 additions and 129 deletions

View File

@ -803,10 +803,16 @@ namespace qv
}; // namespace qv
// "vec3" type. legacy; eventually will be replaced entirely
using vec3_t = vec_t[3];
#if 0
#define DEPRECATE_SNIFF [[deprecated]]
#else
#define DEPRECATE_SNIFF
#endif
using vec3_t DEPRECATE_SNIFF = vec_t[3];
template<typename T1, typename T2>
constexpr bool VectorCompare(const T1 &v1, const T2 &v2, vec_t epsilon)
DEPRECATE_SNIFF constexpr bool VectorCompare(const T1 &v1, const T2 &v2, vec_t epsilon)
{
for (size_t i = 0; i < std::size(v1); i++)
if (fabs(v1[i] - v2[i]) > epsilon)
@ -816,7 +822,7 @@ constexpr bool VectorCompare(const T1 &v1, const T2 &v2, vec_t epsilon)
}
template<typename T, typename T2, typename T3>
constexpr void CrossProduct(const T &v1, const T2 &v2, T3 &cross)
DEPRECATE_SNIFF constexpr void CrossProduct(const T &v1, const T2 &v2, T3 &cross)
{
cross[0] = v1[1] * v2[2] - v1[2] * v2[1];
cross[1] = v1[2] * v2[0] - v1[0] * v2[2];
@ -824,13 +830,13 @@ constexpr void CrossProduct(const T &v1, const T2 &v2, T3 &cross)
}
template<typename Tx, typename Ty>
constexpr vec_t DotProduct(const Tx &x, const Ty &y)
DEPRECATE_SNIFF constexpr vec_t DotProduct(const Tx &x, const Ty &y)
{
return x[0] * y[0] + x[1] * y[1] + x[2] * y[2];
}
template<typename Tx, typename Ty, typename Tout>
constexpr void VectorSubtract(const Tx &x, const Ty &y, Tout &out)
DEPRECATE_SNIFF constexpr void VectorSubtract(const Tx &x, const Ty &y, Tout &out)
{
out[0] = x[0] - y[0];
out[1] = x[1] - y[1];
@ -838,7 +844,7 @@ constexpr void VectorSubtract(const Tx &x, const Ty &y, Tout &out)
}
template<typename Tx, typename Ty, typename Tout>
constexpr void VectorAdd(const Tx &x, const Ty &y, Tout &out)
DEPRECATE_SNIFF constexpr void VectorAdd(const Tx &x, const Ty &y, Tout &out)
{
out[0] = x[0] + y[0];
out[1] = x[1] + y[1];
@ -846,7 +852,7 @@ constexpr void VectorAdd(const Tx &x, const Ty &y, Tout &out)
}
template<typename TFrom, typename TTo>
constexpr void VectorCopy(const TFrom &in, TTo &out)
DEPRECATE_SNIFF constexpr void VectorCopy(const TFrom &in, TTo &out)
{
out[0] = in[0];
out[1] = in[1];
@ -854,7 +860,7 @@ constexpr void VectorCopy(const TFrom &in, TTo &out)
}
template<typename TFrom, typename TScale, typename TTo>
constexpr void VectorScale(const TFrom &v, TScale scale, TTo &out)
DEPRECATE_SNIFF constexpr void VectorScale(const TFrom &v, TScale scale, TTo &out)
{
out[0] = v[0] * scale;
out[1] = v[1] * scale;
@ -862,7 +868,7 @@ constexpr void VectorScale(const TFrom &v, TScale scale, TTo &out)
}
template<typename T>
constexpr void VectorInverse(T &v)
DEPRECATE_SNIFF constexpr void VectorInverse(T &v)
{
v[0] = -v[0];
v[1] = -v[1];
@ -870,7 +876,7 @@ constexpr void VectorInverse(T &v)
}
template<typename T>
constexpr void VectorSet(T &out, vec_t x, vec_t y, vec_t z)
DEPRECATE_SNIFF constexpr void VectorSet(T &out, vec_t x, vec_t y, vec_t z)
{
out[0] = x;
out[1] = y;
@ -878,7 +884,7 @@ constexpr void VectorSet(T &out, vec_t x, vec_t y, vec_t z)
}
template<typename T>
constexpr void VectorClear(T &out)
DEPRECATE_SNIFF constexpr void VectorClear(T &out)
{
out[0] = 0;
out[1] = 0;
@ -886,7 +892,7 @@ constexpr void VectorClear(T &out)
}
template<typename Ta, typename Tb, typename Tc>
constexpr void VectorMA(const Ta &va, vec_t scale, const Tb &vb, Tc &vc)
DEPRECATE_SNIFF constexpr void VectorMA(const Ta &va, vec_t scale, const Tb &vb, Tc &vc)
{
vc[0] = va[0] + scale * vb[0];
vc[1] = va[1] + scale * vb[1];
@ -894,7 +900,7 @@ constexpr void VectorMA(const Ta &va, vec_t scale, const Tb &vb, Tc &vc)
}
template<typename T>
constexpr vec_t VectorLengthSq(const T &v)
DEPRECATE_SNIFF constexpr vec_t VectorLengthSq(const T &v)
{
vec_t length = 0;
for (int i = 0; i < 3; i++)
@ -903,7 +909,7 @@ constexpr vec_t VectorLengthSq(const T &v)
}
template<typename T>
inline vec_t VectorLength(const T &v)
DEPRECATE_SNIFF inline vec_t VectorLength(const T &v)
{
vec_t length = VectorLengthSq(v);
length = sqrt(length);
@ -911,7 +917,7 @@ inline vec_t VectorLength(const T &v)
}
template<typename T>
inline vec_t VectorNormalize(T &v)
DEPRECATE_SNIFF inline vec_t VectorNormalize(T &v)
{
vec_t length = 0;
for (size_t i = 0; i < 3; i++)

View File

@ -282,6 +282,7 @@ public:
vec_t worldExtent = 65536.0f;
bool fNoThreads = false;
bool includeSkip = false;
bool fNoTJunc = false;
};
extern options_t options;

View File

@ -1035,17 +1035,11 @@ float GetLightDist(const globalconfig_t &cfg, const light_t *entity, vec_t desir
return fadedist;
}
// Accumulate a light contribution into `sample`: the colour channel is scaled
// by light/255 (colour components are 0-255, sample colours are normalized),
// while the direction is weighted by the raw light value so brighter sources
// dominate the averaged direction. `direction` defaults to zero for callers
// that only contribute colour.
constexpr void Light_Add(lightsample_t *sample, const vec_t light, const qvec3d &color, const qvec3d &direction = {})
{
sample->color += color * (light / 255.0f);
sample->direction += direction * light;
}
// CHECK: naming? why clamp*min*?
constexpr void Light_ClampMin(lightsample_t *sample, const vec_t light, const qvec3d &color)
{
for (int i = 0; i < 3; i++) {
sample->color[i] = std::max(sample->color[i], color[i] * light / 255.0f);
sample->color[i] = std::max(sample->color[i], color[i] * (light / 255.0f));
}
}
@ -1649,7 +1643,7 @@ static void LightFace_Min(const mbsp_t *bsp, const mface_t *face, const qvec3d &
value *= Dirt_GetScaleFactor(cfg, lightsurf->occlusion[i], NULL, 0.0, lightsurf);
}
if (cfg.addminlight.boolValue()) {
Light_Add(sample, value, color);
sample->color += color * (value / 255.0);
} else {
Light_ClampMin(sample, value, color);
}
@ -1715,7 +1709,7 @@ static void LightFace_Min(const mbsp_t *bsp, const mface_t *face, const qvec3d &
value *=
Dirt_GetScaleFactor(cfg, lightsurf->occlusion[i], &entity, 0.0 /* TODO: pass distance */, lightsurf);
if (cfg.addminlight.boolValue()) {
Light_Add(sample, value, entity.color.vec3Value());
sample->color += entity.color.vec3Value() * (value / 255.0);
} else {
Light_ClampMin(sample, value, entity.color.vec3Value());
}

View File

@ -90,6 +90,8 @@ This allows for arbitrary collision sizes in engines that support it, currently
"-wrbrushes" combined with "-noclip" argument. This is NOT backwards compatible.
.IP "\fB-notex\fP"
Write only placeholder textures, to depend upon replacements. This avoids inclusion of third-party copyrighted images inside your maps; it is not backwards compatible, but will work in FTEQW and QSS.
.IP "\fB-notjunc\fP"
Don't attempt to fix T-junctions. This is only for engines or formats that prefer micro-cracks over degenerate triangles. If you don't know what that means, don't set this.
.IP "\fB-omitdetail\fP"
Detail brushes are omitted from the compile.
.IP "\fB-convert <fmt>\fP"

View File

@ -406,7 +406,7 @@ static surfflags_t SurfFlagsForEntity(const mtexinfo_t &texinfo, const mapentity
const vec_t minlight = atof(ValueForKey(entity, "_minlight"));
if (minlight > 0) {
// CHECK: allow > 510 now that we're float? or is it not worth it since it will
// be beyond max.
// be beyond max?
flags.minlight = clamp(minlight, 0.0, 510.0);
}

View File

@ -673,8 +673,10 @@ static void ProcessEntity(mapentity_t *entity, const int hullnum)
// make the real portals for vis tracing
PortalizeWorld(entity, nodes, hullnum);
if (!options.fNoTJunc) {
TJunc(entity, nodes);
}
}
// Area portals
/*if (options.target_game->id == GAME_QUAKE_II) {
@ -693,7 +695,7 @@ static void ProcessEntity(mapentity_t *entity, const int hullnum)
}
// bmodels
if (entity != pWorldEnt()) {
if (entity != pWorldEnt() && !options.fNoTJunc) {
TJunc(entity, nodes);
}
@ -1348,6 +1350,8 @@ static void ParseOptions(char *szOptions)
szTok = szTok2;
} else if (!Q_strcasecmp(szTok, "objexport")) {
options.fObjExport = true;
} else if (!Q_strcasecmp(szTok, "notjunc")) {
options.fNoTJunc = true;
} else if (!Q_strcasecmp(szTok, "omitdetail")) {
options.fOmitDetail = true;
} else if (!Q_strcasecmp(szTok, "omitdetailwall")) {

View File

@ -22,112 +22,162 @@
#include <qbsp/qbsp.hh>
// don't let a base face get past this
// because it can be split more later
constexpr size_t MAXPOINTS = 60;
namespace qv
struct wvert_t
{
template<typename T>
[[nodiscard]] constexpr int32_t compareEpsilon(const T &v1, const T &v2, const T &epsilon)
{
T diff = v1 - v2;
return (diff > epsilon || diff < -epsilon) ? (diff < 0 ? -1 : 1) : 0;
}
template<typename T, size_t N>
[[nodiscard]] inline int32_t compareEpsilon(const qvec<T, N> &v1, const qvec<T, N> &v2, const T &epsilon)
{
for (size_t i = 0; i < N; i++) {
int32_t diff = compareEpsilon(v1[i], v2[i], epsilon);
if (diff) {
return diff;
}
}
return 0;
}
}
struct wedge_key_t
{
qvec3d dir; /* direction vector for the edge */
qvec3d origin; /* origin (t = 0) in parametric form */
inline bool operator<(const wedge_key_t &other) const
{
int32_t diff = qv::compareEpsilon(dir, other.dir, EQUAL_EPSILON);
if (diff) {
return diff < 0;
}
diff = qv::compareEpsilon(origin, other.origin, EQUAL_EPSILON);
if (diff) {
return diff < 0;
}
return false;
}
vec_t t; /* t-value for parametric equation of edge */
wvert_t *prev, *next; /* t-ordered list of vertices on same edge */
};
using wedge_t = std::list<vec_t>; /* linked list of vertices on this edge */
// An edge stored in parametric form (point = origin + t * dir) so that all
// collinear edge fragments map to the same wedge; the vertices lying on the
// edge are kept in the t-ordered circular list anchored at `head`.
struct wedge_t
{
wedge_t *next; /* pointer for hash bucket chain */
vec3_t dir; /* direction vector for the edge */
vec3_t origin; /* origin (t = 0) in parametric form */
wvert_t head; /* sentinel for t-ordered linked list of vertices on this edge */
};
static int numwverts;
static int numwedges, numwverts;
static int tjuncs;
static int tjuncfaces;
static std::map<wedge_key_t, wedge_t> pWEdges;
static int cWVerts;
static int cWEdges;
static wvert_t *pWVerts;
static wedge_t *pWEdges;
//============================================================================
static qvec3d CanonicalVector(const qvec3d &p1, const qvec3d &p2)
#define NUM_HASH 1024
static wedge_t *wedge_hash[NUM_HASH];
static qvec3d hash_min, hash_scale;
static void InitHash(const qvec3d &mins, const qvec3d &maxs)
{
qvec3d vec = p2 - p1;
vec_t volume;
vec_t scale;
int newsize[2];
hash_min = mins;
qvec3d size = maxs - mins;
memset(wedge_hash, 0, sizeof(wedge_hash));
volume = size[0] * size[1];
scale = sqrt(volume / NUM_HASH);
newsize[0] = (int)(size[0] / scale);
newsize[1] = (int)(size[1] / scale);
hash_scale[0] = newsize[0] / size[0];
hash_scale[1] = newsize[1] / size[1];
hash_scale[2] = (vec_t)newsize[1];
}
// Map a 3D point to a hash bucket index in [0, NUM_HASH). Only the x and y
// components participate (the grid set up by InitHash is 2D; hash_scale[2]
// carries the row width). Out-of-range results are clamped to the last
// bucket rather than rejected, so every point hashes somewhere.
static unsigned HashVec(vec3_t vec)
{
unsigned h;
h = (unsigned)(hash_scale[0] * (vec[0] - hash_min[0]) * hash_scale[2] + hash_scale[1] * (vec[1] - hash_min[1]));
if (h >= NUM_HASH)
return NUM_HASH - 1;
return h;
}
//============================================================================
static void CanonicalVector(const qvec3d &p1, const qvec3d &p2, qvec3d &vec)
{
VectorSubtract(p2, p1, vec);
vec_t length = VectorNormalize(vec);
for (size_t i = 0; i < 3; i++) {
if (vec[i] > EQUAL_EPSILON) {
return vec;
} else if (vec[i] < -EQUAL_EPSILON) {
if (vec[0] > EQUAL_EPSILON)
return;
else if (vec[0] < -EQUAL_EPSILON) {
VectorInverse(vec);
return vec;
} else {
vec[i] = 0;
}
return;
} else
vec[0] = 0;
if (vec[1] > EQUAL_EPSILON)
return;
else if (vec[1] < -EQUAL_EPSILON) {
VectorInverse(vec);
return;
} else
vec[1] = 0;
if (vec[2] > EQUAL_EPSILON)
return;
else if (vec[2] < -EQUAL_EPSILON) {
VectorInverse(vec);
return;
} else
vec[2] = 0;
LogPrint("WARNING: Line {}: Healing degenerate edge ({}) at ({:.3f} {:.3} {:.3})\n", length, p1[0], p1[1], p1[2]);
}
LogPrint("WARNING: Line {}: Healing degenerate edge ({}) at ({:.3}\n", length, vec);
return vec;
}
static std::pair<const wedge_key_t, wedge_t> &FindEdge(const qvec3d &p1, const qvec3d &p2, vec_t &t1, vec_t &t2)
static wedge_t *FindEdge(const qvec3d &p1, const qvec3d &p2, vec_t &t1, vec_t &t2)
{
qvec3d edgevec = CanonicalVector(p1, p2);
qvec3d origin, edgevec;
wedge_t *edge;
int h;
CanonicalVector(p1, p2, edgevec);
t1 = DotProduct(p1, edgevec);
t2 = DotProduct(p2, edgevec);
qvec3d origin = p1 + (edgevec * -t1);
VectorMA(p1, -t1, edgevec, origin);
if (t1 > t2) {
std::swap(t1, t2);
}
wedge_key_t key { edgevec, origin };
auto it = pWEdges.find(key);
h = HashVec(&origin[0]);
if (it != pWEdges.end()) {
return *it;
for (edge = wedge_hash[h]; edge; edge = edge->next) {
vec_t temp = edge->origin[0] - origin[0];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
temp = edge->origin[1] - origin[1];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
temp = edge->origin[2] - origin[2];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
temp = edge->dir[0] - edgevec[0];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
temp = edge->dir[1] - edgevec[1];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
temp = edge->dir[2] - edgevec[2];
if (temp < -EQUAL_EPSILON || temp > EQUAL_EPSILON)
continue;
return edge;
}
auto &edge = pWEdges.emplace(key, wedge_t { }).first;
if (numwedges >= cWEdges)
FError("Internal error: didn't allocate enough edges for tjuncs?");
edge = pWEdges + numwedges;
numwedges++;
edge->second.emplace_front(VECT_MAX);
edge->next = wedge_hash[h];
wedge_hash[h] = edge;
return *edge;
VectorCopy(origin, edge->origin);
VectorCopy(edgevec, edge->dir);
edge->head.next = edge->head.prev = &edge->head;
edge->head.t = VECT_MAX;
return edge;
}
/*
@ -136,21 +186,31 @@ AddVert
===============
*/
static void AddVert(wedge_t &edge, vec_t t)
static void AddVert(wedge_t *edge, vec_t t)
{
auto it = edge.begin();
wvert_t *v, *newv;
for (; it != edge.end(); it++) {
if (fabs(*it - t) < T_EPSILON) {
v = edge->head.next;
do {
if (fabs(v->t - t) < T_EPSILON)
return;
} else if (*it > t) {
if (v->t > t)
break;
}
}
v = v->next;
} while (1);
// insert a new wvert before v
edge.insert(it, t);
if (numwverts >= cWVerts)
FError("Internal error: didn't allocate enough vertices for tjuncs?");
newv = pWVerts + numwverts;
numwverts++;
newv->t = t;
newv->next = v;
newv->prev = v->prev;
v->prev->next = newv;
v->prev = newv;
}
/*
@ -162,9 +222,9 @@ AddEdge
static void AddEdge(const qvec3d &p1, const qvec3d &p2)
{
vec_t t1, t2;
auto &edge = FindEdge(p1, p2, t1, t2);
AddVert(edge.second, t1);
AddVert(edge.second, t2);
wedge_t *edge = FindEdge(p1, p2, t1, t2);
AddVert(edge, t1);
AddVert(edge, t2);
}
/*
@ -287,20 +347,24 @@ FixFaceEdges
*/
static void FixFaceEdges(face_t *face, face_t *superface, face_t **facelist)
{
int i, j;
wedge_t *edge;
wvert_t *v;
vec_t t1, t2;
*superface = *face;
restart:
for (size_t i = 0; i < superface->w.size(); i++) {
size_t j = (i + 1) % superface->w.size();
for (i = 0; i < superface->w.size(); i++) {
j = (i + 1) % superface->w.size();
vec_t t1, t2;
auto &edge = FindEdge(superface->w[i], superface->w[j], t1, t2);
edge = FindEdge(superface->w[i], superface->w[j], t1, t2);
auto it = edge.second.begin();
while (*it < t1 + T_EPSILON)
it++;
v = edge->head.next;
while (v->t < t1 + T_EPSILON)
v = v->next;
if (*it < t2 - T_EPSILON) {
if (v->t < t2 - T_EPSILON) {
/* insert a new vertex here */
if (superface->w.size() == MAX_SUPERFACE_POINTS)
FError("tjunc fixups generated too many edges (max {})", MAX_SUPERFACE_POINTS);
@ -313,7 +377,10 @@ restart:
for (int32_t k = superface->w.size() - 1; k > j; k--)
VectorCopy(superface->w[k - 1], superface->w[k]);
superface->w[j] = edge.first.origin + (edge.first.dir * *it);
vec3_t temp;
VectorMA(edge->origin, v->t, edge->dir, temp);
superface->w[j] = temp;
goto restart;
}
}
@ -331,12 +398,28 @@ restart:
//============================================================================
static void tjunc_find_r(node_t *node)
static void tjunc_count_r(node_t *node)
{
face_t *f;
if (node->planenum == PLANENUM_LEAF)
return;
for (face_t *f = node->faces; f; f = f->next)
for (f = node->faces; f; f = f->next)
cWVerts += f->w.size();
tjunc_count_r(node->children[0]);
tjunc_count_r(node->children[1]);
}
static void tjunc_find_r(node_t *node)
{
face_t *f;
if (node->planenum == PLANENUM_LEAF)
return;
for (f = node->faces; f; f = f->next)
AddFaceEdges(f);
tjunc_find_r(node->children[0]);
@ -345,12 +428,14 @@ static void tjunc_find_r(node_t *node)
static void tjunc_fix_r(node_t *node, face_t *superface)
{
face_t *face, *next, *facelist;
if (node->planenum == PLANENUM_LEAF)
return;
face_t *facelist = nullptr;
facelist = NULL;
for (face_t *face = node->faces, *next = nullptr; face; face = next) {
for (face = node->faces; face; face = next) {
next = face->next;
FixFaceEdges(face, superface, &facelist);
}
@ -370,13 +455,39 @@ void TJunc(const mapentity_t *entity, node_t *headnode)
{
LogPrint(LOG_PROGRESS, "---- {} ----\n", __func__);
pWEdges.clear();
/*
* Guess edges = 1/2 verts
* Verts are arbitrarily multiplied by 2 because there appears to
* be a need for them to "grow" slightly.
*/
cWVerts = 0;
tjunc_count_r(headnode);
cWEdges = cWVerts;
cWVerts *= 2;
numwverts = 0;
pWVerts = new wvert_t[cWVerts]{};
pWEdges = new wedge_t[cWEdges]{};
qvec3d maxs;
/*
* identify all points on common edges
* origin points won't allways be inside the map, so extend the hash area
*/
for (size_t i = 0; i < 3; i++) {
if (fabs(entity->bounds.maxs()[i]) > fabs(entity->bounds.mins()[i]))
maxs[i] = fabs(entity->bounds.maxs()[i]);
else
maxs[i] = fabs(entity->bounds.mins()[i]);
}
qvec3d mins = -maxs;
InitHash(mins, maxs);
numwedges = numwverts = 0;
tjunc_find_r(headnode);
LogPrint(LOG_STAT, " {:8} world edges\n", pWEdges.size());
LogPrint(LOG_STAT, " {:8} world edges\n", numwedges);
LogPrint(LOG_STAT, " {:8} edge points\n", numwverts);
face_t superface;
@ -385,6 +496,9 @@ void TJunc(const mapentity_t *entity, node_t *headnode)
tjuncs = tjuncfaces = 0;
tjunc_fix_r(headnode, &superface);
delete[] pWVerts;
delete[] pWEdges;
LogPrint(LOG_STAT, " {:8} edges added by tjunctions\n", tjuncs);
LogPrint(LOG_STAT, " {:8} faces added by tjunctions\n", tjuncfaces);
}