Re: Bump mapping for Animated meshes
Posted: Sat Oct 17, 2015 7:56 pm
Yeah I'll have to add that later.
Official forum of the Irrlicht Engine
https://irrlicht.sourceforge.io/forum/
Code: Select all
// Written by Jacques Pretorius 2015..
// This is a fully functional D3D HLSL Shader that renders
// "physically correct" anything you throw at it!
// (yes, this time it works)
//--------------------------------- --- -- -
// Per-object transforms, uploaded each frame by OnSetConstants (see the C++ callback below).
// The naming of these matrices are mainly the result of
// having tried hundreds of different variations..
// Feel free to "standardise" the code.. (if there is such a thing in GPU programming)..
float4x4 M01World; // Object-to-world transform.
float4x4 M05WorldViewProjection; // projection * view * world, built app-side.
float4x4 M17WorldInverseTranspose; // Inverse-transpose of world; used to transform normals/tangents.
// We want to do more in the Fragment Shader and less in the Vertex Program..
sampler2D DTCDSample; // Texture layer 0: diffuse colour (alpha used for final output alpha).
sampler2D NSGSample; // Texture layer 1: normal map; .z drives specular intensity, .w is gloss.
sampler2D ERMSample; // Texture layer 2: .w is the environment-reflection mask.
sampler2D CUBEMAPSample; // Texture layer 3: six cube faces unrolled into one 2D texture.
// sampler2D RESERVEDSample;
float3 L001Col; float3 L002Col; float3 L003Col; // Light colours (currently overridden in pixelMain).
float3 L004Col; // NEW LIGHT!
float3 CameraPosFRAG; // World-space camera position, fed straight to the fragment stage.
float3 L001PosFRAG; float3 L002PosFRAG; float3 L003PosFRAG; // World-space light positions for the fragment stage.
float3 L004PosFRAG; // NEW LIGHT!
float GlobalLightMultiplier; // Overall brightness scale applied to every light colour.
int ShaderCommand; // Debug-view selector (1..11 in pixelMain); not quite fed from the app yet..
// Per-vertex input from Irrlicht's vertex buffer. The TEXCOORD semantic
// assignments (tangent in TEXCOORD1, binormal in TEXCOORD2) follow
// Irrlicht's HLSL convention for tangent-space meshes.
struct appdata
{float3 Position : POSITION; // Object-space position.
float4 Normal : NORMAL;
float4 UVCoords : TEXCOORD0; // First UV set.
float4 Tangent : TEXCOORD1; // As per Irrlicht HLSL..
float4 Binormal : TEXCOORD2; // NOTE(review): which semantics carry which data is decided by Irrlicht's vertex format — confirm against the mesh's vertex type.
float4 UVCoordsSET_2 : TEXCOORD3; // Second UV set (currently unused by the shader below).
float4 TangentSET_2 : TEXCOORD4; // As per Irrlicht HLSL..
float4 BinormalSET_2 : TEXCOORD5; // Second tangent frame (currently unused).
// Plus THREE to spare!! (this is the main reason for doing more things in the Fragment Shader..)
};
// data passed from vertex shader to pixel shader..
// Data passed from the vertex shader to the pixel shader.
// All tangent-frame vectors are in world space (via the inverse-transpose).
struct vertexOutput
{float4 HPosition : POSITION; // Clip-space position (required by the rasteriser).
float2 UVCoords : TEXCOORD0; // First UV set only for now — second set still wanted.
float3 PassedWORLDVertPos : TEXCOORD1; // World-space vertex position (interpolated per fragment).
float3 WorldITNormal : TEXCOORD2; // World-space normal.
float3 WorldITTangent : TEXCOORD3; // World-space tangent.
float3 WorldITBinormal : TEXCOORD4; // World-space binormal.
float3 WorldITNormalSECOND : TEXCOORD5; // Second tangent frame — reserved, not yet filled by vertexMain.
float3 WorldITTangentSECOND : TEXCOORD6;
float3 WorldITBinormalSECOND : TEXCOORD7;
};
// Returns the point on the segment [LineStart, LineEnd] closest to TestPoint.
// The projection of TestPoint onto the segment's supporting line is clamped
// to the segment, so an end point is returned when the perpendicular foot
// falls outside it.
// NOTE(review): a zero-length segment (LineStart == LineEnd) divides by
// zero, exactly as the original component-wise version did — callers must
// pass a proper segment.
float3 GetClosestPointOnLine (float3 TestPoint , float3 LineStart, float3 LineEnd)
{
float3 SegmentDelta = LineEnd - LineStart; // Unnormalised segment direction.
float SegmentLength = length(SegmentDelta); // Module of the segment.
float3 SegmentDir = SegmentDelta / SegmentLength; // Unit direction along the segment.
// Scalar projection of (TestPoint - LineStart) onto the segment direction.
float Projection = dot(TestPoint - LineStart, SegmentDir);
// Clamp to the segment's end points.
if ( Projection <= 0) return LineStart;
if ( Projection >= SegmentLength) return LineEnd;
// Foot of the perpendicular lies within the segment.
return LineStart + SegmentDir * Projection;
}
// =============== VERTEX PROGRAM =================
// Transforms the tangent frame into world space and the position into both
// world space (for per-fragment lighting) and clip space (for the rasteriser).
vertexOutput vertexMain(appdata IN)
{vertexOutput OUT = (vertexOutput)0;
// mul(vector, matrix) argument order matters: HLSL row-vector convention here,
// the opposite of many CG examples — be very aware of this when porting.
OUT.WorldITNormal = mul(IN.Normal, M17WorldInverseTranspose).xyz; // SWOP for CG vs HLSL (now HLSL) ! (be very aware of this issue)..
OUT.WorldITTangent = mul(IN.Tangent, M17WorldInverseTranspose).xyz; // Inverse-transpose, NOT plain "World" as in so many examples!!..
OUT.WorldITBinormal = mul(IN.Binormal, M17WorldInverseTranspose).xyz;
OUT.PassedWORLDVertPos = (mul(float4(IN.Position.xyz,1),M01World).xyz); // World-space position for fragment lighting.
OUT.UVCoords = IN.UVCoords.xy;
OUT.HPosition = mul(float4(IN.Position.xyz,1),M05WorldViewProjection); // NEEDED EVEN IF NOT VISIBLY ACCESSED..
return OUT;
}
//========================================================== === == =
// =============== FRAGMENT SHADING =================
// Computes per-light Blinn-Phong diffuse and specular terms for four lights.
// Diffuse: Lambert N.L, tinted by the light colour.
// Specular: Blinn half-vector term with "PLAECF" — the gloss-map value in
// [0..1] is mapped to a shininess exponent of 2^(gloss*10), i.e. 1..1024,
// so the gloss texture addresses the exponential curve linearly.
// NOTE(review): IN and VnormW are kept in the signature for interface
// compatibility although this version no longer reads them (the original
// only used VnormW in dead computations that were removed).
void PHONGShading(vertexOutput IN,
float3 ColRGBL001,
float3 ColRGBL002,
float3 ColRGBL003,
float3 ColRGBL004,
float3 TheShadedNormal,
float3 NVectorL001,
float3 NVectorL002,
float3 NVectorL003,
float3 NVectorL004,
float3 NVectorCAMERA, // Normalised fragment-to-camera direction.
float3 VnormW,
float MappedGloss, // Gloss-map sample [0..1], for PLAECF to use..
out float3 RawDiffL001, // TODO: start using arrays..
out float3 RawDiffL002,
out float3 RawDiffL003,
out float3 RawDiffL004,
out float3 RawSpecL001,
out float3 RawSpecL002,
out float3 RawSpecL003,
out float3 RawSpecL004
)
{
// Diffuse — straight Lambert per light (from the GLSL original).
RawDiffL001 = max(dot(TheShadedNormal, (NVectorL001)),0.0) * ColRGBL001;
RawDiffL002 = max(dot(TheShadedNormal, (NVectorL002)),0.0) * ColRGBL002;
RawDiffL003 = max(dot(TheShadedNormal, (NVectorL003)),0.0) * ColRGBL003;
RawDiffL004 = max(dot(TheShadedNormal, (NVectorL004)),0.0) * ColRGBL004;
// start **** PRETORIUS's LINEARLY ACCESSED EXPONENTIAL CURVE ***
// Blinn specular: half vector = normalize(view + light); exponent = 2^(gloss*10).
RawSpecL001 = clamp(pow(clamp(dot(normalize((NVectorCAMERA + NVectorL001)),TheShadedNormal),0.0,1.0), pow(2, (MappedGloss * 10.0)) ), 0.0 , 1.0) * ColRGBL001;
RawSpecL002 = clamp(pow(clamp(dot(normalize((NVectorCAMERA + NVectorL002)),TheShadedNormal),0.0,1.0), pow(2, (MappedGloss * 10.0)) ), 0.0 , 1.0) * ColRGBL002;
RawSpecL003 = clamp(pow(clamp(dot(normalize((NVectorCAMERA + NVectorL003)),TheShadedNormal),0.0,1.0), pow(2, (MappedGloss * 10.0)) ), 0.0 , 1.0) * ColRGBL003;
RawSpecL004 = clamp(pow(clamp(dot(normalize((NVectorCAMERA + NVectorL004)),TheShadedNormal),0.0,1.0), pow(2, (MappedGloss * 10.0)) ), 0.0 , 1.0) * ColRGBL004;
// end **** PRETORIUS's LINEARLY ACCESSED EXPONENTIAL CURVE FORMULA ***
}
//========================================================== === == =
// Fragment shader entry point.
// Pipeline per fragment: sample diffuse/normal/reflection-mask maps,
// reconstruct the tangent-space normal (Z rebuilt via Pythagoras),
// optionally early-out with one of 11 numbered debug views (ShaderCommand),
// look up a reflection from a six-face "unrolled cube" 2D texture, then
// accumulate four-light Blinn-Phong shading via PHONGShading.
float4 pixelMain(vertexOutput IN ) : COLOR
{float3 LitRawDiffuseL001;
float3 LitRawDiffuseL002;
float3 LitRawDiffuseL003;
float3 LitRawDiffuseL004;
float3 RawSpecularL001;
float3 RawSpecularL002;
float3 RawSpecularL003;
float3 RawSpecularL004;
float4 CubemapReflection;
float3 VertexPositionWORLDFFF = IN.PassedWORLDVertPos; // World-space fragment position from the vertex program.
// Here we tried implementing a "Line Light" like a fluorescent tube, but no go?
// The "get closest point on given line from given point" function seems to work in 3D mockups..
// float3 TestLineA; TestLineA.x = -80000.0; TestLineA.y = 30.0; TestLineA.z = 40.0;
// float3 TestLineB; TestLineB.x = 80000.0; TestLineB.y = -30.0; TestLineB.z = 40.0;
// float3 MomentaryPos ; // THIS MAY YET WORK..
// MomentaryPos = GetClosestPointOnLine (VertexPositionWORLDFFF , TestLineA, TestLineB);
// Unnormalised fragment-to-camera and fragment-to-light vectors.
float3 DirectVecCameraFRAG = (CameraPosFRAG - VertexPositionWORLDFFF);
float3 DirectVecL001FRAG = (L001PosFRAG - VertexPositionWORLDFFF);
float3 DirectVecL002FRAG = (L002PosFRAG - VertexPositionWORLDFFF);
float3 DirectVecL003FRAG = (L003PosFRAG - VertexPositionWORLDFFF);
float3 DirectVecL004FRAG = (L004PosFRAG - VertexPositionWORLDFFF);
// Optimize this so that these values are fed straight into the shader function..
float3 NDirectionL001 = normalize(DirectVecL001FRAG);
float3 NDirectionL002 = normalize(DirectVecL002FRAG);
float3 NDirectionL003 = normalize(DirectVecL003FRAG);
float3 NDirectionL004 = normalize(DirectVecL004FRAG);
float3 NDirectionCAMERA = normalize(DirectVecCameraFRAG);
// Re-normalise the interpolated tangent frame.
float3 VertNormalWorld = normalize(IN.WorldITNormal);
float3 Tn = normalize(IN.WorldITTangent);
float3 Bn = normalize(IN.WorldITBinormal);
float3 NormalisedWorldITNormal = normalize(IN.WorldITNormal);
float3 FinalNormal;
float4 MappedDTCDrgba = tex2D(DTCDSample,IN.UVCoords); // Diffuse map; .w later becomes the output alpha.
float4 MappedNSGrgba = tex2D(NSGSample,IN.UVCoords).rgba; // Kept as a named local so we can view the mapped normal under debug conditions..
float3 UncompressedNormal; // Like this because we want to see it in debug mode.. (optimize for final release..)
UncompressedNormal = 2.0 * (MappedNSGrgba.xyz - float3(0.5,0.5,0.5)); // Expand [0..1] texel to [-1..1]. Old XYZ method..
// Calculate Z on the fly by a manipulated form of Pythagoras's Theorem.
UncompressedNormal.z *= 0.000000001 ; // So that DX doesn't give error messages in the console..
// DX doesn't like input-derived variables being assigned over directly or indirectly..
UncompressedNormal.z = sqrt(-(UncompressedNormal.x*UncompressedNormal.x) - (UncompressedNormal.y*UncompressedNormal.y) + 1.0);
// EMULATE FLAT NORMAL MAP..
// UncompressedNormal.x = 0.000001;
// UncompressedNormal.y = 0.000001;
// UncompressedNormal.z = 1.0;
float4 MappedERM = tex2D(ERMSample,IN.UVCoords); // .w is the reflection mask.
// Diffuse is attenuated by the inverse of the reflection mask (a straight
// subtraction could yield negative mapped diffuse colours, which we do not want),
// in case this was not already done during surface texturing in the modelling phase..
MappedDTCDrgba.x *= 1.0 - MappedERM.w;
MappedDTCDrgba.y *= 1.0 - MappedERM.w;
MappedDTCDrgba.z *= 1.0 - MappedERM.w;
// Perturb the world normal by the tangent-space mapped normal.
FinalNormal = NormalisedWorldITNormal + UncompressedNormal.x * Tn + UncompressedNormal.y * Bn;
FinalNormal = normalize(FinalNormal); // Note that unlike GLSL we CANNOT just say "normalize(OurNormal)"!..
// Hard-coded per-light tints for now (app sends all-white); the tiny multiply
// then add-1.0 dance keeps DX from complaining about assigning uniforms directly.
float3 L001ColJJ = L001Col; float3 L002ColJJ = L002Col; float3 L003ColJJ = L003Col; float3 L004ColJJ = L004Col;
// Again the system complains when you use EQUALS.. (so we do a work around)
L001ColJJ *= 0.000001; L002ColJJ *= 0.000001; L003ColJJ *= 0.000001; L004ColJJ *= 0.000001;
L001ColJJ += 1.0;
L002ColJJ += 1.0;
L003ColJJ += 1.0;
L004ColJJ += 1.0;
L001ColJJ.x *= 0.90001; L001ColJJ.y *= 0.90001; L001ColJJ.z *= 0.90001;
L002ColJJ.x *= 0.90001; L002ColJJ.y *= 0.50001; L002ColJJ.z *= 0.40001;
L003ColJJ.x *= 0.30001; L003ColJJ.y *= 0.70001; L003ColJJ.z *= 0.90001;
L004ColJJ.x *= 1.00001; L004ColJJ.y *= 0.20001; L004ColJJ.z *= 0.20001;
L001ColJJ *= GlobalLightMultiplier;
L002ColJJ *= GlobalLightMultiplier;
L003ColJJ *= GlobalLightMultiplier;
L004ColJJ *= GlobalLightMultiplier;
// Debug views: commands 1-7 return immediately; 8-11 mutate the sampled
// values (grey diffuse, camera-light-only, etc.) and fall through to the
// normal shading path below.
int ShaderComPass = ShaderCommand;
float4 EarlyOutput;
EarlyOutput = 1.0;
if (ShaderComPass == 1 ) // UV COORDS
{EarlyOutput.xyz *= 0.0000001; EarlyOutput.xy += IN.UVCoords.xy; EarlyOutput.z += 0.0;
return EarlyOutput;
}
if (ShaderComPass == 2 ) // VERTEX NORMALS
{EarlyOutput.xyz *= 0.0000001;
EarlyOutput.xyz += ( VertNormalWorld / 2.0) + 0.5;
return EarlyOutput;
}
if (ShaderComPass == 3 ) // FINAL NORMALS
{EarlyOutput.xyz *= 0.0000001; EarlyOutput.xyz += (FinalNormal.xyz / 2.0) + 0.5;
return EarlyOutput;
}
if (ShaderComPass == 4 ) // MAPPED NORMALS
{EarlyOutput.xyz *= 0.0000001; EarlyOutput.xyz += MappedNSGrgba.xyz;
return EarlyOutput;
}
if (ShaderComPass == 5 ) // TANGENTS
{EarlyOutput.xyz *= 0.0000001; EarlyOutput.xyz += (Tn.xyz / 2.0) + 0.5;
return EarlyOutput;
}
if (ShaderComPass == 6 ) // BINORMALS
{EarlyOutput.x *= 0.0000001; EarlyOutput.y *= 0.0000001; EarlyOutput.z *= 0.0000001;
EarlyOutput.xyz += (Bn.xyz / 2.0) + 0.5;
return EarlyOutput;
}
if (ShaderComPass == 7 ) // MAPPED DIFFUSE
{EarlyOutput.x *= 0.0000001; EarlyOutput.y *= 0.0000001; EarlyOutput.z *= 0.0000001;
EarlyOutput.xyz += MappedDTCDrgba.xyz;
return EarlyOutput;
}
if (ShaderComPass == 8 ) // RAW DIFFUSE & SPECULAR
{MappedDTCDrgba.x *= 0.0000001; MappedDTCDrgba.y *= 0.0000001; MappedDTCDrgba.z *= 0.0000001;
MappedDTCDrgba.x += 0.5; MappedDTCDrgba.y += 0.5; MappedDTCDrgba.z += 0.5;
}
if (ShaderComPass == 9 ) // GREY DIFFUSE (no colours)
{MappedERM .x *= 0.0000001; MappedERM.y *= 0.0000001; MappedERM.z *= 0.0000001;
MappedDTCDrgba.x *= 0.0000001; MappedDTCDrgba.y *= 0.0000001; MappedDTCDrgba.z *= 0.0000001;
MappedDTCDrgba.x += 0.5; MappedDTCDrgba.y += 0.5; MappedDTCDrgba.z += 0.5;
}
if (ShaderComPass == 10 ) // RAW DIFFUSE & SPECULAR CAMERA LIGHT ONLY
{MappedDTCDrgba.x *= 0.0000001; MappedDTCDrgba.y *= 0.0000001; MappedDTCDrgba.z *= 0.0000001;
MappedDTCDrgba.x += 0.5; MappedDTCDrgba.y += 0.5; MappedDTCDrgba.z += 0.5;
L002ColJJ.x *= 0.00001; L002ColJJ.y *= 0.00001; L002ColJJ.z *= 0.00001;
L003ColJJ.x *= 0.00001; L003ColJJ.y *= 0.00001; L003ColJJ.z *= 0.00001;
L004ColJJ.x *= 0.00001; L004ColJJ.y *= 0.00001; L004ColJJ.z *= 0.00001;
}
if (ShaderComPass == 11 ) // RAW DIFFUSE CAMERA LIGHT ONLY (no specular)
{MappedERM .x *= 0.0000001; MappedERM.y *= 0.0000001; MappedERM.z *= 0.0000001;
MappedDTCDrgba.x *= 0.0000001; MappedDTCDrgba.y *= 0.0000001; MappedDTCDrgba.z *= 0.0000001;
MappedDTCDrgba.x += 0.7; MappedDTCDrgba.y += 0.7; MappedDTCDrgba.z += 0.7;
L002ColJJ.x *= 0.00001; L002ColJJ.y *= 0.00001; L002ColJJ.z *= 0.00001;
L003ColJJ.x *= 0.00001; L003ColJJ.y *= 0.00001; L003ColJJ.z *= 0.00001;
L004ColJJ.x *= 0.00001; L004ColJJ.y *= 0.00001; L004ColJJ.z *= 0.00001;
}
// CALCULATE REFLECTION. (Combining into final render done elsewhere)..
// Thanks to jimy-byerly..
float3 ReflectVec = normalize(float3(reflect(NDirectionCAMERA, FinalNormal))); // Reflection based on normal-mapped normals..
// For Irrlicht, swap the z coordinate with the y coordinate (rotation of coordinates)..
float TZ = ReflectVec.y; float TY = ReflectVec.x;
ReflectVec.x = ReflectVec.z; ReflectVec.y = TY; ReflectVec.z = TZ;
float3 AbsReflect = abs(ReflectVec);
float2 sky_coord; // Texture coordinates of the sky point viewed..
// Select which of the six faces (texture sectors) the reflection hits:
// the dominant axis of ReflectVec picks the face, the other two components
// (divided through, perspective style) index within it.
if (ReflectVec.z >= AbsReflect.x && ReflectVec.z >= AbsReflect.y)
{sky_coord = float2( ReflectVec.x / ReflectVec.z + 4 , ReflectVec.y / ReflectVec.z + 2 );} // Top.. (6) Y Negative ..
else if (ReflectVec.y >= AbsReflect.x && ReflectVec.y >= AbsReflect.z)
{sky_coord = float2( -ReflectVec.x / ReflectVec.y + 2 , ReflectVec.z / ReflectVec.y);} // Front.. (2) .. X_POSITIVE
else if (ReflectVec.x >= AbsReflect.y && ReflectVec.x >= AbsReflect.z)
{sky_coord = float2( ReflectVec.y / ReflectVec.x + 4 , ( ReflectVec.z / ReflectVec.x));} // Left.. (3) .. Z_NEGATIVE MAIN..
else if (ReflectVec.z <= -AbsReflect .x && ReflectVec.z <= -AbsReflect .y)
{sky_coord = float2(-ReflectVec.x / ReflectVec.z , ReflectVec.y / ReflectVec.z + 2 );} // Bottom (4).. Y Positive ..O.K.
else if (ReflectVec.y <= -AbsReflect .x && ReflectVec.y <= -AbsReflect .z)
{sky_coord = float2(-ReflectVec.x / ReflectVec.y + 2 , -ReflectVec.z / ReflectVec.y + 2 );} // Back (5).. ..X NEGATIVE
else
{sky_coord = float2( ( ReflectVec.y / ReflectVec.x ) + ( 0.0 ), ( -ReflectVec.z / ReflectVec.x ) - ( 0.0 ) );} // (1)Right .. Z_POSITIVE
CubemapReflection = tex2D(CUBEMAPSample, (sky_coord + 1.0) * (1.0 / 6.0) ); // Map sector coords into the 6-wide strip.
CubemapReflection.x *= MappedERM.w; // Reflect only where the reflection mask allows.
CubemapReflection.y *= MappedERM.w;
CubemapReflection.z *= MappedERM.w;
// Four-light Blinn-Phong terms (diffuse + specular per light).
PHONGShading(IN,
L001ColJJ,
L002ColJJ,
L003ColJJ,
L004ColJJ,
FinalNormal,
NDirectionL001,
NDirectionL002,
NDirectionL003,
NDirectionL004,
NDirectionCAMERA, // O.K.!!
VertNormalWorld,
// GlossMapped.x,
MappedNSGrgba.w,
LitRawDiffuseL001,
LitRawDiffuseL002,
LitRawDiffuseL003,
LitRawDiffuseL004,
RawSpecularL001,
RawSpecularL002,
RawSpecularL003 ,
RawSpecularL004
);
// Combine: per-light specular scaled by the specular-intensity channel (NSG .z),
// plus diffuse-map colour times per-light diffuse; reflection added on top.
float3 FinalRGBA;
FinalRGBA = (RawSpecularL001.xyz * MappedNSGrgba.z) + (MappedDTCDrgba.xyz *(LitRawDiffuseL001.xyz));
FinalRGBA += (RawSpecularL002.xyz * MappedNSGrgba.z) + (MappedDTCDrgba.xyz *(LitRawDiffuseL002.xyz));
FinalRGBA += (RawSpecularL003.xyz * MappedNSGrgba.z) + (MappedDTCDrgba.xyz *(LitRawDiffuseL003.xyz));
FinalRGBA += (RawSpecularL004.xyz * MappedNSGrgba.z) + (MappedDTCDrgba.xyz *(LitRawDiffuseL004.xyz));
float4 OutputRGBA = float4 (1.0, 1.0, 1.0, 1.0);;
OutputRGBA.xyz = FinalRGBA.xyz;
OutputRGBA.x += CubemapReflection.x; // Added here as reflection doesn't undergo much further processing..
OutputRGBA.y += CubemapReflection.y;
OutputRGBA.z += CubemapReflection.z;
OutputRGBA. w = MappedDTCDrgba.w; // Alpha taken from the diffuse map (used for clip-mapping).
return OutputRGBA;
}
// There is a lot to do but this basis solves the most important lighting problems..
// Enjoy! and please comment..
// Vectrotek..
Code: Select all
// Irrlicht shader-constant callback: uploads the transforms, light data and
// texture layer indices the HLSL programs above expect, once per material set.
// NOTE(review): relies on names declared elsewhere in this file/translation
// unit: M01World, M05WorldViewProjection, M17WorldInverseTranspose, TheDevice,
// GeneralCounter, CurrentShaderIntValue, GlobalLightMultiplier — confirm their
// declarations when integrating.
class OurShaderCallBackClass : public video::IShaderConstantSetCallBack
{public:
virtual void OnSetConstants(video::IMaterialRendererServices* TheServices, s32 userData)
{video::IVideoDriver* TheDriver = TheServices->getVideoDriver();
// BUILD CUSTOM MATRICES..
M01World = TheDriver->getTransform(video::ETS_WORLD);
M05WorldViewProjection = TheDriver->getTransform(video::ETS_PROJECTION);
M05WorldViewProjection *= TheDriver->getTransform(video::ETS_VIEW);
M05WorldViewProjection *= TheDriver->getTransform(video::ETS_WORLD);
// Inverse-transpose of the world matrix, for normal transformation.
M17WorldInverseTranspose = M01World.getTransposed(); // For Irrlicht this replaces the commonly used world matrix!
M17WorldInverseTranspose.makeInverse();
// You may have to use.. MyMatrix = services->getVertexShaderConstantID("my_matrix"); etc..
TheServices->setVertexShaderConstant("M01World", M01World.pointer(), 16);
TheServices->setVertexShaderConstant("M05WorldViewProjection", M05WorldViewProjection.pointer(), 16);
TheServices->setVertexShaderConstant("M17WorldInverseTranspose", M17WorldInverseTranspose.pointer(), 16);
// Now we do less in the Vertex Program and more in the Fragment Shader..
core::vector3df L001PosFRAG ( 0.0 , 100.0 , 0.0 ); // Currently unused: light 1 is hooked to the camera below.
core::vector3df L002PosFRAG ( -100.0 , 0.0 , 0.0 ); // Far away..
core::vector3df L003PosFRAG ( 100.0 , 0.0 , 0.0 );
core::vector3df L004PosFRAG ( 0.0 , -100.0 , 0.0 ); // NEW LIGHT!!
core::vector3df CameraPositionFRAG = TheDevice->getSceneManager()->getActiveCamera()->getAbsolutePosition();
video::SColorf L001Col( 1.0f , 1.0f , 1.0f ); // Lights all set to white as we control them in the shader for now..
video::SColorf L002Col( 1.0f , 1.0f , 1.0f );
video::SColorf L003Col( 1.0f , 1.0f , 1.0f );
video::SColorf L004Col( 1.0f , 1.0f , 1.0f ); // NEW LIGHT!!
// Initialised to 0: the original left this indeterminate when
// GeneralCounter == 500 (neither branch below taken). Not used from here
// yet — commands can still be changed in the shader.
int PixelShaderCommand = 0;
if (GeneralCounter > 500) {PixelShaderCommand = 1;} // Automatically change commands, but not used now..
if (GeneralCounter < 500) {PixelShaderCommand = 0;}
// TheServices->setPixelShaderConstant("ShaderCommand", reinterpret_cast<int*>(&PixelShaderCommand), 1); // NOT YET..
TheServices->setPixelShaderConstant("L001Col", reinterpret_cast<f32*>(&L001Col), 3);
TheServices->setPixelShaderConstant("L002Col", reinterpret_cast<f32*>(&L002Col), 3);
TheServices->setPixelShaderConstant("L003Col", reinterpret_cast<f32*>(&L003Col), 3);
TheServices->setPixelShaderConstant("L004Col", reinterpret_cast<f32*>(&L004Col), 3);
// Texture layer indices, bound by name exactly as in GLSL.
f32 TextureLayerID0 = 0;
f32 TextureLayerID1 = 1;
f32 TextureLayerID2 = 2;
f32 TextureLayerID3 = 3;
TheServices->setPixelShaderConstant("DTCDSample", &TextureLayerID0, 1); // Same as GLSL..
TheServices->setPixelShaderConstant("NSGSample", &TextureLayerID1, 1);
TheServices->setPixelShaderConstant("ERMSample", &TextureLayerID2, 1);
TheServices->setPixelShaderConstant("CUBEMAPSample", &TextureLayerID3, 1);
TheServices->setPixelShaderConstant("ShaderCommand", reinterpret_cast<int*>(&CurrentShaderIntValue), 1);
TheServices->setPixelShaderConstant("GlobalLightMultiplier", reinterpret_cast<float*>(&GlobalLightMultiplier), 1);
TheServices->setPixelShaderConstant("CameraPosFRAG", reinterpret_cast<f32*>(&CameraPositionFRAG), 3);
// Better off feeding positions to the Fragment Shader directly —
// not constrained by semantically named hardware channels!
TheServices->setPixelShaderConstant("L001PosFRAG", reinterpret_cast<f32*>(&CameraPositionFRAG), 3); // Light 1 deliberately follows the camera..
TheServices->setPixelShaderConstant("L002PosFRAG", reinterpret_cast<f32*>(&L002PosFRAG), 3);
TheServices->setPixelShaderConstant("L003PosFRAG", reinterpret_cast<f32*>(&L003PosFRAG), 3);
TheServices->setPixelShaderConstant("L004PosFRAG", reinterpret_cast<f32*>(&L004PosFRAG), 3);
// (Removed an unused local `float AppShaderCommand = 0;` from the original.)
}
};