I wondered, is it possible with Irrlicht to simply apply a pixel shader to a GUI element and its children ?
Let's say I have a custom GUI element (which looks like an old TV screen), with a few buttons / text inside. I would like to apply an effect such as this one : https://www.shadertoy.com/view/ldjGzV
I've browsed a bit in the Irrlicht documentation, and all the tutorials mention applying the shader to a material type set on a scene node, not GUI Elements.
I've tried doing something like this to no avail :
In the GUI element's draw() method :
Code: Select all
// NOTE(review): this fragment runs inside draw(), i.e. every frame. Each call
// to addHighLevelShaderMaterialFromFiles() registers a brand-new material
// renderer (and recompiles the shader), so the material table grows without
// bound — create the material once at setup time and reuse the returned id.
// NOTE(review): the callback is IReferenceCounted; presumably the engine
// grab()s it, so the caller should drop() it after this call or it leaks —
// TODO confirm against the Irrlicht docs for this overload.
// "" as the first (vertex shader) path presumably selects the fixed-function
// vertex pipeline — verify the overload being resolved here.
s32 materialIndex = gpu->addHighLevelShaderMaterialFromFiles("", basePath + "vcr_distortion.glsl", new SomeShaderCallBack(), EMT_SOLID, 0);
// Save/override/restore the 2D override material so only this element's
// children are affected.
E_MATERIAL_TYPE PREVIOUS_ID = driver->getMaterial2D().MaterialType;
driver->getMaterial2D().MaterialType = static_cast<E_MATERIAL_TYPE>(materialIndex);
// NOTE(review): Material2D is only honoured when the driver's 2D material
// override is enabled (enableMaterial2D()); children drawing through the GUI
// skin may bypass it — TODO confirm, this may be why the shader never shows.
IGUIElement::draw() // <== Draws the children
// Other drawing stuff with draw2dimage
driver->getMaterial2D().MaterialType = PREVIOUS_ID;
In the constructor I set a screen quad (which seems to fill the whole screen no matter the specified dimension) with the proper material ID and textures. I can see in the debugger that the shader callback's OnSetConstants is regularly called.
Code: Select all
// Full-screen quad scene node that will carry the post-processing shader and
// sample the RTT holding the GUI element's rendered contents.
this->screenQuad = new CScreenQuadSceneNode(smgr->getRootSceneNode(), smgr, -1, dim /* = the GUI element dimension */);
if (Environment->getVideoDriver()->queryFeature(video::EVDF_RENDER_TO_TARGET))
{
// Texture 0 = the GUI contents (rendered in draw()), texture 1 = noise
// sampled by the shader's noise() function.
this->rtt = Environment->getVideoDriver()->addRenderTargetTexture(dim, "rtt0");
this->screenQuad->getMaterial(0).setTexture(0, rtt);
this->screenQuad->getMaterial(0).setTexture(1, someLoadedNoiseTexture);
// NOTE(review): hard-coding 24 is fragile — it matches the id returned by
// addHighLevelShaderMaterialFromFiles() only by coincidence (ids depend on
// how many materials were registered before). Store the returned
// materialIndex and use it here instead.
this->screenQuad->getMaterial(0).MaterialType = static_cast<video::E_MATERIAL_TYPE>(24); // <== Matches the material ID returned by addHighLevelShaderMaterialFromFiles()
// NOTE(review): when EVDF_RENDER_TO_TARGET is unsupported, rtt stays null
// but draw() still dereferences it — guard the draw path as well.
}
In the draw() method :
Code: Select all
// Redirect rendering of the sub-elements into the RTT, then switch back to
// the frame buffer and draw the quad, whose shader samples that RTT.
// NOTE(review): clearBackBuffer/clearZBuffer are both false, so the RTT keeps
// whatever was in it last frame — probably want true for a clean redraw.
Environment->getVideoDriver()->setRenderTarget(rtt, false, false, video::SColor(0, 0, 0, 0));
CGUIViewport::draw(); // Draws the sub elements.
Environment->getVideoDriver()->setRenderTarget(ERT_FRAME_BUFFER, false, false, 0);
// NOTE(review): render() is invoked directly, outside the scene manager's
// render pass — the node's material is only applied if render() itself calls
// driver->setMaterial(); verify CScreenQuadSceneNode does that.
screenQuad->render();
I know there's something really wrong with my approach(es), but I can't seem to at least find a good starting point / way of thinking.
For further info, here's the shader converted to proper GLSL:
Code: Select all
// ShaderToy-style inputs; the host application must upload these from its
// IShaderConstantSetCallBack (only iResolution, iGlobalTime and the channel
// samplers are actually read by this shader).
uniform vec3 iResolution; // viewport resolution in pixels; vec3, so upload THREE floats (z = pixel aspect on ShaderToy)
uniform float iGlobalTime; // shader playback time (in seconds)
uniform float iChannelTime[4]; // channel playback time (in seconds) — unused here
uniform vec3 iChannelResolution[4]; // channel resolution (in pixels) — unused here
uniform vec4 iMouse; // mouse pixel coords. xy: current (if MLB down), zw: click — unused here
uniform sampler2D iChannel0; // the RTT holding the rendered GUI contents (see getVideo())
uniform sampler2D iChannel1; // noise texture (see noise())
uniform sampler2D iChannel2; // bound for ShaderToy parity — unused here
uniform sampler2D iChannel3; // bound for ShaderToy parity — unused here
uniform vec4 iDate; // (year, month, day, time in seconds) — unused here
// Time-varying grain: samples the noise texture at a point that scrolls with
// time, then squares the value to bias it toward darks.
float noise(vec2 p)
{
	// FIX: "sample" is a reserved word in GLSL ES and in desktop GLSL 4.x;
	// strict drivers refuse to compile it as an identifier, so rename it.
	float n = texture2D(iChannel1, vec2(1., 2.*cos(iGlobalTime))*iGlobalTime*8. + p*1.).x;
	n *= n;
	return n;
}
// Time gate that flips between 0 and 1: a and b shape the wobble of the
// phase, c sets the threshold above which the gate is "on".
float onOff(float a, float b, float c)
{
	float phase = iGlobalTime + a * cos(iGlobalTime * b);
	return step(c, sin(phase));
}
// Linear falloff from 1 down to 0 as y runs from start to end; returns 0
// for y outside [start, end).
float ramp(float y, float start, float end)
{
	float within = step(start, y) - step(end, y);
	float t = (y - start) / (end - start) * within;
	return (1. - t) * within;
}
// Noisy horizontal band that slowly drifts down the screen.
float stripes(vec2 uv)
{
	float grain = noise(uv * vec2(0.5, 1.) + vec2(1., 3.));
	float band = mod(uv.y * 4. + iGlobalTime / 2. + sin(iGlobalTime + sin(iGlobalTime * 0.63)), 1.);
	return ramp(band, 0.5, 0.6) * grain;
}
// Fetches the "video" frame (iChannel0) with intermittent horizontal jitter
// and vertical roll, both gated on and off over time by onOff().
vec3 getVideo(vec2 uv)
{
	vec2 coord = uv;
	// Moving band in which the horizontal jitter is strongest.
	float dist = coord.y - mod(iGlobalTime / 4., 1.);
	float window = 1. / (1. + 20. * dist * dist);
	coord.x = coord.x + sin(coord.y * 10. + iGlobalTime) / 50. * onOff(4., 4., .3) * (1. + cos(iGlobalTime * 80.)) * window;
	// Occasional vertical roll (VHS tracking glitch).
	float vShift = 0.4 * onOff(2., 3., .9) * (sin(iGlobalTime) * sin(iGlobalTime * 20.) +
		(0.5 + 0.1 * sin(iGlobalTime * 200.) * cos(iGlobalTime)));
	coord.y = mod(coord.y + vShift, 1.);
	return vec3(texture2D(iChannel0, coord));
}
// Barrel distortion around the screen centre, mimicking a curved CRT face.
vec2 screenDistort(vec2 uv)
{
	vec2 centred = uv - vec2(.5, .5);
	centred = centred * 1.2 * (1. / 1.2 + 2. * centred.x * centred.x * centred.y * centred.y);
	return centred + vec2(.5, .5);
}
// Composites the VCR look: curved screen, jittered video, drifting stripes,
// grain, vignette, and a subtle scanline flicker.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
	vec2 uv = screenDistort(fragCoord.xy / iResolution.xy);
	vec3 col = getVideo(uv);
	col += stripes(uv);
	col += noise(uv * 2.) / 2.;
	// Vignette whose strength breathes slightly over time.
	float vigAmt = 3. + .3 * sin(iGlobalTime + 5. * cos(iGlobalTime * 5.));
	col *= (1. - vigAmt * (uv.y - .5) * (uv.y - .5)) * (1. - vigAmt * (uv.x - .5) * (uv.x - .5));
	// Scrolling scanline flicker.
	col *= (12. + mod(uv.y * 30. + iGlobalTime, 1.)) / 13.;
	fragColor = vec4(col, 1.0);
}
// Entry point: bridge the ShaderToy-style mainImage() to plain GLSL outputs.
void main(void)
{
	vec4 color;
	mainImage(color, gl_FragCoord.xy);
	gl_FragColor = color;
}
Of course, in the ShaderCallBack I set the expected constants :
Code: Select all
// Sampler bindings: material texture layer N feeds uniform iChannelN.
s32 layer0 = 0;
s32 layer1 = 1;
s32 layer2 = 2;
s32 layer3 = 3;
// FIX: the shader declares "uniform vec3 iResolution" — a FLOAT vec3 — so it
// must be uploaded as three f32 values. The previous code passed two s32
// values, which hits the integer overload of setPixelShaderConstant and
// leaves a float vec3 uniform unset (the shader then divides by zero in
// mainImage). z is the pixel aspect ratio per ShaderToy convention.
f32 resolution[3] =
{ static_cast<f32>(this->dimension.Width), static_cast<f32>(this->dimension.Height), 1.0f }; // <== Matches the dimension of the GUI element
f32 globalTime = static_cast<f32>(this->timer->getTime() - this->startTime) / 1000.0f; // <== The shader expects seconds. startTime is set in the ShaderCallback constructor.
services->setPixelShaderConstant("iResolution", resolution, 3);
services->setPixelShaderConstant("iGlobalTime", &globalTime, 1);
services->setPixelShaderConstant("iChannel0", &layer0, 1);
services->setPixelShaderConstant("iChannel1", &layer1, 1);
services->setPixelShaderConstant("iChannel2", &layer2, 1);
services->setPixelShaderConstant("iChannel3", &layer3, 1);