www.jpct.net
jPCT-AE - a 3d engine for Android => Support => Topic started by: MrM on September 17, 2011, 04:11:58 am

I'm using a normal shader I found on the forum:
uniform mat4 modelViewMatrix;
uniform mat4 modelViewProjectionMatrix;
uniform vec4 additionalColor;
uniform vec4 ambientColor;
uniform vec3 lightPositions[8];
attribute vec4 position;
// tangent.xyz = tangent vector, tangent.w = handedness sign used to orient the bitangent
attribute vec4 tangent;
attribute vec3 normal;
attribute vec2 texture0;
varying vec3 lightVec[2];
varying vec3 eyeVec;
varying vec2 texCoord;
// Vertex stage of a tangent-space normal-mapping shader (single light).
// Builds a TBN basis in eye space and outputs the light and eye vectors
// expressed in tangent space, so the fragment stage can light against
// the normal map directly.
void main(void)
{
  texCoord = texture0.xy;
  // Transform normal and tangent into eye space (w = 0.0: direction, no translation).
  vec3 n = normalize(modelViewMatrix * vec4(normal, 0.0)).xyz;
  vec3 t = normalize(modelViewMatrix * vec4(tangent.xyz, 0.0)).xyz;
  // Bitangent; tangent.w flips it for mirrored UVs.
  vec3 b = tangent.w * cross(n, t);
  // Vertex position in eye space.
  vec3 vVertex = vec3(modelViewMatrix * position);
  // Vector from vertex to light 0 (the forum export dropped this '-').
  vec3 tmpVec = lightPositions[0].xyz - vVertex;
  // Project the light vector onto the TBN basis -> tangent space.
  vec3 lv;
  lv.x = dot(tmpVec, t);
  lv.y = dot(tmpVec, b);
  lv.z = dot(tmpVec, n);
  lightVec[0] = lv;
  // Eye vector: camera sits at the origin in eye space, so it is -vVertex
  // (the export also dropped this '-'); projected into tangent space.
  tmpVec = vVertex * -1.0;
  eyeVec.x = dot(tmpVec, t);
  eyeVec.y = dot(tmpVec, b);
  eyeVec.z = dot(tmpVec, n);
  gl_Position = modelViewProjectionMatrix * position;
}
precision mediump float;
varying vec3 lightVec[2];
varying vec3 eyeVec;
varying vec2 texCoord;
uniform sampler2D textureUnit0;  // diffuse/base texture
uniform sampler2D textureUnit1;  // tangent-space normal map
uniform vec3 diffuseColors[8];
uniform vec3 specularColors[8];
uniform vec4 ambientColor;
uniform float invRadius;         // 1 / light radius, drives linear attenuation
// Fragment stage: per-pixel lighting of a normal map with one light.
// lightVec/eyeVec arrive already expressed in tangent space.
void main ()
{
  vec4 vAmbient = ambientColor;
  vec3 vVec = normalize(eyeVec);
  vec4 base = texture2D(textureUnit0, texCoord);
  // Unpack normal map from [0,1] to [-1,1] (the export dropped the '-').
  vec3 bump = normalize(texture2D(textureUnit1, texCoord).xyz * 2.0 - 1.0);
  float distSqr = dot(lightVec[0], lightVec[0]);
  // Linear distance attenuation (the export dropped this '-' too).
  float att = clamp(1.0 - invRadius * sqrt(distSqr), 0.0, 1.0);
  vec3 lVec = lightVec[0] * inversesqrt(distSqr);
  float diffuse = max(dot(lVec, bump), 0.0);
  vec4 vDiffuse = vec4(diffuseColors[0], 0.0) * diffuse;
  // NOTE(review): reflect() of the *unnegated* light vector is what the original
  // post used; textbook Phong would pass -lVec. Kept as posted — confirm visually.
  float specular = pow(clamp(dot(reflect(lVec, bump), vVec), 0.0, 1.0), 0.85);
  vec4 vSpecular = vec4(specularColors[0], 0.0) * specular;
  vec4 col = (vDiffuse * base + vSpecular) * att;
  // NOTE(review): diffuse and specular are added twice here (once in col, once
  // below), effectively doubling their weight. Kept as the original author wrote it.
  gl_FragColor = col + (vAmbient * base + vDiffuse * base + vSpecular) * att;
}
And it's working very well, but it seems to me that it can only register one light? I'm using three lights placed on different positions (I know how JPCT's Coordinate system works), front, left and right, and all a bit high on the Y axis (10), but the only one having effect is the front one, since there is absolutely no difference between using just that one light and all three of them. They are all in the same world, I'm using enable() and setIntensity(); for all three of them, and world's ambient light.
My question is, (since I don't understand the shader code yet) is the shader code allowing only 1 light for the effect, or it's something else?
P.S.: I've tried playing around with shader.setStaticUniform("invRadius", 0.0003f); because I thought it might matter. It does darken the map if higher values are used, but there's not much of a difference with lower ones — I guess because it's already so close to plain zero.

Yes, this shader uses one light source only. If you want more, you have to calculate additional lightVecs in the vertex shader, use them in the fragment shader and mix the results.

Great, thanks... Off to read the Red Book then :)

Just an update:
The shader code works excellent when modified to work with multiple lights too, which turned out to be simple enough.
P.S.: Just make sure of your object's dimensions, as placing lights within your object while thinking they should be far away (that was the mistake I made) will make it pitch black (from the outside), which is understandable.

Would you be willing to post your source for the multiple-light normal map shader, MrM? I'd love to see it.

This one's the same as above, only more of them are initialized, three to be exact. It's not perfect, as it was done for testing purposes. Attenuation is calculated for only one of them.
By the way this is nothing revolutionary, I manually initialize every extra light I want to make, It's not made to work with unlimited lights. For that (if you have patience), look through this:
http://www.gamasutra.com/view/feature/2361/let_there_be_light_a_unified_.php
Vertex:
uniform mat4 modelViewMatrix;
uniform mat4 modelViewProjectionMatrix;
uniform vec4 additionalColor;
uniform vec4 ambientColor;
uniform vec3 lightPositions[8];
attribute vec4 position;
// tangent.xyz = tangent vector, tangent.w = handedness sign used to orient the bitangent
attribute vec4 tangent;
attribute vec3 normal;
attribute vec2 texture0;
varying vec3 lightVec[3];
varying vec3 eyeVec;
varying vec2 texCoord;
// Vertex stage of the normal-mapping shader, extended to three lights.
// Deliberately unrolled (no loop) — see the thread note about loops being
// slow on Adreno GPUs. One tangent-space light vector is emitted per light.
void main(void)
{
  texCoord = texture0.xy;
  // Eye-space TBN basis (w = 0.0: directions, no translation).
  vec3 n = normalize(modelViewMatrix * vec4(normal, 0.0)).xyz;
  vec3 t = normalize(modelViewMatrix * vec4(tangent.xyz, 0.0)).xyz;
  vec3 b = tangent.w * cross(n, t);
  // Vertex position in eye space.
  vec3 vVertex = vec3(modelViewMatrix * position);
  // Vertex-to-light vectors (the forum export dropped these '-' signs).
  vec3 tmpVec = lightPositions[0].xyz - vVertex;
  vec3 tmpVec1 = lightPositions[1].xyz - vVertex;
  vec3 tmpVec2 = lightPositions[2].xyz - vVertex;
  vec3 lv;
  vec3 lv1;
  vec3 lv2;
  // Project each light vector onto the TBN basis -> tangent space.
  lv.x = dot(tmpVec, t);
  lv.y = dot(tmpVec, b);
  lv.z = dot(tmpVec, n);
  lightVec[0] = lv;
  lv1.x = dot(tmpVec1, t);
  lv1.y = dot(tmpVec1, b);
  lv1.z = dot(tmpVec1, n);
  lightVec[1] = lv1;
  lv2.x = dot(tmpVec2, t);
  lv2.y = dot(tmpVec2, b);
  lv2.z = dot(tmpVec2, n);
  lightVec[2] = lv2;
  // Eye vector: -vVertex (camera at origin in eye space; export dropped the '-').
  tmpVec = vVertex * -1.0;
  eyeVec.x = dot(tmpVec, t);
  eyeVec.y = dot(tmpVec, b);
  eyeVec.z = dot(tmpVec, n);
  gl_Position = modelViewProjectionMatrix * position;
}
Fragment:
precision mediump float;
varying vec3 lightVec[3];
varying vec3 eyeVec;
varying vec2 texCoord;
uniform sampler2D textureUnit0;  // diffuse/base texture
uniform sampler2D textureUnit1;  // tangent-space normal map
uniform vec3 diffuseColors[8];
uniform vec3 specularColors[8];
uniform vec4 ambientColor;
uniform float invRadius;         // 1 / light radius, drives linear attenuation
// Fragment stage: per-pixel normal mapping with three lights.
// Fixes from the posted version: attenuation is now applied per light
// (att1/att2 were computed but never used), each light uses its own
// diffuse/specular color (col1/col2 were identical copies of light 0),
// and diffuse/specular are evaluated per light instead of against a
// sum of normalized light vectors. Unrolled on purpose (Adreno + loops).
void main ()
{
  vec3 vVec = normalize(eyeVec);
  vec4 base = texture2D(textureUnit0, texCoord);
  // Unpack normal map from [0,1] to [-1,1].
  vec3 bump = normalize(texture2D(textureUnit1, texCoord).xyz * 2.0 - 1.0);
  // Start with the ambient contribution.
  vec4 col = ambientColor * base;
  // --- Light 0 ---
  float distSqr0 = dot(lightVec[0], lightVec[0]);
  float att0 = clamp(1.0 - invRadius * sqrt(distSqr0), 0.0, 1.0);
  vec3 lVec0 = lightVec[0] * inversesqrt(distSqr0);
  float diff0 = max(dot(lVec0, bump), 0.0);
  float spec0 = pow(clamp(dot(reflect(lVec0, bump), vVec), 0.0, 1.0), 0.85);
  col += (vec4(diffuseColors[0], 0.0) * diff0 * base + vec4(specularColors[0], 0.0) * spec0) * att0;
  // --- Light 1 ---
  float distSqr1 = dot(lightVec[1], lightVec[1]);
  float att1 = clamp(1.0 - invRadius * sqrt(distSqr1), 0.0, 1.0);
  vec3 lVec1 = lightVec[1] * inversesqrt(distSqr1);
  float diff1 = max(dot(lVec1, bump), 0.0);
  float spec1 = pow(clamp(dot(reflect(lVec1, bump), vVec), 0.0, 1.0), 0.85);
  col += (vec4(diffuseColors[1], 0.0) * diff1 * base + vec4(specularColors[1], 0.0) * spec1) * att1;
  // --- Light 2 ---
  float distSqr2 = dot(lightVec[2], lightVec[2]);
  float att2 = clamp(1.0 - invRadius * sqrt(distSqr2), 0.0, 1.0);
  vec3 lVec2 = lightVec[2] * inversesqrt(distSqr2);
  float diff2 = max(dot(lVec2, bump), 0.0);
  float spec2 = pow(clamp(dot(reflect(lVec2, bump), vVec), 0.0, 1.0), 0.85);
  col += (vec4(diffuseColors[2], 0.0) * diff2 * base + vec4(specularColors[2], 0.0) * spec2) * att2;
  gl_FragColor = col;
}

It's a good idea to unroll the loop even if it clutters the code. It's faster and using a loop on Adreno based devices is asking for a visual Fukushima.

I was thinking of something similar while I was writing that. :)
I'll have to optimize it later on, but I didn't even want to bother with that ATM, since I only needed a couple of lights... They aren't going to change the speed *TOO* much, and as I said, it was testing purposes only.
But I have to agree with you: too many standalone operations, ifs, or manual initializations slow things down.