In many examples on the internet (such as webglfundamentals or webgl-boilerplate), the authors use two triangles to cover the full screen and invoke the pixel shader for every pixel on the canvas.
// Globals shared between init(), resizeCanvas() and render().
var canvas, gl, buffer,
vertex_shader, fragment_shader,
currentProgram, vertex_position,
timeLocation, resolutionLocation,
parameters = { start_time : new Date().getTime(), // page-load timestamp (ms)
time : 0, // elapsed ms since start_time, updated each frame in render()
screenWidth : 0, // drawing-buffer size, kept in sync by resizeCanvas()
screenHeight: 0 };
// Kick off: build GL resources, then start the animation loop.
init();
animate();
// One-time setup: grab shader sources and the canvas, create the GL
// context, upload the fullscreen-quad vertex buffer, compile/link the
// program, and cache uniform/attribute locations for render().
function init() {
    vertex_shader = document.getElementById('vs').textContent;
    fragment_shader = document.getElementById('fs').textContent;
    canvas = document.querySelector( 'canvas' );
    try {
        // Prefer the standard 'webgl' context name; fall back to the
        // legacy 'experimental-webgl' alias used by older browsers.
        gl = canvas.getContext( 'webgl' ) || canvas.getContext( 'experimental-webgl' );
    } catch( error ) { }
    if ( !gl )
        throw "cannot create webgl context";
    // Create Vertex buffer (2 triangles covering the clip-space square).
    buffer = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, buffer );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ - 1.0, - 1.0, 1.0, - 1.0, - 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, - 1.0, 1.0 ] ), gl.STATIC_DRAW );
    currentProgram = createProgram( vertex_shader, fragment_shader );
    timeLocation = gl.getUniformLocation( currentProgram, 'time' );
    resolutionLocation = gl.getUniformLocation( currentProgram, 'resolution' );
    // BUG FIX: vertex_position was never assigned anywhere, so render()
    // relied on `undefined` silently coercing to attribute location 0.
    // Query the actual location of the 'position' attribute explicitly.
    vertex_position = gl.getAttribLocation( currentProgram, 'position' );
}
// Compiles both shader stages and links them into a WebGL program.
// Returns the linked program, or null (after alerting the details)
// when either compilation or linking fails.
function createProgram( vertex, fragment ) {
    var program = gl.createProgram();
    var vertexShader = createShader( vertex, gl.VERTEX_SHADER );
    // GLSL ES requires an explicit default float precision in fragment shaders.
    var fragmentShader = createShader( '#ifdef GL_ES\nprecision highp float;\n#endif\n\n' + fragment, gl.FRAGMENT_SHADER );
    if ( vertexShader == null || fragmentShader == null ) {
        return null;
    }
    gl.attachShader( program, vertexShader );
    gl.attachShader( program, fragmentShader );
    // Flag the shader objects for deletion; GL frees them once the
    // program no longer references them.
    gl.deleteShader( vertexShader );
    gl.deleteShader( fragmentShader );
    gl.linkProgram( program );
    if ( gl.getProgramParameter( program, gl.LINK_STATUS ) ) {
        return program;
    }
    alert( "ERROR:\n" +
        "VALIDATE_STATUS: " + gl.getProgramParameter( program, gl.VALIDATE_STATUS ) + "\n" +
        "ERROR: " + gl.getError() + "\n\n" +
        "- Vertex Shader -\n" + vertex + "\n\n" +
        "- Fragment Shader -\n" + fragment );
    return null;
}
// Compiles a shader of the given type (gl.VERTEX_SHADER or
// gl.FRAGMENT_SHADER) from GLSL source. Returns the shader object,
// or null (after alerting the info log) on compile failure.
function createShader( src, type ) {
    var shader = gl.createShader( type );
    gl.shaderSource( shader, src );
    // BUG FIX: was "glpileShader( shader )" — a call to a nonexistent
    // function that threw a ReferenceError and prevented any shader
    // from ever compiling. The correct API call is gl.compileShader.
    gl.compileShader( shader );
    if ( !gl.getShaderParameter( shader, gl.COMPILE_STATUS ) ) {
        alert( ( type == gl.VERTEX_SHADER ? "VERTEX" : "FRAGMENT" ) + " SHADER:\n" + gl.getShaderInfoLog( shader ) );
        return null;
    }
    return shader;
}
// Keeps the canvas drawing buffer the same size as its CSS layout size.
// When they diverge, resizes the buffer, updates the cached resolution
// used by render(), and resets the GL viewport.
function resizeCanvas( event ) {
    var displayWidth = canvas.clientWidth;
    var displayHeight = canvas.clientHeight;
    if ( canvas.width == displayWidth && canvas.height == displayHeight ) {
        return; // already in sync — nothing to do
    }
    canvas.width = displayWidth;
    canvas.height = displayHeight;
    parameters.screenWidth = displayWidth;
    parameters.screenHeight = displayHeight;
    gl.viewport( 0, 0, displayWidth, displayHeight );
}
// Main loop driver: resize if needed, draw one frame, then schedule
// the next iteration via requestAnimationFrame.
function animate() {
resizeCanvas();
render();
requestAnimationFrame( animate );
}
// Draws one frame: updates the time/resolution uniforms, binds the
// fullscreen-quad buffer, and issues a 6-vertex (two-triangle) draw.
function render() {
if ( !currentProgram )
return; // program failed to compile/link — nothing to draw
// Elapsed milliseconds since page load.
parameters.time = new Date().getTime() - parameters.start_time;
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.useProgram( currentProgram );
// The shader works in seconds, so convert from milliseconds.
gl.uniform1f( timeLocation, parameters.time / 1000 );
gl.uniform2f( resolutionLocation, parameters.screenWidth, parameters.screenHeight );
gl.bindBuffer( gl.ARRAY_BUFFER, buffer );
// NOTE(review): vertex_position is never assigned in this file, so it is
// undefined here and coerces to attribute location 0 — this only works
// if 'position' happens to be bound to location 0; confirm via
// gl.getAttribLocation in init().
gl.vertexAttribPointer( vertex_position, 2, gl.FLOAT, false, 0, 0 );
gl.enableVertexAttribArray( vertex_position );
gl.drawArrays( gl.TRIANGLES, 0, 6 );
gl.disableVertexAttribArray( vertex_position );
}
/* Fill the entire viewport with a black, scrollbar-free page. */
html, body {
background-color: #000000;
margin: 0px;
overflow: hidden;
width: 100%;
height: 100%;
}
/* Stretch the canvas over the page; the JS resizes its drawing buffer to match. */
canvas {
width: 100%;
height: 100%;
}
<canvas></canvas>
<div id="info"></div>
<!-- Pass-through vertex shader: positions are already in clip space. -->
<script id="vs" type="x-shader/vertex">
attribute vec3 position;
void main() {
gl_Position = vec4( position, 1.0 );
}
</script>
<!-- Fragment shader: animated color pattern driven by the time and resolution uniforms. -->
<script id="fs" type="x-shader/fragment">
uniform float time;
uniform vec2 resolution;
void main( void ) {
// Map pixel coordinates from [0, resolution] into the [-1, 1] range.
vec2 position = - 1.0 + 2.0 * gl_FragCoord.xy / resolution.xy;
float red = abs( sin( position.x * position.y + time / 5.0 ) );
float green = abs( sin( position.x * position.y + time / 4.0 ) );
float blue = abs( sin( position.x * position.y + time / 3.0 ) );
gl_FragColor = vec4( red, green, blue, 1.0 );
}
</script>
In many examples on the internet (such as webglfundamentals or webgl-boilerplate), the authors use two triangles to cover the full screen and invoke the pixel shader for every pixel on the canvas.
// Globals shared between init(), resizeCanvas() and render().
var canvas, gl, buffer,
vertex_shader, fragment_shader,
currentProgram, vertex_position,
timeLocation, resolutionLocation,
parameters = { start_time : new Date().getTime(), // page-load timestamp (ms)
time : 0, // elapsed ms since start_time, updated each frame in render()
screenWidth : 0, // drawing-buffer size, kept in sync by resizeCanvas()
screenHeight: 0 };
// Kick off: build GL resources, then start the animation loop.
init();
animate();
// One-time setup: grab shader sources and the canvas, create the GL
// context, upload the fullscreen-quad vertex buffer, compile/link the
// program, and cache uniform/attribute locations for render().
function init() {
    vertex_shader = document.getElementById('vs').textContent;
    fragment_shader = document.getElementById('fs').textContent;
    canvas = document.querySelector( 'canvas' );
    try {
        // Prefer the standard 'webgl' context name; fall back to the
        // legacy 'experimental-webgl' alias used by older browsers.
        gl = canvas.getContext( 'webgl' ) || canvas.getContext( 'experimental-webgl' );
    } catch( error ) { }
    if ( !gl )
        throw "cannot create webgl context";
    // Create Vertex buffer (2 triangles covering the clip-space square).
    buffer = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, buffer );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ - 1.0, - 1.0, 1.0, - 1.0, - 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, - 1.0, 1.0 ] ), gl.STATIC_DRAW );
    currentProgram = createProgram( vertex_shader, fragment_shader );
    timeLocation = gl.getUniformLocation( currentProgram, 'time' );
    resolutionLocation = gl.getUniformLocation( currentProgram, 'resolution' );
    // BUG FIX: vertex_position was never assigned anywhere, so render()
    // relied on `undefined` silently coercing to attribute location 0.
    // Query the actual location of the 'position' attribute explicitly.
    vertex_position = gl.getAttribLocation( currentProgram, 'position' );
}
// Links a WebGL program from the given vertex/fragment GLSL sources.
// Returns the linked program, or null on compile/link failure
// (details are surfaced via alert()).
function createProgram( vertex, fragment ) {
var program = gl.createProgram();
var vs = createShader( vertex, gl.VERTEX_SHADER );
// GLSL ES requires an explicit default float precision in fragment shaders.
var fs = createShader( '#ifdef GL_ES\nprecision highp float;\n#endif\n\n' + fragment, gl.FRAGMENT_SHADER );
if ( vs == null || fs == null )
return null;
gl.attachShader( program, vs );
gl.attachShader( program, fs );
// Flag shaders for deletion; GL frees them once the program releases them.
gl.deleteShader( vs );
gl.deleteShader( fs );
gl.linkProgram( program );
if ( !gl.getProgramParameter( program, gl.LINK_STATUS ) ) {
alert( "ERROR:\n" +
"VALIDATE_STATUS: " + gl.getProgramParameter( program, gl.VALIDATE_STATUS ) + "\n" +
"ERROR: " + gl.getError() + "\n\n" +
"- Vertex Shader -\n" + vertex + "\n\n" +
"- Fragment Shader -\n" + fragment );
return null;
}
return program;
}
// Compiles a shader object of the given type from GLSL source.
// Returns the shader, or null (after alerting the info log) on failure.
function createShader( src, type ) {
    var shader = gl.createShader( type );
    gl.shaderSource( shader, src );
    gl.compileShader( shader );
    if ( gl.getShaderParameter( shader, gl.COMPILE_STATUS ) ) {
        return shader;
    }
    var label = type == gl.VERTEX_SHADER ? "VERTEX" : "FRAGMENT";
    alert( label + " SHADER:\n" + gl.getShaderInfoLog( shader ) );
    return null;
}
// Keeps the canvas drawing buffer the same size as its CSS layout size;
// when they diverge, resizes the buffer, updates the cached resolution
// used by render(), and resets the GL viewport.
function resizeCanvas( event ) {
if ( canvas.width != canvas.clientWidth ||
canvas.height != canvas.clientHeight ) {
canvas.width = canvas.clientWidth;
canvas.height = canvas.clientHeight;
parameters.screenWidth = canvas.width;
parameters.screenHeight = canvas.height;
gl.viewport( 0, 0, canvas.width, canvas.height );
}
}
// Main loop driver: resize if needed, draw one frame, then schedule
// the next iteration via requestAnimationFrame.
function animate() {
resizeCanvas();
render();
requestAnimationFrame( animate );
}
// Draws one frame: updates the time/resolution uniforms, binds the
// fullscreen-quad buffer, and issues a 6-vertex (two-triangle) draw.
function render() {
if ( !currentProgram )
return; // program failed to compile/link — nothing to draw
// Elapsed milliseconds since page load.
parameters.time = new Date().getTime() - parameters.start_time;
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.useProgram( currentProgram );
// The shader works in seconds, so convert from milliseconds.
gl.uniform1f( timeLocation, parameters.time / 1000 );
gl.uniform2f( resolutionLocation, parameters.screenWidth, parameters.screenHeight );
gl.bindBuffer( gl.ARRAY_BUFFER, buffer );
// NOTE(review): vertex_position is never assigned in this file, so it is
// undefined here and coerces to attribute location 0 — this only works
// if 'position' happens to be bound to location 0; confirm via
// gl.getAttribLocation in init().
gl.vertexAttribPointer( vertex_position, 2, gl.FLOAT, false, 0, 0 );
gl.enableVertexAttribArray( vertex_position );
gl.drawArrays( gl.TRIANGLES, 0, 6 );
gl.disableVertexAttribArray( vertex_position );
}
/* Fill the entire viewport with a black, scrollbar-free page. */
html, body {
background-color: #000000;
margin: 0px;
overflow: hidden;
width: 100%;
height: 100%;
}
/* Stretch the canvas over the page; the JS resizes its drawing buffer to match. */
canvas {
width: 100%;
height: 100%;
}
<canvas></canvas>
<div id="info"></div>
<!-- Pass-through vertex shader: positions are already in clip space. -->
<script id="vs" type="x-shader/vertex">
attribute vec3 position;
void main() {
gl_Position = vec4( position, 1.0 );
}
</script>
<!-- Fragment shader: animated color pattern driven by the time and resolution uniforms. -->
<script id="fs" type="x-shader/fragment">
uniform float time;
uniform vec2 resolution;
void main( void ) {
// Map pixel coordinates from [0, resolution] into the [-1, 1] range.
vec2 position = - 1.0 + 2.0 * gl_FragCoord.xy / resolution.xy;
float red = abs( sin( position.x * position.y + time / 5.0 ) );
float green = abs( sin( position.x * position.y + time / 4.0 ) );
float blue = abs( sin( position.x * position.y + time / 3.0 ) );
gl_FragColor = vec4( red, green, blue, 1.0 );
}
</script>
this code uses a buffer with 6 vertices to render something like this:
does this method have any advantages or not?
compared to the method where we render one triangle (3 vertices) covering the full screen, as in this picture:
/* Remove the default page margin and hide scrollbars so the canvas fills the window. */
body{
margin: 0;
overflow: hidden;
}
<canvas></canvas>
<!-- Pass-through vertex shader: coords are already clip-space positions. -->
<script type='glsl/vertex'>
attribute vec2 coords;
void main(void) {
gl_Position = vec4(coords.xy, 0.0, 1.0);
}
</script>
<!-- Fragment shader; draw() packs mr = (mouseX, mouseY, width, height). -->
<script type='glsl/fragment'>precision highp float;
uniform vec4 mr;
void main(void) {
vec2 p = gl_FragCoord.xy;
// Recenter and scale pixel coordinates using the canvas size stored in mr.ba / mr.b.
vec2 q = (p + p - mr.ba) / mr.b;
// Repeated inversion-and-fold iteration produces the fractal pattern.
for(int i = 0; i < 13; i++) {
q = abs(q)/dot(q,q) - mr.xy/mr.zw;
}
gl_FragColor = vec4(q, q.x/q.y, 1.0);
}
</script>
<script>
// Size the canvas to the window before grabbing the GL context.
let canvas = document.querySelector('canvas');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
// Standard 'webgl' context first, legacy 'experimental-webgl' as a fallback.
let gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
var h = gl.drawingBufferHeight;
var w = gl.drawingBufferWidth;
// Compile both shaders and link them into a single program.
let pid = gl.createProgram();
shader('glsl/vertex', gl.VERTEX_SHADER);
shader('glsl/fragment', gl.FRAGMENT_SHADER);
gl.linkProgram(pid);
gl.useProgram(pid);
// A single oversized triangle whose interior covers the whole clip-space square.
let array = new Float32Array([-1, 3, -1, -1, 3, -1]);
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, array, gl.STATIC_DRAW);
let al = gl.getAttribLocation(pid, "coords");
gl.vertexAttribPointer(al, 2 /*components per vertex */, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(al);
let mr = gl.getUniformLocation(pid, 'mr');
// Redraw on pointer movement; the initial call draws with default coordinates.
window.addEventListener('mousemove', draw);
window.addEventListener('touchmove', draw);
draw();
// Renders one frame; e is an optional mouse or touch event.
function draw(e) {
let ev = e && e.touches ? e.touches[0] : e;
// Fall back to fixed coordinates for the initial, event-less call.
let x = ev ? ev.clientX : 250;
let y = ev ? h - ev.clientY: 111; // flip Y: GL origin is bottom-left
gl.uniform4f(mr, x, y, w, h);
gl.viewport(0, 0, w, h);
gl.clearColor(0, 0, 0, 0);
// NOTE(review): clearColor is set but gl.clear() is never called; the
// fullscreen triangle overwrites every pixel anyway — confirm intended.
gl.drawArrays(gl.TRIANGLES, 0, 3);
}
// Loads GLSL source from the <script type=name> tag, compiles it, and
// attaches the result to the global program `pid`.
function shader(name, type) {
    let src = [].slice.call(document.scripts).find(s => s.type === name).innerText;
    let sid = gl.createShader(type);
    gl.shaderSource(sid, src);
    gl.compileShader(sid);
    // ROBUSTNESS FIX: the original silently ignored compile failures,
    // leaving only an opaque link error later. Surface the info log.
    if (!gl.getShaderParameter(sid, gl.COMPILE_STATUS)) {
        console.error(name + ': ' + gl.getShaderInfoLog(sid));
    }
    gl.attachShader(pid, sid);
}
</script>
Share
Improve this question
edited Mar 31, 2019 at 23:23
Stranger in the Q
asked Mar 16, 2019 at 13:27
Stranger in the QStranger in the Q
3,8982 gold badges23 silver badges26 bronze badges
5
- There is no important difference. You're drawing single screen quad anyway. It's entirely a preference. I'd argue it's normal to draw that you want to draw, not to rely on side effects. As an example I could draw circles and rectangles by drawing spheres and cylinders with an orthographic projection but I don't. If I want to draw a circle or a rectangle I provide data for a circle or a rectangle not a sphere and cylinder. Similarly if I want to draw a quad I give data for a quad. I'd argue the triangle method is just of way of being tricky. But it really doesn't matter. Do whatever you want. – user128511 Commented Mar 16, 2019 at 13:32
- @gman i think about pixels on edge of triangles, does gpu handles triangles separately or not, does rasterization invoked per triangle or per quad? – Stranger in the Q Commented Mar 16, 2019 at 13:36
- @rabbit76, no, they are not drawn twice. That would be explicitly against the spec and would also make edges appear when blending is on. As for rasterization, that's up to the driver and GPU. For example, tiled GPUs bucket things into tiles and draw each tile separately. Rendering tiles is parallelized, as just one example. Also, depending on the GPU, it's just going to clip the triangle into a quad and then subdivide the quad into 2 triangles. That would certainly be the simplest way to rasterize. Make the hardware do one thing fast (draw triangles), then turn everything into triangles. – user128511 Commented Mar 16, 2019 at 13:37
- @gman : Don't your comments sound like answers rather than just clarifications? – Peter O. Commented Mar 16, 2019 at 21:44
- And here's me using a fan with 4 vertices to draw my ndc quad ;) – LJᛃ Commented Mar 16, 2019 at 22:53
1 Answer
Reset to default
22
In both cases, every pixel on the screen will be rasterized once, but they will not necessarily only be shaded once. Using two triangles, you will be subject to quad overshading along the diagonal; some pixels will be shaded even though they're outside of their triangle, as helper invocations for the 2×2 quads, then shaded again by the other triangle.
Using two triangles can also be less efficient for caching, due to implementation details of how pixel shader invocations are packaged into SIMD work groups by the GPU—again, around the edge between the two triangles, you could end up with pixels close together in space being shaded farther apart in time, compared to what happens when using a single fullscreen triangle.
Michal Drobot, in the blog post linked in the previous paragraph, found a performance delta of about 8% between single-triangle and two-triangle fullscreen draws. That only applies to the specific hardware and shaders he was working with, but it demonstrates that these overshading and cache issues can lead to a measurable performance decrease.
Also note that the fullscreen triangle will not be clipped into a quad by the GPU. GPUs use guard-band clipping, meaning that they don't clip offscreen geometry until the vertices are so far offscreen that numerical precision would be lost in the rasterizer (which is very far). In case of the fullscreen triangle, the rasterizer will process it as a single triangle and will simply not generate fragments for the offscreen parts of it.
In short, there's no downside to using a fullscreen triangle, and it could give you a minor performance bump, so I would prefer that over a fullscreen quad in all cases.