Search Results

Search found 563 results on 23 pages for 'vertices'.


  • Rotate triangle so that its tip points in the direction of the point on the screen that we last touched

    - by Sid
    OpenGL ES, Android. Hello all, I am unable to rotate the triangle in such a way that its tip always points at my finger. What I did: I constructed a triangle with GL.GL_TRIANGLES and added touch events to it. I can rotate the triangle around the Z-axis successfully, and I even made a vector class for it. What I need: each time I touch the screen, I want the triangle to rotate to face the touch point. I need some help; where am I going wrong? My code:

    public class Graphic2DTriangle {
        private FloatBuffer vertexBuffer;
        private ByteBuffer indexBuffer;
        private float[] vertices = { -1.0f, -1.0f, 0.0f,
                                      2.0f,  0.0f, 0.0f,
                                     -1.0f,  1.0f, 0.0f };
        private byte[] indices = { 0, 1, 2 };

        public Graphic2DTriangle() {
            ByteBuffer vbb = ByteBuffer.allocateDirect(vertices.length * 4);
            vbb.order(ByteOrder.nativeOrder()); // Use native byte order
            vertexBuffer = vbb.asFloatBuffer(); // Convert byte buffer to float
            vertexBuffer.put(vertices);         // Copy data into buffer
            vertexBuffer.position(0);           // Rewind
            // Set up the index-array buffer. Indices are bytes.
            indexBuffer = ByteBuffer.allocateDirect(indices.length);
            indexBuffer.put(indices);
            indexBuffer.position(0);
        }

        public void draw(GL10 gl) {
            gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
            gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer);
            gl.glDrawElements(GL10.GL_TRIANGLES, indices.length, GL10.GL_UNSIGNED_BYTE, indexBuffer);
            gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        }
    }

    My SurfaceView class, where I handle the touch events:

    public class BallThrowGLSurfaceView extends GLSurfaceView {
        MySquareRender _renderObj;
        View _viewObj;
        float oldX, oldY, dX, dY;
        final float TOUCH_SCALE_FACTOR = 0.6f;
        Vector2 touchPos = new Vector2();
        float angle = 0;

        public BallThrowGLSurfaceView(Context context) {
            super(context);
            _renderObj = new MySquareRender(context);
            this.setRenderer(_renderObj);
            this.setRenderMode(RENDERMODE_WHEN_DIRTY);
        }

        @Override
        public boolean onTouchEvent(MotionEvent event) {
            touchPos.x = event.getX();
            touchPos.y = event.getY();
            Log.i("Co-ord", touchPos.x + "hh" + touchPos.y);
            switch (event.getAction()) {
            case MotionEvent.ACTION_MOVE:
                dX = touchPos.x - oldX;
                dY = touchPos.y - oldY;
                if (touchPos.y > getHeight() / 2) { dX = dX * -1; }
                if (touchPos.x < getWidth() / 2) { dY = dY * -1; }
                _renderObj.mAngle += (dX + dY) * TOUCH_SCALE_FACTOR;
                requestRender();
                Log.i("AngleCo-ord", _renderObj.mAngle + "hh");
            }
            oldX = touchPos.x;
            oldY = touchPos.y;
            Log.i("OldCo-ord", oldX + " hh " + oldY);
            return true;
        }
    }

    Last but not least, my Vector2 class:

    public class Vector2 {
        public static float TO_RADIANS = (1 / 180.0f) * (float) Math.PI;
        public static float TO_DEGREES = (1 / (float) Math.PI) * 180;
        public float x, y;

        public Vector2() { }
        public Vector2(float x, float y) { this.x = x; this.y = y; }
        public Vector2(Vector2 other) { this.x = other.x; this.y = other.y; }

        public Vector2 cpy() { return new Vector2(x, y); }
        public Vector2 set(float x, float y) { this.x = x; this.y = y; return this; }
        public Vector2 set(Vector2 other) { this.x = other.x; this.y = other.y; return this; }
        public Vector2 add(float x, float y) { this.x += x; this.y += y; return this; }
        public Vector2 add(Vector2 other) { this.x += other.x; this.y += other.y; return this; }
        public Vector2 sub(float x, float y) { this.x -= x; this.y -= y; return this; }
        public Vector2 sub(Vector2 other) { this.x -= other.x; this.y -= other.y; return this; }
        public Vector2 mul(float scalar) { this.x *= scalar; this.y *= scalar; return this; }

        public float len() { return FloatMath.sqrt(x * x + y * y); }

        public Vector2 nor() {
            float len = len();
            if (len != 0) { this.x /= len; this.y /= len; }
            return this;
        }

        public float angle() {
            float angle = (float) Math.atan2(y, x) * TO_DEGREES;
            if (angle < 0) angle += 360;
            return angle;
        }

        public Vector2 rotate(float angle) {
            float rad = angle * TO_RADIANS;
            float cos = FloatMath.cos(rad);
            float sin = FloatMath.sin(rad);
            float newX = this.x * cos - this.y * sin;
            float newY = this.x * sin + this.y * cos;
            this.x = newX;
            this.y = newY;
            return this;
        }

        public float dist(Vector2 other) { float distX = this.x - other.x; float distY = this.y - other.y; return FloatMath.sqrt(distX * distX + distY * distY); }
        public float dist(float x, float y) { float distX = this.x - x; float distY = this.y - y; return FloatMath.sqrt(distX * distX + distY * distY); }
        public float distSquared(Vector2 other) { float distX = this.x - other.x; float distY = this.y - other.y; return distX * distX + distY * distY; }
        public float distSquared(float x, float y) { float distX = this.x - x; float distY = this.y - y; return distX * distX + distY * distY; }
    }

    PS: I am able to handle the touch events and can rotate the triangle with my finger, but I want ONE VERTEX of the triangle to always point at my finger's position, wherever it is.
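    Editor's note, not part of the original post: accumulating deltas cannot make the tip track the finger; the usual approach is to set the angle absolutely from the touch position with atan2. A minimal sketch, assuming the triangle is drawn at the centre of the view, the renderer rotates it around Z by mAngle in degrees, and the untouched tip points along +X (as the vertex (2, 0, 0) above suggests); screen Y grows downward while GL Y grows upward, so Y is flipped:

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        float cx = getWidth() / 2f;    // triangle centre in screen coordinates
        float cy = getHeight() / 2f;
        float dx = event.getX() - cx;
        float dy = cy - event.getY();  // flip Y into GL orientation
        // absolute angle from centre to finger, in degrees
        _renderObj.mAngle = (float) Math.toDegrees(Math.atan2(dy, dx));
        requestRender();
        return true;
    }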

    Read the article

  • If I project a sphere in 3D will it be a circle?

    - by yuumei
    Assuming I have infinite vertices to represent the sphere: if I project the sphere from any position/scale in 3D to 2D, will it be a circle? I know it will not be a circle on the screen because of scaling and different resolutions, but do field of view and aspect ratio affect the result? Edit: Sorry, yes, I am talking about perspective projection. It seems the answer is no, then; perspective will distort the sphere. Thanks!
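    A short sketch of why the edit's conclusion holds (standard projective geometry, added here for context, not part of the post):

    % Camera at the origin O, sphere of radius r centred at C.
    % The silhouette rays from O tangent to the sphere form a right
    % circular cone with half-angle \alpha:
    \[
      \sin\alpha = \frac{r}{\lVert C \rVert}
    \]
    % The image plane slices this cone, and a planar section of a cone
    % is a conic: a circle only when the cone's axis is perpendicular
    % to the image plane, i.e. when C lies on the optical axis.
    % Off-axis spheres project to ellipses, and a wider field of view
    % exaggerates the effect. Under an orthographic projection the
    % silhouette rays are parallel, so every sphere projects to a
    % circle (up to the viewport's aspect scaling).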

    Read the article

  • Problem rendering VBO

    - by Onno
    I'm developing a game engine using OpenTK. I'm trying to get to grips with the use of VBOs. I've run into some trouble because somehow it doesn't render correctly. Thus far I've used immediate mode to render a test object, a test cube with a texture.

    namespace SharpEngine.Utility.Mesh {
        using System;
        using System.Collections.Generic;
        using OpenTK;
        using OpenTK.Graphics;
        using OpenTK.Graphics.OpenGL;
        using SharpEngine.Utility;
        using System.Drawing;

        public class ImmediateFaceBasedCube : IMesh {
            private IList<Face> faces = new List<Face>();

            public ImmediateFaceBasedCube() {
                IList<Vector3> allVertices = new List<Vector3>();
                allVertices.Add(new Vector3(1.0f, 1.0f, 1.0f));    //0 rechtsbovenvoor
                allVertices.Add(new Vector3(1.0f, 1.0f, -1.0f));   //1 rechtsbovenachter
                allVertices.Add(new Vector3(-1.0f, 1.0f, -1.0f));  //2 linksbovenachter
                allVertices.Add(new Vector3(-1.0f, 1.0f, 1.0f));   //3 linksbovenvoor
                allVertices.Add(new Vector3(1.0f, -1.0f, 1.0f));   //4 rechtsondervoor
                allVertices.Add(new Vector3(1.0f, -1.0f, -1.0f));  //5 rechtsonderachter
                allVertices.Add(new Vector3(-1.0f, -1.0f, -1.0f)); //6 linksonderachter
                allVertices.Add(new Vector3(-1.0f, -1.0f, 1.0f));  //7 linksondervoor

                IList<Vector2> textureCoordinates = new List<Vector2>();
                textureCoordinates.Add(new Vector2(0, 0));                   //AA - 0
                textureCoordinates.Add(new Vector2(0, 0.3333333f));          //AB - 1
                textureCoordinates.Add(new Vector2(0, 0.6666666f));          //AC - 2
                textureCoordinates.Add(new Vector2(0, 1));                   //AD - 3
                textureCoordinates.Add(new Vector2(0.3333333f, 0));          //BA - 4
                textureCoordinates.Add(new Vector2(0.3333333f, 0.3333333f)); //BB - 5
                textureCoordinates.Add(new Vector2(0.3333333f, 0.6666666f)); //BC - 6
                textureCoordinates.Add(new Vector2(0.3333333f, 1));          //BD - 7
                textureCoordinates.Add(new Vector2(0.6666666f, 0));          //CA - 8
                textureCoordinates.Add(new Vector2(0.6666666f, 0.3333333f)); //CB - 9
                textureCoordinates.Add(new Vector2(0.6666666f, 0.6666666f)); //CC - 10
                textureCoordinates.Add(new Vector2(0.6666666f, 1));          //CD - 11
                textureCoordinates.Add(new Vector2(1, 0));                   //DA - 12
                textureCoordinates.Add(new Vector2(1, 0.3333333f));          //DB - 13
                textureCoordinates.Add(new Vector2(1, 0.6666666f));          //DC - 14
                textureCoordinates.Add(new Vector2(1, 1));                   //DD - 15

                Vector3 copy1 = new Vector3(-2.0f, -2.5f, -3.5f);

                IList<Vector3> normals = new List<Vector3>();
                normals.Add(new Vector3(0, 1.0f, 0));  //0
                normals.Add(new Vector3(0, 0, 1.0f));  //1
                normals.Add(new Vector3(1.0f, 0, 0));  //2
                normals.Add(new Vector3(0, 0, -1.0f)); //3
                normals.Add(new Vector3(-1.0f, 0, 0)); //4
                normals.Add(new Vector3(0, -1.0f, 0)); //5

                //todo: move vertex, normal and texture data to a datastructure
                //todo: VBO based rendering

                //top face
                //1
                IList<VertexData> verticesT1 = new List<VertexData>();
                VertexData T1a = new VertexData(); T1a.Normal = normals[0]; T1a.TexCoord = textureCoordinates[5]; T1a.Position = allVertices[3]; verticesT1.Add(T1a);
                VertexData T1b = new VertexData(); T1b.Normal = normals[0]; T1b.TexCoord = textureCoordinates[9]; T1b.Position = allVertices[0]; verticesT1.Add(T1b);
                VertexData T1c = new VertexData(); T1c.Normal = normals[0]; T1c.TexCoord = textureCoordinates[10]; T1c.Position = allVertices[1]; verticesT1.Add(T1c);
                Face F1 = new Face(verticesT1); faces.Add(F1);
                //2
                IList<VertexData> verticesT2 = new List<VertexData>();
                VertexData T2a = new VertexData(); T2a.Normal = normals[0]; T2a.TexCoord = textureCoordinates[10]; T2a.Position = allVertices[1]; verticesT2.Add(T2a);
                VertexData T2b = new VertexData(); T2b.Normal = normals[0]; T2b.TexCoord = textureCoordinates[6]; T2b.Position = allVertices[2]; verticesT2.Add(T2b);
                VertexData T2c = new VertexData(); T2c.Normal = normals[0]; T2c.TexCoord = textureCoordinates[5]; T2c.Position = allVertices[3]; verticesT2.Add(T2c);
                Face F2 = new Face(verticesT2); faces.Add(F2);

                //front face
                //3
                IList<VertexData> verticesT3 = new List<VertexData>();
                VertexData T3a = new VertexData(); T3a.Normal = normals[1]; T3a.TexCoord = textureCoordinates[1]; T3a.Position = allVertices[3]; verticesT3.Add(T3a);
                VertexData T3b = new VertexData(); T3b.Normal = normals[1]; T3b.TexCoord = textureCoordinates[0]; T3b.Position = allVertices[7]; verticesT3.Add(T3b);
                VertexData T3c = new VertexData(); T3c.Normal = normals[1]; T3c.TexCoord = textureCoordinates[5]; T3c.Position = allVertices[0]; verticesT3.Add(T3c);
                Face F3 = new Face(verticesT3); faces.Add(F3);
                //4
                IList<VertexData> verticesT4 = new List<VertexData>();
                VertexData T4a = new VertexData(); T4a.Normal = normals[1]; T4a.TexCoord = textureCoordinates[5]; T4a.Position = allVertices[0]; verticesT4.Add(T4a);
                VertexData T4b = new VertexData(); T4b.Normal = normals[1]; T4b.TexCoord = textureCoordinates[0]; T4b.Position = allVertices[7]; verticesT4.Add(T4b);
                VertexData T4c = new VertexData(); T4c.Normal = normals[1]; T4c.TexCoord = textureCoordinates[4]; T4c.Position = allVertices[4]; verticesT4.Add(T4c);
                Face F4 = new Face(verticesT4); faces.Add(F4);

                //right face
                //5
                IList<VertexData> verticesT5 = new List<VertexData>();
                VertexData T5a = new VertexData(); T5a.Normal = normals[2]; T5a.TexCoord = textureCoordinates[2]; T5a.Position = allVertices[0]; verticesT5.Add(T5a);
                VertexData T5b = new VertexData(); T5b.Normal = normals[2]; T5b.TexCoord = textureCoordinates[1]; T5b.Position = allVertices[4]; verticesT5.Add(T5b);
                VertexData T5c = new VertexData(); T5c.Normal = normals[2]; T5c.TexCoord = textureCoordinates[6]; T5c.Position = allVertices[1]; verticesT5.Add(T5c);
                Face F5 = new Face(verticesT5); faces.Add(F5);
                //6
                IList<VertexData> verticesT6 = new List<VertexData>();
                VertexData T6a = new VertexData(); T6a.Normal = normals[2]; T6a.TexCoord = textureCoordinates[1]; T6a.Position = allVertices[4]; verticesT6.Add(T6a);
                VertexData T6b = new VertexData(); T6b.Normal = normals[2]; T6b.TexCoord = textureCoordinates[5]; T6b.Position = allVertices[5]; verticesT6.Add(T6b);
                VertexData T6c = new VertexData(); T6c.Normal = normals[2]; T6c.TexCoord = textureCoordinates[6]; T6c.Position = allVertices[1]; verticesT6.Add(T6c);
                Face F6 = new Face(verticesT6); faces.Add(F6);

                //back face
                //7
                IList<VertexData> verticesT7 = new List<VertexData>();
                VertexData T7a = new VertexData(); T7a.Normal = normals[3]; T7a.TexCoord = textureCoordinates[4]; T7a.Position = allVertices[5]; verticesT7.Add(T7a);
                VertexData T7b = new VertexData(); T7b.Normal = normals[3]; T7b.TexCoord = textureCoordinates[9]; T7b.Position = allVertices[2]; verticesT7.Add(T7b);
                VertexData T7c = new VertexData(); T7c.Normal = normals[3]; T7c.TexCoord = textureCoordinates[5]; T7c.Position = allVertices[1]; verticesT7.Add(T7c);
                Face F7 = new Face(verticesT7); faces.Add(F7);
                //8
                IList<VertexData> verticesT8 = new List<VertexData>();
                VertexData T8a = new VertexData(); T8a.Normal = normals[3]; T8a.TexCoord = textureCoordinates[9]; T8a.Position = allVertices[2]; verticesT8.Add(T8a);
                VertexData T8b = new VertexData(); T8b.Normal = normals[3]; T8b.TexCoord = textureCoordinates[4]; T8b.Position = allVertices[5]; verticesT8.Add(T8b);
                VertexData T8c = new VertexData(); T8c.Normal = normals[3]; T8c.TexCoord = textureCoordinates[8]; T8c.Position = allVertices[6]; verticesT8.Add(T8c);
                Face F8 = new Face(verticesT8); faces.Add(F8);

                //left face
                //9
                IList<VertexData> verticesT9 = new List<VertexData>();
                VertexData T9a = new VertexData(); T9a.Normal = normals[4]; T9a.TexCoord = textureCoordinates[8]; T9a.Position = allVertices[6]; verticesT9.Add(T9a);
                VertexData T9b = new VertexData(); T9b.Normal = normals[4]; T9b.TexCoord = textureCoordinates[13]; T9b.Position = allVertices[3]; verticesT9.Add(T9b);
                VertexData T9c = new VertexData(); T9c.Normal = normals[4]; T9c.TexCoord = textureCoordinates[9]; T9c.Position = allVertices[2]; verticesT9.Add(T9c);
                Face F9 = new Face(verticesT9); faces.Add(F9);
                //10
                IList<VertexData> verticesT10 = new List<VertexData>();
                VertexData T10a = new VertexData(); T10a.Normal = normals[4]; T10a.TexCoord = textureCoordinates[8]; T10a.Position = allVertices[6]; verticesT10.Add(T10a);
                VertexData T10b = new VertexData(); T10b.Normal = normals[4]; T10b.TexCoord = textureCoordinates[12]; T10b.Position = allVertices[7]; verticesT10.Add(T10b);
                VertexData T10c = new VertexData(); T10c.Normal = normals[4]; T10c.TexCoord = textureCoordinates[13]; T10c.Position = allVertices[3]; verticesT10.Add(T10c);
                Face F10 = new Face(verticesT10); faces.Add(F10);

                //bottom face
                //11
                IList<VertexData> verticesT11 = new List<VertexData>();
                VertexData T11a = new VertexData(); T11a.Normal = normals[5]; T11a.TexCoord = textureCoordinates[10]; T11a.Position = allVertices[7]; verticesT11.Add(T11a);
                VertexData T11b = new VertexData(); T11b.Normal = normals[5]; T11b.TexCoord = textureCoordinates[9]; T11b.Position = allVertices[6]; verticesT11.Add(T11b);
                VertexData T11c = new VertexData(); T11c.Normal = normals[5]; T11c.TexCoord = textureCoordinates[14]; T11c.Position = allVertices[4]; verticesT11.Add(T11c);
                Face F11 = new Face(verticesT11); faces.Add(F11);
                //12
                IList<VertexData> verticesT12 = new List<VertexData>();
                VertexData T12a = new VertexData(); T12a.Normal = normals[5]; T12a.TexCoord = textureCoordinates[13]; T12a.Position = allVertices[5]; verticesT12.Add(T12a);
                VertexData T12b = new VertexData(); T12b.Normal = normals[5]; T12b.TexCoord = textureCoordinates[14]; T12b.Position = allVertices[4]; verticesT12.Add(T12b);
                VertexData T12c = new VertexData(); T12c.Normal = normals[5]; T12c.TexCoord = textureCoordinates[9]; T12c.Position = allVertices[6]; verticesT12.Add(T12c);
                Face F12 = new Face(verticesT12); faces.Add(F12);
            }

            public void draw() {
                GL.Begin(BeginMode.Triangles);
                foreach (Face face in faces) {
                    foreach (VertexData datapoint in face.verticesWithTexCoords) {
                        GL.Normal3(datapoint.Normal);
                        GL.TexCoord2(datapoint.TexCoord);
                        GL.Vertex3(datapoint.Position);
                    }
                }
                GL.End();
            }
        }
    }

    This gets me a very nice picture: the immediate-mode cube renders nicely and taught me a bit about how to use OpenGL, but VBOs are the way to go. Since I read on the OpenTK forums that OpenTK has problems doing VAs or DLs, I decided to skip using those. Now I've tried to change this cube to a VBO by using the same vertex, normal and texture coordinate collections, making float arrays from them by using the coordinates in combination with uint arrays that contain the index numbers from the immediate cube.
    (See the private functions at the end of the code sample.) Somehow this only renders two triangles:

    namespace SharpEngine.Utility.Mesh {
        using System;
        using System.Collections.Generic;
        using OpenTK;
        using OpenTK.Graphics;
        using OpenTK.Graphics.OpenGL;
        using SharpEngine.Utility;
        using System.Drawing;

        public class VBOFaceBasedCube : IMesh {
            private int VerticesVBOID;
            private int VerticesVBOStride;
            private int VertexCount;
            private int ELementBufferObjectID;
            private int textureCoordinateVBOID;
            private int textureCoordinateVBOStride;
            //private int textureCoordinateArraySize;
            private int normalVBOID;
            private int normalVBOStride;

            public VBOFaceBasedCube() {
                IList<Vector3> allVertices = new List<Vector3>();
                allVertices.Add(new Vector3(1.0f, 1.0f, 1.0f));    //0 rechtsbovenvoor
                allVertices.Add(new Vector3(1.0f, 1.0f, -1.0f));   //1 rechtsbovenachter
                allVertices.Add(new Vector3(-1.0f, 1.0f, -1.0f));  //2 linksbovenachter
                allVertices.Add(new Vector3(-1.0f, 1.0f, 1.0f));   //3 linksbovenvoor
                allVertices.Add(new Vector3(1.0f, -1.0f, 1.0f));   //4 rechtsondervoor
                allVertices.Add(new Vector3(1.0f, -1.0f, -1.0f));  //5 rechtsonderachter
                allVertices.Add(new Vector3(-1.0f, -1.0f, -1.0f)); //6 linksonderachter
                allVertices.Add(new Vector3(-1.0f, -1.0f, 1.0f));  //7 linksondervoor

                IList<Vector2> textureCoordinates = new List<Vector2>();
                textureCoordinates.Add(new Vector2(0, 0));                   //AA - 0
                textureCoordinates.Add(new Vector2(0, 0.3333333f));          //AB - 1
                textureCoordinates.Add(new Vector2(0, 0.6666666f));          //AC - 2
                textureCoordinates.Add(new Vector2(0, 1));                   //AD - 3
                textureCoordinates.Add(new Vector2(0.3333333f, 0));          //BA - 4
                textureCoordinates.Add(new Vector2(0.3333333f, 0.3333333f)); //BB - 5
                textureCoordinates.Add(new Vector2(0.3333333f, 0.6666666f)); //BC - 6
                textureCoordinates.Add(new Vector2(0.3333333f, 1));          //BD - 7
                textureCoordinates.Add(new Vector2(0.6666666f, 0));          //CA - 8
                textureCoordinates.Add(new Vector2(0.6666666f, 0.3333333f)); //CB - 9
                textureCoordinates.Add(new Vector2(0.6666666f, 0.6666666f)); //CC - 10
                textureCoordinates.Add(new Vector2(0.6666666f, 1));          //CD - 11
                textureCoordinates.Add(new Vector2(1, 0));                   //DA - 12
                textureCoordinates.Add(new Vector2(1, 0.3333333f));          //DB - 13
                textureCoordinates.Add(new Vector2(1, 0.6666666f));          //DC - 14
                textureCoordinates.Add(new Vector2(1, 1));                   //DD - 15

                Vector3 copy1 = new Vector3(-2.0f, -2.5f, -3.5f);

                IList<Vector3> normals = new List<Vector3>();
                normals.Add(new Vector3(0, 1.0f, 0));  //0
                normals.Add(new Vector3(0, 0, 1.0f));  //1
                normals.Add(new Vector3(1.0f, 0, 0));  //2
                normals.Add(new Vector3(0, 0, -1.0f)); //3
                normals.Add(new Vector3(-1.0f, 0, 0)); //4
                normals.Add(new Vector3(0, -1.0f, 0)); //5

                //todo: VBO based rendering
                uint[] vertexElements = {
                    3,0,1, //01
                    1,2,3, //02
                    3,7,0, //03
                    0,7,4, //04
                    0,4,1, //05
                    4,5,1, //06
                    5,2,1, //07
                    2,5,6, //08
                    6,3,2, //09
                    6,7,5, //10
                    7,6,4, //11
                    5,4,6  //12
                };
                VertexCount = vertexElements.Length;
                IList<uint> vertexElementList = new List<uint>(vertexElements);

                uint[] normalElements = {
                    0,0,0, 0,0,0,
                    1,1,1, 1,1,1,
                    2,2,2, 2,2,2,
                    3,3,3, 3,3,3,
                    4,4,4, 4,4,4,
                    5,5,5, 5,5,5
                };
                IList<uint> normalElementList = new List<uint>(normalElements);

                uint[] textureIndexArray = {
                    5,9,10, 10,6,5,
                    1,0,5, 5,0,4,
                    2,1,6, 1,5,6,
                    4,9,5, 9,4,8,
                    8,13,9, 8,12,13,
                    10,9,14, 13,14,9
                };
                //textureCoordinateArraySize = textureIndexArray.Length;
                IList<uint> textureIndexList = new List<uint>(textureIndexArray);

                LoadVBO(allVertices, normals, textureCoordinates, vertexElements, normalElementList, textureIndexList);
            }

            public void draw() {
                //bind vertices, elements, normals and texture coordinates
                GL.EnableClientState(ArrayCap.VertexArray);
                GL.EnableClientState(ArrayCap.NormalArray);
                GL.EnableClientState(ArrayCap.TextureCoordArray);
                GL.BindBuffer(BufferTarget.ArrayBuffer, VerticesVBOID);
                GL.VertexPointer(3, VertexPointerType.Float, VerticesVBOStride, 0);
                GL.BindBuffer(BufferTarget.ArrayBuffer, normalVBOID);
                GL.NormalPointer(NormalPointerType.Float, normalVBOStride, 0);
                GL.BindBuffer(BufferTarget.ArrayBuffer, textureCoordinateVBOID);
                GL.TexCoordPointer(2, TexCoordPointerType.Float, textureCoordinateVBOStride, 0);
                GL.BindBuffer(BufferTarget.ElementArrayBuffer, ELementBufferObjectID);
                GL.DrawElements(BeginMode.Polygon, VertexCount, DrawElementsType.UnsignedShort, 0);
            }

            //loads a static VBO
            void LoadVBO(IList<Vector3> vertices, IList<Vector3> normals, IList<Vector2> texcoords, uint[] elements, IList<uint> normalIndices, IList<uint> texCoordIndices) {
                int size;
                //todo
                // To create a VBO:
                // 1) Generate the buffer handles for the vertex and element buffers.
                // 2) Bind the vertex buffer handle and upload your vertex data. Check that the buffer was uploaded correctly.
                // 3) Bind the element buffer handle and upload your element data. Check that the buffer was uploaded correctly.
                float[] verticesArray = convertVector3fListToFloatArray(vertices);
                float[] normalsArray = createFloatArrayFromListOfVector3ElementsAndIndices(normals, normalIndices);
                float[] textureCoordinateArray = createFloatArrayFromListOfVector2ElementsAndIndices(texcoords, texCoordIndices);

                GL.GenBuffers(1, out VerticesVBOID);
                GL.BindBuffer(BufferTarget.ArrayBuffer, VerticesVBOID);
                Console.WriteLine("load 1 - vertices");
                VerticesVBOStride = BlittableValueType.StrideOf(verticesArray);
                GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(verticesArray.Length * sizeof(float)), verticesArray, BufferUsageHint.StaticDraw);
                GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size);
                if (verticesArray.Length * BlittableValueType.StrideOf(verticesArray) != size) {
                    throw new ApplicationException("Vertex data not uploaded correctly");
                } else {
                    Console.WriteLine("load 1 finished ok");
                    size = 0;
                }

                Console.WriteLine("load 2 - elements");
                GL.GenBuffers(1, out ELementBufferObjectID);
                GL.BindBuffer(BufferTarget.ElementArrayBuffer, ELementBufferObjectID);
                GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(elements.Length * sizeof(uint)), elements, BufferUsageHint.StaticDraw);
                GL.GetBufferParameter(BufferTarget.ElementArrayBuffer, BufferParameterName.BufferSize, out size);
                if (elements.Length * sizeof(uint) != size) {
                    throw new ApplicationException("Element data not uploaded correctly");
                } else {
                    size = 0;
                    Console.WriteLine("load 2 finished ok");
                }

                GL.GenBuffers(1, out normalVBOID);
                GL.BindBuffer(BufferTarget.ArrayBuffer, normalVBOID);
                Console.WriteLine("load 3 - normals");
                normalVBOStride = BlittableValueType.StrideOf(normalsArray);
                GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(normalsArray.Length * sizeof(float)), normalsArray, BufferUsageHint.StaticDraw);
                GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size);
                Console.WriteLine("load 3 - pre check");
                if (normalsArray.Length * BlittableValueType.StrideOf(normalsArray) != size) {
                    throw new ApplicationException("Normal data not uploaded correctly");
                } else {
                    Console.WriteLine("load 3 finished ok");
                    size = 0;
                }

                GL.GenBuffers(1, out textureCoordinateVBOID);
                GL.BindBuffer(BufferTarget.ArrayBuffer, textureCoordinateVBOID);
                Console.WriteLine("load 4 - texture coordinates");
                textureCoordinateVBOStride = BlittableValueType.StrideOf(textureCoordinateArray);
                GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(textureCoordinateArray.Length * textureCoordinateVBOStride), textureCoordinateArray, BufferUsageHint.StaticDraw);
                GL.GetBufferParameter(BufferTarget.ArrayBuffer, BufferParameterName.BufferSize, out size);
                if (textureCoordinateArray.Length * BlittableValueType.StrideOf(textureCoordinateArray) != size) {
                    throw new ApplicationException("texture coordinate data not uploaded correctly");
                } else {
                    Console.WriteLine("load 4 finished ok");
                    size = 0;
                }
            }

            //used to convert vertex arrays for use with VBOs
            private float[] convertVector3fListToFloatArray(IList<Vector3> input) {
                int arrayElementCount = input.Count * 3;
                float[] output = new float[arrayElementCount];
                int fillCount = 0;
                foreach (Vector3 v in input) {
                    output[fillCount] = v.X;
                    output[fillCount + 1] = v.Y;
                    output[fillCount + 2] = v.Z;
                    fillCount += 3;
                }
                return output;
            }

            //used for converting texture coordinate arrays for use with VBOs
            private float[] convertVector2List_to_floatArray(IList<Vector2> input) {
                int arrayElementCount = input.Count * 2;
                float[] output = new float[arrayElementCount];
                int fillCount = 0;
                foreach (Vector2 v in input) {
                    output[fillCount] = v.X;
                    output[fillCount + 1] = v.Y;
                    fillCount += 2;
                }
                return output;
            }

            //used to create an array of floats from a list of vectors and an index list
            private float[] createFloatArrayFromListOfVector3ElementsAndIndices(IList<Vector3> inputVectors, IList<uint> indices) {
                int arrayElementCount = inputVectors.Count * indices.Count * 3;
                float[] output = new float[arrayElementCount];
                int fillCount = 0;
                foreach (int i in indices) {
                    output[fillCount] = inputVectors[i].X;
                    output[fillCount + 1] = inputVectors[i].Y;
                    output[fillCount + 2] = inputVectors[i].Z;
                    fillCount += 3;
                }
                return output;
            }

            private float[] createFloatArrayFromListOfVector2ElementsAndIndices(IList<Vector2> inputVectors, IList<uint> indices) {
                int arrayElementCount = inputVectors.Count * indices.Count * 2;
                float[] output = new float[arrayElementCount];
                int fillCount = 0;
                foreach (int i in indices) {
                    output[fillCount] = inputVectors[i].X;
                    output[fillCount + 1] = inputVectors[i].Y;
                    fillCount += 2;
                }
                return output;
            }
        }
    }

    This code will only render two triangles, and they're nothing like what I had in mind. I've done some searching; in some other questions I read that if I did something wrong, I'd get no rendering at all. Clearly something gets sent to the GFX card, but it might be that I'm not sending the right data. I've tried altering the sequence in which the triangles are rendered by swapping some of the index numbers in the vertex, texture coordinate and normal index arrays, but this doesn't seem to have any effect. I'm slightly lost here. What am I doing wrong?
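    An editor's hedged observation, not from the original post: two mismatches stand out. The element buffer is filled with uint indices but drawn with DrawElementsType.UnsignedShort, and BeginMode.Polygon is used where the data describes triangles. On top of that, the positions are uploaded un-expanded (8 entries) while the normals and texture coordinates are expanded per index (36 entries), so a single index buffer cannot address all three arrays consistently; the expansion helpers also over-allocate (indices.Count * 3 floats is enough), which leaves trailing zeros. A minimal corrected sketch, reusing the post's own names:

    // Expand the positions exactly like the normals/texcoords, so that
    // element i of every array belongs to the same vertex, then draw all
    // 36 vertices as triangles with an index type matching the upload.
    float[] verticesArray = createFloatArrayFromListOfVector3ElementsAndIndices(vertices, new List<uint>(elements));
    uint[] sequentialElements = new uint[elements.Length];
    for (int i = 0; i < sequentialElements.Length; i++) {
        sequentialElements[i] = (uint)i; // indices become trivial: 0..35
    }
    // upload verticesArray and sequentialElements as before, then:
    GL.DrawElements(BeginMode.Triangles, VertexCount, DrawElementsType.UnsignedInt, 0);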

    Read the article

  • CodePlex Daily Summary for Sunday, December 26, 2010

    Popular Releases:
    - NoSimplerAccounting: NoSimplerAccounting 6.0: Fixed a bug in the expense category report.
    - Temporary Data Storage Folder: TDS Folder source code: This is the latest version 0.2 Beta code. To obtain the password, request it at neutron.request@gmail.com
    - NHibernate Mapping Generator: NHibernate Mapping Generator 2.0: Added support for Postgres (thanks to Angelo)
    - NewLife XCode: XCode v6.5.2010.1223: release notes are mis-encoded Chinese in the source; they mention NewLife.Core, NewLife.Net, XControl, XTemplate, XAgent, NewLife.CommonEnitty, XCoder and XCoder_Src.
    - Windows Workflow Foundation on Codeplex: Microsoft.Activities.UnitTesting 1.71: New in this release: episodes as tasks using the Task Parallel Library; CHM help file now included in the release; StyleCop compliance is resulting in massive refactoring but should not cause breaking changes. Breaking change: DefaultTimeout is now a TimeSpan; removed default parameters using int timeout = DefaultTimeout and added overloads instead.
    - MiniTwitter: 1.64: release notes are mis-encoded Japanese in the source; they mention URL handling changes relative to 1.63.
    - Ajax ASP.Net Forum: InSeCla Forum Software v0.1.9: VERSION 0.1.9, HAPPY CHRISTMAS. Features added: features customizable per category level (customize in the ADMIN/Categories tab); allow anonymous threads and anonymous posts; virtual (friendly) URLs finally added, so some forums (categories) can use virtual URLs while others use normal URLs, which improves SEO indexing results; moderation instant-on (delete thread, move thread) available to users who are members of moderators or administrators; InstantO...
    - VivoSocial: VivoSocial 7.4.0: Please see changes: http://support.vivoware.com/project/ChangeLog.aspx?PROJID=48
    - Umbraco CMS: Umbraco 4.6 Beta - codename JUNO: The Umbraco 4.6 beta (codename JUNO) release contains many new features focusing on an improved installation experience, a number of robust developer features, and more than 89 bug fixes since the 4.5.2 release. Improved installer experience; updated starter kits (Simple, Blog, Personal, Business); beautiful, free, customizable skins included; skinning engine and skin customization (see the Skinning Documentation Kit); default dashboards on install with hide option; updated login t...
    - SSH.NET Library: 2010.12.23: This release includes some bug fixes and a few new features. Fixes: allow retrieving big directory structures; the ssh-dss algorithm is fixed; populate sftp file attributes. New features: support for a passphrase when a private key is used; support added for the diffie-hellman-group14-sha1, diffie-hellman-group-exchange-sha256 and diffie-hellman-group-exchange-sha1 key exchange algorithms; allow providing multiple key files for authentication; added support for the "keyboard-interactive" authentication method...
    - ASP.NET MVC SiteMap provider: MvcSiteMapProvider 2.3.0: Using NuGet? MvcSiteMapProvider is also listed in the NuGet feed. Like the project? Consider a donation via PayPal. Release notes: this will be the last release targeting ASP.NET MVC 2 and .NET 3.5; MvcSiteMapProvider 3.0.0 will target ASP.NET MVC 3 and .NET 4. The Web.config setting skipAssemblyScanOn has been deprecated in favor of excludeAssembliesForScan and includeAssembliesForScan. ISiteMapNodeUrlResolver is now completely responsible for generating th...
    - SuperSocket, an extensible socket application framework: SuperSocket 1.3 beta 2: Compared with SuperSocket 1.3 beta 1: added support for .NET 3.5; replaced the Logging Application Block of EntLib with Log4Net; improved the logging code; fixed a bug in the QuickStart sample project; added IPv6 support.
    - TibiaPinger: TibiaPinger v1.0: TibiaPinger v1.0
    - Media Companion: Media Companion 3.400: Extract the entire archive to a folder which has user access rights, e.g. desktop or documents. A manual is included to get you started.
    - Multicore Task Framework: MTF 1.0.1: Release 1.0.1 of Multicore Task Framework.
    - SQL Monitor - tracking sql server activities: SQL Monitor 3.0 alpha 7: 1. added script save/load in the user query window; 2. fixed a problem with the connection dialog when choosing Windows auth but still asking for a user name; 3. auto-open the user table when double-clicking a table node; 4. improved alert messages, added a log-only method.
    - EnhSim: EnhSim 2.2.6 ALPHA: This release supports WoW patch 4.03a at level 85. To use this release you must have the Microsoft Visual C++ 2010 Redistributable Package installed (http://www.microsoft.com/downloads/en/details.aspx?FamilyID=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84). To use the GUI you must have the .NET 4.0 Framework installed (http://www.microsoft.com/downloads/en/details.aspx?FamilyID=9cfb2d51-5ff4-4491-b0e5-b386f32c0992). Fixing up some r...
    - ASP.NET MVC Project Awesome (jQuery Ajax helpers): 1.4.3: Helpers (controls) that you can use to build highly responsive and interactive Ajax-enabled web applications, including Autocomplete, AjaxDropdown, Lookup, Confirm Dialog, Popup Form, Popup and Pager. New: improvements for Confirm, Popup and Popup Form; RenderView controller extension; the user experience for CRUD in the live demo has been substantially improved; added search; all features are shown in the live demo.
    - GanttPlanner: GanttPlanner V1.0: GanttPlanner V1.0 includes GanttPlanner.dll and a demo application.
    - N2 CMS: 2.1 release candidate 3: Web Platform Installer support available. N2 is a lightweight CMS framework for ASP.NET that helps you build great web sites anyone can update. Major changes: support for auto-implemented properties ({get;set;}, based on a contribution by And Poulsen); a bunch of bug fixes; file manager improvements (multiple file upload, resize images to fit); new image gallery; infinite scroll paging on news; content templates. First time with N2? Try the demo site or download one of the templ...
    New Projects:
    - A.I. Semantic Net Tests: A test using "semantic net" artificial intelligence developed in C++.
    - Awesome.ClientID: Awesome.ClientID is an HttpModule which serializes the ClientID of server controls and page/usercontrol properties to JSON, making it easier to work with JavaScript without outputting ClientIDs. A solution for clean ClientIDs for .NET 2.0/3.5.
    - BitTweet: BitTweet is a fast, simple and easy-to-use Twitter client with tweeting, reading and writing functions included. Written in Visual Basic .NET 2008. ~Tweet in a bit!
    - CarRental001: car
    - Crudo: CRUDO - The MCG (Model-Controller-Generator) CGF (Code Generation Framework). Project homepage: http://adityayadav.com/CRUDO_The_MCG_Model_Controller_Generator_CGF_Code_Generation_Framework.aspx Licenses: 1) GPL v2; 2) commercial (contact us for information).
    - Effeks: FX trading sample application using Prism.
    - Enhanced WebPart: Enhanced Webpart is a webpart with an enhanced properties editor. It allows you to display properties of most common types and provides an easy way to add custom controls to display your own types in the webpart properties editor. Fully supports localization.
    - Esteffin's graphic: Graphics exercises, 2010 (original description in Italian).
    - farasun: Source code repository for farasun.wordpress.com
    - GCTF: .NET challenge held at FACISA (original description in Portuguese).
    - Imgshow Plugin for Windows Live Writer: Using Imgshow Query you can publish multimedia content such as YouTube, Facebook video, PDF, and other Imgshow-supported services.
    - Instant Messenger Using WPF and WCF: instant messenger using WCF and WPF.
    - jMenu: Create a DotNetNuke module that covers all kinds of jQuery menu plugins.
    - Mcopy API: Copy all files and directories in the Windows shell. Supports long paths (fewer than 32000 chars) and network paths (e.g. \\server\share or \\127.0.0.1\share).
    - MOSSWebServices: This project contains code for interacting with the 21 web services provided by MOSS, targeted at WSS developers. It currently contains code to interact with the "UserGroups" web service; code for the other MOSS web services will be added soon.
    - Nest Hub - Twitter Client: Nest Hub is a free Twitter client for PC, developed in VB.NET.
    - Neural Networks Library: Neural networks library by Sefnaj.
    - Parallelism: Parallelism - Unified Parallel & Concurrency Framework. Project homepage: http://adityayadav.com/Parallelism.aspx Licenses: 1) GPL v2; 2) commercial (contact us for information).
    - PDF IRM Protector For SharePoint 2007 And 2010: The PDF IRM Protector controls the conversion of PDF documents to their encrypted, rights-managed format and the decryption of PDF documents from their rights-managed format back to their original format. The project targets both SharePoint 2007 and 2010.
    - PHP Form Validator - Isis: Project Isis simplifies the process of form validation for PHP, offering a robust flag and function system that lets you encode forms with the correct validation data and be done.
    - Recycle Me: An open source project to help and educate people regarding the reuse and recycling of daily-life items. This is a social project, so we need funds for research and surveys. Volunteers are most welcome to contribute, and certificates for participation are available. Thanks.
    - Sencha ExtJS Import Library for Script#: Script# import library for the Sencha Ext JS JavaScript framework.
    - Separating Vertices: Given a graph as a collection of edges and vertices, it identifies the separating vertices, biconnected components, and edges.
    - SIG - SISTEMA DE GESTÃO INTERNA: A project made for the company MODEMTELECOM, a telecommunications consultancy (original description in Portuguese).
    - Smart Card COM: Smart Card COM library, useful for accessing smart cards from a web page in Internet Explorer.
    - Social Hub: Social Hub
    - Stock Research: Test project for stock research.
    - taokeba: description mis-encoded Chinese in the source.
    - Temporary Data Storage Folder: This software creates a folder that stores temporary data for temporary tasks, such as a file that is to be deleted after some time; it automatically deletes those files on exit. Learn more at www.neutronsoftwares.weebly.com
    - TukUI Updater: An open-source Windows updater tool for the World of Warcraft(R) addon TukUI, developed in .NET 3.5. Discussion thread at the official TukUI forums: http://www.tukui.org/v2/forums/topic.php?id=4982
    - WebUtil: This is a project for using all features of the web.

    Read the article

  • Combine 3D objects in XNA 4

    - by Christoph
    I am currently writing my university thesis; the topic is 3D visualization of hierarchical structures using cone trees. What I want to do is draw a cone and arrange a number of spheres at the bottom of the cone. The spheres should be arranged correctly according to the radius and the number of spheres. As you can imagine, I need a lot of these cone/sphere combinations. First attempt: I was able to find some tutorials that helped with drawing cones and spheres. Cone:

    public Cone(GraphicsDevice device, float height, int tessellation, string name, List<Sphere> children) {
        //prepare children and calculate the children spacing and radius of the cone
        if (children == null || children.Count == 0) {
            throw new ArgumentNullException("children");
        }
        this.Height = height;
        this.Name = name;
        this.Children = children;
        //create the cone
        if (tessellation < 3) {
            throw new ArgumentOutOfRangeException("tessellation");
        }
        //Create a ring of triangles around the outside of the cone's bottom
        for (int i = 0; i < tessellation; i++) {
            Vector3 normal = this.GetCircleVector(i, tessellation);
            //add the vertices for the top of the cone
            base.AddVertex(Vector3.Up * height, normal);
            //add the bottom circle
            base.AddVertex(normal * this.radius + Vector3.Down * height, normal);
            //Add indices
            base.AddIndex(i * 2);
            base.AddIndex(i * 2 + 1);
            base.AddIndex((i * 2 + 2) % (tessellation * 2));
            base.AddIndex(i * 2 + 1);
            base.AddIndex((i * 2 + 3) % (tessellation * 2));
            base.AddIndex((i * 2 + 2) % (tessellation * 2));
        }
        //create flat triangles to seal the bottom
        this.CreateCap(tessellation, height, this.Radius, Vector3.Down);
        base.InitializePrimitive(device);
    }

    Sphere:

    public void Initialize(GraphicsDevice device, Vector3 qi) {
        int verticalSegments = this.Tesselation;
        int horizontalSegments = this.Tesselation * 2;
        //single vertex on the bottom
        base.AddVertex((qi * this.Radius) + this.lowering, Vector3.Down);
        for (int i = 0; i < verticalSegments; i++) {
            float latitude = ((i + 1) * MathHelper.Pi / verticalSegments) - MathHelper.PiOver2;
            float dy = (float)Math.Sin(latitude);
            float dxz = (float)Math.Cos(latitude);
            //Create a single ring of latitudes
            for (int j = 0; j < horizontalSegments; j++) {
                float longitude = j * MathHelper.TwoPi / horizontalSegments;
                float dx = (float)Math.Cos(longitude) * dxz;
                float dz = (float)Math.Sin(longitude) * dxz;
                Vector3 normal = new Vector3(dx, dy, dz);
                base.AddVertex(normal * this.Radius, normal);
            }
        }
        // Finish with a single vertex at the top of the sphere.
        AddVertex((qi * this.Radius) + this.lowering, Vector3.Up);
        // Create a fan connecting the bottom vertex to the bottom latitude ring.
        for (int i = 0; i < horizontalSegments; i++) {
            AddIndex(0);
            AddIndex(1 + (i + 1) % horizontalSegments);
            AddIndex(1 + i);
        }
        // Fill the sphere body with triangles joining each pair of latitude rings.
        for (int i = 0; i < verticalSegments - 2; i++) {
            for (int j = 0; j < horizontalSegments; j++) {
                int nextI = i + 1;
                int nextJ = (j + 1) % horizontalSegments;
                base.AddIndex(1 + i * horizontalSegments + j);
                base.AddIndex(1 + i * horizontalSegments + nextJ);
                base.AddIndex(1 + nextI * horizontalSegments + j);
                base.AddIndex(1 + i * horizontalSegments + nextJ);
                base.AddIndex(1 + nextI * horizontalSegments + nextJ);
                base.AddIndex(1 + nextI * horizontalSegments + j);
            }
        }
        // Create a fan connecting the top vertex to the top latitude ring.
        for (int i = 0; i < horizontalSegments; i++) {
            base.AddIndex(CurrentVertex - 1);
            base.AddIndex(CurrentVertex - 2 - (i + 1) % horizontalSegments);
            base.AddIndex(CurrentVertex - 2 - i);
        }
        base.InitializePrimitive(device);
    }

    The tricky part now is to arrange the spheres at the bottom of the cone. I tried drawing just the cone and then drawing the spheres, but I need a lot of these cones, so it would be pretty hard to calculate all the positions correctly. Second attempt: the second try was to generate one object that builds all vertices of the cone and all of the spheres at once, hoping to render a cone with all its spheres arranged correctly. After a short debugging session I found that the cone is created, and so is the first sphere; when it's the second sphere's turn, I run into an OutOfBoundsException at ushort.MaxValue. Cone and spheres:

    public ConeWithSpheres(GraphicsDevice device, float height, float coneDiameter, float sphereDiameter, int coneTessellation, int sphereTessellation, int numberOfSpheres) {
        if (coneTessellation < 3) {
            throw new ArgumentException(string.Format("{0} is too small for the tessellation of the cone. The number must be greater than or equal to 3", coneTessellation));
        }
        if (sphereTessellation < 3) {
            throw new ArgumentException(string.Format("{0} is too small for the tessellation of the sphere. The number must be greater than or equal to 3", sphereTessellation));
        }
        //set properties
        this.Height = height;
        this.ConeDiameter = coneDiameter;
        this.SphereDiameter = sphereDiameter;
        this.NumberOfChildren = numberOfSpheres;
        //end set properties
        //generate the cone
        this.GenerateCone(device, coneTessellation);
        //generate the spheres
        //vector that defines the Y position of the sphere on the cone's bottom
        Vector3 lowering = new Vector3(0, 0.888f, 0);
        this.GenerateSpheres(device, sphereTessellation, numberOfSpheres, lowering);
    }

    // ------ GENERATE CONE ------
    private void GenerateCone(GraphicsDevice device, int coneTessellation) {
        int doubleTessellation = coneTessellation * 2;
        //Create a ring of triangles around the outside of the cone's bottom
        for (int index = 0; index < coneTessellation; index++) {
            Vector3 normal = this.GetCircleVector(index, coneTessellation);
            //add the vertices for the top of the cone
            base.AddVertex(Vector3.Up * this.Height, normal);
            //add the bottom of the cone
            base.AddVertex(normal * this.ConeRadius + Vector3.Down * this.Height, normal);
            //add indices
            base.AddIndex(index * 2);
            base.AddIndex(index * 2 + 1);
            base.AddIndex((index * 2 + 2) % doubleTessellation);
            base.AddIndex(index * 2 + 1);
            base.AddIndex((index * 2 + 3) % doubleTessellation);
            base.AddIndex((index * 2 + 2) % doubleTessellation);
        }
        //create flat triangles to seal the bottom
        this.CreateCap(coneTessellation, this.Height, this.ConeRadius, Vector3.Down);
        base.InitializePrimitive(device);
    }

    // ------ GENERATE SPHERES ------
    private void GenerateSpheres(GraphicsDevice device, int sphereTessellation, int numberOfSpheres, Vector3 lowering) {
        int verticalSegments = sphereTessellation;
        int horizontalSegments = sphereTessellation * 2;
        for (int childCount = 1; childCount < numberOfSpheres; childCount++) {
            //single vertex at the bottom of the sphere
            base.AddVertex((this.GetCircleVector(childCount, this.NumberOfChildren) * this.SphereRadius) + lowering, Vector3.Down);
            for (int verticalSegmentsCount = 0; verticalSegmentsCount < verticalSegments; verticalSegmentsCount++) {
                float latitude = ((verticalSegmentsCount + 1) * MathHelper.Pi / verticalSegments) - MathHelper.PiOver2;
                float dy = (float)Math.Sin(latitude);
                float dxz = (float)Math.Cos(latitude);
                //create a single ring of latitudes
                for (int horizontalSegmentsCount = 0; horizontalSegmentsCount < horizontalSegments; horizontalSegmentsCount++) {
                    float longitude = horizontalSegmentsCount * MathHelper.TwoPi / horizontalSegments;
                    float dx = (float)Math.Cos(longitude) * dxz;
                    float dz = (float)Math.Sin(longitude) * dxz;
                    Vector3 normal = new Vector3(dx, dy, dz);
                    base.AddVertex((normal * this.SphereRadius) + lowering, normal);
                }
            }
            //finish with a single vertex at the top of the sphere
            base.AddVertex((this.GetCircleVector(childCount, this.NumberOfChildren) * this.SphereRadius) + lowering, Vector3.Up);
            //create a fan connecting the bottom vertex to the bottom latitude ring
            for (int i = 0; i < horizontalSegments; i++) {
                base.AddIndex(0);
                base.AddIndex(1 + (i + 1) % horizontalSegments);
                base.AddIndex(1 + i);
            }
            //Fill the sphere body with triangles joining each pair of latitude rings
            for (int i = 0; i < verticalSegments - 2; i++) {
                for (int j = 0; j < horizontalSegments; j++) {
                    int nextI = i + 1;
                    int nextJ = (j + 1) % horizontalSegments;
                    base.AddIndex(1 + i * horizontalSegments + j);
                    base.AddIndex(1 + i * horizontalSegments + nextJ);
                    base.AddIndex(1 + nextI * horizontalSegments + j);
                    base.AddIndex(1 + i * horizontalSegments + nextJ);
                    base.AddIndex(1 + nextI * horizontalSegments + nextJ);
                    base.AddIndex(1 + nextI * horizontalSegments + j);
                }
            }
            //create a fan connecting the top vertex to the top latitude ring
            for (int i = 0; i < horizontalSegments; i++) {
                base.AddIndex(this.CurrentVertex - 1);
                base.AddIndex(this.CurrentVertex - 2 - (i + 1) % horizontalSegments);
                base.AddIndex(this.CurrentVertex - 2 - i);
            }
            base.InitializePrimitive(device);
        }
    }

    Any ideas how I can fix this?
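    An editor's hedged note on the exception, not from the original post: this code looks based on the XNA GeometricPrimitive sample, which (assuming that base class) stores indices as 16-bit ushorts, and the fan loops here always count from index 0, so every sphere after the first points back into the first sphere's vertices while the running vertex count can push AddIndex past the 16-bit range. The loop starting at childCount = 1 also skips one sphere, and InitializePrimitive is called once per iteration instead of once at the end. A sketch of the usual fix, assuming the base class exposes CurrentVertex:

    // Remember how many vertices exist before this sphere is emitted,
    // and offset all of its indices by that amount.
    int baseVertex = this.CurrentVertex;
    // bottom fan, now indexing this sphere's own vertices:
    for (int i = 0; i < horizontalSegments; i++) {
        base.AddIndex(baseVertex);
        base.AddIndex(baseVertex + 1 + (i + 1) % horizontalSegments);
        base.AddIndex(baseVertex + 1 + i);
    }
    // ...apply the same baseVertex offset to the body and top-fan loops...
    // Then call base.InitializePrimitive(device) once, after the loop over
    // all spheres, and either keep the total vertex count under
    // ushort.MaxValue or switch the index buffer to 32-bit indices.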

    Read the article

  • OpenGL-ES Texture Mapping. Texture is reversed?

    - by Feet
    I am trying to get my head around texture mapping. I thought I had it the other day after asking this; however, I am having some trouble with my texture coordinates being flipped from what I expect. I am loading my texture like so:

    int[] textures = new int[1];
    gl.glGenTextures(1, textures, 0);
    _textureID = textures[0];
    gl.glBindTexture(GL10.GL_TEXTURE_2D, _textureID);
    Bitmap bmp = BitmapFactory.decodeResource(_context.getResources(), R.drawable.die_1);
    GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bmp, 0);
    gl.glTexParameterx(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_LINEAR);
    gl.glTexParameterx(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
    bmp.recycle();

    My cube is this:

    float vertices[] = {
        // Front face
        -width, -height,  depth, // 0
         width, -height,  depth, // 1
         width,  height,  depth, // 2
        -width,  height,  depth, // 3
        // Back face
         width, -height, -depth, // 4
        -width, -height, -depth, // 5
        -width,  height, -depth, // 6
         width,  height, -depth, // 7
        // Left face
        -width, -height, -depth, // 8
        -width, -height,  depth, // 9
        -width,  height,  depth, // 10
        -width,  height, -depth, // 11
        // Right face
         width, -height,  depth, // 12
         width, -height, -depth, // 13
         width,  height, -depth, // 14
         width,  height,  depth, // 15
        // Top face
        -width,  height,  depth, // 16
         width,  height,  depth, // 17
         width,  height, -depth, // 18
        -width,  height, -depth, // 19
        // Bottom face
        -width, -height, -depth, // 20
         width, -height, -depth, // 21
         width, -height,  depth, // 22
        -width, -height,  depth, // 23
    };

    short indices[] = {
        // Front
        0,1,2, 0,2,3,
        // Back
        4,5,6, 4,6,7,
        // Left
        8,9,10, 8,10,11,
        // Right
        12,13,14, 12,14,15,
        // Top
        16,17,18, 16,18,19,
        // Bottom
        20,21,22, 20,22,23,
    };

    float texCoords[] = {
        // Front face textured only, for simplicity
        0.0f, 1.0f,
        1.0f, 1.0f,
        0.0f, 0.0f,
        1.0f, 0.0f };

    And it is drawn like so:

    // Counter-clockwise winding.
    gl.glFrontFace(GL10.GL_CCW);
    // Enable face culling.
    gl.glEnable(GL10.GL_CULL_FACE);
    // What faces to remove with the face culling.
    gl.glCullFace(GL10.GL_BACK);
    // Enable the vertices buffer for writing and to be used during rendering.
    gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
    // Specify the location and data format of an array of vertex coordinates to use when rendering.
    gl.glVertexPointer(3, GL10.GL_FLOAT, 0, verticesBuffer);
    if (normalsBuffer != null) {
        // Enable the normal buffer for writing and to be used during rendering.
        gl.glEnableClientState(GL10.GL_NORMAL_ARRAY);
        // Specify the location and data format of an array of normals to use when rendering.
        gl.glNormalPointer(GL10.GL_FLOAT, 0, normalsBuffer);
    }
    // Set flat color
    gl.glColor4f(rgba[0], rgba[1], rgba[2], rgba[3]);
    // Smooth color
    if (colorBuffer != null) {
        // Enable the color array buffer to be used during rendering.
        gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
        // Point out where the color buffer is.
        gl.glColorPointer(4, GL10.GL_FLOAT, 0, colorBuffer);
    }
    // Use textures?
    if (textureBuffer != null) {
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
        gl.glEnable(GL10.GL_TEXTURE_2D);
        gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer);
    }
    // Translation and rotation before drawing
    gl.glTranslatef(x, y, z);
    gl.glRotatef(rx, 1, 0, 0);
    gl.glRotatef(ry, 0, 1, 0);
    gl.glRotatef(rz, 0, 0, 1);
    gl.glDrawElements(GL10.GL_TRIANGLES, numOfIndices, GL10.GL_UNSIGNED_SHORT, indicesBuffer);
    // Disable the vertices buffer.
    gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
    gl.glDisableClientState(GL10.GL_COLOR_ARRAY);
    gl.glDisableClientState(GL10.GL_NORMAL_ARRAY);
    gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    // Disable face culling.
    gl.glDisable(GL10.GL_CULL_FACE);

    However, my front face looks like this (image missing from the listing). I should also add that I haven't set any normals; are textures affected by normals? With these coordinates instead:

    float texCoords[] = {
        // Front
        1.0f, 1.0f,
        0.0f, 1.0f,
        0.0f, 0.0f,
        1.0f, 0.0f };

    it seems as if the texture is being flipped, so the coordinates don't match up properly?
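    An editor's note, not from the post: this flip is usually the bitmap's origin, not the geometry. Android's BitmapFactory decodes with the origin at the top-left, while OpenGL's texture coordinate (0,0) is at the bottom-left, so an unmodified upload appears vertically mirrored; normals play no role in texturing (they only feed lighting). One common fix, sketched under those assumptions, is to mirror the bitmap before uploading:

    // Flip the decoded bitmap vertically so its row order matches GL's
    // bottom-left texture-coordinate origin.
    Matrix flip = new Matrix();
    flip.postScale(1f, -1f);
    Bitmap flipped = Bitmap.createBitmap(bmp, 0, 0, bmp.getWidth(), bmp.getHeight(), flip, true);
    GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, flipped, 0);
    flipped.recycle();
    bmp.recycle();

    The alternative is to keep the bitmap as-is and flip the V coordinates in texCoords, which is effectively what the second array above does.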

    Read the article

  • How do color attributes work in a VBO?

    - by Jayesh
    I am coding to OpenGL ES 2.0 (WebGL). I am using VBOs to draw primitives. I have a vertex array, a color array and an array of indices. I have looked at sample code, books and tutorials, but one thing I don't get: if color is defined per vertex, how does it affect the polygonal surfaces adjacent to those vertices? (I am a newbie to OpenGL (ES).) I will explain with an example. I have a cube to draw. From what I read in an OpenGL ES book, the color is defined as a vertex attribute. In that case, if I want to draw the 6 faces of the cube with 6 different colors, how should I define the colors? The source of my confusion is that each vertex is shared by 3 faces, so how does defining a color per vertex help? (Or should the color be defined per index?) The fact that we need to subdivide these faces into triangles makes it harder for me to understand how this relationship works. The same confusion applies to edges: instead of drawing triangles, let's say I want to draw edges using the LINES primitive, each edge in a different color. How am I supposed to define the color attributes in that case? I have seen a few working examples, specifically this tutorial: http://learningwebgl.com/blog/?p=370 I see how the color array is defined in the above example to draw a cube with 6 differently colored faces, but I don't understand why it is defined that way. (Why is each color copied 4 times into unpackedColors, for instance?) Can someone explain how color attributes work in a VBO? [The link above seems inaccessible, so I will post the relevant code here.]

    cubeVertexPositionBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, cubeVertexPositionBuffer);
    vertices = [
        // Front face
        -1.0, -1.0,  1.0,
         1.0, -1.0,  1.0,
         1.0,  1.0,  1.0,
        -1.0,  1.0,  1.0,
        // Back face
        -1.0, -1.0, -1.0,
        -1.0,  1.0, -1.0,
         1.0,  1.0, -1.0,
         1.0, -1.0, -1.0,
        // Top face
        -1.0,  1.0, -1.0,
        -1.0,  1.0,  1.0,
         1.0,  1.0,  1.0,
         1.0,  1.0, -1.0,
        // Bottom face
        -1.0, -1.0, -1.0,
         1.0, -1.0, -1.0,
         1.0, -1.0,  1.0,
        -1.0, -1.0,  1.0,
        // Right face
         1.0, -1.0, -1.0,
         1.0,  1.0, -1.0,
         1.0,  1.0,  1.0,
         1.0, -1.0,  1.0,
        // Left face
        -1.0, -1.0, -1.0,
        -1.0, -1.0,  1.0,
        -1.0,  1.0,  1.0,
        -1.0,  1.0, -1.0,
    ];
    gl.bufferData(gl.ARRAY_BUFFER, new WebGLFloatArray(vertices), gl.STATIC_DRAW);
    cubeVertexPositionBuffer.itemSize = 3;
    cubeVertexPositionBuffer.numItems = 24;

    cubeVertexColorBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, cubeVertexColorBuffer);
    var colors = [
        [1.0, 0.0, 0.0, 1.0], // Front face
        [1.0, 1.0, 0.0, 1.0], // Back face
        [0.0, 1.0, 0.0, 1.0], // Top face
        [1.0, 0.5, 0.5, 1.0], // Bottom face
        [1.0, 0.0, 1.0, 1.0], // Right face
        [0.0, 0.0, 1.0, 1.0], // Left face
    ];
    var unpackedColors = []
    for (var i in colors) {
        var color = colors[i];
        for (var j = 0; j < 4; j++) {
            unpackedColors = unpackedColors.concat(color);
        }
    }
    gl.bufferData(gl.ARRAY_BUFFER, new WebGLFloatArray(unpackedColors), gl.STATIC_DRAW);
    cubeVertexColorBuffer.itemSize = 4;
    cubeVertexColorBuffer.numItems = 24;

    cubeVertexIndexBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, cubeVertexIndexBuffer);
    var cubeVertexIndices = [
         0,  1,  2,    0,  2,  3, // Front face
         4,  5,  6,    4,  6,  7, // Back face
         8,  9, 10,    8, 10, 11, // Top face
        12, 13, 14,   12, 14, 15, // Bottom face
        16, 17, 18,   16, 18, 19, // Right face
        20, 21, 22,   20, 22, 23  // Left face
    ]
    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new WebGLUnsignedShortArray(cubeVertexIndices), gl.STATIC_DRAW);
    cubeVertexIndexBuffer.itemSize = 1;
    cubeVertexIndexBuffer.numItems = 36;
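    An editor's note on the question itself (added, not from the original post): color is a per-vertex attribute, and a vertex is just a bundle of attributes, so a cube corner that needs three different face colors has to become three distinct vertices. That is why the example uploads 24 positions rather than 8, and why each face color is copied 4 times: once for each of the face's four vertices. The same trick answers the edge case: give every edge its own pair of endpoint vertices. A sketch, with palette as a hypothetical array of 12 RGBA colors:

    // 12 edges, each with its own colour: duplicate the two endpoint
    // vertices per edge and give both copies the edge's colour.
    var edgeColors = [];
    for (var e = 0; e < 12; e++) {
        var c = palette[e];                          // [r, g, b, a] for this edge
        edgeColors = edgeColors.concat(c).concat(c); // one copy per endpoint
    }
    // The 24 endpoint positions (2 per edge) go in the position buffer;
    // gl.drawArrays(gl.LINES, 0, 24) then draws each edge in its own colour.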

    Read the article

  • Longest Path in Boost Graph

    - by TheTSPSolver
    Hi, sorry if this is a very basic question for some of you, but I'm new to C++ (let alone the Boost Graph Library) and couldn't figure out this problem. So far I've been able to put together the code below to create a graph. Now I'm trying to figure out the code to find the longest path in this graph. Can someone please help with what that code would be? I was having trouble figuring out if/how to traverse each node and/or edge when trying to find the path. I have to return all the nodes and edges in the longest path. Any help will be greatly appreciated. P.S. Does anyone know if C++ has organized documentation like Javadoc?

    #include <boost/graph/dag_shortest_paths.hpp>
    #include <boost/graph/adjacency_list.hpp>
    #include <windows.h>
    #include <iostream>

    int main() {
        using namespace boost;
        typedef adjacency_list<vecS, vecS, directedS,
            property<vertex_distance_t, double>,
            property<edge_weight_t, double> > graph_t;
        graph_t g(6);
        enum verts { stationA, stationB, stationC, stationD, stationE, stationF };
        char name[] = "rstuvx";
        add_edge(stationA, stationB, 5000.23, g);
        add_edge(stationA, stationC, 3001, g);
        add_edge(stationA, stationD, 2098.67, g);
        add_edge(stationA, stationE, 3298.84, g);
        add_edge(stationB, stationF, 2145, g);
        add_edge(stationC, stationF, 4290, g);
        add_edge(stationD, stationF, 2672.78, g);
        add_edge(stationE, stationF, 11143.876, g);
        add_edge(stationA, stationF, 1, g);

        // Display all the vertices
        typedef property_map<graph_t, vertex_index_t>::type IndexMap;
        IndexMap index = get(vertex_index, g);
        std::cout << "vertices(g) = ";
        typedef graph_traits<graph_t>::vertex_iterator vertex_iter;
        std::pair<vertex_iter, vertex_iter> vp;
        for (vp = vertices(g); vp.first != vp.second; ++vp.first)
            std::cout << index[*vp.first] << " ";
        std::cout << std::endl;

        // Display all the edges
        std::cout << "edges(g) = " << std::endl;
        graph_traits<graph_t>::edge_iterator ei, ei_end;
        for (tie(ei, ei_end) = edges(g); ei != ei_end; ++ei)
            std::cout << "(" << index[source(*ei, g)] << "," << index[target(*ei, g)] << ") \n";
        std::cout << std::endl;
        // ...
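    An editor's hedged pointer, not part of the original question: longest path is NP-hard on general graphs, but this graph is a DAG, and on a DAG you can negate every edge weight and run dag_shortest_paths (which the code already includes); following the predecessor map back from the target then yields the vertices and edges of the longest path. (For Javadoc-style C++ documentation, Doxygen is the usual tool.) A sketch against the question's graph_t, assuming the edges were added with negated weights and <vector> is included:

    // Longest path in a DAG = shortest path after negating every weight,
    // e.g. add_edge(stationA, stationB, -5000.23, g); and so on.
    std::vector<graph_t::vertex_descriptor> pred(num_vertices(g));
    dag_shortest_paths(g, stationA,
        distance_map(get(vertex_distance, g))
            .predecessor_map(make_iterator_property_map(pred.begin(), get(vertex_index, g))));

    // Walk the predecessor chain back from stationF to recover the path.
    for (graph_t::vertex_descriptor v = stationF; v != stationA; v = pred[v])
        std::cout << pred[v] << " -> " << v << "\n";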

    Read the article

  • Drawing using Dynamic Array and Buffer Object

    - by user1905910
    I have a problem when creating the vertex array and the indices array. I don't know what the problem with the code really is, but I guess it is something to do with the types of the arrays. Can someone please shed some light on this?

    #define GL_GLEXT_PROTOTYPES
    #include <GL/glut.h>
    #include <iostream>
    using namespace std;

    #define BUFFER_OFFSET(offset) ((GLfloat*) NULL + offset)

    const GLuint numDiv = 2;
    const GLuint numVerts = 9;
    GLuint VAO;

    void display(void) {
        enum vertex { VERTICES, INDICES, NUM_BUFFERS };
        GLuint* buffers = new GLuint[NUM_BUFFERS];
        GLfloat (*squareVerts)[2] = new GLfloat[numVerts][2];
        GLubyte* indices = new GLubyte[numDiv * numDiv * 4];
        GLuint delta = 80 / numDiv;

        for (GLuint i = 0; i < numVerts; i++) {
            squareVerts[i][1] = (i / (numDiv + 1)) * delta;
            squareVerts[i][0] = (i % (numDiv + 1)) * delta;
        }

        for (GLuint i = 0; i < numDiv; i++) {
            for (GLuint j = 0; j < numDiv; j++) {
                // each iteration generates 4 points
    #define NUM_VERT(ii,jj) ((ii)*(numDiv+1)+(jj))
    #define INDICE(ii,jj) (4*((ii)*numDiv+(jj)))
                indices[INDICE(i,j)] = NUM_VERT(i,j);
                indices[INDICE(i,j)+1] = NUM_VERT(i,j+1);
                indices[INDICE(i,j)+2] = NUM_VERT(i+1,j+1);
                indices[INDICE(i,j)+3] = NUM_VERT(i+1,j);
            }
        }

        glGenVertexArrays(1, &VAO);
        glBindVertexArray(VAO);
        glGenBuffers(NUM_BUFFERS, buffers);
        glBindBuffer(GL_ARRAY_BUFFER, buffers[VERTICES]);
        glBufferData(GL_ARRAY_BUFFER, sizeof(squareVerts), squareVerts, GL_STATIC_DRAW);
        glVertexPointer(2, GL_FLOAT, 0, BUFFER_OFFSET(0));
        glEnableClientState(GL_VERTEX_ARRAY);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffers[INDICES]);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);

        glClear(GL_COLOR_BUFFER_BIT);
        glColor3f(1.0, 1.0, 1.0);
        glDrawElements(GL_POINTS, 16, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0));
        glutSwapBuffers();
    }

    void init() {
        glClearColor(0.0, 0.0, 0.0, 0.0);
        gluOrtho2D((GLdouble) -1.0, (GLdouble) 90.0, (GLdouble) -1.0, (GLdouble) 90.0);
    }

    int main(int argv, char** argc) {
        glutInit(&argv, argc);
        glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
        glutInitWindowSize(500, 500);
        glutInitWindowPosition(100, 100);
        glutCreateWindow("myCode.cpp");
        init();
        glutDisplayFunc(display);
        glutMainLoop();
        return 0;
    }

    Edit: The problem is that the drawing doesn't work at all. I don't get any error; it just doesn't display what I want. Even if I put the code that creates the vertices and fills the buffers in a different function, it doesn't work. I just tried this:

    void display(void) {
        glBindVertexArray(VAO);
        glClear(GL_COLOR_BUFFER_BIT);
        glColor3f(1.0, 1.0, 1.0);
        glDrawElements(GL_POINTS, 16, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0));
        glutSwapBuffers();
    }

    and placed the rest of the code, which runs at the start of the program, in another function. But the problem is still there.
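    A hedged diagnosis (an editor's addition, not from the post): squareVerts and indices are heap pointers, so sizeof(squareVerts) and sizeof(indices) evaluate to the size of a pointer (typically 8 bytes), not the size of the arrays; both buffers are therefore uploaded almost empty. Passing the real byte counts is the first fix to try:

    // sizeof(pointer) uploaded ~8 bytes; pass the actual array sizes instead.
    glBufferData(GL_ARRAY_BUFFER,
                 numVerts * 2 * sizeof(GLfloat),          // 9 vertices * (x, y)
                 squareVerts, GL_STATIC_DRAW);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                 numDiv * numDiv * 4 * sizeof(GLubyte),   // 4 indices per cell
                 indices, GL_STATIC_DRAW);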

    Read the article

  • How do I use setFilmSize in panda3d to achieve the correct view?

    - by lhk
    I'm working with Panda3d and recently switched my game to isometric rendering. I moved the virtual camera accordingly and set an orthographic lens. Then I implemented the classes "Map" and "Canvas". A canvas is a dynamically generated mesh: a flat quad. I'm using it to render the in-game graphics. Since the game itself is still set in a 3d coordinate system, I'm planning to rely on these canvases to draw sprites. I could have named this class "Tile", but as I'd like to use it for non-tile sketches (enemies, environment) as well, I thought canvas would describe its function better. Map does exactly what its name suggests. Its constructor receives the number of rows and columns and then creates a standard isometric map. It uses the canvas class for tiles. I'm planning to write a map importer that reads a file to create maps on the fly. Here's the canvas implementation: class Canvas: def __init__(self, texture, vertical=False, width=1,height=1): # create the mesh format=GeomVertexFormat.getV3t2() format = GeomVertexFormat.registerFormat(format) vdata=GeomVertexData("node-vertices", format, Geom.UHStatic) vertex = GeomVertexWriter(vdata, 'vertex') texcoord = GeomVertexWriter(vdata, 'texcoord') # add the vertices for a flat quad vertex.addData3f(1, 0, 0) texcoord.addData2f(1, 0) vertex.addData3f(1, 1, 0) texcoord.addData2f(1, 1) vertex.addData3f(0, 1, 0) texcoord.addData2f(0, 1) vertex.addData3f(0, 0, 0) texcoord.addData2f(0, 0) prim = GeomTriangles(Geom.UHStatic) prim.addVertices(0, 1, 2) prim.addVertices(2, 3, 0) self.geom = Geom(vdata) self.geom.addPrimitive(prim) self.node = GeomNode('node') self.node.addGeom(self.geom) # this is the handle for the canvas self.nodePath=NodePath(self.node) self.nodePath.setSx(width) self.nodePath.setSy(height) if vertical: self.nodePath.setP(90) # the most important part: "Drawing" the image self.texture=loader.loadTexture(""+texture+".png") self.nodePath.setTexture(self.texture) Now the code for the Map class: class Map: def __init__(self,rows,columns,size): self.grid=[] for i in range(rows): self.grid.append([]) for j in range(columns): # create a canvas for the tile. For testing the texture is preset tile=Canvas(texture="../assets/textures/flat_concrete",width=size,height=size) x=(i-1)*size y=(j-1)*size # set the tile up for rendering tile.nodePath.reparentTo(render) tile.nodePath.setX(x) tile.nodePath.setY(y) # and store it for later access self.grid[i].append(tile) And finally, the usage: def loadMap(self): self.map=Map(10, 10, 1) This function is called within the constructor of the World class. The instantiation of World is the entry point to the execution. The code is pretty straightforward and runs well. Sadly, the output is not as expected. Please note: the problem is not the white rectangle, that's my player object. The problem is that although the map should have equal width and height, it's stretched weirdly. With orthographic rendering I expected the map to be a perfect square. What did I do wrong? UPDATE: I've changed the viewport. This is how I set up the orthographic camera: lens = OrthographicLens() lens.setFilmSize(40, 20) base.cam.node().setLens(lens) You can change the "aspect" by modifying the parameters of setFilmSize. I don't know exactly how they are related to window size and screen resolution, but after testing a little the values above seem to work for me. Now everything is rendered correctly as long as I don't resize the window. Every change of the window's size, as well as switching to fullscreen, destroys the correct rendering. 
I know that implementing a listener for resize events is not in the scope of this question. However, I wonder why I need to make the film's width two times its height. My window is square! Can you tell me how to find the correct setting for the film size? UPDATE 2: I can imagine that it's hard to envision the behaviour of the game. At first glance the obvious solution is to pass the window's width and height in pixels to setFilmSize. There are two problems with that approach: the parameters for setFilmSize are in-game units, so you'll get far too big a view if you pass the pixel size; and, for some strange reason, the image is distorted if you pass equal values for width and height. Here's the output for setFilmSize(800,800); you'll have to strain your eyes, but you'll see what I mean.
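    A sketch of one way to derive the film size, assuming the goal is a fixed vertical extent in world units (world_height and fit_lens are assumed names, not Panda3D API; the event wiring uses Panda3D's aspectRatioChanged message):

    from panda3d.core import OrthographicLens

    world_height = 20.0                     # vertical world units the camera should show
    lens = OrthographicLens()

    def fit_lens():
        aspect = base.getAspectRatio()      # window width / height
        lens.setFilmSize(world_height * aspect, world_height)

    fit_lens()
    base.cam.node().setLens(lens)
    base.accept('aspectRatioChanged', fit_lens)  # re-fit on resize and fullscreen switches

    If a truly square window still needs a 2:1 film size, it is worth checking whether ShowBase's automatic aspect handling is adjusting the old lens: base.camLens keeps pointing at the lens that was replaced unless it is reassigned (base.camLens = lens).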

    Read the article

  • Memory is full with vertex buffer

    - by Christian Frantz
    I'm having a pretty strange problem that I didn't think I'd run into. I was finally able to store a 50x50 grid in one vertex buffer, in hopes of better performance. Before, each cube had an individual vertex buffer, and with 4 50x50 grids this slowed down my game tremendously. But it still ran. With 4 50x50 grids with my new code, that's only 4 vertex buffers. With the 4 vertex buffers, I get a memory error. When I load the game with 1 grid, it takes forever to load, while with my previous version it started up right away. So I don't know if I'm storing chunks wrong or what, but it has stumped me -.- for (int x = 0; x < 50; x++) { for (int z = 0; z < 50; z++) { for (int y = 0; y <= map[x, z]; y++) { SetUpVertices(); SetUpIndices(); cubes.Add(new Cube(device, new Vector3(x, map[x, z] - y, z), grass)); } } } vertexBuffer = new VertexBuffer(device, typeof(VertexPositionTexture), vertices.Count(), BufferUsage.WriteOnly); vertexBuffer.SetData<VertexPositionTexture>(vertices.ToArray()); indexBuffer = new IndexBuffer(device, typeof(short), indices.Count(), BufferUsage.WriteOnly); indexBuffer.SetData(indices.ToArray()); That's how they're stored. The array I'm reading from is a byte array which defines the coordinates of my map. Now, with my old version I used the same loading from an array, so that hasn't changed. The only difference is the one vertex buffer instead of 2500 for a 50x50 grid. cubes is just a normal list that holds all my cubes for the vertex buffer. Another thing that just came to mind would be my draw calls. If I'm setting an effect for each cube in my cube list, that's probably going to take a lot of memory. How can I avoid doing this? I need the foreach method to set my cubes to the right position: foreach (Cube block in cube.cubes) { effect.VertexColorEnabled = false; effect.TextureEnabled = true; Matrix center = Matrix.CreateTranslation(new Vector3(-0.5f, -0.5f, -0.5f)); Matrix scale = Matrix.CreateScale(1f); Matrix translate = Matrix.CreateTranslation(block.cubePosition); effect.World = center * scale * translate; effect.View = cam.view; effect.Projection = cam.proj; effect.FogEnabled = false; effect.FogColor = Color.CornflowerBlue.ToVector3(); effect.FogStart = 1.0f; effect.FogEnd = 50.0f; cube.Draw(effect); noc++; }
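    One easy win, sketched with the question's own names: everything except the world matrix is identical for every cube, so it can be hoisted out of the loop, leaving only one matrix build per cube.

    // per-frame state: set once, not once per cube
    effect.TextureEnabled = true;
    effect.VertexColorEnabled = false;
    effect.FogEnabled = false;
    effect.View = cam.view;
    effect.Projection = cam.proj;
    Matrix centerScale = Matrix.CreateTranslation(-0.5f, -0.5f, -0.5f) * Matrix.CreateScale(1f);
    foreach (Cube block in cube.cubes)
    {
        effect.World = centerScale * Matrix.CreateTranslation(block.cubePosition);
        cube.Draw(effect);
    }

    And since the whole chunk already lives in one vertex buffer, the bigger saving is drawing it with a single call and one World matrix, with no per-cube loop at all. One more thing worth checking: a 50x50 chunk of stacked cubes can exceed 65,535 vertices, which is beyond the range of a typeof(short) index buffer.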

    Read the article

  • How would you solve this graph theory handshake problem in python?

    - by Zachary Burt
    I graduated college last year with a degree in Psychology, but I also took a lot of math for fun. I recently got the book "Introductory Graph Theory" by Gary Chartrand to brush up on my math and have some fun. Here is an exercise from the book that I'm finding particularly befuddling: Suppose you and your husband attended a party with three other married couples. Several handshakes took place. No one shook hands with himself (or herself) or with his (or her) spouse, and no one shook hands with the same person more than once. After all the handshaking was completed, suppose you asked each person, including your husband, how many hands he or she had shaken. Each person gave a different answer. a) How many hands did you shake? b) How many hands did your husband shake? Now, I've been reasoning about this for a while, and trying to draw sample graphs that could illustrate a solution, but I'm coming up empty-handed. My logic is this: there are 8 different vertices in the graph, and 7 of them have different degrees. The values for the degrees must therefore be 0, 1, 2, 3, 4, 5, 6, and x. The degree pair for one married couple is (0, 6). Since every graph has an even number of odd-degree vertices, x must be either 5, 3, or 1. What's your solution to this problem? And, if you could solve it in python, how would you do it? (python is fun.) Cheers.
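    Your reasoning is on track; pushing it further: the 6-shaker's spouse is the only person who could have answered 0, the 5-shaker's spouse the only possible 1, and the 4-shaker's spouse the only possible 2, which forces you and your husband to be the couple who both shook 3 hands. A small Python check of that construction (the names X6..X0 are mine, with Xk being the person who answered k):

    from collections import Counter

    # couples: (you, husband), (X6, X0), (X5, X1), (X4, X2); no edge joins spouses
    edges = [
        ("X6", "X5"), ("X6", "X4"), ("X6", "X2"), ("X6", "X1"), ("X6", "you"), ("X6", "husband"),
        ("X5", "X4"), ("X5", "X2"), ("X5", "you"), ("X5", "husband"),
        ("X4", "you"), ("X4", "husband"),
    ]
    deg = Counter()
    for a, b in edges:
        deg[a] += 1
        deg[b] += 1

    people = ["you", "husband", "X6", "X5", "X4", "X2", "X1", "X0"]
    print({p: deg[p] for p in people})
    # {'you': 3, 'husband': 3, 'X6': 6, 'X5': 5, 'X4': 4, 'X2': 2, 'X1': 1, 'X0': 0}
    # the seven people you asked gave the seven distinct answers 0..6, and you shook 3

    So the answer to both a) and b) is 3.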

    Read the article

  • Big problem with Dijkstra algorithm in a linked list graph implementation

    - by Nazgulled
    Hi, I have my graph implemented with linked lists, for both vertices and edges, and that is becoming an issue for the Dijkstra algorithm. As I said in a previous question, I'm converting this code that uses an adjacency matrix to work with my graph implementation. The problem is that when I find the minimum value I get an array index. This index would have matched the vertex index if the graph vertices were stored in an array instead. And the access to the vertex would be constant. I don't have time to change my graph implementation, but I do have a hash table, indexed by a unique number (but one that does not start at 0, it's like 100090000), which is the problem I'm having. Whenever I need, I use the modulo operator to get a number between 0 and the total number of vertices. This works fine when I need an array index from the number, but when I need the number from the array index (to access the calculated minimum distance vertex in constant time), not so much. I tried to search for how to invert the modulo operation, like, 100090000 mod 18000 = 10000 and, 10000 invmod 18000 = 100090000 but couldn't find a way to do it. My next alternative is to build some sort of reference array where, in the example above, arr[10000] = 100090000. That would fix the problem, but would require looping over the whole graph one more time. Do I have any better/easier solution with my current graph implementation?
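    Modulo is many-to-one (every id that shares a slot maps to the same remainder), so there is no inverse to compute; but the reference array you describe does not need an extra pass over the graph. A sketch in C, with TABLE_SIZE standing in for your 18000:

    #define TABLE_SIZE 18000

    long id_of[TABLE_SIZE];                /* array index -> original vertex id */

    /* wherever a vertex is first hashed into its slot, record the original id: */
    void place_vertex(long id) {
        int index = (int)(id % TABLE_SIZE);
        /* ... existing hash-table insertion ... */
        id_of[index] = id;                 /* filled as a side effect, no extra loop */
    }

    /* the O(1) lookup that "invmod" was meant to be */
    long original_id(int index) { return id_of[index]; }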

    Read the article

  • OpenGL-ES Texture Atlas. t axis is inverted.

    - by Feet
    I'm mapping a texture from my texture atlas to a square on a cube. For some reason, the t axis is inverted with 0 being at the top and 1 being at the bottom. Also, I have to specify the texture coordinates in clockwise order rather than counter-clockwise. I am using counter-clockwise windings. The vertices, indices and texture coordinates I'm using are below. float vertices[] = { // Front face -width, -height, depth, // 0 width, -height, depth, // 1 width, height, depth, // 2 -width, height, depth, // 3 // Back Face width, -height, -depth, // 4 -width, -height, -depth, // 5 -width, height, -depth, // 6 width, height, -depth, // 7 // Left face -width, -height, -depth, // 8 -width, -height, depth, // 9 -width, height, depth, // 10 -width, height, -depth, // 11 // Right face width, -height, depth, // 12 width, -height, -depth, // 13 width, height, -depth, // 14 width, height, depth, // 15 // Top face -width, height, depth, // 16 width, height, depth, // 17 width, height, -depth, // 18 -width, height, -depth, // 19 // Bottom face -width, -height, -depth, // 20 width, -height, -depth, // 21 width, -height, depth, // 22 -width, -height, depth, // 23 }; short indices[] = { // Front // Back 0,1,2, 0,2,3, 4,5,6, 4,6,7, // Left // Right 8,9,10, 8,10,11, 12,13,14, 12,14,15, // Top // Bottom 16,17,18, 16,18,19, 20,21,22, 20,22,23, }; float textures[] = { // Front 0.0f, 0.0f, 0.25f, 0.0f, 0.25f, 0.25f, 0.0f, 0.25f, // Back 0.25f, 0.0f, 0.50f, 0.0f, 0.50f, 0.25f, 0.25f, 0.25f, // Left 0.50f, 0.0f, 0.75f, 0.0f, 0.75f, 0.25f, 0.50f, 0.25f, // Right 0.75f, 0.0f, 1f, 0.0f, 1f, 0.25f, 0.75f, 0.25f, // Top 0.0f, 0.25f, 0.25f, 0.25f, 0.25f, 0.50f, 0.0f, 0.50f, // Bottom 0.25f, 0.25f, 0.50f, 0.25f, 0.50f, 0.50f, 0.25f, 0.50f, };
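    This is the usual image-space vs. texture-space mismatch: bitmaps are stored top row first, while OpenGL's t axis runs bottom-to-top, so atlas coordinates written with "0 at the top" land upside down, and only a clockwise listing appears to compensate. A hedged alternative to hand-flipping every value is to convert once at load time (t' = 1 - t):

    // convert texture coordinates authored in image space (t = 0 at the top)
    // into GL space (t = 0 at the bottom); every second float is the t of an (s, t) pair
    float[] flipped = new float[textures.length];
    for (int i = 0; i < textures.length; i += 2) {
        flipped[i] = textures[i];               // s is unchanged
        flipped[i + 1] = 1.0f - textures[i + 1];
    }

    Once t is converted this way, the counter-clockwise ordering you expect should line up again.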

    Read the article

  • OpenGL ES - texture map all faces of an 8 vertex cube?

    - by Feet
    Working through some OpenGL-ES tutorials, using the Android emulator. I've gotten up to texture mapping and am having some trouble mapping to a cube. Is it possible to map a texture to all faces of a cube that has 8 vertices and 12 triangles for the 6 faces as described below? // Use half as we are going for a 0,0,0 centre. width /= 2; height /= 2; depth /= 2; float vertices[] = { -width, -height, depth, // 0 width, -height, depth, // 1 width, height, depth, // 2 -width, height, depth, // 3 -width, -height, -depth, // 4 width, -height, -depth, // 5 width, height, -depth, // 6 -width, height, -depth, // 7 }; short indices[] = { // Front 0,1,2, 0,2,3, // Back 5,4,7, 5,7,6, // Left 4,0,3, 4,3,7, // Right 1,5,6, 1,6,2, // Top 3,2,6, 3,6,7, // Bottom 4,5,1, 4,1,0, }; float texCoords[] = { 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, }; I have gotten the front and back faces working correctly, however, none of the other faces are showing the texture.
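    Short answer, hedged: not with shared corners. Each of the 8 vertices carries exactly one (u, v) pair, and every corner is shared by three faces that want different coordinates, so only the faces that happen to agree (front and back here) look right. The standard fix is 24 vertices, four per face, with each face indexing its own quad; e.g. the left face alone:

    // four dedicated vertices for the left face (positions 8..11 in a 24-vertex array)
    float[] leftFaceVerts = {
        -width, -height, -depth,   // 8
        -width, -height,  depth,   // 9
        -width,  height,  depth,   // 10
        -width,  height, -depth,   // 11
    };
    short[] leftFaceIndices = { 8, 9, 10,  8, 10, 11 };
    float[] leftFaceTexCoords = { 1.0f, 1.0f,  0.0f, 1.0f,  0.0f, 0.0f,  1.0f, 0.0f };

    This mirrors the texture-atlas cube in the previous question, whose 24-vertex layout can be reused wholesale.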

    Read the article

  • Attempted to render a circle in opengl es 1.1, renders as oval

    - by eipxen
    Hi all, I attempted to render a circle in OpenGL ES 1.1 as a test before building a larger program, but it renders as an oval. Here is the code I use to generate and render my vertices: static const int numVerts = 40; static GLfloat myFirstCircle[82]; myFirstCircle[0] = 0.0f; myFirstCircle[1] = 0.0f; for (int i = 2; i < (numVerts+1)*2; i+=2) { myFirstCircle[i] = .5 * cosf(i*2*3.14159/numVerts); myFirstCircle[i+1] = .5 * sinf(i*2*3.14159/numVerts); } glVertexPointer(2, GL_FLOAT, 0, myFirstCircle); glEnableClientState(GL_VERTEX_ARRAY); glDrawArrays(GL_TRIANGLE_FAN, 0, 22); I'm still somewhat new to this system, so I may have a silly error that I do not see, but it seems to me like this should generate 40 vertices on a circle of radius .5. When it renders, the shape on screen appears to be an oval, significantly taller than it is wide. My question is thus: why is my circle rendering this way, and what could I do to fix it? This is my first question on Stack Overflow, so I'm not sure how to share an image of my output.
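    The oval is almost certainly the missing projection setup, not the fan: with no projection matched to the viewport, the -1..1 clip square is stretched over a non-square screen. A sketch, where viewportWidth/viewportHeight are assumed names for your surface size:

    /* scale the orthographic volume so one x unit equals one y unit on screen */
    GLfloat aspect = (GLfloat) viewportHeight / (GLfloat) viewportWidth;  /* e.g. 480/320 */
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrthof(-1.0f, 1.0f, -aspect, aspect, -1.0f, 1.0f);  /* taller screen, taller volume */
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    Two smaller things worth checking in the generator: the angle uses i, which runs 2..80 in steps of 2, so the parameter sweeps 4π and the fan wraps the circle twice (using the vertex number i/2 sweeps it once); and glDrawArrays is asked for 22 vertices although 41 (the centre plus 40) were generated.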

    Read the article

  • C++: is it safe to work with std::vectors as if they were arrays?

    - by peoro
    I need to have a fixed-size array of elements and to call on them functions that require knowing how they're placed in memory, in particular: functions like glVertexPointer, which need to know where the vertices are, how far apart they are from one another, and so on. In my case the vertices would be members of the elements to store. To get the index of an element within this array, I'd prefer to avoid having an index field within my elements, but would rather play with pointer arithmetic (i.e. the index of Element *x will be x - &array[0]) -- btw, this sounds dirty to me: is it good practice or should I do something else? Is it safe to use std::vector for this? Something makes me think that a std::array would be more appropriate, but: Constructor and destructor for my structure will be rarely called: I don't mind such overhead. I'm going to set the std::vector capacity to the size I need (the size I would use for a std::array), so it won't incur any overhead due to sporadic reallocation. I don't mind a little space overhead for std::vector's internal structure. I could use the ability to resize the vector (or better: to have a size chosen during setup), and I think there's no way to do this with std::array, since its size is a template parameter (that's too bad: I could do that even with an old C-like array, just dynamically allocating it on the heap). If std::vector is fine for my purpose, I'd like to know in detail whether it will have any runtime overhead with respect to std::array (or to a plain C array): I know that it'll call the default constructor for any element once I increase its size (but I guess this won't cost anything if my data has an empty default constructor?), same for the destructor. Anything else?
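    For the record, both uses are safe; a sketch of the guarantees in play (Element and kElementCount are illustrative names). std::vector storage is contiguous -- required explicitly since the C++03 library defect resolutions and stated outright in C++11, which also adds data() -- so pointer arithmetic between elements and stride-based access are both well-defined, as long as no reallocation happens while the pointers are live:

    #include <GL/gl.h>
    #include <cstddef>
    #include <vector>

    struct Element {
        GLfloat vertex[3];   // what glVertexPointer will walk over
        // ... other members ...
    };

    const std::size_t kElementCount = 1024;
    std::vector<Element> elems(kElementCount);  // size fixed once at setup

    // stride = sizeof(Element): GL steps from one element's vertex to the next, e.g.
    //   glVertexPointer(3, GL_FLOAT, sizeof(Element), &elems[0].vertex);

    std::ptrdiff_t index_of(const Element* x, const std::vector<Element>& v) {
        return x - &v[0];    // the pointer-arithmetic index, valid until reallocation
    }

    The only per-element cost over a raw array is that the vector's constructor value-initializes each Element once; with trivial members that is a single zero-fill pass.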

    Read the article

  • Big-O for calculating all routes from GPS data

    - by HH
    A non-critical GPS module uses lists because it needs to be modifiable: new routes added, new distances calculated, continuous comparisons. Or so I thought, but my team member wrote something I find very hard to get into. His pseudo code: int k =0; a[][] <- create mapModuleNearbyDotList -array //CPU O(n) for(j = 1 to n) // O(nlog(m)) for(i =1 to n) for(k = 1 to n) if(dot is nearby) adj[i][j]=min(adj[i][j], adj[i][k] + adj[k][j]); His idea: transformations of lists to tables. His worst-case time complexity is O(n^3), where n is the number of elements in his so-called table. Exception to the last point with a finite structure: O(mlog(n)), where n is the number of vertices and m is the number of neighbour vertices. Questions about his ideas: why waste resources transforming constantly-modified lists into a table? Fast? That is the only point where I to some extent agree, but I cannot understand the same upper limit n for each of the for-loops -- perhaps he supposed it to be circular. Why does the code take O(mlog(n)) to proceed in time as a finite structure? The term 'finite' may be wrong -- 'explicit'?
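    For what it's worth, the triple loop is recognizably Floyd-Warshall (all-pairs shortest paths), which explains the table: the algorithm is defined over an adjacency matrix, and its O(n^3) bound depends only on the vertex count, hence the same upper limit n on every loop. One real bug to raise with him: the intermediate vertex k must be the outermost loop for the recurrence to be correct. A sketch:

    /* Floyd-Warshall: adj[i][j] ends up as the shortest distance from i to j.
     * k -- the allowed intermediate vertex -- must be the OUTERMOST loop. */
    for (int k = 0; k < n; k++)
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                if (adj[i][k] + adj[k][j] < adj[i][j])
                    adj[i][j] = adj[i][k] + adj[k][j];

    The O(mlog(n)) figure looks borrowed from single-source Dijkstra with a binary heap (more precisely O((n+m)log(n))); it does not describe this loop, and a list-based graph plus Dijkstra is the better fit if you only ever need routes from one position at a time.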

    Read the article

  • Why doesn't the Java Collections API include a Graph implementation?

    - by dvanaria
    I’m currently learning the Java Collections API and feel I have a good understanding of the basics, but I’ve never understood why this standard API doesn’t include a Graph implementation. The three base classes are easily understandable (List, Set, and Map) and all their implementations in the API are mostly straightforward and consistent. Considering how often graphs come up as a potential way to model a given problem, this just doesn’t make sense to me (it’s possible it does exist in the API and I’m not looking in the right place of course). Steve Yegge suggests in one of his blog posts that a programmer should consider graphs first when attacking a problem, and if the problem domain doesn’t fit naturally into this data structure, only then consider the alternative structures. My first guess is that there is no universal way to represent graphs, or that their interfaces may not be generic enough for an API implementation to be useful? But if you strip down a graph to its basic components (vertices and a set of edges that connect some or all of the vertices) and consider the ways that graphs are commonly constructed (methods like addVertex(v) and insertEdge(v1, v2)) it seems that a generic Graph implementation would be possible and useful. Thanks for helping me understand this better.
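    Part of the likely answer is that the existing interfaces already compose into a serviceable graph, so the "missing" abstraction is thin. A sketch of the two construction methods mentioned above, using only standard collections:

    import java.util.*;

    /** An undirected graph as a map from each vertex to its neighbour set. */
    class MapGraph<V> {
        private final Map<V, Set<V>> adj = new HashMap<V, Set<V>>();

        void addVertex(V v) {
            if (!adj.containsKey(v)) adj.put(v, new HashSet<V>());
        }

        void insertEdge(V v1, V v2) {
            addVertex(v1);
            addVertex(v2);
            adj.get(v1).add(v2);
            adj.get(v2).add(v1);
        }

        Set<V> neighbours(V v) { return adj.get(v); }
    }

    Everything such a wrapper leaves unsettled (directed vs. undirected, multi-edges, weights, whether edges are first-class objects) is exactly what a standard API would have to fix for everyone, which is presumably why it is left to libraries such as JGraphT and JUNG.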

    Read the article

  • android opengl es texture mapping into polygons

    - by kamil
    I wrote OpenGL ES code for Android to map textures onto a square, but I want to draw textures on polygons. When the user moves the image, the texture will be mapped onto polygons with more vertices. I tried the array combination below for a pentagon, but I could not find the correct triangle combination in the indices array. public float vertices[] = { // -1.0f, 1.0f, 0.0f, //Top Left // -1.0f, -1.0f, 0.0f, //Bottom Left // 1.0f, -1.0f, 0.0f, //Bottom Right // 1.0f, 1.0f, 0.0f //Top Right -1.0f, 1.0f, 0.0f, //Top Left -1.0f, -1.0f, 0.0f, //Bottom Left 1.0f, -1.0f, 0.0f, //Bottom Right 1.0f, 1.0f, 0.0f, //Top Right 0.4f, 1.4f, 0.0f }; /** Our texture pointer */ private int[] textures = new int[1]; /** The initial texture coordinates (u, v) */ private float texture[] = { //Mapping coordinates for the vertices // 1.0f, 0.0f, // 1.0f, 1.0f, // 0.0f, 1.0f, // 0.0f, 0.0f, // 0.0f, 1.0f, // 0.0f, 0.0f, // 1.0f, 0.0f, // 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.7f, }; /** The initial indices definition */ private byte indices[] = { //2 triangles // 0,1,2, 2,3,0, 0,1,2, 2,3,4, 3,4,0, //triangles for five vertexes }; I draw with the code below: gl.glDrawElements(GL10.GL_TRIANGLES, indices.length, GL10.GL_UNSIGNED_BYTE, indexBuffer);
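    For a convex polygon whose vertices are listed in winding order, the triangulation that works is a fan from vertex 0 -- (0,1,2), (0,2,3), (0,3,4), and so on; the 2,3,4 and 3,4,0 triples leave part of the pentagon uncovered. Generated rather than hand-tuned (a sketch):

    // fan triangulation of a convex polygon with numVerts vertices in winding order
    int numVerts = 5;
    byte[] fanIndices = new byte[(numVerts - 2) * 3];
    for (int t = 0; t < numVerts - 2; t++) {
        fanIndices[3 * t] = 0;                 // every triangle starts at vertex 0
        fanIndices[3 * t + 1] = (byte) (t + 1);
        fanIndices[3 * t + 2] = (byte) (t + 2);
    }
    // numVerts = 5 yields {0,1,2, 0,2,3, 0,3,4}

    The same layout is what GL10.GL_TRIANGLE_FAN draws natively, so for a single convex polygon you could also drop the index array entirely and draw the vertices 0..N-1 as a fan.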

    Read the article

  • How much is too much memory allocation in NDK?

    - by Maximus
    The NDK download page notes that, "Typical good candidates for the NDK are self-contained, CPU-intensive operations that don't allocate much memory, such as signal processing, physics simulation, and so on." I came from a C background and was excited to try to use the NDK to operate most of my OpenGL ES functions and any native functions related to physics, animation of vertices, etc... I'm finding that I'm relying quite a bit on native code and wondering if I may be making some mistakes. I've had no trouble with testing at this point, but I'm curious if I may run into problems in the future. For example, I have a game struct defined (somewhat like what is seen in the San-Angeles example). I'm loading vertex information for objects dynamically (just what is needed for an active game area) so there's quite a bit of memory allocation happening for vertices, normals, texture coordinates, indices and texture graphic data... just to name the essentials. I'm quite careful about freeing what is allocated between game areas. Would I be safer setting some caps on array sizes or should I charge bravely forward as I'm going now?

    Read the article

  • Find if a user is facing a certain Location using Digital Compass by constructing a triangle and usi

    - by Aidan
    Hi guys, I'm constructing a geolocation-based application and I'm trying to figure out a way to make my application realise when a user is facing the direction of a given location (a particular long/lat co-ord). I've done some Googling and checked the SDK but can't really find anything for such a thing. Does anyone know of a way? To clarify: Android knows my location, the second location and my orientation. What I want is a way for Android to recognise when my orientation is "facing" the second location (e.g. within 90 degrees or so). We're also assuming that the user is stationary and needs updates every second or so; therefore getBearing() is useless. Alright, so we get that it has to be math; there appears to be no simple SDK functionality we can use. I did some searching of my own and found Barycentric Co-ords http://www.blackpawn.com/texts/pointinpoly/default.html . So what I'm trying to do now is map the camera's field of view. For example, if the person is facing a certain direction, the program should construct a triangle around that field of view: it should make one vertex the phone's position and then go out a set distance on either side, making the two end points the other vertices of the triangle. If I had this, I could then apply barycentric co-ords to see if the point lies within the newly constructed triangle. Ideas, anyone? Example: I could get my current orientation, add 45 to it and go up X distance on one side, and subtract 45 and go up X distance on the other side, to find my two other points. Though, how would I make Android know which way it should go "up"? It would know its bearing at this stage, so I need it to recognise its bearing and go out in that direction.
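    Before building triangles, it may be worth noting the comparison this usually reduces to, sketched with Android's own Location API (isFacing and coneDeg are my names; azimuthDeg is the device azimuth from SensorManager.getOrientation, converted to degrees):

    import android.location.Location;

    // true when the device is pointing at `target` within a cone of coneDeg degrees
    static boolean isFacing(Location here, Location target, float azimuthDeg, float coneDeg) {
        float bearing = here.bearingTo(target);                    // degrees East of true North
        float diff = (azimuthDeg - bearing + 540f) % 360f - 180f;  // wrapped into [-180, 180)
        return Math.abs(diff) <= coneDeg / 2f;
    }

    This needs no movement, so it works for a stationary user; call it once a second with the latest sensor azimuth. One caveat: bearingTo is relative to true north while the sensor azimuth is magnetic, so for accuracy add the declination from GeomagneticField.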

    Read the article

  • Build OpenGL model in parallel?

    - by Brendan Long
    I have a program which draws some terrain and simulates water flowing over it (in a cheap and easy way). Updating the water was easy to parallelize using OpenMP, so I can do ~50 updates per second. The problem is that even with a small amount of water, my draws per second are very, very low (it starts at 5 and drops to around 2 once there's a significant amount of water). It's not a problem with the video card, because the terrain is more complicated and gets drawn so quickly that boost::timer tells me I get infinity draws per second if I turn the water off. It may be related to memory bandwidth though (since I assume the model stays on the card and doesn't have to be transferred every time). What I'm concerned about is that on every draw, I'm calling glVertex3f() about a million times (max size is 450*600, 4 vertices each), and it's done entirely sequentially because Glut won't let me call anything in parallel. So... is there some way of building the list in parallel and then passing it to OpenGL all at once? Or some other way of making it draw faster? Am I using the wrong method (besides the obvious "use fewer vertices")?
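    The likely bottleneck is the ~million glVertex3f calls themselves, each one a function call into the driver. GL calls must stay on one thread, but the data can be built in parallel and handed over in one call with a vertex array; a sketch, where fill_water_quad is a stand-in for your height lookup:

    /* build positions in parallel -- plain memory writes, no GL calls here */
    #pragma omp parallel for
    for (int i = 0; i < num_quads; i++) {
        fill_water_quad(&verts[i * 4 * 3], i);  /* writes the 4 (x,y,z) corners of quad i */
    }

    /* then submit everything with one call on the GL thread */
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, verts);
    glDrawArrays(GL_QUADS, 0, num_quads * 4);
    glDisableClientState(GL_VERTEX_ARRAY);

    Moving verts into a vertex buffer object (one glBufferData per water update) would also remove the per-frame transfer you are worried about.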

    Read the article

  • OpenGL basics: calling glDrawElements once per object

    - by Bethor
    Hi all, continuing on from my explorations of the basics of OpenGL (see this question), I'm trying to figure out the basic principles of drawing a scene with OpenGL. I am trying to render a simple cube repeated n times in every direction. My method appears to yield terrible performance: 1000 cubes brings performance below 50fps (on a QuadroFX 1800, roughly a GeForce 9600GT). My method for drawing these cubes is as follows: done once: set up a vertex buffer and array buffer containing my cube vertices in model space set up an array buffer indexing the cube for drawing as 12 triangles done for each frame: update uniform values used by the vertex shader to move all cubes at once done for each cube, for each frame: update uniform values used by the vertex shader to move each cube to its position call glDrawElements to draw the positioned cube Is this a sane method? If not, how does one go about something like this? I'm guessing I need to minimize calls to glUniform, glDrawElements, or both, but I'm not sure how to do that. Full code for my little test (depends on gletools and pyglet): I'm aware that my init code (at least) is really ugly; I'm concerned with the rendering code for each frame right now, I'll move to something a little less insane for the creation of the vertex buffers and such later on. import pyglet from pyglet.gl import * from pyglet.window import key from numpy import deg2rad, tan from gletools import ShaderProgram, FragmentShader, VertexShader, GeometryShader vertexData = [-0.5, -0.5, -0.5, 1.0, -0.5, 0.5, -0.5, 1.0, 0.5, -0.5, -0.5, 1.0, 0.5, 0.5, -0.5, 1.0, -0.5, -0.5, 0.5, 1.0, -0.5, 0.5, 0.5, 1.0, 0.5, -0.5, 0.5, 1.0, 0.5, 0.5, 0.5, 1.0] elementArray = [2, 1, 0, 1, 2, 3,## back face 4, 7, 6, 4, 5, 7,## front face 1, 3, 5, 3, 7, 5,## top face 2, 0, 4, 2, 4, 6,## bottom face 1, 5, 4, 0, 1, 4,## left face 6, 7, 3, 6, 3, 2]## right face def toGLArray(input): return (GLfloat*len(input))(*input) def toGLushortArray(input): return (GLushort*len(input))(*input) def initPerspectiveMatrix(aspectRatio = 1.0, fov = 45): frustumScale = 1.0 / tan(deg2rad(fov) / 2.0) fzNear = 0.5 fzFar = 300.0 perspectiveMatrix = [frustumScale*aspectRatio, 0.0 , 0.0 , 0.0 , 0.0 , frustumScale, 0.0 , 0.0 , 0.0 , 0.0 , (fzFar+fzNear)/(fzNear-fzFar) , -1.0, 0.0 , 0.0 , (2*fzFar*fzNear)/(fzNear-fzFar), 0.0 ] return perspectiveMatrix class ModelObject(object): vbo = GLuint() vao = GLuint() eao = GLuint() initDone = False verticesPool = [] indexPool = [] def __init__(self, vertices, indexing): super(ModelObject, self).__init__() if not ModelObject.initDone: glGenVertexArrays(1, ModelObject.vao) glGenBuffers(1, ModelObject.vbo) glGenBuffers(1, ModelObject.eao) glBindVertexArray(ModelObject.vao) initDone = True self.numIndices = len(indexing) self.offsetIntoVerticesPool = len(ModelObject.verticesPool) ModelObject.verticesPool.extend(vertices) self.offsetIntoElementArray = len(ModelObject.indexPool) ModelObject.indexPool.extend(indexing) glBindBuffer(GL_ARRAY_BUFFER, ModelObject.vbo) glEnableVertexAttribArray(0) #position glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ModelObject.eao) glBufferData(GL_ARRAY_BUFFER, len(ModelObject.verticesPool)*4, toGLArray(ModelObject.verticesPool), GL_STREAM_DRAW) glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(ModelObject.indexPool)*2, toGLushortArray(ModelObject.indexPool), GL_STREAM_DRAW) def draw(self): glDrawElements(GL_TRIANGLES, self.numIndices, GL_UNSIGNED_SHORT, self.offsetIntoElementArray) class PositionedObject(object): 
def __init__(self, mesh, pos, objOffsetUf): super(PositionedObject, self).__init__() self.mesh = mesh self.pos = pos self.objOffsetUf = objOffsetUf def draw(self): glUniform3f(self.objOffsetUf, self.pos[0], self.pos[1], self.pos[2]) self.mesh.draw() w = 800 h = 600 AR = float(h)/float(w) window = pyglet.window.Window(width=w, height=h, vsync=False) window.set_exclusive_mouse(True) pyglet.clock.set_fps_limit(None) ## input forward = [False] left = [False] back = [False] right = [False] up = [False] down = [False] inputs = {key.Z: forward, key.Q: left, key.S: back, key.D: right, key.UP: forward, key.LEFT: left, key.DOWN: back, key.RIGHT: right, key.PAGEUP: up, key.PAGEDOWN: down} ## camera camX = 0.0 camY = 0.0 camZ = -1.0 def simulate(delta): global camZ, camX, camY scale = 10.0 move = scale*delta if forward[0]: camZ += move if back[0]: camZ += -move if left[0]: camX += move if right[0]: camX += -move if up[0]: camY += move if down[0]: camY += -move pyglet.clock.schedule(simulate) @window.event def on_key_press(symbol, modifiers): global forward, back, left, right, up, down if symbol in inputs.keys(): inputs[symbol][0] = True @window.event def on_key_release(symbol, modifiers): global forward, back, left, right, up, down if symbol in inputs.keys(): inputs[symbol][0] = False ## uniforms for shaders camOffsetUf = GLuint() objOffsetUf = GLuint() perspectiveMatrixUf = GLuint() camRotationUf = GLuint() program = ShaderProgram( VertexShader(''' #version 330 layout(location = 0) in vec4 objCoord; uniform vec3 objOffset; uniform vec3 cameraOffset; uniform mat4 perspMx; void main() { mat4 translateCamera = mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, cameraOffset.x, cameraOffset.y, cameraOffset.z, 1.0f); mat4 translateObject = mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, objOffset.x, objOffset.y, objOffset.z, 1.0f); vec4 modelCoord = objCoord; vec4 positionedModel = translateObject*modelCoord; vec4 cameraPos = translateCamera*positionedModel; gl_Position = perspMx * cameraPos; }'''), FragmentShader(''' #version 330 out vec4 outputColor; const vec4 fillColor = vec4(1.0f, 1.0f, 1.0f, 1.0f); void main() { outputColor = fillColor; }''') ) shapes = [] def init(): global camOffsetUf, objOffsetUf with program: camOffsetUf = glGetUniformLocation(program.id, "cameraOffset") objOffsetUf = glGetUniformLocation(program.id, "objOffset") perspectiveMatrixUf = glGetUniformLocation(program.id, "perspMx") glUniformMatrix4fv(perspectiveMatrixUf, 1, GL_FALSE, toGLArray(initPerspectiveMatrix(AR))) obj = ModelObject(vertexData, elementArray) nb = 20 for i in range(nb): for j in range(nb): for k in range(nb): shapes.append(PositionedObject(obj, (float(i*2), float(j*2), float(k*2)), objOffsetUf)) glEnable(GL_CULL_FACE) glCullFace(GL_BACK) glFrontFace(GL_CW) glEnable(GL_DEPTH_TEST) glDepthMask(GL_TRUE) glDepthFunc(GL_LEQUAL) glDepthRange(0.0, 1.0) glClearDepth(1.0) def update(dt): print pyglet.clock.get_fps() pyglet.clock.schedule_interval(update, 1.0) @window.event def on_draw(): with program: pyglet.clock.tick() glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT) glUniform3f(camOffsetUf, camX, camY, camZ) for shape in shapes: shape.draw() init() pyglet.app.run()
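    One direction worth sketching, since the shaders already target #version 330: make objOffset a per-instance attribute instead of a uniform, so the thousands of glUniform3f/glDrawElements pairs collapse into one instanced call. Attribute location 1 and the buffer name are my choices, the vertex shader must declare `layout(location = 1) in vec3 objOffset;` in place of the uniform, and this assumes pyglet exposes the GL 3.3 instancing entry points in your context:

    # one-time setup, next to the existing buffer code
    offsets = []
    for i in range(nb):
        for j in range(nb):
            for k in range(nb):
                offsets.extend([i * 2.0, j * 2.0, k * 2.0])

    offsetVbo = GLuint()
    glGenBuffers(1, offsetVbo)
    glBindBuffer(GL_ARRAY_BUFFER, offsetVbo)
    glBufferData(GL_ARRAY_BUFFER, len(offsets) * 4, toGLArray(offsets), GL_STATIC_DRAW)
    glEnableVertexAttribArray(1)                  # layout(location = 1) in vec3 objOffset
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0)
    glVertexAttribDivisor(1, 1)                   # advance one offset per instance

    # per frame: all nb**3 cubes in a single call
    glDrawElementsInstanced(GL_TRIANGLES, len(elementArray), GL_UNSIGNED_SHORT, 0, nb ** 3)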

    Read the article
