Search Results

Search found 43935 results on 1758 pages for 'development process'.


  • Vertex buffer acting strange? [on hold]

    - by Ryan Capote
    I'm having a strange problem, and I don't know what could be causing it. My current code is identical to how I've done this before. I'm trying to render a rectangle using a VBO and an orthographic projection. My results: (screenshot omitted). What I expect: a 3x3 rectangle in the top-left corner.

        #include <stdio.h>
        #include <stdlib.h>  /* for free() */
        #include <GL/glew.h>
        #include <GLFW/glfw3.h>
        #include "lodepng.h"

        static const int FALSE = 0;
        static const int TRUE = 1;

        static const char* VERT_SHADER =
            "#version 330\n"
            "layout(location=0) in vec4 VertexPosition;"
            "layout(location=1) in vec2 UV;"
            "uniform mat4 uProjectionMatrix;"
            /*"out vec2 TexCoords;"*/
            "void main(void) {"
            "    gl_Position = uProjectionMatrix*VertexPosition;"
            /*"    TexCoords = UV;"*/
            "}";

        static const char* FRAG_SHADER =
            "#version 330\n"
            /*"uniform sampler2D uDiffuseTexture;"
            "uniform vec4 uColor;"
            "in vec2 TexCoords;"*/
            "out vec4 FragColor;"
            "void main(void) {"
            /*"    vec4 texel = texture2D(uDiffuseTexture, TexCoords);"
            "    if(texel.a <= 0) {"
            "        discard;"
            "    }"
            "    FragColor = texel;"*/
            "    FragColor = vec4(1.f);"
            "}";

        static int g_running;
        static GLFWwindow *gl_window;
        static float gl_projectionMatrix[16];

        /* Structures */
        typedef struct _Vertex { float x, y, z, w; float u, v; } Vertex;
        typedef struct _Position { float x, y; } Position;
        typedef struct _Bitmap { unsigned char *pixels; unsigned int width, height; } Bitmap;
        typedef struct _Texture { GLuint id; unsigned int width, height; } Texture;
        typedef struct _VertexBuffer { GLuint bufferObj, vertexArray; } VertexBuffer;
        typedef struct _ShaderProgram { GLuint vertexShader, fragmentShader, program; } ShaderProgram;

        /*
            http://en.wikipedia.org/wiki/Orthographic_projection
            NOTE: this writes the matrix in row-major order (translation in
            elements 3, 7, 11), but it is uploaded below with
            glUniformMatrix4fv(..., GL_FALSE, ...), which declares the data
            column-major. Also note the caller passes far = 0, near = 1.
        */
        void createOrthoProjection(float *projection, float width, float height, float far, float near)
        {
            const float left = 0;
            const float right = width;
            const float top = 0;
            const float bottom = height;

            projection[0]  = 2.f / (right - left);
            projection[1]  = 0.f;
            projection[2]  = 0.f;
            projection[3]  = -(right + left) / (right - left);
            projection[4]  = 0.f;
            projection[5]  = 2.f / (top - bottom);
            projection[6]  = 0.f;
            projection[7]  = -(top + bottom) / (top - bottom);
            projection[8]  = 0.f;
            projection[9]  = 0.f;
            projection[10] = -2.f / (far - near);
            projection[11] = (far + near) / (far - near);
            projection[12] = 0.f;
            projection[13] = 0.f;
            projection[14] = 0.f;
            projection[15] = 1.f;
        }

        /* Textures */
        void loadBitmap(const char *filename, Bitmap *bitmap, int *success)
        {
            int error = lodepng_decode32_file(&bitmap->pixels, &bitmap->width,
                                              &bitmap->height, filename);
            if (error != 0) {
                printf("Failed to load bitmap.\n");
                printf("%s", lodepng_error_text(error));
                *success = FALSE; /* BUG fixed: original assigned to the pointer (success = FALSE) */
                return;
            }
            *success = TRUE; /* BUG fixed: original never set success on the happy path */
        }

        void destroyBitmap(Bitmap *bitmap)
        {
            free(bitmap->pixels);
        }

        void createTexture(Texture *texture, const Bitmap *bitmap)
        {
            texture->id = 0;
            glGenTextures(1, &texture->id);
            glBindTexture(GL_TEXTURE_2D, texture->id); /* BUG fixed: original passed the struct pointer, not the id */

            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bitmap->width, bitmap->height, 0,
                         GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);

            glBindTexture(GL_TEXTURE_2D, 0);
        }

        void destroyTexture(Texture *texture)
        {
            glDeleteTextures(1, &texture->id);
            texture->id = 0;
        }

        /* Vertex Buffer */
        void createVertexBuffer(VertexBuffer *vertexBuffer, Vertex *vertices)
        {
            glGenBuffers(1, &vertexBuffer->bufferObj);
            glGenVertexArrays(1, &vertexBuffer->vertexArray);
            glBindVertexArray(vertexBuffer->vertexArray);

            glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer->bufferObj);
            glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * 6, (const GLvoid*)vertices, GL_STATIC_DRAW);

            const unsigned int uvOffset = sizeof(float) * 4;

            glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
            glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)uvOffset);

            glEnableVertexAttribArray(0);
            glEnableVertexAttribArray(1);

            glBindBuffer(GL_ARRAY_BUFFER, 0);
            glBindVertexArray(0);
        }

        void destroyVertexBuffer(VertexBuffer *vertexBuffer)
        {
            glDeleteBuffers(1, &vertexBuffer->bufferObj);
            glDeleteVertexArrays(1, &vertexBuffer->vertexArray);
        }

        void bindVertexBuffer(VertexBuffer *vertexBuffer)
        {
            glBindVertexArray(vertexBuffer->vertexArray);
            glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer->bufferObj);
        }

        void drawVertexBufferMode(GLenum mode) { glDrawArrays(mode, 0, 6); }

        void drawVertexBuffer() { drawVertexBufferMode(GL_TRIANGLES); }

        void unbindVertexBuffer()
        {
            glBindVertexArray(0);
            glBindBuffer(GL_ARRAY_BUFFER, 0);
        }

        /* Shaders */
        void compileShader(ShaderProgram *shaderProgram, const char *vertexSrc, const char *fragSrc)
        {
            GLint err; /* was GLenum; glGetShaderiv expects a GLint */
            shaderProgram->vertexShader = glCreateShader(GL_VERTEX_SHADER);
            shaderProgram->fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);

            if (shaderProgram->vertexShader == 0) {
                printf("Failed to create vertex shader.");
                return;
            }
            if (shaderProgram->fragmentShader == 0) {
                printf("Failed to create fragment shader.");
                return;
            }

            glShaderSource(shaderProgram->vertexShader, 1, &vertexSrc, NULL);
            glCompileShader(shaderProgram->vertexShader);
            glGetShaderiv(shaderProgram->vertexShader, GL_COMPILE_STATUS, &err);
            if (err != GL_TRUE) {
                printf("Failed to compile vertex shader.");
                return;
            }

            glShaderSource(shaderProgram->fragmentShader, 1, &fragSrc, NULL);
            glCompileShader(shaderProgram->fragmentShader);
            glGetShaderiv(shaderProgram->fragmentShader, GL_COMPILE_STATUS, &err);
            if (err != GL_TRUE) {
                printf("Failed to compile fragment shader.");
                return;
            }

            shaderProgram->program = glCreateProgram();
            glAttachShader(shaderProgram->program, shaderProgram->vertexShader);
            glAttachShader(shaderProgram->program, shaderProgram->fragmentShader);
            glLinkProgram(shaderProgram->program);
            glGetProgramiv(shaderProgram->program, GL_LINK_STATUS, &err);
            if (err != GL_TRUE) {
                printf("Failed to link shader.");
                return;
            }
        }

        void destroyShader(ShaderProgram *shaderProgram)
        {
            glDetachShader(shaderProgram->program, shaderProgram->vertexShader);
            glDetachShader(shaderProgram->program, shaderProgram->fragmentShader);
            glDeleteShader(shaderProgram->vertexShader);
            glDeleteShader(shaderProgram->fragmentShader);
            glDeleteProgram(shaderProgram->program);
        }

        GLint getUniformLocation(const char *name, ShaderProgram *program)
        {
            /* was GLuint; glGetUniformLocation returns a signed GLint (-1 on failure) */
            return glGetUniformLocation(program->program, name);
        }

        void setUniformMatrix(float *matrix, const char *name, ShaderProgram *program)
        {
            GLint loc = getUniformLocation(name, program);
            if (loc == -1) {
                printf("Failed to get uniform location in setUniformMatrix.\n");
                return;
            }
            /* NOTE: GL_FALSE says the data is column-major, but
               createOrthoProjection builds it row-major (see note below) */
            glUniformMatrix4fv(loc, 1, GL_FALSE, matrix);
        }

        /* General functions */
        static int isRunning()
        {
            return g_running && !glfwWindowShouldClose(gl_window);
        }

        static void initializeGLFW(GLFWwindow **window, int width, int height, int *success)
        {
            if (!glfwInit()) {
                printf("Failed to initialize GLFW.");
                *success = FALSE;
                return;
            }

            glfwWindowHint(GLFW_RESIZABLE, 0);
            *window = glfwCreateWindow(width, height, "Alignments", NULL, NULL);

            if (!*window) {
                printf("Failed to create window.");
                glfwTerminate();
                *success = FALSE;
                return;
            }

            glfwMakeContextCurrent(*window);

            GLenum glewErr = glewInit();
            if (glewErr != GLEW_OK) {
                printf("Failed to initialize GLEW.");
                printf("%s", glewGetErrorString(glewErr));
                *success = FALSE;
                return;
            }

            glClearColor(0.f, 0.f, 0.f, 1.f);
            glViewport(0, 0, width, height);
            *success = TRUE;
        }

        int main(int argc, char **argv)
        {
            int err = FALSE;
            initializeGLFW(&gl_window, 480, 320, &err);
            if (err == FALSE) {
                return 1;
            }
            glDisable(GL_DEPTH_TEST); /* moved after the error check so it runs with a valid context */

            createOrthoProjection(gl_projectionMatrix, 480.f, 320.f, 0.f, 1.f);

            g_running = TRUE;

            ShaderProgram shader;
            compileShader(&shader, VERT_SHADER, FRAG_SHADER);
            glUseProgram(shader.program);
            setUniformMatrix(gl_projectionMatrix, "uProjectionMatrix", &shader); /* was &gl_projectionMatrix, a float(*)[16] */

            Vertex rectangle[6];
            VertexBuffer vbo;
            rectangle[0] = (Vertex){0.f, 0.f, 0.f, 1.f, 0.f, 0.f}; /* Top left */
            rectangle[1] = (Vertex){3.f, 0.f, 0.f, 1.f, 1.f, 0.f}; /* Top right */
            rectangle[2] = (Vertex){0.f, 3.f, 0.f, 1.f, 0.f, 1.f}; /* Bottom left */
            rectangle[3] = (Vertex){3.f, 0.f, 0.f, 1.f, 1.f, 0.f}; /* Top right (comment originally said "top left") */
            rectangle[4] = (Vertex){0.f, 3.f, 0.f, 1.f, 0.f, 1.f}; /* Bottom left */
            rectangle[5] = (Vertex){3.f, 3.f, 0.f, 1.f, 1.f, 1.f}; /* Bottom right */

            createVertexBuffer(&vbo, rectangle); /* was &rectangle, a Vertex(*)[6] */

            bindVertexBuffer(&vbo);

            while (isRunning()) {
                glClear(GL_COLOR_BUFFER_BIT);
                glfwPollEvents();
                drawVertexBuffer();
                glfwSwapBuffers(gl_window);
            }

            unbindVertexBuffer(); /* takes no arguments; original passed &vbo */

            glUseProgram(0);
            destroyShader(&shader);
            destroyVertexBuffer(&vbo);
            glfwTerminate();
            return 0;
        }
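
    The most likely culprit (an inference, not confirmed by the thread) is the matrix layout noted in the comments above: the projection is built row-major but uploaded as column-major, so the translation terms land in the bottom row and scramble the clip-space w of every vertex. A minimal sketch of the two consistent fixes, in the same C as the code above:

        /* Option 1: keep the row-major data, ask GL to transpose on upload. */
        glUniformMatrix4fv(loc, 1, GL_TRUE, matrix);

        /* Option 2: build the matrix column-major instead, i.e. put the
           translation terms in elements 12..14 and zero out 3, 7, 11. */
        projection[12] = -(right + left) / (right - left);
        projection[13] = -(top + bottom) / (top - bottom);
        projection[14] = -(far + near) / (far - near);
        projection[3] = projection[7] = projection[11] = 0.f;

    It is also worth double-checking the createOrthoProjection(..., 0.f, 1.f) call, which passes far = 0 and near = 1 to parameters declared (..., float far, float near).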

  • What tools do you use for 2D art/sprite creation?

    - by daemious
    What cheap/free tools do you use for 2D art and/or animation? I don't really like Gimp's interface, Paint.NET is limited and GraphicsGale is sort of archaic. Cosmigo ProMotion looks like it could be good, anyone use it? Seems a bit pricey at $78/92 but of course cheaper than Photoshop. I used to like Jasc Paint Shop Pro 7, but the newer versions Corel makes are more for photos. 2D Bones support would be handy also.

  • Software rendering 3d triangles in the proper order

    - by at.
    I'm implementing a basic 3D rendering engine in software (for educational purposes; please don't suggest using an API). When I project a triangle from 3D to 2D coordinates, I draw it. However, the triangles are drawn in arbitrary order, so whatever gets drawn last ends up on top of all other triangles, even ones it should be behind... Intuitively, it seems I need to draw the triangles in the correct order, so I can calculate all their distances to the camera and sort by that: the objects furthest away get drawn first (the painter's algorithm). Is this the proper way to render triangles? If I'm sorting all the objects, this is n*log(n) now. Is this the most efficient way to do this?
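
    A minimal sketch of that sorting step, assuming a hypothetical Triangle type holding camera-space vertices. Note that the standard alternative is a per-pixel depth buffer, which avoids sorting entirely and also handles the cases sorting cannot (intersecting or cyclically overlapping triangles):

        #include <algorithm>
        #include <vector>

        struct Vec3 { float x, y, z; };
        struct Triangle { Vec3 v[3]; };

        // Painter's algorithm: draw far triangles first so nearer ones
        // overwrite them. Depth is approximated by the centroid's
        // camera-space z; the sort is O(n log n) per frame.
        void sortBackToFront(std::vector<Triangle>& tris) {
            auto depth = [](const Triangle& t) {
                return (t.v[0].z + t.v[1].z + t.v[2].z) / 3.0f;
            };
            std::sort(tris.begin(), tris.end(),
                      [&](const Triangle& a, const Triangle& b) {
                          // assumes +z points away from the camera
                          return depth(a) > depth(b);
                      });
        }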

  • Panning with the OpenGL Camera / View Matrix

    - by Pris
    I'm going to try this again. I've been trying to set up a simple camera class with OpenGL, but I'm completely lost and have made zero progress creating anything useful. I'm using modern OpenGL and the glm library for matrix math. To get the most basic thing I can think of down, I'd like to pan an arbitrarily positioned camera around, meaning move it along its own Up and Side axes. Picture a randomly positioned camera looking at an object (image omitted): it should be clear what the Up (green) and Side (red) vectors on the camera are. Even though the picture shows otherwise, assume that the Model matrix is just the identity matrix. Here's what I do to try and get it to work: Step 1: Create my View/Camera matrix (I'll refer to it as the View matrix from now on) using glm::lookAt(). Step 2: Capture the mouse X and Y positions. Step 3: Create a translation matrix mapping changes in the mouse X position to the camera's Side vector, and changes in the mouse Y position to the camera's Up vector. I get the Side vector from the first column of the View matrix, and the Up vector from the second column. Step 4: Apply the translation: viewMatrix = glm::translate(viewMatrix, translationVector); But this doesn't work. I see that the mouse movement is mapped to some kind of perpendicular axes, but they're definitely not moving as you'd expect with respect to the camera. Could someone please explain what I'm doing wrong and point me in the right direction with this camera stuff?
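
    A hedged sketch of one common fix, assuming the usual world-to-eye view matrix convention: glm::translate post-multiplies, so the translation in Step 4 is applied in world space, before the view transform. Panning along the camera's own axes is a translation in eye space, which means composing on the left instead:

        #include <glm/glm.hpp>
        #include <glm/gtc/matrix_transform.hpp>

        // Pan the camera by (dx, dy) in its own Side/Up plane.
        // viewMatrix maps world -> eye, so a translation multiplied on the
        // LEFT acts on eye coordinates; shifting the world by (-dx, -dy)
        // there is the same as moving the camera by (+dx, +dy) along its
        // Side and Up vectors, with no need to extract those vectors at all.
        void pan(glm::mat4& viewMatrix, float dx, float dy) {
            viewMatrix = glm::translate(glm::mat4(1.0f),
                                        glm::vec3(-dx, -dy, 0.0f)) * viewMatrix;
        }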

  • Rotating a view of a chunked 2d tilemap

    - by Danie Clawson
    I'm working on a top-down (oblique) tile-based engine. I would like the tiles to have a definable height in the world, with characters being occluded by them, etc. This has led to a desire to be able to "rotate" the view of the world, even though I'm using all hand-drawn graphics and blitting. Therefore, I need to rotate the actual world itself, or change how the camera traverses these arrays. How can, or should, I create individual rotations of 90 degrees when I have multi-dimensional arrays? Is it faster to actually rotate the array, to access it differently, or to create pre-computed accessor arrays, something like how my chunks work? How can I rotate an individual chunk, or set of chunks? Currently I establish my tile grid like this (tile height not included):

        function Surface(WIDTH, HEIGHT) {
            WIDTH = Math.max(WIDTH - (WIDTH % TPC), TPC);
            HEIGHT = Math.max(HEIGHT - (HEIGHT % TPC), TPC);
            this.tiles = [];
            this.chunks = [];

            // Establish tiles
            for (var x = 0; x < WIDTH; x++) {
                var col = [],
                    ch_x = Math.floor(x / TPC);
                if (!this.chunks[ch_x]) this.chunks.push([]);
                for (var y = 0; y < HEIGHT; y++) {
                    var tile = new Tile(x, y),
                        ch_y = Math.floor(y / TPC);
                    if (!this.chunks[ch_x][ch_y]) this.chunks[ch_x].push([]);
                    this.chunks[ch_x][ch_y].push(tile);
                    col.push(tile);
                }
                this.tiles.push(col);
            }
        };

    Even some basic advice on my data structure would be much appreciated.
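
    One suggestion worth sketching (an option, not the only answer): rotating the data is rarely necessary, because a 90-degree view rotation is pure index remapping at read time, O(1) per access with no extra memory. Written as a C++ sketch for a square N x N grid; the same index arithmetic ports directly to the JavaScript arrays above:

        #include <vector>

        enum Rotation { R0, R90, R180, R270 };

        // Read tiles[row][col] as if the whole N x N grid had been rotated.
        template <typename Tile>
        const Tile& at(const std::vector<std::vector<Tile>>& tiles,
                       int x, int y, int n, Rotation rot) {
            switch (rot) {
                case R90:  return tiles[n - 1 - x][y];          // 90 deg clockwise
                case R180: return tiles[n - 1 - y][n - 1 - x];  // 180 deg
                case R270: return tiles[x][n - 1 - y];          // 270 deg clockwise
                default:   return tiles[y][x];                  // unrotated
            }
        }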

  • Problem with signal handlers being called too many times [closed]

    - by Hristo
    How can something print 3 times when the code only goes through the printing statements twice? I'm coding in C, and the code is in a SIGCHLD signal handler I created.

        void chld_signalHandler() {
            int pidadf = (int) getpid();
            printf("pidafdfaddf: %d\n", pidadf);
            while (1) {
                int termChildPID = waitpid(-1, NULL, WNOHANG);
                if (termChildPID == 0 || termChildPID == -1) {
                    break;
                }
                dll_node_t *temp = head;
                while (temp != NULL) {
                    printf("stuff\n");
                    if (temp->pid == termChildPID && temp->type == WORK) {
                        printf("inside if\n");
                        // read memory mapped file b/w WORKER and MAIN
                        // get statistics and write results to pipe
                        char resultString[256];
                        // printing TIME
                        int i;
                        for (i = 0; i < 24; i++) {
                            sprintf(resultString, "TIME; %d ; %d ; %d ; %s\n",
                                    i, 1, 2, temp->stats->mboxFileName);
                            fwrite(resultString, strlen(resultString), 1, pipeFD);
                        }
                        remove_node(temp);
                        break;
                    }
                    temp = temp->next;
                }
                printf("done printing from sigchld\n");
            }
            return;
        }

    The output for my MAIN process is this:

        MAIN PROCESS 16214 created WORKER PROCESS 16220 for file class.sp10.cs241.mbox
        pidafdfaddf: 16214
        stuff
        stuff
        inside if
        done printing from sigchld
        MAIN PROCESS 16214 created WORKER PROCESS 16221 for file class.sp10.cs225.mbox
        pidafdfaddf: 16214
        stuff
        stuff
        inside if
        done printing from sigchld

    And the output for the MONITOR process is this:

        MONITOR: pipe is open for reading
        MONITOR PIPE: TIME; 0 ; 1 ; 2 ; class.sp10.cs225.mbox
        MONITOR PIPE: TIME; 0 ; 1 ; 2 ; class.sp10.cs225.mbox
        MONITOR PIPE: TIME; 0 ; 1 ; 2 ; class.sp10.cs241.mbox
        MONITOR: end of readpipe

    (I've taken out repeating lines so I don't take up so much space.) Thanks, Hristo
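
    A hedged diagnosis, since the full program isn't shown: stdio calls (printf, fwrite) are not async-signal-safe and are buffered, so a FILE* buffer that is unflushed when a child is forked, or when the handler interrupts the main program mid-write, can be emitted twice; that would explain three MONITOR lines from two passes through the loop. The conventional pattern keeps the handler minimal and does the reaping and I/O in the main loop, sketched here in the same C:

        #include <signal.h>
        #include <sys/types.h>
        #include <sys/wait.h>

        static volatile sig_atomic_t child_exited = 0;

        /* The handler does only async-signal-safe work: set a flag. */
        static void chld_handler(int sig) {
            (void)sig;
            child_exited = 1;
        }

        /* Called from the main loop, outside signal context. One SIGCHLD
           can stand for several exited children (signals are not queued),
           so always reap in a loop; stdio is safe to use here. */
        static void reap_children(void) {
            pid_t pid;
            int status;
            while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
                /* look up pid in the list, write results to the pipe,
                   then fflush the pipe FILE* before any fork() elsewhere */
            }
        }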

  • Direction of the bullet - how to have something else than left, right, top, bottom

    - by Florian Margaine
    I'm making a simple shooter game using canvas and JavaScript. The current code can be seen here. To know which way I want the bullet to be shot, I simply have a direction property that can have 4 values (left, right, bottom, top), and I can then calculate the next position of the bullet easily. However, I'd like to shoot the bullet toward the mouse position, and I don't really see how to do this. How do I calculate the next position? I'm guessing there is some formula for the line between two positions (the player's and the mouse's), but I don't have much of an idea yet. There are no obstacles; I just don't see how to calculate this so that I have the next position of the bullet at each frame.
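
    The usual formula: subtract the player's position from the mouse's, normalize the difference, and scale by the bullet speed; that replaces the four-value direction property with a velocity vector. A sketch (C++ here, but the arithmetic is identical in JavaScript):

        #include <cmath>

        struct Vec2 { float x, y; };

        // Per-frame velocity moving the bullet from the player toward the
        // click. Normalizing (dividing by the length) makes the speed
        // independent of how far away the click was.
        Vec2 bulletVelocity(Vec2 player, Vec2 mouse, float speed) {
            float dx = mouse.x - player.x;
            float dy = mouse.y - player.y;
            float len = std::sqrt(dx * dx + dy * dy);
            if (len == 0.0f) return {0.0f, 0.0f};  // clicked on the player
            return {dx / len * speed, dy / len * speed};
        }

        // Each frame: bullet.x += v.x; bullet.y += v.y;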

  • Converting 3 axis vectors to a rotation matrix

    - by user38858
    I am trying to get a rotation matrix (in 3ds Max) from 3 vectors that form an axis system (all three vectors are mutually perpendicular). Somewhere I read that I could build a rotation matrix just by inserting one vector per row (source: http://renderdan.blogspot.cz/2006/05/rotation-matrix-from-axis-vectors.html). So I built a matrix with these example vectors: x-axis: [-0.194624, -0.23715, -0.951778]; y-axis: [-0.773012, 0.634392, 0]; z-axis: [-0.6038, -0.735735, 0.306788]. But for some reason, if I try to convert this matrix to Euler angles, I receive this rotation: (eulerAngles 47.7284 6.12831 36.8263)... which is totally wrong, and doesn't align to my 3 vectors at all. I know that rotation is quite difficult to understand; can someone shed some light? :)
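
    A probable explanation, worth verifying against the actual data: those three vectors are mutually orthogonal, but cross(x, y) comes out as the negative of the given z axis, so the basis is left-handed. A matrix built from it has determinant -1 and is a reflection, not a rotation, and converting a reflection to Euler angles yields nonsense. A quick check:

        #include <cstdio>

        struct V3 { double x, y, z; };

        V3 cross(V3 a, V3 b) {
            return { a.y * b.z - a.z * b.y,
                     a.z * b.x - a.x * b.z,
                     a.x * b.y - a.y * b.x };
        }

        int main() {
            V3 x = {-0.194624, -0.23715,  -0.951778};
            V3 y = {-0.773012,  0.634392,  0.0};
            V3 c = cross(x, y);
            // For a proper right-handed basis this should equal the z axis;
            // with the values above it prints (0.6038, 0.7357, -0.3068),
            // the exact negative of the given z, so negate one axis before
            // building the matrix.
            std::printf("cross(x,y) = (%f, %f, %f)\n", c.x, c.y, c.z);
        }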

  • Optimizing graphics for an iOS flash game

    - by 1GR3
    A friend of mine and I are working on a Flash-developed iOS (and later Android) puzzle board game. He's a developer and I'm a designer/developer, so (no surprise) we have different points of view. His method: make small tiles (100x100 px) in Photoshop, join them into the board, and then in Flash apply effects to the board to avoid repetition (an '80s look, and not in the good way). My method: precompose the whole board (960x640 px plus bleed) in Photoshop and then mask active and inactive areas in Flash. What do you think?

  • Thread safe double buffering

    - by kdavis8
    I am trying to implement a draw-map method that will draw a tiled image across the surface of the component. I'm having an issue with this code: the double buffering does not seem to be working, because the sprite flickers like crazy. My source code:

        package myPackage;

        import java.awt.Color;
        import java.awt.Graphics;
        import java.awt.Graphics2D;
        import java.awt.Image;
        import java.awt.Toolkit;
        import java.awt.image.BufferedImage;
        import javax.swing.JFrame;

        public class GameView extends JFrame implements Runnable {
            public BufferedImage backbuffer;
            public Graphics2D g2d;
            public Image img;
            Thread gameloop;
            Scene scene;

            public GameView() {
                super("Game View");
                setSize(600, 600);
                setVisible(true);
                setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);

                backbuffer = new BufferedImage(getWidth(), getHeight(),
                        BufferedImage.TYPE_INT_RGB);
                g2d = backbuffer.createGraphics();

                Toolkit tk = Toolkit.getDefaultToolkit();
                img = tk.getImage(this.getClass().getResource("cage.png"));

                scene = new Scene(g2d, this);
                gameloop = new Thread(this);
                gameloop.start();
            }

            public static void main(String args[]) {
                new GameView();
            }

            public void paint(Graphics g) {
                g.drawImage(backbuffer, 0, 0, this);
                repaint(); // NOTE: repaint() inside paint() schedules endless,
                           // unthrottled repaints; a likely flicker source
            }

            @Override
            public void run() {
                Thread t = Thread.currentThread();
                while (t == gameloop) {
                    // NOTE: this thread writes into backbuffer while the Swing
                    // thread reads it in paint(), with no synchronization and
                    // no frame pacing; the "buffer" is shown mid-draw
                    scene.getScene("dirtmap");
                    g2d.drawImage(img, 80, 80, this);
                }
            }

            private void drawScene(String string) {
                // g2d.setColor(Color.white);
                // g2d.fillRect(0, 0, getWidth(), getHeight());
                scene.getScene(string);
            }
        }

        package myPackage;

        import java.awt.Component;
        import java.awt.Graphics;
        import java.awt.Graphics2D;
        import java.awt.Image;
        import java.awt.Toolkit;

        public class Scene {
            Graphics g2d;
            Component c;
            boolean loaded = false;

            public Scene(Graphics2D gr, Component co) {
                g2d = gr;
                c = co;
            }

            public void getScene(String mapName) {
                Toolkit tk = Toolkit.getDefaultToolkit();
                Image tile = tk.getImage(this.getClass().getResource("dirt.png"));
                // g2d.setColor(Color.red);
                for (int y = 0; y <= 18; y++) {
                    for (int x = 0; x <= 18; x += 1) {
                        g2d.drawImage(tile, x * 32, y * 32, c);
                    }
                }
                loaded = true;
            }
        }

  • Using virtual functions

    - by Tucker Morgan
    I am starting to use virtual functions, and I am programming a simple text game. My question is this: if I have a virtual function called spec_abil within a superclass called rpg_class, and the player chooses which class to play (say a mage class, an archer class, or a warrior class, each with its own spec_abil function), how do you write it so that the program knows which one to use depending on the chosen class?
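
    This is exactly what virtual dispatch does: calling through a base-class pointer or reference picks the override of the object's actual class at run time. A minimal sketch using the names from the question (the ability bodies are invented placeholders):

        #include <iostream>
        #include <memory>

        class rpg_class {
        public:
            virtual ~rpg_class() = default;
            virtual void spec_abil() = 0; // pure virtual: every class must override it
        };

        class mage : public rpg_class {
        public:
            void spec_abil() override { std::cout << "Fireball!\n"; }
        };

        class archer : public rpg_class {
        public:
            void spec_abil() override { std::cout << "Multishot!\n"; }
        };

        class warrior : public rpg_class {
        public:
            void spec_abil() override { std::cout << "Berserk!\n"; }
        };

        int main() {
            // Chosen at run time, e.g. from player input:
            std::unique_ptr<rpg_class> player = std::make_unique<archer>();
            player->spec_abil(); // prints "Multishot!", the archer override
        }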

  • Off center projection

    - by N0xus
    I'm trying to implement the code that was freely given by a very kind developer at the following link: http://forum.unity3d.com/threads/142383-Code-sample-Off-Center-Projection-Code-for-VR-CAVE-or-just-for-fun Right now, all I'm trying to do is bring it in on one camera, but I have a few issues. My class looks as follows:

        using UnityEngine;
        using System.Collections;

        public class PerspectiveOffCenter : MonoBehaviour {

            void Start () { }

            void Update () { }

            public static Matrix4x4 GeneralizedPerspectiveProjection(
                    Vector3 pa, Vector3 pb, Vector3 pc, Vector3 pe,
                    float near, float far) {
                Vector3 va, vb, vc;
                Vector3 vr, vu, vn;
                float left, right, bottom, top, eyedistance;

                Matrix4x4 transformMatrix;
                Matrix4x4 projectionM;
                Matrix4x4 eyeTranslateM;
                Matrix4x4 finalProjection;

                // Calculate the orthonormal basis for the screen (the screen coordinate system)
                vr = pb - pa; vr.Normalize();
                vu = pc - pa; vu.Normalize();
                vn = Vector3.Cross(vr, vu); vn.Normalize();

                // Calculate the vectors from the eye (pe) to the screen corners (pa, pb, pc)
                va = pa - pe;
                vb = pb - pe;
                vc = pc - pe;

                // Get the distance from the eye to the screen plane
                eyedistance = -(Vector3.Dot(va, vn));

                // Get the variables for the off-center projection
                left = (Vector3.Dot(vr, va) * near) / eyedistance;
                right = (Vector3.Dot(vr, vb) * near) / eyedistance;
                bottom = (Vector3.Dot(vu, va) * near) / eyedistance;
                top = (Vector3.Dot(vu, vc) * near) / eyedistance;

                // Get this projection
                // FAILING LINE: PerspectiveOffCenter is this class's name, not a
                // method, hence "expression denotes a type"
                projectionM = PerspectiveOffCenter(left, right, bottom, top, near, far);

                // Fill in the transform matrix
                transformMatrix = new Matrix4x4();
                transformMatrix[0, 0] = vr.x; transformMatrix[0, 1] = vr.y; transformMatrix[0, 2] = vr.z; transformMatrix[0, 3] = 0;
                transformMatrix[1, 0] = vu.x; transformMatrix[1, 1] = vu.y; transformMatrix[1, 2] = vu.z; transformMatrix[1, 3] = 0;
                transformMatrix[2, 0] = vn.x; transformMatrix[2, 1] = vn.y; transformMatrix[2, 2] = vn.z; transformMatrix[2, 3] = 0;
                transformMatrix[3, 0] = 0; transformMatrix[3, 1] = 0; transformMatrix[3, 2] = 0; transformMatrix[3, 3] = 1;

                // Now for the eye transform
                eyeTranslateM = new Matrix4x4();
                eyeTranslateM[0, 0] = 1; eyeTranslateM[0, 1] = 0; eyeTranslateM[0, 2] = 0; eyeTranslateM[0, 3] = -pe.x;
                eyeTranslateM[1, 0] = 0; eyeTranslateM[1, 1] = 1; eyeTranslateM[1, 2] = 0; eyeTranslateM[1, 3] = -pe.y;
                eyeTranslateM[2, 0] = 0; eyeTranslateM[2, 1] = 0; eyeTranslateM[2, 2] = 1; eyeTranslateM[2, 3] = -pe.z;
                eyeTranslateM[3, 0] = 0; eyeTranslateM[3, 1] = 0; eyeTranslateM[3, 2] = 0; eyeTranslateM[3, 3] = 1f;

                // Multiply all together
                finalProjection = Matrix4x4.identity * projectionM * transformMatrix * eyeTranslateM;

                return finalProjection;
            }

            // Called once per physics step
            public void FixedUpdate () {
                Camera cam = camera;
                // calculate projection
                Matrix4x4 genProjection = GeneralizedPerspectiveProjection(
                    new Vector3(0, 1, 0), new Vector3(1, 1, 0),
                    new Vector3(0, 0, 0), new Vector3(0, 0, 0),
                    cam.nearClipPlane, cam.farClipPlane);
                // (BottomLeftCorner, BottomRightCorner, TopLeftCorner, trackerPosition, near, far)
                cam.projectionMatrix = genProjection;
            }
        }

    My error lies in the line projectionM = PerspectiveOffCenter(left, right, bottom, top, near, far); The compiler states: Expression denotes a `type', where a 'variable', 'value' or 'method group' was expected. Thus, I changed the line to read: projectionM = new PerspectiveOffCenter(left, right, bottom, top, near, far); But then the error changed to: The type 'PerspectiveOffCenter' does not contain a constructor that takes '6' arguments. For reasons that are obvious. So, finally, I changed the line to read: projectionM = new GeneralizedPerspectiveProjection(left, right, bottom, top, near, far); And the error I get is: is a 'method' but a 'type' was expected. With this last error, I'm not sure what I should do. Can anyone see what it is that I'm missing to fix this error?
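
    What the failing line needs is a helper method that builds an off-center frustum matrix from the six plane distances; the original forum thread presumably defined one with this name. A hedged reconstruction, in the same C# so it can be dropped into the class above (standard glFrustum-style math, not the thread's verbatim code):

        // Off-center perspective projection from frustum plane distances.
        // Sketch under the assumption of OpenGL-style clip conventions.
        static Matrix4x4 PerspectiveOffCenter(float left, float right,
                                              float bottom, float top,
                                              float near, float far) {
            Matrix4x4 m = Matrix4x4.zero;
            m[0, 0] = 2.0f * near / (right - left);
            m[0, 2] = (right + left) / (right - left);
            m[1, 1] = 2.0f * near / (top - bottom);
            m[1, 2] = (top + bottom) / (top - bottom);
            m[2, 2] = -(far + near) / (far - near);
            m[2, 3] = -2.0f * far * near / (far - near);
            m[3, 2] = -1.0f;
            return m;
        }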

  • How can I compile SM 3.0 effects in D3D11 in slimdx?

    - by jacker
        var bytecode = ShaderBytecode.CompileFromFile("shaders\\testShader.fx", "fx_5_0",
            ShaderFlags.None, SlimDX.D3DCompiler.EffectFlags.None, null, null, out str);
        var effect = new SlimDX.Direct3D11.Effect(gpu.Device, bytecode);

    This works fine, but if I try to use another shader model like 4.0 or 3.0, it throws an error on the effect creation: E_FAIL: An undetermined error occurred (-2147467259). How do I compile older shaders? And I've read about device contexts, but I can't find any information on how to use them to maintain DX9 compatibility.
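
    A hedged pointer rather than a confirmed answer: the Direct3D 11 effect framework accepts only the fx_5_0 profile, so compiling fx_4_0 or fx_3_0 effects through it fails by design. Older hardware is instead targeted with feature levels chosen at device creation, while effects are still compiled as fx_5_0; the runtime then restricts what the hardware may run. The native D3D11 call looks like this (C++ sketch; SlimDX's Direct3D11.Device constructors take feature levels as well, if memory serves):

        #include <d3d11.h>

        // Create a device that also runs on DX10/DX9-class hardware; the
        // runtime picks the first feature level the GPU supports.
        bool CreateDownlevelDevice(ID3D11Device** device,
                                   ID3D11DeviceContext** context) {
            const D3D_FEATURE_LEVEL levels[] = {
                D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_0,
                D3D_FEATURE_LEVEL_9_3,  D3D_FEATURE_LEVEL_9_1,
            };
            D3D_FEATURE_LEVEL obtained;
            HRESULT hr = D3D11CreateDevice(
                nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, 0,
                levels, 4, D3D11_SDK_VERSION, device, &obtained, context);
            return SUCCEEDED(hr);
        }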

  • How to improve batching performance

    - by user4241
    Hello, I am developing a sprite-based 2D game for mobile platforms, and I'm using OpenGL (well, actually Irrlicht) to render graphics. First I implemented sprite rendering in a simple way: every game object was rendered as a quad with its own GPU draw call, meaning that if I had 200 game objects, I made 200 draw calls per frame. Of course this was a bad choice, and my game was completely CPU-bound, because there is a little CPU overhead associated with every GPU draw call. The GPU stayed idle most of the time. Now I thought I could improve performance by collecting objects into large batches and rendering these batches with only a few draw calls. I implemented batching (so that every game object sharing the same texture is rendered in the same batch) and thought that my problems were gone... only to find out that my frame rate was even lower than before. Why? Well, I have 200 (or more) game objects, and they are updated 60 times per second. Every frame I have to recalculate the new position (translation and rotation) of the vertices on the CPU (the GPU on mobile platforms does not support instancing, so I can't do it there), and doing this calculation 48,000 times per second (200 * 60 * 4, since every sprite has 4 vertices) simply seems to be too slow. What could I do to improve performance? All game objects are moving/rotating (almost) every frame, so I really have to recalculate vertex positions. The only optimization I could think of is a look-up table for rotations, so that I wouldn't have to calculate them. Would point sprites help? Any nasty hacks? Anything else? Thanks.
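
    On the look-up-table idea, a minimal sketch: quantizing angles to, say, 1024 steps replaces two trig calls per sprite with two array reads, at the cost of slightly stepped rotation. Each sprite corner then rotates with four multiplies and two adds:

        #include <cmath>

        constexpr int STEPS = 1024;  // angle resolution, about 0.35 degrees
        static float sinLut[STEPS], cosLut[STEPS];

        void initLut() {
            for (int i = 0; i < STEPS; ++i) {
                float a = 2.0f * 3.14159265f * i / STEPS;
                sinLut[i] = std::sin(a);
                cosLut[i] = std::cos(a);
            }
        }

        // Rotate a sprite-corner offset (x, y) by a quantized angle step.
        inline void rotate(float x, float y, int step,
                           float& outX, float& outY) {
            const float s = sinLut[step & (STEPS - 1)];  // STEPS is a power of two
            const float c = cosLut[step & (STEPS - 1)];
            outX = x * c - y * s;
            outY = x * s + y * c;
        }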

  • Append a dynamically changing watermark to a PDF in SharePoint

    - by ccomet
    This is primarily a question of possibilities more than instructions. I'm a programming consultant working on a WSS project site system for my client. We have a document library in which files are uploaded to go through a complex approval process. With multiple stages in this process, we have an extra field which dictates the current status of the document. Now, my client has become enamored with the idea of PDF watermarking. He wants the document (which is already a PDF) to be affixed with a watermark corresponding to the current status, such that with each stage of the approval process the watermark changes. One method, the traditional method for PDF watermarking, is to keep one "clean" copy of the document hidden somewhere on the site, and create a new watermarked PDF from it at each stage of the approval process. Since the filename never changes, this new PDF can be uploaded continually to a public library, always overwriting the old version and simulating a "dynamically changing watermark". However, in the various stages there will also be people uploading clean copies with corrections and suggestions, never mind the complex nature of juggling two libraries and the fact that we double the number of files stored. My client and I agree that this is not a practical path to choose. What we would like to do is be able to "modify" the watermark in a PDF, so that we only have to keep one copy of the file. Unfortunately, from what I've seen, when you make something like a watermark, which by its nature is supposed to be "unmodifiable", you won't be able to edit it later. So, is it possible to have a part of a PDF which cannot be changed by anyone who downloads the file, but can be changed as part of a workflow or other object-model process? Thanks in advance!

  • How to prevent a hacked-server from spoofing a master server?

    - by Cody Smith
    I wish to set up a room-based multiplayer game model where players may host matches and serve as the host (i.e. the server with authoritative power). I wish to run a master server which tracks players' items, rank, cash, exp, etc. In such a model, how can I prevent someone who is hosting a game (with a modified server) from spoofing the master server with invalid match results, thus gaining exp, money, or rankings? Thanks. -Cody

  • GLSL vertex shaders with movements vs vertex off the screen

    - by user827992
    If I have a vertex shader that manages movement and position changes for some vertices in my OpenGL context, is OpenGL smart enough to run this shader only on the vertices visible on the screen? This part of the OpenGL programmable pipeline is not clear to me, because the sources I've found aren't really clear about it; they talk about fragments and pixels, and I get that, but what about vertex shaders? If you need a reference, I'm reading from this right now, and this online book has a couple of examples about it.

  • Collision Detection in Java for a game

    - by gordsmash
    I'm making a game in Java with a few other people, but we are stuck on one part of it: the collision detection. The game is an RPG, and I know how to do the collision detection between characters using rectangles, but what I don't know how to do is the collision detection for the maps. What I mean is, for example, that the character can't walk over trees or water and that sort of thing, but using rectangles doesn't seem like the best option here. To show what the game maps are going to look like, here is an example: http://i980.photobucket.com/albums/ae287/gordsmash/7-8.jpg Now, I could use rectangles to get bounds and stop the player from walking over the trees and water, but that would take a lot of them. Is there another, easier way to prevent the player from walking over the trees and obstacles besides using rectangles?
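
    A common alternative, sketched under the assumption that the maps are tile-based (C++ here; it translates to Java one-to-one with a boolean array): store one walkability flag per tile and test only the tile the character is about to enter. The cost per test is constant no matter how many obstacles the map has:

        #include <vector>

        constexpr int TILE = 32;  // tile size in pixels (an assumption)

        struct CollisionMap {
            int width, height;           // map size in tiles
            std::vector<bool> walkable;  // width * height flags; false = tree, water, ...

            bool canStandAt(float pixelX, float pixelY) const {
                int tx = static_cast<int>(pixelX) / TILE;
                int ty = static_cast<int>(pixelY) / TILE;
                if (tx < 0 || ty < 0 || tx >= width || ty >= height) return false;
                return walkable[ty * width + tx];
            }
        };

        // Before moving: if (map.canStandAt(newX, newY)) move; else stay put.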

  • OpenGL: Attempt to allocate a texture to big for the current hardware

    - by AnonymousMan
    I'm getting the following error:

        java.io.IOException: Attempt to allocate a texture to big for the current hardware
            at org.newdawn.slick.opengl.InternalTextureLoader.getTexture(InternalTextureLoader.java:320)
            at org.newdawn.slick.opengl.InternalTextureLoader.getTexture(InternalTextureLoader.java:254)
            at org.newdawn.slick.opengl.InternalTextureLoader.getTexture(InternalTextureLoader.java:200)
            at org.newdawn.slick.opengl.TextureLoader.getTexture(TextureLoader.java:64)
            at org.newdawn.slick.opengl.TextureLoader.getTexture(TextureLoader.java:24)

    The image I'm trying to use is 128x128. From System.out.println(GL11.glGetInteger(GL11.GL_MAX_TEXTURE_SIZE)); I get: 32. 32?! My graphics card is an AMD Radeon HD 7970M with 2048 MB of GDDR5 RAM; I can run all the latest games in 1080p at 60 fps with no problem, and those textures sure don't look like they are 32x32 pixels to me! How can I fix this?

    Edit: Here's the chaotic code I use to init OpenGL:

        Display.setDisplayMode(new DisplayMode(500, 500));
        Display.create();
        if (!GLContext.getCapabilities().OpenGL11) {
            throw new Exception("OpenGL 1.1 not supported.");
        }
        Display.setTitle("Game");
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        GLU.gluPerspective(45, 1, 0.1f, 5000);
        Mouse.setGrabbed(true);
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        glEnable(GL_TEXTURE_2D);
        glClearColor(0, 0, 0, 0);
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GL_LEQUAL);
        glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glEnable(GL_BLEND);
        glEnable(GL_POINT_SMOOTH);
        glEnable(GL_LINE_SMOOTH);
        glEnable(GL_POLYGON_SMOOTH);
        glEnable(GL_POLYGON_OFFSET_FILL);
        glShadeModel(GL_SMOOTH);

    Display is an LWJGL class; it creates the OpenGL context and the window. Anyway, I don't think there's anything in the init code that can help, but you never know...

  • Making body (box2d) a sprite (andengine) in Android

    - by Kadir
    I can't attach a body (Box2D) to a sprite (AndEngine) and at the same time apply a MoveModifier to the sprite that is the body. If I just make a body, it works, namely the sprites can collide. If I just apply a MoveModifier to the sprites, the sprites can move where I want. But I want to make them bodies (so they can collide) and apply a MoveModifier (so they can move where I want) at the same time. How can I do it? This is my code; the MoveModifier runs, but the sprite does not act as a body at the same time:

        circles[i] = new Sprite(startX, startY, textRegCircle[i]);
        body[i] = PhysicsFactory.createCircleBody(physicsWorld, circles[i],
                BodyType.DynamicBody, FIXTURE_DEF);
        physicsWorld.registerPhysicsConnector(
                new PhysicsConnector(circles[i], body[i], true, true));
        circles[i].registerEntityModifier(
                (IEntityModifier) new SequenceEntityModifier(
                        new MoveModifier(10.0f,
                                circles[i].getX(), circles[i].getX(),
                                circles[i].getY(), CAMERA_HEIGHT + 64.0f)));
        scene.getLastChild().attachChild(circles[i]);
        scene.registerUpdateHandler(physicsWorld);

  • Traffic estimation for a multiplayer flash game

    - by Steve Addington
    Hey, I want to know if my rough traffic estimates are right. This is for a pretty simple real-time Flash game in the style of HaxBall (but not a soccer game); here's a video of it: http://www.youtube.com/watch?v=z_xBdFg1RcI So here comes my estimate; I don't know if it's realistic, and I hope someone can help me. Consider the packet attached as a typical one sent every 200 ms: it's 148 bytes plus 64 bytes of header, which makes around a 200-byte packet. The server will receive 200 bytes x 6 players x 5 times a second = 6,000 bytes/s = 5.86 KB/s = 46.9 kbit/s. Plus it has to send all of that back to the players, so at this point we are at about 94 kbit/s. The server receives all the information, performs the definitive calculation, and sends the new positions to all players in a bigger packet of around 900 bytes, which has to be delivered to the other 6, which makes 900 bytes x 6 players x 5 times a second = 27,000 bytes/s = 26.4 KB/s = 211 kbit/s. Overall that would be about 38 KB/s, which is like 130 MB of traffic per hour for a 6-player room. But somehow I think the numbers are too high; that would be really a lot of traffic for such a simple game. Did I calculate something wrong?
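
    Redoing the arithmetic as a sanity check (same assumptions as the post: 200-byte client packets echoed back out, 900-byte state packets, 6 players, 5 updates per second), the per-hour figure checks out:

        #include <cstdio>

        int main() {
            const double in_small  = 200.0 * 6 * 5;   //  6,000 B/s from clients
            const double out_small = 200.0 * 6 * 5;   //  6,000 B/s echoed back
            const double out_state = 900.0 * 6 * 5;   // 27,000 B/s of game state
            const double totalBps  = in_small + out_small + out_state;   // 39,000 B/s
            const double mbPerHour = totalBps * 3600 / (1024.0 * 1024);  // ~134 MB/h

            // About 38 KB/s per 6-player room, i.e. roughly 6.4 KB/s per
            // player: a normal order of magnitude for a 5 Hz real-time game,
            // so the estimate looks high but is not wrong.
            std::printf("%.0f B/s, %.0f MB per hour per room\n", totalBps, mbPerHour);
        }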

  • Routes on a sphere surface - Find geodesic?

    - by CaNNaDaRk
    I'm working with some friends on a browser-based game where people can move on a 2D map. It's been almost 7 years and people still play this game, so we are thinking of a way to give them something new. Until now the game map was a limited plane, and people could move from (0, 0) to (MAX_X, MAX_Y) in quantized X and Y increments (just imagine it as a big chessboard). We believe it's time to give it another dimension, so just a couple of weeks ago we began to wonder how the game could look with other mappings: an unlimited plane with continuous movement (this could be a step forward, but still I'm not convinced); a toroidal world with continuous or quantized movement (honestly, I've worked with tori before, but this time I want something more...); a spherical world with continuous movement (this would be great!).

    What we want: users' browsers are given a list of coordinates like (latitude, longitude) for each object on the spherical surface map; browsers must then render these on the user's screen inside a web element (canvas, maybe? this is not a problem). When people click on the plane, we convert the (mouseX, mouseY) to (lat, lng) and send it to the server, which has to compute a route between the user's current position and the clicked point.

    What we have: we began writing a Java library with many useful maths to work with rotation matrices, quaternions, Euler angles, translations, etc. We put it all together and created a program that generates sphere points, renders them, and shows them to the user inside a JPanel. We managed to catch clicks and translate them to spherical coordinates, and to provide some other useful features like view rotation, scale, translation, etc. What we have now is like a little (very little indeed) engine that simulates client and server interaction: the client side shows points on the screen and catches other interactions, while the server side renders the view and does other calculus, like interpolating the route between the current position and the clicked point.

    Where is the problem? Obviously we want the shortest path to interpolate between the two route points. We use quaternions to interpolate between two points on the surface of the sphere, and this seemed to work fine until I noticed that we weren't getting the shortest path on the sphere's surface. We thought the problem was that the route was calculated as the sum of two rotations about the X and Y axes, so we changed the way we calculate the destination quaternion: we get a third angle (the first is latitude, the second is longitude, the third is the rotation about the vector which points toward our current position), which we called orientation. Now that we have the "orientation" angle, we rotate the Z axis and then use the resulting vector as the rotation axis for the destination quaternion. What we got is the correct route (it lies on a great circle), but we get this ONLY if the starting route point is at latitude, longitude (0, 0), which means the starting vector is (sphereRadius, 0, 0). With the previous version we don't get a good result even when the starting point is (0, 0), so I think we're moving towards a solution, but maybe the procedure we follow to get this route is a little "strange"? In the image referenced below (not reproduced here) you get a view of the problem when the starting point is not (0, 0): the starting vector is not (sphereRadius, 0, 0), and the destination point (which is correctly drawn!) is not on the route.

    The magenta point (the one which lies on the route) is the route's ending point rotated about the center of the sphere by (-startLatitude, 0, -startLongitude). This means that if I calculate a rotation matrix and apply it to every point on the route, maybe I'll get the real route, but I'm starting to think that there's a better way to do this. Maybe I should try to get the plane through the center of the sphere and the route points, intersect it with the sphere, and get the geodesic? But how? Sorry for being way too verbose, and maybe for incorrect English, but this thing is blowing my mind!

    EDIT: This code version is related to the first image:

        public void setRouteStart(double lat, double lng) {
            EulerAngles tmp = new EulerAngles(
                Math.toRadians(lat), 0, -Math.toRadians(lng));
            // set route start
            qtStart.setInertialToObject(tmp);
            // do other stuff like drawing the start point...
        }

        public void impostaDestinazione(double lat, double lng) {
            // NOTE: the paste mixes names; AngoliEulero is EulerAngles
            EulerAngles tmp = new AngoliEulero(
                Math.toRadians(lat), 0, -Math.toRadians(lng));
            qtEnd.setInertialToObject(tmp);
            // do other stuff like drawing the destination point...
        }

        public V3D interpolate(double totalTime, double t) {
            double _t = t / totalTime;
            Quaternion q = Quaternion.Slerp(qtStart, qtEnd, _t);
            RotationMatrix.inertialQuatToIObject(q);
            V3D p = matInt.inertialToObject(V3D.Xaxis.scale(sphereRadius));
            // other stuff, like drawing the point...
            return p;
        }

        // mostly taken from a book!
        public static Quaternion Slerp(Quaternion q0, Quaternion q1, double t) {
            double cosO = q0.dot(q1);
            double q1w = q1.w;
            double q1x = q1.x;
            double q1y = q1.y;
            double q1z = q1.z;

            if (cosO < 0.0f) {
                q1w = -q1w; q1x = -q1x; q1y = -q1y; q1z = -q1z;
                cosO = -cosO;
            }

            double sinO = Math.sqrt(1.0f - cosO * cosO);
            double O = Math.atan2(sinO, cosO);
            double oneOverSinO = 1.0f / sinO; // the paste read "1.0f / senoOmega" (undeclared)

            double k0 = Math.sin((1.0f - t) * O) * oneOverSinO; // k0, k1 were undeclared
            double k1 = Math.sin(t * O) * oneOverSinO;

            // Interpolate
            return new Quaternion(
                k0 * q0.w + k1 * q1w,
                k0 * q0.x + k1 * q1x,
                k0 * q0.y + k1 * q1y,
                k0 * q0.z + k1 * q1z);
        }

    A little dump of what I get (again, check image 1):

        Route info:
        Sphere radius and center: 200,000, (0.0, 0.0, 0.0)

        Route start: lat 0,000°, lng 0,000° @v: (200,000, 0,000, 0,000), |v| = 200,000
        Route end: lat 30,000°, lng 30,000° @v: (150,000, 86,603, 100,000), |v| = 200,000
        Qt dump: (w, x, y, z), rot. angle°, (x, y, z) rot. axis
        Qt start: (1,000, 0,000, -0,000, 0,000); 0,000°; (1,000, 0,000, 0,000)
        Qt end: (0,933, 0,067, -0,250, 0,250); 42,181°; (0,186, -0,695, 0,695)

        Route start: lat 30,000°, lng 10,000° @v: (170,574, 30,077, 100,000), |v| = 200,000
        Route end: lat 80,000°, lng -50,000° @v: (22,324, -26,604, 196,962), |v| = 200,000
        Qt dump: (w, x, y, z), rot. angle°, (x, y, z) rot. axis
        Qt start: (0,962, 0,023, -0,258, 0,084); 31,586°; (0,083, -0,947, 0,309)
        Qt end: (0,694, -0,272, -0,583, -0,324); 92,062°; (-0,377, -0,809, -0,450)
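
    A hedged suggestion that sidesteps the Euler-angle bookkeeping entirely: slerp the two position vectors themselves. For two points on the sphere, the interpolant stays on the great circle through them, which is exactly the geodesic, and it works from any start point, not just (0, 0). Sketched in C++; it maps directly onto the Java V3D type:

        #include <algorithm>
        #include <cmath>

        struct V3 { double x, y, z; };

        // Geodesic between two points p0, p1 on a sphere of the given radius:
        // p(t) = (sin((1-t)*O)*p0 + sin(t*O)*p1) / sin(O), where O is the
        // angle between them. No quaternions or orientation fix-up needed.
        V3 slerpPosition(V3 p0, V3 p1, double radius, double t) {
            double cosO = (p0.x * p1.x + p0.y * p1.y + p0.z * p1.z)
                          / (radius * radius);
            double O = std::acos(std::max(-1.0, std::min(1.0, cosO)));
            double s = std::sin(O);
            if (s < 1e-9) return p0;  // coincident (or antipodal: any great circle works)
            double k0 = std::sin((1.0 - t) * O) / s;
            double k1 = std::sin(t * O) / s;
            return { k0 * p0.x + k1 * p1.x,
                     k0 * p0.y + k1 * p1.y,
                     k0 * p0.z + k1 * p1.z };
        }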

  • How to copy depth buffer to CPU memory in DirectX?

    - by Ashwin
    I have code in OpenGL that uses glReadPixels to copy the depth buffer to a CPU memory buffer:

        glReadPixels(0, 0, w, h, GL_DEPTH_COMPONENT, GL_FLOAT, dbuf);

    How do I achieve the same in DirectX? I have looked at a similar question which gives the solution for copying the RGB buffer. I've tried to write similar code to copy the depth buffer:

        IDirect3DSurface9* d3dSurface;
        d3dDevice->GetDepthStencilSurface(&d3dSurface);

        D3DSURFACE_DESC d3dSurfaceDesc;
        d3dSurface->GetDesc(&d3dSurfaceDesc);

        IDirect3DSurface9* d3dOffSurface;
        d3dDevice->CreateOffscreenPlainSurface(
            d3dSurfaceDesc.Width, d3dSurfaceDesc.Height,
            D3DFMT_D32F_LOCKABLE, D3DPOOL_SCRATCH,
            &d3dOffSurface, NULL);

        // FAILS: D3DERR_INVALIDCALL
        D3DXLoadSurfaceFromSurface(
            d3dOffSurface, NULL, NULL,
            d3dSurface, NULL, NULL,
            D3DX_FILTER_NONE, 0);

        // Copy from offscreen surface to CPU memory ...

    The code fails on the call to D3DXLoadSurfaceFromSurface. It returns the error value D3DERR_INVALIDCALL. What is wrong with my code?
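
    A hedged note: in Direct3D 9 the depth-stencil surface is generally not CPU-readable, and D3DXLoadSurfaceFromSurface cannot convert out of depth formats, which is consistent with the D3DERR_INVALIDCALL. The usual workarounds are vendor "INTZ" depth textures, or rendering depth into an ordinary color target yourself and reading that back. A C++ sketch of the second approach, assuming a shader pass that writes linear depth into an R32F target:

        #include <d3d9.h>

        // Read back a w x h depth image rendered into an R32F color target.
        void readDepth(IDirect3DDevice9* device, UINT w, UINT h) {
            IDirect3DSurface9* rt = nullptr;
            device->CreateRenderTarget(w, h, D3DFMT_R32F, D3DMULTISAMPLE_NONE,
                                       0, FALSE, &rt, nullptr);
            // ... set rt as render target, draw the depth-writing pass ...

            IDirect3DSurface9* sys = nullptr;
            device->CreateOffscreenPlainSurface(w, h, D3DFMT_R32F,
                                                D3DPOOL_SYSTEMMEM, &sys, nullptr);
            device->GetRenderTargetData(rt, sys);  // GPU -> CPU copy

            D3DLOCKED_RECT lr;
            sys->LockRect(&lr, nullptr, D3DLOCK_READONLY);
            // lr.pBits now holds w*h floats (row pitch lr.Pitch),
            // the analogue of glReadPixels' dbuf
            sys->UnlockRect();

            sys->Release();
            rt->Release();
        }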

  • How best to handle ID3D11InputLayout in rendering code?

    - by JohnB
    I'm looking for an elegant way to handle input layouts in my DirectX 11 code. The problem is that I have an Effect class and an Element class. The Effect class encapsulates shaders and similar settings, and the Element class contains something that can be drawn (a 3D model, landscape, etc.). My drawing code sets the device shaders etc. using the specified Effect, and then calls the draw function of the Element to draw the actual geometry contained in it. The problem is this: I need to create an ID3D11InputLayout somewhere. This really belongs in the Element class, as it's no business of the rest of the system how that element chooses to represent its vertex layout. But in order to create the object, the API requires the vertex shader bytecode for the vertex shader that will be used to draw the object. In DirectX 9 it was easy; there was no such dependency, so my Element could contain its own input layout structures and set them without the Effect being involved. But the Element shouldn't really have to know anything about the Effect it's being drawn with; that's just render settings, and the Element is there to provide geometry. So I don't really know where to store, and how to select, the InputLayout for each draw call. I mean, I've made something work, but it seems very ugly. This makes me think I've either missed something obvious, or else my design of having all the render settings in an Effect, the geometry in an Element, and a third party that draws it all is just flawed. I'm just wondering how anyone else handles their input layouts in DirectX 11 in an elegant way?
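
    A common resolution, sketched with hypothetical types: treat the input layout as a cached joint product of (vertex format, vertex shader signature) rather than as property of either side. The Element declares its format, the Effect supplies its bytecode, and a small renderer-owned cache creates the ID3D11InputLayout on first use of each pair:

        #include <d3d11.h>
        #include <map>
        #include <utility>
        #include <vector>

        // Keyed by (vertex-format id from the Element, shader id from the
        // Effect); neither class needs to know about the other.
        class InputLayoutCache {
        public:
            ID3D11InputLayout* get(ID3D11Device* dev,
                                   int formatId,
                                   const std::vector<D3D11_INPUT_ELEMENT_DESC>& desc,
                                   int shaderId,
                                   const void* vsBytecode, size_t vsSize) {
                auto key = std::make_pair(formatId, shaderId);
                auto it = cache.find(key);
                if (it != cache.end()) return it->second;

                ID3D11InputLayout* layout = nullptr;
                dev->CreateInputLayout(desc.data(),
                                       static_cast<UINT>(desc.size()),
                                       vsBytecode, vsSize,
                                       &layout); // check the HRESULT in real code
                cache[key] = layout;
                return layout;
            }
        private:
            std::map<std::pair<int, int>, ID3D11InputLayout*> cache; // Release() entries on shutdown
        };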

  • How can I better implement A star algorithm with a very large set of nodes?

    - by Stephen
    I'm making a game with nodejs in which many enemies must converge on the player as the player moves around a relatively open space (right now it is an open field with few obstacles, but eventually there may be some small buildings in the field with 1 or 2 rooms). It's a multiplayer game using websockets, so the server needs to keep track of enemies and players. I found this javascript A* library which I've modified to be used on the server as a nodejs module. The library utilizes a Binary Heap to track the nodes for the algorithm, so it should be pretty fast (and indeed, with a small grid, say 100x100 it is lightning fast). The problem is that my game is not really tile-based. As the player moves around the map, he is moving on a more or less 1-to-1 per-pixel coordinate system (the player can move in 8 directions, 1 or 2 pixels at a time). In preliminary tests, on an 800x600 field, the path-finding can take anywhere from 400 to 1000 ms. Multiply that by 10 enemies and the game starts to get pretty choppy. I have already set it up so that each enemy will only do a path-finding call once per second or even as slow as once every 2 seconds (they have to keep updating their path because the players can move freely). But even with this long interval, there are noticeable lag spikes or chops every couple of seconds as the enemies update their paths. I'm willing to approach the problem of path-finding differently, if there's another option. I'm assuming that the real problem is the enormous grid (800x600). It also occurs to me that maybe the large arrays are to blame, as I've read that V8 has trouble with large arrays.
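
    The standard remedy, offered as a hedged sketch: path-find on a much coarser grid than the per-pixel movement space, then let enemies steer pixel-by-pixel between the coarse waypoints. An 800x600 field at 25-pixel cells becomes a 32x24 grid, roughly a thousand nodes instead of 480,000. Written in C++; the mapping is the same two lines of arithmetic in JavaScript:

        constexpr int CELL = 25;  // coarse cell size in pixels (a tunable assumption)

        struct Cell { int cx, cy; };

        // Pixel position -> coarse path-finding node.
        inline Cell toCell(int px, int py) { return { px / CELL, py / CELL }; }

        // Coarse node -> pixel waypoint at the cell's center.
        inline void toPixel(Cell c, int& px, int& py) {
            px = c.cx * CELL + CELL / 2;
            py = c.cy * CELL + CELL / 2;
        }

        // Usage: run A* from toCell(enemyX, enemyY) to toCell(playerX, playerY)
        // on the 32x24 grid, then move the enemy toward each waypoint in turn.
        // Re-planning once a second now costs milliseconds, not hundreds of ms.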
