Search Results

Search found 10366 results on 415 pages for 'const char pointer'.


  • How to compile a C++ source code written for Linux/Unix on Windows Vista (code given)

    - by HTMZ
    I have a c++ source code that was written in linux/unix environment by some other author. It gives me errors when i compile it in windows vista environment. I am using Bloodshed Dev C++ v 4.9. please help. #include <iostream.h> #include <map> #include <vector> #include <string> #include <string.h> #include <strstream> #include <unistd.h> #include <stdlib.h> using namespace std; template <class T> class PrefixSpan { private: vector < vector <T> > transaction; vector < pair <T, unsigned int> > pattern; unsigned int minsup; unsigned int minpat; unsigned int maxpat; bool all; bool where; string delimiter; bool verbose; ostream *os; void report (vector <pair <unsigned int, int> > &projected) { if (minpat > pattern.size()) return; // print where & pattern if (where) { *os << "<pattern>" << endl; // what: if (all) { *os << "<freq>" << pattern[pattern.size()-1].second << "</freq>" << endl; *os << "<what>"; for (unsigned int i = 0; i < pattern.size(); i++) *os << (i ? " " : "") << pattern[i].first; } else { *os << "<what>"; for (unsigned int i = 0; i < pattern.size(); i++) *os << (i ? " " : "") << pattern[i].first << delimiter << pattern[i].second; } *os << "</what>" << endl; // where *os << "<where>"; for (unsigned int i = 0; i < projected.size(); i++) *os << (i ? " " : "") << projected[i].first; *os << "</where>" << endl; *os << "</pattern>" << endl; } else { // print found pattern only if (all) { *os << pattern[pattern.size()-1].second; for (unsigned int i = 0; i < pattern.size(); i++) *os << " " << pattern[i].first; } else { for (unsigned int i = 0; i < pattern.size(); i++) *os << (i ? " " : "") << pattern[i].first << delimiter << pattern[i].second; } *os << endl; } } void project (vector <pair <unsigned int, int> > &projected) { if (all) report(projected); map <T, vector <pair <unsigned int, int> > > counter; for (unsigned int i = 0; i < projected.size(); i++) { int pos = projected[i].second; unsigned int id = projected[i].first; unsigned int size = transaction[id].size(); map <T, int> tmp; for (unsigned int j = pos + 1; j < size; j++) { T item = transaction[id][j]; if (tmp.find (item) == tmp.end()) tmp[item] = j ; } for (map <T, int>::iterator k = tmp.begin(); k != tmp.end(); ++k) counter[k->first].push_back (make_pair <unsigned int, int> (id, k->second)); } for (map <T, vector <pair <unsigned int, int> > >::iterator l = counter.begin (); l != counter.end (); ) { if (l->second.size() < minsup) { map <T, vector <pair <unsigned int, int> > >::iterator tmp = l; tmp = l; ++tmp; counter.erase (l); l = tmp; } else { ++l; } } if (! 
all && counter.size () == 0) { report (projected); return; } for (map <T, vector <pair <unsigned int, int> > >::iterator l = counter.begin (); l != counter.end(); ++l) { if (pattern.size () < maxpat) { pattern.push_back (make_pair <T, unsigned int> (l->first, l->second.size())); project (l->second); pattern.erase (pattern.end()); } } } public: PrefixSpan (unsigned int _minsup = 1, unsigned int _minpat = 1, unsigned int _maxpat = 0xffffffff, bool _all = false, bool _where = false, string _delimiter = "/", bool _verbose = false): minsup(_minsup), minpat (_minpat), maxpat (_maxpat), all(_all), where(_where), delimiter (_delimiter), verbose (_verbose) {}; ~PrefixSpan () {}; istream& read (istream &is) { string line; vector <T> tmp; T item; while (getline (is, line)) { tmp.clear (); istrstream istrs ((char *)line.c_str()); while (istrs >> item) tmp.push_back (item); transaction.push_back (tmp); } return is; } ostream& run (ostream &_os) { os = &_os; if (verbose) *os << transaction.size() << endl; vector <pair <unsigned int, int> > root; for (unsigned int i = 0; i < transaction.size(); i++) root.push_back (make_pair (i, -1)); project (root); return *os; } void clear () { transaction.clear (); pattern.clear (); } }; int main (int argc, char **argv) { extern char *optarg; unsigned int minsup = 1; unsigned int minpat = 1; unsigned int maxpat = 0xffffffff; bool all = false; bool where = false; string delimiter = "/"; bool verbose = false; string type = "string"; int opt; while ((opt = getopt(argc, argv, "awvt:M:m:L:d:")) != -1) { switch(opt) { case 'a': all = true; break; case 'w': where = true; break; case 'v': verbose = true; break; case 'm': minsup = atoi (optarg); break; case 'M': minpat = atoi (optarg); break; case 'L': maxpat = atoi (optarg); break; case 't': type = string (optarg); break; case 'd': delimiter = string (optarg); break; default: cout << "Usage: " << argv[0] << " [-m minsup] [-M minpat] [-L maxpat] [-a] [-w] [-v] [-t type] [-d delimiter] < data .." << endl; return -1; } } if (type == "int") { PrefixSpan<unsigned int> prefixspan (minsup, minpat, maxpat, all, where, delimiter, verbose); prefixspan.read (cin); prefixspan.run (cout); }else if (type == "short") { PrefixSpan<unsigned short> prefixspan (minsup, minpat, maxpat, all, where, delimiter, verbose); prefixspan.read (cin); prefixspan.run (cout); } else if (type == "char") { PrefixSpan<unsigned char> prefixspan (minsup, minpat, maxpat, all, where, delimiter, verbose); prefixspan.read (cin); prefixspan.run (cout); } else if (type == "string") { PrefixSpan<string> prefixspan (minsup, minpat, maxpat, all, where, delimiter, verbose); prefixspan.read (cin); prefixspan.run (cout); } else { cerr << "Unknown Item Type: " << type << " : choose from [string|int|short|char]" << endl; return -1; } return 0; }
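    Without the exact error messages this is only a guess, but the usual suspects when porting code like this are the pre-standard <iostream.h> header, the deprecated <strstream>, the POSIX-only <unistd.h>/getopt(), and missing typename on dependent types such as map<T, int>::iterator. A minimal sketch of the kind of header changes a modern compiler generally expects (not a drop-in fix; getopt() would still need a Windows replacement or a hand-written argument parser):

        // Hedged sketch: portable replacements for the non-standard headers above.
        #include <iostream>   // instead of the pre-standard <iostream.h>
        #include <sstream>    // instead of the deprecated <strstream>
        #include <map>
        #include <vector>
        #include <string>
        #include <cstring>    // instead of <string.h>
        #include <cstdlib>    // instead of <stdlib.h>
        // <unistd.h> is POSIX-only; getopt() has no standard Windows equivalent.

        // The read() member would then use istringstream instead of istrstream, e.g.:
        //   std::istringstream istrs(line);
        // and dependent iterator types need 'typename', e.g.:
        //   typename std::map<T, int>::iterator k = tmp.begin();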


  • Optimizing collision engine bottleneck

    - by Vittorio Romeo
    Foreword: I'm aware that optimizing this bottleneck is not a necessity - the engine is already very fast. I, however, for fun and educational purposes, would love to find a way to make the engine even faster. I'm creating a general-purpose C++ 2D collision detection/response engine, with an emphasis on flexibility and speed. Here's a very basic diagram of its architecture: Basically, the main class is World, which owns (manages memory) of a ResolverBase*, a SpatialBase* and a vector<Body*>. SpatialBase is a pure virtual class which deals with broad-phase collision detection. ResolverBase is a pure virtual class which deals with collision resolution. The bodies communicate to the World::SpatialBase* with SpatialInfo objects, owned by the bodies themselves. There currenly is one spatial class: Grid : SpatialBase, which is a basic fixed 2D grid. It has it's own info class, GridInfo : SpatialInfo. Here's how its architecture looks: The Grid class owns a 2D array of Cell*. The Cell class contains two collection of (not owned) Body*: a vector<Body*> which contains all the bodies that are in the cell, and a map<int, vector<Body*>> which contains all the bodies that are in the cell, divided in groups. Bodies, in fact, have a groupId int that is used for collision groups. GridInfo objects also contain non-owning pointers to the cells the body is in. As I previously said, the engine is based on groups. Body::getGroups() returns a vector<int> of all the groups the body is part of. Body::getGroupsToCheck() returns a vector<int> of all the groups the body has to check collision against. Bodies can occupy more than a single cell. GridInfo always stores non-owning pointers to the occupied cells. After the bodies move, collision detection happens. We assume that all bodies are axis-aligned bounding boxes. How broad-phase collision detection works: Part 1: spatial info update For each Body body: Top-leftmost occupied cell and bottom-rightmost occupied cells are calculated. If they differ from the previous cells, body.gridInfo.cells is cleared, and filled with all the cells the body occupies (2D for loop from the top-leftmost cell to the bottom-rightmost cell). body is now guaranteed to know what cells it occupies. For a performance boost, it stores a pointer to every map<int, vector<Body*>> of every cell it occupies where the int is a group of body->getGroupsToCheck(). These pointers get stored in gridInfo->queries, which is simply a vector<map<int, vector<Body*>>*>. body is now guaranteed to have a pointer to every vector<Body*> of bodies of groups it needs to check collision against. These pointers are stored in gridInfo->queries. Part 2: actual collision checks For each Body body: body clears and fills a vector<Body*> bodiesToCheck, which contains all the bodies it needs to check against. Duplicates are avoided (bodies can belong to more than one group) by checking if bodiesToCheck already contains the body we're trying to add. const vector<Body*>& GridInfo::getBodiesToCheck() { bodiesToCheck.clear(); for(const auto& q : queries) for(const auto& b : *q) if(!contains(bodiesToCheck, b)) bodiesToCheck.push_back(b); return bodiesToCheck; } The GridInfo::getBodiesToCheck() method IS THE BOTTLENECK. The bodiesToCheck vector must be filled for every body update because bodies could have moved meanwhile. It also needs to prevent duplicate collision checks. The contains function simply checks if the vector already contains a body with std::find. 
Collision is checked and resolved for every body in bodiesToCheck. That's it. So, I've been trying to optimize this broad-phase collision detection for quite a while now. Every time I try something other than the current architecture/setup, something doesn't go as planned, or I make assumptions about the simulation that later prove to be false. My question is: how can I optimize the broad phase of my collision engine while maintaining the grouped-bodies approach? Is there some kind of magic C++ optimization that can be applied here? Can the architecture be redesigned to allow for more performance? Actual implementation: SSVSCollision Body.h, Body.cpp World.h, World.cpp Grid.h, Grid.cpp Cell.h, Cell.cpp GridInfo.h, GridInfo.cpp
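    Purely as an illustration of one common way to remove the linear std::find check described above (this is not the author's implementation, and lastQueryStamp is a hypothetical member that would have to be added to Body), a per-body stamp makes the duplicate test O(1):

        // Hedged sketch: avoid contains()/std::find by stamping each body once per query.
        const std::vector<Body*>& GridInfo::getBodiesToCheck()
        {
            static unsigned int currentStamp = 0;   // illustrative; a per-World counter would also work
            ++currentStamp;
            bodiesToCheck.clear();
            for(const auto& q : queries)
                for(const auto& b : *q)
                    if(b->lastQueryStamp != currentStamp)   // O(1) instead of std::find over bodiesToCheck
                    {
                        b->lastQueryStamp = currentStamp;
                        bodiesToCheck.push_back(b);
                    }
            return bodiesToCheck;
        }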


  • Maintaining packages with code - Adding a property expression programmatically

    Every now and then I've come across scenarios where I need to update a lot of packages all in the same way. The usual scenario revolves around a group of packages all having been built off the same package template, and something needs to be updated to keep up with new requirements, a new logging standard for example. You'd probably start by updating your template package, but then you need to address all your existing packages. Often this can run into the hundreds of packages, and clearly that's not a job anyone wants to do by hand. I normally solve the problem by writing a simple console application that looks for files and patches any package it finds, and it is an example of this that I thought I'd tidy up a bit and publish here. This sample will look at the package and find any top-level Execute SQL Tasks, and change the SQL Statement property to use an expression. It is very simplistic, working on top-level tasks only, so nothing inside a Sequence Container or Loop will be checked, but obviously the code could be extended for this if required. The code that actually sets the expression is shown below; the rest is just wrapper code to find the package and to find the task.

    /// <summary>
    /// The CreationName of the Tasks to target, e.g. Execute SQL Task
    /// </summary>
    private const string TargetTaskCreationName = "Microsoft.SqlServer.Dts.Tasks.ExecuteSQLTask.ExecuteSQLTask, Microsoft.SqlServer.SQLTask, Version=9.0.242.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91";

    /// <summary>
    /// The name of the task property to target.
    /// </summary>
    private const string TargetPropertyName = "SqlStatementSource";

    /// <summary>
    /// The property expression to set.
    /// </summary>
    private const string ExpressionToSet = "@[User::SQLQueryVariable]";

    ....

    // Check if the task matches our target task type
    if (taskHost.CreationName == TargetTaskCreationName)
    {
        // Check for the target property
        if (taskHost.Properties.Contains(TargetPropertyName))
        {
            // Get the property, check for an expression and set expression if not found
            DtsProperty property = taskHost.Properties[TargetPropertyName];
            if (string.IsNullOrEmpty(property.GetExpression(taskHost)))
            {
                property.SetExpression(taskHost, ExpressionToSet);
                changeCount++;
            }
        }
    }

    This is a console application, so to specify which packages you want to target you have three options:
    Find all packages in the current folder, the default behaviour if no arguments are specified: TaskExpressionPatcher.exe
    Find all packages in a specified folder, pass the folder as the argument: TaskExpressionPatcher.exe C:\Projects\Alpha\Packages\
    Find a specific package, pass the file path as the argument: TaskExpressionPatcher.exe C:\Projects\Alpha\Packages\Package.dtsx
    The code was written against SQL Server 2005, but just change the reference to Microsoft.SQLServer.ManagedDTS to be the SQL Server 2008 version and it will work fine. If you get an error Microsoft.SqlServer.Dts.Runtime.DtsRuntimeException: The package failed to load due to error 0xC0011008… then check that the package is from the correct version of SSIS compared to the referenced assemblies, 2005 vs 2008 in other words. Download Sample Project TaskExpressionPatcher.zip (6 KB)


  • Write your Tests in RSpec with IronRuby

    - by kazimanzurrashid
    [Note: This is not a continuation of my previous post, treat it as an experiment out in the wild. ] Lets consider the following class, a fictitious Fund Transfer Service: public class FundTransferService : IFundTransferService { private readonly ICurrencyConvertionService currencyConvertionService; public FundTransferService(ICurrencyConvertionService currencyConvertionService) { this.currencyConvertionService = currencyConvertionService; } public void Transfer(Account fromAccount, Account toAccount, decimal amount) { decimal convertionRate = currencyConvertionService.GetConvertionRate(fromAccount.Currency, toAccount.Currency); decimal convertedAmount = convertionRate * amount; fromAccount.Withdraw(amount); toAccount.Deposit(convertedAmount); } } public class Account { public Account(string currency, decimal balance) { Currency = currency; Balance = balance; } public string Currency { get; private set; } public decimal Balance { get; private set; } public void Deposit(decimal amount) { Balance += amount; } public void Withdraw(decimal amount) { Balance -= amount; } } We can write the spec with MSpec + Moq like the following: public class When_fund_is_transferred { const decimal ConvertionRate = 1.029m; const decimal TransferAmount = 10.0m; const decimal InitialBalance = 100.0m; static Account fromAccount; static Account toAccount; static FundTransferService fundTransferService; Establish context = () => { fromAccount = new Account("USD", InitialBalance); toAccount = new Account("CAD", InitialBalance); var currencyConvertionService = new Moq.Mock<ICurrencyConvertionService>(); currencyConvertionService.Setup(ccv => ccv.GetConvertionRate(Moq.It.IsAny<string>(), Moq.It.IsAny<string>())).Returns(ConvertionRate); fundTransferService = new FundTransferService(currencyConvertionService.Object); }; Because of = () => { fundTransferService.Transfer(fromAccount, toAccount, TransferAmount); }; It should_decrease_from_account_balance = () => { fromAccount.Balance.ShouldBeLessThan(InitialBalance); }; It should_increase_to_account_balance = () => { toAccount.Balance.ShouldBeGreaterThan(InitialBalance); }; } and if you run the spec it will give you a nice little output like the following: When fund is transferred » should decrease from account balance » should increase to account balance 2 passed, 0 failed, 0 skipped, took 1.14 seconds (MSpec). Now, lets see how we can write exact spec in RSpec. 
require File.dirname(__FILE__) + "/../FundTransfer/bin/Debug/FundTransfer" require "spec" require "caricature" describe "When fund is transferred" do Convertion_Rate = 1.029 Transfer_Amount = 10.0 Initial_Balance = 100.0 before(:all) do @from_account = FundTransfer::Account.new("USD", Initial_Balance) @to_account = FundTransfer::Account.new("CAD", Initial_Balance) currency_convertion_service = Caricature::Isolation.for(FundTransfer::ICurrencyConvertionService) currency_convertion_service.when_receiving(:get_convertion_rate).with(:any, :any).return(Convertion_Rate) fund_transfer_service = FundTransfer::FundTransferService.new(currency_convertion_service) fund_transfer_service.transfer(@from_account, @to_account, Transfer_Amount) end it "should decrease from account balance" do @from_account.balance.should be < Initial_Balance end it "should increase to account balance" do @to_account.balance.should be > Initial_Balance end end I think the above code is self explanatory, treat the require(line 1- 4) statements as the add reference of our visual studio projects, we are adding all the required libraries with this statement. Next, the describe which is a RSpec keyword. The before does exactly the same as NUnit's Setup or MsTest’s TestInitialize attribute, but in the above we are using before(:all) which acts as ClassInitialize of MsTest, that means it will be executed only once before all the test methods. In the before(:all) we are first instantiating the from and to accounts, it is same as creating with the full name (including namespace)  like fromAccount = new FundTransfer.Account(.., ..), next, we are creating a mock object of ICurrencyConvertionService, check that for creating the mock we are not using the Moq like the MSpec version. This is somewhat an interesting issue of IronRuby or maybe the DLR, it seems that it is not possible to use the lambda expression that most of the mocking tools uses in arrange phase in Iron Ruby, like: currencyConvertionService.Setup(ccv => ccv.GetConvertionRate(Moq.It.IsAny<string>(), Moq.It.IsAny<string>())).Returns(ConvertionRate); But the good news is, there is already an excellent mocking tool called Caricature written completely in IronRuby which we can use to mock the .NET classes. May be all the mocking tool providers should give some thought to add the support for the DLR, so that we can use the tool that we are already familiar with. I think the rest of the code is too simple, so I am skipping the explanation. Now, the last thing, how we are going to run it with RSpec, lets first install the required gems. Open you command prompt and type the following: igem sources -a http://gems.github.com This will add the GitHub as gem source. Next type: igem install uuidtools caricature rspec and at last we have to create a batch file so that we can execute it in the Notepad++, create a batch like in the IronRuby bin directory like my previous post and put the following in that batch file: @echo off cls call spec %1 --format specdoc pause Next, add a run menu and shortcut in the Notepad++ like my previous post. Now when we run it it will show the following output: When fund is transferred - should decrease from account balance - should increase to account balance Finished in 0.332042 seconds 2 examples, 0 failures Press any key to continue . . . You will complete code of this post in the bottom. That's it for today. Download: RSpecIntegration.zip


  • Basic shadow mapping fails on NVIDIA card?

    - by James
    Recently I switched from an AMD Radeon HD 6870 card to an (MSI) NVIDIA GTX 670 for performance reasons. I found however that my implementation of shadow mapping in all my applications failed. In a very simple shadow POC project the problem appears to be that the scene being drawn never results in a draw to the depth map and as a result the entire depth map is just infinity, 1.0 (Reading directly from the depth component after draw (glReadPixels) shows every pixel is infinity (1.0), replacing the depth comparison in the shader with a comparison of the depth from the shadow map with 1.0 shadows the entire scene, and writing random values to the depth map and then not calling glClear(GL_DEPTH_BUFFER_BIT) results in a random noisy pattern on the scene elements - from which we can infer that the uploading of the depth texture and comparison within the shader are functioning perfectly.) Since the problem appears almost certainly to be in the depth render, this is the code for that: const int s_res = 1024; GLuint shadowMap_tex; GLuint shadowMap_prog; GLint sm_attr_coord3d; GLint sm_uniform_mvp; GLuint fbo_handle; GLuint renderBuffer; bool isMappingShad = false; //The scene consists of a plane with box above it GLfloat scene[] = { -10.0, 0.0, -10.0, 0.5, 0.0, 10.0, 0.0, -10.0, 1.0, 0.0, 10.0, 0.0, 10.0, 1.0, 0.5, -10.0, 0.0, -10.0, 0.5, 0.0, -10.0, 0.0, 10.0, 0.5, 0.5, 10.0, 0.0, 10.0, 1.0, 0.5, ... }; //Initialize the stuff used by the shadow map generator int initShadowMap() { //Initialize the shadowMap shader program if (create_program("shadow.v.glsl", "shadow.f.glsl", shadowMap_prog) != 1) return -1; const char* attribute_name = "coord3d"; sm_attr_coord3d = glGetAttribLocation(shadowMap_prog, attribute_name); if (sm_attr_coord3d == -1) { fprintf(stderr, "Could not bind attribute %s\n", attribute_name); return 0; } const char* uniform_name = "mvp"; sm_uniform_mvp = glGetUniformLocation(shadowMap_prog, uniform_name); if (sm_uniform_mvp == -1) { fprintf(stderr, "Could not bind uniform %s\n", uniform_name); return 0; } //Create a framebuffer glGenFramebuffers(1, &fbo_handle); glBindFramebuffer(GL_FRAMEBUFFER, fbo_handle); //Create render buffer glGenRenderbuffers(1, &renderBuffer); glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer); //Setup the shadow texture glGenTextures(1, &shadowMap_tex); glBindTexture(GL_TEXTURE_2D, shadowMap_tex); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, s_res, s_res, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); return 0; } //Delete stuff void dnitShadowMap() { //Delete everything glDeleteFramebuffers(1, &fbo_handle); glDeleteRenderbuffers(1, &renderBuffer); glDeleteTextures(1, &shadowMap_tex); glDeleteProgram(shadowMap_prog); } int loadSMap() { //Bind MVP stuff glm::mat4 view = glm::lookAt(glm::vec3(10.0, 10.0, 5.0), glm::vec3(0.0, 0.0, 0.0), glm::vec3(0.0, 1.0, 0.0)); glm::mat4 projection = glm::ortho<float>(-10,10,-8,8,-10,40); glm::mat4 mvp = projection * view; glm::mat4 biasMatrix( 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5, 0.5, 0.5, 1.0 ); glm::mat4 lsMVP = biasMatrix * mvp; //Upload light source matrix to the main shader programs glUniformMatrix4fv(uniform_ls_mvp, 1, GL_FALSE, glm::value_ptr(lsMVP)); glUseProgram(shadowMap_prog); glUniformMatrix4fv(sm_uniform_mvp, 1, 
GL_FALSE, glm::value_ptr(mvp)); //Draw to the framebuffer (with depth buffer only draw) glBindFramebuffer(GL_FRAMEBUFFER, fbo_handle); glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer); glBindTexture(GL_TEXTURE_2D, shadowMap_tex); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, shadowMap_tex, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); GLenum result = glCheckFramebufferStatus(GL_FRAMEBUFFER); if (GL_FRAMEBUFFER_COMPLETE != result) { printf("ERROR: Framebuffer is not complete.\n"); return -1; } //Draw shadow scene printf("Creating shadow buffers..\n"); int ticks = SDL_GetTicks(); glClear(GL_DEPTH_BUFFER_BIT); //Wipe the depth buffer glViewport(0, 0, s_res, s_res); isMappingShad = true; //DRAW glEnableVertexAttribArray(sm_attr_coord3d); glVertexAttribPointer(sm_attr_coord3d, 3, GL_FLOAT, GL_FALSE, 5*4, scene); glDrawArrays(GL_TRIANGLES, 0, 14*3); glDisableVertexAttribArray(sm_attr_coord3d); isMappingShad = false; glBindFramebuffer(GL_FRAMEBUFFER, 0); printf("Render Sbuf in %dms (GLerr: %d)\n", SDL_GetTicks() - ticks, glGetError()); return 0; } This is the full code for the POC shadow mapping project (C++) (Requires SDL 1.2, SDL-image 1.2, GLEW (1.5) and GLM development headers.) initShadowMap is called, followed by loadSMap, the scene is drawn from the camera POV and then dnitShadowMap is called. I followed this tutorial originally (Along with another more comprehensive tutorial which has disappeared as this guy re-configured his site but used to be here (404).) I've ensured that the scene is visible (as can be seen within the full project) to the light source (which uses an orthogonal projection matrix.) Shader utilities function fine in non-shadow-mapped projects. I should also note that at no point is the GL error state set. What am I doing wrong here and why did this not cause problems on my AMD card? (System: Ubuntu 12.04, Linux 3.2.0-49-generic, 64 bit, with the nvidia-experimental-310 driver package. All other games are functioning fine so it's most likely not a card/driver issue.)
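    For reference, the depth read-back described above boils down to a call like the following, assuming the depth-only FBO is still bound; it only restates the debugging step already performed, it is not a fix:

        // Hedged sketch of the glReadPixels depth check mentioned above (fragment only).
        std::vector<float> depth(s_res * s_res);
        glReadPixels(0, 0, s_res, s_res, GL_DEPTH_COMPONENT, GL_FLOAT, depth.data());
        // A buffer that is 1.0f everywhere means nothing was rasterized into the depth attachment.
        printf("depth[0] = %f\n", depth[0]);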


  • Upgrading SSIS Custom Components for SQL Server 2012

    Having finally got around to upgrading my custom components to SQL Server 2012, I thought I’d share some notes on the process. One of the goals was minimal duplication, so the same code files are used to build the 2008 and 2012 components, I just have a separate project file. The high level steps are listed below, followed by some more details. Create a 2012 copy of the project file Upgrade project, just open the new project file is VS2010 Change target framework to .NET 4.0 Set conditional compilation symbol for DENALI Change any conditional code, including assembly version and UI type name Edit project file to change referenced assemblies for 2012 Change target framework to .NET 4.0 Open the project properties. On the Applications page, change the Target framework to .NET Framework 4. Set conditional compilation symbol for DENALI Re-open the project properties. On the Build tab, first change the Configuration to All Configurations, then set a Conditional compilation symbol of DENALI. Change any conditional code, including assembly version and UI type name The value doesn’t have to be DENALI, it can actually be anything you like, that is just what I use. It is how I control sections of code that vary between versions. There were several API changes between 2005 and 2008, as well as interface name changes. Whilst we don’t have the same issues between 2008 and 2012, I still have some sections of code that do change such as the assembly attributes. #if DENALI [assembly: AssemblyDescription("Data Generator Source for SQL Server Integration Services 2012")] [assembly: AssemblyCopyright("Copyright © 2012 Konesans Ltd")] [assembly: AssemblyVersion("3.0.0.0")] #else [assembly: AssemblyDescription("Data Generator Source for SQL Server Integration Services 2008")] [assembly: AssemblyCopyright("Copyright © 2008 Konesans Ltd")] [assembly: AssemblyVersion("2.0.0.0")] #endif The Visual Studio editor automatically formats the code based on the current compilation symbols, hence in this case the 2008 code is grey to indicate it is disabled. As you can see in the previous example I have distinct assembly version attributes, ensuring I can run both 2008 and 2012 versions of my component side by side. For custom components with a user interface, be sure to update the UITypeName property of the DtsTask or DtsPipelineComponent attributes. As above I use the conditional compilation symbol to control the code. #if DENALI [DtsTask ( DisplayName = "File Watcher Task", Description = "File Watcher Task", IconResource = "Konesans.Dts.Tasks.FileWatcherTask.FileWatcherTask.ico", UITypeName = "Konesans.Dts.Tasks.FileWatcherTask.FileWatcherTaskUI,Konesans.Dts.Tasks.FileWatcherTask,Version=3.0.0.0,Culture=Neutral,PublicKeyToken=b2ab4a111192992b", TaskContact = "File Watcher Task; Konesans Ltd; Copyright © 2012 Konesans Ltd; http://www.konesans.com" )] #else [DtsTask ( DisplayName = "File Watcher Task", Description = "File Watcher Task", IconResource = "Konesans.Dts.Tasks.FileWatcherTask.FileWatcherTask.ico", UITypeName = "Konesans.Dts.Tasks.FileWatcherTask.FileWatcherTaskUI,Konesans.Dts.Tasks.FileWatcherTask,Version=2.0.0.0,Culture=Neutral,PublicKeyToken=b2ab4a111192992b", TaskContact = "File Watcher Task; Konesans Ltd; Copyright © 2004-2008 Konesans Ltd; http://www.konesans.com" )] #endif public sealed class FileWatcherTask: Task, IDTSComponentPersist, IDTSBreakpointSite, IDTSSuspend { // .. code goes on... } Shown below is another example I found that needed changing. 
I borrow one of the MS editors, and use it against a custom property, but need to ensure I reference the correct version of the MS controls assembly. This section of code is actually shared between the 2005, 2008 and 2012 versions of my component hence it has test for both DENALI and KATMAI symbols. #if DENALI const string multiLineUI = "Microsoft.DataTransformationServices.Controls.ModalMultilineStringEditor, Microsoft.DataTransformationServices.Controls, Version=11.0.00.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91"; #elif KATMAI const string multiLineUI = "Microsoft.DataTransformationServices.Controls.ModalMultilineStringEditor, Microsoft.DataTransformationServices.Controls, Version=10.0.0.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91"; #else const string multiLineUI = "Microsoft.DataTransformationServices.Controls.ModalMultilineStringEditor, Microsoft.DataTransformationServices.Controls, Version=9.0.242.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91"; #endif // Create Match Expression parameter IDTSCustomPropertyCollection100 propertyCollection = outputColumn.CustomPropertyCollection; IDTSCustomProperty100 property = propertyCollection.New(); property = propertyCollection.New(); property.Name = MatchParams.Name; property.Description = MatchParams.Description; property.TypeConverter = typeof(MultilineStringConverter).AssemblyQualifiedName; property.UITypeEditor = multiLineUI; property.Value = MatchParams.DefaultValue; Edit project file to change referenced assemblies for 2012 We now need to edit the project file itself. Open the MyComponente2012.cproj  in you favourite text editor, and then perform a couple of find and replaces as listed below: Find Replace Comment Version=10.0.0.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91 Version=11.0.0.0, Culture=neutral, PublicKeyToken=89845dcd8080cc91 Change the assembly references version from SQL Server 2008 to SQL Server 2012. Microsoft SQL Server\100\ Microsoft SQL Server\110\ Change any assembly reference hint path locations from from SQL Server 2008 to SQL Server 2012. If you use any Build Events during development, such as copying the component assembly to the DTS folder, or calling GACUTIL to install it into the GAC, you can also change these now. An example of my new post-build event for a pipeline component is shown below, which uses the .NET 4.0 path for GACUTIL. It also uses the 110 folder location, instead of 100 for SQL Server 2008, but that was covered the the previous find and replace. "C:\Program Files (x86)\Microsoft SDKs\Windows\v7.0A\Bin\NETFX 4.0 Tools\gacutil.exe" /if "$(TargetPath)" copy "$(TargetPath)" "%ProgramFiles%\Microsoft SQL Server\110\DTS\PipelineComponents" /Y


  • Using Event Driven Programming in games, when is it beneficial?

    - by Arthur Wulf White
    I am learning ActionScript 3 and I see the Event flow adheres to the W3C recommendations. From what I learned events can only be captured by the dispatcher unless, the listener capturing the event is a DisplayObject on stage and a parent of the object firing the event. You can capture the events in the capture(before) or bubbling(after) phase depending on Listner and Event setup you use. Does this system lend itself well for game programming? When is this system useful? Could you give an example of a case where using events is a lot better than going without them? Are they somehow better for performance in games? Please do not mention events you must use to get a game running, like Event.ENTER_FRAME Or events that are required to get input from the user like, KeyboardEvent.KEY_DOWN and MouseEvent.CLICK. I am asking if there is any use in firing events that have nothing to do with user input, frame rendering and the likes(that are necessary). I am referring to cases where objects are communicating. Is this used to avoid storing a collection of objects that are on the stage? Thanks Here is some code I wrote as an example of event behavior in ActionScript 3, enjoy. package regression { import flash.display.Shape; import flash.display.Sprite; import flash.events.Event; import flash.events.EventDispatcher; import flash.events.KeyboardEvent; import flash.events.MouseEvent; import flash.events.EventPhase; /** * ... * @author ... */ public class Check_event_listening_1 extends Sprite { public const EVENT_DANCE : String = "dance"; public const EVENT_PLAY : String = "play"; public const EVENT_YELL : String = "yell"; private var baby : Shape = new Shape(); private var mom : Sprite = new Sprite(); private var stranger : EventDispatcher = new EventDispatcher(); public function Check_event_listening_1() { if (stage) init(); else addEventListener(Event.ADDED_TO_STAGE, init); } private function init(e:Event = null):void { trace("test begun"); addChild(mom); mom.addChild(baby); stage.addEventListener(EVENT_YELL, onEvent); this.addEventListener(EVENT_YELL, onEvent); mom.addEventListener(EVENT_YELL, onEvent); baby.addEventListener(EVENT_YELL, onEvent); stranger.addEventListener(EVENT_YELL, onEvent); trace("\nTest1 - Stranger yells with no bubbling"); stranger.dispatchEvent(new Event(EVENT_YELL, false)); trace("\nTest2 - Stranger yells with bubbling"); stranger.dispatchEvent(new Event(EVENT_YELL, true)); stage.addEventListener(EVENT_PLAY, onEvent); this.addEventListener(EVENT_PLAY, onEvent); mom.addEventListener(EVENT_PLAY, onEvent); baby.addEventListener(EVENT_PLAY, onEvent); stranger.addEventListener(EVENT_PLAY, onEvent); trace("\nTest3 - baby plays with no bubbling"); baby.dispatchEvent(new Event(EVENT_PLAY, false)); trace("\nTest4 - baby plays with bubbling"); baby.dispatchEvent(new Event(EVENT_PLAY, true)); trace("\nTest5 - baby plays with bubbling but is not a child of mom"); mom.removeChild(baby); baby.dispatchEvent(new Event(EVENT_PLAY, true)); mom.addChild(baby); stage.addEventListener(EVENT_DANCE, onEvent, true); this.addEventListener(EVENT_DANCE, onEvent, true); mom.addEventListener(EVENT_DANCE, onEvent, true); baby.addEventListener(EVENT_DANCE, onEvent); trace("\nTest6 - Mom dances without bubbling - everyone is listening during capture phase(not target and bubble phase)"); mom.dispatchEvent(new Event(EVENT_DANCE, false)); trace("\nTest7 - Mom dances with bubbling - everyone is listening during capture phase(not target and bubble phase)"); mom.dispatchEvent(new Event(EVENT_DANCE, true)); } 
private function onEvent(e : Event):void { trace("Event was captured"); trace("\nTYPE : ", e.type, "\nTARGET : ", objToName(e.target), "\nCURRENT TARGET : ", objToName(e.currentTarget), "\nPHASE : ", phaseToString(e.eventPhase)); } private function phaseToString(phase : int):String { switch(phase) { case EventPhase.AT_TARGET : return "TARGET"; case EventPhase.BUBBLING_PHASE : return "BUBBLING"; case EventPhase.CAPTURING_PHASE : return "CAPTURE"; default: return "UNKNOWN"; } } private function objToName(obj : Object):String { if (obj == stage) return "STAGE"; else if (obj == this) return "MAIN"; else if (obj == mom) return "Mom"; else if (obj == baby) return "Baby"; else if (obj == stranger) return "Stranger"; else return "Unknown" } } } /*result : test begun Test1 - Stranger yells with no bubbling Event was captured TYPE : yell TARGET : Stranger CURRENT TARGET : Stranger PHASE : TARGET Test2 - Stranger yells with bubbling Event was captured TYPE : yell TARGET : Stranger CURRENT TARGET : Stranger PHASE : TARGET Test3 - baby plays with no bubbling Event was captured TYPE : play TARGET : Baby CURRENT TARGET : Baby PHASE : TARGET Test4 - baby plays with bubbling Event was captured TYPE : play TARGET : Baby CURRENT TARGET : Baby PHASE : TARGET Event was captured TYPE : play TARGET : Baby CURRENT TARGET : Mom PHASE : BUBBLING Event was captured TYPE : play TARGET : Baby CURRENT TARGET : MAIN PHASE : BUBBLING Event was captured TYPE : play TARGET : Baby CURRENT TARGET : STAGE PHASE : BUBBLING Test5 - baby plays with bubbling but is not a child of mom Event was captured TYPE : play TARGET : Baby CURRENT TARGET : Baby PHASE : TARGET Test6 - Mom dances without bubbling - everyone is listening during capture phase(not target and bubble phase) Event was captured TYPE : dance TARGET : Mom CURRENT TARGET : STAGE PHASE : CAPTURE Event was captured TYPE : dance TARGET : Mom CURRENT TARGET : MAIN PHASE : CAPTURE Test7 - Mom dances with bubbling - everyone is listening during capture phase(not target and bubble phase) Event was captured TYPE : dance TARGET : Mom CURRENT TARGET : STAGE PHASE : CAPTURE Event was captured TYPE : dance TARGET : Mom CURRENT TARGET : MAIN PHASE : CAPTURE */


  • Accelerating 2d object collision with other objects [on hold]

    - by Silent Cave
    Making my very first attempt at game programming with SDL/OpenGL. So I made an object Actor witch can move in all four sides with acceleration. And there are bunch of other rectangles to collide to. the image Movement and collision detection alghorythms work just fine by itself, but when combined to prevent the green rectangle from crossing black rectangles, it gives me a kind of funny resault. Let me show you the code first: from Actor.h class Actor{ public: SDL_Rect * dim; alphaColor * col; float speed; float xlGrav, xrGrav, yuGrav, ydGrav; float acceleration; bool left,right,up,down; Actor(SDL_Rect * dim,alphaColor * col, float speed, float acceleration); bool colides(const SDL_Rect & rect); bool check_for_collisions(const std::vector<SDL_Rect*> & gameObjects ); }; from actor.cpp bool Actor::colides(const SDL_Rect & rect){ if (dim->x + dim->w < rect.x) return false; if (dim->x > rect.x + rect.w) return false; if (dim->y + dim->h < rect.y) return false; if (dim->y > rect.y + rect.h) return false; return true; } movement logic from main.cpp if (actor->left){ if(actor->xlGrav < actor->speed){ actor->xlGrav += actor->speed*actor->acceleration; }else actor->xlGrav = actor->speed; actor->dim->x -= actor->xlGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->x += actor->xlGrav; actor->xlGrav = 0; } } if (!actor->left){ if(actor->xlGrav - actor->speed*actor->acceleration > 0){ actor->xlGrav -= actor->speed*actor->acceleration; }else actor->xlGrav = 0; actor->dim->x -= actor->xlGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->x += actor->xlGrav; actor->xlGrav = 0; } } if (actor->right){ if(actor->xrGrav < actor->speed){ actor->xrGrav += actor->speed*actor->acceleration; }else actor->xrGrav = actor->speed; actor->dim->x += actor->xrGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->x -= actor->xrGrav; actor->xrGrav = 0; } } if (!actor->right){ if(actor->xrGrav - actor->speed*actor->acceleration > 0){ actor->xrGrav -= actor->speed*actor->acceleration; }else actor->xrGrav = 0; actor->dim->x += actor->xrGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->x -= actor->xrGrav; actor->xrGrav = 0; } } if (actor->up){ if(actor->yuGrav < actor->speed){ actor->yuGrav += actor->speed*actor->acceleration; }else actor->yuGrav = actor->speed; actor->dim->y -= actor->yuGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->y += actor->yuGrav; actor->yuGrav = 0; } } if (!actor->up){ if(actor->yuGrav - actor->speed*actor->acceleration > 0){ actor->yuGrav -= actor->speed*actor->acceleration; }else actor->yuGrav = 0; actor->dim->y -= actor->yuGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->y += actor->yuGrav; actor->yuGrav = 0; } } if (actor->down){ if(actor->ydGrav < actor->speed){ actor->ydGrav += actor->speed*actor->acceleration; }else actor->ydGrav = actor->speed; actor->dim->y += actor->ydGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->y -= actor->ydGrav; actor->ydGrav = 0; } } if (!actor->down){ if(actor->ydGrav - actor->speed*actor->acceleration > 0){ actor->ydGrav -= actor->speed*actor->acceleration; }else actor->ydGrav = 0; actor->dim->y += actor->ydGrav; if(actor->check_for_collisions(gameObjects)){ actor->dim->y -= actor->ydGrav; actor->ydGrav = 0; } } So, if the green box approaches an obstacle from up or left, everything goes as planned - object stops, and it's acceleration drops to zero. 
But if it approaches from the bottom or the right, it enters the obstacle's inner space and starts a strange dance, or rather moves as if the controls were inverted. What am I failing to see?
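    Not the author's code, but for comparison, one common way to structure this kind of AABB movement is to move and resolve one axis at a time, stepping back out of whatever was penetrated. A hedged sketch assuming the same Actor layout as above (moveAndCollide is a hypothetical helper, not part of the original class):

        // Hypothetical per-axis move-then-resolve sketch.
        void moveAndCollide(Actor &actor, const std::vector<SDL_Rect*> &gameObjects)
        {
            // X axis first
            float dx = actor.xrGrav - actor.xlGrav;
            actor.dim->x += dx;
            if (actor.check_for_collisions(gameObjects)) {
                actor.dim->x -= dx;                 // step back out of the obstacle
                actor.xrGrav = actor.xlGrav = 0;
            }
            // then Y axis
            float dy = actor.ydGrav - actor.yuGrav;
            actor.dim->y += dy;
            if (actor.check_for_collisions(gameObjects)) {
                actor.dim->y -= dy;
                actor.ydGrav = actor.yuGrav = 0;
            }
        }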


  • Space invaders clone not moving properly

    - by ThePlan
    I'm trying to make a basic space invaders clone in allegro 5, I've got my game set up, basic events and such, here is the code: #include <allegro5/allegro.h> #include <allegro5/allegro_image.h> #include <allegro5/allegro_primitives.h> #include <allegro5/allegro_font.h> #include <allegro5/allegro_ttf.h> #include "Entity.h" // GLOBALS ========================================== const int width = 500; const int height = 500; const int imgsize = 3; bool key[5] = {false, false, false, false, false}; bool running = true; bool draw = true; // FUNCTIONS ======================================== void initSpaceship(Spaceship &ship); void moveSpaceshipRight(Spaceship &ship); void moveSpaceshipLeft(Spaceship &ship); void initInvader(Invader &invader); void moveInvaderRight(Invader &invader); void moveInvaderLeft(Invader &invader); void initBullet(Bullet &bullet); void fireBullet(); void doCollision(); void updateInvaders(); void drawText(); enum key_t { UP, DOWN, LEFT, RIGHT, SPACE }; enum source_t { INVADER, DEFENDER }; int main(void) { if(!al_init()) { return -1; } Spaceship ship; Invader invader; Bullet bullet; al_init_image_addon(); al_install_keyboard(); al_init_font_addon(); al_init_ttf_addon(); ALLEGRO_DISPLAY *display = al_create_display(width, height); ALLEGRO_EVENT_QUEUE *event_queue = al_create_event_queue(); ALLEGRO_TIMER *timer = al_create_timer(1.0 / 60); ALLEGRO_BITMAP *images[imgsize]; ALLEGRO_FONT *font1 = al_load_font("arial.ttf", 20, 0); al_register_event_source(event_queue, al_get_keyboard_event_source()); al_register_event_source(event_queue, al_get_display_event_source(display)); al_register_event_source(event_queue, al_get_timer_event_source(timer)); images[0] = al_load_bitmap("defender.bmp"); images[1] = al_load_bitmap("invader.bmp"); images[2] = al_load_bitmap("explosion.bmp"); al_convert_mask_to_alpha(images[0], al_map_rgb(0, 0, 0)); al_convert_mask_to_alpha(images[1], al_map_rgb(0, 0, 0)); al_convert_mask_to_alpha(images[2], al_map_rgb(0, 0, 0)); initSpaceship(ship); initBullet(bullet); initInvader(invader); al_start_timer(timer); while(running) { ALLEGRO_EVENT ev; al_wait_for_event(event_queue, &ev); if(ev.type == ALLEGRO_EVENT_TIMER) { draw = true; if(key[RIGHT] == true) moveSpaceshipRight(ship); if(key[LEFT] == true) moveSpaceshipLeft(ship); } else if(ev.type == ALLEGRO_EVENT_DISPLAY_CLOSE) running = false; else if(ev.type == ALLEGRO_EVENT_KEY_DOWN) { switch(ev.keyboard.keycode) { case ALLEGRO_KEY_ESCAPE: running = false; break; case ALLEGRO_KEY_LEFT: key[LEFT] = true; break; case ALLEGRO_KEY_RIGHT: key[RIGHT] = true; break; case ALLEGRO_KEY_SPACE: key[SPACE] = true; break; } } else if(ev.type == ALLEGRO_KEY_UP) { switch(ev.keyboard.keycode) { case ALLEGRO_KEY_LEFT: key[LEFT] = false; break; case ALLEGRO_KEY_RIGHT: key[RIGHT] = false; break; case ALLEGRO_KEY_SPACE: key[SPACE] = false; break; } } if(draw && al_is_event_queue_empty(event_queue)) { draw = false; al_draw_bitmap(images[0], ship.pos_x, ship.pos_y, 0); al_flip_display(); al_clear_to_color(al_map_rgb(0, 0, 0)); } } al_destroy_font(font1); al_destroy_event_queue(event_queue); al_destroy_timer(timer); for(int i = 0; i < imgsize; i++) al_destroy_bitmap(images[i]); al_destroy_display(display); } // FUNCTION LOGIC ====================================== void initSpaceship(Spaceship &ship) { ship.lives = 3; ship.speed = 2; ship.pos_x = width / 2; ship.pos_y = height - 20; } void initInvader(Invader &invader) { invader.health = 100; invader.count = 40; invader.speed = 0.5; invader.pos_x = 300; invader.pos_y = 300; } 
void initBullet(Bullet &bullet) { bullet.speed = 10; } void moveSpaceshipRight(Spaceship &ship) { ship.pos_x += ship.speed; if(ship.pos_x >= width) ship.pos_x = width-30; } void moveSpaceshipLeft(Spaceship &ship) { ship.pos_x -= ship.speed; if(ship.pos_x <= 0) ship.pos_x = 0+30; } However, it's not behaving the way I want it to; the ship's movement is abnormal. I specified that the ship should only move while the right/left key is held down, yet it keeps moving constantly in the direction of the key pressed and never stops, even though it should only move while my key is down. Even stranger, when I press the opposite key the ship stops completely, no matter what else I press. What's wrong with the code? Why does the ship keep moving even though I specified it should only move while a key is down?
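    One Allegro 5 detail worth noting when reading the event loop above: ALLEGRO_EVENT_KEY_UP is the event-type constant reported for key releases, while ALLEGRO_KEY_UP is merely the keycode of the up-arrow key. A key-release branch is normally written against the event type, roughly as sketched below (fragment only, using the same key[] flags as the code above):

        // Inside the event loop: clear the movement flags when a key-release event arrives.
        if (ev.type == ALLEGRO_EVENT_KEY_UP)
        {
            switch (ev.keyboard.keycode)
            {
                case ALLEGRO_KEY_LEFT:  key[LEFT]  = false; break;
                case ALLEGRO_KEY_RIGHT: key[RIGHT] = false; break;
                case ALLEGRO_KEY_SPACE: key[SPACE] = false; break;
            }
        }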


  • Objects won't render when Texture Compression + Mipmapping is Enabled

    - by felipedrl
    I'm optimizing my game and I've just implemented compressed (DXTn) texture loading in OpenGL. I've worked my way removing bugs but I can't figure out this one: objects w/ DXTn + mipmapped textures are not being rendered. It's not like they are appearing with a flat color, they just don't appear at all. DXTn textured objs render and mipmapped non-compressed textures render just fine. The texture in question is 256x256 I generate the mips all the way down 4x4, i.e 1 block. I've checked on gDebugger and it display all the levels (7) just fine. I'm using GL_LINEAR_MIPMAP_NEAREST for min filter and GL_LINEAR for mag one. The texture is being compressed and mipmaps being created offline with Paint.NET tool using super sampling method. (I also tried bilinear just in case) Source follow: [SNIPPET 1: Loading DDS into sys memory + Initializing Object] // Read header DDSHeader header; file.read(reinterpret_cast<char*>(&header), sizeof(DDSHeader)); uint pos = static_cast<uint>(file.tellg()); file.seekg(0, std::ios_base::end); uint dataSizeInBytes = static_cast<uint>(file.tellg()) - pos; file.seekg(pos, std::ios_base::beg); // Read file data mData = new unsigned char[dataSizeInBytes]; file.read(reinterpret_cast<char*>(mData), dataSizeInBytes); file.close(); mMipmapCount = header.mipmapcount; mHeight = header.height; mWidth = header.width; mCompressionType = header.pf.fourCC; // Only support files divisible by 4 (for compression blocks algorithms) massert(mWidth % 4 == 0 && mHeight % 4 == 0); massert(mCompressionType == NO_COMPRESSION || mCompressionType == COMPRESSION_DXT1 || mCompressionType == COMPRESSION_DXT3 || mCompressionType == COMPRESSION_DXT5); // Allow textures up to 65536x65536 massert(header.mipmapcount <= MAX_MIPMAP_LEVELS); mTextureFilter = TextureFilter::LINEAR; if (mMipmapCount > 0) { mMipmapFilter = MipmapFilter::NEAREST; } else { mMipmapFilter = MipmapFilter::NO_MIPMAP; } mBitsPerPixel = header.pf.bitcount; if (mCompressionType == NO_COMPRESSION) { if (header.pf.flags & DDPF_ALPHAPIXELS) { // The only format supported w/ alpha is A8R8G8B8 massert(header.pf.amask == 0xFF000000 && header.pf.rmask == 0xFF0000 && header.pf.gmask == 0xFF00 && header.pf.bmask == 0xFF); mInternalFormat = GL_RGBA8; mFormat = GL_BGRA; mDataType = GL_UNSIGNED_BYTE; } else { massert(header.pf.rmask == 0xFF0000 && header.pf.gmask == 0xFF00 && header.pf.bmask == 0xFF); mInternalFormat = GL_RGB8; mFormat = GL_BGR; mDataType = GL_UNSIGNED_BYTE; } } else { uint blockSizeInBytes = 16; switch (mCompressionType) { case COMPRESSION_DXT1: blockSizeInBytes = 8; if (header.pf.flags & DDPF_ALPHAPIXELS) { mInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; } else { mInternalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } break; case COMPRESSION_DXT3: mInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; break; case COMPRESSION_DXT5: mInternalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; break; default: // Not Supported (DXT2, DXT4 or any compression format) massert(false); } } [SNIPPET 2: Uploading into video memory] massert(mData != NULL); glGenTextures(1, &mHandle); massert(mHandle!=0); glBindTexture(GL_TEXTURE_2D, mHandle); commitFiltering(); uint offset = 0; Renderer* renderer = Renderer::getInstance(); switch (mInternalFormat) { case GL_RGB: case GL_RGBA: case GL_RGB8: case GL_RGBA8: for (uint i = 0; i < mMipmapCount + 1; ++i) { uint width = std::max(1U, mWidth >> i); uint height = std::max(1U, mHeight >> i); glTexImage2D(GL_TEXTURE_2D, i, mInternalFormat, width, height, mHasBorder, mFormat, mDataType, &mData[offset]); offset 
+= width * height * (mBitsPerPixel / 8); } break; case GL_COMPRESSED_RGB_S3TC_DXT1_EXT: case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT: case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT: case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: { uint blockSize = 16; if (mInternalFormat == GL_COMPRESSED_RGB_S3TC_DXT1_EXT || mInternalFormat == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) { blockSize = 8; } uint width = mWidth; uint height = mHeight; for (uint i = 0; i < mMipmapCount + 1; ++i) { uint nBlocks = ((width + 3) / 4) * ((height + 3) / 4); // Only POT textures allowed for mipmapping massert(width % 4 == 0 && height % 4 == 0); glCompressedTexImage2D(GL_TEXTURE_2D, i, mInternalFormat, width, height, mHasBorder, nBlocks * blockSize, &mData[offset]); offset += nBlocks * blockSize; if (width <= 4 && height <= 4) { break; } width = std::max(4U, width / 2); height = std::max(4U, height / 2); } break; } default: // Not Supported massert(false); } Also, I don't understand the "+3" in the block size computation, but looking for a solution to my problem I've encountered people defining it that way. I guess it won't make a difference for POT textures, but I put it in just in case. Thanks.
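    Regarding the "+3": ((width + 3) / 4) is just integer ceiling division by the 4-pixel block size, so dimensions that are not multiples of 4 still round up to a whole block; for power-of-two sizes of 4 or larger it changes nothing, as suspected. A tiny illustration:

        // Integer ceiling division by the DXT block size of 4.
        unsigned int blocksAcross(unsigned int width)
        {
            return (width + 3) / 4;   // 256 -> 64 (same as 256/4), 10 -> 3 (plain 10/4 would give 2)
        }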


  • C++ strongly typed typedef

    - by Kian
    I've been trying to think of a way of declaring strongly typed typedefs, to catch a certain class of bugs in the compilation stage. It's often the case that I'll typedef an int into several types of ids, or a vector to position or velocity: typedef int EntityID; typedef int ModelID; typedef Vector3 Position; typedef Vector3 Velocity; This can make the intent of code more clear, but after a long night of coding one might make silly mistakes like comparing different kinds of ids, or adding a position to a velocity perhaps. EntityID eID; ModelID mID; if ( eID == mID ) // <- Compiler sees nothing wrong { /*bug*/ } Position p; Velocity v; Position newP = p + v; // bug, meant p + v*s but compiler sees nothing wrong Unfortunately, suggestions I've found for strongly typed typedefs include using boost, which at least for me isn't a possibility (I do have c++11 at least). So after a bit of thinking, I came upon this idea, and wanted to run it by someone. First, you declare the base type as a template. The template parameter isn't used for anything in the definition, however: template < typename T > class IDType { unsigned int m_id; public: IDType( unsigned int const& i_id ): m_id {i_id} {}; friend bool operator==<T>( IDType<T> const& i_lhs, IDType<T> const& i_rhs ); }; Friend functions actually need to be forward declared before the class definition, which requires a forward declaration of the template class. We then define all the members for the base type, just remembering that it's a template class. Finally, when we want to use it, we typedef it as: class EntityT; typedef IDType<EntityT> EntityID; class ModelT; typedef IDType<ModelT> ModelID; The types are now entirely separate. Functions that take an EntityID will throw a compiler error if you try to feed them a ModelID instead, for example. Aside from having to declare the base types as templates, with the issues that entails, it's also fairly compact. I was hoping anyone had comments or critiques about this idea? One issue that came to mind while writing this, in the case of positions and velocities for example, would be that I can't convert between types as freely as before. Where before multiplying a vector by a scalar would give another vector, so I could do: typedef float Time; typedef Vector3 Position; typedef Vector3 Velocity; Time t = 1.0f; Position p = { 0.0f }; Velocity v = { 1.0f, 0.0f, 0.0f }; Position newP = p + v*t; With my strongly typed typedef I'd have to tell the compiler that multypling a Velocity by a Time results in a Position. class TimeT; typedef Float<TimeT> Time; class PositionT; typedef Vector3<PositionT> Position; class VelocityT; typedef Vector3<VelocityT> Velocity; Time t = 1.0f; Position p = { 0.0f }; Velocity v = { 1.0f, 0.0f, 0.0f }; Position newP = p + v*t; // Compiler error To solve this, I think I'd have to specialize every conversion explicitly, which can be kind of a bother. On the other hand, this limitation can help prevent other kinds of errors (say, multiplying a Velocity by a Distance, perhaps, which wouldn't make sense in this domain). So I'm torn, and wondering if people have any opinions on my original issue, or my approach to solving it.
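    Purely as a sketch of the "specialize every conversion explicitly" point above, using the question's hypothetical tag-based typedefs: only the cross-type operations that make sense in the domain get declared by hand, and everything else stays a compile error. This assumes Vector3<Tag> exposes x, y, z members and that Float<Tag> is convertible to float, which the question does not spell out:

        // Hedged sketch: hand-declared domain conversions for the tagged types.
        Position operator*(Velocity const &v, Time const &t)
        {
            // a velocity times a time yields a displacement, represented here as a Position
            return Position{ v.x * float(t), v.y * float(t), v.z * float(t) };
        }

        Position operator+(Position const &p, Position const &d)
        {
            return Position{ p.x + d.x, p.y + d.y, p.z + d.z };
        }

        // Now `Position newP = p + v * t;` compiles, while `p + v` or `v * t * t` still do not.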

    Read the article

  • The Incremental Architect&rsquo;s Napkin - #5 - Design functions for extensibility and readability

    - by Ralf Westphal
    Originally posted on: http://geekswithblogs.net/theArchitectsNapkin/archive/2014/08/24/the-incremental-architectrsquos-napkin---5---design-functions-for.aspx The functionality of programs is entered via Entry Points. So what we´re talking about when designing software is a bunch of functions handling the requests represented by and flowing in through those Entry Points. Designing software thus consists of at least three phases: Analyzing the requirements to find the Entry Points and their signatures Designing the functionality to be executed when those Entry Points get triggered Implementing the functionality according to the design aka coding I presume, you´re familiar with phase 1 in some way. And I guess you´re proficient in implementing functionality in some programming language. But in my experience developers in general are not experienced in going through an explicit phase 2. “Designing functionality? What´s that supposed to mean?” you might already have thought. Here´s my definition: To design functionality (or functional design for short) means thinking about… well, functions. You find a solution for what´s supposed to happen when an Entry Point gets triggered in terms of functions. A conceptual solution that is, because those functions only exist in your head (or on paper) during this phase. But you may have guess that, because it´s “design” not “coding”. And here is, what functional design is not: It´s not about logic. Logic is expressions (e.g. +, -, && etc.) and control statements (e.g. if, switch, for, while etc.). Also I consider calling external APIs as logic. It´s equally basic. It´s what code needs to do in order to deliver some functionality or quality. Logic is what´s doing that needs to be done by software. Transformations are either done through expressions or API-calls. And then there is alternative control flow depending on the result of some expression. Basically it´s just jumps in Assembler, sometimes to go forward (if, switch), sometimes to go backward (for, while, do). But calling your own function is not logic. It´s not necessary to produce any outcome. Functionality is not enhanced by adding functions (subroutine calls) to your code. Nor is quality increased by adding functions. No performance gain, no higher scalability etc. through functions. Functions are not relevant to functionality. Strange, isn´t it. What they are important for is security of investment. By introducing functions into our code we can become more productive (re-use) and can increase evolvability (higher unterstandability, easier to keep code consistent). That´s no small feat, however. Evolvable code can hardly be overestimated. That´s why to me functional design is so important. It´s at the core of software development. To sum this up: Functional design is on a level of abstraction above (!) logical design or algorithmic design. Functional design is only done until you get to a point where each function is so simple you are very confident you can easily code it. Functional design an logical design (which mostly is coding, but can also be done using pseudo code or flow charts) are complementary. Software needs both. If you start coding right away you end up in a tangled mess very quickly. Then you need back out through refactoring. Functional design on the other hand is bloodless without actual code. It´s just a theory with no experiments to prove it. But how to do functional design? An example of functional design Let´s assume a program to de-duplicate strings. 
The user enters a number of strings separated by commas, e.g. a, b, a, c, d, b, e, c, a. And the program is supposed to clear this list of all doubles, e.g. a, b, c, d, e. There is only one Entry Point to this program: the user triggers the de-duplication by starting the program with the string list on the command line C:\>deduplicate "a, b, a, c, d, b, e, c, a" a, b, c, d, e …or by clicking on a GUI button. This leads to the Entry Point function to get called. It´s the program´s main function in case of the batch version or a button click event handler in the GUI version. That´s the physical Entry Point so to speak. It´s inevitable. What then happens is a three step process: Transform the input data from the user into a request. Call the request handler. Transform the output of the request handler into a tangible result for the user. Or to phrase it a bit more generally: Accept input. Transform input into output. Present output. This does not mean any of these steps requires a lot of effort. Maybe it´s just one line of code to accomplish it. Nevertheless it´s a distinct step in doing the processing behind an Entry Point. Call it an aspect or a responsibility - and you will realize it most likely deserves a function of its own to satisfy the Single Responsibility Principle (SRP). Interestingly the above list of steps is already functional design. There is no logic, but nevertheless the solution is described - albeit on a higher level of abstraction than you might have done yourself. But it´s still on a meta-level. The application to the domain at hand is easy, though: Accept string list from command line De-duplicate Present de-duplicated strings on standard output And this concrete list of processing steps can easily be transformed into code:static void Main(string[] args) { var input = Accept_string_list(args); var output = Deduplicate(input); Present_deduplicated_string_list(output); } Instead of a big problem there are three much smaller problems now. If you think each of those is trivial to implement, then go for it. You can stop the functional design at this point. But maybe, just maybe, you´re not so sure how to go about with the de-duplication for example. Then just implement what´s easy right now, e.g.private static string Accept_string_list(string[] args) { return args[0]; } private static void Present_deduplicated_string_list( string[] output) { var line = string.Join(", ", output); Console.WriteLine(line); } Accept_string_list() contains logic in the form of an API-call. Present_deduplicated_string_list() contains logic in the form of an expression and an API-call. And then repeat the functional design for the remaining processing step. What´s left is the domain logic: de-duplicating a list of strings. How should that be done? Without any logic at our disposal during functional design you´re left with just functions. So which functions could make up the de-duplication? Here´s a suggestion: De-duplicate Parse the input string into a true list of strings. Register each string in a dictionary/map/set. That way duplicates get cast away. Transform the data structure into a list of unique strings. Processing step 2 obviously was the core of the solution. That´s where real creativity was needed. That´s the core of the domain. 
But now after this refinement the implementation of each step is easy again:private static string[] Parse_string_list(string input) { return input.Split(',') .Select(s => s.Trim()) .ToArray(); } private static Dictionary<string,object> Compile_unique_strings(string[] strings) { return strings.Aggregate( new Dictionary<string, object>(), (agg, s) => { agg[s] = null; return agg; }); } private static string[] Serialize_unique_strings( Dictionary<string,object> dict) { return dict.Keys.ToArray(); } With these three additional functions Main() now looks like this:static void Main(string[] args) { var input = Accept_string_list(args); var strings = Parse_string_list(input); var dict = Compile_unique_strings(strings); var output = Serialize_unique_strings(dict); Present_deduplicated_string_list(output); } I think that´s very understandable code: just read it from top to bottom and you know how the solution to the problem works. It´s a mirror image of the initial design: Accept string list from command line Parse the input string into a true list of strings. Register each string in a dictionary/map/set. That way duplicates get cast away. Transform the data structure into a list of unique strings. Present de-duplicated strings on standard output You can even re-generate the design by just looking at the code. Code and functional design thus are always in sync - if you follow some simple rules. But about that later. And as a bonus: all the functions making up the process are small - which means easy to understand, too. So much for an initial concrete example. Now it´s time for some theory. Because there is method to this madness ;-) The above has only scratched the surface. Introducing Flow Design Functional design starts with a given function, the Entry Point. Its goal is to describe the behavior of the program when the Entry Point is triggered using a process, not an algorithm. An algorithm consists of logic, a process on the other hand consists just of steps or stages. Each processing step transforms input into output or a side effect. Also it might access resources, e.g. a printer, a database, or just memory. Processing steps thus can rely on state of some sort. This is different from Functional Programming, where functions are supposed to not be stateful and not cause side effects.[1] In its simplest form a process can be written as a bullet point list of steps, e.g. Get data from user Output result to user Transform data Parse data Map result for output Such a compilation of steps - possibly on different levels of abstraction - often is the first artifact of functional design. It can be generated by a team in an initial design brainstorming. Next comes ordering the steps. What should happen first, what next etc.? Get data from user Parse data Transform data Map result for output Output result to user That´s great for a start into functional design. It´s better than starting to code right away on a given function using TDD. Please get me right: TDD is a valuable practice. But it can be unnecessarily hard if the scope of a functionn is too large. But how do you know beforehand without investing some thinking? And how to do this thinking in a systematic fashion? My recommendation: For any given function you´re supposed to implement first do a functional design. Then, once you´re confident you know the processing steps - which are pretty small - refine and code them using TDD. You´ll see that´s much, much easier - and leads to cleaner code right away. 
For more information on this approach, which I call “Informed TDD”, read my book of the same title. Thinking before coding is smart. And writing down the solution as a bunch of functions possibly is the simplest thing you can do, I´d say. It´s more according to the KISS (Keep It Simple, Stupid) principle than returning constants or other trivial stuff TDD development often is started with. So far so good. A simple ordered list of processing steps will do to start with functional design. As shown in the above example such steps can easily be translated into functions. Moving from design to coding thus is simple. However, such a list does not scale. Processing is not always simple enough to be captured in a list. And then the list is just text. Again. Like code. That means the design is lacking visuality. Textual representations need more parsing by your brain than visual representations. Plus they are limited in their “dimensionality”: text just has one dimension, it´s sequential. Alternatives and parallelism are hard to encode in text. In addition the functional design using numbered lists lacks data. It´s not visible what the input, output, and state of the processing steps are. That´s why functional design should be done using a lightweight visual notation. No tool is necessary to draw such designs. Use pen and paper; a flipchart, a whiteboard, or even a napkin is sufficient. Visualizing processes The building block of the functional design notation is a functional unit. I mostly draw it like this: Something is done, it´s clear what goes in, it´s clear what comes out, and it´s clear what the processing step requires in terms of state or hardware. Whenever input flows into a functional unit it gets processed and output is produced and/or a side effect occurs. Flowing data is the driver of something happening. That´s why I call this approach to functional design Flow Design. It´s about data flow instead of control flow. Control flow like in algorithms is of no concern to functional design. Thinking about control flow simply is too low level. Once you start with control flow you easily get bogged down by tons of details. That´s what you want to avoid during design. Design is supposed to be quick, broad brush, abstract. It should give an overview. But what about all the details? As Robert C. Martin rightly said: “Programming is about detail”. Detail is a matter of code. Once you start coding the processing steps you designed you can worry about all the detail you want. Functional design does not eliminate all the nitty gritty details. It just postpones tackling them. To me that´s also an example of the SRP. Functional design has the responsibility to come up with a solution to a problem posed by a single function (Entry Point). And later coding has the responsibility to implement the solution down to the last detail (i.e. statement, API-call). TDD unfortunately mixes both responsibilities. It´s just coding - and thereby trying to find detailed implementations (green phase) plus getting the design right (refactoring). To me that´s one reason why TDD has failed to deliver on its promise for many developers. Using functional units as building blocks of functional design, processes can be depicted very easily. Here´s the initial process for the example problem: For each processing step draw a functional unit and label it. Choose a verb or an “action phrase” as a label, not a noun. Functional design is about activities, not state or structure. Then make the output of an upstream step the input of a downstream step.
Finally think about the data that should flow between the functional units. Write the data above the arrows connecting the functional units in the direction of the data flow. Enclose the data description in brackets. That way you can clearly see if all flows have already been specified. Empty brackets mean “no data is flowing”, but nevertheless a signal is sent. A name like “list” or “strings” in brackets describes the data content. Use lower case labels for that purpose. A name starting with an upper case letter like “String” or “Customer” on the other hand signifies a data type. If you like, you also can combine descriptions with data types by separating them with a colon, e.g. (list:string) or (strings:string[]). But these are just suggestions from my practice with Flow Design. You can do it differently, if you like. Just be sure to be consistent. Flows wired-up in this manner I call one-dimensional (1D). Each functional unit just has one input and/or one output. A functional unit without an output is possible. It´s like a black hole sucking up input without producing any output. Instead it produces side effects. A functional unit without an input, though, does not make much sense. When should it start to work? What´s the trigger? That´s why in the above process even the first processing step has an input. If you like, view such 1D-flows as pipelines. Data is flowing through them from left to right. But as you can see, it´s not always the same data. It gets transformed along its passage: (args) becomes a (list) which is turned into (strings). The Principle of Mutual Oblivion A very characteristic trait of flows put together from functional units is: no functional unit knows another one. They are all completely independent of each other. Functional units don´t know where their input is coming from (or even when it´s gonna arrive). They just specify a range of values they can process. And they promise a certain behavior upon input arriving. Also they don´t know where their output is going. They just produce it in their own time independent of other functional units. That means at least conceptually all functional units work in parallel. Functional units don´t know their “deployment context”. They know nothing about the overall flow they are placed in. They are just consuming input from some upstream, and producing output for some downstream. That makes functional units very easy to test. At least as long as they don´t depend on state or resources. I call this the Principle of Mutual Oblivion (PoMO). Functional units are oblivious of others as well as an overall context/purpose. They are just parts of a whole focused on a single responsibility. How the whole is built, how a larger goal is achieved, is of no concern to the single functional units. By building software in such a manner, functional design interestingly follows nature. Nature´s building blocks for organisms also follow the PoMO. The cells forming your body do not know each other. Take a nerve cell “controlling” a muscle cell for example:[2] The nerve cell does not know anything about muscle cells, let alone the specific muscle cell it is “attached to”. Likewise the muscle cell does not know anything about nerve cells, let alone a specific nerve cell “attached to” it. Saying “the nerve cell is controlling the muscle cell” thus only makes sense when viewing both from the outside. “Control” is a concept of the whole, not of its parts. Control is created by wiring-up parts in a certain way. Both cells are mutually oblivious.
Both just follow a contract. One produces Acetylcholine (ACh) as output, the other consumes ACh as input. Where the ACh is going, where it´s coming from neither cell cares about. Million years of evolution have led to this kind of division of labor. And million years of evolution have produced organism designs (DNA) which lead to the production of these different cell types (and many others) and also to their co-location. The result: the overall behavior of an organism. How and why this happened in nature is a mystery. For our software, though, it´s clear: functional and quality requirements needs to be fulfilled. So we as developers have to become “intelligent designers” of “software cells” which we put together to form a “software organism” which responds in satisfying ways to triggers from it´s environment. My bet is: If nature gets complex organisms working by following the PoMO, who are we to not apply this recipe for success to our much simpler “machines”? So my rule is: Wherever there is functionality to be delivered, because there is a clear Entry Point into software, design the functionality like nature would do it. Build it from mutually oblivious functional units. That´s what Flow Design is about. In that way it´s even universal, I´d say. Its notation can also be applied to biology: Never mind labeling the functional units with nouns. That´s ok in Flow Design. You´ll do that occassionally for functional units on a higher level of abstraction or when their purpose is close to hardware. Getting a cockroach to roam your bedroom takes 1,000,000 nerve cells (neurons). Getting the de-duplication program to do its job just takes 5 “software cells” (functional units). Both, though, follow the same basic principle. Translating functional units into code Moving from functional design to code is no rocket science. In fact it´s straightforward. There are two simple rules: Translate an input port to a function. Translate an output port either to a return statement in that function or to a function pointer visible to that function. The simplest translation of a functional unit is a function. That´s what you saw in the above example. Functions are mutually oblivious. That why Functional Programming likes them so much. It makes them composable. Which is the reason, nature works according to the PoMO. Let´s be clear about one thing: There is no dependency injection in nature. For all of an organism´s complexity no DI container is used. Behavior is the result of smooth cooperation between mutually oblivious building blocks. Functions will often be the adequate translation for the functional units in your designs. But not always. Take for example the case, where a processing step should not always produce an output. Maybe the purpose is to filter input. Here the functional unit consumes words and produces words. But it does not pass along every word flowing in. Some words are swallowed. Think of a spell checker. It probably should not check acronyms for correctness. There are too many of them. Or words with no more than two letters. Such words are called “stop words”. In the above picture the optionality of the output is signified by the astrisk outside the brackets. It means: Any number of (word) data items can flow from the functional unit for each input data item. It might be none or one or even more. This I call a stream of data. Such behavior cannot be translated into a function where output is generated with return. Because a function always needs to return a value. 
So the output port is translated into a function pointer or continuation which gets passed to the subroutine when called:[3]void filter_stop_words( string word, Action<string> onNoStopWord) { if (...check if not a stop word...) onNoStopWord(word); } If you want to be nitpicky you might call such a function pointer parameter an injection. And technically you´re right. Conceptually, though, it´s not an injection. Because the subroutine is not functionally dependent on the continuation. Firstly continuations are procedures, i.e. subroutines without a return type. Remember: Flow Design is about unidirectional data flow. Secondly the name of the formal parameter is chosen in a way as to not assume anything about downstream processing steps. onNoStopWord describes a situation (or event) within the functional unit only. Translating output ports into function pointers helps keeping functional units mutually oblivious in cases where output is optional or produced asynchronically. Either pass the function pointer to the function upon call. Or make it global by putting it on the encompassing class. Then it´s called an event. In C# that´s even an explicit feature.class Filter { public void filter_stop_words( string word) { if (...check if not a stop word...) onNoStopWord(word); } public event Action<string> onNoStopWord; } When to use a continuation and when to use an event dependens on how a functional unit is used in flows and how it´s packed together with others into classes. You´ll see examples further down the Flow Design road. Another example of 1D functional design Let´s see Flow Design once more in action using the visual notation. How about the famous word wrap kata? Robert C. Martin has posted a much cited solution including an extensive reasoning behind his TDD approach. So maybe you want to compare it to Flow Design. The function signature given is:string WordWrap(string text, int maxLineLength) {...} That´s not an Entry Point since we don´t see an application with an environment and users. Nevertheless it´s a function which is supposed to provide a certain functionality. The text passed in has to be reformatted. The input is a single line of arbitrary length consisting of words separated by spaces. The output should consist of one or more lines of a maximum length specified. If a word is longer than a the maximum line length it can be split in multiple parts each fitting in a line. Flow Design Let´s start by brainstorming the process to accomplish the feat of reformatting the text. What´s needed? Words need to be assembled into lines Words need to be extracted from the input text The resulting lines need to be assembled into the output text Words too long to fit in a line need to be split Does sound about right? I guess so. And it shows a kind of priority. Long words are a special case. So maybe there is a hint for an incremental design here. First let´s tackle “average words” (words not longer than a line). Here´s the Flow Design for this increment: The the first three bullet points turned into functional units with explicit data added. As the signature requires a text is transformed into another text. See the input of the first functional unit and the output of the last functional unit. In between no text flows, but words and lines. That´s good to see because thereby the domain is clearly represented in the design. The requirements are talking about words and lines and here they are. But note the asterisk! It´s not outside the brackets but inside. 
That means it´s not a stream of words or lines, but lists or sequences. For each text a sequence of words is output. For each sequence of words a sequence of lines is produced. The asterisk is used to abstract from the concrete implementation. Like with streams. Whether the list of words gets implemented as an array or an IEnumerable is not important during design. It´s an implementation detail. Does any processing step require further refinement? I don´t think so. They all look pretty “atomic” to me. And if not… I can always backtrack and refine a process step using functional design later once I´ve gained more insight into a sub-problem. Implementation The implementation is straightforward as you can imagine. The processing steps can all be translated into functions. Each can be tested easily and separately. Each has a focused responsibility. And the process flow becomes just a sequence of function calls: Easy to understand. It clearly states how word wrapping works - on a high level of abstraction. And it´s easy to evolve as you´ll see. Flow Design - Increment 2 So far only texts consisting of “average words” are wrapped correctly. Words not fitting in a line will result in lines too long. Wrapping long words is a feature of the requested functionality. Whether it´s there or not makes a difference to the user. To quickly get feedback I decided to first implement a solution without this feature. But now it´s time to add it to deliver the full scope. Fortunately Flow Design automatically leads to code following the Open Closed Principle (OCP). It´s easy to extend it - instead of changing well tested code. How´s that possible? Flow Design allows for extension of functionality by inserting functional units into the flow. That way existing functional units need not be changed. The data flow arrow between functional units is a natural extension point. No need to resort to the Strategy Pattern. No need to think ahead where extions might need to be made in the future. I just “phase in” the remaining processing step: Since neither Extract words nor Reformat know of their environment neither needs to be touched due to the “detour”. The new processing step accepts the output of the existing upstream step and produces data compatible with the existing downstream step. Implementation - Increment 2 A trivial implementation checking the assumption if this works does not do anything to split long words. The input is just passed on: Note how clean WordWrap() stays. The solution is easy to understand. A developer looking at this code sometime in the future, when a new feature needs to be build in, quickly sees how long words are dealt with. Compare this to Robert C. Martin´s solution:[4] How does this solution handle long words? Long words are not even part of the domain language present in the code. At least I need considerable time to understand the approach. Admittedly the Flow Design solution with the full implementation of long word splitting is longer than Robert C. Martin´s. At least it seems. Because his solution does not cover all the “word wrap situations” the Flow Design solution handles. Some lines would need to be added to be on par, I guess. But even then… Is a difference in LOC that important as long as it´s in the same ball park? I value understandability and openness for extension higher than saving on the last line of code. Simplicity is not just less code, it´s also clarity in design. But don´t take my word for it. Try Flow Design on larger problems and compare for yourself. 
What´s the easier, more straightforward way to clean code? And keep in mind: You ain´t seen all yet ;-) There´s more to Flow Design than described in this chapter. In closing I hope I was able to give you an impression of functional design that makes you hungry for more. To me it´s an inevitable step in software development. Jumping from requirements to code does not scale. And it leads to dirty code all too quickly. Some thought should be invested first. Where there is a clear Entry Point visible, its functionality should be designed using data flows. Because with data flows abstraction is possible. For more background on why that´s necessary read my blog article here. For now let me point out to you - if you haven´t already noticed - that Flow Design is a general purpose declarative language. It´s “programming by intention” (Shalloway et al.). Just write down how you think the solution should work on a high level of abstraction. This breaks down a large problem into smaller problems. And by following the PoMO the solutions to those smaller problems are independent of each other. So they are easy to test. Or you could even think about getting them implemented in parallel by different team members. Flow Design not only increases evolvability, but also helps you become more productive. All team members can participate in functional design. This goes beyond collective code ownership. We´re talking collective design/architecture ownership. Because with Flow Design there is a common visual language to talk about functional design - which is the foundation for all other design activities.   PS: If you like what you read, consider getting my ebook “The Incremental Architect´s Napkin”. It´s where I compile all the articles in this series for easier reading. I like the strictness of Functional Programming - but I also find it quite hard to live by. And it certainly is not what millions of programmers are used to. Also, to me it seems the real world is full of state and side effects. So why give them such a bad image? That´s why functional design takes a more pragmatic approach. State and side effects are ok for processing steps - but be sure to follow the SRP. Don´t put too much of it into a single processing step. ? Image taken from www.physioweb.org ? My code samples are written in C#. C# sports typed function pointers called delegates. Action<T> is such a function pointer type, matching functions with the signature void someName(T t). Other languages provide similar ways to work with functions as first class citizens - even Java now in version 8. I trust you find a way to map this detail of my translation to your favorite programming language. I know it works for Java, C++, Ruby, JavaScript, Python, Go. And if you´re using a Functional Programming language it´s of course a no brainer. ? Taken from his blog post “The Craftsman 62, The Dark Path”. ?
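To illustrate that last footnote, a minimal sketch of how the same output-port-as-continuation translation might look in C++ (the stop-word rule below is only a placeholder, not the article's actual check):

#include <functional>
#include <iostream>
#include <string>

// The output port becomes a continuation parameter, so the functional unit
// stays oblivious of its downstream.
void filter_stop_words(const std::string& word,
                       const std::function<void(const std::string&)>& onNoStopWord) {
    bool isStopWord = word.size() <= 2;   // placeholder rule, not a real spell checker
    if (!isStopWord)
        onNoStopWord(word);
}

int main() {
    // Wiring-up happens outside the unit; here the downstream just prints.
    filter_stop_words("an",    [](const std::string& w) { std::cout << w << "\n"; });
    filter_stop_words("apple", [](const std::string& w) { std::cout << w << "\n"; });
    return 0;
}

As in the C# version, the unit does not know what happens downstream; the wiring decides what onNoStopWord actually does.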

    Read the article

  • Continuous Integration for SQL Server Part II – Integration Testing

    - by Ben Rees
    My previous post, on setting up Continuous Integration for SQL Server databases using GitHub, Bamboo and Red Gate’s tools, covered the first two parts of a simple Database Continuous Delivery process: Putting your database into a source control system, and Running a continuous integration process, each time changes are checked in. However there is, of course, a lot more to Continuous Delivery than that. Specifically, in addition to the above: Putting some actual integration tests into the CI process (otherwise, they don’t really do much, do they!?), Deploying the database changes with a managed, automated approach, Monitoring what you’ve just put live, to make sure you haven’t broken anything. This post will detail how to set up a very simple pipeline for implementing the first of these (continuous integration testing). NB: A lot of the setup in this post is built on top of the configuration from before, so it might be difficult to implement this post without running through part I first. There’ll then be a third post on automated database deployment followed by a final post dealing with the last item – monitoring changes on the live system. In the previous post, I used a mixture of Red Gate products and other 3rd party software – GitHub and Atlassian Bamboo specifically. This was partly because I believe most people work in a heterogeneous environment, using software from different vendors to suit their purposes, and I wanted to show how this could work for this process. For example, you could easily substitute Atlassian’s BitBucket or Stash for GitHub, depending on your needs, or use an alternative CI server such as TeamCity, TFS or Jenkins. However, in this post, I’ll be mostly using Red Gate products only (other than tSQLt). I would do this, firstly because I work for Red Gate. However, I also think that in the area of Database Delivery processes, nobody else has the offerings to implement this process fully – so I didn’t have any choice!   Background on Continuous Delivery For me, a great source of information on what makes a proper Continuous Delivery process is the Jez Humble and David Farley classic: Continuous Delivery – Reliable Software Releases through Build, Test, and Deployment Automation This book is not, of course, primarily about databases, and the process I outline here and in the previous article is a gross simplification of what Jez and David describe (not least because it’s that much harder for databases!). However, a lot of the principles that they describe can be equally applied to database development and, I would argue, should be. As I say however, what I describe here is a very simple version of what would be required for a full production process. A couple of useful resources on handling some of these complexities can be found in the following two references: Refactoring Databases – Evolutionary Database Design, by Scott J Ambler and Pramod J. Sadalage Versioning Databases – Branching and Merging, by Scott Allen In particular, I don’t deal at all with the issues of multiple branches and merging of those branches, an issue made particularly acute by the use of GitHub. The other point worth making is that, in the words of Martin Fowler: Continuous Delivery is about keeping your application in a state where it is always able to deploy into production.   I.e. we are not talking about continuously delivering updates to the production database every time someone checks in an amendment to a stored procedure.
That is possible (and what Martin calls Continuous Deployment). However, again, that’s more than I describe in this article. And I doubt I need to remind DBAs or Developers to Proceed with Caution!   Integration Testing Back to something practical. The next stage, building on our set up from the previous article, is to add in some integration tests to the process. As I say, the CI process, though interesting, isn’t enormously useful without some sort of test process running. For this we’ll use the tSQLt framework, an open source framework designed specifically for running SQL Server tests. tSQLt is part of Red Gate’s SQL Test found on http://www.red-gate.com/products/sql-development/sql-test/ or can be downloaded separately from www.tsqlt.org - though I’ll provide a step-by-step guide below for setting this up. Getting tSQLt set up via SQL Test Click on the link http://www.red-gate.com/products/sql-development/sql-test/ and click on the blue Download button to download the Red Gate SQL Test product, if not already installed. Follow the install process for SQL Test to install the SQL Server Management Studio (SSMS) plugin on to your machine, if not already installed. Open SSMS. You should now see SQL Test under the Tools menu:   Clicking this link will give you the basic SQL Test dialogue: As yet, though we’ve installed the SQL Test product we haven’t yet installed the tSQLt test framework on to any particular database. To do this, we need to add our RedGateApp database using this dialogue, by clicking on the + Add Database to SQL Test… link, selecting the RedGateApp database and clicking the Add Database link:   In the next screen, SQL Test describes what will be installed on the database for the tSQLt framework. Also in this dialogue, uncheck the “Add SQL Cop tests” option (shown below). SQL Cop is a great set of pre-defined tests that work within the tSQLt framework to check the general health of your SQL Server database. However, we won’t be using them in this particular simple example: Once you’ve clicked on the OK button, the changes described in the dialogue will be made to your database. Some of these are shown in the left-hand-side below: We’ve now installed the framework. However, we haven’t actually created any tests, so this will be the next step. But, before we proceed, we’ve made an update to our database so should, again check this in to source control, adding comments as required:   Also worth a quick check that your build still runs with the new additions!: (And a quick check of the RedGateAppCI database shows that the changes have been made).   Creating and Testing a Unit Test There are, of course, a lot of very interesting unit tests that you could and should set up for a database. The great thing about the tSQLt framework is that you can write these in SQL. The example I’m going to use here is pretty Mickey Mouse – our database table is going to include some email addresses as reference data and I want to check whether these are all in a correct email format. Nothing clever but it illustrates the process and hopefully shows the method by which more interesting tests could be set up. Adding Reference Data to our Database To start, I want to add some reference data to my database, and have this source controlled (as well as the schema). 
First of all I need to add some data in to my solitary table – this can be done a number of ways, but I’ll do this in SSMS for simplicity: I then add some reference data to my table: Currently this reference data just exists in the database. For proper integration testing, this needs to form part of the source-controlled version of the database – and so needs to be added to the Git repository. This can be done via SQL Source Control, though first a Primary Key needs to be added to the table. Right click the table, select Design, then right-click on the first “id” row. Then click on “Set Primary Key”: NB: once this change is made, click Save to save the change to the table. Then, to source control this reference data, right click on the table (dbo.Email) and selecting the following option:   In the next screen, link the data in the Email table, by selecting it from the list and clicking “save and close”: We should at this point re-commit the changes (both the addition of the Primary Key, and the data) to the Git repo. NB: From here on, I won’t show screenshots for the GitHub side of things – it’s the same each time: whenever a change is made in SQL Source Control and committed to your local folder, you then need to sync this in the GitHub Windows client (as this is where the build server, Bamboo is taking it from). An interesting point to note here, when these changes are committed in SQL Source Control (right-click database and select “Commit Changes to Source Control..”): The display gives a warning about possibly needing a migration script for the “Add Primary Key” step of the changes. This isn’t actually necessary in this case, but this mechanism would allow you to create override scripts to replace the default change scripts created by the SQL Compare engine (which runs underneath SQL Source Control). Ignoring this message (!), we add a comment and commit the changes to Git. I then sync these, run a build (or the build gets run automatically), and check that the data is being deployed over to the target RedGateAppCI database:   Creating and Running the Test As I mention, the test I’m going to use here is a very simple one - are the email addresses in my reference table valid? This isn’t of course, a full test of email validation (I expect the email addresses I’ve chosen here aren’t really the those of the Fab Four) – but just a very basic check of format used. I’ve taken the relevant SQL from this Stack Overflow article. In SSMS select “SQL Test” from the Tools menu, then click on + New Test: In the next screen, give your new test a name, and also enter a name in the Test Class box (test classes are schemas that help you keep things organised). Also check that the database in which the test is going to be created is correct – RedGateApp in this example: Click “Create Test”. After closing a couple of subsequent dialogues, you’ll see a dummy script for the test, that needs filling in:   We now need to define the SQL for our test. As mentioned before, tSQLt allows you to write your unit tests in T-SQL, and the code I’m going to use here is as below. 
This needs to be copied and pasted into the query window, to replace the default given by tSQLt: -- Basic email check test ALTER PROCEDURE [MyChecks].[test Check Email Addresses] AS BEGIN SET NOCOUNT ON         Declare @Output VarChar(max)     Set @Output = ''       SELECT  @Output = @Output + Email +Char(13) + Char(10) FROM dbo.Email WHERE email NOT LIKE '%_@__%.__%'       If @Output > ''         Begin             Set @Output = Char(13) + Char(10)                           + @Output             EXEC tSQLt.Fail @Output         End   END;   Once this script is entered, hit execute to add the Stored Procedure to the database. Before committing the test to source control,  it’s worth just checking that it works! For a positive test, click on “SQL Test” from the Tools menu, then click Run Tests. You should see output like the following: - a green tick to indicate success! But of course, what we also need to do is test that this is actually doing something by showing a failed test. Edit one of the email addresses in your table to an incorrect format: Now, re-run the same SQL Test as before and you’ll see the following: Great – we now know that our test is really doing something! You’ll also see a useful error message at the bottom of SSMS: (leave the email address as invalid for now, for the next steps). The next stage is to check this new test in to source control again, by right-clicking on the database and checking in the changes with a commit message (and not forgetting to sync in the GitHub client):   Checking that the Tests are Running as Integration Tests After the changes above are made, and after a build has run on Bamboo (manual or automatic), looking at the Stored Procedures for the RedGateAppCI, the SPROC for the new test has been moved over to the database. However this is not exactly what we were after. We didn’t want to just copy objects from one database to another, but actually run the tests as part of the build/integration test process. I.e. we’re continuously checking any changes we make (in this case, to the reference data emails), to ensure we’re not breaking a test that we’ve set up. The behaviour we want to see is that, if we check in static data that is incorrect (as we did in step 9 above) and we have the tSQLt test set up, then our build in Bamboo should fail. However, re-running the build shows the following: - sadly, a successful build! To make sure the tSQLt tests are run as part of the integration test, we need to amend a switch in the Red Gate CI config file. First, navigate to file sqlCI.targets in your working folder: Edit this document, make the following change, save the document, then commit and sync this change in the GitHub client: <!-- tSQLt tests --> <!-- Optional --> <!-- To run tSQLt tests in source control for the database, enter true. --> <enableTsqlt>true</enableTsqlt> Now, if we re-run the build in Bamboo (NB: I’ve moved to a new server here, hence different address and build number): - superb, a broken build!! The error message isn’t great here, so to get more detailed info, click on the full build log link on this page (below the fold). The interesting part of the log shown is towards the bottom. Pulling out this part:   21-Jun-2013 11:35:19 Build FAILED.
21-Jun-2013 11:35:19 21-Jun-2013 11:35:19 "C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj" (default target) (1) -> 21-Jun-2013 11:35:19 (sqlCI target) -> 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: RedGate.Deploy.SqlServerDbPackage.Shared.Exceptions.InvalidSqlException: Test Case Summary: 1 test case(s) executed, 0 succeeded, 1 failed, 0 errored. [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj] 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: [MyChecks].[test Check Email Addresses] failed: [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj] 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: ringo.starr@beatles [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj] 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj] 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: +----------------------+ [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj] 21-Jun-2013 11:35:19 EXEC : sqlCI error occurred: |Test Execution Summary| [C:\Users\Administrator\bamboo-home\xml-data\build-dir\RGA-RGP-JOB1\sqlCI.proj]   As a final check, we should make sure that, if we now fix this error, the build succeeds. So in SSMS, I’m going to correct the invalid email address, then check this change in to SQL Source Control (with a comment), commit to GitHub, and re-run the build:   This should have fixed the build: It worked! Summary This has been a very quick run through the implementation of CI for databases, including tSQLt tests to test whether your database updates are working. The next post in this series will focus on automated deployment – we’ve tested our database changes, how can we now deploy these to target sites?  

    Read the article

  • Partitioned Repository for WebCenter Content using Oracle Database 11g

    - by Adao Junior
    One of the biggest challenges for content management solutions is related to the storage management due the high volumes of the unstoppable growing of information. Even if you have storage appliances and a lot of terabytes, thinks like backup, compression, deduplication, storage relocation, encryption, availability could be a nightmare. One standard option that you have with the Oracle WebCenter Content is to store data to the database. And the Oracle Database allows you leverage features like compression, deduplication, encryption and seamless backup. But with a huge volume, the challenge is passed to the DBA to keep the WebCenter Content Database up and running. One solution is the use of DB partitions for your content repository, but what are the implications of this? Can I fit this with my business requirements? Well, yes. It’s up to you how you will manage that, you just need a good plan. During you “storage brainstorm plan” take in your mind what you need, such as storage petabytes of documents? You need everything on-line? There’s a way to logically separate the “good content” from the “legacy content”? The first thing that comes to my mind is to use the creation date of the document, but you need to remember that this document could receive a lot of revisions and maybe you can consider the revision creation date. Your plan can have also complex rules like per Document Type or per a custom metadata like department or an hybrid per date, per DocType and an specific virtual folder. Extrapolation the use, you can have your repository distributed in different servers, different disks, different disk types (Such as ssds, sas, sata, tape,…), separated accordingly your business requirements, separating the “hot” content from the legacy and easily matching your compliance requirements. If you think to use by revision, the simple way is to consider the dId, that is the sequential unique id for every content created using the WebCenter Content or the dLastModified that is the date field of the FileStorage table that contains the date of inclusion of the content to the DB Table using SecureFiles. Using the scenario of partitioned repository using an hierarchical separation by date, we will transform the FileStorage table in an partitioned table using  “Partition by Range” of the dLastModified column (You can use the dId or a join with other tables for other metadata such as dDocType, Security, etc…). The test scenario bellow covers: Previous existent data on the JDBC Storage to be migrated to the new partitioned JDBC Storage Partition by Date Automatically generation of new partitions based on a pre-defined interval (Available only with Oracle Database 11g+) Deduplication and Compression for legacy data Oracle WebCenter Content 11g PS5 (Could present some customizations that do not affect the test scenario) For the test case you need some data stored using JDBC Storage to be the “legacy” data. If you do not have done before, just create an Storage rule pointed to the JDBC Storage: Enable the metadata StorageRule in the UI and upload some documents using this rule. For this test case you can run using the schema owner or an dba user. We will use the schema owner TESTS_OCS. I can’t forgot to tell that this is just a test and you should do a proper backup of your environment. When you use the schema owner, you need some privileges, using the dba user grant the privileges needed: REM Grant privileges required for online redefinition. 
GRANT EXECUTE ON DBMS_REDEFINITION TO TESTS_OCS; GRANT ALTER ANY TABLE TO TESTS_OCS; GRANT DROP ANY TABLE TO TESTS_OCS; GRANT LOCK ANY TABLE TO TESTS_OCS; GRANT CREATE ANY TABLE TO TESTS_OCS; GRANT SELECT ANY TABLE TO TESTS_OCS; REM Privileges required to perform cloning of dependent objects. GRANT CREATE ANY TRIGGER TO TESTS_OCS; GRANT CREATE ANY INDEX TO TESTS_OCS; In our test scenario we will separate the content as Legacy, Day1, Day2, Day3 and Future. This last one will be partitioned automatically using 3 tablespaces in a round robin mode. In a real scenario the partition rule could be per month, per year or any rule that you choose. Tablespaces for the test scenario: CREATE TABLESPACE TESTS_OCS_PART_LEGACY DATAFILE 'tests_ocs_part_legacy.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_DAY1 DATAFILE 'tests_ocs_part_day1.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_DAY2 DATAFILE 'tests_ocs_part_day2.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_DAY3 DATAFILE 'tests_ocs_part_day3.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_ROUND_ROBIN_A DATAFILE 'tests_ocs_part_round_robin_a.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_ROUND_ROBIN_B DATAFILE 'tests_ocs_part_round_robin_b.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; CREATE TABLESPACE TESTS_OCS_PART_ROUND_ROBIN_C DATAFILE 'tests_ocs_part_round_robin_c.dat' SIZE 500K AUTOEXTEND ON NEXT 500K MAXSIZE UNLIMITED; Before starting, gather optimizer statistics on the actual FileStorage table: EXEC DBMS_STATS.GATHER_TABLE_STATS(USER, 'FileStorage', cascade => TRUE); Now check if it is possible to execute the redefinition process: EXEC DBMS_REDEFINITION.CAN_REDEF_TABLE('TESTS_OCS', 'FileStorage',DBMS_REDEFINITION.CONS_USE_PK); If there are no error messages, you are good to go. Create a Partitioned Interim FileStorage table.
You need to create a new table with the partition information to act as an interim table: CREATE TABLE FILESTORAGE_Part ( DID NUMBER(*,0) NOT NULL ENABLE, DRENDITIONID VARCHAR2(30 CHAR) NOT NULL ENABLE, DLASTMODIFIED TIMESTAMP (6), DFILESIZE NUMBER(*,0), DISDELETED VARCHAR2(1 CHAR), BFILEDATA BLOB ) LOB (BFILEDATA) STORE AS SECUREFILE ( ENABLE STORAGE IN ROW NOCACHE LOGGING KEEP_DUPLICATES NOCOMPRESS ) PARTITION BY RANGE (DLASTMODIFIED) INTERVAL (NUMTODSINTERVAL(1,'DAY')) STORE IN (TESTS_OCS_PART_ROUND_ROBIN_A, TESTS_OCS_PART_ROUND_ROBIN_B, TESTS_OCS_PART_ROUND_ROBIN_C) ( PARTITION FILESTORAGE_PART_LEGACY VALUES LESS THAN (TO_DATE('05-APR-2012 12.00.00 AM', 'DD-MON-YYYY HH.MI.SS AM')) TABLESPACE TESTS_OCS_PART_LEGACY LOB (BFILEDATA) STORE AS SECUREFILE ( TABLESPACE TESTS_OCS_PART_LEGACY RETENTION NONE DEDUPLICATE COMPRESS HIGH ), PARTITION FILESTORAGE_PART_DAY1 VALUES LESS THAN (TO_DATE('06-APR-2012 07.25.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) TABLESPACE TESTS_OCS_PART_DAY1 LOB (BFILEDATA) STORE AS SECUREFILE ( TABLESPACE TESTS_OCS_PART_DAY1 RETENTION AUTO KEEP_DUPLICATES COMPRESS ), PARTITION FILESTORAGE_PART_DAY2 VALUES LESS THAN (TO_DATE('06-APR-2012 07.55.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) TABLESPACE TESTS_OCS_PART_DAY2 LOB (BFILEDATA) STORE AS SECUREFILE ( TABLESPACE TESTS_OCS_PART_DAY2 RETENTION AUTO KEEP_DUPLICATES NOCOMPRESS ), PARTITION FILESTORAGE_PART_DAY3 VALUES LESS THAN (TO_DATE('06-APR-2012 07.58.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) TABLESPACE TESTS_OCS_PART_DAY3 LOB (BFILEDATA) STORE AS SECUREFILE ( TABLESPACE TESTS_OCS_PART_DAY3 RETENTION AUTO KEEP_DUPLICATES NOCOMPRESS ) ); After the creation you should see your partitions defined. Note that only the fixed range partitions have been created, none of the interval partition have been created. Start the redefinition process: BEGIN DBMS_REDEFINITION.START_REDEF_TABLE( uname => 'TESTS_OCS' ,orig_table => 'FileStorage' ,int_table => 'FileStorage_PART' ,col_mapping => NULL ,options_flag => DBMS_REDEFINITION.CONS_USE_PK ); END; This operation can take some time to complete, depending how many contents that you have and on the size of the table. Using the DBA user you can check the progress with this command: SELECT * FROM v$sesstat WHERE sid = 1; Copy dependent objects: DECLARE redefinition_errors PLS_INTEGER := 0; BEGIN DBMS_REDEFINITION.COPY_TABLE_DEPENDENTS( uname => 'TESTS_OCS' ,orig_table => 'FileStorage' ,int_table => 'FileStorage_PART' ,copy_indexes => DBMS_REDEFINITION.CONS_ORIG_PARAMS ,copy_triggers => TRUE ,copy_constraints => TRUE ,copy_privileges => TRUE ,ignore_errors => TRUE ,num_errors => redefinition_errors ,copy_statistics => FALSE ,copy_mvlog => FALSE ); IF (redefinition_errors > 0) THEN DBMS_OUTPUT.PUT_LINE('>>> FileStorage to FileStorage_PART temp copy Errors: ' || TO_CHAR(redefinition_errors)); END IF; END; With the DBA user, verify that there's no errors: SELECT object_name, base_table_name, ddl_txt FROM DBA_REDEFINITION_ERRORS; *Note that will show 2 lines related to the constrains, this is expected. 
Synchronize the interim table FileStorage_PART: BEGIN DBMS_REDEFINITION.SYNC_INTERIM_TABLE( uname => 'TESTS_OCS', orig_table => 'FileStorage', int_table => 'FileStorage_PART'); END; Gather statistics on the new table: EXEC DBMS_STATS.GATHER_TABLE_STATS(USER, 'FileStorage_PART', cascade => TRUE); Complete the redefinition: BEGIN DBMS_REDEFINITION.FINISH_REDEF_TABLE( uname => 'TESTS_OCS', orig_table => 'FileStorage', int_table => 'FileStorage_PART'); END; During the execution the FileStorage table is locked in exclusive mode until the operation finishes. After the last command the FileStorage table is partitioned. If you have contents out of the range partition, you should see the new partitions created automatically, not generating an error if you “forgot” to create all the future ranges. You will see something like: You can now drop the FileStorage_PART table: DROP TABLE FileStorage_PART PURGE; To check that the FileStorage table is valid and is partitioned, use the command: SELECT num_rows,partitioned FROM user_tables WHERE table_name = 'FILESTORAGE'; You can list the contents of the FileStorage table in a specific partition, for example: SELECT * FROM FileStorage PARTITION (FILESTORAGE_PART_LEGACY) Some useful commands that you can use to check the partitions, noting that you need to run them as a DBA user: SELECT * FROM DBA_TAB_PARTITIONS WHERE table_name = 'FILESTORAGE';   SELECT * FROM DBA_TABLESPACES WHERE tablespace_name like 'TESTS_OCS%'; After the redefinition process completes you have a new FileStorage table storing all content that has the Storage rule pointed to the JDBC Storage, partitioned using the rule set during the creation of the temporary interim FileStorage_PART table. At this point you can test WebCenter Content by downloading the documents (Original and Renditions). Note that the content could already be in the cache area; take a look in the weblayout directory to see if a file with the same id is there, then click on the web rendition of your test file and see if it has created the file and you can open it - this means that it is all working. The redefinition process can be repeated many times, which allows you to test which layout works better, over and over again. Now some interesting maintenance actions related to the partitions: Make a tablespace read only. There are no issues viewing content, as WebCenter Content does not alter the revisions. When you try to delete a content item that is part of a read-only tablespace, an error will occur and the document will not be deleted. The only way to prevent errors today is creating a custom component that checks the partitions and, if you have a document in a “Read Only” repository, executes the deletion process of the metadata and marks the document to be deleted on the next db maintenance, like a new redefinition. Take a tablespace off-line for archiving purposes or any other reason.
When you try to open a document stored in that tablespace, you will receive an error saying the content could not be retrieved, but the other online tablespaces are not affected. The same happens when deleting documents. Again, a custom component is the solution: if a document is "out of range", the component can show a message explaining that the repository for that document is offline. This can be extended into an option for the user to request that the repository be put online again.

Moving some legacy content to an offline repository (table), using the Exchange option to move the content from one partition to an empty non-partitioned table such as FileStorage_LEGACY. Note that this option removes the rows from FileStorage, so you will no longer be able to open the stored content. You always need to keep the indexes and constraints in mind.

A redefinition separating the original content (vault) from the renditions, and partitioning by date at the same time. This could be an option for DAM environments that want a special place for the renditions and prefer to put the original files on storage with lower performance. The process is the same; you just need to change the interim table script to use composite partitioning. It will be something like:

CREATE TABLE FILESTORAGE_RenditionPart (
  DID NUMBER(*,0) NOT NULL ENABLE,
  DRENDITIONID VARCHAR2(30 CHAR) NOT NULL ENABLE,
  DLASTMODIFIED TIMESTAMP (6),
  DFILESIZE NUMBER(*,0),
  DISDELETED VARCHAR2(1 CHAR),
  BFILEDATA BLOB
)
LOB (BFILEDATA) STORE AS SECUREFILE ( ENABLE STORAGE IN ROW NOCACHE LOGGING KEEP_DUPLICATES NOCOMPRESS )
PARTITION BY LIST (DRENDITIONID)
SUBPARTITION BY RANGE (DLASTMODIFIED)
(
  PARTITION Vault VALUES ('primaryFile')
  (
    SUBPARTITION FILESTORAGE_VAULT_LEGACY VALUES LESS THAN (TO_DATE('05-APR-2012 12.00.00 AM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_VAULT_DAY1 VALUES LESS THAN (TO_DATE('06-APR-2012 07.25.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_VAULT_DAY2 VALUES LESS THAN (TO_DATE('06-APR-2012 07.55.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_VAULT_DAY3 VALUES LESS THAN (TO_DATE('06-APR-2012 07.58.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_VAULT_FUTURE VALUES LESS THAN (MAXVALUE)
  ),
  PARTITION WebLayout VALUES ('webViewableFile')
  (
    SUBPARTITION FILESTORAGE_WEBLAYOUT_LEGACY VALUES LESS THAN (TO_DATE('05-APR-2012 12.00.00 AM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_WEBLAYOUT_DAY1 VALUES LESS THAN (TO_DATE('06-APR-2012 07.25.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_WEBLAYOUT_DAY2 VALUES LESS THAN (TO_DATE('06-APR-2012 07.55.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_WEBLAYOUT_DAY3 VALUES LESS THAN (TO_DATE('06-APR-2012 07.58.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_WEBLAYOUT_FUTURE VALUES LESS THAN (MAXVALUE)
  ),
  PARTITION Special VALUES ('Special')
  (
    SUBPARTITION FILESTORAGE_SPECIAL_LEGACY VALUES LESS THAN (TO_DATE('05-APR-2012 12.00.00 AM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_SPECIAL_DAY1 VALUES LESS THAN (TO_DATE('06-APR-2012 07.25.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_SPECIAL_DAY2 VALUES LESS THAN (TO_DATE('06-APR-2012 07.55.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_SPECIAL_DAY3 VALUES LESS THAN (TO_DATE('06-APR-2012 07.58.00 PM', 'DD-MON-YYYY HH.MI.SS AM')) LOB (BFILEDATA) STORE AS SECUREFILE,
    SUBPARTITION FILESTORAGE_SPECIAL_FUTURE VALUES LESS THAN (MAXVALUE)
  )
) ENABLE ROW MOVEMENT;

The next post related to the partitioned repository will come with a sample component to handle the possible exceptions when you need to take a tablespace/partition offline or move it to another place. We can also include some integration with Retention Management and Records Management. Another subject related to partitioning is the ability to create a FileStore provider that points to a different database, raising the level of distributed storage vs. performance. Let us know if this is important to you, or leave a comment if you have a use case not listed. Cross-posted on the blog.ContentrA.com

    Read the article

  • Simplex Noise Help

    - by Alex Larsen
    Im Making A Minecraft Like Gae In XNA C# And I Need To Generate Land With Caves This Is The Code For Simplex I Have /// <summary> /// 1D simplex noise /// </summary> /// <param name="x"></param> /// <returns></returns> public static float Generate(float x) { int i0 = FastFloor(x); int i1 = i0 + 1; float x0 = x - i0; float x1 = x0 - 1.0f; float n0, n1; float t0 = 1.0f - x0 * x0; t0 *= t0; n0 = t0 * t0 * grad(perm[i0 & 0xff], x0); float t1 = 1.0f - x1 * x1; t1 *= t1; n1 = t1 * t1 * grad(perm[i1 & 0xff], x1); // The maximum value of this noise is 8*(3/4)^4 = 2.53125 // A factor of 0.395 scales to fit exactly within [-1,1] return 0.395f * (n0 + n1); } /// <summary> /// 2D simplex noise /// </summary> /// <param name="x"></param> /// <param name="y"></param> /// <returns></returns> public static float Generate(float x, float y) { const float F2 = 0.366025403f; // F2 = 0.5*(sqrt(3.0)-1.0) const float G2 = 0.211324865f; // G2 = (3.0-Math.sqrt(3.0))/6.0 float n0, n1, n2; // Noise contributions from the three corners // Skew the input space to determine which simplex cell we're in float s = (x + y) * F2; // Hairy factor for 2D float xs = x + s; float ys = y + s; int i = FastFloor(xs); int j = FastFloor(ys); float t = (float)(i + j) * G2; float X0 = i - t; // Unskew the cell origin back to (x,y) space float Y0 = j - t; float x0 = x - X0; // The x,y distances from the cell origin float y0 = y - Y0; // For the 2D case, the simplex shape is an equilateral triangle. // Determine which simplex we are in. int i1, j1; // Offsets for second (middle) corner of simplex in (i,j) coords if (x0 > y0) { i1 = 1; j1 = 0; } // lower triangle, XY order: (0,0)->(1,0)->(1,1) else { i1 = 0; j1 = 1; } // upper triangle, YX order: (0,0)->(0,1)->(1,1) // A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and // a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where // c = (3-sqrt(3))/6 float x1 = x0 - i1 + G2; // Offsets for middle corner in (x,y) unskewed coords float y1 = y0 - j1 + G2; float x2 = x0 - 1.0f + 2.0f * G2; // Offsets for last corner in (x,y) unskewed coords float y2 = y0 - 1.0f + 2.0f * G2; // Wrap the integer indices at 256, to avoid indexing perm[] out of bounds int ii = i % 256; int jj = j % 256; // Calculate the contribution from the three corners float t0 = 0.5f - x0 * x0 - y0 * y0; if (t0 < 0.0f) n0 = 0.0f; else { t0 *= t0; n0 = t0 * t0 * grad(perm[ii + perm[jj]], x0, y0); } float t1 = 0.5f - x1 * x1 - y1 * y1; if (t1 < 0.0f) n1 = 0.0f; else { t1 *= t1; n1 = t1 * t1 * grad(perm[ii + i1 + perm[jj + j1]], x1, y1); } float t2 = 0.5f - x2 * x2 - y2 * y2; if (t2 < 0.0f) n2 = 0.0f; else { t2 *= t2; n2 = t2 * t2 * grad(perm[ii + 1 + perm[jj + 1]], x2, y2); } // Add contributions from each corner to get the final noise value. // The result is scaled to return values in the interval [-1,1]. return 40.0f * (n0 + n1 + n2); // TODO: The scale factor is preliminary! 
} public static float Generate(float x, float y, float z) { // Simple skewing factors for the 3D case const float F3 = 0.333333333f; const float G3 = 0.166666667f; float n0, n1, n2, n3; // Noise contributions from the four corners // Skew the input space to determine which simplex cell we're in float s = (x + y + z) * F3; // Very nice and simple skew factor for 3D float xs = x + s; float ys = y + s; float zs = z + s; int i = FastFloor(xs); int j = FastFloor(ys); int k = FastFloor(zs); float t = (float)(i + j + k) * G3; float X0 = i - t; // Unskew the cell origin back to (x,y,z) space float Y0 = j - t; float Z0 = k - t; float x0 = x - X0; // The x,y,z distances from the cell origin float y0 = y - Y0; float z0 = z - Z0; // For the 3D case, the simplex shape is a slightly irregular tetrahedron. // Determine which simplex we are in. int i1, j1, k1; // Offsets for second corner of simplex in (i,j,k) coords int i2, j2, k2; // Offsets for third corner of simplex in (i,j,k) coords /* This code would benefit from a backport from the GLSL version! */ if (x0 >= y0) { if (y0 >= z0) { i1 = 1; j1 = 0; k1 = 0; i2 = 1; j2 = 1; k2 = 0; } // X Y Z order else if (x0 >= z0) { i1 = 1; j1 = 0; k1 = 0; i2 = 1; j2 = 0; k2 = 1; } // X Z Y order else { i1 = 0; j1 = 0; k1 = 1; i2 = 1; j2 = 0; k2 = 1; } // Z X Y order } else { // x0<y0 if (y0 < z0) { i1 = 0; j1 = 0; k1 = 1; i2 = 0; j2 = 1; k2 = 1; } // Z Y X order else if (x0 < z0) { i1 = 0; j1 = 1; k1 = 0; i2 = 0; j2 = 1; k2 = 1; } // Y Z X order else { i1 = 0; j1 = 1; k1 = 0; i2 = 1; j2 = 1; k2 = 0; } // Y X Z order } // A step of (1,0,0) in (i,j,k) means a step of (1-c,-c,-c) in (x,y,z), // a step of (0,1,0) in (i,j,k) means a step of (-c,1-c,-c) in (x,y,z), and // a step of (0,0,1) in (i,j,k) means a step of (-c,-c,1-c) in (x,y,z), where // c = 1/6. float x1 = x0 - i1 + G3; // Offsets for second corner in (x,y,z) coords float y1 = y0 - j1 + G3; float z1 = z0 - k1 + G3; float x2 = x0 - i2 + 2.0f * G3; // Offsets for third corner in (x,y,z) coords float y2 = y0 - j2 + 2.0f * G3; float z2 = z0 - k2 + 2.0f * G3; float x3 = x0 - 1.0f + 3.0f * G3; // Offsets for last corner in (x,y,z) coords float y3 = y0 - 1.0f + 3.0f * G3; float z3 = z0 - 1.0f + 3.0f * G3; // Wrap the integer indices at 256, to avoid indexing perm[] out of bounds int ii = i % 256; int jj = j % 256; int kk = k % 256; // Calculate the contribution from the four corners float t0 = 0.6f - x0 * x0 - y0 * y0 - z0 * z0; if (t0 < 0.0f) n0 = 0.0f; else { t0 *= t0; n0 = t0 * t0 * grad(perm[ii + perm[jj + perm[kk]]], x0, y0, z0); } float t1 = 0.6f - x1 * x1 - y1 * y1 - z1 * z1; if (t1 < 0.0f) n1 = 0.0f; else { t1 *= t1; n1 = t1 * t1 * grad(perm[ii + i1 + perm[jj + j1 + perm[kk + k1]]], x1, y1, z1); } float t2 = 0.6f - x2 * x2 - y2 * y2 - z2 * z2; if (t2 < 0.0f) n2 = 0.0f; else { t2 *= t2; n2 = t2 * t2 * grad(perm[ii + i2 + perm[jj + j2 + perm[kk + k2]]], x2, y2, z2); } float t3 = 0.6f - x3 * x3 - y3 * y3 - z3 * z3; if (t3 < 0.0f) n3 = 0.0f; else { t3 *= t3; n3 = t3 * t3 * grad(perm[ii + 1 + perm[jj + 1 + perm[kk + 1]]], x3, y3, z3); } // Add contributions from each corner to get the final noise value. // The result is scaled to stay just inside [-1,1] return 32.0f * (n0 + n1 + n2 + n3); // TODO: The scale factor is preliminary! 
} private static byte[] perm = new byte[512] { 151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180, 151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 }; private static int FastFloor(float x) { return (x > 0) ? ((int)x) : (((int)x) - 1); } private static float grad(int hash, float x) { int h = hash & 15; float grad = 1.0f + (h & 7); // Gradient value 1.0, 2.0, ..., 8.0 if ((h & 8) != 0) grad = -grad; // Set a random sign for the gradient return (grad * x); // Multiply the gradient with the distance } private static float grad(int hash, float x, float y) { int h = hash & 7; // Convert low 3 bits of hash code float u = h < 4 ? x : y; // into 8 simple gradient directions, float v = h < 4 ? y : x; // and compute the dot product with (x,y). return ((h & 1) != 0 ? -u : u) + ((h & 2) != 0 ? -2.0f * v : 2.0f * v); } private static float grad(int hash, float x, float y, float z) { int h = hash & 15; // Convert low 4 bits of hash code into 12 simple float u = h < 8 ? x : y; // gradient directions, and compute dot product. float v = h < 4 ? y : h == 12 || h == 14 ? x : z; // Fix repeats at h = 12 to 15 return ((h & 1) != 0 ? -u : u) + ((h & 2) != 0 ? -v : v); } private static float grad(int hash, float x, float y, float z, float t) { int h = hash & 31; // Convert low 5 bits of hash code into 32 simple float u = h < 24 ? x : y; // gradient directions, and compute dot product. float v = h < 16 ? y : z; float w = h < 8 ? z : t; return ((h & 1) != 0 ? -u : u) + ((h & 2) != 0 ? -v : v) + ((h & 4) != 0 ? 
-w : w); } This Is My World Generation Code Block[,] BlocksInMap = new Block[1024, 256]; public bool IsWorldGenerated = false; Random r = new Random(); private void RunThread() { for (int BH = 0; BH < 256; BH++) { for (int BW = 0; BW < 1024; BW++) { Block b = new Block(); if (BH >= 192) { } BlocksInMap[BW, BH] = b; } } IsWorldGenerated = true; } public void GenWorld() { new Thread(new ThreadStart(RunThread)).Start(); } And This Is An Example Of How I Set Blocks Block b = new Block(); b.BlockType = Block.BlockTypes.Air; This Is An Example Of How I Set Models foreach (Block b in MyWorld) { switch(b.BlockType) { case Block.BlockTypes.Dirt: b.Model = DirtModel; break; etc. } } How Would I Use These To Generate The World (The Block Array) And If Possible Thread It More? btw It's 1024 Wide And 256 Tall
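    A minimal sketch of one way to drive the block array from the noise methods above. It assumes the static Generate overloads live in a class called Noise, that Block.BlockTypes has Air and Dirt members as shown, and that row 0 is the top of the map; the scale and threshold constants are guesses to tune:

    private void RunThread()
    {
        const int Width = 1024, Height = 256;
        const float SurfaceScale = 0.01f;   // horizontal stretch of the terrain
        const float CaveScale = 0.05f;      // frequency of the cave noise
        const float CaveThreshold = 0.3f;   // higher = fewer caves

        for (int BW = 0; BW < Width; BW++)
        {
            // 1D noise in [-1,1] mapped to a ground level around row 192 +/- 32.
            int ground = 192 + (int)(Noise.Generate(BW * SurfaceScale) * 32.0f);

            for (int BH = 0; BH < Height; BH++)
            {
                Block b = new Block();
                if (BH >= ground)
                {
                    // Below the surface: solid, unless the 2D cave noise carves it out.
                    float cave = Noise.Generate(BW * CaveScale, BH * CaveScale);
                    b.BlockType = cave > CaveThreshold ? Block.BlockTypes.Air
                                                       : Block.BlockTypes.Dirt;
                }
                else
                {
                    b.BlockType = Block.BlockTypes.Air;
                }
                BlocksInMap[BW, BH] = b;
            }
        }
        IsWorldGenerated = true;
    }

    Each column only writes its own slice of BlocksInMap, so to thread it further the outer loop can be split into bands of columns handled by Parallel.For or by a few manual Thread instances, each covering its own BW range.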

    Read the article

  • Why is it 8 here,understanding buffer overflow

    - by Mask
    void function(int a, int b, int c) { char buffer1[5]; char buffer2[10]; int *ret; ret = buffer1 + 12; (*ret) += 8;//why is it 8?? } void main() { int x; x = 0; function(1,2,3); x = 1; printf("%d\n",x); } The above demo is from here: http://insecure.org/stf/smashstack.html But it's not working here: D:\test>gcc -Wall -Wextra hw.cpp && a.exe hw.cpp: In function `void function(int, int, int)': hw.cpp:6: warning: unused variable 'buffer2' hw.cpp: At global scope: hw.cpp:4: warning: unused parameter 'a' hw.cpp:4: warning: unused parameter 'b' hw.cpp:4: warning: unused parameter 'c' 1 And I don't understand why it's 8 though the author thinks: A little math tells us the distance is 8 bytes.

    Read the article

  • Calling AuditQuerySystemPolicy() (advapi32.dll) from C# returns "The parameter is incorrect"

    - by JCCyC
    The sequence is like follows: Open a policy handle with LsaOpenPolicy() (not shown) Call LsaQueryInformationPolicy() to get the number of categories; For each category: Call AuditLookupCategoryGuidFromCategoryId() to turn the enum value into a GUID; Call AuditEnumerateSubCategories() to get a list of the GUIDs of all subcategories; Call AuditQuerySystemPolicy() to get the audit policies for the subcategories. All of these work and return expected, sensible values except the last. Calling AuditQuerySystemPolicy() gets me a "The parameter is incorrect" error. I'm thinking there must be some subtle unmarshaling problem. I'm probably misinterpreting what exactly AuditEnumerateSubCategories() returns, but I'm stumped. You'll see (commented) I tried to dereference the return pointer from AuditEnumerateSubCategories() as a pointer. Doing or not doing that gives the same result. Code: #region LSA types public enum POLICY_INFORMATION_CLASS { PolicyAuditLogInformation = 1, PolicyAuditEventsInformation, PolicyPrimaryDomainInformation, PolicyPdAccountInformation, PolicyAccountDomainInformation, PolicyLsaServerRoleInformation, PolicyReplicaSourceInformation, PolicyDefaultQuotaInformation, PolicyModificationInformation, PolicyAuditFullSetInformation, PolicyAuditFullQueryInformation, PolicyDnsDomainInformation } public enum POLICY_AUDIT_EVENT_TYPE { AuditCategorySystem, AuditCategoryLogon, AuditCategoryObjectAccess, AuditCategoryPrivilegeUse, AuditCategoryDetailedTracking, AuditCategoryPolicyChange, AuditCategoryAccountManagement, AuditCategoryDirectoryServiceAccess, AuditCategoryAccountLogon } [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] public struct POLICY_AUDIT_EVENTS_INFO { public bool AuditingMode; public IntPtr EventAuditingOptions; public UInt32 MaximumAuditEventCount; } [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] public struct GUID { public UInt32 Data1; public UInt16 Data2; public UInt16 Data3; public Byte Data4a; public Byte Data4b; public Byte Data4c; public Byte Data4d; public Byte Data4e; public Byte Data4f; public Byte Data4g; public Byte Data4h; public override string ToString() { return Data1.ToString("x8") + "-" + Data2.ToString("x4") + "-" + Data3.ToString("x4") + "-" + Data4a.ToString("x2") + Data4b.ToString("x2") + "-" + Data4c.ToString("x2") + Data4d.ToString("x2") + Data4e.ToString("x2") + Data4f.ToString("x2") + Data4g.ToString("x2") + Data4h.ToString("x2"); } } #endregion #region LSA Imports [DllImport("kernel32.dll")] extern static int GetLastError(); [DllImport("advapi32.dll", CharSet = CharSet.Unicode, PreserveSig = true)] public static extern UInt32 LsaNtStatusToWinError( long Status); [DllImport("advapi32.dll", CharSet = CharSet.Unicode, PreserveSig = true)] public static extern long LsaOpenPolicy( ref LSA_UNICODE_STRING SystemName, ref LSA_OBJECT_ATTRIBUTES ObjectAttributes, Int32 DesiredAccess, out IntPtr PolicyHandle ); [DllImport("advapi32.dll", CharSet = CharSet.Unicode, PreserveSig = true)] public static extern long LsaClose(IntPtr PolicyHandle); [DllImport("advapi32.dll", CharSet = CharSet.Unicode, PreserveSig = true)] public static extern long LsaFreeMemory(IntPtr Buffer); [DllImport("advapi32.dll", CharSet = CharSet.Unicode, PreserveSig = true)] public static extern void AuditFree(IntPtr Buffer); [DllImport("advapi32.dll", SetLastError = true, PreserveSig = true)] public static extern long LsaQueryInformationPolicy( IntPtr PolicyHandle, POLICY_INFORMATION_CLASS InformationClass, out IntPtr Buffer); 
[DllImport("advapi32.dll", SetLastError = true, PreserveSig = true)] public static extern bool AuditLookupCategoryGuidFromCategoryId( POLICY_AUDIT_EVENT_TYPE AuditCategoryId, IntPtr pAuditCategoryGuid); [DllImport("advapi32.dll", SetLastError = true, PreserveSig = true)] public static extern bool AuditEnumerateSubCategories( IntPtr pAuditCategoryGuid, bool bRetrieveAllSubCategories, out IntPtr ppAuditSubCategoriesArray, out ulong pCountReturned); [DllImport("advapi32.dll", SetLastError = true, PreserveSig = true)] public static extern bool AuditQuerySystemPolicy( IntPtr pSubCategoryGuids, ulong PolicyCount, out IntPtr ppAuditPolicy); #endregion Dictionary<string, UInt32> retList = new Dictionary<string, UInt32>(); long lretVal; uint retVal; IntPtr pAuditEventsInfo; lretVal = LsaQueryInformationPolicy(policyHandle, POLICY_INFORMATION_CLASS.PolicyAuditEventsInformation, out pAuditEventsInfo); retVal = LsaNtStatusToWinError(lretVal); if (retVal != 0) { LsaClose(policyHandle); throw new System.ComponentModel.Win32Exception((int)retVal); } POLICY_AUDIT_EVENTS_INFO myAuditEventsInfo = new POLICY_AUDIT_EVENTS_INFO(); myAuditEventsInfo = (POLICY_AUDIT_EVENTS_INFO)Marshal.PtrToStructure(pAuditEventsInfo, myAuditEventsInfo.GetType()); IntPtr subCats = IntPtr.Zero; ulong nSubCats = 0; for (int audCat = 0; audCat < myAuditEventsInfo.MaximumAuditEventCount; audCat++) { GUID audCatGuid = new GUID(); if (!AuditLookupCategoryGuidFromCategoryId((POLICY_AUDIT_EVENT_TYPE)audCat, new IntPtr(&audCatGuid))) { int causingError = GetLastError(); LsaFreeMemory(pAuditEventsInfo); LsaClose(policyHandle); throw new System.ComponentModel.Win32Exception(causingError); } if (!AuditEnumerateSubCategories(new IntPtr(&audCatGuid), true, out subCats, out nSubCats)) { int causingError = GetLastError(); LsaFreeMemory(pAuditEventsInfo); LsaClose(policyHandle); throw new System.ComponentModel.Win32Exception(causingError); } // Dereference the first pointer-to-pointer to point to the first subcategory // subCats = (IntPtr)Marshal.PtrToStructure(subCats, subCats.GetType()); if (nSubCats > 0) { IntPtr audPolicies = IntPtr.Zero; if (!AuditQuerySystemPolicy(subCats, nSubCats, out audPolicies)) { int causingError = GetLastError(); if (subCats != IntPtr.Zero) AuditFree(subCats); LsaFreeMemory(pAuditEventsInfo); LsaClose(policyHandle); throw new System.ComponentModel.Win32Exception(causingError); } AUDIT_POLICY_INFORMATION myAudPol = new AUDIT_POLICY_INFORMATION(); for (ulong audSubCat = 0; audSubCat < nSubCats; audSubCat++) { // Process audPolicies[audSubCat], turn GUIDs into names, fill retList. 
// http://msdn.microsoft.com/en-us/library/aa373931%28VS.85%29.aspx // http://msdn.microsoft.com/en-us/library/bb648638%28VS.85%29.aspx IntPtr itemAddr = IntPtr.Zero; IntPtr itemAddrAddr = new IntPtr(audPolicies.ToInt64() + (long)(audSubCat * (ulong)Marshal.SizeOf(itemAddr))); itemAddr = (IntPtr)Marshal.PtrToStructure(itemAddrAddr, itemAddr.GetType()); myAudPol = (AUDIT_POLICY_INFORMATION)Marshal.PtrToStructure(itemAddr, myAudPol.GetType()); retList[myAudPol.AuditSubCategoryGuid.ToString()] = myAudPol.AuditingInformation; } if (audPolicies != IntPtr.Zero) AuditFree(audPolicies); } if (subCats != IntPtr.Zero) AuditFree(subCats); subCats = IntPtr.Zero; nSubCats = 0; } lretVal = LsaFreeMemory(pAuditEventsInfo); retVal = LsaNtStatusToWinError(lretVal); if (retVal != 0) throw new System.ComponentModel.Win32Exception((int)retVal); lretVal = LsaClose(policyHandle); retVal = LsaNtStatusToWinError(lretVal); if (retVal != 0) throw new System.ComponentModel.Win32Exception((int)retVal);
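    One thing worth ruling out, offered as a sketch rather than a verified fix: the native advapi32 signatures use ULONG (32-bit) and BOOLEAN (one byte), while the declarations above marshal them as C# ulong (64-bit) and default bool, which can pass a wrong PolicyCount and produce "The parameter is incorrect". Declarations closer to the native types would look like this; note that AuditEnumerateSubCategories already hands back a pointer to a flat array of GUIDs, so the commented-out dereference should stay out:

    // Hedged P/Invoke sketch: ULONG maps to uint, BOOLEAN to a 1-byte bool.
    [DllImport("advapi32.dll", SetLastError = true)]
    [return: MarshalAs(UnmanagedType.I1)]
    public static extern bool AuditEnumerateSubCategories(
        IntPtr pAuditCategoryGuid,
        [MarshalAs(UnmanagedType.I1)] bool bRetrieveAllSubCategories,
        out IntPtr ppAuditSubCategoriesArray,   // points at the first GUID of a contiguous array
        out uint pCountReturned);

    [DllImport("advapi32.dll", SetLastError = true)]
    [return: MarshalAs(UnmanagedType.I1)]
    public static extern bool AuditQuerySystemPolicy(
        IntPtr pSubCategoryGuids,               // pass ppAuditSubCategoriesArray through as-is
        uint PolicyCount,
        out IntPtr ppAuditPolicy);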

    Read the article

  • Using custom std::set comparator

    - by Omry
    I am trying to change the default order of the items in a set of integers to be lexicographic instead of numeric, and I can't get the following to compile with g++: file.cpp: bool lex_compare(const int64_t &a, const int64_t &b) { stringstream s1,s2; s1 << a; s2 << b; return s1.str() < s2.str(); } void foo() { set<int64_t, lex_compare> > s; s.insert(1); ... } I get the following error: error: type/value mismatch at argument 2 in template parameter list for ‘template<class _Key, class _Compare, class _Alloc> class std::set’ error: expected a type, got ‘lex_compare’ what am I doing wrong?
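    The second template parameter of std::set must be a type, not a function, so the usual fix is to wrap the comparison in a functor. A sketch using the same lexicographic logic:

    #include <cstdint>
    #include <set>
    #include <sstream>

    struct lex_compare {
        bool operator()(const int64_t &a, const int64_t &b) const {
            std::stringstream s1, s2;
            s1 << a;
            s2 << b;
            return s1.str() < s2.str();
        }
    };

    void foo() {
        std::set<int64_t, lex_compare> s;   // a type as the comparator, and no stray '>'
        s.insert(1);
    }

    Keeping the free function is also possible by naming its type explicitly, e.g. std::set<int64_t, bool(*)(const int64_t&, const int64_t&)> s(lex_compare);, but the functor is the more idiomatic route.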

    Read the article

  • Accessing UIPopoverController for UIActionSheet on iPad

    - by westsider
    On the iPad, one can show a UIActionSheet using -showFromBarButtonItem:animated:. This is convenient because it wraps a UIPopoverController around the action sheet and points the popover's arrow at the UIBarButtonItem that is passed in. However, this call adds the UIBarButtonItem's toolbar to the list of passthrough views, which isn't always desirable. And, without a pointer to the UIPopoverController, one can't add other views to the passthrough list. Does anyone know of a sanctioned approach to getting a pointer to the popover controller? Thanks in advance.

    Read the article

  • MIPS: removing non alpha-numeric characters from a string

    - by Kron
    I'm in the process of writing a program in MIPS that will determine whether or not a user entered string is a palindrome. It has three subroutines which are under construction. Here is the main block of code, subroutines to follow with relevant info: .data Buffer: .asciiz " " # 80 bytes in Buffer intro: .asciiz "Hello, please enter a string of up to 80 characters. I will then tell you if that string was a palindrome!" .text main: li $v0, 4 # print_string call number la $a0, intro # pointer to string in memory syscall li $v0, 8 #syscall code for reading string la $a0, Buffer #save read string into buffer li $a1, 80 #string is 80 bytes long syscall li $s0, 0 #i = 0 li $t0, 80 #max for i to reach la $a0, Buffer jal stripNonAlpha li $v0, 4 # print_string call number la $a0, Buffer # pointer to string in memory syscall li $s0, 0 jal findEnd jal toUpperCase li $v0, 4 # print_string call number la $a0, Buffer # pointer to string in memory syscall Firstly, it's supposed to remove all non alpha-numeric characters from the string before hand, but when it encounters a character designated for removal, all characters after that are removed. stripNonAlpha: beq $s0, $t0, stripEnd #if i = 80 end add $t4, $s0, $a0 #address of Buffer[i] in $t4 lb $s1, 0($t4) #load value of Buffer[i] addi $s0, $s0, 1 #i = i + 1 slti $t1, $s1, 48 #if ascii code is less than 48 bne $t1, $zero, strip #remove ascii character slti $t1, $s1, 58 #if ascii code is greater than 57 #and slti $t2, $s1, 65 #if ascii code is less than 65 slt $t3, $t1, $t2 bne $t3, $zero, strip #remove ascii character slti $t1, $s1, 91 #if ascii code is greater than 90 #and slti $t2, $s1, 97 #if ascii code is less than 97 slt $t3, $t1, $t2 bne $t3, $zero, strip #remove ascii character slti $t1, $s1, 123 #if ascii character is greater than 122 beq $t1, $zero, strip #remove ascii character j stripNonAlpha #go to stripNonAlpha strip: #add $t5, $s0, $a0 #address of Buffer[i] in $t5 sb $0, 0($t4) #Buffer[i] = 0 #addi $s0, $s0, 1 #i = i + 1 j stripNonAlpha #go to stripNonAlpha stripEnd: la $a0, Buffer #save modified string into buffer jr $ra #return Secondly, it is supposed to convert all lowercase characters to uppercase. toUpperCase: beq $s0, $s2, upperEnd add $t4, $s0, $a0 lb $s1, 0($t4) addi $s1, $s1, 1 slti $t1, $s1, 97 #beq $t1, $zero, upper slti $t2, $s1, 123 slt $t3, $t1, $t2 bne $t1, $zero, upper j toUpperCase upper: add $t5, $s0, $a0 addi $t6, $t6, -32 sb $t6, 0($t5) j toUpperCase upperEnd: la $a0, Buffer jr $ra The final subroutine, which checks if the string is a palindrome isn't anywhere near complete at the moment. I'm having trouble finding the end of the string because I'm not sure what PC-SPIM uses as the carriage return character. Any help is appreciated, I have the feeling most of my problems result from something silly and stupid so feel free to point out anything, no matter how small.

    Read the article

  • WPF ComboBox Binding to non string object

    - by Mike L
    I'm using MVVM (MVVM Light Toolkit) and have a property on the view model which exposes a list of objects. The objects contain two properties, both strings, which correlate to an abbreviation and a description. I want the ComboBox to expose the pairing as "abbreviation - description". If I use a data template, it does this easily. I have another property on the view model which represents the object that should display as selected -- the chosen item in the ComboBox. I'm binding the ItemsSource to the list, as it represents the universe of available selections, and am trying to bind the SelectedItem to this object. I'm killing myself trying to figure out why I can't get it to work, and feeling more like a fraud by the hour. In trying to learn why this works, I created the same approach with just a list of strings, and a selected string. This works perfectly. So, it clearly has something to do with the typing... perhaps something in choosing equality? Or perhaps it has to do with the data template? Here is the XAML: <Window x:Class="MvvmLight1.MainWindow" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" Title="MainWindow" Height="300" Width="300" DataContext="{Binding Main, Source={StaticResource Locator}}"> <Window.Resources> <ResourceDictionary> <ResourceDictionary.MergedDictionaries> <ResourceDictionary Source="Skins/MainSkin.xaml" /> </ResourceDictionary.MergedDictionaries> <DataTemplate x:Key="DataTemplate1"> <StackPanel Orientation="Horizontal"> <TextBlock TextWrapping="Wrap" Text="{Binding CourtCode}"/> <TextBlock TextWrapping="Wrap" Text=" - "/> <TextBlock TextWrapping="Wrap" Text="{Binding CourtDescription}"/> </StackPanel> </DataTemplate> </ResourceDictionary> </Window.Resources> <Grid x:Name="LayoutRoot"> <ComboBox x:Name="cmbAbbrevDescriptions" Height="35" Margin="25,75,25,25" VerticalAlignment="Top" ItemsSource="{Binding Codes}" ItemTemplate="{DynamicResource DataTemplate1}" SelectedItem="{Binding selectedCode}" /> <ComboBox x:Name="cmbStrings" Height="35" Margin="25" VerticalAlignment="Top" ItemsSource="{Binding strs}" SelectedItem="{Binding selectedStr}"/> </Grid> </Window> And, if helpful, here is the ViewModel: using GalaSoft.MvvmLight; using MvvmLight1.Model; using System.Collections.Generic; namespace MvvmLight1.ViewModel { public class MainViewModel : ViewModelBase { public const string CodesPropertyName = "Codes"; private List<Court> _codes = null; public List<Court> Codes { get { return _codes; } set { if (_codes == value) { return; } var oldValue = _codes; _codes = value; // Update bindings and broadcast change using GalaSoft.Utility.Messenging RaisePropertyChanged(CodesPropertyName, oldValue, value, true); } } public const string selectedCodePropertyName = "selectedCode"; private Court _selectedCode = null; public Court selectedCode { get { return _selectedCode; } set { if (_selectedCode == value) { return; } var oldValue = _selectedCode; _selectedCode = value; // Update bindings and broadcast change using GalaSoft.Utility.Messenging RaisePropertyChanged(selectedCodePropertyName, oldValue, value, true); } } public const string strsPropertyName = "strs"; private List<string> _strs = null; public List<string> strs { get { return _strs; } set { if (_strs == value) { return; } var oldValue = _strs; _strs = value; // Update bindings and broadcast change using GalaSoft.Utility.Messenging RaisePropertyChanged(strsPropertyName, oldValue, value, true); } } public const string selectedStrPropertyName = 
"selectedStr"; private string _selectedStr = ""; public string selectedStr { get { return _selectedStr; } set { if (_selectedStr == value) { return; } var oldValue = _selectedStr; _selectedStr = value; // Update bindings and broadcast change using GalaSoft.Utility.Messenging RaisePropertyChanged(selectedStrPropertyName, oldValue, value, true); } } /// <summary> /// Initializes a new instance of the MainViewModel class. /// </summary> public MainViewModel() { Codes = new List<Court>(); Court code1 = new Court(); code1.CourtCode = "ABC"; code1.CourtDescription = "A Court"; Court code2 = new Court(); code2.CourtCode = "DEF"; code2.CourtDescription = "Second Court"; Codes.Add(code1); Codes.Add(code2); Court code3 = new Court(); code3.CourtCode = "DEF"; code3.CourtDescription = "Second Court"; selectedCode = code3; selectedStr = "Hello"; strs = new List<string>(); strs.Add("Goodbye"); strs.Add("Hello"); strs.Add("Ciao"); } } } And here is the ridiculously trivial class that is being exposed: using System; using System.Collections.Generic; using System.Linq; using System.Text; namespace MvvmLight1.Model { public class Court { public string CourtCode { get; set; } public string CourtDescription { get; set; } } } Thanks!

    Read the article

  • C# Secure Sockets (SSL)

    - by Matthias Vance
    LS, I was planning on writing a wrapper around the System.Net.Sockets.Socket class, because I didn't want to use the SslStream class, since I wanted to maintain backwards compatibility with other programs. I found an article which does exactly what I want, but on Windows Mobile. (Link: Enable SSL for managed socket on windows mobile) Quote: My first surprise was that SetSocketOption takes a SocketOptionName enum value as the second parameter, but this enum doesn’t have the equivalent of SO_SECURE. However, C# was nice enough to let me cast an arbitrary integer value to the enum I needed. I tried to do the same, but it doesn't work. Code: private const ushort SO_SECURE = 0x2001; private const ushort SO_SEC_SSL = 0x2004; this.SetSocketOption(SocketOptionLevel.Socket, (SocketOptionName) SO_SECURE, SO_SEC_SSL); Error: An unknown, invalid, or unsupported option or level was specified in a getsockopt or setsockopt call Is there a way to work around this? Kind regards, Matthias Vance

    Read the article

  • Landscape orientation for UITabBarController?

    - by gingersnap
    The UITabBarController does not allow landscape orientation. So I used a subclass of UITabBarController (called RotatingTabBarController). Its sole purpose is to allow rotation by returning YES from the shouldAutorotateToInterfaceOrientation call. The problem is that when you rotate the iPhone in the simulator it gives the following malloc error. malloc: *** error for object 0x3888000: pointer being freed was not allocated *** set a breakpoint in malloc_error_break to debug I am using the 3.0 SDK with Xcode 3.2 on Snow Leopard. I set a breakpoint in malloc_error_break but I cannot trace it back to my code. Is there something I can do to make this error go away? Here is the RotatingTabBarController class: #import <UIKit/UIKit.h> @interface RotatingTabBarController : UITabBarController { } @end @implementation RotatingTabBarController -(BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation { return YES; } @end Update: I tried the same with a category. But it gives the same malloc error. // UITabBarController+Rotation.h @interface UITabBarController (rotation) - (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation; @end // UITabBarController+Rotation.m #import "UITabBarController+Rotation.h" @implementation UITabBarController (rotation) - (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation { return YES; } @end Backtrace [Session started at 2009-09-05 12:13:19 -0400.] Untitled(992,0xa06d9500) malloc: *** error for object 0x2024000: pointer being freed was not allocated *** set a breakpoint in malloc_error_break to debug Untitled(992,0xa06d9500) malloc: *** error for object 0x2014000: pointer being freed was not allocated *** set a breakpoint in malloc_error_break to debug [Session started at 2009-09-05 12:13:27 -0400.] GNU gdb 6.3.50-20050815 (Apple version gdb-1344) (Fri Jul 3 01:19:56 UTC 2009) Copyright 2004 Free Software Foundation, Inc. GDB is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Type "show copying" to see the conditions. There is absolutely no warranty for GDB. Type "show warranty" for details. This GDB was configured as "x86_64-apple-darwin".Attaching to process 992. sharedlibrary apply-load-rules all (gdb) bt #0 0x951908fa in mach_msg_trap () #1 0x95191067 in mach_msg () #2 0x30244d62 in CFRunLoopRunSpecific () #3 0x30244628 in CFRunLoopRunInMode () #4 0x32044c31 in GSEventRunModal () #5 0x32044cf6 in GSEventRun () #6 0x309021ee in UIApplicationMain () #7 0x00002608 in main (argc=1, argv=0xbfffef94) at /Users/vishwas/Desktop/Untitled/main.m:13 (gdb)

    Read the article

  • how to instantiate template of template

    - by aaa
    Hello, I have a template that looks like this: 100 template<size_t A0, size_t A1, size_t A2, size_t A3> 101 struct mask { 103 template<size_t B0, size_t B1, size_t B2, size_t B3> 104 struct compare { 105 static const bool value = (A0 == B0 && A1 == B1 && A2 == B2 && A3 == B3); 106 }; 107 }; ... 120 const typename boost::enable_if_c< 121 mask<a,b,c,d>::compare<2,3,0,1>::value || ...>::type I am trying to instantiate the compare structure. How do I get the value in line 121?
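    Because compare is a member template of a dependent type, the compiler needs the template keyword at the point of use (and typename if the nested name were used as a type). A minimal self-contained sketch of line 121's expression with the disambiguator added:

    #include <cstddef>

    template <std::size_t A0, std::size_t A1, std::size_t A2, std::size_t A3>
    struct mask {
        template <std::size_t B0, std::size_t B1, std::size_t B2, std::size_t B3>
        struct compare {
            static const bool value =
                (A0 == B0 && A1 == B1 && A2 == B2 && A3 == B3);
        };
    };

    template <std::size_t a, std::size_t b, std::size_t c, std::size_t d>
    struct uses_mask {
        // 'template' tells the compiler that compare<...> names a member template,
        // not a less-than comparison against a data member called compare.
        static const bool value = mask<a, b, c, d>::template compare<2, 3, 0, 1>::value;
    };

    The same spelling goes inside the enable_if_c condition: boost::enable_if_c< mask<a,b,c,d>::template compare<2,3,0,1>::value || ... >::type.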

    Read the article
