Search Results

Search found 6079 results on 244 pages for 'define'.


  • Please Critique this PHP Login Script

    - by NightMICU
    Greetings, A site I developed was recently compromised, most likely by a brute force or Rainbow Table attack. The original log-in script did not have a SALT, passwords were stored in MD5. Below is an updated script, complete with SALT and IP address banning. In addition, it will send a Mayday email & SMS and disable the account should the same IP address or account attempt 4 failed log-ins. Please look it over and let me know what could be improved, what is missing, and what is just plain strange. Many thanks! <?php //Start session session_start(); //Include DB config include $_SERVER['DOCUMENT_ROOT'] . '/includes/pdo_conn.inc.php'; //Error message array $errmsg_arr = array(); $errflag = false; //Function to sanitize values received from the form. Prevents SQL injection function clean($str) { $str = @trim($str); if(get_magic_quotes_gpc()) { $str = stripslashes($str); } return $str; } //Define a SALT, the one here is for demo define('SALT', '63Yf5QNA'); //Sanitize the POST values $login = clean($_POST['login']); $password = clean($_POST['password']); //Encrypt password $encryptedPassword = md5(SALT . $password); //Input Validations //Obtain IP address and check for past failed attempts $ip_address = $_SERVER['REMOTE_ADDR']; $checkIPBan = $db->prepare("SELECT COUNT(*) FROM ip_ban WHERE ipAddr = ? OR login = ?"); $checkIPBan->execute(array($ip_address, $login)); $numAttempts = $checkIPBan->fetchColumn(); //If there are 4 failed attempts, send back to login and temporarily ban IP address if ($numAttempts == 1) { $getTotalAttempts = $db->prepare("SELECT attempts FROM ip_ban WHERE ipAddr = ? OR login = ?"); $getTotalAttempts->execute(array($ip_address, $login)); $totalAttempts = $getTotalAttempts->fetch(); $totalAttempts = $totalAttempts['attempts']; if ($totalAttempts >= 4) { //Send Mayday SMS $to = "[email protected]"; $subject = "Banned Account - $login"; $mailheaders = 'From: [email protected]' . "\r\n"; $mailheaders .= 'Reply-To: [email protected]' . "\r\n"; $mailheaders .= 'MIME-Version: 1.0' . "\r\n"; $mailheaders .= 'Content-type: text/html; charset=iso-8859-1' . "\r\n"; $msg = "<p>IP Address - " . $ip_address . ", Username - " . $login . 
"</p>"; mail($to, $subject, $msg, $mailheaders); $setAccountBan = $db->query("UPDATE ip_ban SET isBanned = 1 WHERE ipAddr = '$ip_address'"); $setAccountBan->execute(); $errmsg_arr[] = 'Too Many Login Attempts'; $errflag = true; } } if($login == '') { $errmsg_arr[] = 'Login ID missing'; $errflag = true; } if($password == '') { $errmsg_arr[] = 'Password missing'; $errflag = true; } //If there are input validations, redirect back to the login form if($errflag) { $_SESSION['ERRMSG_ARR'] = $errmsg_arr; session_write_close(); header('Location: http://somewhere.com/login.php'); exit(); } //Query database $loginSQL = $db->prepare("SELECT password FROM user_control WHERE username = ?"); $loginSQL->execute(array($login)); $loginResult = $loginSQL->fetch(); //Compare passwords if($loginResult['password'] == $encryptedPassword) { //Login Successful session_regenerate_id(); //Collect details about user and assign session details $getMemDetails = $db->prepare("SELECT * FROM user_control WHERE username = ?"); $getMemDetails->execute(array($login)); $member = $getMemDetails->fetch(); $_SESSION['SESS_MEMBER_ID'] = $member['user_id']; $_SESSION['SESS_USERNAME'] = $member['username']; $_SESSION['SESS_FIRST_NAME'] = $member['name_f']; $_SESSION['SESS_LAST_NAME'] = $member['name_l']; $_SESSION['SESS_STATUS'] = $member['status']; $_SESSION['SESS_LEVEL'] = $member['level']; //Get Last Login $_SESSION['SESS_LAST_LOGIN'] = $member['lastLogin']; //Set Last Login info $updateLog = $db->prepare("UPDATE user_control SET lastLogin = DATE_ADD(NOW(), INTERVAL 1 HOUR), ip_addr = ? WHERE user_id = ?"); $updateLog->execute(array($ip_address, $member['user_id'])); session_write_close(); //If there are past failed log-in attempts, delete old entries if ($numAttempts > 0) { //Past failed log-ins from this IP address. Delete old entries $deleteIPBan = $db->prepare("DELETE FROM ip_ban WHERE ipAddr = ?"); $deleteIPBan->execute(array($ip_address)); } if ($member['level'] != "3" || $member['status'] == "Suspended") { header("location: http://somewhere.com"); } else { header('Location: http://somewhere.com'); } exit(); } else { //Login failed. Add IP address and other details to ban table if ($numAttempts < 1) { //Add a new entry to IP Ban table $addBanEntry = $db->prepare("INSERT INTO ip_ban (ipAddr, login, attempts) VALUES (?,?,?)"); $addBanEntry->execute(array($ip_address, $login, 1)); } else { //increment Attempts count $updateBanEntry = $db->prepare("UPDATE ip_ban SET ipAddr = ?, login = ?, attempts = attempts+1 WHERE ipAddr = ? OR login = ?"); $updateBanEntry->execute(array($ip_address, $login, $ip_address, $login)); } header('Location: http://somewhere.com/login.php'); exit(); } ?>

    Read the article

  • C++ Deck and Card Class Error with bad alloc

    - by user3702164
    Just started learn to code in school. Our assignment requires us to create a card game with card,deck and hand class. I am having troubles with it now and i keep getting exception: std::bad_alloc at memory location. Here are my codes right now CardType h: #ifndef cardType_h #define cardType_h #include <string> using namespace std; class cardType{ public: void print(); int getValue() const; string getSymbol() const; string getSpecial() const; string getSuit() const; int checkSpecial(int gscore) const; cardType(); cardType(string suit,int value); private: int value; string special; string symbol; string suit; }; #endif CardType cpp: #include "cardType.h" #include <iostream> #include <string> using namespace std; void cardType::print() { cout << getSymbol() << " of " << getSuit() << ", having the value of " << getValue() << "."<< endl <<"This card's special is " << getSpecial() << endl; } int cardType::getValue() const { return value; } string cardType::getSymbol() const { return symbol; } string cardType::getSpecial() const { return special; } string cardType::getSuit() const { return suit; } cardType::cardType(){ value=0; symbol="?"; special='?'; suit='?'; } cardType::cardType(string s, int v){ suit = s; value = v; switch(v){ case 1: // Ace cards have a value of 1 and have no special type symbol="Ace"; special="None"; break; case 2: // 2 cards have a value of 2 and have no special type symbol="2"; special="None"; break; case 3: symbol="3"; // 3 cards have a value of 3 and have no special type special="None"; break; case 4: symbol="4"; // 4 cards have a value of 0 and have a special type "Reverse" which reverses the flow of the game special="Reverse"; value=0; break; case 5: symbol="5"; // 5 cards have a value of 5 and have no special type special="None"; break; case 6: symbol="6"; // 6 cards have a value of 6 and have no special type special="None"; break; case 7: symbol="7"; // 7 cards have a value of 7 and have no special type special="None"; break; case 8: symbol="8"; // 8 cards have a value of 8 and have no special type special="None"; break; case 9: symbol="9"; // 9 cards have a value of 0 and have a special type "Pass" which does not add any value to the game and lets the player skip his turn. special="Pass"; value=0; break; case 10: symbol="10"; // 10 cards have a value of 10 and have a special type "subtract" which instead of adding the 10 value to the total game it is subtracted instead. 
special="Subtract"; value=10; break; case 11: // Jack cards have a value of 10 and have no special type symbol="Jack"; special="None"; value=10; break; case 12: // Queens cards have a value of 10 and have no special type symbol="Queen"; special="None"; value=10; break; case 13: symbol="King"; // King cards have a value of 0 and have a special type "NinetyNine" which changes the total game score to 99 reguardless what number it was previously special="NinetyNine"; value=0; break; } } int cardType::checkSpecial(int gscore) const{ if(special=="Pass"){ return gscore; } if(special=="Reverse"){ return gscore; } if(special=="Subtract"){ return gscore - value; } if(special=="NinetyNine"){ return 99; } else{ return gscore + value; } } DeckType h: #ifndef deckType_h #define deckType_h #include "cardType.h" #include <string> using namespace std; class deckType { public: void shuffle(); cardType dealCard(); deckType(); private: cardType *deck; int current; }; #endif DeckType cpp: #include <iostream> #include "deckType.h" using namespace std; deckType::deckType() { int index = 0; int current=0; deck = new cardType[52]; string suit[] = {"Hearts","Diamonds","Clubs","Spades"}; int value[] = {1,2,3,4,5,6,7,8,9,10,11,12,13}; for ( int i = 0; i <= 3; i++ ) { for ( int j = 1; j <= 13; j++ ) { deck[index] = cardType(suit[i],value[j]); index++; } } } cardType deckType::dealCard() { return deck[current]; current++; } Main cpp : #include "deckType.h" #include <iostream> using namespace std; int main() { deckType gamedeck; cout << "1" <<endl; cardType currentCard; cout << "2" <<endl; currentCard = gamedeck.dealCard(); cout << "3" <<endl; return 0; } I keep getting bad_alloc at the currentCard = gamedeck.dealCard(); I really do not know what i have done wrong.

    Read the article

  • Executing legacy MSBuild scripts in TFS 2010 Build

    - by Jakob Ehn
    When upgrading from TFS 2008 to TFS 2010, all builds are “upgraded” in the sense that a build definition with the same name is created, and it uses the UpgradeTemplate  build process template to execute the build. This template basically just runs MSBuild on the existing TFSBuild.proj file. The build definition contains a property called ConfigurationFolderPath that points to the TFSBuild.proj file. So, existing builds will run just fine after upgrade. But what if you want to use the new workflow functionality in TFS 2010 Build, but still have a lot of MSBuild scripts that maybe call custom MSBuild tasks that you don’t have the time to rewrite? Then one option is to keep these MSBuild scrips and call them from a TFS 2010 Build workflow. This can be done using the MSBuild workflow activity that is avaiable in the toolbox in the Team Foundation Build Activities section: This activity wraps the call to MSBuild.exe and has the following parameters: Most of these properties are only relevant when actually compiling projects, for example C# project files. When calling custom MSBuild project files, you should focus on these properties: Property Meaning Example CommandLineArguments Use this to send in/override MSBuild properties in your project “/p:MyProperty=SomeValue” or MSBuildArguments (this will let you define the arguments in the build definition or when queuing the build) LogFile Name of the log file where MSbuild will log the output “MyBuild.log” LogFileDropLocation Location of the log file BuildDetail.DropLocation + “\log” Project The project to execute SourcesDirectory + “\BuildExtensions.targets” ResponseFile The name of the MSBuild response file SourcesDirectory + “\BuildExtensions.rsp” Targets The target(s) to execute New String() {“Target1”, “Target2”} Verbosity Logging verbosity Microsoft.TeamFoundation.Build.Workflow.BuildVerbosity.Normal Integrating with Team Build   If your MSBuild scripts tries to use Team Build tasks, they will most likely fail with the above approach. For example, the following MSBuild project file tries to add a build step using the BuildStep task:   <?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <Import Project="$(MSBuildExtensionsPath)\Microsoft\VisualStudio\TeamBuild\Microsoft.TeamFoundation.Build.targets" /> <Target Name="MyTarget"> <BuildStep TeamFoundationServerUrl="$(TeamFoundationServerUrl)" BuildUri="$(BuildUri)" Name="MyBuildStep" Message="My build step executed" Status="Succeeded"></BuildStep> </Target> </Project> When executing this file using the MSBuild activity, calling the MyTarget, it will fail with the following message: The "Microsoft.TeamFoundation.Build.Tasks.BuildStep" task could not be loaded from the assembly \PrivateAssemblies\Microsoft.TeamFoundation.Build.ProcessComponents.dll. Could not load file or assembly 'file:///D:\PrivateAssemblies\Microsoft.TeamFoundation.Build.ProcessComponents.dll' or one of its dependencies. The system cannot find the file specified. Confirm that the <UsingTask> declaration is correct, that the assembly and all its dependencies are available, and that the task contains a public class that implements Microsoft.Build.Framework.ITask. You can see that the path to the ProcessComponents.dll is incomplete. This is because in the Microsoft.TeamFoundation.Build.targets file the task is referenced using the $(TeamBuildRegPath) property. Also note that the task needs the TeamFounationServerUrl and BuildUri properties. 
    One solution here is to pass these properties in using the CommandLineArguments parameter. Here we pass in the parameters with the corresponding values from the current build. The build log shows that the build step has in fact been inserted. The problem, as you probably spotted, is that the build step is inserted at the top of the build log, instead of next to the MSBuild activity call. This is because we are using a legacy team build task (BuildStep), and that is how these are handled in TFS 2010. You can see the same behaviour when running builds that use the UpgradeTemplate: custom build steps show up at the top of the build log.

    Read the article

  • How to implement multi-source XSLT mapping in 11g BPEL

    - by [email protected]
    In SOA 11g, you can create an XSLT mapper that uses multiple sources as its input. To implement a multi-source mapper, follow the steps below:
    1. Drag and drop a Transform Activity onto a BPEL process.
    2. Double-click the Transform Activity; the Transform dialog window appears.
    3. Add source variables by clicking the Add icon and selecting the variable and the part of the variable as needed. You can select multiple input variables. The first variable represents the main XML input to the XSL mapping, while additional variables added here are defined in the XSL mapping as input parameters.
    4. Select the target variable and its part, if available.
    5. Specify the mapper file name; the default file name is xsl/Transformation_%SEQ%.xsl, where %SEQ% represents the sequence number of the mapper.
    6. Click OK; the xsl file will be opened in graphical mode and you can map the sources to the target as usual.
    Open the mapper source code and you will notice that the variable representing the additional source payload is defined as an input parameter in the map source spec and body:
        <mapSources>
          <source type="XSD">
            <schema location="../xsd/po.xsd"/>
            <rootElement name="PurchaseOrder" namespace="http://www.oracle.com/pcbpel/po"/>
          </source>
          <source type="XSD">
            <schema location="../xsd/customer.xsd"/>
            <rootElement name="Customer" namespace="http://www.oracle.com/pcbpel/Customer"/>
            <param name="v_customer" />
          </source>
        </mapSources>
        ...
        <xsl:param name="v_customer"/>
    Let's take a look at the BPEL source code used to execute the XSLT mapper:
        <assign name="Transform_1">
          <bpelx:annotation>
            <bpelx:pattern>transformation</bpelx:pattern>
          </bpelx:annotation>
          <copy>
            <from expression="ora:doXSLTransformForDoc('xsl/Transformation_1.xsl',bpws:getVariableData('v_po'),'v_customer',bpws:getVariableData('v_customer'))"/>
            <to variable="v_invoice"/>
          </copy>
        </assign>
    You will see that BPEL uses the ora:doXSLTransformForDoc XPath function to execute the XSLT mapper. This function returns the result of the XSLT transformation when the XSLT template is applied to the document. The signature of this function is ora:doXSLTransformForDoc(template, input, [paramQName, paramValue]*), where template is the XSLT mapper name, input is the string representation of the XML input, paramQName is the parameter defined in the XSLT mapper as the additional source, and paramValue is the additional source payload. You can add more sources to the mapper at a later stage, but you then have to modify the ora:doXSLTransformForDoc call in the BPEL source code and make sure it passes the correct parameter and value pairs to reflect the changes in the XSLT mapper. So the best practices are: create the variables before creating the mapping file, so that you can add multiple sources when you define the transformation in the first place, which is more straightforward than adding them later on; and review the ora:doXSLTransformForDoc code in the BPEL source and make sure it passes the correct parameters to the mapper.
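    The same idea, a main input document plus extra sources exposed to the stylesheet as xsl:param values, can be reproduced outside BPEL for quick testing. Below is a minimal sketch using Python's lxml bindings; the file names, and the assumption that the stylesheet declares a v_customer parameter, are illustrative rather than taken from the post.

        # Hedged sketch: run an XSLT with an extra source passed as a parameter,
        # mirroring how the BPEL mapper exposes additional sources as xsl:param inputs.
        # Assumes lxml is installed; file names are hypothetical.
        from lxml import etree

        # Main input document (plays the role of v_po in the BPEL example).
        po_doc = etree.parse("po.xml")

        # Compile the stylesheet, assumed to declare <xsl:param name="v_customer"/>.
        transform = etree.XSLT(etree.parse("Transformation_1.xsl"))

        # Pass the additional source as a parameter. Here we hand over a file name
        # as an XPath string literal, so the stylesheet can read it with
        # document($v_customer); a plain string value would instead be wrapped
        # with etree.XSLT.strparam().
        result = transform(po_doc, v_customer="'customer.xml'")

        print(str(result))

    In the BPEL case the runtime passes the payload itself as the parameter value, as shown in the assign activity above; the sketch only illustrates the stylesheet-parameter mechanism.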

    Read the article

  • What is the Oracle Utilities Application Framework?

    - by Anthony Shorten
    The Oracle Utilities Application Framework is a reusable, scalable and flexible java based framework which allows other products to be built, configured and implemented in a standard way. Note: Even though the Framework is built in java it can be integrated with COBOL based extensions for backward compatibility. When Oracle Utilities Customer Care & Billing was migrated from V1 to V2, it was decided that the technical aspects of that product be separated to allow for reuse and independence from technical issues. The idea was that all the technical aspects would be concentrated in this separate product (i.e. a framework) and allow all products using the framework to concentrate on delivering superior functionality. The product was named the Oracle Utilities Application Framework (oufw is the product code). The technical components are contained in the Oracle Utilities Application Framework which can be summarized as follows: Metadata - The Oracle Utilities Application Framework is responsible for defining and using the metadata to define the runtime behavior of the product. All the metadata definition and management is contained within the Oracle Utilities Application Framework. UI Management - The Oracle Utilities Application Framework is responsible for defining and rendering the pages and responsible for ensuring the pages are in the appropriate format for the locale. Integration - The Oracle Utilities Application Framework is responsible for providing the integration points to the architecture. Refer to the Oracle Utilities Application Framework Integration Overview for more details Tools - The Oracle Utilities Application Framework provides a common set of facilities and tools that can be used across all products. Technology - The Oracle Utilities Application Framework is responsible for all technology standards compliance, platform support and integration. There are a number of products from the Tax and Utilities Global Business Unit as well as from the Financial Services Global Business Unit that are built upon the Oracle Utilities Application Framework. These products require the Oracle Utilities Application Framework to be installed first and then the product itself installed onto the framework to complete the installation process. There are a number of key benefits that the Oracle Utilities Application Framework provides to these products: Common facilities - The Oracle Utilities Application Framework provides a standard set of technical facilities that mean that products can concentrate in the unique aspects of their markets rather than making technical decisions. Common methods of configuration - The Oracle Utilities Application Framework standardizes the technical configuration process for a product. Customers can effectively reuse the configuration process across products. Multi-lingual and Multi-platform - The Oracle Utilities Application Framework allows the products to be offered in more markets and across multiple platforms for maximized flexibility. Common methods of implementation - The Oracle Utilities Application Framework standardizes the technical aspects of a product implementation. Customers can effectively reuse the technical implementation process across products. Quicker adoption of new technologies - As new technologies and standards are identified as being important for the product line, they can be integrated centrally benefiting multiple products. 
    Cross product reuse - As enhancements to the Oracle Utilities Application Framework are identified by a particular product, all products can potentially benefit from the enhancement. Note: Use of the Oracle Utilities Application Framework does not preclude the introduction of product specific technologies or facilities to satisfy market needs. The framework minimizes the need and assists in the quick integration of a new product specific piece of technology (if necessary). The Framework is not available as a product itself and is bundled with Tax and Utilities Global Business Unit products. At the present time the following products are on the Framework: Oracle Utilities Customer Care And Billing (V2 and above), Oracle Enterprise Taxation Management (V2 and above), Oracle Utilities Business Intelligence (V2 and above) and Oracle Utilities Mobile Workforce Management (V2 and above).

    Read the article

  • Hierarchical View/ViewModel/Presenters in MVPVM

    - by Brian Flynn
    I've been working with MVVM for a while, but I've recently started using MVPVM and I want to know how to create hierarchial View/ViewModel/Presenter app using this pattern. In MVVM I would typically build my application using a hierarchy of Views and corresponding ViewModels e.g. I might define 3 views as follows: The View Models for these views would be as follows: public class AViewModel { public string Text { get { return "This is A!"; } } public object Child1 { get; set; } public object Child2 { get; set; } } public class BViewModel { public string Text { get { return "This is B!"; } } } public class CViewModel { public string Text { get { return "This is C!"; } } } In would then have some data templates to say that BViewModel and CViewModel should be presented using View B and View C: <DataTemplate DataType="{StaticResource local:BViewModel}"> <local:BView/> </DataTemplate> <DataTemplate DataType="{StaticResource local:CViewModel}"> <local:CView/> </DataTemplate> The final step would be to put some code in AViewModel that would assign values to Child1 and Child2: public AViewModel() { this.Child1 = new AViewModel(); this.Child2 = new BViewModel(); } The result of all this would be a screen that looks something like: Doing this in MVPVM would be fairly simple - simply moving the code in AViewModel's constructor to APresenter: public class APresenter { .... public void WireUp() { ViewModel.Child1 = new BViewModel(); ViewModel.Child2 = new CViewModel(); } } But If I want to have business logic for BViewModel and CViewModel I would need to have a BPresenter and a CPresenter - the problem is, Im not sure where the best place to put these are. I could store references to the presenter for AViewModel.Child1 and AViewModel.Child2 in APresenter i.e.: public class APresenter : IPresenter { private IPresenter child1Presenter; private IPresenter child2Presenter; public void WireUp() { child1Presenter = new BPresenter(); child1Presenter.WireUp(); child2Presenter = new CPresenter(); child2Presenter.WireUp(); ViewModel.Child1 = child1Presenter.ViewModel; ViewModel.Child2 = child2Presenter.ViewModel; } } But this solution seems inelegant compared to the MVVM approach. I have to keep track of both the presenter and the view model and ensure they stay in sync. If, for example, I wanted a button on View A, which, when clicked swapped the View's in Child1 and Child2, I might have a command that did the following: var temp = ViewModel.Child1; ViewModel.Child1 = ViewModel.Child2; ViewModel.Child2 = temp; This would work as far as swapping the view's on screen (assuming the correct Property Change notification code is in place), but now my APresenter.child1Presenter is pointing to the presenter for AViewModel.Child2, and APresenter.child2Presenter is pointing to the presenter for AViewModel.Child1. If something accesses APresenter.child1Presenter, any changes will actually happen to AViewModel.Child2. I can imagine this leading to all sorts of debugging fun. I know that I may be misunderstanding the pattern, and if this is the case a clarification of what Im doing wrong would be appreciated.

    Read the article

  • Initializing and drawing a mesh using OpenTK

    - by Boreal
    I'm implementing a "Mesh" class to use in my OpenTK game. You pass in a vertex array and an index array, and then you can call Mesh.Draw() to draw it using a shader. I've heard VBO's and VAO's are the way to go for this approach, but nowhere have I found a guide that shows how to get Data Video Memory Shader. Can someone give me a quick rundown of how this works? EDIT: So far, I have this: struct Vertex { public Vector3 position; public Vector3 normal; public Vector3 color; public static int memSize = 9 * sizeof(float); public static byte[] memOffset = { 0, 3 * sizeof(float), 6 * sizeof(float) }; } class Mesh { private uint vbo; private uint ibo; // stores the numbers of vertices and indices private int numVertices; private int numIndices; public Mesh(int numVertices, Vertex[] vertices, int numIndices, ushort[] indices) { // set numbers this.numVertices = numVertices; this.numIndices = numIndices; // generate buffers GL.GenBuffers(1, out vbo); GL.GenBuffers(1, out ibo); GL.BindBuffer(BufferTarget.ArrayBuffer, vbo); GL.BindBuffer(BufferTarget.ElementArrayBuffer, ibo); // send data to the buffers GL.BufferData(BufferTarget.ArrayBuffer, new IntPtr(Vertex.memSize * numVertices), vertices, BufferUsageHint.StaticDraw); GL.BufferData(BufferTarget.ElementArrayBuffer, new IntPtr(sizeof(ushort) * numIndices), indices, BufferUsageHint.StaticDraw); } public void Render() { // bind buffers GL.BindBuffer(BufferTarget.ArrayBuffer, vbo); GL.BindBuffer(BufferTarget.ElementArrayBuffer, ibo); // define offsets GL.VertexPointer(3, VertexPointerType.Float, Vertex.memSize, new IntPtr(Vertex.memOffset[0])); GL.NormalPointer(NormalPointerType.Float, Vertex.memSize, new IntPtr(Vertex.memOffset[1])); GL.ColorPointer(3, ColorPointerType.Float, Vertex.memSize, new IntPtr(Vertex.memOffset[2])); // draw GL.DrawElements(BeginMode.Triangles, numIndices, DrawElementsType.UnsignedInt, (IntPtr)0); } } class Application : GameWindow { Mesh triangle; protected override void OnLoad(EventArgs e) { base.OnLoad(e); GL.ClearColor(0.1f, 0.2f, 0.5f, 0.0f); GL.Enable(EnableCap.DepthTest); GL.Enable(EnableCap.VertexArray); GL.Enable(EnableCap.NormalArray); GL.Enable(EnableCap.ColorArray); Vertex v0 = new Vertex(); v0.position = new Vector3(-1.0f, -1.0f, 4.0f); v0.normal = new Vector3(0.0f, 0.0f, -1.0f); v0.color = new Vector3(1.0f, 1.0f, 0.0f); Vertex v1 = new Vertex(); v1.position = new Vector3(1.0f, -1.0f, 4.0f); v1.normal = new Vector3(0.0f, 0.0f, -1.0f); v1.color = new Vector3(1.0f, 0.0f, 0.0f); Vertex v2 = new Vertex(); v2.position = new Vector3(0.0f, 1.0f, 4.0f); v2.normal = new Vector3(0.0f, 0.0f, -1.0f); v2.color = new Vector3(0.2f, 0.9f, 1.0f); Vertex[] va = { v0, v1, v2 }; ushort[] ia = { 0, 1, 2 }; triangle = new Mesh(3, va, 3, ia); } protected override void OnRenderFrame(FrameEventArgs e) { base.OnRenderFrame(e); GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit); Matrix4 modelview = Matrix4.LookAt(Vector3.Zero, Vector3.UnitZ, Vector3.UnitY); GL.MatrixMode(MatrixMode.Modelview); GL.LoadMatrix(ref modelview); triangle.Render(); SwapBuffers(); } } It doesn't draw anything.

    Read the article

  • GPGPU

    What
    GPU obviously stands for Graphics Processing Unit (the silicon powering the display you are using to read this blog post). The extra GP in front of that stands for General Purpose computing. So, altogether GPGPU refers to computing we can perform on GPU for purposes beyond just drawing on the screen. In effect, we can use a GPGPU a bit like we already use a CPU: to perform some calculation (that doesn’t have to have any visual element to it). The attraction is that a GPGPU can be orders of magnitude faster than a CPU.
    Why
    When I was at the SuperComputing conference in Portland last November, GPGPUs were all the rage. A quick online search reveals many articles introducing the GPGPU topic. I'll just share 3 here: pcper (ignoring all pages except the first, it is a good consumer perspective), gizmodo (nice take using mostly layman terms) and vizworld (answering the question on "what's the big deal"). The GPGPU programming paradigm (from a high level) is simple: in your CPU program you define functions (aka kernels) that take some input, can perform the costly operation and return the output. The kernels are the things that execute on the GPGPU leveraging its power (and hence execute faster than what they could on the CPU) while the host CPU program waits for the results or asynchronously performs other tasks. However, GPGPUs have different characteristics to CPUs, which means they are suitable only for certain classes of problem (i.e. data parallel algorithms) and not for others (e.g. algorithms with branching or recursion or other complex flow control). You also pay a high cost for transferring the input data from the CPU to the GPU (and vice versa the results back to the CPU), so the computation itself has to be long enough to justify the overhead transfer costs. If your problem space fits the criteria then you probably want to check out this technology.
    How
    So where can you get a graphics card to start playing with all this? At the time of writing, the two main vendors ATI (owned by AMD) and NVIDIA are the obvious players in this industry. You can read about GPGPU on this AMD page and also on this NVIDIA page. NVIDIA's website also has a free chapter on the topic from the "GPU Gems" book: A Toolkit for Computation on GPUs. If you followed the links above, then you've already come across some of the choices of programming models that are available today. Essentially, AMD is offering their ATI Stream technology accessible via a language they call Brook+; NVIDIA offers their CUDA platform which is accessible from CUDA C. Choosing either of those locks you into the GPU vendor and hence your code cannot run on systems with cards from the other vendor (e.g. imagine if your CPU code would run on Intel chips but not AMD chips). Having said that, both vendors plan to support a new emerging standard called OpenCL, which theoretically means your kernels can execute on any GPU that supports it. To learn more about all of these there is a website: gpgpu.org. The caveat about that site is that (currently) it completely ignores the Microsoft approach, which I touch on next.
    On Windows, there is already a cross-GPU-vendor way of programming GPUs and that is the DirectX API. Specifically, on Windows Vista and Windows 7, the DirectX 11 API offers a dedicated subset of the API for GPGPU programming: DirectCompute. You use this API on the CPU side, to set up and execute the kernels that run on the GPU. The kernels are written in a language called HLSL (High Level Shader Language).
    You can use DirectCompute with HLSL to write a "compute shader", which is the term DirectX uses for what I've been referring to in this post as a "kernel". For a comprehensive collection of links about this (including tutorials, videos and samples) please see my blog post: DirectCompute. Note that there are many efforts to build even higher level languages on top of DirectX that aim to expose GPGPU programming to a wider audience by making it as easy as today's mainstream programming models. I'll mention here just two of those efforts: Accelerator from MSR and Brahma by Ananth. Comments about this post welcome at the original blog.
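    To make the kernel/host split concrete, here is a minimal sketch of the data-parallel pattern described above, written against the vendor-neutral OpenCL standard through Python's pyopencl bindings. This toolchain is my own illustrative choice rather than something the post prescribes, and it assumes numpy, pyopencl and a working OpenCL driver are installed.

        # Minimal GPGPU sketch: the host program sets up buffers, launches a tiny
        # data-parallel kernel (one work-item per element, no branching), and
        # copies the result back. Illustrative only.
        import numpy as np
        import pyopencl as cl

        a = np.random.rand(1_000_000).astype(np.float32)
        b = np.random.rand(1_000_000).astype(np.float32)

        ctx = cl.create_some_context()      # picks an OpenCL device (GPU if available)
        queue = cl.CommandQueue(ctx)

        mf = cl.mem_flags
        a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
        b_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)
        out_buf = cl.Buffer(ctx, mf.WRITE_ONLY, a.nbytes)

        # The "kernel" that runs on the GPU.
        program = cl.Program(ctx, """
        __kernel void vec_add(__global const float *a,
                              __global const float *b,
                              __global float *out) {
            int gid = get_global_id(0);
            out[gid] = a[gid] + b[gid];
        }
        """).build()

        program.vec_add(queue, a.shape, None, a_buf, b_buf, out_buf)

        result = np.empty_like(a)
        cl.enqueue_copy(queue, result, out_buf)   # transfer results back to the CPU
        assert np.allclose(result, a + b)

    Note how host-to-device and device-to-host copies bracket the computation; that is exactly the transfer overhead the post says has to be justified by a long enough computation.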

    Read the article

  • Redaction in AutoVue

    - by [email protected]
    As the trend to digitize all paper assets continues, so does the push to digitize all the processes around these assets. One such process is redaction - removing sensitive or classified information from documents. While for some this may conjure up thoughts of old CIA documents filled with nothing but blacked out pages, there are actually many uses for redaction today beyond military and government. Many companies have a need to remove names, phone numbers, social security numbers, credit card numbers, etc. from documents that are being scanned in and/or released to the public or less privileged users - insurance companies, banks and legal firms are a few examples. The process of digital redaction actually isn't that far from the old paper method: Step 1. Find a folder with a big red stamp on it labeled "TOP SECRET" Step 2. Make a copy of that document, since some folks still need to access the original contents Step 3. Black out the text or pages you want to hide Step 4. Release or distribute this new 'redacted' copy So where does a solution like AutoVue come in? Well, we've really been doing all of these things for years! 1. With AutoVue's VueLink integration and iSDK, we can integrate to virtually any content management system and view documents of almost any format with a single click. Finding the document and opening it in AutoVue: CHECK! 2. With AutoVue's markup capabilities, adding filled boxes (or other shapes) around certain text is a no-brainer. You can even leverage AutoVue's powerful APIs to automate the addition of markups over certain text or pre-defined regions using our APIs. Black out the text you want to hide: CHECK! 3. With AutoVue's conversion capabilities, you can 'burn-in' the comments into a new file, either as a TIFF, JPEG or PDF document. Burning-in the redactions avoids slip-ups like the recent (well-publicized) TSA one. Through our tight integrations, the newly created copies can be directly checked into the content management system with no manual intervention. Make a copy of that document: CHECK! 4. Again, leveraging AutoVue's integrations, we can now define rules in the system based on a user's privileges. An 'authorized' user wishing to view the document from the repository will get exactly that - no redactions. An 'unauthorized' user, when requesting to view that same document, can get redirected to open the redacted copy of the same document. Release or distribute the new 'redacted' copy: CHECK! See this movie (WMV format, 2mins, 20secs, no audio) for a quick illustration of AutoVue's redaction capabilities. It shows how redactions can be added based on text searches, manual input or pre-defined templates/regions. Let us know what you think in the comments. And remember - this is all in our flagship AutoVue product - no additional software required!

    Read the article

  • SQL SERVER – What is a Technology Evangelist?

    - by pinaldave
    When you hear that someone is an “evangelist” the first thing that might pop into your mind is the Christian church.  In fact, the term did come from Christianity, and basically means someone who spreads the news about their faith.  In the technology world, the same definition is true. Technology evangelists are individuals who, professionally or in their spare time, spread the news about the latest new products.  Sounds like a salesperson, right?  No they are absolutely different. Salespeople also keep up to date with a large number of people, and like to convince others to buy their product – and some will go to any lengths to sell!  An evangelist, on the other hand, is brutally honest about the product, even if sometimes it means not making a sale.  An evangelist is out there to tell the TRUTH.  A salesperson needs to make sales. An Evangelist offers a Solution independent of Technology used – a Salesperson offers Particular Technology. With this definition in mind, you can probably think of a few technology evangelists you already know.  Maybe it’s a relative or a neighbor, someone who loves keeping up with the latest trends and is always willing to tell you about them if you ask even the simplest question.  And, in fact, they probably are evangelists and don’t even know it.  For a long time, the work of technology evangelism was in the hands of community and community technology leaders. Luckily now various organizations have understood the importance of the community and helping community to reach their goals. This has lead them to create role of “Technology Evangelists”. Let me talk about one of the most famous Evangelist of the SQL Server technology. Technology Evangelist only belongs to technology and above any country, race, location or any other thing. They are dedicated to the technology. Vinod Kumar is such a man, who have given a lot to community. For years he was a Technology Evangelist for Microsoft, and maintained a blog that was dedicated to spreading his enthusiasm for his favorite products.  He is one of the most respected Evangelists in the field, and has done a lot of work to define the job for other professionals. Vinod’s career has since progressed to the Microsoft Technology Center (read his post), but he is continuing to be a strong presence in the evangelism community.  I have a lot of respect for Vinod.  He has done a lot for the community and technology evangelism.  Everybody has dream to serve community the way he does, and he is a great role model for evangelists everywhere. On his blog, Vinod created one of the best descriptions of a Technology Evangelist.  It defined the position and also made the distinction between evangelist and salesperson extremely clear.  I will include the highlights of that list here, because no one can say it better than Vinod: Bundle of energy – Passion is their middle name Wonderful Story tellers Empathy, Trust, Loyalty, Openness, Accessibility and Warmth Technology Enthusiast – Doers Love people, people and more people – Community oriented Unique Style and Leadership qualities !!! Self-Confident, Self-Motivated but a student (To read the full list, see: Evangelism Beyond Borders with Evangelists) His blog is a must-read for anyone interested in technology evangelism as a career or simply a hobby.  His advice about how to gain an audience and become a trusted advisor is the best in the business. I think there is an evangelist in everyone. I, too, consider myself a technology evangelist.  
    Regular readers of this blog will recognize that I am dedicated to bringing information to the masses, and that I pride myself on being brutally honest and on giving every product fair consideration. I think there is no better way to put it than this: “Once an Evangelist – Always an Evangelist!” Reference: Pinal Dave (http://blog.SQLAuthority.com)     Filed under: About Me, Database, MVP, Pinal Dave, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQLAuthority News, T SQL, Technology Tagged: Evangelist

    Read the article

  • Getting a Database into Source Control

    - by Grant Fritchey
    For any number of reasons, from simple auditing, to change tracking, to automated deployment, to integration with application development processes, you’re going to want to place your database into source control. Using Red Gate SQL Source Control this process is extremely simple. SQL Source Control works within your SQL Server Management Studio (SSMS) interface.  This means you can work with your databases in any way that you’re used to working with them. If you prefer scripts to using the GUI, not a problem. If you prefer using the GUI to having to learn T-SQL, again, that’s fine. After installing SQL Source Control, this is what you’ll see when you open SSMS:   SQL Source Control is now a direct piece of the SSMS environment. The key point initially is that I currently don’t have a database selected. You can even see that in the SQL Source Control window where it shows, in red, “No database selected – select a database in Object Explorer.” If I expand my Databases list in the Object Explorer, you’ll be able to immediately see which databases have been integrated with source control and which have not. There are visible differences between the databases as you can see here:   To add a database to source control, I first have to select it. For this example, I’m going to add the AdventureWorks2012 database to an instance of the SVN source control software (I’m using uberSVN). When I click on the AdventureWorks2012 database, the SQL Source Control screen changes:   I’m going to need to click on the “Link database to source control” text which will open up a window for connecting this database to the source control system of my choice.  You can pick from the default source control systems on the left, or define one of your own. I also have to provide the connection string for the location within the source control system where I’ll be storing my database code. I set these up in advance. You’ll need two. One for the main set of scripts and one for special scripts called Migrations that deal with different kinds of changes between versions of the code. Migrations help you solve problems like having to create or modify data in columns as part of a structural change. I’ll talk more about them another day. Finally, I have to determine if this is an isolated environment that I’m going to be the only one use, a dedicated database. Or, if I’m sharing the database in a shared environment with other developers, a shared database.  The main difference is, under a dedicated database, I will need to regularly get any changes that other developers have made from source control and integrate it into my database. While, under a shared database, all changes for all developers are made at the same time, which means you could commit other peoples work without proper testing. It all depends on the type of environment you work within. But, when it’s all set, it will look like this: SQL Source Control will compare the results between the empty folders in source control and the database, AdventureWorks2012. You’ll get a report showing exactly the list of differences and you can choose which ones will get checked into source control. Each of the database objects is scripted individually. You’ll be able to modify them later in the same way. Here’s the list of differences for my new database:   You can select/deselect all the objects or each object individually. You also get a report showing the differences between what’s in the database and what’s in source control. 
If there was already a database in source control, you’d only see changes to database objects rather than every single object. You can see that the database objects can be sorted by name, by type, or other choices. I’m going to add a comment such as “Initial creation of database in source control.” And then click on the Commit button which will put all the objects in my database into the source control system. That’s all it takes to get the objects into source control initially. Now is when things can get fun with breaking changes to code, automated deployments, unit testing and all the rest.

    Read the article

  • Web Services for Info Explorer Zones

    - by Anthony Shorten
    One of the most interesting uses for XAI and Configurable objects is the exposure of a query portal as a Web Service. Let me illustrate this with an example. Say you have an interface that requires a list of data from a number of product tables. In the past you would have to build a java program to do this with SQL then use an application service but it is now possible with just configuration. The first step in the process is to create the SQL you want to use for the interface. It can be any valid static SQL or use host variables for the WHERE clause (we call that filtered). Once you are happy with the SQL (and it performs acceptably) you can incorporate that SQL into a Info-Explorer Zone. You can use any of the explorer zone types but I typically recommend F1-DE-SINGLE as it supports a single SQL statement with multiple filters (up to 15) as well as hidden filters (up to 5). Hidden filters are typically not displayed in the UI for criteria (remember explorer zones can be used on the user Interface as well) but for web services they can be used as normal filters (this means you can use up to 20 filters all up). Once you are happy with the zone, you now need to define it as a Business Service. We have a generic service called FWLZDEXP which allows a explorer zone to be defined as a Business Service. If you open any Business Service based upon FWLZDEXP you will see some examples. The schema is standard and pretty self explanatory in terms of the structure. The schema pattern looks like this: Zone element - maps to the ZONE_CD element and the default value is the zone name you just created. This links the business service to the zone. Filter elements - You name the filters as you like but the mapField is set to Fx_VALUE where x is the filter number corresponding to the filter element in the zone definition. Hidden filter elements - You name the filters as you like but the mapField is set to Hx_VALUE where x is the filter number corresponding to the hidden filter element in the zone definition. results group - this holds the elements of the result set. Each element in your result set has a tagname and is linked to the COL_VALUE mapField and the row element is lists the SEQNO of the column. This corresponds to the column number in the results set in the zone. An example schema is shown below for the F1-USGRACML zone, which returns the access modes for a user group and application service filters. In the example, the userGroup and applicationService elements are the filters and the rows would contain a list of accessModeDescr. This is just a simple example to illustrate the point. There are lots of examples in the product that you can investigate. One recommendation, to save time, is that you copy the schema from one of the examples to save you typing it from scratch. You can simply modify the tags and other elements to suit your needs. Once the Business Service is defined it can simply be defined as a Web Service by registering an XAI Inbound Service using the Business Service definition as a basis. You now have a Web Service based upon a Info Explorer Zone. This is one of my favorite components as it allows interfaces to be simplified. This will be my last blog entry for this year. I hope you all have a great and safe Christmas and an even greater new year. Next year promises to be an exciting year and I look forward to communicating exciting developments we are working on at the moment as they are released.

    Read the article

  • Oracle WebCenter - Well Connected

    - by Brian Dirking
    A good post from Dan Elam on the state of the ECM industry (http://www.aiim.org/community/blogs/community/ECM-Vendors-go-to-War). For those of you who don’t know Dan, he is one of the major forces in the content management industry. He founded eVisory and IMERGE Consulting, he is an AIIM Fellow and a former US Technical Expert to the International Standards Organization (ISO), and has been a driving force behind EmTag, AIIM’s Emerging Technologies Group. His post is interesting – it starts out talking about our Moveoff Documentum campaign, but then it becomes a much deeper insight into the ECM industry. Dan points out that Oracle has been making quiet strides in the ECM industry. In fact, analysts share this view of Oracle, pointing out that Oracle is growing greater than 20% annually while many of the big vendors are shrinking. And as Dan points out, this cements Oracle as one of the big five in the ECM space – the same week that Autonomy was removed from the Gartner Magic Quadrant for ECM. One of the key things Dan points out is that Oracle WebCenter is well connected. WebCenter has out-of-the-box connections to key enterprise applications such as E-Business Suite, PeopleSoft, Siebel and JD Edwards. Those out-of-the-box integrations make it easy for organizations to drive content right into the places where it is needed, in the midst of business processes. At the same time, WebCenter provides composite interface capabilities to bring together two or more of these enterprise applications onto the same screen. Combine that with the capabilities of Oracle Social Network, and you start to see how Oracle is providing a full platform for user engagement. But beyond those connections, WebCenter can also connect to other content management systems. It can index and search those systems from a single point of search, bringing back results in a single combined hitlist. WebCenter can also extend records management capabilities into Documentum, SharePoint, and email archiving systems. From a single console, records managers can define a series, set a retention schedule, and place holds – without having to go to each system to make these updates. Dan points out that there are some new competitive dynamics – to be sure. And it is interesting when a system can interact with another system, enforce dispositions and holds, and enable users to search and retrieve content. Oracle WebCenter is providing the infrastructure to build on, and the interfaces to drive user engagement. It’s an interesting time.

    Read the article

  • The “Customer” Experience Revolution is Here

    - by Natalia Rachelson
    A guest post by Anthony Lye, SVP, Oracle Development
    The Experience Revolution is here, and we are going to explore and celebrate our new customer experience ventures and strategy in an extraordinary way. In true Oracle fashion, we are hosting an exceptional event, bringing together customer experience advocates, visionaries and practitioners to discover and define Oracle’s Customer Experience vision. The Experience Revolution is best described as today’s era of the empowered consumer. For those of us who work with customers on a daily basis, we know that the modern consumer demands fast, accurate, consistent information across all communication channels. And if they don’t like the services received, they can easily take to social channels to voice disapproval. For this reason, organizations today operate in an environment where traditional methods of differentiation are less effective and customer experience has become the primary driver of business value. Here’s some food for thought: according to the 2011 Customer Experience Impact (CEI) Report, a full 89 percent of consumers will switch brands for a better customer experience. In short, in today’s era of the empowered consumer, delivering excellent customer experiences is what will, and is, defining the next great brands. At The Experience Revolution, Oracle President Mark Hurd will detail the vision of where customer experience is going and how Oracle will help you get there. He will introduce for the first time Oracle Customer Experience, a cross-stack suite of customer experience products that enable organizations to:
    - Engage customers with a consistent, connected and personalized brand experience across all channels and devices
    - Deliver exceptional cross-channel order fulfillment and customer service through web, call centers and social networks
    - Connect and analyze data from all interactions to better personalize experiences and identify hidden opportunities
    The Experience Revolution will also include an interactive gallery of customer experience interactions, featuring videos, touch screens and near field communication technology that will guide each attendee through an individualized event experience. We hope you will join us for an incredible evening on June 25, from 6:00 – 9:00 p.m. at Gotham Hall in New York City. You can register for The Experience Revolution here. And if you haven’t already joined the conversation on Twitter, please do: #OracleCX, #ExperienceRevolution

    Read the article

  • How to reproject a shapefile from WGS 84 to Spherical/Web Mercator projection.

    - by samkea
    Definitions: You will need to know the meaning of the terms below. I have given a small description of each acronym, but you can google them to learn more.
    #1: WGS-84 - World Geodetic System (1984) - is a standard reference coordinate system used for Cartography, Geodesy and Navigation.
    #2: EPSG - European Petroleum Survey Group - was a scientific organization with ties to the European petroleum industry consisting of specialists working in applied geodesy, surveying, and cartography related to oil exploration. EPSG::4326 is a common coordinate reference system that refers to WGS84 as (latitude, longitude) pair coordinates in degrees with Greenwich as the central meridian. Any degree representation (e.g., decimal or DMSH: degrees minutes seconds hemisphere) may be used. Which degree representation is used must be declared for the user by the supplier of data. So, the Spherical/Web Mercator projection is referred to as EPSG::3785, which is renamed to EPSG:900913 by Google for use in Google Maps. The associated CRS (Coordinate Reference System) for this is the "Popular Visualisation CRS / Mercator". This is the kind of projection that is used by GoogleMaps, BingMaps, OSM, Virtual Earth, DeepEarth, etc. to show interactive maps over the web with their nearly precise coordinates.
    Reprojection: After reading a lot about reprojecting my coordinates from the DeepEarth project on CodePlex, I still could not do it. After some help from a colleague, I got the ball rolling. This is how I did it.
    #1 You need to download and open your shapefile using Q-GIS; it's the one with the biggest number of coordinate reference systems/projections.
    #2 Use the Plugins menu, and enable fTools and the WFS plugin.
    #3 Use the Vector menu --> Data Management Tools and choose "Define current projection". Enable "use predefined reference system" and choose the WGS 84 coordinate system. I am personally in zone 36, so I chose WGS84-UTM Zone 36N under (Projected Coordinate Systems --> Universal Transverse Mercator) and clicked OK.
    #4 Now use the Vector menu --> Data Management Tools and choose "Export to new projection". The same dialog will pop up. Now choose WGS 84 EPSG::4326 under Geodetic Coordinate Systems. My input user-defined Spatial Reference System should look like this:
    +proj=tmerc +lat_0=0 +lon_0=33 +k=0.9996 +x_0=500000 +y_0=200000 +ellps=WGS84 +datum=WGS84 +units=m +no_defs
    Your output user-defined Spatial Reference System should look like this:
    +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
    Browse for the place where the shapefile is going to be and give the shapefile a name (like origna_reprojected). If it prompts you to add the projected layer to the TOC, accept. There, you have your re-projected map with its latitude and longitude pair of coordinates.
    #5 Now, this is not the actual Spherical/Web Mercator projection, but don't worry, this is where you have to stop. All the other custom web-mapping portals will pick this projection and transform it into EPSG::3785 or EPSG:900913, but the coordinates will still remain as the LatLon pair of the projected shapefile. If you want to test a particular known point, Q-GIS has a lot of room for that. Go ahead and test it.
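    If you prefer to script the conversion rather than use the Q-GIS dialogs, the same transformation can be sketched in Python with the pyproj library. This is a tool of my choosing, not one the original how-to uses, and the sample easting/northing values below are made up.

        # Hedged sketch: reproject a single coordinate from the source CRS shown
        # above (WGS84-based Transverse Mercator, zone 36) to geographic WGS 84
        # (EPSG::4326) using pyproj. Assumes pyproj is installed.
        from pyproj import Transformer

        src_crs = ("+proj=tmerc +lat_0=0 +lon_0=33 +k=0.9996 +x_0=500000 "
                   "+y_0=200000 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")

        transformer = Transformer.from_crs(src_crs, "EPSG:4326", always_xy=True)

        easting, northing = 500000.0, 200000.0   # hypothetical point in the source CRS
        lon, lat = transformer.transform(easting, northing)

        print(f"lon={lon:.6f}, lat={lat:.6f}")

    For a whole shapefile rather than a single point, GDAL's ogr2ogr with -t_srs EPSG:4326, or geopandas' to_crs("EPSG:4326"), would do the equivalent batch conversion; the Q-GIS steps above achieve the same result interactively.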

    Read the article

  • Today's Links (6/29/2011)

    - by Bob Rhubart
    - Event-Driven SOA: Events meet Services | Guido Schmutz
      Oracle ACE Director Guido Schmutz shows you how to achieve extreme loose coupling within a Service-Oriented Architecture by using event-driven interactions.
    - Misconceptions About Software Architecture | Sanjeev Kumar
      A concise, to-the-point, and informative article by Sanjeev Kumar.
    - Good Leaders Acknowledge What Can't Be Done - Jeffrey Pfeffer - Harvard Business Review
      "None of us likes to admit to bad decisions," says Jeffrey Pfeffer. "But imagine how much harder that is for someone who has been chosen to lead a large organization precisely because he or she is thought to have the power to see the future more clearly and chart a wise course."
    - Suboptimal Thinking within Enterprise Architecture | James McGovern
      McGovern says: "We need to remember that enterprises live and thrive beyond just the current person at the helm."
    - Boundaryless Information Flow | Richard Veryard
      "If all the boundaries are removed or porous, then the (extended) enterprise or ecosystem becomes like a giant sponge, in which all information permeates the whole," Veryard says. "Some people may think that's a good idea, but it's not what I'd call loose coupling."
    - Coming to a City Near You: Oracle Business Analytics Summits | Rob Reynolds
      This series of events includes a Technology and Architecture track.
    - New Date for Implementation of Sun Hands-On Course Requirement (Oracle Certification)
      As announced on the Oracle Certification website, Java Architect, Java Developer, Solaris System Administrator and Solaris Security Administrator certification tracks will include a new mandatory course attendance requirement.
    - VirtualBox 4.0.10 is now available for download | Bob Netherton
      Netherton shares information on the new release.
    - Updated Technical Best Practices whitepaper | Anthony Shorten
      The Technical Best Practices whitepaper has been updated with the latest advice. "New advice includes new installation advice, advanced settings, new security settings and advice for both Oracle WebLogic and IBM WebSphere installations," says Shorten.
    - Kscope 11 ADF, AIA and Business Rules | Peter Paul van de Beek
      Whitehorses Solution Architect Peter Paul van de Beek shares his impressions of KScope11 presentations by Markus Eisele, Sten Vesterli, and Edwin Biemond.
    - Amazon AWS for the learning experience | Andrej Koelewijn
      "Using AWS changes your expectations how your internal data center should operate," says Koelewijn.
    - BPMN is dead, long live BPEL! (SOA Partner Community Blog)
      Jürgen Kress shares information -- including a long list of speakers -- for the SOA & BPM Integration Days 2011 conference, October 12th & 13th 2011 in Düsseldorf.
    - InfoQ: HTML5 and the Dawn of Rich Mobile Web Applications
      James Pearce introduces cross-platform web apps development using HTML5 and web frameworks, such as jQTouch, jQuery Mobile, Sencha Touch, PhoneGap, outlining what makes a good framework.
    - InfoQ: Interview and Book Excerpt: CMMI for Development
      "Frameworks like TOGAF are used to define an architecture that aligns IT assets and resources to support key business needs and processes of key stakeholders," says SEI's Mike Konrad. "But the individual application systems, capabilities, services, networks, and other IT assets and infrastructure still need to be acquired, developed, or sustained."
    - InfoQ: Architecting a Cloud-Scale Identity Fabric | Eric Olden
      "The most cited reason for not moving to the cloud is concern about security," says Olden. "In particular, managing user identity and access in the cloud is a tough problem to solve and a big security concern for organizations."

    Read the article

  • OWB 11gR2 - OLAP and Simba

    - by David Allan
    Oracle Warehouse Builder was the first ETL product to provide a single integrated and complete environment for managing enterprise data warehouse solutions that also incorporate multi-dimensional schemas. The OWB 11gR2 release provides Oracle OLAP 11g deployment for multi-dimensional models (in addition to support for prior releases of OLAP). This means users can easily utilize Simba's MDX Provider for Oracle OLAP (see here for details and cost), which allows you to use the powerful and popular ad hoc query and analysis capabilities of Microsoft Excel PivotTables® and PivotCharts® with your Oracle OLAP business intelligence data.

    The extensions to the dimensional modeling capabilities have been built on established relational concepts, with the option to seamlessly move from a relational deployment model to a multi-dimensional model at the click of a button. This means that ETL designers can logically model a complete data warehouse solution using one single tool and control the physical implementation of a logical model at deployment time. As a result, data warehouse projects that need to provide a multi-dimensional model as part of the overall solution can be designed and implemented faster and more efficiently.

    Wizards for dimensions and cubes let you quickly build dimensional models and realize them either relationally or as an Oracle database OLAP implementation; both 10g and 11g formats are supported based on a configuration option. The wizard provides a good first-cut definition, and the objects can be further refined in the editor. Both wizards let you choose the implementation; to deploy to OLAP in the database, select MOLAP: multidimensional storage. You will then be asked what levels and attributes are to be defined; by default the wizard creates a level-based hierarchy, and parent-child hierarchies can be defined in the editor. Once the dimension or cube has been designed, there are special mapping operators that make it easy to load data into the objects; below we load a constant value for the total level and the other levels from a source table.

    Again, when the cube is defined using the wizard, we can edit the cube and define a number of analytic calculations by using the 'generate calculated measures' option on the measures panel. This lets you very easily add a lot of rich analytic measures to your cube. For example, one of the measures is the percentage difference from a year ago, which we can see in detail below. You can also add your own custom calculations to leverage the capabilities of the Oracle OLAP option, from selecting existing template types such as moving averages to defining true custom expressions.

    The 11g OLAP option now supports percentage-based summarization (the amount of data to precompute and store); this is available from the 'cost based aggregation' option in the cube's configuration. Ensure all measure-dimension level-based aggregation is switched off (on the cube-dimension panel) - previously, level-based aggregation was the only option. The 11g generated code now uses the new unified API, as you see below; to generate the code, OWB needs a valid connection to a real schema. This was not needed before 11gR2 and is a new requirement, since the OLAP API which OWB uses is not an offline one.

    Once all of the objects are deployed and the maps executed, then we get to the fun stuff! How can we analyze the data? One option which is powerful and at many users' fingertips is using Microsoft Excel PivotTables® and PivotCharts®, which can be used with your Oracle OLAP business intelligence data by utilizing Simba's MDX Provider for Oracle OLAP (see the Simba site for details of cost). I'll leave the exotic reporting illustrations to the experts (see Bud's demonstration here), but with Simba's MDX Provider for Oracle OLAP it's very simple to access the analytics stored in the database (all built and loaded via the OWB 11gR2 release) and get the regular features of Excel at your fingertips, such as the conditional formatting features for example. That's a very quick run-through of OWB 11gR2 with respect to Oracle 11g OLAP integration and the reporting using Simba's MDX Provider for Oracle OLAP. Not a deep dive in any way, but a quick overview to illustrate the design capabilities and integrations possible.

    Read the article

  • New security options in UCM Patch Set 3

    - by kyle.hatlestad
    While the Patch Set 3 (PS3) release was mostly focused on bug fixes and such, some new features sneaked in there. One of those new features affects the security options. In 10gR3 and prior versions, UCM had a component called Collaboration Manager which allowed project folders to be created and groups of users assigned as members to collaborate on documents. With this component came access control lists (ACLs) for content and folders. Users could assign specific security rights on each and every document and folder within a project. And it was even possible to enable these ACLs without having the Collaboration Manager component enabled (see technote# 603148.1).

    When 11g came out, Collaboration Manager was no longer available, but the configuration settings to turn on ACLs were still there. Well, in PS3 they're implemented slightly differently. And there is a new component available which adds an additional dimension for defining security on the object: Roles. So now, instead of selecting individual users or groups of users (defined as an Alias in User Admin), you can select a particular role. And if a user has that role, they are granted that level of access. This can allow for a much more flexible and manageable security model instead of trying to manage with just user and group access as people come and go in the organization.

    The way it is enabled is still through configuration entries. First log in as an administrator and go to Administration -> Admin Server. On the Component Manager page, click the 'advanced component manager' link in the description paragraph at the top. In the list of Disabled Components, enable the RoleEntityACL component. Then click the General Configuration link on the left. In the Additional Configuration Variables text area, enter the new configuration values:

        UseEntitySecurity=true
        SpecialAuthGroups=<comma separated list of Security Groups to honor ACLs>

    The SpecialAuthGroups should be a list of Security Groups that honor the ACL fields. If an ACL is applied to a content item with a Security Group outside this list, it will be ignored. Save the settings and restart the instance. Upon restart, three new metadata fields will be created: xClbraUserList, xClbraAliasList, xClbraRoleList. If you are using OracleTextSearch as the search indexer, be sure to run a Fast Rebuild on the collection.

    On the Check In, Search, and Update pages, values are added by simply typing in the value and getting a type-ahead list of possible values. Select the value, click Add, and then set the level of access (Read, Write, Delete, or Admin). If all of the fields are blank, then it simply falls back to just Security Group and Account access. For Users and Groups, these values are automatically picked up from the corresponding database tables. In the case of Roles, this is an explicitly defined list of choices that are made available. These values must match the role that is being defined in WebLogic Server or your LDAP/AD repository. To add these values, go to Administration -> Admin Applets -> Configuration Manager. On the Views tab, edit the values for the ExternalRolesView. By default, 'guest' and 'authenticated' are added. Once added through the view, they will be available to select from for the Roles Access List.

    As for how they are stored in the metadata fields, each entry starts with its identifier: ampersand (&) symbol for users, "at" (@) symbol for groups, and colon (:) for roles. Following that is the entity name. And at the end is the level of access in parentheses, e.g. (RWDA). Each entry is separated by a comma. So if you were populating values through Batch Loader or an external source, the values would be defined this way (a small parsing sketch follows below). Detailed information on Access Control Lists can be found in the Oracle Fusion Middleware System Administrator's Guide for Oracle Content Server.
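    To make that storage format concrete, here is a small C# parsing sketch. It is my illustration, not part of UCM: the class and method names and the sample value "&jsmith(RW),@engineering(R),:authenticated(RWDA)" are assumptions, and the sample mixes all three entry types purely for illustration.

        using System;
        using System.Collections.Generic;

        // Entity types follow the identifiers described above:
        // '&' = user, '@' = group (alias), ':' = role.
        enum AclEntityType { User, Group, Role }

        class AclEntry
        {
            public AclEntityType Type;  // who the entry applies to
            public string Name;         // entity name, e.g. "jsmith"
            public string Access;       // access letters, e.g. "RWDA"
        }

        static class AclFieldParser
        {
            // Splits a comma-separated ACL value such as
            // "&jsmith(RW),@engineering(R),:authenticated(RWDA)" into entries.
            public static List<AclEntry> Parse(string fieldValue)
            {
                var entries = new List<AclEntry>();
                foreach (var raw in fieldValue.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries))
                {
                    var item = raw.Trim();
                    var open = item.IndexOf('(');   // access letters sit between '(' and the trailing ')'
                    entries.Add(new AclEntry
                    {
                        Type = item[0] == '&' ? AclEntityType.User
                             : item[0] == '@' ? AclEntityType.Group
                             : AclEntityType.Role,
                        Name = item.Substring(1, open - 1),
                        Access = item.Substring(open + 1, item.Length - open - 2)
                    });
                }
                return entries;
            }
        }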

    Read the article

  • Value of SOA Specialization interview with Thomas Schaller IPT - part III

    - by Jürgen Kress
    Recognized by Oracle, Preferred by Customers. We had the great opportunity to interview Thomas Schaller, Partner at our SOA Specialized partner IPT Innovation Process Technology from Switzerland.

    Why did IPT decide to become SOA Specialized?
    "SOA Specialization is a great branding for IPT. We are the SOA specialists in the Swiss market, as we focus all our services around SOA. With 65 Swiss consultants focused on SOA Security, SOA Testing, BPM (Business Process Management) and BSM (Business Service Modeling), the partnership with Oracle as the technology leader in SOA is key, therefore it was important to us to become the first SOA Specialized company in Switzerland. As a result, IPT is mentioned by Gartner as one of eight European SOA consulting firms and is included in the 'Guide to SOA Consulting and System Integration Service Providers'."

    Can you describe the marketing activities with Oracle?
    "Once a year we organize the largest SOA conference in Switzerland, the 'SOA, BPM & Integration Forum 2011'. Oracle is much more than a sponsor for the conference. Jointly we invite our customer base to attend this key event, and the sales teams jointly address their most important prospects and customers. Oracle supports us with key speakers who present future directions of the Oracle SOA portfolio, like Clemens Utschig-Utschig, who presented details about the Complex Event Processing (CEP) solution in 2009, and James Allerton-Austin, who presented details about the social BPM solution in 2010. Additionally, our key customers presented their Oracle SOA success stories."

    How did you team with Oracle around the sales activities?
    "Sales alignment is key for the successful partnership. When we achieved SOA Specialization we celebrated jointly with the Oracle and IPT middleware sales teams. At the Aperol, many interesting discussions resulted in joint opportunities and business. A key section of our joint business planning is marketing and sales activities. Together we define campaign topics and target customers. Matthias Breitschmid, our superb Oracle partner manager, ensures that the defined sales teams align and start the joint business. Regularly we review our joint business plan with the joint management teams and Jürgen Kress, our EMEA Oracle sponsor. It is great to see that both companies profit from each other and that we receive leads from Oracle!"

    Did you get Oracle support to train your consultants in the Oracle SOA Suite?
    "Enablement is key for us to deliver successful SOA projects. Together with Ralph Bellinghausen from the Oracle Enablement team we defined an Oracle trainings plan for our consultants. The monthly SOA Partner Community newsletter is a great resource to get the latest product updates, webcasts and trainings. As a SOA Specialized partner we also get invited to the SOA Blackbelt trainings; these trainings are hosted by Oracle product management, where we get not only first-hand information but also direct access to the developers, who can support us in critical project phases. Driven by customer success, we have increased our Oracle SOA practice by more than 200% in the last years!"

    Why did the customer decide for the IPT SOA offering?
    "SOA Specialization has become a brand for customers; it proves that we have the certified SOA skills and that IPT has delivered successful Oracle SOA projects. Jointly with Oracle and all the support we get from marketing, sales, enablement, support and product management, we can ensure that our customers deliver their SOA projects successfully!"

    What are the next steps for IPT?
    "SOA Specialization is super beneficial for IPT. We are looking forward to our upcoming SOA, BPM & Integration Forum 2011 and are preparing to become BPM Specialized."

    part I Torsten Winterberg, Opitz Consulting & part II Debra Lilley, Fujitsu

    For more information on SOA Specialization and the SOA Partner Community please feel free to register at www.oracle.com/goto/emea/soa (OPN account required) Blog Twitter LinkedIn Mix Forum Wiki Website

    Read the article

  • Pie Charts Just Don't Work When Comparing Data - Number 10 of Top 10 Reasons to Never Ever Use a Pie

    - by Tony Wolfram
    When comparing data, which is what a pie chart is for, people have a hard time judging the angles and areas of the multiple pie slices in order to calculate how much bigger one slice is than the others.

    Pie Charts Don't Work

    A slice of pie is good for serving up a portion of dessert. It's not good for making a judgement about how big the slice is, what percentage of 100 it is, or how it compares to other slices. People have trouble comparing angles and areas to each other. Controlled studies show that people will overestimate the percentage that a pie slice area represents. This is because we have trouble calculating the area based on the space between the two angles that define the slice. This picture shows how a pie chart is useless for determining the largest value when you have to compare pie slices. You can't compare angles and slice areas to each other. Human perception and cognition are poor at viewing angles and areas and trying to make a mental comparison. Pie charts overload working memory, forcing the person to make complicated calculations and, at the same time, a decision based on those comparisons. What's the point of showing a pie chart when you want to compare data, except to say, "Well, the slices are almost the same, but I'm not really sure which one is bigger, or by how much, or what order they are from largest to smallest. But the colors sure are pretty. Plus, I like round things. Oh, was I supposed to make some important business decision? Sorry."

    Bad Choices and Bad Decisions

    Interaction Designers, Graphic Artists, Report Builders, Software Developers, and Executives have all made the decision to use pie charts in their reports, software applications, and dashboards. It was a bad decision. It was a poor choice. There are always better options and choices, yet the designer still made the decision to use a pie chart. I'll explore why people make such poor choices in my upcoming blog entries. (Hint: It has more to do with emotions than with analytical thinking.) I've outlined my opinions and arguments about the evils of using pie charts in "Countdown of Top 10 Reasons to Never Ever Use a Pie Chart." Each of my next 10 blog entries will support these arguments with illustrations, examples, and references to studies. But my goal is not to continuously and endlessly rage against the evils of using pie charts. This blog is not about pie charts. This blog is about understanding why designers choose to use a pie chart. Why, when given better alternatives, and acknowledging the shortcomings of pie charts, do designers over and over again still freely choose to place a pie chart in a report?

    As an extra treat and parting shot, check out the nice pie chart that Wikipedia uses to illustrate the United States population by state. Remember, somebody chose to use this pie chart, with all its glorious colors, and post it on Wikipedia for all the world to see. My next blog will give you a better alternative for displaying comparable data - the sorted bar chart.

    Read the article

  • Auto-cancel reason not found (6, 13906)

    - by Rajesh Sharma
    There are many errors in the application which are never invoked because of appropriate application configuration done at implementation time by the solution architects. So typically, as an application end user, you would never stumble upon such errors. But what if the application administrator inadvertently changes the configuration/setup in the development, test, QA, or production environment? This is when you as an end user are introduced to a brand-new error for which you may have no clue what it means, and no access/privilege to rectify it.

    In this post we'll focus on one such error: '6, 13906 - Auto-cancel reason not found'. You get this error if you have not defined a Bill (Segment) Cancel Reason (Admin Menu, B, Bill Cancel Reason) code with a System Default value of Turn off auto-cancel.

    Consider a scenario where you are about to final bill an Account for which the selected bill period's cut-off date falls on or after the Service Agreement's (SA) end/stop date (basically the SA is Stopped with a date earlier than it was billed previously). And for the same Account either:

    - bill segments exist that end after the SA's end date, OR
    - non-closing bill segments exist that end on the SA's end date (OR closing bill segments that do not end on the SA's end date or do not exist at all - remember, a closing/final bill segment is generated if the SA is in Stopped status).

    CC&B detects such a scenario and attempts to cancel all such violating bill segments automatically, but NOT if you are generating the bill online. If online, the system assumes that you know what you are doing, and prompts you with error 2, 13716 - Bill segments that violate the SA (%1) End Date (%2) exist so that you can take the necessary action. If in batch, the system automatically cancels these kinds of bill segments.

    Since this happens in the background, you have to define within the application which System Default Bill (Segment) cancellation reason code, identified as Turn off auto-cancel, should be used by the process when it attempts to cancel any such violating bill segments (you already know that you cannot cancel a bill segment without giving a reason for cancellation).

    So what exactly happens during batch billing?

    The bill segment generation routine first determines the billing eligibility of the service agreement being billed. One of the billing eligibility criteria is to check the SA's previous bill segments which have end dates greater than the current cut-off/end date. Technically, the routine retrieves a count of such violating bill segments:

        SELECT COUNT(*)
          FROM CI_BSEG
         WHERE SA_ID = :SA-ID
           AND BSEG_STAT_FLG = '50'              -- Frozen
           AND END_DT IS NOT NULL
           AND (END_DT > '03-JUN-2010'           -- Bill segment ending after SA's end date, OR
                OR (END_DT = '03-JUN-2010'
                    AND CLOSING_BSEG_SW = 'N'))  -- Non-closing bill segment ending on SA's end date

    If the count is greater than zero, the bill segment generation routine executes another program to auto-cancel such bill segments. The auto-cancel program retrieves the Bill Cancel Reason code which is identified as Turn off auto-cancel, and the retrieved cancel reason code is then placed on the bill segments that are being cancelled automatically. During this process, if the routine fails to determine the bill cancel reason code having the System Default Turn off auto-cancel because it has not been configured, you get the bill exception 6, 13906 - Auto-cancel reason not found.

    Also note that duplicate or multiple System Default codes identified as Turn off auto-cancel are not allowed; CC&B would complain with error 2, 54201. The duplicate validation/check is also performed within the auto-cancel routine, in case, say for test purposes, you executed a DML statement updating CI_BILL_CAN_RSN.BSCAN_SYS_DFLT_FLG with a value of 'T'.

    Read the article

  • Changing the action of a hyperlink in a Silverlight RichTextArea

    - by Marc Schluper
    The title of this post could also have been "Move over Hyperlink, here comes Actionlink" or "Creating interactive text in Silverlight." But alas, there can be only one.

    Hyperlinks are very useful. However, they are also limited because their action is fixed: browse to a URL. This may have been adequate at the start of the Internet, but nowadays, in web applications, the one thing we do not want to happen is a complete change of context. In applications we typically like a hyperlink selection to initiate an action that updates a part of the screen. For instance, if my application has a map displayed with some text next to it, the map would react to a selection of a hyperlink in the text, e.g. by zooming in on a location and displaying additional locational information in a popup. In this way, the text becomes interactive text.

    It is quite common that one company creates and maintains websites for many client companies. To keep maintenance cost low, it is important that the content of these websites can be updated by the client companies themselves, without the need to involve a software engineer. To accommodate this scenario, we want the author of the interactive text to configure all hyperlinks (without writing any code). In a Silverlight RichTextArea, the default action of a Hyperlink is the same as a traditional hyperlink, but it can be changed: if the Command property has a value, then upon a click event this command is called with the value of the CommandParameter as parameter. How can we let the author of the text specify a command for each hyperlink in the text, and how can we let an application react properly to a hyperlink selection event? We are talking about any command here. Obviously, the application would recognize only a specific set of commands, with well-defined parameters, but the approach we take here is generic in the sense that it pertains to the RichTextArea and any command.

    So what do we require? We wish that:

    - As a text author, I can configure the action of a hyperlink in a (rich) text without writing code;
    - As a text author, I can persist the action of a hyperlink with the text;
    - As a reader of persisted text, I can click a hyperlink and the configured action will happen;
    - As an application developer, I can configure a control to use my application-specific commands.

    In an excellent introduction to the RichTextArea, John Papa shows (among other things) how to persist a text created using this control. To meet our requirements, we can create a subclass of RichTextArea that uses John's code and allows plugging in two command-specific components: one to prompt for a command definition, and one to execute the command. Since both of these plugins are application-specific, our RichTextArea subclass should not assume anything about them except their interface.

        public interface IDefineCommand
        {
            void Prompt(string content,                   // the link content
                        Action<string, object> callback); // the method called to convey the link definition
        }

        public interface IPerformCommand : ICommand {}

    The IDefineCommand plugin receives the content of the link (the text visible to the reader) and displays some kind of control that allows the author to define the link. When that's done, this (possibly changed) content string is conveyed back to the RichTextArea, together with an object that defines the command to execute when the link is clicked by the reader of the published text. The IPerformCommand plugin simply implements System.Windows.Input.ICommand. Let's use MEF to load the proper plugins.
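    As a rough sketch of that wiring (my illustration, not the article's code; the subclass name, the AllowDefault settings, and the use of Silverlight MEF's CompositionInitializer are assumptions), the subclass could simply declare the two plugins as imports and ask MEF to satisfy them:

        using System.ComponentModel.Composition;  // [Import], CompositionInitializer (Silverlight MEF)

        // Hypothetical name for the RichTextArea subclass described above.
        public class CommandRichTextArea : RichTextArea
        {
            // Plugin used at authoring time to prompt for a command definition.
            [Import(AllowDefault = true)]
            public IDefineCommand DefineCommand { get; set; }

            // Plugin used at reading time to execute the persisted command.
            [Import(AllowDefault = true)]
            public IPerformCommand PerformCommand { get; set; }

            public CommandRichTextArea()
            {
                // Let MEF satisfy the two imports from whatever catalogs the
                // hosting application has configured.
                CompositionInitializer.SatisfyImports(this);
            }
        }

    With something like this in place, each Hyperlink in the text can have its Command property set to the PerformCommand plugin and its CommandParameter set to the persisted command string, so that a reader's click triggers the configured action.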
    In the example solution there is a project that contains rudimentary implementations of these. The IDefineCommand plugin simply prompts for a command string (cf. a command line or query string), and the IPerformCommand plugin displays a MessageBox showing this command string. An actual application using this extended RichTextArea would have its own set of commands, each having their own parameters, and hence would provide more user-friendly, application-specific plugins. Nonetheless, in any case a command can be persisted as a string, and hence the two interfaces defined above suffice. For a Visual Studio 2010 solution, see my article on The Code Project.

    Read the article

  • OWB 11gR2 - Find and Search Metadata in Designer

    - by David Allan
    Here are some tools and techniques for finding objects, specifically in the design repository. There are ways of navigating and collating objects that are useful for day-to-day development and build-time usage - this includes features out of the box and utilities constructed on top. There are a variety of techniques to navigate and find objects in the repository; the first three are out of the box, the fourth is an expert utility.

    1. Navigating by the tree, grouping by project and module - fine if you are aware of the exact module/folder that objects reside in. The structure panel is a useful way of finding parts of an object, especially when it is large, rather than using the canvas. In large-scale projects it helps to have accelerators (either find or collections below).
    2. Advanced find to search by name - 11gR2 included a find capability specifically for large-scale projects. There were improvements in both the tree search and the object editors (including highlighting in mapping, for example), so you can now do regular-expression-based searches and quickly navigate to objects within a repository.
    3. Collections - logically organize your objects into virtual folders by shortcutting the actual objects. This is useful for a range of things since all the OWB services operate on collections too (export/import, validation, deployment). See the post here for new collection functionality in 11gR2.
    4. Reports for searching by type, updated on, updated by, etc. - useful for activities such as periodic incremental actions (deploy all mappings changed in the past week). The report-style view is useful since I can quickly see who changed what and when. You can see all the audit details for objects within each object's property inspector, but it is useful to just get all objects changed today, for example, or all objects changed since my last build.

    This utility combines both UI extensions via experts and the public views on the repository. In the figure to the right you see the contextual option 'Object Search' which invokes the utility; you can see I have quite a number of modules within my project, so figuring out all the potential objects which have been changed is not simple. The utility is an expert which provides this kind of search capability. It provides a report of the objects in the design repository which satisfy some filter criteria. The types of criteria include: objects updated in the last n days; optionally, the user who updated the objects; and filters by project and by type (tables/mappings etc.). The search dialog appears with these options; you can multi-select the object types, so for example you can select TABLE and MAPPING. It is also possible to search across projects if need be. If you have multiple users using the repository, you can define the OWB user name in the 'Updated by' property to restrict the report to just that user. Finally there is a search name that will be used for some of the options such as building a collection - this name is used for the collection to be built. In the example I have done, I've just searched my project for all process flows and mappings that users have updated in the last 7 days. The results of the query are returned in a table containing the object names, types, full path and audit details. The columns are sortable; you can sort the results by name, type, path, etc.

    One of the cool things here is that you can then perform operations on these objects - such as edit them, export a single selection or the entire results to MDL, create a collection from the results (now you have a saved set of references in the repository, so you can deploy/export it etc.), create a deployment script from the results... or even add in your own ideas! You see from this that you can do bulk operations on sets of objects based on search results. So for example, selecting the 'Build Collection' option creates a collection with all of the objects from my search; you can subsequently deploy/generate/maintain this collection of objects. Under the hood, the expert is just basic OMB commands from the product and the use of the public views on the design repository. You can see how easy it is to build up macro-like capabilities that will help you do day-to-day as well as build-like tasks on sets of objects.

    Read the article

  • Routes for IIS Classic and Integrated Mode

    - by imran_ku07
    Introduction:

    The ASP.NET MVC routing feature makes it very easy to provide clean URLs: you just need to configure routes in the Global.asax file to create an application with clean URLs. In most cases the routes you define work in IIS 6 and in IIS 7 (or IIS 7.5) Classic and Integrated mode. But in some cases your routes may only work in IIS 7 Integrated mode, such as when using extension-less URLs in IIS 6 without a wildcard extension map. So in this article I will show you how to create different routes which work in IIS 6 and in IIS 7 Classic and Integrated mode.

    Description:

    Let's say that you need to create an application which must work both in Classic and Integrated mode. Also, you have no control over setting up a wildcard extension map in IIS. So you need to create two routes: one with an extension-less URL for Integrated mode and one with a URL that has an extension for Classic mode.

        routes.MapRoute(
            "DefaultClassic",                   // Route name
            "{controller}.aspx/{action}/{id}",  // URL with parameters
            new { controller = "Home", action = "Index", id = UrlParameter.Optional } // Parameter defaults
        );

        routes.MapRoute(
            "DefaultIntegrated",                // Route name
            "{controller}/{action}/{id}",       // URL with parameters
            new { controller = "Home", action = "Index", id = UrlParameter.Optional } // Parameter defaults
        );

    Now you have set up two routes, one for Integrated mode and one for Classic mode. You only need to ensure that the Integrated mode route matches only when the application is running in Integrated mode, and that the Classic mode route matches only when it is running in Classic mode. To make this work, you need to create a custom constraint for each mode. So replace the above routes with these routes:

        routes.MapRoute(
            "DefaultClassic",                   // Route name
            "{controller}.aspx/{action}/{id}",  // URL with parameters
            new { controller = "Home", action = "Index", id = UrlParameter.Optional }, // Parameter defaults
            new { mode = new ClassicModeConstraint() }    // Constraints
        );

        routes.MapRoute(
            "DefaultIntegrated",                // Route name
            "{controller}/{action}/{id}",       // URL with parameters
            new { controller = "Home", action = "Index", id = UrlParameter.Optional }, // Parameter defaults
            new { mode = new IntegratedModeConstraint() } // Constraints
        );

    The first route, for Classic mode, adds a ClassicModeConstraint; the second route, for Integrated mode, adds an IntegratedModeConstraint. Next you need to add the implementation of these constraint classes:

        public class ClassicModeConstraint : IRouteConstraint
        {
            public bool Match(HttpContextBase httpContext, Route route, string parameterName,
                              RouteValueDictionary values, RouteDirection routeDirection)
            {
                return !HttpRuntime.UsingIntegratedPipeline;
            }
        }

        public class IntegratedModeConstraint : IRouteConstraint
        {
            public bool Match(HttpContextBase httpContext, Route route, string parameterName,
                              RouteValueDictionary values, RouteDirection routeDirection)
            {
                return HttpRuntime.UsingIntegratedPipeline;
            }
        }

    HttpRuntime.UsingIntegratedPipeline returns true if the application is running in Integrated mode; otherwise, it returns false. So the Integrated mode route matches only when the application is running in Integrated mode, and the Classic mode route matches only when it is not.
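    For reference, here is a minimal sketch of where these registrations typically live; this is the standard ASP.NET MVC Global.asax pattern (assumed, not taken from the article), with the two mode-specific routes from above slotted into RegisterRoutes.

        using System.Web;
        using System.Web.Mvc;
        using System.Web.Routing;

        public class MvcApplication : HttpApplication
        {
            public static void RegisterRoutes(RouteCollection routes)
            {
                routes.IgnoreRoute("{resource}.axd/{*pathInfo}");

                // The two mode-specific routes shown above go here, e.g.:
                // routes.MapRoute("DefaultClassic", ...);
                // routes.MapRoute("DefaultIntegrated", ...);
            }

            protected void Application_Start()
            {
                AreaRegistration.RegisterAllAreas();
                RegisterRoutes(RouteTable.Routes);
            }
        }

    Nothing about the constraints changes this registration; the pipeline-mode check happens at match time, so the same deployed application works unchanged in either mode.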
    Summary:

    When developing an application, you are sometimes not sure whether it will be hosted on IIS 6, or on IIS 7 (or IIS 7.5) in Integrated or Classic mode. So it's a good idea to create separate routes for both Classic and Integrated mode, so that your application uses extension-less URLs where possible and URLs with an extension where extension-less URLs are not possible. In this article I showed you how to create separate routes for IIS Integrated and Classic mode. Hope you will enjoy this article too.

    Read the article

  • Asynchrony in C# 5: Dataflow Async Logger Sample

    - by javarg
    Check out these (very simple) code examples for TPL Dataflow. Suppose you are developing an async logger to register application events to different sinks or log writers. The logger architecture would be as follows; note how blocks can be composed to achieve the desired behavior. The BufferBlock<T> is the pool of log entries to be processed, whereas the linked ActionBlock<TInput> instances represent the log writers or sinks. The previous composition allows only one ActionBlock to consume each entry. Implementation code would be something similar to the following (add a reference to System.Threading.Tasks.Dataflow.dll in %User Documents%\Microsoft Visual Studio Async CTP\Documentation):

    TPL Dataflow Logger

        var bufferBlock = new BufferBlock<Tuple<LogLevel, string>>();

        ActionBlock<Tuple<LogLevel, string>> infoLogger =
            new ActionBlock<Tuple<LogLevel, string>>(
                e => Console.WriteLine("Info: {0}", e.Item2));

        ActionBlock<Tuple<LogLevel, string>> errorLogger =
            new ActionBlock<Tuple<LogLevel, string>>(
                e => Console.WriteLine("Error: {0}", e.Item2));

        bufferBlock.LinkTo(infoLogger, e => (e.Item1 & LogLevel.Info) != LogLevel.None);
        bufferBlock.LinkTo(errorLogger, e => (e.Item1 & LogLevel.Error) != LogLevel.None);

        bufferBlock.Post(new Tuple<LogLevel, string>(LogLevel.Info, "info message"));
        bufferBlock.Post(new Tuple<LogLevel, string>(LogLevel.Error, "error message"));

    Note the filter applied to each link (in this case, the logging level selects the writer used). We can specify message filters using Predicate functions on each link. Now, the previous sample is useless for a logger since the logging level is not exclusive (thus, several writers could be used to process a single message). Let's use a BroadcastBlock<T> instead of a BufferBlock<T>.

    Broadcast Logger

        var bufferBlock = new BroadcastBlock<Tuple<LogLevel, string>>(
            e => new Tuple<LogLevel, string>(e.Item1, e.Item2));

        ActionBlock<Tuple<LogLevel, string>> infoLogger =
            new ActionBlock<Tuple<LogLevel, string>>(
                e => Console.WriteLine("Info: {0}", e.Item2));

        ActionBlock<Tuple<LogLevel, string>> errorLogger =
            new ActionBlock<Tuple<LogLevel, string>>(
                e => Console.WriteLine("Error: {0}", e.Item2));

        ActionBlock<Tuple<LogLevel, string>> allLogger =
            new ActionBlock<Tuple<LogLevel, string>>(
                e => Console.WriteLine("All: {0}", e.Item2));

        bufferBlock.LinkTo(infoLogger, e => (e.Item1 & LogLevel.Info) != LogLevel.None);
        bufferBlock.LinkTo(errorLogger, e => (e.Item1 & LogLevel.Error) != LogLevel.None);
        bufferBlock.LinkTo(allLogger, e => (e.Item1 & LogLevel.All) != LogLevel.None);

        bufferBlock.Post(new Tuple<LogLevel, string>(LogLevel.Info, "info message"));
        bufferBlock.Post(new Tuple<LogLevel, string>(LogLevel.Error, "error message"));

    As this block copies the message to all its outputs, we need to define the copy function in the block constructor. In this case we create a new Tuple, but you can always use the identity function if passing the same reference to every output is acceptable. Try both scenarios and compare the results.
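    One detail the sample leaves implicit is the LogLevel type: the bitwise filters only make sense if it is a flags enum. A plausible definition (an assumption on my part; only Info, Error, None and All appear in the sample) would be:

        using System;

        // Assumed definition of the LogLevel flags enum used by the sample above.
        [Flags]
        enum LogLevel
        {
            None  = 0,
            Info  = 1,
            Error = 2,
            All   = Info | Error
        }

    With a definition like this, an Info entry posted to the BroadcastBlock satisfies both the infoLogger and allLogger filters, which is exactly the overlapping-sink behavior the single BufferBlock composition could not provide.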

    Read the article

< Previous Page | 168 169 170 171 172 173 174 175 176 177 178 179  | Next Page >