Search Results


  • How to merge two different Makefiles?

    - by martijnn2008
    I have done some reading on "merging Makefiles"; one suggestion was to leave the two Makefiles separate, in different folders [1]. To me this looks counterintuitive, because I have the following situation: I have three source files (main.cpp, flexibility.cpp, constraints.cpp), and one of them (flexibility.cpp) makes use of the COIN-OR Linear Programming library (Clp). Installing this library generates sample Makefiles; I have adjusted one of them, and it currently builds a good working binary:

        # Copyright (C) 2006 International Business Machines and others.
        # All Rights Reserved.
        # This file is distributed under the Eclipse Public License.
        # $Id: Makefile.in 726 2006-04-17 04:16:00Z andreasw $

        ##########################################################################
        # You can modify this example makefile to fit for your own program.     #
        #                                                                        #
        # Usually, you only need to change the five CHANGEME entries below.     #
        ##########################################################################

        # To compile other examples, either change the following line, or
        # add the argument DRIVER=problem_name to make
        DRIVER = main

        # CHANGEME: This should be the name of your executable
        EXE = clp

        # CHANGEME: Here is the name of all object files corresponding to the source
        # code that you wrote in order to define the problem statement
        OBJS = $(DRIVER).o constraints.o flexibility.o

        # CHANGEME: Additional libraries
        ADDLIBS =

        # CHANGEME: Additional flags for compilation (e.g., include flags)
        ADDINCFLAGS =

        # CHANGEME: Directory to the sources for the (example) problem definition
        # files
        SRCDIR = .

        ##########################################################################
        # Usually, you don't have to change anything below. Note that if you    #
        # change certain compiler options, you might have to recompile the      #
        # COIN package.                                                         #
        ##########################################################################

        COIN_HAS_PKGCONFIG = TRUE
        COIN_CXX_IS_CL = #TRUE
        COIN_HAS_SAMPLE = TRUE
        COIN_HAS_NETLIB = #TRUE

        # C++ Compiler command
        CXX = g++

        # C++ Compiler options
        CXXFLAGS = -O3 -pipe -DNDEBUG -pedantic-errors -Wparentheses -Wreturn-type -Wcast-qual -Wall -Wpointer-arith -Wwrite-strings -Wconversion -Wno-unknown-pragmas -Wno-long-long -DCLP_BUILD

        # additional C++ Compiler options for linking
        CXXLINKFLAGS = -Wl,--rpath -Wl,/home/martijn/Downloads/COIN/coin-Clp/lib

        # C Compiler command
        CC = gcc

        # C Compiler options
        CFLAGS = -O3 -pipe -DNDEBUG -pedantic-errors -Wimplicit -Wparentheses -Wsequence-point -Wreturn-type -Wcast-qual -Wall -Wno-unknown-pragmas -Wno-long-long -DCLP_BUILD

        # Sample data directory
        ifeq ($(COIN_HAS_SAMPLE), TRUE)
          ifeq ($(COIN_HAS_PKGCONFIG), TRUE)
            CXXFLAGS += -DSAMPLEDIR=\"`PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --variable=datadir coindatasample`\"
            CFLAGS += -DSAMPLEDIR=\"`PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --variable=datadir coindatasample`\"
          else
            CXXFLAGS += -DSAMPLEDIR=\"\"
            CFLAGS += -DSAMPLEDIR=\"\"
          endif
        endif

        # Netlib data directory
        ifeq ($(COIN_HAS_NETLIB), TRUE)
          ifeq ($(COIN_HAS_PKGCONFIG), TRUE)
            CXXFLAGS += -DNETLIBDIR=\"`PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --variable=datadir coindatanetlib`\"
            CFLAGS += -DNETLIBDIR=\"`PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --variable=datadir coindatanetlib`\"
          else
            CXXFLAGS += -DNETLIBDIR=\"\"
            CFLAGS += -DNETLIBDIR=\"\"
          endif
        endif

        # Include directories (we use the CYGPATH_W variables to allow compilation with Windows compilers)
        ifeq ($(COIN_HAS_PKGCONFIG), TRUE)
          INCL = `PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --cflags clp`
        else
          INCL =
        endif
        INCL += $(ADDINCFLAGS)

        # Linker flags
        ifeq ($(COIN_HAS_PKGCONFIG), TRUE)
          LIBS = `PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib64/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig:/home/martijn/Downloads/COIN/coin-Clp/share/pkgconfig: pkg-config --libs clp`
        else
          ifeq ($(COIN_CXX_IS_CL), TRUE)
            LIBS = -link -libpath:`$(CYGPATH_W) /home/martijn/Downloads/COIN/coin-Clp/lib` libClp.lib
          else
            LIBS = -L/home/martijn/Downloads/COIN/coin-Clp/lib -lClp
          endif
        endif

        # The following is necessary under cygwin, if native compilers are used
        CYGPATH_W = echo

        # Here we list all possible generated objects or executables to delete them
        CLEANFILES = clp \
                main.o \
                flexibility.o \
                constraints.o

        all: $(EXE)

        .SUFFIXES: .cpp .c .o .obj

        $(EXE): $(OBJS)
                bla=;\
                for file in $(OBJS); do bla="$$bla `$(CYGPATH_W) $$file`"; done; \
                $(CXX) $(CXXLINKFLAGS) $(CXXFLAGS) -o $@ $$bla $(LIBS) $(ADDLIBS)

        clean:
                rm -rf $(CLEANFILES)

        .cpp.o:
                $(CXX) $(CXXFLAGS) $(INCL) -c -o $@ `test -f '$<' || echo '$(SRCDIR)/'`$<

        .cpp.obj:
                $(CXX) $(CXXFLAGS) $(INCL) -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(SRCDIR)/$<'; fi`

        .c.o:
                $(CC) $(CFLAGS) $(INCL) -c -o $@ `test -f '$<' || echo '$(SRCDIR)/'`$<

        .c.obj:
                $(CC) $(CFLAGS) $(INCL) -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(SRCDIR)/$<'; fi`

    The other Makefile compiles a lot of code and makes use of bison and flex. It was also written by someone else. I am able to alter this Makefile when I want to add some code, and it also builds a binary:

        CFLAGS=-Wall
        LDLIBS=-LC:/GnuWin32/lib -lfl -lm
        LSOURCES=lex.l
        YSOURCES=grammar.ypp
        CSOURCES=debug.cpp esta_plus.cpp heap.cpp main.cpp stjn.cpp timing.cpp tmsp.cpp token.cpp chaining.cpp flexibility.cpp exceptions.cpp
        HSOURCES=$(CSOURCES:.cpp=.h) includes.h
        OBJECTS=$(LSOURCES:.l=.o) $(YSOURCES:.ypp=.tab.o) $(CSOURCES:.cpp=.o)

        all: solver

        solver: CFLAGS+=-g -O0 -DDEBUG
        solver: $(OBJECTS) main.o debug.o
                g++ $(CFLAGS) -o $@ $^ $(LDLIBS)

        solver.release: CFLAGS+=-O5
        solver.release: $(OBJECTS) main.o
                g++ $(CFLAGS) -o $@ $^ $(LDLIBS)

        %.o: %.cpp
                g++ -c $(CFLAGS) -o $@ $<

        lex.cpp: lex.l grammar.tab.cpp grammar.tab.hpp
                flex -o$@ $<

        %.tab.cpp %.tab.hpp: %.ypp
                bison --verbose -d $<

        ifneq ($(LSOURCES),)
        $(LSOURCES:.l=.cpp): $(YSOURCES:.y=.tab.h)
        endif

        -include $(OBJECTS:.o=.d)

        clean:
                rm -f $(OBJECTS) $(OBJECTS:.o=.d) $(YSOURCES:.ypp=.tab.cpp) $(YSOURCES:.ypp=.tab.hpp) $(YSOURCES:.ypp=.output) $(LSOURCES:.l=.cpp) solver solver.release 2>/dev/null

        .PHONY: all clean debug release

    Both of these Makefiles are hard for me to understand; I don't know exactly what they do. What I want is to merge the two of them so I get only one binary, and the code compiled by the second Makefile should be the result. I want to add flexibility.cpp and constraints.cpp to the second Makefile, but when I do, I get the following problem:

        flexibility.h:4:26: fatal error: ClpSimplex.hpp: No such file or directory
         #include "ClpSimplex.hpp"

    So the compiler can't find the Clp library. I also tried to copy-paste more code from the first Makefile into the second, but it still gives me that same error. Q: Can you please help me with merging the two Makefiles, or point out a more elegant way? Q: In this case, is it indeed better to merge the two Makefiles? I also tried to use CMake, but I gave up on that one quickly, because I don't know much about flex and bison.
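    One possible direction (a minimal, untested sketch rather than a verified fix): since the first Makefile obtains all of its Clp compile and link flags from pkg-config, those same flags can be appended to the second Makefile instead of merging the two files wholesale. The install path below is copied from the first Makefile; adjust it to your system.

        # Sketch: pull the Clp flags into the second Makefile via pkg-config
        CLP_PKGCFG = PKG_CONFIG_PATH=/home/martijn/Downloads/COIN/coin-Clp/lib/pkgconfig
        CFLAGS   += `$(CLP_PKGCFG) pkg-config --cflags clp`   # fixes the ClpSimplex.hpp include error
        LDLIBS   += `$(CLP_PKGCFG) pkg-config --libs clp`     # links against libClp
        CSOURCES += constraints.cpp   # flexibility.cpp is already in CSOURCES

    Because the second Makefile compiles with $(CFLAGS) and links with $(LDLIBS), appending the pkg-config output to those two variables should cover both the include path and the library path.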

    Read the article

  • To display an image

    - by Vimal Basdeo
    I want to display an image from the web on a panel in another JFrame at the click of a button, but whenever I click the button the image loads first, and during that time the current form freezes; only once the image has loaded is the form displayed with the image. How can I avoid the situation where my form freezes, since it is very irritating? My code:

    My current class:

        private void btn_TrackbusActionPerformed(java.awt.event.ActionEvent evt) {
            try {
                sendMessage("Query,map,$,start,211,Arsenal,!");
                System.out.println(receiveMessage());
            } catch (UnknownHostException ex) {
                Logger.getLogger(client_Trackbus.class.getName()).log(Level.SEVERE, null, ex);
            } catch (IOException ex) {
                Logger.getLogger(client_Trackbus.class.getName()).log(Level.SEVERE, null, ex);
            } catch (Exception ex) {
                Logger.getLogger(client_Trackbus.class.getName()).log(Level.SEVERE, null, ex);
            }
            client_trackedbus nextform = new client_trackedbus(planform, connection, packet_receive, packet_send);
            this.setVisible(false);
            this.dispose();
            nextform.setVisible(true);
            // TODO add your handling code here:
        }

    My next class, which displays the image:

        public class client_trackedbus extends javax.swing.JFrame {
            client_planform planform = null;
            DatagramSocket connection = null;
            DatagramPacket packet_receive = null;
            DatagramPacket packet_send = null;
            JLabel label = null;

            /** Creates new form client_trackedbus */
            public client_trackedbus(client_planform planform, DatagramSocket connection,
                                     DatagramPacket packet_receive, DatagramPacket packet_send) {
                initComponents();
                this.planform = planform;
                this.connection = connection;
                this.packet_receive = packet_receive;
                this.packet_send = packet_send;
                try {
                    displayMap("http://www.huddletogether.com/projects/lightbox2/images/image-2.jpg",
                               jPanel1, new JLabel());
                } catch (MalformedURLException ex) {
                    Logger.getLogger(client_trackedbus.class.getName()).log(Level.SEVERE, null, ex);
                }
            }

            private void displayMap(String url, JPanel panel, JLabel label) throws MalformedURLException {
                URL imageurl = new URL(url);
                Image image = (Toolkit.getDefaultToolkit().createImage(imageurl));
                ImageIcon icon = new ImageIcon(image);
                label.setIcon(icon);
                panel.add(label);
                // System.out.println(panel.getSize().width);
                this.getContentPane().add(panel);
            }

            /** This method is called from within the constructor to
             * initialize the form.
             * WARNING: Do NOT modify this code. The content of this method is
             * always regenerated by the Form Editor.
             */
            @SuppressWarnings("unchecked")
            // <editor-fold defaultstate="collapsed" desc="Generated Code">
            private void initComponents() {

                jPanel1 = new javax.swing.JPanel();
                jLabel1 = new javax.swing.JLabel();
                btn_Exit = new javax.swing.JButton();
                btn_Plan = new javax.swing.JButton();

                setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
                setTitle("Public Transport Journey Planner");

                javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
                jPanel1.setLayout(jPanel1Layout);
                jPanel1Layout.setHorizontalGroup(
                    jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 368, Short.MAX_VALUE)
                );
                jPanel1Layout.setVerticalGroup(
                    jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGap(0, 172, Short.MAX_VALUE)
                );

                jLabel1.setFont(new java.awt.Font("Arial", 1, 18));
                jLabel1.setText("Your tracked bus");

                btn_Exit.setText("Exit");
                btn_Exit.addActionListener(new java.awt.event.ActionListener() {
                    public void actionPerformed(java.awt.event.ActionEvent evt) {
                        btn_ExitActionPerformed(evt);
                    }
                });

                btn_Plan.setText("Plan journey");
                btn_Plan.addActionListener(new java.awt.event.ActionListener() {
                    public void actionPerformed(java.awt.event.ActionEvent evt) {
                        btn_PlanActionPerformed(evt);
                    }
                });

                javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
                getContentPane().setLayout(layout);
                layout.setHorizontalGroup(
                    layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(layout.createSequentialGroup()
                        .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addGroup(layout.createSequentialGroup()
                                .addGap(104, 104, 104)
                                .addComponent(jLabel1))
                            .addGroup(layout.createSequentialGroup()
                                .addContainerGap()
                                .addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                            .addGroup(layout.createSequentialGroup()
                                .addGap(65, 65, 65)
                                .addComponent(btn_Plan)
                                .addGap(65, 65, 65)
                                .addComponent(btn_Exit, javax.swing.GroupLayout.PREFERRED_SIZE, 87, javax.swing.GroupLayout.PREFERRED_SIZE)))
                        .addContainerGap(20, Short.MAX_VALUE))
                );
                layout.setVerticalGroup(
                    layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(layout.createSequentialGroup()
                        .addGap(35, 35, 35)
                        .addComponent(jLabel1)
                        .addGap(18, 18, 18)
                        .addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                        .addGap(18, 18, 18)
                        .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                            .addComponent(btn_Exit)
                            .addComponent(btn_Plan))
                        .addContainerGap(12, Short.MAX_VALUE))
                );

                pack();
            }// </editor-fold>

            private void btn_ExitActionPerformed(java.awt.event.ActionEvent evt) {
                // TODO add your handling code here:
                Exitform();
            }

            private void btn_PlanActionPerformed(java.awt.event.ActionEvent evt) {
                // TODO add your handling code here:
                this.setVisible(false);
                this.dispose();
                this.planform.setVisible(true);
            }

            private void Exitform() {
                this.setVisible(false);
                this.dispose();
            }

            /**
             * @param args the command line arguments
             */
            public static void main(String args[]) {
                java.awt.EventQueue.invokeLater(new Runnable() {
                    public void run() {
                        // new client_trackedbus().setVisible(true);
                    }
                });
            }

            // Variables declaration - do not modify
            private javax.swing.JButton btn_Exit;
            private javax.swing.JButton btn_Plan;
            private javax.swing.JLabel jLabel1;
            private javax.swing.JPanel jPanel1;
            // End of variables declaration
        }
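    A common fix for this kind of freeze (a minimal sketch, not taken from the question's code) is to keep the blocking download off the Event Dispatch Thread, for example with javax.swing.SwingWorker, and to touch the Swing components only once the image is ready. The method below is a hypothetical drop-in replacement for the displayMap method above; SwingWorker, ImageIcon, and the rest are standard Java SE.

        // Requires: import javax.swing.SwingWorker; plus the imports already used above.
        // Loads the image on a background thread, then updates the UI on the EDT.
        private void displayMapAsync(final String url, final JPanel panel, final JLabel label) {
            new SwingWorker<ImageIcon, Void>() {
                @Override
                protected ImageIcon doInBackground() throws Exception {
                    // Worker thread: the blocking network fetch happens here,
                    // so the calling form never freezes.
                    return new ImageIcon(new java.net.URL(url)); // waits for the image data
                }

                @Override
                protected void done() {
                    // Back on the Event Dispatch Thread: safe to touch components.
                    try {
                        label.setIcon(get());
                        panel.add(label);
                        panel.revalidate();
                        panel.repaint();
                    } catch (Exception ex) {
                        ex.printStackTrace();
                    }
                }
            }.execute();
        }

    Calling displayMapAsync from the constructor (where displayMap is called now) lets the new form appear immediately, with the image filling in when the download completes.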

    Read the article

  • Design for complex ATG applications

    - by Glen Borkowski
    Overview

    Needless to say, some ATG applications are more complex than others. Some support a single site, single language, single catalog, single currency, a single development staff, a single business team, and a relatively simple business model. The truly complex applications have to support multiple sites, multiple languages, multiple catalogs, multiple currencies, a couple of different development teams, multiple business teams, and a highly complex business model (and the processes to go along with it). While it's still important to implement a proper design for simple applications, it's absolutely critical for the complex ones. Why? It's all about time and money. If you are unable to manage your complex applications efficiently, the cost of managing them will increase dramatically, as will the time to get things done (time to market). On the positive side, your competition is most likely in the same situation, so you just need to be more efficient than they are.

    This article discusses a number of key areas to think about when designing complex applications on ATG. Some of this can get fairly technical, so it may help to get some background first. You can get enough of the required background information from this post. After reading that, come back here and follow along.

    Application Design

    Of all the various types of ATG applications out there, the most complex tend to be the ones in the telecommunications industry, especially the ones that operate in multiple countries. To get started, let's assume we are talking about an application like that, one with these properties:

    - Operates in multiple countries: must support multiple sites, catalogs, languages, and currencies
    - The organization is fairly loosely coupled: a single brand, but different businesses across different countries
    - There is some common functionality across all sites in all countries
    - There is some common functionality across different sites within the same country
    - Sites within a single country may have some unique functionality relative to other sites in the same country
    - Complex product catalog (mostly in terms of bundles, eligibility, and compatibility)

    At this point, I'll assume you have read through the required reading and have a decent understanding of how ATG modules work.

    Code / configuration: assemble into modules

    When it comes to defining your modules for a complex application, there are a number of goals:

    - Divide functionality between the modules in a way that maps to your business
    - Group common functionality further down in the stack of modules
    - Provide a good balance between shared resources and autonomy for countries / sites

    Now I'll describe a high-level approach to accomplishing those goals. Let's start from the bottom and work our way up. At the very bottom, you have the modules that ship with ATG: the 'out of the box' stuff. You want to make sure you are leveraging all the modules that make sense, in order to get as much value from ATG as possible and to write less yourself.

    On top of the ATG modules, you should create what we'll refer to as the Corporate Foundation Module, described as follows:

    - Sits directly on top of the ATG modules
    - Used by all applications across all countries and sites: this is the foundation for everyone
    - Contains everything that is common across all countries and all sites
    - Once established and settled, will change less frequently than the 'higher' modules
    - Encapsulates as many enterprise-wide integrations as possible
    - Provides a means of code sharing, hence less development and testing, and faster time to market
    - Contains a 'reference' web application (described below)

    The next layer up could be a module for each country (you could replace this with region if that makes more sense). We'll define those modules as follows:

    - Sits on top of the corporate foundation module
    - Contains what is unique to all sites in a given country
    - Responsible for managing any resource bundles for this country (to handle multiple languages)
    - Overrides / replaces corporate integration points with any country-specific ones

    Finally, we will define what should be a fairly 'thin' (in terms of functionality) set of modules for each site:

    - Sits on top of the module for the country it resides in
    - Contains what is unique to a given site within a given country
    - Will mostly contain configuration, but could also define some unique functionality
    - Contains one or more web applications

    The graphic below should help to indicate how these modules fit together. (The module-stack diagram is not reproduced here.)

    Web applications

    As described in the previous section, there are many opportunities for sharing (minimizing costs) in the code and configuration aspects of ATG modules. Web applications are also contained within ATG modules; however, sharing web applications can be more difficult, because this is what the end customer actually sees, and since each site may have some degree of unique look & feel, sharing becomes more challenging. One approach that helps is to define a 'reference' web application at the corporate foundation layer to act as a solid starting point for each site. Here's a description of the 'reference' web application:

    - Contains minimal / sample reference styling, as this will mostly be addressed in the site-level web app
    - Focuses on functionality: ensures that core functionality is exposed via this web application
    - Each individual site can use it as a starting point
    - There may be multiple types of web apps (i.e. B2C, B2B, etc.)

    There are some techniques for sharing web application assets (i.e. multiple web applications defined in the web.xml) that are worth investigating, but they are out of scope here.

    Reference infrastructure

    In this complex environment, it is assumed that there is not a single infrastructure for all countries and all sites. It's more likely that different countries (or regions) have their own infrastructure. In this case, it is advantageous to define a reference infrastructure that contains all the hardware and software making up the core environment. Specifications and diagrams should be created to outline what this reference infrastructure looks like, as well as its baseline cost and the incremental cost to scale up with volume. Having some consistency in infrastructure will save time and money as new countries / sites come online. Here are some properties of the reference infrastructure:

    - Standardized approach to hardware setup: type and number of servers
    - Defines the application server, operating system, database, etc., including vendor and specific versions
    - Consistent naming conventions: provides a consistent base of terminology and understanding across environments
    - Defines which ATG services run on which servers (production, staging, BCC / preview)
    - Each site can change as required to meet scale requirements

    Governance / organization

    It should be no surprise that the complex application we're talking about is backed by an equally complex organization. One of the more challenging aspects of efficiently managing a series of complex applications is ensuring the proper level of governance and organization. Here are some ideas and goals to work towards:

    - Establish a committee to make enterprise-wide decisions that affect all sites: representation should be evenly distributed, there should be a clear communication procedure, and the focus should be on high-level business goals
    - Evaluate feature / function gaps and how they relate to the ATG release schedule and roadmap: determine when to upgrade, and ensure the value will be realized
    - Determine how to manage the various levels of modules: who is responsible for maintaining the corporate / country / site layers, and what the procedure is for controlling what goes into the corporate foundation module
    - Standardize on source code control, database, hardware, OS versions, J2EE app servers, development procedures, etc.: only use tested, proven versions. This is something that should be centralized so that every country / site does not have to worry about compatibility between versions.
    - Create an innovation team: quickly develop new features and perform proofs of concept, so that all teams can benefit from its findings

    Summary

    At this point, it should be clear why the topics above (design, governance, organization, etc.) are critical to efficiently managing a complex application. To summarize, it's all about competitive advantage. You will need to reduce costs and improve time to market, with the goal of providing a better experience for your end customers. You can reduce cost by reducing development time, reducing the time allocated to testing (you don't have to test the corporate foundation module over and over again: do it once), and optimizing operations. With an efficient design, you can improve your time to market, and your business will be more flexible and agile. Over time, you'll find that you're becoming more focused on offering functionality that is new to the market (creativity), and this will be rewarded: you're now a leader.

    In addition to the above, you'll realize soft benefits as well. Your staff will be operating in a culture based on sharing. You'll want to reward efforts to improve and enhance the foundation, as this will benefit everyone. This culture will inspire innovation, which can only add to your competitive advantage.
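    To make the module layering concrete, here is a hypothetical sketch of how the three layers could declare their dependencies in their ATG module manifests. ATG-Required and ATG-Config-Path are the standard ATG manifest attributes for module dependencies and configuration layers; the module names are invented for illustration, and DCS stands in for whichever out-of-the-box commerce module the application uses.

        MySite.Foundation/META-INF/MANIFEST.MF   (corporate foundation, on the out-of-the-box modules)
            Manifest-Version: 1.0
            ATG-Required: DCS
            ATG-Config-Path: config/

        MySite.UK/META-INF/MANIFEST.MF           (country layer)
            Manifest-Version: 1.0
            ATG-Required: MySite.Foundation
            ATG-Config-Path: config/

        MySite.UK.SiteA/META-INF/MANIFEST.MF     (site layer, mostly configuration)
            Manifest-Version: 1.0
            ATG-Required: MySite.UK
            ATG-Config-Path: config/

    Assembling an application against the site module (e.g. with runAssembler and -m MySite.UK.SiteA) then pulls in the whole stack, with each layer's configuration overriding the layer below it.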

    Read the article

  • How can I remove old kernels/install new ones when /boot is full?

    - by Marcel
    I know this question has been asked many times before; however, I think my case is a bit different.

        # df -h
        Filesystem      Size  Used Avail Use% Mounted on
        /dev/sda3       224G  5.2G  208G   3% /
        udev            1.9G  4.0K  1.9G   1% /dev
        tmpfs           777M  260K  777M   1% /run
        none            5.0M     0  5.0M   0% /run/lock
        none            1.9G     0  1.9G   0% /run/shm
        /dev/sda2        90M   88M     0 100% /boot
        /dev/sda6       1.9G  514M  1.3G  29% /tmp

    My boot partition is full. Current kernel:

        # uname -r
        3.2.0-35-generic

    All kernels:

        # dpkg --list | grep linux-image
        ii  linux-image-3.2.0-32-generic  3.2.0-32.51  Linux kernel image for version 3.2.0 on 64 bit x86 SMP
        ii  linux-image-3.2.0-34-generic  3.2.0-34.53  Linux kernel image for version 3.2.0 on 64 bit x86 SMP
        ii  linux-image-3.2.0-35-generic  3.2.0-35.55  Linux kernel image for version 3.2.0 on 64 bit x86 SMP
        iF  linux-image-3.2.0-37-generic  3.2.0-37.58  Linux kernel image for version 3.2.0 on 64 bit x86 SMP
        iF  linux-image-3.2.0-38-generic  3.2.0-38.60  Linux kernel image for version 3.2.0 on 64 bit x86 SMP
        iU  linux-image-generic           3.2.0.37.45  Generic Linux kernel image

    So I thought of removing the 3.2.0-32 kernel with:

        # sudo apt-get purge linux-image-3.2.0-32-generic
        Reading package lists... Done
        Building dependency tree
        Reading state information... Done
        You might want to run 'apt-get -f install' to correct these:
        The following packages have unmet dependencies:
         linux-generic : Depends: linux-headers-generic (= 3.2.0.37.45) but 3.2.0.38.46 is to be installed
        E: Unmet dependencies. Try 'apt-get -f install' with no packages (or specify a solution).

    No success. When I try apt-get -f install, it also fails:

        # apt-get -f install
        Reading package lists... Done
        Building dependency tree
        Reading state information... Done
        Correcting dependencies... Done
        The following packages were automatically installed and are no longer required:
          linux-headers-3.2.0-34 linux-headers-3.2.0-35 linux-headers-3.2.0-34-generic linux-headers-3.2.0-35-generic
        Use 'apt-get autoremove' to remove them.
        The following extra packages will be installed:
          linux-generic linux-image-generic
        The following packages will be upgraded:
          linux-generic linux-image-generic
        2 upgraded, 0 newly installed, 0 to remove and 9 not upgraded.
        5 not fully installed or removed.
        Need to get 0 B/4,334 B of archives.
        After this operation, 0 B of additional disk space will be used.
        Do you want to continue [Y/n]? y
        Setting up initramfs-tools (0.99ubuntu13.1) ...
        update-initramfs: deferring update (trigger activated)
        Setting up linux-image-3.2.0-37-generic (3.2.0-37.58) ...
        Running depmod.
        update-initramfs: deferring update (hook will be called later)
        The link /initrd.img is a dangling link to /boot/initrd.img-3.2.0-38-generic
        Examining /etc/kernel/postinst.d.
        run-parts: executing /etc/kernel/postinst.d/initramfs-tools 3.2.0-37-generic /boot/vmlinuz-3.2.0-37-generic
        update-initramfs: Generating /boot/initrd.img-3.2.0-37-generic

        gzip: stdout: No space left on device
        E: mkinitramfs failure cpio 141 gzip 1
        update-initramfs: failed for /boot/initrd.img-3.2.0-37-generic with 1.
        run-parts: /etc/kernel/postinst.d/initramfs-tools exited with return code 1
        Failed to process /etc/kernel/postinst.d at /var/lib/dpkg/info/linux-image-3.2.0-37-generic.postinst line 1010.
        dpkg: error processing linux-image-3.2.0-37-generic (--configure):
         subprocess installed post-installation script returned error exit status 2
        Setting up linux-image-3.2.0-38-generic (3.2.0-38.60) ...
        Running depmod.
        update-initramfs: deferring update (hook will be called later)
        The link /initrd.img is a dangling link to /boot/initrd.img-3.2.0-37-generic
        Examining /etc/kernel/postinst.d.
        run-parts: executing /etc/kernel/postinst.d/initramfs-tools 3.2.0-38-generic /boot/vmlinuz-3.2.0-38-generic
        update-initramfs: Generating /boot/initrd.img-3.2.0-38-generic

        gzip: stdout: No space left on device
        E: mkinitramfs failure cpio 141 gzip 1
        update-initramfs: failed for /boot/initrd.img-3.2.0-38-generic with 1.
        run-parts: /etc/kernel/postinst.d/initramfs-tools exited with return code 1
        Failed to process /etc/kernel/postinst.d at /var/lib/dpkg/info/linux-image-3.2.0-38-generic.postinst line 1010.
        dpkg: error processing linux-image-3.2.0-38-generic (--configure):
         subprocess installed post-installation script returned error exit status 2
        dpkg: dependency problems prevent configuration of linux-image-generic:
         linux-image-generic depends on linux-image-3.2.0-37-generic; however:
          Package linux-image-3.2.0-37-generic is not configured yet.
        dpkg: error processing linux-image-generic (--configure):
         dependency problems - leaving unconfigured
        dpkg: dependency problems prevent configuration of linux-generic:
         linux-generic depends on linux-image-generic (= 3.2.0.37.45); however:
          Package linux-image-generic is not configured yet.
         linux-generic depends on linux-headers-generic (= 3.2.0.37.45); however:
          Version of linux-headers-generic on system is 3.2.0.38.46.
        dpkg: error processing linux-generic (--configure):
         dependency problems - leaving unconfigured
        Processing triggers for initramfs-tools ...
        No apport report written because the error message indicates its a followup error from a previous failure.
        No apport report written because MaxReports is reached already
        update-initramfs: Generating /boot/initrd.img-3.2.0-35-generic

        gzip: stdout: No space left on device
        E: mkinitramfs failure cpio 141 gzip 1
        update-initramfs: failed for /boot/initrd.img-3.2.0-35-generic with 1.
        dpkg: error processing initramfs-tools (--configure):
         subprocess installed post-installation script returned error exit status 1
        No apport report written because MaxReports is reached already
        Errors were encountered while processing:
         linux-image-3.2.0-37-generic
         linux-image-3.2.0-38-generic
         linux-image-generic
         linux-generic
         initramfs-tools
        E: Sub-process /usr/bin/dpkg returned an error code (1)

    Any help would really be appreciated.

    Update: I ran:

        sudo rm /boot/*-3.2.0-32-generic /boot/*-3.2.0-34-generic

    After that, apt-get -f install still fails, now with the following:

        root@localhost:/# apt-get -f install
        Reading package lists... Done
        Building dependency tree
        Reading state information... Done
        Correcting dependencies... Done
        The following extra packages will be installed:
          linux-generic
        The following packages will be upgraded:
          linux-generic
        1 upgraded, 0 newly installed, 0 to remove and 9 not upgraded.
        1 not fully installed or removed.
        Need to get 0 B/1,722 B of archives.
        After this operation, 0 B of additional disk space will be used.
        Do you want to continue [Y/n]? y
        dpkg: dependency problems prevent configuration of linux-generic:
         linux-generic depends on linux-image-generic (= 3.2.0.37.45); however:
          Version of linux-image-generic on system is 3.2.0.38.46.
         linux-generic depends on linux-headers-generic (= 3.2.0.37.45); however:
          Version of linux-headers-generic on system is 3.2.0.38.46.
        dpkg: error processing linux-generic (--configure):
         dependency problems - leaving unconfigured
        No apport report written because the error message indicates its a followup error from a previous failure.
        Errors were encountered while processing:
         linux-generic
        E: Sub-process /usr/bin/dpkg returned an error code (1)
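    With the -32 and -34 kernel files now deleted from /boot by hand, there should be enough free space for the initramfs images, so a sequence along these lines ought to let dpkg finish and then clean up the package database (a sketch based on standard apt/dpkg behaviour, not a verified fix for this exact machine; keep the running kernel, 3.2.0-35, installed):

        # Let dpkg/apt finish configuring the half-installed kernels
        sudo apt-get -f install
        sudo dpkg --configure -a

        # Purge the package entries for the kernels whose files were removed by hand
        sudo apt-get purge linux-image-3.2.0-32-generic linux-image-3.2.0-34-generic
        sudo apt-get autoremove          # drops the now-unneeded header packages

        # Regenerate any missing initramfs images and the GRUB menu
        sudo update-initramfs -u -k all
        sudo update-grub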

    Read the article

  • Waterfall Model (SDLC) vs. Prototyping Model

    The characters in the fable of the Tortoise and the Hare can easily be used to demonstrate the similarities and differences between the Waterfall and Prototyping software development models. This children's fable is about a race between a consistently slow-moving but steadfast turtle and an extremely fast but unreliable rabbit. After closely comparing each character's attributes with both software development models, a trend appears: the Waterfall Model closely resembles the Tortoise, in that it is typically a slow-moving process broken up into multiple sequential steps that must be executed in a strict linear order. The Tortoise is quoted several times in the story saying "Slow and steady wins the race." This is the perfect mantra for the Waterfall Model, which is often seen as cumbersome and slow moving.

    Waterfall Model Phases

    Requirement Analysis & Definition
    This phase focuses on defining the requirements for the project to be developed and on determining whether the project is even feasible. Requirements are collected by analyzing existing systems and functionality against the needs of the business and the desires of the end users. The desired output of this phase is a list of specific requirements from the business that are to be designed and implemented in the subsequent steps. In addition, this phase is used to determine whether any value will be gained by completing the project.

    System Design
    This phase focuses primarily on the architectural design of the system and on how it will interact within itself and with other existing applications. Projects at this level should be viewed at a high level, so that actual implementation details are decided in the implementation phase; however, major environmental decisions, like hardware and platform, are typically made in this phase. The basic goal is to design the application at the system level, in which classes, interfaces, and interactions are defined. Decisions about scalability, distribution, and reliability should also be weighed for every choice made. The desired output of this phase is a functional design document stating all the architectural decisions made for the project, along with diagrams such as sequence and class diagrams.

    Software Design
    This phase focuses primarily on refining the decisions in the functional design document. Classes and interfaces are further broken down into logical modules based on the interfaces and interactions previously identified. The output of this phase is a formal design document.

    Implementation / Coding
    This phase focuses primarily on implementing the previously defined modules as units of code. These units are developed independently and are integrated as the system is assembled into a whole.

    Software Integration & Verification
    This phase focuses primarily on testing each unit of code developed, as well as the system as a whole. There are two basic types of testing in this phase: unit tests and integration tests. Unit tests are built to test the functionality of a code unit, to ensure it performs its desired task. Integration testing tests the system as a whole, because it focuses on the results of combining specific units of code and validating them against expected results. The output of this phase is a test plan that includes tests with expected results and actual results.

    System Verification
    This phase focuses primarily on testing the system as a whole against the list of project requirements and the desired operating environment.

    Operation & Maintenance
    This phase focuses primarily on handing the completed project over to the customer, so that they can verify that all of their original requirements have been met. It also validates the correctness of those requirements and determines whether any changes need to be made. In addition, any problems not resolved in the previous phase are handled here.

    The Waterfall Model's linear, sequential methodology offers a project certain advantages and disadvantages.

    Advantages of the Waterfall Model
    - Simple to implement and execute, for individual projects or company-wide
    - Limited demand on resources
    - Large emphasis on documentation

    Disadvantages of the Waterfall Model
    - Completed phases cannot be revisited, regardless of whether issues arise within the project
    - Accurate requirements are never gathered prior to the completion of the requirements phase, due to the lack of clarification of the client's desires
    - Small changes or errors that arise in the application may cause additional problems
    - The client cannot change any requirements once the requirements phase is complete, leaving no options for change as the customer's desires evolve
    - Excess documentation
    - Phases are cumbersome and slow moving

    Learn more about the major processes in the Software Development Life Cycle and the Waterfall Model.

    Conversely, the Hare shares traits with the Prototyping model, in which ideas are rapidly converted into basic working examples and subsequent changes are made to quickly align the project with the customer's desires as they are formulated, and as the software strays from the customer's vision. The basic concept of prototyping is to eliminate the need for fully defined project requirements up front. Projects are allowed to grow as the customer's needs and requests grow: projects are initially designed according to basic requirements and are refined as the requirements themselves become more refined. This process allows customers to feel their way around the application, to ensure that the application being developed is exactly what they want. This model also works well for determining the feasibility of certain approaches: prototypes allow examples of specific functionality to be implemented quickly using particular techniques.

    Advantages of Prototyping
    - Active participation from users and customers
    - Allows customers to change their minds when specifying requirements
    - Customers get a better understanding of the system as it is developed
    - Earlier bug/error detection
    - Promotes communication with customers
    - The prototype could be used as the final production version
    - Reduced time needed to develop applications compared to the Waterfall method

    Disadvantages of Prototyping
    - Promotes constant redefinition of project requirements, which can cause major system rewrites
    - Potential for increased complexity of the system as its scope expands
    - The customer may mistake the prototype for the working version
    - Implementation compromises can increase complexity when applying updates and fixes

    When companies are trying to decide between the Waterfall and Prototyping models, they need to evaluate the benefits and disadvantages of both. Typically, smaller companies, or projects with major time constraints, head for more of a Prototyping approach, because it can reduce the time needed to complete the project: there is more focus on building and less on defining requirements and scope before the project starts. On the other hand, companies with well-defined requirements, and the time to generate proper documentation, should steer towards more of a Waterfall approach, because they are in a position to obtain clarified requirements and to design an optimal solution prior to the start of coding.

    Read the article

  • Automating deployments with the SQL Compare command line

    - by Jonathan Hickford
    In my previous article, "Five Tips to Get Your Organisation Releasing Software Frequently", I looked at how teams can automate processes to speed up release frequency. In this post, I'm looking specifically at automating deployments using the SQL Compare command line.

    SQL Compare compares SQL Server schemas and deploys the differences. It works very effectively in scenarios where only one deployment target is required: source and target databases are specified, compared, and a change script is automatically generated and applied. But if multiple targets exist, and pressure to increase the frequency of releases builds, this solution quickly becomes unwieldy. This is where SQL Compare's command line comes into its own.

    I've put together a PowerShell script that loops through a Servers table and pulls out the server and database names; these are then passed to sqlcompare.exe as target parameters. In the example, the source database is a scripts folder: a folder structure of scripted-out database objects used by both SQL Source Control and SQL Compare. The script can easily be adapted to use schema snapshots.

        -- Create a DeploymentTargets database and a Servers table
        CREATE DATABASE DeploymentTargets
        GO
        USE DeploymentTargets
        GO
        CREATE TABLE [dbo].[Servers](
            [id] [int] IDENTITY(1,1) NOT NULL,
            [serverName] [nvarchar](50) NULL,
            [environment] [nvarchar](50) NULL,
            [databaseName] [nvarchar](50) NULL,
            CONSTRAINT [PK_Servers] PRIMARY KEY CLUSTERED ([id] ASC)
        )
        GO
        -- Now insert your target server and database details
        INSERT INTO dbo.Servers ( serverName, environment, databaseName)
        VALUES ( N'myserverinstance', N'myenvironment1', N'mydb1')
        INSERT INTO dbo.Servers ( serverName, environment, databaseName)
        VALUES ( N'myserverinstance', N'myenvironment2', N'mydb2')

    Here's the PowerShell script, which you can adapt for yourself as well.

        # We're holding the server names and database names that we want to deploy to in a database table.
        # We need to connect to that server to read these details
        $serverName = ""
        $databaseName = "DeploymentTargets"
        $authentication = "Integrated Security=SSPI"
        #$authentication = "User Id=xxx;PWD=xxx" # If you are using database authentication instead of Windows authentication.

        # Path to the scripts folder we want to deploy to the databases
        $scriptsPath = "SimpleTalk"

        # Path to SQLCompare.exe
        $SQLComparePath = "C:\Program Files (x86)\Red Gate\SQL Compare 10\sqlcompare.exe"

        # Create SQL connection string, and connection
        $ServerConnectionString = "Data Source=$serverName;Initial Catalog=$databaseName;$authentication"
        $ServerConnection = new-object system.data.SqlClient.SqlConnection($ServerConnectionString);

        # Create a Dataset to hold the DataTable
        $dataSet = new-object "System.Data.DataSet" "ServerList"

        # Create a query
        $query = "SET NOCOUNT ON;"
        $query += "SELECT serverName, environment, databaseName "
        $query += "FROM dbo.Servers; "

        # Create a DataAdapter to populate the DataSet with the results
        $dataAdapter = new-object "System.Data.SqlClient.SqlDataAdapter" ($query, $ServerConnection)
        $dataAdapter.Fill($dataSet) | Out-Null

        # Close the connection
        $ServerConnection.Close()

        # Populate the DataTable
        $dataTable = new-object "System.Data.DataTable" "Servers"
        $dataTable = $dataSet.Tables[0]

        # For every row in the DataTable
        $dataTable | FOREACH-OBJECT {

            "Server Name: $($_.serverName)"
            "Database Name: $($_.databaseName)"
            "Environment: $($_.environment)"

            # Compare the scripts folder to the database and synchronize the database to match
            # NB. Have set SQL Compare to abort on medium level warnings.
            $arguments = @("/scripts1:$($scriptsPath)", "/server2:$($_.serverName)", "/database2:$($_.databaseName)", "/AbortOnWarnings:Medium") # + @("/sync" ) # Commented out the 'sync' parameter for safety
            write-host $arguments
            & $SQLComparePath $arguments
            "Exit Code: $LASTEXITCODE"

            # Some interesting variations
            # Check that every database matches a folder.
            # For example this might be a pre-deployment step to validate everything is at the same baseline state.
            # Or a post deployment script to validate the deployment worked.
            # An exit code of 0 means the databases are identical.
            #
            # $arguments = @("/scripts1:$($scriptsPath)", "/server2:$($_.serverName)", "/database2:$($_.databaseName)", "/Assertidentical")

            # Generate a report of the difference between the folder and each database. Generate a SQL update script for each database.
            # For example use this after the above to generate upgrade scripts for each database
            # Examine the warnings and the HTML diff report to understand how the script will change objects
            #
            # $arguments = @("/scripts1:$($scriptsPath)", "/server2:$($_.serverName)", "/database2:$($_.databaseName)", "/ScriptFile:update_$($_.environment+"_"+$_.databaseName).sql", "/report:update_$($_.environment+"_"+$_.databaseName).html", "/reportType:Interactive", "/showWarnings", "/include:Identical")
        }

    It's worth noting that the above example generates the deployment scripts dynamically. This approach should be problem-free for the vast majority of changes, but it is still good practice to review and test a pre-generated deployment script prior to deployment. An alternative approach would be to pre-generate a single deployment script using SQL Compare, and run it en masse against multiple targets programmatically using sqlcmd, or using a tool like SQL Multi Script.

    You can use the /ScriptFile, /report, and /showWarnings flags to generate change scripts, difference reports, and any warnings; see the second commented-out example in the PowerShell above.

    There is a drawback to running a pre-generated deployment script: it assumes that a given database target hasn't drifted from its expected state. Often there are (rightly or wrongly) many individuals within an organization who have permission to alter the production database, and changes can therefore be made outside the prescribed development processes. The consequence is that at deployment time, the applied script has been validated against a target that no longer represents reality. The solution is to add a check for drift prior to running the deployment script. This is achieved by using sqlcompare.exe to compare the target against the expected schema snapshot using the /Assertidentical flag. Should this return any differences (sqlcompare.exe exit code 79), a drift report is output instead of executing the deployment script; see the first commented-out example above.

    Any checks and processes that should be undertaken prior to a manual deployment should also happen during an automated deployment. You might think about triggering backups prior to deployment; even better, automate the verification of the backup too.

    You can use SQL Compare's command line interface along with PowerShell to automate the multiple actions and checks that you need in your deployment process. Automation is a practical solution where multiple targets and a higher release cadence come into play. As we know, with great power comes great responsibility: responsibility to ensure that the necessary checks are made so deployments remain trouble-free. (The code sample supplied in this post automates the simple dynamic deployment case. If you are considering more advanced automation, e.g. the drift checks, script generation, deploying to large numbers of targets, and backup/verification, please email me at [email protected] for further script samples or if you have further questions.)
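    As a concrete illustration of the drift check described above, here is a minimal, untested sketch meant to sit inside the same ForEach-Object loop as the script (it reuses the script's $arguments conventions and the flags quoted in this post): run /Assertidentical first, and only apply /sync when the target matches the expected state.

        # Sketch: skip deployment when drift is detected (exit code 79 = differences found)
        $checkArgs = @("/scripts1:$($scriptsPath)", "/server2:$($_.serverName)", "/database2:$($_.databaseName)", "/Assertidentical")
        & $SQLComparePath $checkArgs
        if ($LASTEXITCODE -eq 0) {
            # Target matches the expected schema: safe to deploy
            $deployArgs = @("/scripts1:$($scriptsPath)", "/server2:$($_.serverName)", "/database2:$($_.databaseName)", "/AbortOnWarnings:Medium", "/sync")
            & $SQLComparePath $deployArgs
        } elseif ($LASTEXITCODE -eq 79) {
            write-host "Drift detected on $($_.serverName)/$($_.databaseName) - skipping deployment"
        }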

    Read the article

  • Using Apache FOP from .NET level

    - by Lukasz Kurylo
    In one of my previous posts I was talking about FO.NET, which I was using to generate pdf documents from XSL-FO. FO.NET is one of the .NET ports of Apache FOP. Unfortunately, it is no longer maintained. I knew this when I decided to use it, because there is a lack of available (free) choices for .NET to render a pdf from XSL-FO. I hoped that in this implementation I would find everything I needed to create a pdf file for my really simple requirements. FO.NET is a port of some old version of Apache FOP, and I found out really quickly that some features I needed were missing, like dotted borders, double borders, or support for margins. So I started looking for alternatives. I didn't try NFOP, another port of Apache FOP, because I found something I think is much better: the IKVM.NET project.

    IKVM.NET is not a pdf renderer. So what is it? From the project site:

        IKVM.NET is an implementation of Java for Mono and the Microsoft .NET Framework. It includes the following components:
        - a Java Virtual Machine implemented in .NET
        - a .NET implementation of the Java class libraries
        - tools that enable Java and .NET interoperability

    In its simplest form, IKVM.NET allows you to use a Java code library from C# code, and vice versa.

    I tried to use Apache FOP (I think the best open source XSL-FO to pdf renderer, written in Java) from my C# project through IKVM.NET, and it works like a charm. In the rest of this post I want to show how to build a .NET *.dll class library from the Apache FOP *.jar files with IKVM.NET, and how to generate a simple "Hello world" pdf document.

    To start playing with IKVM.NET and Apache FOP, we need to download their packages:
    - IKVM.NET
    - Apache FOP

    and then unpack them. From the FOP directory, copy all the *.jar files from the lib and build catalogs to some location, e.g. d:\fop. The second step is to build the *.dll library from these files. On the console, execute the following command:

        ikvmc -target:library -out:d:\fop\fop.dll -recurse:d:\fop

    The ikvmc tool is located in the bin subdirectory of the directory where you unpacked IKVM.NET. You must execute this command from that catalog, add that path to the global PATH variable, or specify the full path to the bin subdirectory.

    If no error occurred during this process, the fop.dll library should have been created. Now we can create a simple project to test whether we can create a pdf file. So let's create a simple console application and add references to fop.dll and the IKVM dll's: IKVM.OpenJDK.Core and IKVM.OpenJDK.XML.API.
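    If you are building outside Visual Studio, a hypothetical compile command for such a console project might look like the line below (the source file name is illustrative; the two IKVM assemblies ship in IKVM.NET's bin directory):

        csc Program.cs /reference:d:\fop\fop.dll /reference:IKVM.OpenJDK.Core.dll /reference:IKVM.OpenJDK.XML.API.dll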
    Full code to generate a pdf file from an XSL-FO template:

        static void Main(string[] args)
        {
            // initialize the Apache FOP
            FopFactory fopFactory = FopFactory.newInstance();

            // in this stream we will get the generated pdf file
            OutputStream o = new DotNetOutputMemoryStream();
            try
            {
                Fop fop = fopFactory.newFop("application/pdf", o);
                TransformerFactory factory = TransformerFactory.newInstance();
                Transformer transformer = factory.newTransformer();

                // read the template from disc
                Source src = new StreamSource(new File("HelloWorld.fo"));
                Result res = new SAXResult(fop.getDefaultHandler());
                transformer.transform(src, res);
            }
            finally
            {
                o.close();
            }
            using (System.IO.FileStream fs = System.IO.File.Create("HelloWorld.pdf"))
            {
                // write the generated pdf from the .NET MemoryStream to disc
                var data = ((DotNetOutputMemoryStream)o).Stream.GetBuffer();
                fs.Write(data, 0, data.Length);
            }
            Process.Start("HelloWorld.pdf");
            System.Console.ReadLine();
        }

    Apache FOP by default uses Java's Xalan to work with XML files; I didn't find a way to replace this piece with an equivalent from the .NET standard library. If any error or warning occurs while generating the pdf file, it will be shown on the console; that's why I put the last line in the sample above. DotNetOutputMemoryStream is my wrapper for the Java OutputStream. I created it to be able to exchange data between .NET and Java objects. Its implementation:

        class DotNetOutputMemoryStream : OutputStream
        {
            private System.IO.MemoryStream ms = new System.IO.MemoryStream();

            public System.IO.MemoryStream Stream
            {
                get { return ms; }
            }
            public override void write(int i)
            {
                ms.WriteByte((byte)i);
            }
            public override void write(byte[] b, int off, int len)
            {
                ms.Write(b, off, len);
            }
            public override void write(byte[] b)
            {
                ms.Write(b, 0, b.Length);
            }
            public override void close()
            {
                ms.Close();
            }
            public override void flush()
            {
                ms.Flush();
            }
        }

    The last thing we need is the HelloWorld.fo template:

        <?xml version="1.0" encoding="utf-8"?>
        <fo:root xmlns:fo="http://www.w3.org/1999/XSL/Format"
                 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
          <fo:layout-master-set>
            <fo:simple-page-master master-name="simple"
                                   page-height="29.7cm"
                                   page-width="21cm"
                                   margin-top="1.8cm"
                                   margin-bottom="0.8cm"
                                   margin-left="1.6cm"
                                   margin-right="1.2cm">
              <fo:region-body margin-top="3cm"/>
              <fo:region-before extent="3cm"/>
              <fo:region-after extent="1.5cm"/>
            </fo:simple-page-master>
          </fo:layout-master-set>
          <fo:page-sequence master-reference="simple">
            <fo:flow flow-name="xsl-region-body">
              <fo:block font-size="18pt" color="black" text-align="center">
                Hello, World!
              </fo:block>
            </fo:flow>
          </fo:page-sequence>
        </fo:root>

    I'm not going to explain how this template is built, because that will be covered in future posts. The generated pdf file should show the centered "Hello, World!" text. (The screenshot of the result is not reproduced here.)

    Read the article

  • Is there an equivalent to Java's ClassFileTransformer in .NET? (a way to replace a class)

    - by Alix
    I've been searching for this for quite a while with no luck so far. Is there an equivalent to Java's ClassFileTransformer in .NET? Basically, I want to create a class CustomClassFileTransformer (which in Java would implement the interface ClassFileTransformer) that gets called whenever a class is loaded, and is allowed to tweak it and replace it with the tweaked version. I know there are frameworks that do similar things, but I was looking for something more straightforward, like implementing my own ClassFileTransformer. Is it possible?

    EDIT #1. More details about why I need this:

    Basically, I have a C# application and I need to monitor the instructions it wants to run, in order to detect read or write operations on fields (the Ldfld and Stfld opcodes) and insert some instructions before the read/write takes place. I know how to do this (except for the part where I need to be invoked to replace the class): for every method whose code I want to monitor, I must:

    - Get the method's MethodBody using MethodBase.GetMethodBody()
    - Transform it to a byte array with MethodBody.GetILAsByteArray(); the byte[] it returns contains the bytecode
    - Analyse the bytecode as explained here, possibly inserting new instructions or deleting/modifying existing ones by changing the contents of the array
    - Create a new method and use the new bytecode to create its body, with MethodBuilder.CreateMethodBody(byte[] il, int count), where il is the array with the bytecode

    I put all these tweaked methods in a new class and use the new class to replace the one that was originally going to be loaded.

    An alternative to replacing classes would be somehow getting notified whenever a method is invoked. Then I'd replace the call to that method with a call to my own tweaked method, which I would tweak only the first time it is invoked and then put in a dictionary for future uses, to reduce overhead (for future calls I'd just look up the method and invoke it; I wouldn't need to analyse the bytecode again). I'm currently investigating ways to do this, and LinFu looks pretty interesting, but if there were something like a ClassFileTransformer it would be much simpler: I just rewrite the class, replace it, and let the code run without monitoring anything.

    An additional note: the classes may be sealed. I want to be able to replace any kind of class; I cannot impose restrictions on their attributes.

    EDIT #2. Why I need to do this at runtime:

    I need to monitor everything that is going on, so that I can detect every access to data. This applies to the code of library classes as well. However, I cannot know in advance which classes are going to be used, and even if I knew every possible class that might get loaded, it would be a huge performance hit to tweak all of them instead of waiting to see whether they actually get invoked or not.

    POSSIBLE (BUT PRETTY HARDCORE) SOLUTION. In case anyone is interested (and I see the question has been faved, so I guess someone is), this is what I'm looking at right now. Basically, I'd have to implement the profiling API and register for the events that I'm interested in, in my case whenever a JIT compilation starts. An extract from the blog post:

        In your ICorProfilerCallback2::ModuleLoadFinished callback, you call ICorProfilerInfo2::GetModuleMetadata to get a pointer to a metadata interface on that module. QI for the metadata interface you want. Search MSDN for "IMetaDataImport", and grope through the table of contents to find topics on the metadata interfaces. Once you're in metadata-land, you have access to all the types in the module, including their fields and function prototypes. You may need to parse metadata signatures, and this signature parser may be of use to you. In your ICorProfilerCallback2::JITCompilationStarted callback, you may use ICorProfilerInfo2::GetILFunctionBody to inspect the original IL, and ICorProfilerInfo2::GetILFunctionBodyAllocator and then ICorProfilerInfo2::SetILFunctionBody to replace that IL with your own.

    The great news: I get notified when a JIT compilation starts, and I can replace the bytecode right there, without having to worry about replacing the class, etc. The not-so-great news: you cannot invoke managed code from the API's callback methods, which makes sense but means I'm on my own parsing the IL code, etc., as opposed to being able to use Cecil, which would have been a breeze. I don't think there's a simpler way to do this without using AOP frameworks (such as PostSharp). If anyone has any other ideas, please let me know. I'm not marking the question as answered yet.
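    For illustration, here is a minimal sketch of the inspection half of the steps listed in EDIT #1 (the reflection calls are standard .NET; the target method is arbitrary). It grabs a method's IL as a byte array, which is the input for the kind of Ldfld/Stfld analysis the question describes.

        using System;
        using System.Reflection;

        class IlInspector
        {
            static void Main()
            {
                // Pick any method to inspect; here, string.Concat(string, string).
                MethodInfo method = typeof(string).GetMethod(
                    "Concat", new[] { typeof(string), typeof(string) });

                // Step 1: get the MethodBody; step 2: extract the raw IL bytes.
                MethodBody body = method.GetMethodBody();
                byte[] il = body.GetILAsByteArray();

                // This byte array is what a rewriter would scan for the Ldfld (0x7B)
                // and Stfld (0x7D) opcodes before emitting a tweaked copy.
                Console.WriteLine("{0}: {1} bytes of IL", method.Name, il.Length);
            }
        }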

    Read the article

  • how to use serial port in UDK using windows DLL and DLLBind directive?

    - by Shayan Abbas
    I want to use serial port in UDK, For that purpose i use a windows DLL and DLLBind directive. I have a thread in windows DLL for serial port data recieve event. My problem is: this thread doesn't work properly. Please Help me. below is my code SerialPortDLL Code: // SerialPortDLL.cpp : Defines the exported functions for the DLL application. // #include "stdafx.h" #include "Cport.h" extern "C" { // This is an example of an exported variable //SERIALPORTDLL_API int nSerialPortDLL=0; // This is an example of an exported function. //SERIALPORTDLL_API int fnSerialPortDLL(void) //{ // return 42; //} CPort *sp; __declspec(dllexport) void Open(wchar_t* portName) { sp = new CPort(portName); //MessageBox(0,L"ha ha!!!",L"ha ha",0); //MessageBox(0,portName,L"ha ha",0); } __declspec(dllexport) void Close() { sp->Close(); MessageBox(0,L"ha ha!!!",L"ha ha",0); } __declspec(dllexport) wchar_t *GetData() { return sp->GetData(); } __declspec(dllexport) unsigned int GetDSR() { return sp->getDSR(); } __declspec(dllexport) unsigned int GetCTS() { return sp->getCTS(); } __declspec(dllexport) unsigned int GetRing() { return sp->getRing(); } } CPort class code: #include "stdafx.h" #include "CPort.h" #include "Serial.h" CSerial serial; HANDLE HandleOfThread; LONG lLastError = ERROR_SUCCESS; bool fContinue = true; HANDLE hevtOverlapped; HANDLE hevtStop; OVERLAPPED ov = {0}; //char szBuffer[101] = ""; wchar_t *szBuffer = L""; wchar_t *data = L""; DWORD WINAPI ThreadHandler( LPVOID lpParam ) { // Keep reading data, until an EOF (CTRL-Z) has been received do { MessageBox(0,L"ga ga!!!",L"ga ga",0); //Sleep(10); // Wait for an event lLastError = serial.WaitEvent(&ov); if (lLastError != ERROR_SUCCESS) { //LOG( " Unable to wait for a COM-port event" ); } // Setup array of handles in which we are interested HANDLE ahWait[2]; ahWait[0] = hevtOverlapped; ahWait[1] = hevtStop; // Wait until something happens switch (::WaitForMultipleObjects(sizeof(ahWait)/sizeof(*ahWait),ahWait,FALSE,INFINITE)) { case WAIT_OBJECT_0: { // Save event const CSerial::EEvent eEvent = serial.GetEventType(); // Handle break event if (eEvent & CSerial::EEventBreak) { //LOG( " ### BREAK received ###" ); } // Handle CTS event if (eEvent & CSerial::EEventCTS) { //LOG( " ### Clear to send %s ###", serial.GetCTS() ? "on":"off" ); } // Handle DSR event if (eEvent & CSerial::EEventDSR) { //LOG( " ### Data set ready %s ###", serial.GetDSR() ? "on":"off" ); } // Handle error event if (eEvent & CSerial::EEventError) { switch (serial.GetError()) { case CSerial::EErrorBreak: /*LOG( " Break condition" );*/ break; case CSerial::EErrorFrame: /*LOG( " Framing error" );*/ break; case CSerial::EErrorIOE: /*LOG( " IO device error" );*/ break; case CSerial::EErrorMode: /*LOG( " Unsupported mode" );*/ break; case CSerial::EErrorOverrun: /*LOG( " Buffer overrun" );*/ break; case CSerial::EErrorRxOver: /*LOG( " Input buffer overflow" );*/ break; case CSerial::EErrorParity: /*LOG( " Input parity error" );*/ break; case CSerial::EErrorTxFull: /*LOG( " Output buffer full" );*/ break; default: /*LOG( " Unknown" );*/ break; } } // Handle ring event if (eEvent & CSerial::EEventRing) { //LOG( " ### RING ###" ); } // Handle RLSD/CD event if (eEvent & CSerial::EEventRLSD) { //LOG( " ### RLSD/CD %s ###", serial.GetRLSD() ? 
"on" : "off" ); } // Handle data receive event if (eEvent & CSerial::EEventRecv) { // Read data, until there is nothing left DWORD dwBytesRead = 0; do { // Read data from the COM-port lLastError = serial.Read(szBuffer,33,&dwBytesRead); if (lLastError != ERROR_SUCCESS) { //LOG( "Unable to read from COM-port" ); } if( dwBytesRead == 33 && szBuffer[0]=='$' ) { // Finalize the data, so it is a valid string szBuffer[dwBytesRead] = '\0'; ////LOG( "\n%s\n", szBuffer ); data = szBuffer; } } while (dwBytesRead > 0); } } break; case WAIT_OBJECT_0+1: { // Set the continue bit to false, so we'll exit fContinue = false; } break; default: { // Something went wrong //LOG( "Error while calling WaitForMultipleObjects" ); } break; } } while (fContinue); MessageBox(0,L"kka kk!!!",L"kka ga",0); return 0; } CPort::CPort(wchar_t *portName) { // Attempt to open the serial port (COM2) //lLastError = serial.Open(_T(portName),0,0,true); lLastError = serial.Open(portName,0,0,true); if (lLastError != ERROR_SUCCESS) { //LOG( "Unable to open COM-port" ); } // Setup the serial port (115200,8N1, which is the default setting) lLastError = serial.Setup(CSerial::EBaud115200,CSerial::EData8,CSerial::EParNone,CSerial::EStop1); if (lLastError != ERROR_SUCCESS) { //LOG( "Unable to set COM-port setting" ); } // Register only for the receive event lLastError = serial.SetMask(CSerial::EEventBreak | CSerial::EEventCTS | CSerial::EEventDSR | CSerial::EEventError | CSerial::EEventRing | CSerial::EEventRLSD | CSerial::EEventRecv); if (lLastError != ERROR_SUCCESS) { //LOG( "Unable to set COM-port event mask" ); } // Use 'non-blocking' reads, because we don't know how many bytes // will be received. This is normally the most convenient mode // (and also the default mode for reading data). lLastError = serial.SetupReadTimeouts(CSerial::EReadTimeoutNonblocking); if (lLastError != ERROR_SUCCESS) { //LOG( "Unable to set COM-port read timeout" ); } // Create a handle for the overlapped operations hevtOverlapped = ::CreateEvent(0,TRUE,FALSE,0);; if (hevtOverlapped == 0) { //LOG( "Unable to create manual-reset event for overlapped I/O" ); } // Setup the overlapped structure ov.hEvent = hevtOverlapped; // Open the "STOP" handle hevtStop = ::CreateEvent(0,TRUE,FALSE,_T("Overlapped_Stop_Event")); if (hevtStop == 0) { //LOG( "Unable to create manual-reset event for stop event" ); } HandleOfThread = CreateThread( NULL, 0, ThreadHandler, 0, 0, NULL); } CPort::~CPort() { //fContinue = false; //CloseHandle( HandleOfThread ); //serial.Close(); } void CPort::Close() { fContinue = false; CloseHandle( HandleOfThread ); serial.Close(); } wchar_t *CPort::GetData() { return data; } bool CPort::getCTS() { return serial.GetCTS(); } bool CPort::getDSR() { return serial.GetDSR(); } bool CPort::getRing() { return serial.GetRing(); } Unreal Script Code: class MyPlayerController extends GamePlayerController DLLBind(SerialPortDLL); dllimport final function Open(string portName); dllimport final function Close(); dllimport final function string GetData();

    Read the article

  • SQL University: What and why of database testing

    - by Mladen Prajdic
    This is a post for a great idea called SQL University started by Jorge Segarra, also famously known as SqlChicken on Twitter. It’s a collection of blog posts on different database related topics contributed by several smart people all over the world. So this week is mine and we’ll be talking about database testing and refactoring. In 3 posts we’ll cover: SQLU part 1 - What and why of database testing, SQLU part 2 - What and why of database refactoring, SQLU part 3 - Tools of the trade. With that out of the way let us sharpen our pencils and get going.

    Why test a database

    The sad state of the industry today is that there is very little emphasis on testing in general. Test driven development is still a small niche of the programming world while refactoring is even smaller. The cause of this is the inability of developers to convince themselves and their managers that writing tests is beneficial. At the moment tests are mostly viewed as a waste of time. This is because the average person (let’s not fool ourselves, we’re all average) is unable to think about lower future costs in relation to a little more current work. It’s orders of magnitude easier to know the current costs in relation to the current amount of work. That’s why programmers convince themselves testing is a waste of time.

    However, we have to ask ourselves what tests are really about. Maybe finding bugs? No, not really. If we introduce bugs, we’re likely to write tests around those bugs too. But yes, we can find some bugs with tests. The main point of tests is to have reproducible repeatability in our systems. By having a code base largely covered by tests we can know with better certainty what a small code change can break in other parts of the system. By having repeatability we can make code changes with confidence, since we know we’ll see what breaks in other tests. And here comes the inability to estimate future costs: by spending just a few more hours writing those tests we’d know instantly what broke where.

    Imagine we fix a reported bug. We check in the code, deploy it and the users are happy. Until we get a call 2 weeks later that a certain monthly process has stopped working. What we don’t know is that this process was developed by a long-gone coworker and for some reason it relied on that same bug we’ve happily fixed. There’s no way we could’ve known that. We say OK and go in and fix the monthly process. But what we have no clue about is that there’s this ETL job that relied on data from that monthly process. Now that we’ve fixed the process it’s giving unexpected (yet correct, since we fixed it) data to the ETL job. So we have to fix that too. But there’s this part of the app we coded that relies on data from that exact ETL job. And just like that we enter the “Loop of maintenance horror”. With the loop eventually comes blame. Here’s a nice tip for all developers and DBAs out there: if you make a mistake, man up and admit to it.

    All of the above is valid for any kind of software development. Keeping this in mind, the database is nothing other than just a part of the application. But a big part! One reason why testing a database is even more important than testing an application is that one database is usually accessed from multiple applications and processes. This makes it the central and vital part of the enterprise software infrastructure. Knowing all this, can we really afford not to have tests?
    What to test in a database

    Now that we’ve decided we’ll dive into this testing thing, we have to ask ourselves what needs to be tested. The short answer is: everything. The long answer is: read on! There are 2 main ways of doing tests: black box and white box testing. Black box testing means we have no idea how the system internals are built and we only have access to its inputs and outputs. With it we test that the internal changes to the system haven’t caused the input/output behavior of the system to change. The most important things to test here are the edge conditions. It’s where most programs break. With good edge condition tests we can be more confident that changes to the system won’t break it. White box testing has full knowledge of the system internals. With it we test the internal system changes, different states of the application, etc… White and black box tests should be complementary to each other as they are very much interconnected.

    Testing database routines includes testing stored procedures, views, user defined functions and anything else you use to access the data. Database routines are your input/output interface to the database system, so they count as black box testing. We test them for 2 things: data and schema. When testing the schema we only care about the columns and the data types they’re returning. After all, the schema is the contract to the outside systems; if it changes we usually have to change the applications accessing it. One helpful T-SQL command when doing schema tests is SET FMTONLY ON. It tells SQL Server to return only empty result sets, which speeds up tests because no data is returned to the client. After we’ve validated the schema we have to test the returned data. There’s no other way to do this but to have the expected data known before the test executes and to compare that data to the database routine’s output.

    Testing authentication and authorization helps us validate who has access to the SQL Server box (authentication) and who has access to certain database objects (authorization). For desktop applications and Windows authentication this works well. But the biggest problem here are web apps. They usually connect to the database as a single user. Please ensure that that user is not SA or an account with admin privileges. That is just bad.

    Load testing ensures that our database can handle peak loads. One often overlooked tool for load testing is Microsoft’s OSTRESS tool. It’s part of the RML utilities (x86, x64) for SQL Server and can help determine if our database server can handle loads like 100 simultaneous users each doing 10 requests per second. SQL Profiler can also help us here by looking at why certain queries are slow and what to do to fix them.

    One particular problem to think about is how to begin testing existing databases. The first thing we have to do is get to know those databases. We can’t test something when we don’t know how it works. To do this we have to talk to the users of the applications accessing the database, run SQL Profiler to see what queries are being run, use existing documentation to decipher all the object relationships, etc… The way to approach this is to choose one part of the database (say a logical grouping of tables that go together) and filter our traces accordingly. Once we’ve done that we move on to the next grouping and so on until we’ve covered the whole database. Then we move on to the next one.
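    To make the schema-testing idea concrete, here is a minimal C# sketch (my own illustration, not from the post) that runs a routine under SET FMTONLY ON and dumps the shape of its result set; a real test would assert on these names and types instead of printing them. The connection string and procedure name are placeholders.

    using System;
    using System.Data;
    using System.Data.SqlClient;

    class SchemaTest
    {
        static void Main()
        {
            using (var conn = new SqlConnection(
                "Server=.;Database=MyDb;Integrated Security=true")) // placeholder
            using (var cmd = new SqlCommand(
                "SET FMTONLY ON; EXEC dbo.GetCustomerOrders @CustomerId = 1; SET FMTONLY OFF;",
                conn)) // hypothetical procedure
            {
                conn.Open();
                using (var reader = cmd.ExecuteReader())
                {
                    // Under FMTONLY only metadata comes back, so this is fast.
                    DataTable schema = reader.GetSchemaTable();
                    foreach (DataRow col in schema.Rows)
                        Console.WriteLine("{0} ({1})", col["ColumnName"], col["DataType"]);
                }
            }
        }
    }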
    Database testing is a topic that we could spend many hours discussing, but let this be a nice intro to the world of database testing. See you in the next post.

    Read the article

  • python list mysteriously getting set to something within my django/piston handler

    - by Anverc
    To start, I'm very new to python, let alone Django and Piston. Anyway, I've created a new BaseHandler class "class BaseApiHandler(BaseHandler)" so that I can extend some of the stff that BaseHandler does. This has been working fine until I added a new filter that could limit results to the first or last result. Now I can refresh the api page over and over and sometimes it will limit the result even if I don't include /limit/whatever in my URL... I've added some debug info into my return value to see what is happening, and that's when it gets more weird. this return value will make more sense after you see the code, but here they are for reference: When the results are correct: "statusmsg": "2 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',}", when the results are incorrect (once you read the code you'll notice two things wrong. First, it doesn't have 'limit':'None', secondly it shouldn't even get this far to begin with. "statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',with limit[0,1](limit,None),}", It may be important to note that I'm the only person with access to the server running this right now, so even if it was a cache issue, it doesn't make sense that I can just refresh and get different results by hitting F5 while viewing: http://localhost/api/hours_detail/datestamp/2009-03-02/empid/22 Here's the code broken into urls.py and handlers.py so that you can see what i'm doing: URLS.PY urlpatterns = patterns('', #hours_detail/id/{id}/empid/{empid}/projid/{projid}/datestamp/{datestamp}/daterange/{fromdate}to{todate}/limit/{first|last}/exact #empid is required # id, empid, projid, datestamp, daterange can be in any order url(r'^api/hours_detail/(?:' + \ r'(?:[/]?id/(?P<id>\d+))?' + \ r'(?:[/]?empid/(?P<empid>\d+))?' + \ r'(?:[/]?projid/(?P<projid>\d+))?' + \ r'(?:[/]?datestamp/(?P<datestamp>\d{4,}[-/\.]\d{2,}[-/\.]\d{2,}))?' + \ r'(?:[/]?daterange/(?P<daterange>(?:\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})(?:to|/-)(?:\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})))?' + \ r')+' + \ r'(?:/limit/(?P<limit>(?:first|last)))?' + \ r'(?:/(?P<exact>exact))?$', hours_detail_resource), HANDLERS.PY # inherit from BaseHandler to add the extra functionality i need to process the possibly null URL params class BaseApiHandler(BaseHandler): # keep track of the handler so the data is represented back to me correctly post_name = 'base' # THIS IS THE LIST IN QUESTION - SOMETIMES IT IS GETTING SET TO [0,1] MYSTERIOUSLY # this gets set to a list when the results are to be limited limit = None def has_limit(self): return (isinstance(self.limit, list) and len(self.limit) == 2) def process_kwarg_read(self, key, value, d_post, b_exact): """ this should be overridden in the derived classes to process kwargs """ pass # override 'read' so we can better handle our api's searching capabilities def read(self, request, *args, **kwargs): d_post = {'status':0,'statusmsg':'Nothing Happened'} try: # setup the named response object # select all employees then filter - querysets are lazy in django # the actual query is only done once data is needed, so this may # seem like some memory hog slow beast, but it's actually not. d_post[self.post_name] = self.queryset(request) # this is a string that holds debug information... 
it's the string I mentioned before pasting this code s_query = '' b_exact = False if 'exact' in kwargs and kwargs['exact'] <> None: b_exact = True s_query = '\'exact\':True,' for key,value in kwargs.iteritems(): # the regex url possibilities will push None into the kwargs dictionary # if not specified, so just continue looping through if that's the case if value == None or key == 'exact': continue # write to the s_query string so we have a nice error message s_query = '%s\'%s\':\'%s\',' % (s_query, key, value) # now process this key/value kwarg self.process_kwarg_read(key=key, value=value, d_post=d_post, b_exact=b_exact) # end of the kwargs for loop else: if self.has_limit(): # THIS SEEMS TO GET HIT SOMETIMES IF YOU CONSTANTLY REFRESH THE API PAGE, EVEN THOUGH # THE LINE IN THE FOR LOOP WHICH UPDATES s_query DOESN'T GET HIS AND THUS self.process_kwarg_read ALSO # DOESN'T GET HIT SO NEITHER DOES limit = [0,1] s_query = '%swith limit[%s,%s](limit,%s),' % (s_query, self.limit[0], self.limit[1], kwargs['limit']) d_post[self.post_name] = d_post[self.post_name][self.limit[0]:self.limit[1]] if d_post[self.post_name].count() == 0: d_post['status'] = 0 d_post['statusmsg'] = '%s not found with query: {%s}' % (self.post_name, s_query) else: d_post['status'] = 1 d_post['statusmsg'] = '%s %s found with query: {%s}' % (d_post[self.post_name].count(), self.post_name, s_query) except: e = sys.exc_info()[1] d_post['status'] = 0 d_post['statusmsg'] = 'error: %s' % e d_post[self.post_name] = [] return d_post class HoursDetailHandler(BaseApiHandler): #allowed_methods = ('GET',) model = HoursDetail exclude = () post_name = 'hours_detail' def process_kwarg_read(self, key, value, d_post, b_exact): if ... # I have several if/elif statements here that check for other things... # 'self.limit =' only shows up in the following elif: elif key == 'limit': order_by = 'clock_time' if value == 'last': order_by = '-clock_time' d_post[self.post_name] = d_post[self.post_name].order_by(order_by) # TO GET HERE, THE ONLY PLACE IN CODE WHERE self.limit IS SET, YOU MUST HAVE GONE THROUGH # THE value == None CHECK???? self.limit = [0, 1] else: raise NameError def read(self, request, *args, **kwargs): # empid is required, so make sure it exists before running BaseApiHandler's read method if not('empid' in kwargs and kwargs['empid'] <> None and kwargs['empid'] >= 0): return {'status':0,'statusmsg':'empid cannot be empty'} else: return BaseApiHandler.read(self, request, *args, **kwargs) Does anyone have a clue how else self.limit might be getting set to [0, 1] ? Am I misunderstanding kwargs or loops or anything in Python?
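    A plausible explanation for anyone landing here (my hypothesis, based on how Piston resources typically behave, not a confirmed answer): the handler instance is created once and reused across requests, so self.limit assigned during one request is still set when the next request arrives. That would explain both symptoms: a refresh without /limit/ in the URL can still see limit == [0, 1] left over from an earlier request, and s_query shows the limit branch without the loop having set it in this request. A minimal sketch of the fix is to reset all per-request state at the top of read():

    class BaseApiHandler(BaseHandler):
        post_name = 'base'

        def read(self, request, *args, **kwargs):
            # Handler instances are shared between requests, so anything
            # assigned to self in an earlier request is still there.
            # Reset per-request state before doing any work.
            self.limit = None
            d_post = {'status': 0, 'statusmsg': 'Nothing Happened'}
            # ... the rest of the original read() unchanged ...

    (Better still, pass limit around as a local variable, or store it on the request object, rather than on self.)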

    Read the article

  • Simple GET operation with JSON data in ADF Mobile

    - by PadmajaBhat
    Usecase: This sample uses a RESTful service which contains a GET method that fetches employee details for an employee with given employee ID along with other methods. The data is fetched in JSON format. This RESTful service is then invoked via ADF Mobile and the JSON data thus obtained is parsed and rendered in mobile in a table. Prerequisite: Download JDev build JDEVADF_11.1.2.4.0_GENERIC_130421.1600.6436.1 or higher with mobile support.  Steps: Run EmployeeService.java in JSONService.zip. This is a simple service with a method, getEmpById(id) that takes employee ID as parameter and produces employee details in JSON format. Copy the target URL generated on running this service. The target URL will be as shown below: http://127.0.0.1:7101/JSONService-Project1-context-root/jersey/project1 Now, let us invoke this service in our mobile application. For this, create an ADF Mobile application.  Name the application JSON_SearchByEmpID and finish the wizard. Now, let us create a connection to our service. To do this, we create a URL Connection. Invoke new gallery wizard on ApplicationController project.  Select URL Connection option. In the Create URL Connection window, enter connection name as ‘conn’. For URL endpoint, supply the URL you copied earlier on running the service. Remember to use your system IP instead of localhost. Test the connection and click OK. At this point, a connection to the REST service has been created. Since JSON data is not supported directly in WSDC wizard, we need to invoke the operation through Java code using RestServiceAdapter. For this, in the ApplicationController project, create a Java class called ‘EmployeeDC’. We will be creating DC from this class. Add the following code to the newly created class to invoke the getEmpById method. 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 public Employee fetchEmpDetails(){ RestServiceAdapter restServiceAdapter = Model.createRestServiceAdapter(); restServiceAdapter.clearRequestProperties(); restServiceAdapter.setConnectionName("conn"); //URL connection created with this name restServiceAdapter.setRequestType(RestServiceAdapter.REQUEST_TYPE_GET); restServiceAdapter.addRequestProperty("Content-Type", "application/json"); restServiceAdapter.addRequestProperty("Accept", "application/json; charset=UTF-8"); restServiceAdapter.setRetryLimit(0); restServiceAdapter.setRequestURI("/getById/"+inputEmpID); String response = ""; JSONBeanSerializationHelper jsonHelper = new JSONBeanSerializationHelper(); try { response = restServiceAdapter.send(""); //Invoke the GET operation System.out.println("Response received!"); Employee responseObject = (Employee) jsonHelper.fromJSON(Employee.class, response); return responseObject; } catch (Exception e) { } return null; } Here, in lines 2 to 9, we create the RestServiceAdapter and set various properties required to invoke the web service. At line 4, we are pointing to the connection ‘conn’ created previously. Since we want to invoke getEmpById method of the service, which is defined by the URL http://IP:7101/REST_Sanity_JSON-Project1-context-root/resources/project1/getById/{id} we are updating the request URI to point to this URI at line 9. inputEmpID is a variable that will hold the value input by the user for employee ID. This we will be creating in a while. As the method we are invoking is a GET operation and consumes json data, these properties are being set in lines 5 through 7. Finally, we are sending the request in line 13. 
    In line 15, we use jsonHelper.fromJSON to convert the received JSON data to a Java object. The required Java object's structure is defined in the class Employee.java, whose structure is provided later. Since the response from our service is a simple response consisting of attributes like employee ID, name, designation etc., we will just return this parsed response (line 16) and use it to create a DC. As mentioned previously, we would like the user to input the employee ID for which he/she wants to perform the search. So, in the same class, define a variable inputEmpID which will hold the value input by the user. Generate accessors for this variable. Lastly, we need to create the Employee class. The Employee class will define how we want to structure the JSON object received from the service. To design the Employee class, run the service's method in the browser or via the analyzer using 1 as the path parameter. This will give you the output JSON structure. Ours is a simple service that returns a JSONObject with a set of data. Hence, the Employee class will just contain this set of data defined with the proper data types. Create Employee.java in the same project as EmployeeDC.java and write the below code:

    package application;

    import oracle.adfmf.java.beans.PropertyChangeListener;
    import oracle.adfmf.java.beans.PropertyChangeSupport;

    public class Employee {
        private String dept;
        private String desig;
        private int id;
        private String name;
        private int salary;
        private PropertyChangeSupport propertyChangeSupport = new PropertyChangeSupport(this);

        public void setDept(String dept) {
            String oldDept = this.dept;
            this.dept = dept;
            propertyChangeSupport.firePropertyChange("dept", oldDept, dept);
        }

        public String getDept() {
            return dept;
        }

        public void setDesig(String desig) {
            String oldDesig = this.desig;
            this.desig = desig;
            propertyChangeSupport.firePropertyChange("desig", oldDesig, desig);
        }

        public String getDesig() {
            return desig;
        }

        public void setId(int id) {
            int oldId = this.id;
            this.id = id;
            propertyChangeSupport.firePropertyChange("id", oldId, id);
        }

        public int getId() {
            return id;
        }

        public void setName(String name) {
            String oldName = this.name;
            this.name = name;
            propertyChangeSupport.firePropertyChange("name", oldName, name);
        }

        public String getName() {
            return name;
        }

        public void setSalary(int salary) {
            int oldSalary = this.salary;
            this.salary = salary;
            propertyChangeSupport.firePropertyChange("salary", oldSalary, salary);
        }

        public int getSalary() {
            return salary;
        }

        public void addPropertyChangeListener(PropertyChangeListener l) {
            propertyChangeSupport.addPropertyChangeListener(l);
        }

        public void removePropertyChangeListener(PropertyChangeListener l) {
            propertyChangeSupport.removePropertyChangeListener(l);
        }
    }

    Now, let us create a DC out of EmployeeDC.java. A DC as shown below is created. Now, you can design the mobile page as usual and invoke the operation of the service. To design the page, go to the ViewController project and locate adfmf-feature.xml. Create a new feature called 'SearchFeature' by clicking the plus icon. Go to the Content tab and add an amx page; call it SearchPage.amx. Remove the primary and secondary buttons as we don't need them and rename the header. Drag and drop inputEmpID from the DC palette onto the Panel Page in the structure pane as an input text with label. Next, drop the fetchEmpDetails method as an ADF button. For a change, let us display the output in a table component instead of the usual form.
However, you will notice that if you drag and drop Employee onto the structure pane, there is no option for ADF Mobile Table. Hence, we will need to create the table on our own. To do this, let us first drop Employee as an ADF Read -Only form. This step is needed to get the required bindings. We will be deleting this form in a while. Now, from the Component palette, search for ‘Table Layout’. Drag and drop this below the command button.  Within the tablelayout, insert ‘Row Layout’ and ‘Cell Format’ components. Final table structure should be as shown below. Here, we have also defined some inline styling to render the UI in a nice manner. <amx:tableLayout id="tl1" borderWidth="2" halign="center" inlineStyle="vertical-align:middle;" width="100%" cellPadding="10"> <amx:rowLayout id="rl1" > <amx:cellFormat id="cf1" width="30%"> <amx:outputText value="#{bindings.dept.hints.label}" id="ot7" inlineStyle="color:rgb(0,148,231);"/> </amx:cellFormat> <amx:cellFormat id="cf2"> <amx:outputText value="#{bindings.dept.inputValue}" id="ot8" /> </amx:cellFormat> </amx:rowLayout> <amx:rowLayout id="rl2"> <amx:cellFormat id="cf3" width="30%"> <amx:outputText value="#{bindings.desig.hints.label}" id="ot9" inlineStyle="color:rgb(0,148,231);"/> </amx:cellFormat> <amx:cellFormat id="cf4" > <amx:outputText value="#{bindings.desig.inputValue}" id="ot10"/> </amx:cellFormat> </amx:rowLayout> <amx:rowLayout id="rl3"> <amx:cellFormat id="cf5" width="30%"> <amx:outputText value="#{bindings.id.hints.label}" id="ot11" inlineStyle="color:rgb(0,148,231);"/> </amx:cellFormat> <amx:cellFormat id="cf6" > <amx:outputText value="#{bindings.id.inputValue}" id="ot12"/> </amx:cellFormat> </amx:rowLayout> <amx:rowLayout id="rl4"> <amx:cellFormat id="cf7" width="30%"> <amx:outputText value="#{bindings.name.hints.label}" id="ot13" inlineStyle="color:rgb(0,148,231);"/> </amx:cellFormat> <amx:cellFormat id="cf8"> <amx:outputText value="#{bindings.name.inputValue}" id="ot14"/> </amx:cellFormat> </amx:rowLayout> <amx:rowLayout id="rl5"> <amx:cellFormat id="cf9" width="30%"> <amx:outputText value="#{bindings.salary.hints.label}" id="ot15" inlineStyle="color:rgb(0,148,231);"/> </amx:cellFormat> <amx:cellFormat id="cf10"> <amx:outputText value="#{bindings.salary.inputValue}" id="ot16"/> </amx:cellFormat> </amx:rowLayout>     </amx:tableLayout> The values used in the output text of the table come from the bindings obtained from the ADF Form created earlier. As we have used the bindings and don’t need the form anymore, let us delete the form.  One last thing before we deploy. When user changes employee ID, we want to clear the table contents. For this we associate a value change listener with the input text box. Click New in the resulting dialog to create a managed bean. Next, we create a method within the managed bean. For this, click on the New button associated with method. Call the method ‘empIDChange’. Open myClass.java and write the below code in empIDChange(). public void empIDChange(ValueChangeEvent valueChangeEvent) { // Add event code here... 
//Resetting the values to blank values when employee id changes AdfELContext adfELContext = AdfmfJavaUtilities.getAdfELContext(); ValueExpression ve = AdfmfJavaUtilities.getValueExpression("#{bindings.dept.inputValue}", String.class); ve.setValue(adfELContext, ""); ve = AdfmfJavaUtilities.getValueExpression("#{bindings.desig.inputValue}", String.class); ve.setValue(adfELContext, ""); ve = AdfmfJavaUtilities.getValueExpression("#{bindings.id.inputValue}", int.class); ve.setValue(adfELContext, ""); ve = AdfmfJavaUtilities.getValueExpression("#{bindings.name.inputValue}", String.class); ve.setValue(adfELContext, ""); ve = AdfmfJavaUtilities.getValueExpression("#{bindings.salary.inputValue}", int.class); ve.setValue(adfELContext, ""); } That’s it. Deploy the application to android emulator or device. Some snippets from the app.

    Read the article

  • How to get javascript object references or reference count?

    - by Tauren
    How to get reference count for an object

    Is it possible to determine if a javascript object has multiple references to it? Or if it has references besides the one I'm accessing it with? Or even just to get the reference count itself? Can I find this information from javascript itself, or will I need to keep track of my own reference counters? Obviously, there must be at least one reference to it for my code to access the object. But what I want to know is if there are any other references to it, or if my code is the only place it is accessed. I'd like to be able to delete the object if nothing else is referencing it. If you know the answer, there is no need to read the rest of this question.

    Use Case

    In my application, I have a Repository object instance called contacts that contains an array of ALL my contacts. There are also multiple Collection object instances, such as a friends collection and a coworkers collection. Each collection contains an array with a different set of items from the contacts Repository.

    Sample Code

    To make this concept more concrete, consider the code below. Each instance of the Repository object contains a list of all items of a particular type. You might have a repository of Contacts and a separate repository of Events. To keep it simple, you can just get, add, and remove items, and add many via the constructor.

    var Repository = function(items) {
        this.items = items || [];
    }
    Repository.prototype.get = function(id) {
        for (var i = 0, len = this.items.length; i < len; i++) {
            if (this.items[i].id === id) {
                return this.items[i];
            }
        }
    }
    Repository.prototype.add = function(item) {
        if (toString.call(item) === "[object Array]") {
            this.items = this.items.concat(item);
        } else {
            this.items.push(item);
        }
    }
    Repository.prototype.remove = function(id) {
        for (var i = 0, len = this.items.length; i < len; i++) {
            if (this.items[i].id === id) {
                this.removeIndex(i);
            }
        }
    }
    Repository.prototype.removeIndex = function(index) {
        if (this.items[index]) {
            if (/* items[index] has more than 1 reference to it */) {
                // Only remove item from repository if nothing else references it
                this.items.splice(index, 1);
                return;
            }
        }
    }

    Note the line in removeIndex with the comment. I only want to remove the item from my master repository of objects if no other objects have a reference to the item.
    Here's Collection:

    var Collection = function(repo, items) {
        this.repo = repo;
        this.items = items || [];
    }
    Collection.prototype.remove = function(id) {
        for (var i = 0, len = this.items.length; i < len; i++) {
            if (this.items[i].id === id) {
                // Remove object from this collection
                this.items.splice(i, 1);
                // Tell repo to remove it (only if no other references to it)
                this.repo.removeIndex(i);
                return;
            }
        }
    }

    And then this code uses Repository and Collection:

    var contactRepo = new Repository([
        {id: 1, name: "Joe"},
        {id: 2, name: "Jane"},
        {id: 3, name: "Tom"},
        {id: 4, name: "Jack"},
        {id: 5, name: "Sue"}
    ]);
    var friends = new Collection(contactRepo, [contactRepo.get(2), contactRepo.get(4)]);
    var coworkers = new Collection(contactRepo, [contactRepo.get(1), contactRepo.get(2), contactRepo.get(5)]);

    contactRepo.items; // contains item ids 1, 2, 3, 4, 5
    friends.items;     // contains item ids 2, 4
    coworkers.items;   // contains item ids 1, 2, 5

    coworkers.remove(2);
    contactRepo.items; // contains item ids 1, 2, 3, 4, 5
    friends.items;     // contains item ids 2, 4
    coworkers.items;   // contains item ids 1, 5

    friends.remove(4);
    contactRepo.items; // contains item ids 1, 2, 3, 5
    friends.items;     // contains item ids 2
    coworkers.items;   // contains item ids 1, 5

    Notice how coworkers.remove(2) didn't remove id 2 from contactRepo? This is because it was still referenced from friends.items. However, friends.remove(4) causes id 4 to be removed from contactRepo, because no other collection is referring to it.

    Summary

    The above is what I want to do. I'm sure there are ways I can do this by keeping track of my own reference counters and such. But if there is a way to do it using javascript's built-in reference management, I'd like to hear about how to use it.
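    For what it's worth (my note, not part of the original question): JavaScript engines expose neither reference counts nor a way to enumerate the references to an object, so the counting has to be done by hand. A minimal sketch in the style of the code above, with retain/release methods whose names are my own invention:

    // Manual reference counting: collections must retain items they hold
    // and release them when done; the repository removes an item only
    // when its count drops to zero.
    Repository.prototype.retain = function(id) {
        this.counts = this.counts || {};
        this.counts[id] = (this.counts[id] || 0) + 1;
    };
    Repository.prototype.release = function(id) {
        if (this.counts && this.counts[id] > 0) {
            this.counts[id]--;
            if (this.counts[id] === 0) {
                this.remove(id); // nothing references it any more
            }
        }
    };

    Collection would then call repo.retain(item.id) for each item it takes, and repo.release(id) inside its remove.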

    Read the article

  • Google Web Toolkit Deferred Binding Issue

    - by snctln
    I developed a web app using GWT about 2 years ago, since then the application has evolved. In its current state it relies on fetching a single XML file and parsing the information from it. Overall this works great. A requirement of this app is that it needs to be able to be ran from the filesystem (file:///..) as well as the traditional model of running from a webserver (http://...) Fetching this file from a webserver works exactly as expected using a RequestBuilder object. When running the app from the filesystem Firefox, Opera, Safari, and Chrome all behave as expected. When running the app from the filesystem using IE7 or IE8 the RequestBuilder.send() call fails, the information about the error suggests that there is a problem accessing the file due to violating the same origin policy. The app worked as expected in IE6 but not in IE7 or IE8. So I looked at the source code of RequestBuilder.java and saw that the actual request was being executed with an XMLHttpRequest GWT object. So I looked at the source code for XMLHttpRequest.java and found out some information. Here is the code (starts at line 83 in XMLHttpRequest.java) public static native XMLHttpRequest create() /*-{ if ($wnd.XMLHttpRequest) { return new XMLHttpRequest(); } else { try { return new ActiveXObject('MSXML2.XMLHTTP.3.0'); } catch (e) { return new ActiveXObject("Microsoft.XMLHTTP"); } } }-*/; So basically if an XMLHttpRequest cannot be created (like in IE6 because it is not available) an ActiveXObject is used instead. I read up a little bit more on the IE implementation of XMLHttpRequest, and it appears that it is only supported for interacting with files on a webserver. I found a setting in IE8 (Tools-Internet Options-Advanced-Security-Enable native XMLHTTP support), when I uncheck this box my app works. I assume this is because I am more of less telling IE to not use their implementation of XmlHttpRequest, so GWT just uses an ActiveXObject because it doesn't think the native XmlHttpRequest is available. This fixes the problem, but is hardly a long term solution. I can currently catch a failed send request and verify that it was trying to fetch the XML file from the filesystem using normal GWT. What I would like to do in this case is catch the IE7 and IE8 case and have them use a ActiveXObject instead of a native XmlHttpRequest object. There was a posting on the GWT google group that had a supposed solution for this problem (link). Looking at it I can tell that it was created for an older version of GWT. I am using the latest release and think that this is more or less what I would like to do (use GWT deferred binding to detect a specific browser type and run my own implementation of XMLHttpRequest.java in place of the built in GWT implementation). 
Here is the code that I am trying to use package com.mycompany.myapp.client; import com.google.gwt.xhr.client.XMLHttpRequest; public class XMLHttpRequestIE7or8 extends XMLHttpRequest { // commented out the "override" so that eclipse and the ant build script don't throw errors //@Override public static native XMLHttpRequest create() /*-{ try { return new ActiveXObject('MSXML2.XMLHTTP.3.0'); } catch (e) { return new ActiveXObject("Microsoft.XMLHTTP"); } }-*/; // have an empty protected constructor so the ant build script doesn't throw errors // the actual XMLHttpRequest constructor is empty as well so this shouldn't cause any problems protected XMLHttpRequestIE7or8() { } }; And here are the lines that I added to my module xml <replace-with class="com.mycompany.myapp.client.XMLHttpRequestIE7or8"> <when-type-is class="com.google.gwt.xhr.client.XMLHttpRequest"/> <any> <when-property-is name="user.agent" value="ie7" /> <when-property-is name="user.agent" value="ie8" /> </any> </replace-with> From what I can tell this should work, but my code never runs. Does anyone have any idea of what I am doing wrong? Should I not do this via deferred binding and just use native javascript when I catch the fail case instead? Is there a different way of approaching this problem that I have not mentioned? All replies are welcome.
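    A hedged observation that may explain why the replacement never runs: deferred binding substitutes a type only at GWT.create() call sites, and XMLHttpRequest.create() is a static factory that client code calls directly, so a <replace-with> rule on com.google.gwt.xhr.client.XMLHttpRequest is never consulted (static methods also cannot be overridden in Java, which is why the "override" had to be commented out). If that is the cause, one workaround is to route creation through your own factory type and defer-bind that instead. A sketch, with hypothetical class names:

    // XhrFactory.java - default implementation delegates to GWT's own create().
    public class XhrFactory {
        public XMLHttpRequest create() {
            return XMLHttpRequest.create();
        }
    }

    // XhrFactoryIE.java - bound for ie7/ie8 via <replace-with> in the module XML.
    public class XhrFactoryIE extends XhrFactory {
        @Override
        public native XMLHttpRequest create() /*-{
            try {
                return new ActiveXObject('MSXML2.XMLHTTP.3.0');
            } catch (e) {
                return new ActiveXObject('Microsoft.XMLHTTP');
            }
        }-*/;
    }

    // usage in client code:
    XhrFactory factory = GWT.create(XhrFactory.class);
    XMLHttpRequest xhr = factory.create();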

    Read the article

  • Learnings from trying to write better software: Loud errors from the very start

    - by theo.spears
    Microsoft made a very small number of backwards incompatible changes between .NET 1.1 and 2.0, because they wanted to make it as easy and safe as possible to port applications to the new runtime. (Here’s a list.) However, one thing they did change was what happens when a background thread fails with an unhandled exception - in .NET 1.1 nothing happened, the thread terminated, and the application continued oblivious. Try the same trick in .NET 2.0 and the entire application, including all threads, will rudely terminate.

    There are three reasons for this. Firstly, if a background thread has crashed, it may have left the entire application in an inconsistent state, in a way that will affect other threads. It’s better to terminate the entire application than continue and have the application perform actions based on a broken state, for example take customer orders, or write corrupt files to disk. Secondly, during software development, it is far better for errors to be loud and obtrusive. Even if you have unit tests and integration tests (and you should), a key part of ensuring software works properly is to actually try using it, both through systematic testing and through the casual use all software gets from its developers. Subtle errors are easy to miss if you are not actually doing real work using the application; loud errors are obvious. Thirdly, and most importantly, even if catching and swallowing exceptions indiscriminately doesn't cause any problems in your application, the presence of unexpected exceptions shows you do not fully understand the behavior of your code. The currently released version of your application may be absolutely correct. However, because your mental model of the behavior is wrong, any future change you make to the program could and probably will introduce critical errors.

    This applies to more than just exceptions causing threads to exit; any unexpected state should make the application blow up in an un-ignorable way. The worst thing you can do is silently swallow errors and continue. And let's be clear, writing to a log file does not count as blowing up in an un-ignorable way. This is all simple as long as the call stack only contains your code, but when your functions start to be called by third party or .NET framework code, it's surprisingly easy for exceptions to start vanishing. Let's look at two examples.

    1. Windows forms drag drop events

    Usually if you throw an exception from a winforms event handler it will bring up the "application has crashed" dialog with abort and continue options. This is a good default behavior - the error is big and loud, but it is possible for the user to ignore the error and hopefully save their data, if somehow this bug makes it past testing. However drag and drop are different - throw an exception from one of these and it will just be silently swallowed with no explanation. By the way, it's not just drag and drop events. Timer events do it too. You can research how exceptions are treated in different handlers and code appropriately, but the safest and most user friendly approach is to always catch exceptions in your event handlers and show your own error message. I'll talk about one good approach to handling these exceptions at the end of this post.

    2. SSMS integration for SQL Tab Magic

    A while back I wrote an SSMS add-in called SQL Tab Magic (learn more about the process here). It works by listening to certain SSMS events and remembering what documents are opened and closed.
    I deployed it internally and it was used for a few months by a number of people without problems, so I was reasonably confident in its quality. Before releasing I made a few cleanups, including introducing error reporting. Bam. A few days later I was looking at over 1,000 error reports in my inbox. It turns out I wasn't handling table designers properly. The exceptions were there, but again SSMS was helpfully swallowing them all for me, so I was blissfully unaware. Had I made my errors loud from the start, I would have noticed these issues long before and fixed them.

    Handling exceptions

    Now that you are systematically catching exceptions throughout your application, you need to do something with them. I've tried 3 options: log them, alert the user, and automatically send them home. There are a few good options for logging in .NET. The most widespread is Apache log4net, which provides a very capable and configurable logging framework. There is also NLog, which has a compatible interface, with a greater emphasis on fluent rather than XML configuration.

    Alerting the user serves two purposes. Firstly, it means they understand their action has failed, so they don't just assume it worked (silent file copy failure is a problem if you then delete the originals) or keep waiting for a background task to complete. Secondly, it means the users can report the bug to your support team, and then you can fix it. This means the message you show the user should contain the information you need as a developer to identify and fix it. And the user will probably just send you a screenshot of the dialog, so it shouldn't be hidden by scroll bars.

    This leads us to the third option, automatically sending error reports home. By automatic I mean with minimal effort on the part of the user, rather than doing it silently behind their backs. The advantage of this is you can send back far more detailed and precise information than you can expect a user to include in an email, and by making it easier to report errors, you make it more likely users will do so. We do this using a great tool called SmartAssembly (full disclosure: this is a product made by Red Gate). It captures complete stack traces including the values of all local variables and then allows the user to send all this information back with a single click. We also capture log files to help understand what led up to the error. We then use the free SmartAssembly Sync for Jira to dedupe these reports and raise them as bugs in our bug tracking system.

    The combined effect of loud errors during development and then automatic error reporting once software is deployed allows us to find and fix more bugs, correct misunderstandings on how our software works, and overall is a key piece in delivering higher quality software. However it is no substitute for having motivated cunning testers in the building - and we're looking to hire more of those too. If you found this post interesting you should follow me on twitter.
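    As an illustration of the "fail loudly" hookup described above, here is a minimal WinForms sketch (mine, not from the post; MainForm is a placeholder) that routes both worker-thread and UI-thread exceptions through one loud reporting path:

    using System;
    using System.Windows.Forms;

    static class Program
    {
        [STAThread]
        static void Main()
        {
            // Exceptions thrown on non-UI threads (fatal in .NET 2.0+).
            AppDomain.CurrentDomain.UnhandledException += (s, e) =>
                ReportFatal(e.ExceptionObject as Exception);

            // Exceptions thrown on the WinForms UI thread.
            Application.ThreadException += (s, e) =>
                ReportFatal(e.Exception);

            Application.Run(new MainForm()); // placeholder form
        }

        static void ReportFatal(Exception ex)
        {
            // Show the full details a developer needs, not just the message;
            // a real app would also log and offer to send an error report.
            MessageBox.Show(ex == null ? "Unknown error" : ex.ToString(),
                            "Unexpected error");
        }
    }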

    Read the article

  • Refactoring FizzBuzz

    - by MarkPearl
    A few years ago I blogged about FizzBuzz. At the time the post was prompted by Scott Hanselman, who had podcasted about how surprised he was that some programmers could not even solve the FizzBuzz problem within a reasonable period of time during a job interview. At the time I thought I would give the problem a go in F# and sure enough the solution was fairly simple - I then also did a basic solution in C# but never posted it. Since then I have learned that being able to solve a problem and how you solve the problem are two totally different things. Today I decided to give the problem a retry and see if I had learnt anything new in the last year or so. Here is how my solution looked after refactoring…

    Solution 1 – Cheap and Nasty

    public class FizzBuzzCalculator
    {
        public string NumberFormat(int number)
        {
            var numDivisibleBy3 = (number % 3) == 0;
            var numDivisibleBy5 = (number % 5) == 0;
            if (numDivisibleBy3 && numDivisibleBy5)
                return String.Format("{0} FizzBuzz", number);
            else if (numDivisibleBy3)
                return String.Format("{0} Fizz", number);
            else if (numDivisibleBy5)
                return String.Format("{0} Buzz", number);
            return number.ToString();
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            var fizzBuzz = new FizzBuzzCalculator();
            for (int i = 0; i < 100; i++)
            {
                Console.WriteLine(fizzBuzz.NumberFormat(i));
            }
        }
    }

    In my first attempt I just looked at solving the problem - it works, and could be an acceptable solution, but tonight I thought I would see how far I could refactor it… The section I decided to focus on was the mass of if..else code in the NumberFormat method.

    Solution 2 – Replacing If…Else with a Dictionary

    public class FizzBuzzCalculator
    {
        private readonly Dictionary<Tuple<bool, bool>, string> _mappings;

        public FizzBuzzCalculator(Dictionary<Tuple<bool, bool>, string> mappings)
        {
            _mappings = mappings;
        }

        public string NumberFormat(int number)
        {
            var numDivisibleBy3 = (number % 3) == 0;
            var numDivisibleBy5 = (number % 5) == 0;
            var mappedKey = new Tuple<bool, bool>(numDivisibleBy3, numDivisibleBy5);
            return String.Format("{0} {1}", number, _mappings[mappedKey]);
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            var mappings = new Dictionary<Tuple<bool, bool>, string>
            {
                { new Tuple<bool, bool>(true, true), "- FizzBuzz"},
                { new Tuple<bool, bool>(true, false), "- Fizz"},
                { new Tuple<bool, bool>(false, true), "- Buzz"},
                { new Tuple<bool, bool>(false, false), ""}
            };
            var fizzBuzz = new FizzBuzzCalculator(mappings);
            for (int i = 0; i < 100; i++)
            {
                Console.WriteLine(fizzBuzz.NumberFormat(i));
            }
            Console.ReadLine();
        }
    }

    In my second attempt I looked at removing the if..else in the NumberFormat method. A dictionary proved to be useful for this - I added a constructor to the class and injected the dictionary mapping. One could argue that this is totally overkill, but if I was going to use this code in a large system an approach like this makes it easy to put this data in a configuration file, which would improve its compliance with the Open/Closed principle (open for extension, closed for modification). I could of course take the principle even further - the check for divisibility by 3 and 5 is tightly coupled to this class. If I wanted to make it 4 instead of 3, I would need to adjust this class. This introduces my third refactoring.
    Solution 3 – Introducing Delegates and Injecting them into the class

    public delegate bool FizzBuzzComparison(int number);

    public class FizzBuzzCalculator
    {
        private readonly Dictionary<Tuple<bool, bool>, string> _mappings;
        private readonly FizzBuzzComparison _comparison1;
        private readonly FizzBuzzComparison _comparison2;

        public FizzBuzzCalculator(Dictionary<Tuple<bool, bool>, string> mappings, FizzBuzzComparison comparison1, FizzBuzzComparison comparison2)
        {
            _mappings = mappings;
            _comparison1 = comparison1;
            _comparison2 = comparison2;
        }

        public string NumberFormat(int number)
        {
            var mappedKey = new Tuple<bool, bool>(_comparison1(number), _comparison2(number));
            return String.Format("{0} {1}", number, _mappings[mappedKey]);
        }
    }

    class Program
    {
        private static bool DivisibleByNum(int number, int divisor)
        {
            return number % divisor == 0;
        }

        public static bool Divisibleby3(int number)
        {
            return number % 3 == 0;
        }

        public static bool Divisibleby5(int number)
        {
            return number % 5 == 0;
        }

        static void Main(string[] args)
        {
            var mappings = new Dictionary<Tuple<bool, bool>, string>
            {
                { new Tuple<bool, bool>(true, true), "- FizzBuzz"},
                { new Tuple<bool, bool>(true, false), "- Fizz"},
                { new Tuple<bool, bool>(false, true), "- Buzz"},
                { new Tuple<bool, bool>(false, false), ""}
            };
            var fizzBuzz = new FizzBuzzCalculator(mappings, Divisibleby3, Divisibleby5);
            for (int i = 0; i < 100; i++)
            {
                Console.WriteLine(fizzBuzz.NumberFormat(i));
            }
            Console.ReadLine();
        }
    }

    I have taken this one step further and introduced delegates that are injected into the FizzBuzzCalculator class. From an Open/Closed perspective it is probably more compliant than Solution 2, but there seems to be a lot of noise. Anonymous delegates increase the readability, which is what I have done in Solution 4.

    Solution 4 – Anon Delegates

    public delegate bool FizzBuzzComparison(int number);

    public class FizzBuzzCalculator
    {
        private readonly Dictionary<Tuple<bool, bool>, string> _mappings;
        private readonly FizzBuzzComparison _comparison1;
        private readonly FizzBuzzComparison _comparison2;

        public FizzBuzzCalculator(Dictionary<Tuple<bool, bool>, string> mappings, FizzBuzzComparison comparison1, FizzBuzzComparison comparison2)
        {
            _mappings = mappings;
            _comparison1 = comparison1;
            _comparison2 = comparison2;
        }

        public string NumberFormat(int number)
        {
            var mappedKey = new Tuple<bool, bool>(_comparison1(number), _comparison2(number));
            return String.Format("{0} {1}", number, _mappings[mappedKey]);
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            var mappings = new Dictionary<Tuple<bool, bool>, string>
            {
                { new Tuple<bool, bool>(true, true), "- FizzBuzz"},
                { new Tuple<bool, bool>(true, false), "- Fizz"},
                { new Tuple<bool, bool>(false, true), "- Buzz"},
                { new Tuple<bool, bool>(false, false), ""}
            };
            var fizzBuzz = new FizzBuzzCalculator(mappings, (n) => n % 3 == 0, (n) => n % 5 == 0);
            for (int i = 0; i < 100; i++)
            {
                Console.WriteLine(fizzBuzz.NumberFormat(i));
            }
            Console.ReadLine();
        }
    }

    Using the anonymous delegates I think the noise level has now been reduced. This is where I am going to end this post. I have gone through 4 iterations of the code, from the initial solution using if..else to delegates and dictionaries. I think each approach has its pros and cons, and the context in which the code would be used would be a large determining factor. If you can think of an alternative way to do FizzBuzz, add a comment!
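    Taking up the post's invitation, one more variation (my own sketch, not part of the original): replace the custom delegate type with the built-in Func<int, bool> and drive the output from an ordered rule list, so adding a new divisor means adding one line of data rather than a new constructor parameter.

    using System;
    using System.Collections.Generic;

    class FizzBuzzRules
    {
        static void Main()
        {
            // First matching rule wins, so 15 must come before 3 and 5.
            var rules = new List<Tuple<Func<int, bool>, string>>
            {
                Tuple.Create<Func<int, bool>, string>(n => n % 15 == 0, "- FizzBuzz"),
                Tuple.Create<Func<int, bool>, string>(n => n % 3 == 0, "- Fizz"),
                Tuple.Create<Func<int, bool>, string>(n => n % 5 == 0, "- Buzz"),
            };

            for (int i = 0; i < 100; i++)
            {
                string suffix = "";
                foreach (var rule in rules)
                {
                    if (rule.Item1(i)) { suffix = rule.Item2; break; }
                }
                Console.WriteLine("{0} {1}", i, suffix);
            }
        }
    }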

    Read the article

  • What does Ruby have that Python doesn't, and vice versa?

    - by Lennart Regebro
    There are a lot of discussions of Python vs Ruby, and I find them all completely unhelpful, because they all turn around why feature X sucks in language Y, or claim that language Y doesn't have X, although in fact it does. I also know exactly why I prefer Python, but that's also subjective, and wouldn't help anybody choosing, as they might not have the same tastes in development as I do. It would therefore be interesting to list the differences, objectively. So no "Python's lambdas suck". Instead explain what Ruby's lambdas can do that Python's can't. No subjectivity. Example code is good! Don't have several differences in one answer, please. And vote up the ones you know are correct, and down those you know are incorrect (or are subjective). Also, differences in syntax are not interesting. We know Python does with indentation what Ruby does with brackets and ends, and that @ is called self in Python.

    UPDATE: This is now a community wiki, so we can add the big differences here.

    Ruby has a class reference in the class body

    In Ruby you have a reference to the class (self) already in the class body. In Python you don't have a reference to the class until after the class construction is finished. An example:

    class Kaka
      puts self
    end

    self in this case is the class, and this code would print out "Kaka". There is no way to print out the class name or in other ways access the class from the class definition body in Python.

    All classes are mutable in Ruby

    This lets you develop extensions to core classes. Here's an example of a rails extension:

    class String
      def starts_with?(other)
        head = self[0, other.length]
        head == other
      end
    end

    Ruby has Perl-like scripting features

    Ruby has first class regexps, $-variables, the awk/perl line by line input loop and other features that make it more suited to writing small shell scripts that munge text files or act as glue code for other programs.

    Ruby has first class continuations

    Thanks to the callcc statement. In Python you can create continuations by various techniques, but there is no support built in to the language.

    Ruby has blocks

    With the "do" statement you can create a multi-line anonymous function in Ruby, which will be passed in as an argument into the method in front of do, and called from there. In Python you would instead do this either by passing a method or with generators.

    Ruby:

    amethod { |here|
      many=lines+of+code
      goes(here)
    }

    Python:

    def function(here):
        many=lines+of+code
        goes(here)

    amethod(function)

    Interestingly, the convenience statement in Ruby for calling a block is called "yield", which in Python will create a generator.

    Ruby:

    def themethod
      yield 5
    end

    themethod do |foo|
      puts foo
    end

    Python:

    def themethod():
        yield 5

    for foo in themethod():
        print foo

    Although the principles are different, the result is strikingly similar.

    Python has built-in generators (which are used like Ruby blocks, as noted above)

    Python has support for generators in the language. In Ruby you could use the generator module, which uses continuations to create a generator from a block. Or, you could just use a block/proc/lambda! Moreover, in Ruby 1.9 Fibers are, and can be used as, generators. docs.python.org has this generator example:

    def reverse(data):
        for index in range(len(data)-1, -1, -1):
            yield data[index]

    Contrast this with the above block examples.

    Python has flexible name space handling

    In Ruby, when you import a file with require, all the things defined in that file will end up in your global namespace. This causes namespace pollution.
The solution to that is Rubys modules. But if you create a namespace with a module, then you have to use that namespace to access the contained classes. In Python, the file is a module, and you can import its contained names with from themodule import *, thereby polluting the namespace if you want. But you can also import just selected names with from themodule import aname, another or you can simply import themodule and then access the names with themodule.aname. If you want more levels in your namespace you can have packages, which are directories with modules and an __init__.py file. Python has docstrings Docstrings are strings that are attached to modules, functions and methods and can be introspected at runtime. This helps for creating such things as the help command and automatic documentation. def frobnicate(bar): """frobnicate takes a bar and frobnicates it >>> bar = Bar() >>> bar.is_frobnicated() False >>> frobnicate(bar) >>> bar.is_frobnicated() True """ Python has more libraries Python has a vast amount of available modules and bindings for libraries. Python has multiple inheritance Ruby does not ("on purpose" -- see Ruby's website, see here how it's done in Ruby). It does reuse the module concept as a sort of abstract classes. Python has list/dict comprehensions Python: res = [x*x for x in range(1, 10)] Ruby: res = (0..9).map { |x| x * x } Python: >>> (x*x for x in range(10)) <generator object <genexpr> at 0xb7c1ccd4> >>> list(_) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] Ruby: p = proc { |x| x * x } (0..9).map(&p) Python: >>> {x:str(y*y) for x,y in {1:2, 3:4}.items()} {1: '4', 3: '16'} Ruby: >> Hash[{1=>2, 3=>4}.map{|x,y| [x,(y*y).to_s]}] => {1=>"4", 3=>"16"} Python has decorators Things similar to decorators can be created in Ruby, and it can also be argued that they aren't as necessary as in Python.

    Read the article

  • how to define a field of view for the entire map for shadow?

    - by Mehdi Bugnard
    I recently added "Shadow Mapping" to my XNA game to include shadows. I followed the nice and famous tutorial from "Riemers": http://www.riemers.net/eng/Tutorials/XNA/Csharp/Series3/Shadow_map.php . The code works nicely and I can see my light source and shadows. But the problem is that my light source does not match the field of view that I created. I want the light to cover the entire map of my game. I don't know why, but the light only affects 2-3 cubes of my map.

    ScreenShot: (the emission of light illuminates only 2-3 blocks and not the full map)

    Here is the code where I create the field of view for the LightviewProjection matrix:

        Vector3 lightDir = new Vector3(10, 52, 10);
        lightPos = new Vector3(10, 52, 10);
        Matrix lightsView = Matrix.CreateLookAt(lightPos, new Vector3(105, 50, 105), new Vector3(0, 1, 0));
        Matrix lightsProjection = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver2, 1f, 20f, 1000f);
        lightsViewProjectionMatrix = lightsView * lightsProjection;

    As you can see, my near and far planes are set to 20f and 1000f, so I don't know why the light stops after 2 cubes. Its reach should be much bigger.

    Here is where I set the values on my custom HLSL effect in the shader file:

        /* SHADOW VALUE */
        effectWorld.Parameters["LightDirection"].SetValue(lightDir);
        effectWorld.Parameters["xLightsWorldViewProjection"].SetValue(Matrix.Identity * lightsViewProjectionMatrix);
        effectWorld.Parameters["xWorldViewProjection"].SetValue(Matrix.Identity * arcadia.camera.View * arcadia.camera.Projection);
        effectWorld.Parameters["xLightPower"].SetValue(1f);
        effectWorld.Parameters["xAmbient"].SetValue(0.3f);

    Here is my custom HLSL shader effect file "*.fx":

        // This sample uses a simple Lambert lighting model.
        float3 LightDirection = normalize(float3(-1, -1, -1));
        float3 DiffuseLight = 1.25;
        float3 AmbientLight = 0.25;

        uniform const float3 DiffuseColor = 1;
        uniform const float Alpha = 1;
        uniform const float3 EmissiveColor = 0;
        uniform const float3 SpecularColor = 1;
        uniform const float SpecularPower = 16;
        uniform const float3 EyePosition;

        // FOG attributes
        uniform const float FogEnabled;
        uniform const float FogStart;
        uniform const float FogEnd;
        uniform const float3 FogColor;

        float3 cameraPos : CAMERAPOS;

        texture Texture;
        sampler Sampler = sampler_state
        {
            Texture = (Texture);
            magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR;
            AddressU = mirror; AddressV = mirror;
        };

        texture xShadowMap;
        sampler ShadowMapSampler = sampler_state
        {
            Texture = <xShadowMap>;
            magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR;
            AddressU = clamp; AddressV = clamp;
        };

        /* *************** */
        /* SHADOW MAP CODE */
        /* *************** */

        struct SMapVertexToPixel
        {
            float4 Position : POSITION;
            float4 Position2D : TEXCOORD0;
        };

        struct SMapPixelToFrame
        {
            float4 Color : COLOR0;
        };

        struct SSceneVertexToPixel
        {
            float4 Position : POSITION;
            float4 Pos2DAsSeenByLight : TEXCOORD0;
            float2 TexCoords : TEXCOORD1;
            float3 Normal : TEXCOORD2;
            float4 Position3D : TEXCOORD3;
        };

        struct SScenePixelToFrame
        {
            float4 Color : COLOR0;
        };

        float DotProduct(float3 lightPos, float3 pos3D, float3 normal)
        {
            float3 lightDir = normalize(pos3D - lightPos);
            return dot(-lightDir, normal);
        }

        SSceneVertexToPixel ShadowedSceneVertexShader(float4 inPos : POSITION, float2 inTexCoords : TEXCOORD0, float3 inNormal : NORMAL)
        {
            SSceneVertexToPixel Output = (SSceneVertexToPixel)0;
            Output.Position = mul(inPos, xWorldViewProjection);
            Output.Pos2DAsSeenByLight = mul(inPos, xLightsWorldViewProjection);
            Output.Normal = normalize(mul(inNormal, (float3x3)World));
            Output.Position3D = mul(inPos, World);
            Output.TexCoords = inTexCoords;
            return Output;
        }

        SScenePixelToFrame ShadowedScenePixelShader(SSceneVertexToPixel PSIn)
        {
            SScenePixelToFrame Output = (SScenePixelToFrame)0;
            float2 ProjectedTexCoords;
            ProjectedTexCoords[0] = PSIn.Pos2DAsSeenByLight.x / PSIn.Pos2DAsSeenByLight.w / 2.0f + 0.5f;
            ProjectedTexCoords[1] = -PSIn.Pos2DAsSeenByLight.y / PSIn.Pos2DAsSeenByLight.w / 2.0f + 0.5f;
            float diffuseLightingFactor = 0;
            if ((saturate(ProjectedTexCoords).x == ProjectedTexCoords.x) && (saturate(ProjectedTexCoords).y == ProjectedTexCoords.y))
            {
                float depthStoredInShadowMap = tex2D(ShadowMapSampler, ProjectedTexCoords).r;
                float realDistance = PSIn.Pos2DAsSeenByLight.z / PSIn.Pos2DAsSeenByLight.w;
                if ((realDistance - 1.0f / 100.0f) <= depthStoredInShadowMap)
                {
                    diffuseLightingFactor = DotProduct(xLightPos, PSIn.Position3D, PSIn.Normal);
                    diffuseLightingFactor = saturate(diffuseLightingFactor);
                    diffuseLightingFactor *= xLightPower;
                }
            }
            float4 baseColor = tex2D(Sampler, PSIn.TexCoords);
            Output.Color = baseColor * (diffuseLightingFactor + xAmbient);
            return Output;
        }

        SMapVertexToPixel ShadowMapVertexShader(float4 inPos : POSITION)
        {
            SMapVertexToPixel Output = (SMapVertexToPixel)0;
            Output.Position = mul(inPos, xLightsWorldViewProjection);
            Output.Position2D = Output.Position;
            return Output;
        }

        SMapPixelToFrame ShadowMapPixelShader(SMapVertexToPixel PSIn)
        {
            SMapPixelToFrame Output = (SMapPixelToFrame)0;
            Output.Color = PSIn.Position2D.z / PSIn.Position2D.w;
            return Output;
        }

        /* ******************* */
        /* END SHADOW MAP CODE */
        /* ******************* */

        // For rendering without instancing.
        technique ShadowMap
        {
            pass Pass0
            {
                VertexShader = compile vs_2_0 ShadowMapVertexShader();
                PixelShader = compile ps_2_0 ShadowMapPixelShader();
            }
        }

        technique ShadowedScene
        {
            /*
            pass Pass0
            {
                VertexShader = compile vs_2_0 VSBasicTx();
                PixelShader = compile ps_2_0 PSBasicTx();
            }
            */
            pass Pass1
            {
                VertexShader = compile vs_2_0 ShadowedSceneVertexShader();
                PixelShader = compile ps_2_0 ShadowedScenePixelShader();
            }
        }

        technique SimpleFog
        {
            pass Pass0
            {
                VertexShader = compile vs_2_0 VSBasicTx();
                PixelShader = compile ps_2_0 PSBasicTx();
            }
        }

    I edited my fx file to show you only the information and functions related to the shadow ;-)
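    The question is essentially about which world-space points the light's view-projection actually covers, so here is a rough, hypothetical Python/NumPy sketch (not XNA code) that rebuilds the same look-at and perspective matrices and tests whether sample points land inside the light frustum. The matrix conventions are assumed to mirror XNA's right-handed, depth-in-[0,1] ones, and all names are invented:

        import numpy as np

        def look_at(eye, target, up):
            # Right-handed look-at; rows are right, up, -forward,
            # assumed to mirror XNA's Matrix.CreateLookAt.
            f = target - eye; f = f / np.linalg.norm(f)
            s = np.cross(f, up); s = s / np.linalg.norm(s)
            u = np.cross(s, f)
            m = np.eye(4)
            m[0, :3], m[1, :3], m[2, :3] = s, u, -f
            m[:3, 3] = -m[:3, :3] @ eye
            return m

        def perspective(fov_y, aspect, z_near, z_far):
            # DirectX-style projection with depth mapped to [0, 1],
            # assumed to mirror Matrix.CreatePerspectiveFieldOfView.
            f = 1.0 / np.tan(fov_y / 2.0)
            m = np.zeros((4, 4))
            m[0, 0] = f / aspect
            m[1, 1] = f
            m[2, 2] = z_far / (z_near - z_far)
            m[2, 3] = z_near * z_far / (z_near - z_far)
            m[3, 2] = -1.0
            return m

        def in_light_frustum(point, view_proj):
            clip = view_proj @ np.append(point, 1.0)
            if clip[3] <= 0:
                return False
            x, y, z = clip[:3] / clip[3]
            return abs(x) <= 1 and abs(y) <= 1 and 0 <= z <= 1

        light_pos = np.array([10.0, 52.0, 10.0])
        vp = perspective(np.pi / 2, 1.0, 20.0, 1000.0) @ look_at(
            light_pos, np.array([105.0, 50.0, 105.0]), np.array([0.0, 1.0, 0.0]))

        # A cube right next to the light sits inside the 20-unit near
        # plane and is clipped; a distant one is covered.
        for p in ([20.0, 50.0, 20.0], [105.0, 50.0, 105.0]):
            print(p, in_light_frustum(np.array(p), vp))

    Under these assumptions, geometry standing close to the light falls in front of the 20-unit near plane and is clipped out of the shadow map, which is one plausible reason only part of the map appears lit.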

    Read the article

  • Neural Network Always Produces Same/Similar Outputs for Any Input

    - by l33tnerd
    I have a problem where I am trying to create a neural network for Tic-Tac-Toe. However, for some reason, training the neural network causes it to produce nearly the same output for any given input. I did take a look at Artificial neural networks benchmark, but my network implementation is built for neurons with the same activation function for each neuron, i.e. no constant neurons.

    To make sure the problem wasn't just due to my choice of training set (1218 board states and moves generated by a genetic algorithm), I tried to train the network to reproduce XOR. The logistic activation function was used. Instead of using the derivative, I multiplied the error by output*(1-output), as some sources suggested that this was equivalent to using the derivative. I can put the Haskell source on HPaste, but it's a little embarrassing to look at. The network has 3 layers: the first layer has 2 inputs and 4 outputs, the second has 4 inputs and 1 output, and the third has 1 output. Increasing to 4 neurons in the second layer didn't help, and neither did increasing to 8 outputs in the first layer.

    I then calculated the errors, network output, bias updates and weight updates by hand, based on http://hebb.mit.edu/courses/9.641/2002/lectures/lecture04.pdf, to make sure there wasn't an error in those parts of the code (there wasn't, but I will probably do it again just to make sure). Because I am using batch training, I did not multiply by x in equation (4) there. I am adding the weight change, though http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-2.html suggests subtracting it instead.

    The problem persisted, even in this simplified network. For example, these are the results after 500 epochs of batch training and of incremental training:

        Input    |Target|Output (Batch)      |Output (Incremental)
        [1.0,1.0]|[0.0] |[0.5003781562785173]|[0.5009731800870864]
        [1.0,0.0]|[1.0] |[0.5003740346965251]|[0.5006347214672715]
        [0.0,1.0]|[1.0] |[0.5003734471544522]|[0.500589332376345]
        [0.0,0.0]|[0.0] |[0.5003674110937019]|[0.500095157458231]

    Subtracting instead of adding produces the same problem, except everything is 0.99-something instead of 0.50-something. 5000 epochs produces the same result, except the batch-trained network returns exactly 0.5 for each case. (Heck, even 10,000 epochs didn't work for batch training.) Is there anything in general that could produce this behavior?

    Also, I looked at the intermediate errors for incremental training, and although the inputs of the hidden/input layers varied, the error for the output neuron was always +/-0.12. For batch training, the errors were increasing, but extremely slowly, and the errors were all extremely small (x10^-7). Different initial random weights and biases made no difference, either.

    Note that this is a school project, so hints/guides would be more helpful. Although reinventing the wheel and making my own network (in a language I don't know well!) was a horrible idea, I felt it would be more appropriate for a school project (so I know what's going on... in theory, at least. There doesn't seem to be a computer science teacher at my school).

    EDIT: Two layers, an input layer of 2 inputs to 8 outputs and an output layer of 8 inputs to 1 output, produces much the same results: 0.5 +/- 0.2 (or so) for each training case. I'm also playing around with pyBrain, seeing if any network structure there will work.

    Edit 2: I am using a learning rate of 0.1. Sorry for forgetting about that.

    Edit 3: pyBrain's "trainUntilConvergence" doesn't get me a fully trained network either, but 20000 epochs does, with 16 neurons in the hidden layer. 10000 epochs and 4 neurons, not so much, but close. So, in Haskell, with the input layer having 2 inputs & 2 outputs, a hidden layer with 2 inputs and 8 outputs, and an output layer with 8 inputs and 1 output... I get the same problem with 10000 epochs. And with 20000 epochs.

    Edit 4: I ran the network by hand again based on the MIT PDF above, and the values match, so the code should be correct unless I am misunderstanding those equations. Some of my source code is at http://hpaste.org/42453/neural_network__not_working; I'm working on cleaning my code somewhat and putting it in a GitHub (rather than a private Bitbucket) repository. All of the relevant source code is now at https://github.com/l33tnerd/hsann.
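    For anyone debugging a similar symptom, here is a small, self-contained NumPy sketch (not the asker's Haskell) of a 2-4-1 logistic network trained on XOR with the same error*output*(1-output) delta the question describes; all names are illustrative. If a setup like this converges while your own implementation does not, the difference is usually in the bias handling or the sign of the weight update:

        import numpy as np

        rng = np.random.default_rng(0)
        X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
        Y = np.array([[0], [1], [1], [0]], dtype=float)

        W1 = rng.normal(0.0, 1.0, (2, 4)); b1 = np.zeros(4)   # hidden layer
        W2 = rng.normal(0.0, 1.0, (4, 1)); b2 = np.zeros(1)   # output layer
        sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

        lr = 0.5
        for epoch in range(20000):                 # batch training
            h = sigmoid(X @ W1 + b1)               # hidden activations
            out = sigmoid(h @ W2 + b2)             # network output
            d_out = (out - Y) * out * (1 - out)    # delta at the output layer
            d_h = (d_out @ W2.T) * h * (1 - h)     # delta at the hidden layer
            W2 -= lr * h.T @ d_out; b2 -= lr * d_out.sum(axis=0)
            W1 -= lr * X.T @ d_h;   b1 -= lr * d_h.sum(axis=0)

        print(np.round(out, 3))  # should approach [[0], [1], [1], [0]]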

    Read the article

  • What is stopping data flow with .NET 3.5 asynchronous System.Net.Sockets.Socket?

    - by TonyG
    I have a .NET 3.5 client/server socket interface using the asynchronous methods. The client connects to the server and the connection should remain open until the app terminates. The protocol consists of the following pattern:

        send stx
        receive ack
        send data1
        receive ack
        send data2
        (repeat 5-6 while more data)
        receive ack
        send etx

    So a single transaction with two datablocks as above would consist of 4 sends from the client. After sending etx the client simply waits for more data to send out, then begins the next transmission with stx. I do not want to break the connection between individual exchanges or after each stx/data/etx payload. Right now, after connecting, the client can send the first stx and get a single ack, but I can't put more data onto the wire after that. Neither side disconnects; the socket is still intact.

    The client code is seriously abbreviated as follows - I'm following the pattern commonly available in online code samples.

        private void SendReceive(string data)
        {
            // ...
            SocketAsyncEventArgs completeArgs;
            completeArgs.Completed += new EventHandler<SocketAsyncEventArgs>(OnSend);
            clientSocket.SendAsync(completeArgs);
            // two AutoResetEvents, one for send, one for receive
            if ( !AutoResetEvent.WaitAll(autoSendReceiveEvents , -1) )
                Log("failed");
            else
                Log("success");
            // ...
        }

        private void OnSend( object sender , SocketAsyncEventArgs e )
        {
            // ...
            Socket s = e.UserToken as Socket;
            byte[] receiveBuffer = new byte[ 4096 ];
            e.SetBuffer(receiveBuffer , 0 , receiveBuffer.Length);
            e.Completed += new EventHandler<SocketAsyncEventArgs>(OnReceive);
            s.ReceiveAsync(e);
            // ...
        }

        private void OnReceive( object sender , SocketAsyncEventArgs e )
        {
            // ...
            if ( e.BytesTransferred > 0 )
            {
                Int32 bytesTransferred = e.BytesTransferred;
                String received = Encoding.ASCII.GetString(e.Buffer , e.Offset , bytesTransferred);
                dataReceived += received;
            }
            autoSendReceiveEvents[ SendOperation ].Set();    // could be moved elsewhere
            autoSendReceiveEvents[ ReceiveOperation ].Set(); // releases mutexes
        }

    The code on the server is very similar, except that it receives first and then sends a response - the server is not doing anything (that I can tell) to modify the connection after it sends a response. The problem is that the second time I hit SendReceive in the client, the connection is already in a weird state. Do I need to do something in the client to preserve the SocketAsyncEventArgs and re-use the same object for the lifetime of the socket/connection? I'm not sure which eventargs object should hang around during the life of the connection or a given exchange. Do I need to do something, or not do something, in the server to ensure it continues to receive data? The server setup and response processing looks like this:

        void Start()
        {
            // ...
            listenSocket.Bind(...);
            listenSocket.Listen(0);
            StartAccept(null); // note accept as soon as we start. OK?
            mutex.WaitOne();
        }

        void StartAccept(SocketAsyncEventArgs acceptEventArg)
        {
            if ( acceptEventArg == null )
            {
                acceptEventArg = new SocketAsyncEventArgs();
                acceptEventArg.Completed += new EventHandler<SocketAsyncEventArgs>(OnAcceptCompleted);
            }
            Boolean willRaiseEvent = this.listenSocket.AcceptAsync(acceptEventArg);
            if ( !willRaiseEvent )
                ProcessAccept(acceptEventArg);
            // ...
        }

        private void OnAcceptCompleted( object sender , SocketAsyncEventArgs e )
        {
            ProcessAccept(e);
        }

        private void ProcessAccept( SocketAsyncEventArgs e )
        {
            // ...
            SocketAsyncEventArgs readEventArgs = new SocketAsyncEventArgs();
            readEventArgs.SetBuffer(dataBuffer , 0 , Int16.MaxValue);
            readEventArgs.Completed += new EventHandler<SocketAsyncEventArgs>(OnIOCompleted);
            readEventArgs.UserToken = e.AcceptSocket;
            dataReceived = ""; // note server is degraded for single client/thread use
            // As soon as the client is connected, post a receive to the connection.
            Boolean willRaiseEvent = e.AcceptSocket.ReceiveAsync(readEventArgs);
            if ( !willRaiseEvent )
                this.ProcessReceive(readEventArgs);
            // Accept the next connection request.
            this.StartAccept(e);
        }

        private void OnIOCompleted( object sender , SocketAsyncEventArgs e )
        {
            switch ( e.LastOperation )
            {
                case SocketAsyncOperation.Receive:
                    ProcessReceive(e); // similar to client code; operate on dataReceived here
                    break;
                case SocketAsyncOperation.Send:
                    ProcessSend(e); // similar to client code
                    break;
            }
        }

        // execute this when data has been processed into a response (ack, etc)
        private void SendResponseToClient(string response)
        {
            // create buffer with response
            // currentEventArgs has class scope and is re-used
            currentEventArgs.SetBuffer(sendBuffer , 0 , sendBuffer.Length);
            Boolean willRaiseEvent = currentClient.SendAsync(currentEventArgs);
            if ( !willRaiseEvent )
                ProcessSend(currentEventArgs);
        }

    A .NET trace shows the following when sending ABC\r\n:

        Socket#7588182::SendAsync()
        Socket#7588182::SendAsync(True#1)
        Data from Socket#7588182::FinishOperation(SendAsync)
        00000000 : 41 42 43 0D 0A
        Socket#7588182::ReceiveAsync()
        Exiting Socket#7588182::ReceiveAsync() - True#1

    And it stops there. It looks just like the first send from the client, but the server shows no activity. I think that could be info overload for now, but I'll be happy to provide more details as required. Thanks!
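    As a language-neutral illustration of the intended wire pattern (one persistent connection, an ack per frame, repeated transactions), here is a small Python asyncio sketch. This is not the .NET SocketAsyncEventArgs API in question; the line-based framing and all names are invented for the demo:

        import asyncio

        STX, ETX, ACK = b"STX\n", b"ETX\n", b"ACK\n"

        async def handle(reader, writer):
            # Server: ack every frame except ETX; the connection stays
            # open across transactions, as the question requires.
            while line := await reader.readline():
                if line != ETX:
                    writer.write(ACK)
                    await writer.drain()

        async def client():
            reader, writer = await asyncio.open_connection("127.0.0.1", 8888)
            for _ in range(2):                       # two back-to-back transactions
                writer.write(STX); await writer.drain()
                await reader.readline()              # ack
                for data in (b"data1\n", b"data2\n"):
                    writer.write(data); await writer.drain()
                    await reader.readline()          # ack
                writer.write(ETX); await writer.drain()
            writer.close(); await writer.wait_closed()

        async def main():
            server = await asyncio.start_server(handle, "127.0.0.1", 8888)
            async with server:
                await client()
            print("two transactions completed over one connection")

        asyncio.run(main())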

    Read the article

  • Very simple, terse and easy GUI programming “frameworks”

    - by jetxee
    Please list GUI programming libraries, toolkits and frameworks which allow you to write GUI apps quickly. I mean in such a way that:

    - the GUI is described entirely in a human-readable (and human-writable) plain text file (code)
    - the code is terse (1 or 2 lines of code per widget/event pair), suitable for scripting
    - the structure and operation of the GUI are evident from the code (nesting of widgets and flow of events)
    - details about how to build the GUI are hidden (things like the mainloop, attaching event listeners, etc.)
    - auto-layouts are supported (vboxes, hboxes, etc.)

    As answers suggest, this may be defined as declarative GUI programming, but it is not necessarily such. Any approach is OK if it works, is easy to use and is terse. There are some GUI libraries/toolkits like this; they are listed below. Please extend the list if you see a qualifying toolkit missing. Indicate if the project is crossplatform, mature and active, and give an example if possible. Please use this wiki to discuss only Open Source projects. This is the list so far (in alphabetical order):

    Fudgets: a Haskell library. Platform: Unix. Status: experimental, but still maintained. An example:

        import Fudgets
        main = fudlogue (shellF "Hello" (labelF "Hello, world!" >+< quitButtonF))

    GNUstep Renaissance: Renaissance allows describing the GUI in simple XML. Platforms: OSX/GNUstep. Status: part of GNUstep. An example below:

        <window title="Example">
          <vbox>
            <label font="big">
              Click the button below to quit the application
            </label>
            <button title="Quit" action="terminate:"/>
          </vbox>
        </window>

    HTML: HTML-based GUI (HTML + JS). Crossplatform, mature. Can be used entirely on the client side. Looking for a nice "helloworld" example.

    JavaFX: usable for standalone (desktop) apps as well as for web applications. Not completely crossplatform, not yet completely open source. Status: 1.0 release. An example (screenshot is needed):

        Frame {
          content: Button {
            text: "Press Me"
            action: operation() {
              System.out.println("You pressed me");
            }
          }
          visible: true
        }

    Phooey: another Haskell library. Crossplatform (wxWidgets), HTML+JS backend planned. Mature and active. An example (a little more than a helloworld):

        ui1 :: UI ()
        ui1 = title "Shopping List" $ do
          a <- title "apples" $ islider (0,10) 3
          b <- title "bananas" $ islider (0,10) 7
          title "total" $ showDisplay (liftA2 (+) a b)

    PythonCard: describes the GUI in a Python dictionary. Crossplatform (wxWidgets). Some apps use it, but the project seems stalled. There is an active fork. I skip the PythonCard example because it is too verbose for the contest.

    Shoes: Shoes for Ruby. Platforms: Win/OSX/GTK+. Status: young but active. A minimal app looks like this:

        Shoes.app {
          @push = button "Push me"
          @note = para "Nothing pushed so far"
          @push.click { @note.replace "Aha! Click!" }
        }

    Tcl/Tk: crossplatform (its own widget set). Mature (probably even dated) and active. An example:

        #!/usr/bin/env wish
        button .hello -text "Hello, World!" -command { exit }
        pack .hello
        tkwait window .

    tekUI: tekUI for Lua (and C). Platforms: X11, DirectFB. Status: alpha (usable, but the API still evolves). An example:

        #/usr/bin/env lua
        ui = require "tek.ui"
        ui.Application:new {
          Children = {
            ui.Window:new {
              Title = "Hello",
              Children = {
                ui.Text:new {
                  Text = "_Hello, World!",
                  Style = "button",
                  Mode = "button",
                },
              },
            },
          },
        }:run()

    Treethon: Treethon for Python. It describes the GUI in a YAML file (Python in a YAML tree). Platform: GTK+. Status: work in progress. A simple app looks like this:

        _import: gtk
        view: gtk.Window()
        add:
          - view: gtk.Button('Hello World')
            on clicked: print view.get_label()

    Yet unnamed Python library by Richard Jones: this one is not released yet. The idea is to use Python context managers (the with keyword) to structure GUI code. See Richard Jones' blog for details.

        with gui.vertical:
          text = gui.label('hello!')
          items = gui.selection(['one', 'two', 'three'])
          with gui.button('click me!'):
            def on_click():
              text.value = items.value
              text.foreground = red

    XUL: XUL + JavaScript may be used to create stand-alone desktop apps with XULRunner as well as Mozilla extensions. Mature, open source, crossplatform.

        <?xml version="1.0"?>
        <?xml-stylesheet href="chrome://global/skin/" type="text/css"?>
        <window id="main" title="My App" width="300" height="300"
                xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
          <caption label="Hello World"/>
        </window>

    Thank you for your contributions!
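    In the same terse spirit, Python's bundled Tkinter is worth comparing against the list above. A minimal sketch (not from the original list, just an illustration):

        import tkinter as tk  # the module is named "Tkinter" on Python 2

        root = tk.Tk()
        root.title("Hello")
        note = tk.Label(root, text="Nothing pushed so far")
        tk.Button(root, text="Push me",
                  command=lambda: note.config(text="Aha! Click!")).pack()
        note.pack()
        root.mainloop()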

    Read the article

  • Problem with entityForName & ManagedObjectContext when extending tutorial material

    - by Martin KS
    Afternoon all, I tried to add a second data entity to the persistent store in the (locations) coredata tutorial code, and then access this in a new view. I think that I've followed the tutorial, and checked that I'm doing a clean build etc, but can't see what to change to prevent it crashing. I'm afraid I'm at my wits end with this one, and can't seem to find the step that I've missed. I've pasted the header and code files below, please let me know if I need to share any more of the code. The crash seems to happen on the line: NSEntityDescription *entity = [NSEntityDescription entityForName:@"Album" inManagedObjectContext:[self managedObjectContext]]; There is one other line in the code that refers to galleryviewcontroller at the moment, and that's in the main application delegate: galleryViewController.managedObjectContext = [self managedObjectContext]; GalleryViewController.h #import <UIKit/UIKit.h> @interface GalleryViewController : UIViewController { NSManagedObjectContext *managedObjectContext; int rowNumber; IBOutlet UILabel *lblMessage; UIBarButtonItem *addButton; NSMutableArray *imagesArray; } @property (readwrite) int rowNumber; @property (nonatomic,retain) UILabel *lblMessage; @property (nonatomic,retain) NSMutableArray *imagesArray; @property (nonatomic, retain) NSManagedObjectContext *managedObjectContext; @property (nonatomic, retain) UIBarButtonItem *addButton; -(void)updateRowNumber:(int)theIndex; -(void)addImage; @end GalleryViewController.m #import "RootViewController.h" #import "LocationsAppDelegate.h" #import "Album.h" #import "GalleryViewController.h" #import "Image.h" @implementation GalleryViewController @synthesize lblMessage,rowNumber,addButton,managedObjectContext; @synthesize imagesArray; /* // The designated initializer. Override if you create the controller programmatically and want to perform customization that is not appropriate for viewDidLoad. - (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil { if ((self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil])) { // Custom initialization } return self; } */ -(void)updateRowNumber:(int)theIndex{ rowNumber=theIndex; LocationsAppDelegate *mainDelegate =(LocationsAppDelegate *)[[UIApplication sharedApplication] delegate]; Album *anAlbum = [mainDelegate.albumsArray objectAtIndex:rowNumber]; lblMessage.text = anAlbum.uniqueAlbumIdentifier; } // Implement viewDidLoad to do additional setup after loading the view, typically from a nib. - (void)viewDidLoad { [super viewDidLoad]; addButton = [[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemAdd target:self action:@selector(addImage)]; addButton.enabled = YES; self.navigationItem.rightBarButtonItem = addButton; /* Found this in another answer, adding it to the code didn't help. if (managedObjectContext == nil) { managedObjectContext = [[[UIApplication sharedApplication] delegate] managedObjectContext]; } */ NSFetchRequest *request = [[NSFetchRequest alloc] init]; NSEntityDescription *entity = [NSEntityDescription entityForName:@"Album" inManagedObjectContext:[self managedObjectContext]]; [request setEntity:entity]; // Order the albums by creation date, most recent first. 
NSSortDescriptor *sortDescriptor = [[NSSortDescriptor alloc] initWithKey:@"imagePath" ascending:NO]; NSArray *sortDescriptors = [[NSArray alloc] initWithObjects:sortDescriptor, nil]; [request setSortDescriptors:sortDescriptors]; [sortDescriptor release]; [sortDescriptors release]; // Execute the fetch -- create a mutable copy of the result. NSError *error = nil; NSMutableArray *mutableFetchResults = [[managedObjectContext executeFetchRequest:request error:&error] mutableCopy]; if (mutableFetchResults == nil) { // Handle the error. } [self setImagesArray:mutableFetchResults]; int a = 5; int b = 10; for( int i=0; i<[imagesArray count]; i++ ) { if( a == 325 ) { a = 5; b += 70; } UIImageView *any = [[UIImageView alloc] initWithFrame:CGRectMake(a,b,70,60)]; any.image = [imagesArray objectAtIndex:i]; any.tag = i; [self.view addSubview:any]; [any release]; a += 80; } } -(void)addImage{ NSString *msg = [NSString stringWithFormat:@"%i",rowNumber]; UIAlertView *alert = [[UIAlertView alloc] initWithTitle:@"Add image to" message:msg delegate:self cancelButtonTitle:@"No" otherButtonTitles:@"Yes", nil]; [alert show]; [alert release]; } - (void)didReceiveMemoryWarning { // Releases the view if it doesn't have a superview. [super didReceiveMemoryWarning]; // Release any cached data, images, etc that aren't in use. } - (void)viewDidUnload { [super viewDidUnload]; } - (void)dealloc { [lblMessage release]; [managedObjectContext release]; [super dealloc]; } @end

    Read the article

  • Checking who is connected to your server, with PowerShell.

    - by Fatherjack
    There are many occasions when, as a DBA, you want to see who is connected to your SQL Server, along with how they are connecting and what sort of activities they are carrying out. I'm going to look at a couple of ways of getting this information and compare the effort required and the results achieved by each. SQL Server comes with a couple of stored procedures to help with this sort of task: sp_who and its undocumented counterpart sp_who2. There is also the pumped-up version of these called sp_whoisactive, written by Adam Machanic, which does way more than these procedures. I wholly recommend you try it out if you don't already know how it works. When it comes to serious interrogation of your SQL Server activity it is absolutely indispensable. Anyway, back to the point of this blog: we are going to look at getting the information from sp_who2 for a remote server. I wrote this PowerShell script a week or so ago and was quietly happy with it for a while. I'm relatively new to PowerShell, so forgive both my rather low threshold for entertainment and the fact that something so simple is a moderate achievement for me.

        $Server = 'SERVERNAME'
        $SMOServer = New-Object Microsoft.SQLServer.Management.SMO.Server $Server
        # connection and query stuff
        $ConnectionStr = "Server=$Server;Database=Master;Integrated Security=True"
        $Query = "EXEC sp_who2"
        $Connection = new-object system.Data.SQLClient.SQLConnection
        $Table = new-object "System.Data.DataTable"
        $Connection.connectionstring = $ConnectionStr
        try{
            $Connection.open()
            $Command = $Connection.CreateCommand()
            $Command.commandtext = $Query
            $result = $Command.ExecuteReader()
            $Table.Load($result)
        }
        catch{
            # Show error
            $error[0] | format-list -Force
        }
        $Title = "Data access processes (" + $Table.Rows.Count + ")"
        $Table | Out-GridView -Title $Title
        $Connection.close()

    So this is pretty straightforward: create an SMO object that represents our chosen server, define a connection to the database and a table object for the results when we get them, execute our query over the connection, load the results into our table object and then, if everything is error free, display these results in the PowerShell grid viewer. The query simply gets the results of 'EXEC sp_who2' for us. How many connections there are will influence how long the query runs. The grid viewer lets me sort and search the results, so it can be a pretty handy way to locate troublesome connections. Like I say, I was quite pleased with this; it seems a pretty simple script and was working well for me, and I have added a few parameters to control the output and give me more specific details. But then I saw a script that uses the $SMOServer object itself to provide the process information, saving me from having to define the connection object and query specifications.

        $Server = 'SERVERNAME'
        $SMOServer = New-Object Microsoft.SQLServer.Management.SMO.Server $Server
        $Processes = $SMOServer.EnumProcesses()
        $Title = "SMO processes (" + $Processes.Rows.Count + ")"
        $Processes | Out-GridView -Title $Title

    Create the SMO object of our server and then call the EnumProcesses method to get all the process information from the server. Staggeringly simple! The results are a little different, though. Some columns are the same and we can see the same basic information, so my first thought was to ask which runs faster, so that I can get my results more quickly and also place less stress on my server(s). PowerShell comes with a great way of testing this: the Measure-Command function.
    All you have to do is wrap your piece of code in Measure-Command {[your code here]} and it will spit out the time taken to execute the code. So, I placed both of the above methods of getting SQL Server process connections in two Measure-Command wrappers and pressed F5! The PowerShell console goes blank for a while as the code is executed internally when Measure-Command is used, but the grid viewer windows appear and the console shows this. You can take the output from Measure-Command and format it for easier reading, but in a simple comparison like this we can simply cross-reference the TotalMilliseconds values from the two result sets to see how the two methods performed. The query execution method (running EXEC sp_who2) is the first set of timings and the SMO EnumProcesses is the second. I have run these on a variety of servers, and while the results vary from execution to execution, I have never seen the SMO version slower than the other. The difference has varied, and the time for both has ranged from sub-second as we see above to almost 5 seconds on other systems. This difference, I would suggest, is partly due to the overhead of having to construct the data connection and so on, whereas the SMO EnumProcesses method already has the connection to the server in place and just needs to call back the process information. There is also the difference in the data sets to consider. Let's take a look at what we get and where the two methods differ:

        sp_who2     | SMO EnumProcesses  | Description
        ------------|--------------------|-------------------------------------------------------------
        -           | Urn                | What looks like an XML or JSON representation of the server name and the process ID
        SPID        | Spid               | The process ID
        Status      | Status             | The status of the process
        Login       | Login              | The login name of the user executing the command
        HostName    | Host               | The name of the computer where the process originated
        BlkBy       | BlockingSpid       | The SPID of a process that is blocking this one
        DBName      | Database           | The database that this process is connected to
        Command     | Command            | The type of command that is executing
        CPUTime     | Cpu                | The CPU activity related to this process
        DiskIO      | -                  | The disk IO activity related to this process
        LastBatch   | -                  | The time the last batch was executed from this process
        ProgramName | Program            | The application that is facilitating the process connection to the SQL Server
        SPID1       | -                  | In my experience this is always the same value as SPID
        REQUESTID   | -                  | In my experience this is always 0
        -           | Name               | In my experience this is always the same value as SPID, and so could be seen as analogous to SPID1 from sp_who2
        -           | MemUsage           | An indication of the memory used by this process, but I don't know what it is measured in (bytes, Kb, Mb...)
        -           | IsSystem           | True or False depending on whether the process is internal to the SQL Server instance or has been created by an external connection requesting data
        -           | ExecutionContextID | In my experience this is always 0, so it could be analogous to REQUESTID from sp_who2

    Please note, these are my own very brief descriptions of these columns; detail for the columns in the sp_who results can be found on MSDN here: http://msdn.microsoft.com/en-GB/library/ms174313.aspx. Where the columns are common I have used that description; in other cases the information returned is purely for interpretation by the reader. Rather annoyingly, both result sets have useful information that the other doesn't. sp_who2 returns DiskIO and LastBatch information, which is really useful, but the SMO processes method gives you IsSystem and MemUsage, which have their place in fault diagnosis methods too. So which is better? On reflection I think I prefer to use the sp_who2 method primarily, but knowing that the SMO EnumProcesses method is there when I need it is really useful, and I'm sure I'll use it regularly. I'm OK with the fact that it is the slower method, because Measure-Command has shown me how close it is to the other option and that the margin really isn't large enough to matter.
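    For readers who want the same measure-and-compare pattern outside PowerShell, here is a rough Python sketch of the idea. The function names and the two toy workloads are invented for illustration and have nothing to do with SQL Server:

        import time

        def measure(label, fn):
            # Rough analogue of Measure-Command: time one call of fn.
            start = time.perf_counter()
            fn()
            elapsed_ms = (time.perf_counter() - start) * 1000.0
            print("%s: %.1f ms" % (label, elapsed_ms))

        # Compare two ways of computing the same result, as the article does.
        measure("method one", lambda: sum(x * x for x in range(1_000_000)))
        measure("method two", lambda: sum(map(lambda x: x * x, range(1_000_000))))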

    Read the article

  • How do I use texture-mapping in a simple ray tracer?

    - by fastrack20
    I am attempting to add features to a ray tracer in C++. Namely, I am trying to add texture mapping to the spheres. For simplicity, I am using an array to store the texture data. I obtained the texture data by using a hex editor and copying the correct byte values into an array in my code. This was just for my testing purposes. When the values of this array correspond to an image that is simply red, it appears to work close to what is expected, except there is no shading. The bottom right of the image shows what a correct sphere should look like; that sphere is coloured with one set colour, not a texture map. Another problem is that when the texture map contains anything other than a single colour of pixels, it turns white. My test image is a picture of water, and when it maps, it shows only one ring of bluish pixels surrounding the white colour. When this is done, it simply appears as this:

    Here are a few code snippets:

        Color getColor(const Object *object, const Ray *ray, float *t)
        {
            if (object->materialType == TEXTDIF || object->materialType == TEXTMATTE) {
                float distance = *t;
                Point pnt = ray->origin + ray->direction * distance;
                Point oc = object->center;
                Vector ve = Point(oc.x, oc.y, oc.z+1) - oc;
                Normalize(&ve);
                Vector vn = Point(oc.x, oc.y+1, oc.z) - oc;
                Normalize(&vn);
                Vector vp = pnt - oc;
                Normalize(&vp);
                double phi = acos(-vn.dot(vp));
                float v = phi / M_PI;
                float u;
                float num1 = (float)acos(vp.dot(ve));
                float num = (num1 / (float)sin(phi));
                float theta = num / (float)(2 * M_PI);
                if (theta < 0 || theta == NAN) { theta = 0; }
                if (vn.cross(ve).dot(vp) > 0) {
                    u = theta;
                }
                else {
                    u = 1 - theta;
                }
                int x = (u * IMAGE_WIDTH) - 1;
                int y = (v * IMAGE_WIDTH) - 1;
                int p = (y * IMAGE_WIDTH + x) * 3;
                return Color(TEXT_DATA[p+2], TEXT_DATA[p+1], TEXT_DATA[p]);
            }
            else {
                return object->color;
            }
        };

    I call the colour code here in Trace:

        if (object->materialType == MATTE)
            return getColor(object, ray, &t);

        Ray shadowRay;
        int isInShadow = 0;
        shadowRay.origin.x = pHit.x + nHit.x * bias;
        shadowRay.origin.y = pHit.y + nHit.y * bias;
        shadowRay.origin.z = pHit.z + nHit.z * bias;
        shadowRay.direction = light->object->center - pHit;
        float len = shadowRay.direction.length();
        Normalize(&shadowRay.direction);
        float LdotN = shadowRay.direction.dot(nHit);
        if (LdotN < 0)
            return 0;
        Color lightColor = light->object->color;
        for (int k = 0; k < numObjects; k++) {
            if (Intersect(objects[k], &shadowRay, &t) && !objects[k]->isLight) {
                if (objects[k]->materialType == GLASS)
                    lightColor *= getColor(objects[k], &shadowRay, &t); // attenuate light color by glass color
                else
                    isInShadow = 1;
                break;
            }
        }
        lightColor *= 1.f / (len * len);
        return (isInShadow) ? 0 : getColor(object, &shadowRay, &t) * lightColor * LdotN;
    }

    I left out the rest of the code so as not to bog down the post, but it can be seen here. Any help is greatly appreciated. The only portion not included in the code is where I define the texture data, which, as I said, is simply taken straight from a bitmap file of the above image. Thanks.
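    As a reference point for the UV math, here is a small, hypothetical Python sketch of spherical texture mapping using atan2/asin instead of acos. One thing worth knowing: in the C++ above, theta == NAN never fires, because IEEE NaN compares unequal to everything, including itself, so acos domain errors pass straight through that guard. All names here are invented:

        import math

        def sphere_uv(p, center):
            # Map a point on a sphere to (u, v) in [0, 1] x [0, 1].
            # atan2/asin avoid the acos() domain and NaN edge cases.
            dx, dy, dz = (p[i] - center[i] for i in range(3))
            r = math.sqrt(dx*dx + dy*dy + dz*dz)
            u = 0.5 + math.atan2(dz, dx) / (2.0 * math.pi)
            v = 0.5 - math.asin(dy / r) / math.pi
            return u, v

        def sample(texture, width, height, u, v):
            # Clamp so the index can never run off the pixel array.
            x = min(int(u * width), width - 1)
            y = min(int(v * height), height - 1)
            i = (y * width + x) * 3
            return texture[i], texture[i+1], texture[i+2]

        # North pole of a unit sphere at the origin maps to v = 0.
        print(sphere_uv((0.0, 1.0, 0.0), (0.0, 0.0, 0.0)))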

    Read the article

  • Remove box2d bodies after collision detection android?

    - by jubin
    Can anyone explain how to destroy a Box2D body on collision? I have tried, but my application crashes. First I check all collisions, then add all the bodies I want to destroy to an array. I am trying to learn from this tutorial. All my bodies are falling, and I want them destroyed when they collide with my actor, a monkey; when one collides it is destroyed, but then my application crashes. I have googled this, and the usual reason given for the crash is destroying a body inside the step function, but I am removing bodies at the end of the tick method. Could anyone help me, provide code, or check my code for why I am getting this problem and how I can destroy Box2D bodies? This is what I am doing: the code drops multiple Box2D objects onto my actor monkey, and each should be destroyed when it falls on the monkey. It is destroyed, but my application crashes. Please could anyone check my code and tell me what I am doing wrong when removing bodies? static class Box2DLayer extends CCLayer { protected static final float PTM_RATIO = 32.0f; protected static final float WALK_FACTOR = 3.0f; protected static final float MAX_WALK_IMPULSE = 0.2f; protected static final float ANIM_SPEED = 0.3f; int isLeft=0; String dir=""; int x =0; float direction; CCColorLayer objectHint; // protected static final float PTM_RATIO = 32.0f; protected World _world; protected static Body spriteBody; CGSize winSize = CCDirector.sharedDirector().winSize(); private static int count = 200; protected static Body monkey_body; private static Body bodies; CCSprite monkey; float animDelay; int animPhase; CCSpriteSheet danceSheet = CCSpriteSheet.spriteSheet("phases.png"); CCSprite _block; List<Body> toDestroy = new ArrayList<Body>(); //CCSpriteSheet _spriteSheet; private static MyContactListener _contactListener = new MyContactListener(); public Box2DLayer() { this.setIsAccelerometerEnabled(true); CCSprite bg = CCSprite.sprite("jungle.png"); addChild(bg,0); bg.setAnchorPoint(0,0); bg.setPosition(0,0); CGSize s = CCDirector.sharedDirector().winSize(); // Use scaled width and height so that our boundaries always match the current screen float scaledWidth = s.width/PTM_RATIO; float scaledHeight = s.height/PTM_RATIO; Vector2 gravity = new Vector2(0.0f, -30.0f); boolean doSleep = false; _world = new World(gravity, doSleep); // Create edges around the entire screen // Define the ground body. BodyDef bxGroundBodyDef = new BodyDef(); bxGroundBodyDef.position.set(0.0f, 0.0f); // The body is also added to the world. Body groundBody = _world.createBody(bxGroundBodyDef); // Register our contact listener // Define the ground box shape. 
PolygonShape groundBox = new PolygonShape(); Vector2 bottomLeft = new Vector2(0f,0f); Vector2 topLeft = new Vector2(0f,scaledHeight); Vector2 topRight = new Vector2(scaledWidth,scaledHeight); Vector2 bottomRight = new Vector2(scaledWidth,0f); // bottom groundBox.setAsEdge(bottomLeft, bottomRight); groundBody.createFixture(groundBox,0); // top groundBox.setAsEdge(topLeft, topRight); groundBody.createFixture(groundBox,0); // left groundBox.setAsEdge(topLeft, bottomLeft); groundBody.createFixture(groundBox,0); // right groundBox.setAsEdge(topRight, bottomRight); groundBody.createFixture(groundBox,0); CCSprite floorbg = CCSprite.sprite("grassbehind.png"); addChild(floorbg,1); floorbg.setAnchorPoint(0,0); floorbg.setPosition(0,0); CCSprite floorfront = CCSprite.sprite("grassfront.png"); floorfront.setTag(2); this.addBoxBodyForSprite(floorfront); addChild(floorfront,3); floorfront.setAnchorPoint(0,0); floorfront.setPosition(0,0); addChild(danceSheet); //CCSprite monkey = CCSprite.sprite(danceSheet, CGRect.make(0, 0, 48, 73)); //addChild(danceSprite); monkey = CCSprite.sprite("arms_up.png"); monkey.setTag(2); monkey.setPosition(200,100); BodyDef spriteBodyDef = new BodyDef(); spriteBodyDef.type = BodyType.DynamicBody; spriteBodyDef.bullet=true; spriteBodyDef.position.set(200 / PTM_RATIO, 300 / PTM_RATIO); monkey_body = _world.createBody(spriteBodyDef); monkey_body.setUserData(monkey); PolygonShape spriteShape = new PolygonShape(); spriteShape.setAsBox(monkey.getContentSize().width/PTM_RATIO/2, monkey.getContentSize().height/PTM_RATIO/2); FixtureDef spriteShapeDef = new FixtureDef(); spriteShapeDef.shape = spriteShape; spriteShapeDef.density = 2.0f; spriteShapeDef.friction = 0.70f; spriteShapeDef.restitution = 0.0f; monkey_body.createFixture(spriteShapeDef); //Vector2 force = new Vector2(10, 10); //monkey_body.applyLinearImpulse(force, spriteBodyDef.position); addChild(monkey,10000); this.schedule(tickCallback); this.schedule(createobjects, 2.0f); objectHint = CCColorLayer.node(ccColor4B.ccc4(255,0,0,128), 200f, 100f); addChild(objectHint, 15000); objectHint.setVisible(false); _world.setContactListener(_contactListener); } private UpdateCallback tickCallback = new UpdateCallback() { public void update(float d) { tick(d); } }; private UpdateCallback createobjects = new UpdateCallback() { public void update(float d) { secondUpdate(d); } }; private void secondUpdate(float dt) { this.addNewSprite(); } public void addBoxBodyForSprite(CCSprite sprite) { BodyDef spriteBodyDef = new BodyDef(); spriteBodyDef.type = BodyType.StaticBody; //spriteBodyDef.bullet=true; spriteBodyDef.position.set(sprite.getPosition().x / PTM_RATIO, sprite.getPosition().y / PTM_RATIO); spriteBody = _world.createBody(spriteBodyDef); spriteBody.setUserData(sprite); Vector2 verts[] = { new Vector2(-11.8f / PTM_RATIO, -24.5f / PTM_RATIO), new Vector2(11.7f / PTM_RATIO, -24.0f / PTM_RATIO), new Vector2(29.2f / PTM_RATIO, -14.0f / PTM_RATIO), new Vector2(28.7f / PTM_RATIO, -0.7f / PTM_RATIO), new Vector2(8.0f / PTM_RATIO, 18.2f / PTM_RATIO), new Vector2(-29.0f / PTM_RATIO, 18.7f / PTM_RATIO), new Vector2(-26.3f / PTM_RATIO, -12.2f / PTM_RATIO) }; PolygonShape spriteShape = new PolygonShape(); spriteShape.set(verts); //spriteShape.setAsBox(sprite.getContentSize().width/PTM_RATIO/2, //sprite.getContentSize().height/PTM_RATIO/2); FixtureDef spriteShapeDef = new FixtureDef(); spriteShapeDef.shape = spriteShape; spriteShapeDef.density = 2.0f; spriteShapeDef.friction = 0.70f; spriteShapeDef.restitution = 0.0f; spriteShapeDef.isSensor=true; 
spriteBody.createFixture(spriteShapeDef); } public void addNewSprite() { count=0; Random rand = new Random(); int Number = rand.nextInt(10); switch(Number) { case 0: _block = CCSprite.sprite("banana.png"); break; case 1: _block = CCSprite.sprite("backpack.png");break; case 2: _block = CCSprite.sprite("statue.png");break; case 3: _block = CCSprite.sprite("pineapple.png");break; case 4: _block = CCSprite.sprite("bananabunch.png");break; case 5: _block = CCSprite.sprite("hat.png");break; case 6: _block = CCSprite.sprite("canteen.png");break; case 7: _block = CCSprite.sprite("banana.png");break; case 8: _block = CCSprite.sprite("statue.png");break; case 9: _block = CCSprite.sprite("hat.png");break; } int padding=20; //_block.setPosition(CGPoint.make(100, 100)); // Determine where to spawn the target along the Y axis CGSize winSize = CCDirector.sharedDirector().displaySize(); int minY = (int)(_block.getContentSize().width / 2.0f); int maxY = (int)(winSize.width - _block.getContentSize().width / 2.0f); int rangeY = maxY - minY; int actualY = rand.nextInt(rangeY) + minY; // Create block and add it to the layer float xOffset = padding+_block.getContentSize().width/2+((_block.getContentSize().width+padding)*count); _block.setPosition(CGPoint.make(actualY, 750)); _block.setTag(1); float w = _block.getContentSize().width; objectHint.setVisible(true); objectHint.changeWidth(w); objectHint.setPosition(actualY-w/2, 460); this.addChild(_block,10000); // Create ball body and shape BodyDef ballBodyDef1 = new BodyDef(); ballBodyDef1.type = BodyType.DynamicBody; ballBodyDef1.position.set(actualY/PTM_RATIO, 480/PTM_RATIO); bodies = _world.createBody(ballBodyDef1); bodies.setUserData(_block); PolygonShape circle1 = new PolygonShape(); Vector2 verts[] = { new Vector2(-11.8f / PTM_RATIO, -24.5f / PTM_RATIO), new Vector2(11.7f / PTM_RATIO, -24.0f / PTM_RATIO), new Vector2(29.2f / PTM_RATIO, -14.0f / PTM_RATIO), new Vector2(28.7f / PTM_RATIO, -0.7f / PTM_RATIO), new Vector2(8.0f / PTM_RATIO, 18.2f / PTM_RATIO), new Vector2(-29.0f / PTM_RATIO, 18.7f / PTM_RATIO), new Vector2(-26.3f / PTM_RATIO, -12.2f / PTM_RATIO) }; circle1.set(verts); FixtureDef ballShapeDef1 = new FixtureDef(); ballShapeDef1.shape = circle1; ballShapeDef1.density = 10.0f; ballShapeDef1.friction = 0.0f; ballShapeDef1.restitution = 0.1f; bodies.createFixture(ballShapeDef1); count++; //Remove(); } @Override public void ccAccelerometerChanged(float accelX, float accelY, float accelZ) { //Apply the directional impulse /*float impulse = monkey_body.getMass()*accelY*WALK_FACTOR; Vector2 force = new Vector2(impulse, 0); monkey_body.applyLinearImpulse(force, monkey_body.getWorldCenter());*/ walk(accelY); //Remove(); } private void walk(float accelY) { // TODO Auto-generated method stub direction = accelY; } private void Remove() { for (Iterator<MyContact> it1 = _contactListener.mContacts.iterator(); it1.hasNext();) { MyContact contact = it1.next(); Body bodyA = contact.fixtureA.getBody(); Body bodyB = contact.fixtureB.getBody(); // See if there's any user data attached to the Box2D body // There should be, since we set it in addBoxBodyForSprite if (bodyA.getUserData() != null && bodyB.getUserData() != null) { CCSprite spriteA = (CCSprite) bodyA.getUserData(); CCSprite spriteB = (CCSprite) bodyB.getUserData(); // Is sprite A a cat and sprite B a car? If so, push the cat // on a list to be destroyed... 
if (spriteA.getTag() == 1 && spriteB.getTag() == 2) { //Log.v("dsfds", "dsfsd"+bodyA); //_world.destroyBody(bodyA); // removeChild(spriteA, true); toDestroy.add(bodyA); } // Is sprite A a car and sprite B a cat? If so, push the cat // on a list to be destroyed... else if (spriteA.getTag() == 2 && spriteB.getTag() == 1) { //Log.v("dsfds", "dsfsd"+bodyB); toDestroy.add(bodyB); } } } // Loop through all of the box2d bodies we want to destroy... for (Iterator<Body> it1 = toDestroy.iterator(); it1.hasNext();) { Body body = it1.next(); // See if there's any user data attached to the Box2D body // There should be, since we set it in addBoxBodyForSprite if (body.getUserData() != null) { // We know that the user data is a sprite since we set // it that way, so cast it... CCSprite sprite = (CCSprite) body.getUserData(); // Remove the sprite from the scene _world.destroyBody(body); removeChild(sprite, true); } // Destroy the Box2D body as well // _contactListener.mContacts.remove(0); } } public synchronized void tick(float delta) { synchronized (_world) { _world.step(delta, 8, 3); //_world.clearForces(); //addNewSprite(); } CCAnimation danceAnimation = CCAnimation.animation("dance", 1.0f); // Iterate over the bodies in the physics world Iterator<Body> it = _world.getBodies(); while(it.hasNext()) { Body b = it.next(); Object userData = b.getUserData(); if (userData != null && userData instanceof CCSprite) { //Synchronize the Sprites position and rotation with the corresponding body CCSprite sprite = (CCSprite)userData; if(sprite.getTag()==1) { //b.applyLinearImpulse(force, pos); sprite.setPosition(b.getPosition().x * PTM_RATIO, b.getPosition().y * PTM_RATIO); sprite.setRotation(-1.0f * ccMacros.CC_RADIANS_TO_DEGREES(b.getAngle())); } else { //Apply the directional impulse float impulse = monkey_body.getMass()*direction*WALK_FACTOR; Vector2 force = new Vector2(impulse, 0); b.applyLinearImpulse(force, b.getWorldCenter()); sprite.setPosition(b.getPosition().x * PTM_RATIO, b.getPosition().y * PTM_RATIO); animDelay -= 1.0f/60.0f; if(animDelay <= 0) { animDelay = ANIM_SPEED; animPhase++; if(animPhase > 2) { animPhase = 1; } } if(direction < 0 ) { isLeft=1; } else { isLeft=0; } if(isLeft==1) { dir = "left"; } else { dir = "right"; } float standingLimit = (float) 0.1f; float vX = monkey_body.getLinearVelocity().x; if((vX > -standingLimit)&& (vX < standingLimit)) { // Log.v("sasd", "standing"); } else { } } } } Remove(); } } Sorry for my english. Thanks in advance.
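    A general note on this crash pattern, offered as a sketch rather than a definitive fix for the exact code above: destroying the same body twice is a classic cause, and in the posted code neither toDestroy nor the listener's mContacts list is ever cleared after processing, so a body can be queued for destruction again on a later tick. The usual pattern is to record bodies in a set during the step and destroy each one exactly once afterwards. Here is a minimal, library-free Python sketch of that pattern; all class and method names are invented:

        class PhysicsWorld:
            """Toy stand-in for a Box2D world with its locking rule."""
            def __init__(self):
                self.bodies = set()
                self.locked = False  # True while step() is running callbacks

            def destroy_body(self, body):
                assert not self.locked, "never destroy bodies during a step/callback"
                self.bodies.discard(body)

        class GameLayer:
            def __init__(self, world):
                self.world = world
                self.to_destroy = set()  # a set, so duplicate contacts cannot
                                         # queue the same body twice

            def on_contact(self, body_a, body_b):
                # Called from inside world.step(): only *record* the body.
                self.to_destroy.add(body_a)

            def tick(self, dt):
                self.world.locked = True
                # ... world.step(dt) would fire on_contact callbacks here ...
                self.world.locked = False
                for body in self.to_destroy:     # safe: the step has finished
                    if body in self.world.bodies:
                        self.world.destroy_body(body)
                self.to_destroy.clear()          # crucial: never process twice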

    Read the article

< Previous Page | 796 797 798 799 800 801 802 803 804 805 806 807  | Next Page >