Search Results



  • Logging Application Block

    - by Gordon Carpenter-Thompson
    I'm using the Logging Application Block in my ASP.NET application and want to convert the application to a Sharepoint WebPart. It all works fine as long as I change: <trust level="WSS_Minimal" originUrl="" /> to <trust level="Full" originUrl="" /> If not I get an exception in the logs: Failed to add webpart *************255Fcatalogs%252Fwp%252FSearchWebPart%252Ewebpart;SearchWebPart. Exception Microsoft.SharePoint.WebPartPages.WebPartPageUserException: The type initializer for 'Microsoft.Practices.EnterpriseLibrary.Logging.Logger' threw an exception. ---> System.TypeInitializationException: The type initializer for 'Microsoft.Practices.EnterpriseLibrary.Logging.Logger' threw an exception. ---> System.TypeInitializationException: The type initializer for 'Microsoft.Practices.EnterpriseLibrary.Common.Configuration.SystemConfigurationSource' threw an exception. ---> System.Security.SecurityException: Request for the permission of type 'System.Security.Permissions.FileIOPermission, mscorlib, Version=2.0.0.0, Culture=neut... ...ral, PublicKeyToken=b77a5c561934e089' failed. at System.Security.CodeAccessSecurityEngine.Check(Object demand, StackCrawlMark& stackMark, Boolean isPermSet) at System.Security.CodeAccessPermission.Demand() at System.AppDomainSetup.VerifyDir(String dir, Boolean normalize) at System.AppDomainSetup.get_ConfigurationFile() at Microsoft.Practices.EnterpriseLibrary.Common.Configuration.SystemConfigurationSourceImplementation..ctor(Boolean refresh) at Microsoft.Practices.EnterpriseLibrary.Common.Configuration.SystemConfigurationSource..cctor() The action that failed was: Demand The type of the first permission that failed was: System.Security.Permissions.FileIOPermission The first permission that failed was: <IPermission class="System.Security.Permissions.FileIOPermi... ...ssion, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" PathDiscovery="D:\Inetpub\wwwroot\wss\VirtualDirectories\8686\web.config"/> The demand was for: <IPermission class="System.Security.Permissions.FileIOPermission, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" PathDiscovery="D:\Inetpub\wwwroot\wss\VirtualDirectories\8686\web.config"/> The granted set of the failing assembly was: <PermissionSet class="System.Security.PermissionSet" version="1"> <IPermission class="System.Security.Permissions.SecurityPermission, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" Flags="Execution"/> <IPermission class="System.Security.Permissions.StrongNameIdentityPermis... ...sion, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" PublicKeyBlob="0024000004800000940000000602000000240000525341310004000001000100257FAE170ABB2AB4A8EF914DFEA757F7DB8C221F01850FC8753A4C6585C0B07749DA33DF4D64A721A070E7CDCDEFC8C786E3626418389BCF461E4300E6F4C477BE5CE64AD12C29D517208D6BA627D9F73A9066B7638BE1FEE3EABE6C3E537B546CB3B5DE5E436F95278BB1E9DBDE85C2A6B624010A8073841D467CC7A0A0C6C8" Name="Microsoft.Practices.EnterpriseLibrary.Common" AssemblyVersion="3.1.0.0"/> <IPermission class="System.Security.Permissions.UrlIdentityPermission, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" Url="file:///D:/Inetpub/wwwroot/wss/VirtualDirectories/8686/bin/Microsoft.Practices.EnterpriseLibrary.Common.DLL"/> <IPe... 
...rmission class="System.Security.Permissions.ZoneIdentityPermission, mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" Zone="MyComputer"/> <IPermission class="System.Web.AspNetHostingPermission, System, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" version="1" Level="Minimal"/> <IPermission class="Microsoft.SharePoint.Security.WebPartPermission, Microsoft.SharePoint.Security, Version=12.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c" version="1" Connections="True"/> </PermissionSet> The assembly or AppDomain that failed was: Microsoft.Practices.EnterpriseLibrary.Common, Version=3.1.0.0, Culture=neutral, PublicKeyToken=a646907c4a695009 The Zone of the assembly that failed was: MyComputer The Url of the assem... ...bly that failed was: file:///D:/Inetpub/wwwroot/wss/VirtualDirectories/8686/bin/Microsoft.Practices.EnterpriseLibrary.Common.DLL --- End of inner exception stack trace --- at Microsoft.Practices.EnterpriseLibrary.Common.Configuration.SystemConfigurationSource..ctor() at Microsoft.Practices.EnterpriseLibrary.Common.Configuration.ConfigurationSourceFactory.Create() at Microsoft.Practices.EnterpriseLibrary.Logging.Logger..cctor() --- End of inner exception stack trace --- at Microsoft.Practices.EnterpriseLibrary.Logging.Logger.Write(LogEntry log) at com.okana.sharepoint.SearchWebPart.OnLoad(EventArgs e) at System.Web.UI.Control.LoadRecursive() at System.Web.UI.Control.AddedControl(Control control, Int32 index) at System.Web.UI.ControlCollection.Add(Con... ...trol child) at System.Web.UI.WebControls.WebParts.WebPartManager.WebPartManagerControlCollection.AddWebPartHelper(WebPart webPart) at System.Web.UI.WebControls.WebParts.WebPartManager.WebPartManagerControlCollection.AddWebPart(WebPart webPart) at System.Web.UI.WebControls.WebParts.WebPartManager.AddWebPart(WebPart webPart) at System.Web.UI.WebControls.WebParts.WebPartManagerInternals.AddWebPart(WebPart webPart) at Microsoft.SharePoint.WebPartPages.SPWebPartManager.AddWebPartWithRetry(WebPart webPart) at Microsoft.SharePoint.WebPartPages.SPWebPartManager.AddDynamicWebPart(WebPart webPart) at Microsoft.SharePoint.WebPartPages.SPWebPartManager.LoadWebPart(WebPart aspWebPart, String zoneId, Int32 zoneIndex, Boolean isClosed) at Microsoft.SharePoint.WebPartPages.... 
...SPWebPartManager.AddWebPartInternalShared(WebPart webPart) at Microsoft.SharePoint.WebPartPages.SPWebPartManager.AddWebPartInternal(SPSupersetWebPart superset, Boolean throwIfLocked) --- End of inner exception stack trace --- at Microsoft.SharePoint.WebPartPages.SPWebPartManager.AddWebPartInternal(SPSupersetWebPart superset, Boolean throwIfLocked) at Microsoft.SharePoint.WebPartPages.SPWebPartManager.AddWebPartInternal(SPSupersetWebPart superset) at Microsoft.SharePoint.WebPartPages.WebPartQuickAdd.System.Web.UI.IPostBackEventHandler.RaisePostBackEvent(String eventArgument) My log configuration is this: <loggingConfiguration name="Logging Application Block" tracingEnabled="true" defaultCategory="General" logWarningsWhenNoCategoriesMatch="true"> <listeners> <add fileName="XAE.log" rollSizeKB="0" timeStampPattern="yyyy-MM-dd" rollFileExistsBehavior="Overwrite" rollInterval="Day" formatter="Text Formatter" header="" footer="" listenerDataType="Microsoft.Practices.EnterpriseLibrary.Logging.Configuration.RollingFlatFileTraceListenerData, Microsoft.Practices.EnterpriseLibrary.Logging, Version=3.1.0.0, Culture=neutral" traceOutputOptions="None" type="Microsoft.Practices.EnterpriseLibrary.Logging.TraceListeners.RollingFlatFileTraceListener, Microsoft.Practices.EnterpriseLibrary.Logging, Version=3.1.0.0, Culture=neutral" name="Rolling Flat File Trace Listener" /> <add fileName="IDOL.log" rollSizeKB="0" timeStampPattern="yyyy-MM-dd" rollFileExistsBehavior="Overwrite" rollInterval="Day" formatter="Text Formatter" header="" footer="" listenerDataType="Microsoft.Practices.EnterpriseLibrary.Logging.Configuration.RollingFlatFileTraceListenerData, Microsoft.Practices.EnterpriseLibrary.Logging, Version=3.1.0.0, Culture=neutral" traceOutputOptions="None" type="Microsoft.Practices.EnterpriseLibrary.Logging.TraceListeners.RollingFlatFileTraceListener, Microsoft.Practices.EnterpriseLibrary.Logging, Version=3.1.0.0, Culture=neutral" name="IDOL Rolling Flat File Trace Listener" /> </listeners> <formatters> <add template="{timestamp(local)} : {category} : {message}" type="Microsoft.Practices.EnterpriseLibrary.Logging.Formatters.TextFormatter, Microsoft.Practices.EnterpriseLibrary.Logging, Version=3.1.0.0, Culture=neutral" name="Text Formatter" /> </formatters> <categorySources> <!-- For any log entries that you wish to suppress set "All" to "Off" --> <add switchValue="All" name="Communication"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </add> <add switchValue="All" name="Debug"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </add> <add switchValue="All" name="Exception"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </add> <add switchValue="All" name="General"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </add> <add switchValue="All" name="Warning"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </add> <add switchValue="All" name="IDOL"> <listeners> <add name="IDOL Rolling Flat File Trace Listener" /> </listeners> </add> </categorySources> <specialSources> <allEvents switchValue="All" name="All Events" /> <notProcessed switchValue="All" name="Unprocessed Category" /> <errors switchValue="All" name="Logging Errors &amp; Warnings"> <listeners> <add name="Rolling Flat File Trace Listener" /> </listeners> </errors> </specialSources> </loggingConfiguration> Clearly this is because it's trying to create the log files and WSS_Minimal doesn't allow this. 
Is there a simple way to disable all logging for now? Removing the logging is problematic, as it's used in the underlying libraries. I have tried setting every switchValue="All" to "Off", but it still throws the exception even though nothing should be logged.
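One pragmatic stopgap (my own sketch, not from the original question, and only effective for call sites you control; the underlying libraries that call Logger directly would still trigger the type initializer) is to route your own logging through a defensive wrapper that gives up after the first partial-trust failure:

    using System;
    using Microsoft.Practices.EnterpriseLibrary.Logging;

    // SafeLogger is a hypothetical helper, not part of Enterprise Library.
    public static class SafeLogger
    {
        private static volatile bool disabled;

        public static void Write(string message, string category)
        {
            if (disabled) return;
            try
            {
                Logger.Write(message, category);
            }
            catch (TypeInitializationException)
            {
                // Under WSS_Minimal the Logger type initializer fails with the
                // FileIOPermission demand seen in the log above; stop retrying.
                disabled = true;
            }
            catch (System.Security.SecurityException)
            {
                disabled = true;
            }
        }
    }

Note that setting the switchValue entries to "Off" cannot help here: the stack trace shows the exception is thrown while the Logger type and its configuration source are being initialized, before any category switch is consulted.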


  • MonoTouch App Crashes When Returning From MFMailComposeViewController

    - by Richard Khan
    My MonoTouch Version Info: Release ID: 20401003 Git revision: 2f1746af36f421d262dcd2b0542ce86b12158f02 Build date: 2010-12-23 23:13:38+0000 The MFMailComposeViewController is displayed and works correctly as a dialog using the following code: if (MFMailComposeViewController.CanSendMail) { MFMailComposeViewController mail; mail = new MFMailComposeViewController (); mail.SetSubject ("Subject Test"); mail.SetMessageBody ("Body Test", false); mail.Finished += HandleMailFinished; this.navigationController.PresentModalViewController (mail, true); } else { new UIAlertView ("Mail Failed", "Mail Failed", null, "OK", null).Show (); } However, once the user selects Cancel | Delete Draft or Cancel | Save Draft or Send, the App throws a run-time error like the following: Stacktrace: at (wrapper managed-to-native) MonoTouch.UIKit.UIApplication.UIApplicationMain (int,string[],intptr,intptr) <0x00004 at (wrapper managed-to-native) MonoTouch.UIKit.UIApplication.UIApplicationMain (int,string[],intptr,intptr) <0x00004 at MonoTouch.UIKit.UIApplication.Main (string[],string,string) [0x00038] in /Users/plasma/Source/iphone/monotouch/UIKit/UIApplication.cs:26 at MonoTouch.UIKit.UIApplication.Main (string[]) [0x00000] in /Users/plasma/Source/iphone/monotouch/UIKit/UIApplication.cs:31 at MailDialog.Application.Main (string[]) [0x00000] in /Users/rrkhan/Projects/Sandbox/MailDialog/Main.cs:15 at (wrapper runtime-invoke) .runtime_invoke_void_object (object,intptr,intptr,intptr) Native stacktrace: 0 MailDialog 0x000be66f mono_handle_native_sigsegv + 343 1 MailDialog 0x0000e43e mono_sigsegv_signal_handler + 313 2 libSystem.B.dylib 0x903e946b _sigtramp + 43 3 ??? 0xffffffff 0x0 + 4294967295 4 MessageUI 0x01a9f6b7 -[MFMailComposeController _close] + 284 5 UIKit 0x01f682f1 -[UIActionSheet(Private) _buttonClicked:] + 258 6 UIKit 0x01be1a6e -[UIApplication sendAction:to:from:forEvent:] + 119 7 UIKit 0x01c701b5 -[UIControl sendAction:to:forEvent:] + 67 8 UIKit 0x01c72647 -[UIControl(Internal) _sendActionsForEvents:withEvent:] + 527 9 UIKit 0x01c711f4 -[UIControl touchesEnded:withEvent:] + 458 10 UIKit 0x01c060d1 -[UIWindow _sendTouchesForEvent:] + 567 11 UIKit 0x01be737a -[UIApplication sendEvent:] + 447 12 UIKit 0x01bec732 _UIApplicationHandleEvent + 7576 13 GraphicsServices 0x03eb7a36 PurpleEventCallback + 1550 14 CoreFoundation 0x00df9064 CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE1_PERFORM_FUNCTION + 52 15 CoreFoundation 0x00d596f7 __CFRunLoopDoSource1 + 215 16 CoreFoundation 0x00d56983 __CFRunLoopRun + 979 17 CoreFoundation 0x00d56240 CFRunLoopRunSpecific + 208 18 CoreFoundation 0x00d56161 CFRunLoopRunInMode + 97 19 GraphicsServices 0x03eb6268 GSEventRunModal + 217 20 GraphicsServices 0x03eb632d GSEventRun + 115 21 UIKit 0x01bf042e UIApplicationMain + 1160 22 ??? 0x0a1e4bd9 0x0 + 169757657 23 ??? 0x0a1e4b12 0x0 + 169757458 24 ??? 0x0a1e4515 0x0 + 169755925 25 ??? 0x0a1e4451 0x0 + 169755729 26 ??? 0x0a1e44ac 0x0 + 169755820 27 MailDialog 0x0000e202 mono_jit_runtime_invoke + 1360 28 MailDialog 0x001c92af mono_runtime_invoke + 137 29 MailDialog 0x001caf6b mono_runtime_exec_main + 714 30 MailDialog 0x001ca891 mono_runtime_run_main + 812 31 MailDialog 0x00094fe8 mono_jit_exec + 200 32 MailDialog 0x0027cf05 main + 3494 33 MailDialog 0x00002ca1 _start + 208 34 MailDialog 0x00002bd0 start + 40 Debug info from gdb: warning: Could not find object file "/var/folders/Ny/NyElTwhDGD8kZMqIEeLGXE+++TI/-Tmp-//cc6F1tBs.o" - no debug information available for "template.m". 
warning: .o file "/Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(zlib-helper.x86.42.o)" more recent than executable timestamp in "/Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog" warning: Could not open OSO file /Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(zlib-helper.x86.42.o) to scan for pubtypes for objfile /Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog warning: .o file "/Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(monotouch-glue.x86.42.o)" more recent than executable timestamp in "/Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog" warning: Could not open OSO file /Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(monotouch-glue.x86.42.o) to scan for pubtypes for objfile /Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog warning: .o file "/Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(gc.x86.42.o)" more recent than executable timestamp in "/Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog" warning: Could not open OSO file /Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(gc.x86.42.o) to scan for pubtypes for objfile /Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog Error connecting stdout and stderr (127.0.0.1:10001) warning: .o file "/Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(monotouch-glue.x86.42.o)" more recent than executable timestamp in "/Users/rrkhan/Library/Application Support/iPhone Simulator/4.2/Applications/52AF1D24-AADA-48ED-B373-ED08E89E4985/MailDialog.app/MailDialog" warning: Couldn't open object file '/Developer/MonoTouch/SDKs/MonoTouch.iphonesimulator4.2.sdk/usr/lib/libmonotouch.a(monotouch-glue.x86.42.o)' Attaching to process 9992. Reading symbols for shared libraries . done Reading symbols for shared libraries ....................................................................................................................... 
done 0x9038e459 in read$UNIX2003 () 8 0x903a8a12 in __workq_kernreturn () 7 "WebThread" 0x903830fa in mach_msg_trap () 6 0x903b10a6 in __semwait_signal () 5 0x90383136 in semaphore_wait_trap () 4 0x903830fa in mach_msg_trap () 3 0x903a8a12 in __workq_kernreturn () 2 "com.apple.libdispatch-manager" 0x903a9982 in kevent () * 1 "com.apple.main-thread" 0x9038e459 in read$UNIX2003 () Thread 8 (process 9992): 0 0x903a8a12 in __workq_kernreturn () 1 0x903a8fa8 in _pthread_wqthread () 2 0x903a8bc6 in start_wqthread () Thread 7 (process 9992): 0 0x903830fa in mach_msg_trap () 1 0x90383867 in mach_msg () 2 0x00df94a6 in __CFRunLoopServiceMachPort () 3 0x00d56874 in __CFRunLoopRun () 4 0x00d56240 in CFRunLoopRunSpecific () 5 0x00d56161 in CFRunLoopRunInMode () 6 0x04f7c423 in RunWebThread () 7 0x903b085d in _pthread_start () 8 0x903b06e2 in thread_start () Thread 6 (process 9992): 0 0x903b10a6 in __semwait_signal () 1 0x903dcee5 in nanosleep$UNIX2003 () 2 0x903dce23 in usleep$UNIX2003 () 3 0x0027714c in monotouch_pump_gc () 4 0x903b085d in _pthread_start () 5 0x903b06e2 in thread_start () Thread 5 (process 9992): 0 0x90383136 in semaphore_wait_trap () 1 0x0015ae1d in finalizer_thread (unused=0x0) at ../../../../mono/metadata/gc.c:1026 2 0x002034a3 in start_wrapper (data=0x7b16ba0) at ../../../../mono/metadata/threads.c:661 3 0x002448e2 in thread_start_routine (args=0x8037e34) at ../../../../mono/io-layer/wthreads.c:286 4 0x00274357 in GC_start_routine (arg=0x6ff7f60) at ../../../libgc/pthread_support.c:1390 5 0x903b085d in _pthread_start () 6 0x903b06e2 in thread_start () Thread 4 (process 9992): 0 0x903830fa in mach_msg_trap () 1 0x90383867 in mach_msg () 2 0x0011cc46 in mach_exception_thread (arg=0x0) at ../../../../mono/mini/mini-darwin.c:138 3 0x903b085d in _pthread_start () 4 0x903b06e2 in thread_start () Thread 3 (process 9992): 0 0x903a8a12 in __workq_kernreturn () 1 0x903a8fa8 in _pthread_wqthread () 2 0x903a8bc6 in start_wqthread () Thread 2 (process 9992): 0 0x903a9982 in kevent () 1 0x903aa09c in _dispatch_mgr_invoke () 2 0x903a9559 in _dispatch_queue_invoke () 3 0x903a92fe in _dispatch_worker_thread2 () 4 0x903a8d81 in _pthread_wqthread () 5 0x903a8bc6 in start_wqthread () Thread 1 (process 9992): 0 0x9038e459 in read$UNIX2003 () 1 0x000be81f in mono_handle_native_sigsegv (signal=11, ctx=0xbfffd238) at ../../../../mono/mini/mini-exceptions.c:1826 2 0x0000e43e in mono_sigsegv_signal_handler (_dummy=10, info=0xbfffd1f8, context=0xbfffd238) at ../../../../mono/mini/mini.c:4846 3 4 0x028d6a63 in objc_msgSend () 5 0x01ad469f in func.24012 () 6 0x01a9f6b7 in -[MFMailComposeController _close] () 7 0x01f682f1 in -[UIActionSheet(Private) _buttonClicked:] () 8 0x01be1a6e in -[UIApplication sendAction:to:from:forEvent:] () 9 0x01c701b5 in -[UIControl sendAction:to:forEvent:] () 10 0x01c72647 in -[UIControl(Internal) _sendActionsForEvents:withEvent:] () 11 0x01c711f4 in -[UIControl touchesEnded:withEvent:] () 12 0x01c060d1 in -[UIWindow _sendTouchesForEvent:] () 13 0x01be737a in -[UIApplication sendEvent:] () 14 0x01bec732 in _UIApplicationHandleEvent () 15 0x03eb7a36 in PurpleEventCallback () 16 0x00df9064 in CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE1_PERFORM_FUNCTION () 17 0x00d596f7 in __CFRunLoopDoSource1 () 18 0x00d56983 in __CFRunLoopRun () 19 0x00d56240 in CFRunLoopRunSpecific () 20 0x00d56161 in CFRunLoopRunInMode () 21 0x03eb6268 in GSEventRunModal () 22 0x03eb632d in GSEventRun () 23 0x01bf042e in UIApplicationMain () 24 0x0a1e4bd9 in ?? () 25 0x0a1e4b12 in ?? () 26 0x0a1e4515 in ?? 
() 27 0x0a1e4451 in ?? () 28 0x0a1e44ac in ?? () 29 0x0000e202 in mono_jit_runtime_invoke (method=0xa806e6c, obj=0x0, params=0xbfffedbc, exc=0x0) at ../../../../mono/mini/mini.c:4733 30 0x001c92af in mono_runtime_invoke (method=0xa806e6c, obj=0x0, params=0xbfffedbc, exc=0x0) at ../../../../mono/metadata/object.c:2615 31 0x001caf6b in mono_runtime_exec_main (method=0xa806e6c, args=0xa6a34e0, exc=0x0) at ../../../../mono/metadata/object.c:3581 32 0x001ca891 in mono_runtime_run_main (method=0xa806e6c, argc=0, argv=0xbfffeef4, exc=0x0) at ../../../../mono/metadata/object.c:3355 33 0x00094fe8 in mono_jit_exec (domain=0x6f8fe58, assembly=0xa200730, argc=1, argv=0xbfffeef0) at ../../../../mono/mini/driver.c:1094 34 0x0027cf05 in main () ================================================================= Got a SIGSEGV while executing native code. This usually indicates a fatal error in the mono runtime or one of the native libraries used by your application. Unhandled Exception: System.NullReferenceException: Object reference not set to an instance of an object at (wrapper managed-to-native) MonoTouch.UIKit.UIApplication:UIApplicationMain (int,string[],intptr,intptr) at MonoTouch.UIKit.UIApplication.Main (System.String[] args, System.String principalClassName, System.String delegateClassName) [0x00038] in /Users/plasma/Source/iphone/monotouch/UIKit/UIApplication.cs:26 at MonoTouch.UIKit.UIApplication.Main (System.String[] args) [0x00000] in /Users/plasma/Source/iphone/monotouch/UIKit/UIApplication.cs:31 at MailDialog.Application.Main (System.String[] args) [0x00000] in /Users/rrkhan/Projects/Sandbox/MailDialog/Main.cs:15 I have a very simple sample project illustrating the problem; I can send it to you if required.
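For what it's worth, a commonly suggested cause for this class of crash in early MonoTouch versions is that the controller is only referenced by a local variable, so the garbage collector can collect the managed wrapper while the native mail dialog is still on screen. The sketch below (an assumption based on that pattern, not a confirmed fix for this exact report) keeps a strong reference until the Finished handler runs:

    // Sketch: hold the controller in a field so it cannot be collected
    // while the native mail dialog is still presented.
    MFMailComposeViewController mail;

    void ComposeMail ()
    {
        if (!MFMailComposeViewController.CanSendMail) {
            new UIAlertView ("Mail Failed", "Mail Failed", null, "OK", null).Show ();
            return;
        }

        mail = new MFMailComposeViewController ();
        mail.SetSubject ("Subject Test");
        mail.SetMessageBody ("Body Test", false);
        mail.Finished += (sender, e) => {
            e.Controller.DismissModalViewControllerAnimated (true);
            mail = null; // safe to release once the dialog has finished
        };
        navigationController.PresentModalViewController (mail, true);
    }

The Finished handler is also the natural place to dismiss the dialog, which the original snippet did not show.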


  • Java Cloud Service Integration to REST Service

    - by Jani Rautiainen
    Java Cloud Service (JCS) provides a platform to develop and deploy business applications in the cloud. In Fusion Applications Cloud deployments customers do not have the option to deploy custom applications developed with JDeveloper, to ensure the integrity and supportability of the hosted application service. Instead, custom applications can be deployed to JCS and integrated with the Fusion Applications Cloud instance. This series of articles will go through the features of JCS, provide end-to-end examples of how to develop and deploy applications on JCS, and show how to integrate them with the Fusion Applications instance. In this article a custom application integrating with a REST service will be implemented. We will use REST services provided by Taleo as an example; however, the same approach will work with any REST service. In this example the data from the REST service is used to populate a dynamic table.

Pre-requisites

Access to a Cloud instance. In order to deploy the application, access to a JCS instance is needed; a free trial JCS instance can be obtained from the Oracle Cloud site. To register you will need a credit card, even though the credit card will not be charged. To register simply click "Try it" and choose the "Java" option. The confirmation email will contain the connection details; see this video for an example of the registration. Once the request is processed you will be assigned two service instances: Java and Database. Applications deployed to JCS must use Oracle Database Cloud Service as their underlying database, so when a JCS instance is created a database instance is associated with it using a JDBC data source. The cloud services can be monitored and managed through the web UI; for details refer to Getting Started with Oracle Cloud.

JDeveloper. JDeveloper contains Cloud-specific features related to e.g. connection and deployment. To use these features, download JDeveloper from the JDeveloper download site by clicking the "Download JDeveloper 11.1.1.7.1 for ADF deployment on Oracle Cloud" link; this version of JDeveloper has the JCS integration features that will be used in this article. For versions that do not include the Cloud integration features, the Oracle Java Cloud Service SDK or the JCS Java Console can be used for deployment. For details on installing and configuring JDeveloper refer to the installation guide. For details on the SDK refer to Using the Command-Line Interface to Monitor Oracle Java Cloud Service and Using the Command-Line Interface to Manage Oracle Java Cloud Service.

Access to a local database. The database associated with the JCS instance cannot be connected to with JDBC. Since creating ADFbc business components requires a JDBC connection, we will need access to a local database.

3rd party libraries. This example will use some 3rd party libraries for implementing the REST service call and processing the input / output content. Other libraries may also be used; however, these are tested to work. Jersey 1.x: the Jersey library will be used as a client to make the call to the REST service. The JCS documentation for supported specifications states "Java API for RESTful Web Services (JAX-RS) 1.1", so Jersey 1.x will be used. Download the single-JAR Jersey bundle; in this example the Jersey 1.18 JAR bundle is used. json-simple: the json-simple library will be used to process the JSON objects. Download the JAR file; in this example json-simple-1.1.1.jar is used.

Accessing data in Taleo

Before implementing the application it is beneficial to familiarize oneself with the data in Taleo.
The easiest way to do this is with a REST client in your browser. Once added to the browser you can access the UI. The client can be used to call the REST services to test the URLs and data before adding them into the application. First derive the base URL for the service; this can be done with:

    Method: GET
    URL: https://tbe.taleo.net/MANAGER/dispatcher/api/v1/serviceUrl/<company name>

The response will contain the base URL to be used for the service calls for the company. Next obtain an authentication token with:

    Method: POST
    URL: https://ch.tbe.taleo.net/CH07/ats/api/v1/login?orgCode=<company>&userName=<user name>&password=<password>

The response includes an authentication token that can be used for a few hours to authenticate with the service:

    {
      "response": {
        "authToken": "webapi26419680747505890557"
      },
      "status": {
        "detail": {},
        "success": true
      }
    }

To authenticate the service calls, navigate to "Headers -> Custom Header" and add a new request header with:

    Name: Cookie
    Value: authToken=webapi26419680747505890557

Once the authentication token is defined, the tool can be used to invoke REST services; for example:

    Method: GET
    URL: https://ch.tbe.taleo.net/CH07/ats/api/v1/object/candidate/search.xml?status=16

This data will be used in the application to be created. For details on the Taleo REST services refer to the Taleo Business Edition REST API Guide.

Create Application

First a Fusion Web Application is created and configured. Start JDeveloper and click "New Application":

    Application Name: JcsRestDemo
    Application Package Prefix: oracle.apps.jcs.test
    Application Template: Fusion Web Application (ADF)

Configure Local Cloud Connection

Follow the steps documented in the "Java Cloud Service ADF Web Application" article to configure a local database connection needed to create the ADFbc objects.

Configure Libraries

Add the 3rd party libraries into the class path. Create the following directory and copy the jar files into it: <JDEV_USER_HOME>/JcsRestDemo/lib. Select the "Model" project, navigate "Application -> Project Properties -> Libraries and Classpath -> Add JAR / Directory" and add the two 3rd party libraries.

Accessing Data from Taleo

To access data from Taleo using the REST service the 3rd party libraries will be used. Two Java classes are implemented: one representing the Candidate object and another for accessing the Taleo repository.

Candidate

The Candidate object is a POJO used to represent the candidate data obtained from the Taleo repository. The data obtained will be used to populate the ADFbc object used to display the data on the UI. The Candidate object simply contains the variables we obtain using the REST services and the getters / setters for them. Navigate "New -> General -> Java -> Java Class", enter "Candidate" as the name and create it in the package "oracle.apps.jcs.test.model".
Copy / paste the following as the content:

    import oracle.jbo.domain.Number;

    public class Candidate {
        private Number candId;
        private String firstName;
        private String lastName;

        public Candidate() {
            super();
        }

        public Candidate(Number candId, String firstName, String lastName) {
            super();
            this.candId = candId;
            this.firstName = firstName;
            this.lastName = lastName;
        }

        public void setCandId(Number candId) { this.candId = candId; }
        public Number getCandId() { return candId; }
        public void setFirstName(String firstName) { this.firstName = firstName; }
        public String getFirstName() { return firstName; }
        public void setLastName(String lastName) { this.lastName = lastName; }
        public String getLastName() { return lastName; }
    }

Taleo Repository

The TaleoRepository class will interact with the Taleo REST services. The logic will query data from Taleo and populate Candidate objects with the data. The Candidate object will then be used to populate the ADFbc object used to display data on the UI. Navigate "New -> General -> Java -> Java Class", enter "TaleoRepository" as the name and create it in the package "oracle.apps.jcs.test.model". Copy / paste the following as the content (for details of the implementation refer to the documentation in the code):

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;
    import com.sun.jersey.api.client.WebResource;
    import com.sun.jersey.core.util.MultivaluedMapImpl;
    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.MultivaluedMap;
    import oracle.jbo.domain.Number;
    import org.json.simple.JSONArray;
    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    /**
     * This class interacts with the Taleo REST services.
     */
    public class TaleoRepository {
        /** Connection information needed to access the Taleo services */
        String _company = null;
        String _userName = null;
        String _password = null;
        /** Jersey client used to access the REST services */
        Client _client = null;
        /** Parser for processing the JSON objects used as input / output for the services */
        JSONParser _parser = null;
        /** The base url for constructing the REST URLs. This is obtained from Taleo with a service call. */
        String _baseUrl = null;
        /**
         * Authentication token obtained from Taleo using a service call.
         * The token can be used to authenticate on subsequent service calls.
         * The token will expire in 4 hours.
         */
        String _authToken = null;
        /** Static url that can be used to obtain the url used to construct service calls for a given company */
        private static String _taleoUrl = "https://tbe.taleo.net/MANAGER/dispatcher/api/v1/serviceUrl/";

        /**
         * Default constructor for the repository.
         * Authentication details are passed as parameters and used to generate an
         * authentication token. Note that each service call will generate its own
         * token. This is done to avoid dealing with the expiry of the token. Also,
         * only 20 tokens are allowed per user simultaneously, so instead for each
         * call there is a login / logout.
         *
         * @param company the company for which the service calls are made
         * @param userName the user name to authenticate with
         * @param password the password to authenticate with
         */
        public TaleoRepository(String company, String userName, String password) {
            super();
            _company = company;
            _userName = userName;
            _password = password;
            _client = Client.create();
            _parser = new JSONParser();
            _baseUrl = getBaseUrl();
        }

        /**
         * This obtains the base url for a company to be used to construct the urls
         * for service calls.
         * @return base url for the service calls
         */
        private String getBaseUrl() {
            String result = null;
            if (null != _baseUrl) {
                result = _baseUrl;
            } else {
                try {
                    String company = _company;
                    WebResource resource = _client.resource(_taleoUrl + company);
                    ClientResponse response = resource.type(MediaType.APPLICATION_FORM_URLENCODED_TYPE).get(ClientResponse.class);
                    String entity = response.getEntity(String.class);
                    JSONObject jsonObject = (JSONObject)_parser.parse(new StringReader(entity));
                    JSONObject jsonResponse = (JSONObject)jsonObject.get("response");
                    result = (String)jsonResponse.get("URL");
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
            return result;
        }

        /**
         * Generates an authentication token that can be used to authenticate on
         * subsequent service calls. Note that each service call will generate its
         * own token, to avoid dealing with the expiry of the token. Also, only 20
         * tokens are allowed per user simultaneously, so instead for each call
         * there is a login / logout.
         * @return authentication token that can be used to authenticate on
         * subsequent service calls
         */
        private String login() {
            String result = null;
            try {
                MultivaluedMap<String, String> formData = new MultivaluedMapImpl();
                formData.add("orgCode", _company);
                formData.add("userName", _userName);
                formData.add("password", _password);
                WebResource resource = _client.resource(_baseUrl + "login");
                ClientResponse response = resource.type(MediaType.APPLICATION_FORM_URLENCODED_TYPE).post(ClientResponse.class, formData);
                String entity = response.getEntity(String.class);
                JSONObject jsonObject = (JSONObject)_parser.parse(new StringReader(entity));
                JSONObject jsonResponse = (JSONObject)jsonObject.get("response");
                result = (String)jsonResponse.get("authToken");
            } catch (Exception ex) {
                throw new RuntimeException("Unable to login ", ex);
            }
            if (null == result)
                throw new RuntimeException("Unable to login ");
            return result;
        }

        /**
         * Releases an authentication token. Each call to login must be followed
         * by a call to logout after the processing is done. This is required as
         * the tokens are limited to 20 per user and, if not released, the tokens
         * will only expire after 4 hours.
         * @param authToken
         */
        private void logout(String authToken) {
            WebResource resource = _client.resource(_baseUrl + "logout");
            resource.header("cookie", "authToken=" + authToken).post(ClientResponse.class);
        }

        /**
         * This method is used to obtain a list of candidates using a REST
         * service call. In this example the query is hard coded to query
         * based on status. The url constructed to access the service is:
         * <_baseUrl>/object/candidate/search.xml?status=16
         * @return List of candidates obtained with the service call
         */
        public List<Candidate> getCandidates() {
            List<Candidate> result = new ArrayList<Candidate>();
            try {
                // First login; note that in the finally block we must have logout
                _authToken = "authToken=" + login();
                /**
                 * Construct the URL; the resulting url will be:
                 * <_baseUrl>/object/candidate/search.xml?status=16
                 */
                MultivaluedMap<String, String> formData = new MultivaluedMapImpl();
                formData.add("status", "16");
                JSONArray searchResults = (JSONArray)getTaleoResource("object/candidate/search", "searchResults", formData);
                /**
                 * Process the results. The resulting JSON object is something like
                 * this (simplified for readability):
                 * { "response": { "searchResults": [ { "candidate": {
                 *     "candId": 211, "firstName": "Mary", "lastName": "Stochi", ...
                 * The logic here will find the candidate object(s), obtain the desired
                 * data from them, construct a Candidate object based on the data
                 * and add it to the results.
                 */
                for (Object object : searchResults) {
                    JSONObject temp = (JSONObject)object;
                    JSONObject candidate = (JSONObject)findObject(temp, "candidate");
                    Long candIdTemp = (Long)candidate.get("candId");
                    Number candId = (null == candIdTemp ? null : new Number(candIdTemp));
                    String firstName = (String)candidate.get("firstName");
                    String lastName = (String)candidate.get("lastName");
                    result.add(new Candidate(candId, firstName, lastName));
                }
            } catch (Exception ex) {
                ex.printStackTrace();
            } finally {
                if (null != _authToken)
                    logout(_authToken);
            }
            return result;
        }

        /**
         * Convenience method to construct the url for the service call, invoke the
         * service and obtain a resource from the response.
         * @param path the path for the service to be invoked. This is combined
         * with the base url to construct a url for the service.
         * @param resource the key for the object in the response that will be obtained
         * @param parameters any parameters used for the service call. The call
         * is slightly different depending on whether parameters exist or not.
         * @return the resource from the response for the service call
         */
        private Object getTaleoResource(String path, String resource, MultivaluedMap<String, String> parameters) {
            Object result = null;
            try {
                WebResource webResource = _client.resource(_baseUrl + path);
                ClientResponse response = null;
                if (null == parameters)
                    response = webResource.header("cookie", _authToken).get(ClientResponse.class);
                else
                    response = webResource.queryParams(parameters).header("cookie", _authToken).get(ClientResponse.class);
                String entity = response.getEntity(String.class);
                JSONObject jsonObject = (JSONObject)_parser.parse(new StringReader(entity));
                result = findObject(jsonObject, resource);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
            return result;
        }

        /**
         * Convenience method to recursively find an object with a key, traversing
         * down from a given root object. This will traverse a JSONObject /
         * JSONArray recursively to find a matching key; if found, the object with
         * the key is returned.
         * @param root root object which contains the key searched for
         * @param key the key for the object to search for
         * @return the object matching the key
         */
        private Object findObject(Object root, String key) {
            Object result = null;
            if (root instanceof JSONObject) {
                JSONObject rootJSON = (JSONObject)root;
                if (rootJSON.containsKey(key)) {
                    result = rootJSON.get(key);
                } else {
                    Iterator children = rootJSON.entrySet().iterator();
                    while (children.hasNext()) {
                        Map.Entry entry = (Map.Entry)children.next();
                        Object child = entry.getValue();
                        if (child instanceof JSONObject || child instanceof JSONArray) {
                            result = findObject(child, key);
                            if (null != result)
                                break;
                        }
                    }
                }
            } else if (root instanceof JSONArray) {
                JSONArray rootJSON = (JSONArray)root;
                for (Object child : rootJSON) {
                    if (child instanceof JSONObject || child instanceof JSONArray) {
                        result = findObject(child, key);
                        if (null != result)
                            break;
                    }
                }
            }
            return result;
        }
    }

Creating Business Objects

While a JCS application can be created without a local database, the local database is required when using ADFbc objects even if no database objects are referenced. For this example we will create a "transient" view object that will be programmatically populated based on the data obtained from the Taleo REST services.

Creating ADFbc objects

Choose the "Model" project and navigate "New -> Business Tier : ADF Business Components : View Object". In the "Initialize Business Components Project" dialog choose the local database connection created in the previous step. On step 1 enter "JcsRestDemoVO" as the "Name" and choose "Rows populated programmatically, not based on query". On step 2 create the following attributes:

    CandId (Type: Number, Updatable: Always, Key Attribute: checked)
    Name (Type: String, Updatable: Always)

On steps 3 and 4 accept the defaults and click "Next". On step 5 check the "Application Module" checkbox and enter "JcsRestDemoAM" as the name. Click "Finish" to generate the objects.

Populating the VO

To display the data on the UI, the transient VO is populated programmatically based on the data obtained from the Taleo REST services. Open "JcsRestDemoVOImpl.java" and copy / paste the following as the content (for details of the implementation refer to the documentation in the code):

    import java.sql.ResultSet;
    import java.util.List;
    import java.util.ListIterator;
    import oracle.jbo.server.ViewObjectImpl;
    import oracle.jbo.server.ViewRowImpl;
    import oracle.jbo.server.ViewRowSetImpl;

    // ---------------------------------------------------------------------
    // --- File generated by Oracle ADF Business Components Design Time.
    // --- Tue Feb 18 09:40:25 PST 2014
    // --- Custom code may be added to this class.
    // --- Warning: Do not modify method signatures of generated methods.
    // ---------------------------------------------------------------------
    public class JcsRestDemoVOImpl extends ViewObjectImpl {

        /**
         * This is the default constructor (do not remove).
         */
        public JcsRestDemoVOImpl() {
        }

        @Override
        public void executeQuery() {
            /**
             * For some reason we need to reset everything, otherwise
             * 2nd entry to the UI screen may fail with
             * "java.util.NoSuchElementException" in the createRowFromResultSet
             * call to "candidates.next()". I am not sure why this is happening,
             * as the Iterator is new and "hasNext" is true at the point
             * of the execution. My theory is that since the iterator object is
             * exactly the same, the VO cache somehow reuses the iterator including
             * the pointer that has already exhausted the iterable elements on the
             * previous run. Working around the issue here by cleaning out
             * everything on the VO every time before the query is executed on the VO.
             */
            getViewDef().setQuery(null);
            getViewDef().setSelectClause(null);
            setQuery(null);
            this.reset();
            this.clearCache();
            super.executeQuery();
        }

        /**
         * executeQueryForCollection - overridden for custom java data source support.
         */
        protected void executeQueryForCollection(Object qc, Object[] params, int noUserParams) {
            /**
             * Integrate with the Taleo REST services using the TaleoRepository class.
             * A list of candidates matching a hard coded query is obtained.
             */
            TaleoRepository repository = new TaleoRepository(<company>, <username>, <password>);
            List<Candidate> candidates = repository.getCandidates();
            /**
             * Store an iterator for the candidates as user data on the collection.
             * This will be used in createRowFromResultSet to create rows based on
             * the custom iterator.
             */
            ListIterator<Candidate> candidatescIterator = candidates.listIterator();
            setUserDataForCollection(qc, candidatescIterator);
            super.executeQueryForCollection(qc, params, noUserParams);
        }

        /**
         * hasNextForCollection - overridden for custom java data source support.
         */
        protected boolean hasNextForCollection(Object qc) {
            boolean result = false;
            /**
             * Determines whether there are candidates for which to create a row.
             */
            ListIterator<Candidate> candidates = (ListIterator<Candidate>)getUserDataForCollection(qc);
            result = candidates.hasNext();
            /**
             * If all candidates have been created, indicate that processing is done.
             */
            if (!result) {
                setFetchCompleteForCollection(qc, true);
            }
            return result;
        }

        /**
         * createRowFromResultSet - overridden for custom java data source support.
         */
        protected ViewRowImpl createRowFromResultSet(Object qc, ResultSet resultSet) {
            /**
             * Obtain the next candidate from the collection and create a row for it.
             */
            ListIterator<Candidate> candidates = (ListIterator<Candidate>)getUserDataForCollection(qc);
            ViewRowImpl row = createNewRowForCollection(qc);
            try {
                Candidate candidate = candidates.next();
                row.setAttribute("CandId", candidate.getCandId());
                row.setAttribute("Name", candidate.getFirstName() + " " + candidate.getLastName());
            } catch (Exception e) {
                e.printStackTrace();
            }
            return row;
        }

        /**
         * getQueryHitCount - overridden for custom java data source support.
         */
        public long getQueryHitCount(ViewRowSetImpl viewRowSet) {
            /**
             * For this example this is not implemented; rather we always return 0.
             */
            return 0;
        }
    }

Creating UI

Choose the "ViewController" project and navigate "New -> Web Tier : JSF : JSF Page". In the "Create JSF Page" dialog enter "JcsRestDemo" as the name and ensure that "Create as XML document (*.jspx)" is checked. Open "JcsRestDemo.jspx", navigate to "Data Controls -> JcsRestDemoAMDataControl -> JcsRestDemoVO1" and drag & drop the VO onto the "<af:form>" as an "ADF Read-only Table". Accept the defaults in "Edit Table Columns". To execute the query, navigate to "Data Controls -> JcsRestDemoAMDataControl -> JcsRestDemoVO1 -> Operations -> Execute" and drag & drop the operation onto the "<af:form>" as a "Button".

Deploying to JCS

Follow the same steps as documented in the previous article "Java Cloud Service ADF Web Application". Once deployed, the application can be accessed with the URL: https://java-[identity domain].java.[data center].oraclecloudapps.com/JcsRestDemo-ViewController-context-root/faces/JcsRestDemo.jspx

The UI displays a list of candidates obtained from the Taleo REST services.

Summary

In this article we learned how to integrate with REST services using the Jersey library in JCS.
In future articles various other integration techniques will be covered.


  • 256 Windows Azure Worker Roles, Windows Kinect and a 90's Text-Based Ray-Tracer

    - by Alan Smith
    For a couple of years I have been demoing a simple render farm hosted in Windows Azure using worker roles and the Azure Storage service. At the start of the presentation I deploy an Azure application that uses 16 worker roles to render a 1,500 frame 3D ray-traced animation. At the end of the presentation, when the animation was complete, I would play the animation and delete the Azure deployment. The standing joke with the audience was that it was a "$2 demo": the compute charges for running the 16 instances for an hour were $1.92; factor in the bandwidth charges and it's a couple of dollars. The point of the demo is that it highlights one of the great benefits of cloud computing: you pay for what you use, and if you need massive compute power for a short period of time, using Windows Azure can work out very cost-effective. The "$2 demo" was great for presenting at user groups and conferences in that it could be deployed to Azure, used to render an animation, and then removed in a one-hour session. I have always had the idea of doing something a bit more impressive with the demo, and scaling it from a "$2 demo" to a "$30 demo". The challenge was to create a visually appealing animation in high definition format and keep the demo time down to one hour. This article will take a run through how I achieved this.

Ray Tracing

Ray tracing, a technique for generating high quality photorealistic images, gained popularity in the 90's with companies like Pixar creating feature length computer animations, and also with the emergence of shareware text-based ray tracers that could run on a home PC. In order to render a ray-traced image, the ray of light that would pass from the view point must be tracked until it intersects with an object. At the intersection, the color, reflectiveness, transparency, and refractive index of the object are used to calculate whether the ray will be reflected or refracted. Each pixel may require thousands of calculations to determine what color it will be in the rendered image.

Pin-Board Toys

Having very little artistic talent and a basic understanding of maths, I decided to focus on an animation that could be modeled fairly easily and would look visually impressive. I've always liked the pin-board desktop toys that became popular in the 80's, and when I was working as a 3D animator back in the 90's I always had the idea of creating a 3D ray-traced animation of a pin-board, but never found the energy to do it. Even if I had had a go at it, the render time to produce an animation that would look respectable on a 486 would have been measured in months.

PolyRay

Back in 1995 I landed my first real job, after spending three years being a beach-ski-climbing-paragliding-bum, and was employed to create 3D ray-traced animations for a CD-ROM that school kids would use to learn physics. I had got into the strange and wonderful world of text-based ray tracing, and was using a shareware ray-tracer called PolyRay. PolyRay takes a text file describing a scene as input and, after a few hours of processing on a 486, produces a high quality ray-traced image. The following is an example of a basic PolyRay scene file.
    background Midnight_Blue

    static define matte surface { ambient 0.1 diffuse 0.7 }
    define matte_white texture { matte { color white } }
    define matte_black texture { matte { color dark_slate_gray } }
    define position_cylindrical 3
    define lookup_sawtooth 1
    define light_wood <0.6, 0.24, 0.1>
    define median_wood <0.3, 0.12, 0.03>
    define dark_wood <0.05, 0.01, 0.005>

    define wooden texture {
        noise surface {
            ambient 0.2 diffuse 0.7 specular white, 0.5
            microfacet Reitz 10
            position_fn position_cylindrical position_scale 1
            lookup_fn lookup_sawtooth octaves 1 turbulence 1
            color_map(
                [0.0, 0.2, light_wood, light_wood]
                [0.2, 0.3, light_wood, median_wood]
                [0.3, 0.4, median_wood, light_wood]
                [0.4, 0.7, light_wood, light_wood]
                [0.7, 0.8, light_wood, median_wood]
                [0.8, 0.9, median_wood, light_wood]
                [0.9, 1.0, light_wood, dark_wood])
        }
    }
    define glass texture { surface { ambient 0 diffuse 0 specular 0.2 reflection white, 0.1 transmission white, 1, 1.5 } }
    define shiny surface { ambient 0.1 diffuse 0.6 specular white, 0.6 microfacet Phong 7 }
    define steely_blue texture { shiny { color black } }
    define chrome texture { surface { color white ambient 0.0 diffuse 0.2 specular 0.4 microfacet Phong 10 reflection 0.8 } }

    viewpoint {
        from <4.000, -1.000, 1.000> at <0.000, 0.000, 0.000> up <0, 1, 0> angle 60
        resolution 640, 480 aspect 1.6 image_format 0
    }

    light <-10, 30, 20>
    light <-10, 30, -20>

    object { disc <0, -2, 0>, <0, 1, 0>, 30 wooden }
    object { sphere <0.000, 0.000, 0.000>, 1.00 chrome }
    object { cylinder <0.000, 0.000, 0.000>, <0.000, 0.000, -4.000>, 0.50 chrome }

After setting up the background and defining colors and textures, the viewpoint is specified. The "camera" is located at a point in 3D space, and it looks towards another point. The angle, image resolution, and aspect ratio are specified. Two lights are present in the image at defined coordinates. The three objects in the image are a wooden disc to represent a table top, and a sphere and cylinder that intersect to form a pin that will be used for the pin board toy in the final animation. When the image is rendered, the following image is produced. The pins are modeled with a chrome surface, so they reflect the environment around them. Note that the scale of the pin shaft is not correct; this will be fixed later.

Modeling the Pin Board

The frame of the pin-board is made up of three boxes and six cylinders; the front box is modeled using a clear, slightly reflective solid with the same refractive index as glass. The other shapes are modeled as metal.

    object { box <-5.5, -1.5, 1>, <5.5, 5.5, 1.2> glass }
    object { box <-5.5, -1.5, -0.04>, <5.5, 5.5, -0.09> steely_blue }
    object { box <-5.5, -1.5, -0.52>, <5.5, 5.5, -0.59> steely_blue }
    object { cylinder <-5.2, -1.2, 1.4>, <-5.2, -1.2, -0.74>, 0.2 steely_blue }
    object { cylinder <5.2, -1.2, 1.4>, <5.2, -1.2, -0.74>, 0.2 steely_blue }
    object { cylinder <-5.2, 5.2, 1.4>, <-5.2, 5.2, -0.74>, 0.2 steely_blue }
    object { cylinder <5.2, 5.2, 1.4>, <5.2, 5.2, -0.74>, 0.2 steely_blue }
    object { cylinder <0, -1.2, 1.4>, <0, -1.2, -0.74>, 0.2 steely_blue }
    object { cylinder <0, 5.2, 1.4>, <0, 5.2, -0.74>, 0.2 steely_blue }

In order to create the matrix of pins that make up the pin board I used a basic console application with a few nested loops to create two intersecting matrixes of pins, which models the layout used in the pin boards; a sketch of that kind of generator follows below. The resulting image is shown below. The pin board contains 11,481 pins, with the scene file containing 23,709 lines of code.
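The console application itself is not shown in the article; the following is a minimal sketch of the kind of nested-loop generator described (the grid dimensions, spacing, and pin radii are illustrative guesses, not the author's actual values):

    using System;
    using System.Globalization;
    using System.IO;
    using System.Text;

    // Sketch: emit PolyRay "object" lines for two intersecting grids of pins,
    // reusing the sphere + cylinder pin shape and chrome texture defined above.
    class PinBoardGenerator
    {
        static void Main()
        {
            var scene = new StringBuilder();
            const double spacing = 0.14;
            for (int grid = 0; grid < 2; grid++)
            {
                // A 76x50 grid plus a 75x49 grid offset by half the spacing gives
                // the staggered layout; the author's real board had 11,481 pins.
                int cols = 76 - grid, rows = 50 - grid;
                double offset = grid * spacing / 2.0;
                for (int row = 0; row < rows; row++)
                {
                    for (int col = 0; col < cols; col++)
                    {
                        double x = -5.25 + offset + col * spacing;
                        double y = -1.25 + offset + row * spacing;
                        scene.AppendFormat(CultureInfo.InvariantCulture,
                            "object {{ sphere <{0:F3}, {1:F3}, 0.000>, 0.07 chrome }}\n", x, y);
                        scene.AppendFormat(CultureInfo.InvariantCulture,
                            "object {{ cylinder <{0:F3}, {1:F3}, 0.000>, <{0:F3}, {1:F3}, -0.600>, 0.02 chrome }}\n", x, y);
                    }
                }
            }
            File.WriteAllText("pinboard.pi", scene.ToString());
        }
    }

Run once, this writes a pinboard.pi fragment that can be pasted into the scene file alongside the frame objects above.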
For the complete animation 2,000 scene files will be created, which is over 47 million lines of code. Each pin in the pin-board will slide out a specific distance when an object is pressed into the back of the board. This is easily modeled by setting the Z coordinate of the pin to a specific value. In order to set all of the pins in the pin-board to the correct position, a bitmap image can be used: the position of each pin can be set based on the color of the pixel at the appropriate position in the image. When the Windows Azure logo is used to set the Z coordinate of the pins, the following image is generated. The challenge now was to make a cool animation. The Azure logo is fine, but it is static. Using a normal video to animate the pins would not work; the colors in the video would not be the same as the depth of the objects from the camera. In order to simulate the pin board accurately, a series of frames from a depth camera could be used.

Windows Kinect

The Kinect controllers for the X-Box 360 and Windows feature a depth camera. The Kinect SDK for Windows provides a programming interface for Kinect, providing easy access for .NET developers to the Kinect sensors. The Kinect Explorer provided with the Kinect SDK is a great starting point for exploring Kinect from a developer's perspective. Both the X-Box 360 Kinect and the Windows Kinect will work with the Kinect SDK; the Windows Kinect is required for commercial applications, but the X-Box Kinect can be used for hobby projects. The Windows Kinect has the advantage of providing a mode that allows depth capture with objects closer to the camera, which makes for a more accurate depth image for setting the pin positions.

Creating a Depth Field Animation

The depth field animation used to set the positions of the pins in the pin board was created using a modified version of the Kinect Explorer sample application. In order to simulate the pin board accurately, a small section of the depth range from the depth sensor will be used. Any part of the object in front of the depth range will result in a white pixel; anything behind the depth range will be black. Within the depth range the pixels in the image will be set to RGB values from 0,0,0 to 255,255,255. A screen shot of the modified Kinect Explorer application is shown below. The Kinect Explorer sample application was modified to include slider controls that are used to set the depth range that forms the image from the depth stream. This allows the fine tuning of the depth image that is required for simulating the position of the pins in the pin board. The Kinect Explorer was also modified to record a series of images from the depth camera and save them as a sequence of JPEG files that will be used to animate the pins in the animation; the Start and Stop buttons are used to start and stop the image recording. An example of one of the depth images is shown below. Once a series of 2,000 depth images has been captured, the task of creating the animation can begin.
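The clamping and scaling described in the depth-field section above is simple to express in code; the sketch below is my own illustration (the method names and the millimeter window are assumptions, not code from the modified sample):

    // Map a raw depth reading (millimeters) into the white..black window used
    // by the modified Kinect Explorer, and map a pixel back to a pin Z offset.
    static byte DepthToPixel(int depthMm, int nearMm, int farMm)
    {
        if (depthMm <= nearMm) return 255; // in front of the window: white
        if (depthMm >= farMm) return 0;    // behind the window: black
        double t = (double)(farMm - depthMm) / (farMm - nearMm);
        return (byte)(t * 255);            // linear 0..255 inside the window
    }

    static double PixelToPinZ(byte pixel, double maxTravel)
    {
        // Brighter pixel = closer object = pin pushed further out of the board.
        return (pixel / 255.0) * maxTravel;
    }

Applying DepthToPixel to each Kinect depth reading produces the grayscale frames, and PixelToPinZ is the inverse mapping the animation creator needs when writing the pin Z coordinates into the scene files.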
Rendering a Test Frame

In order to test the creation of frames and get an approximation of the time required to render each frame, a test frame was rendered on-premise using PolyRay. The output of the rendering process is shown below. The test frame contained 23,629 primitive shapes, most of which are the spheres and cylinders that are used for the 11,800 or so pins in the pin board. The 1280x720 image contains 921,600 pixels, but as anti-aliasing was used the number of rays that were calculated was 4,235,777, with 3,478,754,073 object boundaries checked. The test frame of the pin board with the depth field image applied is shown below. The tracing time for the test frame was 4 minutes 27 seconds, which means rendering the 2,000 frames in the animation would take over 148 hours, or a little over 6 days. Although this is much faster than an old 486, waiting almost a week to see the results of an animation would make it challenging for animators to create, view, and refine their animations. It would be much better if the animation could be rendered in less than one hour.

Windows Azure Worker Roles

The cost of creating an on-premise render farm to render animations increases in proportion to the number of servers. The table below shows the cost of servers for creating a render farm, assuming a cost of $500 per server.

    Number of Servers | Cost
    1                 | $500
    16                | $8,000
    256               | $128,000

As well as the cost of the servers, there would be additional costs for networking, racks etc. Hosting an environment of 256 servers on-premise would require a server room with cooling, and some pretty hefty power cabling. The Windows Azure compute services provide worker roles, which are ideal for performing processor-intensive compute tasks. With the scalability available in Windows Azure, a job that takes 256 hours to complete could be performed using different numbers of worker roles. The time and cost of using 1, 16 or 256 worker roles is shown below; in every case the job consumes the same 256 instance-hours, and at the $0.12 per instance-hour implied by the $1.92 figure above, 256 instance-hours cost $30.72.

    Number of Worker Roles | Render Time | Cost
    1                      | 256 hours   | $30.72
    16                     | 16 hours    | $30.72
    256                    | 1 hour      | $30.72

Using worker roles in Windows Azure, the 256 hour job costs the same irrespective of the number of worker roles used. Provided the compute task can be broken down into many small units, and the worker role compute power can be used effectively, it makes sense to scale the application so that the task is completed quickly, making the results available in a timely fashion. The task of rendering 2,000 frames in an animation is one that can easily be broken down into 2,000 individual pieces, which can be performed by a number of worker roles.

Creating a Render Farm in Windows Azure

The architecture of the render farm is shown in the following diagram. The render farm is a hybrid application with the following components:

    On-Premise
    - Windows Kinect – used in combination with the Kinect Explorer to create a stream of depth images.
    - Animation Creator – this application uses the depth images from the Kinect sensor to create scene description files for PolyRay. These files are then uploaded to the jobs blob container, and job messages added to the jobs queue.
    - Process Monitor – this application queries the role instance lifecycle table and displays statistics about the render farm environment and render process.
    - Image Downloader – this application polls the image queue and downloads the rendered animation files once they are complete.

    Windows Azure
    - Azure Storage – queues and blobs are used for the scene description files and completed frames. A table is used to store the statistics about the rendering environment.

The architecture of each worker role is shown below. The worker role is configured to use local storage, which provides file storage on the worker role instance that can be used by the applications to render the image and transform the format of the image.
The service definition for the worker role, with the local storage configuration highlighted, is shown below.

<?xml version="1.0" encoding="utf-8"?>
<ServiceDefinition name="CloudRay" >
  <WorkerRole name="CloudRayWorkerRole" vmsize="Small">
    <Imports>
    </Imports>
    <ConfigurationSettings>
      <Setting name="DataConnectionString" />
    </ConfigurationSettings>
    <LocalResources>
      <LocalStorage name="RayFolder" cleanOnRoleRecycle="true" />
    </LocalResources>
  </WorkerRole>
</ServiceDefinition>

The two executable programs, PolyRay.exe and DTA.exe, are included in the Azure project with the Copy Always property set. PolyRay will take the scene description file and render it to a Truevision TGA file. As the TGA format has not seen much use since the mid-90s, it is converted to a JPG image using Dave's Targa Animator, another shareware application from the 90s. Each worker role will use the following process to render the animation frames.

1. The worker process polls the job queue; if a job is available, the scene description file is downloaded from blob storage to local storage.
2. PolyRay.exe is started in a process with the appropriate command line arguments to render the image as a TGA file.
3. DTA.exe is started in a process with the appropriate command line arguments to convert the TGA file to a JPG file.
4. The JPG file is uploaded from local storage to the images blob container.
5. A message is placed on the images queue to indicate a new image is available for download.
6. The job message is deleted from the job queue.
7. The role instance lifecycle table is updated with statistics on the number of frames rendered by the worker role instance, and the CPU time used.

The code for this is shown below.

public override void Run()
{
    // Set environment variables
    string polyRayPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), PolyRayLocation);
    string dtaPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), DTALocation);

    LocalResource rayStorage = RoleEnvironment.GetLocalResource("RayFolder");
    string localStorageRootPath = rayStorage.RootPath;

    JobQueue jobQueue = new JobQueue("renderjobs");
    JobQueue downloadQueue = new JobQueue("renderimagedownloadjobs");
    CloudRayBlob sceneBlob = new CloudRayBlob("scenes");
    CloudRayBlob imageBlob = new CloudRayBlob("images");
    RoleLifecycleDataSource roleLifecycleDataSource = new RoleLifecycleDataSource();

    Frames = 0;

    while (true)
    {
        // Get the render job from the queue
        CloudQueueMessage jobMsg = jobQueue.Get();

        if (jobMsg != null)
        {
            // Get the file details
            string sceneFile = jobMsg.AsString;
            string tgaFile = sceneFile.Replace(".pi", ".tga");
            string jpgFile = sceneFile.Replace(".pi", ".jpg");

            string sceneFilePath = Path.Combine(localStorageRootPath, sceneFile);
            string tgaFilePath = Path.Combine(localStorageRootPath, tgaFile);
            string jpgFilePath = Path.Combine(localStorageRootPath, jpgFile);

            // Copy the scene file to local storage
            sceneBlob.DownloadFile(sceneFilePath);

            // Run the ray tracer.
            string polyrayArguments =
                string.Format("\"{0}\" -o \"{1}\" -a 2", sceneFilePath, tgaFilePath);
            Process polyRayProcess = new Process();
            polyRayProcess.StartInfo.FileName =
                Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), polyRayPath);
            polyRayProcess.StartInfo.Arguments = polyrayArguments;
            polyRayProcess.Start();
            polyRayProcess.WaitForExit();

            // Convert the image
            string dtaArguments =
                string.Format(" {0} /FJ /P{1}", tgaFilePath, Path.GetDirectoryName(jpgFilePath));
            Process dtaProcess = new Process();
            dtaProcess.StartInfo.FileName =
                Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), dtaPath);
            dtaProcess.StartInfo.Arguments = dtaArguments;
            dtaProcess.Start();
            dtaProcess.WaitForExit();

            // Upload the image to blob storage
            imageBlob.UploadFile(jpgFilePath);

            // Add a download job.
            downloadQueue.Add(jpgFile);

            // Delete the render job message
            jobQueue.Delete(jobMsg);

            Frames++;
        }
        else
        {
            Thread.Sleep(1000);
        }

        // Log the worker role activity.
        roleLifecycleDataSource.Alive
            ("CloudRayWorker", RoleLifecycleDataSource.RoleLifecycleId, Frames);
    }
}

Monitoring Worker Role Instance Lifecycle

In order to get more accurate statistics about the lifecycle of the worker role instances used to render the animation, data was tracked in an Azure storage table. The following class was used to track the worker role lifecycles in Azure storage.

public class RoleLifecycle : TableServiceEntity
{
    public string ServerName { get; set; }
    public string Status { get; set; }
    public DateTime StartTime { get; set; }
    public DateTime EndTime { get; set; }
    public long SecondsRunning { get; set; }
    public DateTime LastActiveTime { get; set; }
    public int Frames { get; set; }
    public string Comment { get; set; }

    public RoleLifecycle()
    {
    }

    public RoleLifecycle(string roleName)
    {
        PartitionKey = roleName;
        RowKey = Utils.GetAscendingRowKey();
        Status = "Started";
        StartTime = DateTime.UtcNow;
        LastActiveTime = StartTime;
        EndTime = StartTime;
        SecondsRunning = 0;
        Frames = 0;
    }
}

A new instance of this class is created and added to the storage table when the role starts. It is then updated each time the worker renders a frame, to record the total number of frames rendered and the total processing time. These statistics are used by the monitoring application to determine how effectively the render farm resources are being used.

Rendering the Animation

The Azure solution was deployed to Windows Azure with the service configuration set to 16 worker role instances. This allows the application to be tested in the cloud environment, and its performance determined. When I demo the application at conferences and user groups I often start with 16 instances, and then scale up the application to the full 256 instances. The configuration to run 16 instances is shown below.
<?xml version="1.0" encoding="utf-8"?>
<ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">
  <Role name="CloudRayWorkerRole">
    <Instances count="16" />
    <ConfigurationSettings>
      <Setting name="DataConnectionString"
        value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />
    </ConfigurationSettings>
  </Role>
</ServiceConfiguration>

About six minutes after deploying the application the first worker roles become active and start to render the first frames of the animation. The CloudRay Monitor application displays an icon for each worker role instance, with a number indicating the number of frames that the worker role has rendered. The statistics on the left show the number of active worker roles and statistics about the render process. The render time is the time since the first worker role became active; the CPU time is the total amount of processing time used by all worker role instances to render the frames.

Five minutes after the first worker role became active, the last of the 16 worker roles activated. By this time the first seven worker roles had each rendered one frame of the animation.

With 16 worker roles up and running, it can be seen that one hour and 45 minutes of CPU time has been used to render 32 frames, with a render time of just under 10 minutes. At this rate it would take over 10 hours to render the 2,000 frames of the full animation. In order to complete the animation in under an hour, more processing power will be required.

Scaling the render farm from 16 instances to 256 instances is easy using the new management portal. The slider is set to 256 instances and the configuration saved. We do not need to re-deploy the application, and the 16 instances that are up and running will not be affected. Alternatively, the configuration file for the Azure service could be modified to specify 256 instances.

<?xml version="1.0" encoding="utf-8"?>
<ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">
  <Role name="CloudRayWorkerRole">
    <Instances count="256" />
    <ConfigurationSettings>
      <Setting name="DataConnectionString"
        value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />
    </ConfigurationSettings>
  </Role>
</ServiceConfiguration>

Six minutes after the new configuration was applied, 75 new worker roles had activated and were processing their first frames. Five minutes later the full configuration of 256 worker roles was up and running. We can see that the average rate of frame rendering has increased from 3 to 12 frames per minute, and that over 17 hours of CPU time has been utilized in 23 minutes. In this test the time to provision 140 worker roles was about 11 minutes, which works out at about one every five seconds.

We are now half way through the rendering, with 1,000 frames complete. This has utilized just under three days of CPU time in a little over 35 minutes.

The animation is now complete, with 2,000 frames rendered in a little over 52 minutes. The CPU time used by the 256 worker roles is 6 days, 7 hours and 22 minutes, with an average frame rate of 38 frames per minute. The rendering of the last 1,000 frames took 16 minutes 27 seconds, which works out at a rendering rate of 60 frames per minute.
The frame counts in the server instances indicate that the use of a queue to distribute the workload has been very effective in spreading the load across the 256 worker role instances. The first 16 instances that were deployed have rendered between 11 and 13 frames each, whilst the 240 instances that were added when the application was scaled have rendered between 6 and 9 frames each.

Completed Animation

I've uploaded the completed animation to YouTube; a low-resolution preview is shown below.

Pin Board Animation Created using Windows Kinect and 256 Windows Azure Worker Roles

The animation can be viewed in 1280x720 resolution at the following link: http://www.youtube.com/watch?v=n5jy6bvSxWc

Effective Use of Resources

According to the CloudRay monitor statistics the animation took 6 days, 7 hours and 22 minutes of CPU time to render; this works out at 152 hours of compute time, rounded up to the nearest hour. As worker role instances are billed for the full hour, it may have been possible to render the animation using fewer than 256 worker roles. When deciding the optimal usage of resources, the time required to provision and start the worker roles must also be considered. In the demo I started with 16 worker roles, and then scaled the application to 256 worker roles. It would have been more optimal to start the application with maybe 200 worker roles, and utilize the full hour that I was being billed for. This would, however, have prevented showing the ease of scalability of the application. The new management portal displays the CPU usage across the worker roles in the deployment. The average CPU usage across all instances is 93.27%, with over 99% used when all the instances are up and running. This shows that the worker role resources are being used very effectively.

Grid Computing Scenarios

Although I am using this scenario for a hobby project, there are many scenarios where a large amount of compute power is required for a short period of time. Windows Azure provides a great platform for developing these types of grid computing applications, and can work out very cost effective.

· Windows Azure can provide massive compute power, on demand, in a matter of minutes.
· The use of queues to manage the load balancing of jobs between role instances is a simple and effective solution.
· Using a cloud-computing platform like Windows Azure allows proof-of-concept scenarios to be tested and evaluated on a very low budget.
· No charges for inbound data transfer make the uploading of large data sets to Windows Azure Storage services cost effective. (Transaction charges still apply.)

Tips for using Windows Azure for Grid Computing Scenarios

I found the implementation of a render farm using Windows Azure a fairly simple scenario to implement. I was impressed by the ease of scalability that Azure provides, and by the short time that the application took to scale from 16 to 256 worker role instances. In this case it was around 13 minutes; in other tests it took between 10 and 20 minutes. The following tips may be useful when implementing a grid computing project in Windows Azure.

· Using an Azure Storage queue to load-balance the units of work across multiple worker roles is simple and very effective. The design I have used in this scenario could easily scale to many thousands of worker role instances.
· Windows Azure accounts are typically limited to 20 cores.
If you need to use more than this, a call to support and a credit card check will be required.
· Be aware of how the billing model works. You will be charged for worker role instances for the full clock hour in which the instance is deployed. Schedule the workload to start just after the clock hour has started.
· Monitor the utilization of the resources you are provisioning; ensure that you are not paying for worker roles that are idle.
· If you are deploying third-party applications to worker roles, you may well run into licensing issues. Purchasing software licenses on a per-processor basis when using hundreds of processors for a short time period would not be cost effective.
· Third-party software may also require installation onto the worker roles, which can be accomplished using start-up tasks. Bear in mind that adding a startup task and a possible reboot will add to the time required for the worker role instance to start and activate. An alternative may be to use a prepared VM and use VM roles.
· Consider using the Windows Azure Autoscaling Application Block (WASABi) to autoscale the worker roles in your application. When using a large number of worker roles, the utilization must be carefully monitored; if the scaling algorithms are not optimal it could get very expensive!
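As a rough illustration of the clock-hour billing tip above, the following sketch waits until just after the start of the next clock hour before queuing work. QueueRenderJobs() is a hypothetical placeholder for adding the scene files to the jobs queue; it is not part of the original sample.

using System;
using System.Threading;

class HourAlignedScheduler
{
    static void Main()
    {
        DateTime now = DateTime.UtcNow;

        // Start of the next clock hour, plus a small safety margin, so the
        // instances spend as much of the billed hour as possible working.
        DateTime nextHour = new DateTime(now.Year, now.Month, now.Day,
            now.Hour, 0, 0, DateTimeKind.Utc).AddHours(1);
        TimeSpan wait = nextHour.AddMinutes(1) - now;

        Console.WriteLine("Waiting {0:F1} minutes before queuing jobs...", wait.TotalMinutes);
        Thread.Sleep(wait);

        // QueueRenderJobs() would add the scene description files to the
        // jobs queue, as described in the article.
        // QueueRenderJobs();
    }
}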

    Read the article

  • xml to xsl transformation

    - by amirin
Hi, I have a requirement to extract the unique values from different nodes of the given XML using an XSL transformation.

XML FILE:-

var IPClaimCausesNice = "";
function changeIPClaimCause(){
    if(CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Diagnosis") { IPClaimCausesNice = "diagnosis" }
    if(CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Illness") { IPClaimCausesNice = "illness" }
    if(CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Accident") { IPClaimCausesNice = "accident" }
    return IPClaimCausesNice;
}

<section id="1903879316" name="Logos">
  <fraglink id="605609862" resid="1235000151">
    <argvalue name="CommJob">
      <var name="CommJob" type="Th_1235001170_CommJob" />
    </argvalue>
  </fraglink>
</section>
<section id="13483397" name="Address Block">
  <fraglink id="563800610" resid="986000123">
    <argvalue name="PersonInformation">
      <var name="AddresseePersonInformation" type="Th_1235000929_PersonInformation" />
    </argvalue>
  </fraglink>
</section>
<section id="1093480468" name="Details">
  <fraglink id="460316501" resid="1195000163">
    <argvalue name="currentDateTime">
      <var name="getSystemVariables.getCurrentDate" type="date" />
    </argvalue>
    <argvalue name="CommJob">
      <var name="CommJob" type="Th_1235000929_CommJob" />
    </argvalue>
    <argvalue name="ShowDOB" />
    <argvalue name="ShowYourRef" />
    <argvalue name="YourRefLabel" />
  </fraglink>
  <fraglink id="1026044336" resid="1235000070">
    <argvalue name="brandKey">
      <var name="CommJob.commJobDetails.brandingKey" type="string" />
    </argvalue>
    <argvalue name="brandSponsor">
      <var name="CommJob.client.policy.policyDetails.brandSponsor" type="string" />
    </argvalue>
  </fraglink>
</section>
<section id="2092948772" name="Important info">
  <frag id="1180564368" name="frag" no-match="error" type="text">
    <edition id="1178777425" name="Any" withdrawn="False">
      <edition-content>
        <p style="bodyTableHeader" align="left" xml:space="preserve">Important information</p>
        <p style="body" xml:space="preserve">In accordance with the terms and conditions of your policy, your claim has been classified as a <iif><expression><script language="JavaScript">CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Diagnosis" || CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Ill health"</script><description>the IPClaimCause of the CommJob's client policy insurances Insurance Coverages IPCover Claim equals "Diagnosis" or the IPClaimCause of the CommJob's client policy insurances Insurance Coverages IPCover Claim equals "Ill health"sicknessCommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Accident"the IPClaimCause of the CommJob's client policy insurances Insurance Coverages IPCover Claim equals "Accident"accident. Please assist us Please quote your policy and claim numbers / when returning your forms. Your claim has been received Your Thank you for sending your Initial Claim Form which we received on . We are sorry to hear of your recent .
Our assessment

The attending doctor's statement indicates you are claiming benefits as a result of , CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Diagnosis"the IPClaimCause of the CommJob's client policy Insurance Coverages first Coverage Claim equals "Diagnosis"which was diagnosedCommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Accident"the IPClaimCause of the CommJob's client policy Insurance Coverages first Coverage Claim equals "Accident"which first occurredCommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause == "Ill health"the IPClaimCause of the CommJob's client policy Insurance Coverages first Coverage Claim equals "Ill health"with symptoms commencing on . We note you ceased all work on and consulted your doctor IsNotMissing(CommJob.placeHolders.date.date5)the date5 of the CommJob's placeHolders date is not missingon this day also regarding your condition.

Further information is required

We've enclosed the following forms. Please complete these and return them to us so that we can continue assessing your claim. Progress claim form Attached questionnaire Authority to the Health Insurance Commission Medical authority <<other/ free format>> <<other/ free format>> Please be advised we've CommJob.placeHolders._boolean.boolean1 == truethe CommJob's placeHolders boolean is boolean1also requested the following information: Medical report from Dr Medicare history report from the Health Insurance Commission <<other/ free format>> <<other/ free format>> As your claim forms have been submitted months after you ceased work, our ability to properly assess your claim may have been prejudiced. In order to complete our assessment of your claim, the following information is required within 30 days of this letter: Reason for late lodgement of claim. Reason(s) you ceased work on (eg redundancy or due to medical condition). Name and contact details of all doctors and specialists you have consulted since you ceased work. Copies of any medical, radiology, pathology or other reports in your possession. Details of all treatment you have received since you ceased work. Whether you returned to work (paid or unpaid) in either a full-time or part-time capacity. If so, please provide the dates you worked, hours you worked, duties you performed and any income you received. Financial information for any other related entities (if applicable). If you are unable to supply the above information, please contact us by WriteText(FormatDateTime(DateAdd(getSystemVariables.getCurrentDate,"day",30),"dd MMMM yyyy")). To help in the ongoing assessment of your claim, you are required to be under the regular care and attendance of a medical practitioner. We've enclosed a Progress Claim Form which needs to be completed and returned to us by . False False False CC:

Different nodes to pick the values:- 1. 2.path form 5.

Values to be picked from the XML nodes and displayed in HTML are like:

CommJob.client.policy.Insurance.Coverages.Coverage[0].Claim.IPClaimCause
CommJob.commJobDetails.stockType
CommJob.commJobDetails.targetClient.targetClientName
CommJob.client.policy.policyDetails.policyStatus
CommJob.client.policy.policyDetails.productType
CommJob.commJobDetails.targetClient.targetClientName
........etc

Can anyone help me with a solution? This XSL transformation doesn't pick the values correctly; all I have is <xsl:template match="@*"/>. Any help on this would be great.

    Read the article

  • Building applications with WCF - Intro

    - by skjagini
I am going to write a series of articles using Windows Communication Foundation (WCF) to develop client and server applications, and this is the first part of that series.

What is WCF

As Juval Löwy puts it in his Programming WCF Services book, WCF provides an SDK for developing and deploying services on Windows, and a runtime environment to expose CLR types as services and consume services as CLR types. Building services with WCF is incredibly easy, and its implementation provides a set of industry standards and off-the-shelf plumbing, including service hosting, instance management, reliability, transaction management, security, etc., which greatly increases productivity.

Scenario: Let's consider a typical bank customer trying to create an account, deposit an amount, and transfer funds between accounts, i.e. checking and savings. To make it interesting, we are going to divide the functionality into multiple services, each of them working with the database directly. We will run test cases with and without transactional support across services. In this post we will build contracts, services, a data access layer, and unit tests to verify end-to-end communication; nothing big here, and we will dig into other features of WCF in subsequent posts with incremental changes.

In any distributed architecture we have two pieces, i.e. services and clients. Services, as the name implies, provide functionality to execute various pieces of business logic on the server, while clients provide interaction to the end user. Services can be built with Web Services or with WCF. Services built on WCF have the advantage of being binding-independent, i.e. they can run against the TCP or HTTP protocol without any significant changes to the code.

Solution

Services
  Profile: For creating a new bank customer, and getting details about an existing customer
    ProfileContract
    ProfileService
  Checking Account: To get the checking account balance, deposit or withdraw an amount
    CheckingAccountContract
    CheckingAccountService
  Savings Account: To get the savings account balance, deposit or withdraw an amount
    SavingsAccountContract
    SavingsAccountService
ServiceHost: To host the services, i.e. running the services at a particular address, binding and contract where clients can connect
Client: Helps the end user to use the services, like creating an account and transferring amounts between the accounts
BankDAL: Data access layer to work with the database

BankDAL

It's a no-brainer to use an ORM, as many mature products are currently available in the market, including Linq2Sql, Entity Framework (EF), LLBLGen Pro, etc. For this exercise I am going to use Entity Framework 4.0, CTP 5, with the code first approach. There are two approaches when working with data: data driven and code driven. In data driven we start by designing tables and their constraints in the database and generate entities in code, while in code driven (code first) the entities are defined in code and the metadata generated from the entities is used by EF to create the tables and table constraints. In previous versions the entity classes had to derive from EF-specific base classes. In EF 4 it is not required to derive from any EF classes; the entities are not only persistence ignorant but also enable full test-driven development using mock frameworks. The application consists of 3 entities: the Customer entity, which contains the customer details, and CheckingAccount and SavingsAccount, which hold the respective account balances.
We could have introduced an Account base class for CheckingAccount and SavingsAccount, which is certainly possible with EF mappings, but to keep it simple we are just going to follow 1-1 mappings between entities and tables. Let's start out by defining a class called Customer which will be mapped to the Customer table; observe that the class is simply a plain old CLR object (POCO) and has no reference to EF at all.

using System;

namespace BankDAL.Model
{
    public class Customer
    {
        public int Id { get; set; }
        public string FullName { get; set; }
        public string Address { get; set; }
        public DateTime DateOfBirth { get; set; }
    }
}

In order to inform EF about the Customer entity we have to define a database context with properties of type DbSet<> for every POCO which needs to be mapped to a table in the database. EF uses convention over configuration to generate the metadata, resulting in much less configuration.

using System.Data.Entity;

namespace BankDAL.Model
{
    public class BankDbContext : DbContext
    {
        public DbSet<Customer> Customers { get; set; }
    }
}

Entity constraints can be defined through attributes on the Customer class or using fluent syntax (no need to wrestle with XML files) in the CustomerConfiguration class. By defining constraints in a separate class we can maintain clean POCOs without corrupting the entity classes with database-specific information.

using System;
using System.Data.Entity.ModelConfiguration;

namespace BankDAL.Model
{
    public class CustomerConfiguration : EntityTypeConfiguration<Customer>
    {
        public CustomerConfiguration()
        {
            Initialize();
        }

        private void Initialize()
        {
            //Setting the Primary Key
            this.HasKey(e => e.Id);

            //Setting required fields
            this.HasRequired(e => e.FullName);
            this.HasRequired(e => e.Address);
            //Todo: Can't create required constraint as DateOfBirth is not reference type, research it
            //this.HasRequired(e => e.DateOfBirth);
        }
    }
}

Any queries executed against the Customers property in BankDbContext are executed against the Customers table. By convention EF looks for a connection string with the key BankDbContext when working with the context.

We are going to define a helper class to work with the Customer entity, with methods for querying, adding new entities, etc.; these are known as repository classes, i.e., CustomerRepository.

using System;
using System.Data.Entity;
using System.Linq;
using BankDAL.Model;

namespace BankDAL.Repositories
{
    public class CustomerRepository
    {
        private readonly IDbSet<Customer> _customers;

        public CustomerRepository(BankDbContext bankDbContext)
        {
            if (bankDbContext == null)
                throw new ArgumentNullException();
            _customers = bankDbContext.Customers;
        }

        public IQueryable<Customer> Query()
        {
            return _customers;
        }

        public void Add(Customer customer)
        {
            _customers.Add(customer);
        }
    }
}

From the above code it is observable that the Query method returns the customers as IQueryable, i.e. customers are retrieved only when actually used, i.e. iterated. Returning IQueryable also allows filtering and joining statements to be executed from the business logic using lambda expressions, without cluttering the data access layer with tens of methods.
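The account entities themselves are not shown in the article. Based on the members the repositories and services below rely on (CustomerId and Balance, plus the CheckingAccounts set on the context), a minimal sketch of what they might look like is:

using System.Data.Entity;

namespace BankDAL.Model
{
    // Assumed shape of the account entities; the property names are
    // inferred from their usage in the repositories and services below.
    public class CheckingAccount
    {
        public int Id { get; set; }
        public int CustomerId { get; set; }
        public decimal Balance { get; set; }
    }

    public class SavingsAccount
    {
        public int Id { get; set; }
        public int CustomerId { get; set; }
        public decimal Balance { get; set; }
    }
}

// The BankDbContext shown earlier would grow matching DbSet properties,
// since the repositories reference bankDbContext.CheckingAccounts:
//
//     public DbSet<CheckingAccount> CheckingAccounts { get; set; }
//     public DbSet<SavingsAccount> SavingsAccounts { get; set; }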
Our CheckingAccountRepository and SavingsAccountRepository look very similar to each other.

using System;
using System.Data.Entity;
using System.Linq;
using BankDAL.Model;

namespace BankDAL.Repositories
{
    public class CheckingAccountRepository
    {
        private readonly IDbSet<CheckingAccount> _checkingAccounts;

        public CheckingAccountRepository(BankDbContext bankDbContext)
        {
            if (bankDbContext == null)
                throw new ArgumentNullException();
            _checkingAccounts = bankDbContext.CheckingAccounts;
        }

        public IQueryable<CheckingAccount> Query()
        {
            return _checkingAccounts;
        }

        public void Add(CheckingAccount account)
        {
            _checkingAccounts.Add(account);
        }

        public IQueryable<CheckingAccount> GetAccount(int customerId)
        {
            return (from act in _checkingAccounts
                    where act.CustomerId == customerId
                    select act);
        }
    }
}

The repository classes look very similar to each other for the Query and Add methods; with the help of C# generics and the repository pattern (Martin Fowler) we can reduce the repeated code. Jarod from ElegantCode has posted an article on how to use the repository pattern with EF, which we will implement in subsequent articles along with WCF Unity lifetime managers by Drew.

Contracts

It is very easy to follow a contract-first approach with WCF: define the interface and append the ServiceContract and OperationContract attributes. The IProfile contract exposes functionality for creating a customer and getting customer details.

using System;
using System.ServiceModel;
using BankDAL.Model;

namespace ProfileContract
{
    [ServiceContract]
    public interface IProfile
    {
        [OperationContract]
        Customer CreateCustomer(string customerName, string address, DateTime dateOfBirth);

        [OperationContract]
        Customer GetCustomer(int id);
    }
}

The ICheckingAccount contract exposes functionality for working with the checking account, i.e., getting the balance, and depositing and withdrawing amounts. ISavingsAccount looks the same as the checking account contract.

using System.ServiceModel;

namespace CheckingAccountContract
{
    [ServiceContract]
    public interface ICheckingAccount
    {
        [OperationContract]
        decimal? GetCheckingAccountBalance(int customerId);

        [OperationContract]
        void DepositAmount(int customerId, decimal amount);

        [OperationContract]
        void WithdrawAmount(int customerId, decimal amount);
    }
}

Services

Having covered the data access layer and contracts, here comes the core of the business logic, i.e. the services.
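Before moving into the services, here is a rough sketch of the generic-repository refactoring mentioned above. It is one possible shape under the same EF CTP 5 types used in this post, not the implementation from Jarod's article.

using System;
using System.Data.Entity;
using System.Linq;

namespace BankDAL.Repositories
{
    // A single generic repository replaces the near-identical Query/Add
    // pairs in CustomerRepository, CheckingAccountRepository and
    // SavingsAccountRepository.
    public class Repository<T> where T : class
    {
        private readonly IDbSet<T> _set;

        public Repository(IDbSet<T> set)
        {
            if (set == null)
                throw new ArgumentNullException("set");
            _set = set;
        }

        public IQueryable<T> Query()
        {
            return _set;
        }

        public void Add(T entity)
        {
            _set.Add(entity);
        }
    }
}

Entity-specific queries such as GetAccount(customerId) can then either stay in thin, strongly-typed wrappers or be expressed as lambda filters over Query() at the call site.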
ProfileService implements the IProfile contract for creating a customer and getting customer details using CustomerRepository.
using System;
using System.Linq;
using System.ServiceModel;
using BankDAL;
using BankDAL.Model;
using BankDAL.Repositories;
using ProfileContract;

namespace ProfileService
{
    [ServiceBehavior(IncludeExceptionDetailInFaults = true)]
    public class Profile : IProfile
    {
        public Customer CreateAccount(string customerName, string address, DateTime dateOfBirth)
        {
            Customer cust = new Customer
            {
                FullName = customerName,
                Address = address,
                DateOfBirth = dateOfBirth
            };

            using (var bankDbContext = new BankDbContext())
            {
                new CustomerRepository(bankDbContext).Add(cust);
                bankDbContext.SaveChanges();
            }
            return cust;
        }

        public Customer CreateCustomer(string customerName, string address, DateTime dateOfBirth)
        {
            return CreateAccount(customerName, address, dateOfBirth);
        }

        public Customer GetCustomer(int id)
        {
            return new CustomerRepository(new BankDbContext()).Query()
                .Where(i => i.Id == id).FirstOrDefault();
        }
    }
}

From the above code you will observe that we are calling bankDbContext's SaveChanges method, and there is no save method specific to the Customer entity, because EF manages all changes centrally at the context level; all pending changes so far are submitted in a batch, and this is represented as a Unit of Work. Similarly, the Checking service implements the ICheckingAccount contract using CheckingAccountRepository; notice that we are throwing an overdraft exception if the balance falls below zero. WCF has its own way of raising exceptions using fault contracts, which will be explained in subsequent articles. SavingsAccountService is similar to CheckingAccountService.

using System;
using System.Linq;
using System.ServiceModel;
using BankDAL.Model;
using BankDAL.Repositories;
using CheckingAccountContract;

namespace CheckingAccountService
{
    [ServiceBehavior(IncludeExceptionDetailInFaults = true)]
    public class Checking : ICheckingAccount
    {
        public decimal? GetCheckingAccountBalance(int customerId)
        {
            using (var bankDbContext = new BankDbContext())
            {
                CheckingAccount account = (new CheckingAccountRepository(bankDbContext)
                    .GetAccount(customerId)).FirstOrDefault();

                if (account != null)
                    return account.Balance;

                return null;
            }
        }

        public void DepositAmount(int customerId, decimal amount)
        {
            using (var bankDbContext = new BankDbContext())
            {
                var checkingAccountRepository = new CheckingAccountRepository(bankDbContext);
                CheckingAccount account = (checkingAccountRepository.GetAccount(customerId))
                    .FirstOrDefault();

                if (account == null)
                {
                    account = new CheckingAccount() { CustomerId = customerId };
                    checkingAccountRepository.Add(account);
                }

                account.Balance = account.Balance + amount;
                if (account.Balance < 0)
                    throw new ApplicationException("Overdraft not accepted");

                bankDbContext.SaveChanges();
            }
        }

        public void WithdrawAmount(int customerId, decimal amount)
        {
            DepositAmount(customerId, -1 * amount);
        }
    }
}

BankServiceHost

The host acts as glue, binding the contracts to their services and exposing the endpoints. The services can be exposed either through code or through a configuration file; the configuration file is preferred as it allows runtime changes to service behavior even after deployment. We have 3 services, and for each service you need to define a name (the class that implements the service, with a fully qualified namespace) and an endpoint known as ABC, i.e. address, binding and contract.
We are using netTcpBinding and have defined the base address for each of the contracts.

<system.serviceModel>
  <services>
    <service name="ProfileService.Profile">
      <endpoint binding="netTcpBinding" contract="ProfileContract.IProfile"/>
      <host>
        <baseAddresses>
          <add baseAddress="net.tcp://localhost:1000/Profile"/>
        </baseAddresses>
      </host>
    </service>
    <service name="CheckingAccountService.Checking">
      <endpoint binding="netTcpBinding" contract="CheckingAccountContract.ICheckingAccount"/>
      <host>
        <baseAddresses>
          <add baseAddress="net.tcp://localhost:1000/Checking"/>
        </baseAddresses>
      </host>
    </service>
    <service name="SavingsAccountService.Savings">
      <endpoint binding="netTcpBinding" contract="SavingsAccountContract.ISavingsAccount"/>
      <host>
        <baseAddresses>
          <add baseAddress="net.tcp://localhost:1000/Savings"/>
        </baseAddresses>
      </host>
    </service>
  </services>
</system.serviceModel>

We have to open the services by creating a service host, which will handle the incoming requests from clients.

using System;

namespace ServiceHost
{
    class Program
    {
        static void Main(string[] args)
        {
            CreateHosts();
            Console.ReadLine();
        }

        private static void CreateHosts()
        {
            CreateHost(typeof(ProfileService.Profile), "Profile Service");
            CreateHost(typeof(SavingsAccountService.Savings), "Savings Account Service");
            CreateHost(typeof(CheckingAccountService.Checking), "Checking Account Service");
        }

        private static void CreateHost(Type type, string hostDescription)
        {
            System.ServiceModel.ServiceHost host = new System.ServiceModel.ServiceHost(type);
            host.Open();

            if (host.ChannelDispatchers != null && host.ChannelDispatchers.Count != 0
                && host.ChannelDispatchers[0].Listener != null)
                Console.WriteLine("Started: " + host.ChannelDispatchers[0].Listener.Uri);
            else
                Console.WriteLine("Failed to start:" + hostDescription);
        }
    }
}

BankClient

The client has no knowledge about the service business logic other than the functionality it exposes through the contract, the endpoints and a proxy to work against. The endpoint data and service proxy can be generated by right-clicking on the project references, choosing 'Add Service Reference' and entering the service endpoint address. Or, if you have access to the source, you can manually reference the contract dlls and update the client's configuration file to point to the service endpoint, if the server and client both happen to be built on the .NET Framework. One of the pros of the manual approach is that you don't have to work against messy generated code files.
<system.serviceModel>
  <client>
    <endpoint name="tcpProfile" address="net.tcp://localhost:1000/Profile"
      binding="netTcpBinding" contract="ProfileContract.IProfile"/>
    <endpoint name="tcpCheckingAccount" address="net.tcp://localhost:1000/Checking"
      binding="netTcpBinding" contract="CheckingAccountContract.ICheckingAccount"/>
    <endpoint name="tcpSavingsAccount" address="net.tcp://localhost:1000/Savings"
      binding="netTcpBinding" contract="SavingsAccountContract.ISavingsAccount"/>
  </client>
</system.serviceModel>

The client uses a façade to connect to the services.

using System.ServiceModel;
using CheckingAccountContract;
using ProfileContract;
using SavingsAccountContract;

namespace Client
{
    public class ProxyFacade
    {
        public static IProfile ProfileProxy()
        {
            return (new ChannelFactory<IProfile>("tcpProfile")).CreateChannel();
        }

        public static ICheckingAccount CheckingAccountProxy()
        {
            return (new ChannelFactory<ICheckingAccount>("tcpCheckingAccount"))
                .CreateChannel();
        }

        public static ISavingsAccount SavingsAccountProxy()
        {
            return (new ChannelFactory<ISavingsAccount>("tcpSavingsAccount"))
                .CreateChannel();
        }
    }
}

With that in place, let's get our unit tests going.

using System;
using System.Diagnostics;
using BankDAL.Model;
using NUnit.Framework;
using ProfileContract;

namespace Client
{
    [TestFixture]
    public class Tests
    {
        private void TransferFundsFromSavingsToCheckingAccount(int customerId, decimal amount)
        {
            ProxyFacade.CheckingAccountProxy().DepositAmount(customerId, amount);
            ProxyFacade.SavingsAccountProxy().WithdrawAmount(customerId, amount);
        }

        private void TransferFundsFromCheckingToSavingsAccount(int customerId, decimal amount)
        {
            ProxyFacade.SavingsAccountProxy().DepositAmount(customerId, amount);
            ProxyFacade.CheckingAccountProxy().WithdrawAmount(customerId, amount);
        }

        [Test]
        public void CreateAndGetProfileTest()
        {
            IProfile profile = ProxyFacade.ProfileProxy();
            const string customerName = "Tom";
            int customerId = profile.CreateCustomer(customerName, "NJ", new DateTime(1982, 1, 1)).Id;
            Customer customer = profile.GetCustomer(customerId);
            Assert.AreEqual(customerName, customer.FullName);
        }

        [Test]
        public void DepositWithDrawAndTransferAmountTest()
        {
            IProfile profile = ProxyFacade.ProfileProxy();
            string customerName = "Smith" + DateTime.Now.ToString("HH:mm:ss");
            var customer = profile.CreateCustomer(customerName, "NJ", new DateTime(1982, 1, 1));

            // Deposit to Savings
            ProxyFacade.SavingsAccountProxy().DepositAmount(customer.Id, 100);
            ProxyFacade.SavingsAccountProxy().DepositAmount(customer.Id, 25);
            Assert.AreEqual(125, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customer.Id));
            // Withdraw
            ProxyFacade.SavingsAccountProxy().WithdrawAmount(customer.Id, 30);
            Assert.AreEqual(95, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customer.Id));

            // Deposit to Checking
            ProxyFacade.CheckingAccountProxy().DepositAmount(customer.Id, 60);
            ProxyFacade.CheckingAccountProxy().DepositAmount(customer.Id, 40);
            Assert.AreEqual(100, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customer.Id));
            // Withdraw
            ProxyFacade.CheckingAccountProxy().WithdrawAmount(customer.Id, 30);
            Assert.AreEqual(70, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customer.Id));

            // Transfer from Savings to Checking
            TransferFundsFromSavingsToCheckingAccount(customer.Id, 10);
            Assert.AreEqual(85, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customer.Id));
            Assert.AreEqual(80, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customer.Id));

            // Transfer from Checking to Savings
            TransferFundsFromCheckingToSavingsAccount(customer.Id, 50);
            Assert.AreEqual(135, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customer.Id));
            Assert.AreEqual(30, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customer.Id));
        }

        [Test]
        public void FundTransfersWithOverDraftTest()
        {
            IProfile profile = ProxyFacade.ProfileProxy();
            string customerName = "Angelina" + DateTime.Now.ToString("HH:mm:ss");

            var customerId = profile.CreateCustomer(customerName, "NJ", new DateTime(1972, 1, 1)).Id;

            ProxyFacade.SavingsAccountProxy().DepositAmount(customerId, 100);
            TransferFundsFromSavingsToCheckingAccount(customerId, 80);
            Assert.AreEqual(20, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customerId));
            Assert.AreEqual(80, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customerId));

            try
            {
                TransferFundsFromSavingsToCheckingAccount(customerId, 30);
            }
            catch (Exception e)
            {
                Debug.WriteLine(e.Message);
            }

            Assert.AreEqual(110, ProxyFacade.CheckingAccountProxy().GetCheckingAccountBalance(customerId));
            Assert.AreEqual(20, ProxyFacade.SavingsAccountProxy().GetSavingsAccountBalance(customerId));
        }
    }
}

We are creating a new instance of the channel for every operation; we will look into instance management, and how creating a new instance of the channel affects it, in subsequent articles. The first two test cases deal with the creation of a customer, and deposits, withdrawals and transfers between the accounts. The last case, FundTransfersWithOverDraftTest(), is interesting. The customer starts by depositing $100 in the savings account, followed by a transfer of $80 into the checking account, leaving $20 in savings. The customer then initiates a $30 transfer from savings to checking, resulting in an overdraft exception on savings with $30 already deposited to checking. As we are not running both requests in a transaction, the customer ends up with more money than he started with. In subsequent posts we will look into transaction handling. Make sure the ServiceHost project is set as the startup project and start the solution. Run the test cases either from the NUnit client or TestDriven.Net/ReSharper, whichever is your favorite tool. Make sure you have updated the database connection string in the ServiceHost config file to point to your local database.
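One thing the tests gloss over: each ProxyFacade call creates a new channel that is never closed. A minimal sketch of closing a channel after use is shown below; it follows the standard WCF close/abort pattern and is not part of the original sample (the customer id is a placeholder).

using System.ServiceModel;
using ProfileContract;

namespace Client
{
    class ChannelUsageExample
    {
        static void Main()
        {
            IProfile proxy = ProxyFacade.ProfileProxy();
            try
            {
                // Hypothetical call; id 1 is just an example value.
                var customer = proxy.GetCustomer(1);
            }
            finally
            {
                // Channels created by ChannelFactory implement IClientChannel;
                // Close() releases the connection, Abort() cleans up on failure.
                var channel = (IClientChannel)proxy;
                try { channel.Close(); }
                catch { channel.Abort(); }
            }
        }
    }
}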

    Read the article

  • Solving embarrassingly parallel problems using Python multiprocessing

    - by gotgenes
How does one use multiprocessing to tackle embarrassingly parallel problems? Embarrassingly parallel problems typically consist of three basic parts:

1. Read input data (from a file, database, tcp connection, etc.).
2. Run calculations on the input data, where each calculation is independent of any other calculation.
3. Write results of calculations (to a file, database, tcp connection, etc.).

We can parallelize the program in two dimensions:

- Part 2 can run on multiple cores, since each calculation is independent; order of processing doesn't matter.
- Each part can run independently. Part 1 can place data on an input queue, part 2 can pull data off the input queue and put results onto an output queue, and part 3 can pull results off the output queue and write them out.

This seems a most basic pattern in concurrent programming, but I am still lost in trying to solve it, so let's write a canonical example to illustrate how this is done using multiprocessing. Here is the example problem: given a CSV file with rows of integers as input, compute their sums. Separate the problem into three parts, which can all run in parallel:

1. Process the input file into raw data (lists/iterables of integers)
2. Calculate the sums of the data, in parallel
3. Output the sums

Below is a traditional, single-process-bound Python program which solves these three tasks:

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# basicsums.py
"""A program that reads integer values from a CSV file and writes out their
sums to another CSV file.

"""

import csv
import optparse
import sys

def make_cli_parser():
    """Make the command line interface parser."""
    usage = "\n\n".join(["python %prog INPUT_CSV OUTPUT_CSV",
            __doc__,
            """
ARGUMENTS:
    INPUT_CSV: an input CSV file with rows of numbers
    OUTPUT_CSV: an output file that will contain the sums\
"""])
    cli_parser = optparse.OptionParser(usage)
    return cli_parser

def parse_input_csv(csvfile):
    """Parses the input CSV and yields tuples with the index of the row
    as the first element, and the integers of the row as the second
    element. The index is zero-index based.

    :Parameters:
    - `csvfile`: a `csv.reader` instance

    """
    for i, row in enumerate(csvfile):
        row = [int(entry) for entry in row]
        yield i, row

def sum_rows(rows):
    """Yields a tuple with the index of each input list of integers
    as the first element, and the sum of the list of integers as the
    second element. The index is zero-index based.

    :Parameters:
    - `rows`: an iterable of tuples, with the index of the original row
      as the first element, and a list of integers as the second element

    """
    for i, row in rows:
        yield i, sum(row)

def write_results(csvfile, results):
    """Writes a series of results to an outfile, where the first column
    is the index of the original row of data, and the second column is
    the result of the calculation. The index is zero-index based.

    :Parameters:
    - `csvfile`: a `csv.writer` instance to which to write results
    - `results`: an iterable of tuples, with the index (zero-based) of
      the original row as the first element, and the calculated result
      from that row as the second element

    """
    for result_row in results:
        csvfile.writerow(result_row)

def main(argv):
    cli_parser = make_cli_parser()
    opts, args = cli_parser.parse_args(argv)
    if len(args) != 2:
        cli_parser.error("Please provide an input file and output file.")
    infile = open(args[0])
    in_csvfile = csv.reader(infile)
    outfile = open(args[1], 'w')
    out_csvfile = csv.writer(outfile)
    # gets an iterable of rows that's not yet evaluated
    input_rows = parse_input_csv(in_csvfile)
    # sends the rows iterable to sum_rows() for results iterable, but
    # still not evaluated
    result_rows = sum_rows(input_rows)
    # finally evaluation takes place as a chain in write_results()
    write_results(out_csvfile, result_rows)
    infile.close()
    outfile.close()

if __name__ == '__main__':
    main(sys.argv[1:])

Let's take this program and rewrite it to use multiprocessing to parallelize the three parts outlined above. Below is a skeleton of this new, parallelized program, which needs to be fleshed out to address the parts in the comments:

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# multiproc_sums.py
"""A program that reads integer values from a CSV file and writes out their
sums to another CSV file, using multiple processes if desired.

"""

import csv
import multiprocessing
import optparse
import sys

NUM_PROCS = multiprocessing.cpu_count()

def make_cli_parser():
    """Make the command line interface parser."""
    usage = "\n\n".join(["python %prog INPUT_CSV OUTPUT_CSV",
            __doc__,
            """
ARGUMENTS:
    INPUT_CSV: an input CSV file with rows of numbers
    OUTPUT_CSV: an output file that will contain the sums\
"""])
    cli_parser = optparse.OptionParser(usage)
    cli_parser.add_option('-n', '--numprocs', type='int',
            default=NUM_PROCS,
            help="Number of processes to launch [DEFAULT: %default]")
    return cli_parser

def main(argv):
    cli_parser = make_cli_parser()
    opts, args = cli_parser.parse_args(argv)
    if len(args) != 2:
        cli_parser.error("Please provide an input file and output file.")
    infile = open(args[0])
    in_csvfile = csv.reader(infile)
    outfile = open(args[1], 'w')
    out_csvfile = csv.writer(outfile)

    # Parse the input file and add the parsed data to a queue for
    # processing, possibly chunking to decrease communication between
    # processes.

    # Process the parsed data as soon as any (chunks) appear on the
    # queue, using as many processes as allotted by the user
    # (opts.numprocs); place results on a queue for output.
    #
    # Terminate processes when the parser stops putting data in the
    # input queue.

    # Write the results to disk as soon as they appear on the output
    # queue.

    # Ensure all child processes have terminated.

    # Clean up files.
    infile.close()
    outfile.close()

if __name__ == '__main__':
    main(sys.argv[1:])

These pieces of code, as well as another piece of code that can generate example CSV files for testing purposes, can be found on github. I would appreciate any insight here as to how you concurrency gurus would approach this problem. Here are some questions I had when thinking about this problem. Bonus points for addressing any/all:

- Should I have child processes for reading in the data and placing it into the queue, or can the main process do this without blocking until all input is read?
- Likewise, should I have a child process for writing the results out from the processed queue, or can the main process do this without having to wait for all the results?
- Should I use a process pool for the sum operations? If yes, what method do I call on the pool to get it to start processing the results coming into the input queue, without blocking the input and output processes, too? apply_async()? map_async()? imap()? imap_unordered()?
- Suppose we didn't need to siphon off the input and output queues as data entered them, but could wait until all input was parsed and all results were calculated (e.g., because we know all the input and output will fit in system memory). Should we change the algorithm in any way (e.g., not run any processes concurrently with I/O)?

    Read the article

  • problem with tinymce textarea in dynamically added jquery tabs

    - by kranthi
I have an aspx page (Default1.aspx) in which I have a static jQuery tab and an anchor tag; upon clicking the anchor tag (Add Tab) I add a new tab dynamically, which gets its contents loaded from another aspx page (Default2.aspx). This second page contains some text inside a tag, and a textarea with the 'tinymce' class which is placed inside a div with 'style="display:none"'; this textarea gets displayed only upon clicking the edit button on that page. The HTML of the Default1.aspx page looks like this:

<head runat="server">
    <title>Untitled Page</title>
    <script src="js/jquery-1.3.2.min.js" type="text/javascript"></script>
    <script src="js/jquery-ui-1.7.2.custom.min.js" type="text/javascript"></script>
    <link href="css/custom-theme/jquery-ui-1.7.2.custom.css" rel="stylesheet" type="text/css" />
    <link href="css/widgets.css" rel="stylesheet" type="text/css" />
    <link href="css/print.css" rel="stylesheet" type="text/css" />
    <link href="css/reset.css" rel="stylesheet" type="text/css" />
    <script type="text/javascript" src="js/tiny_mce/jquery.tinymce.js"></script>
    <script type="text/javascript">
    $(function() {
        //DECLARE FUNCTION: removetab
        var removetab = function(tabselector, index) {
            $(".removetab").click(function(){
                $(tabselector).tabs('remove',index);
            });
        };
        //create tabs
        $("#tabs").tabs({
            add: function(event, ui) {
                //select newely opened tab
                $(this).tabs('select',ui.index);
                //load function to close tab
                removetab($(this), ui.index);
            },
            show: function(event, ui) {
                if($.fn.tinymce) {
                    $('textarea.tinymce').tinymce({
                        // Location of TinyMCE script
                        script_url : 'js/tiny_mce/tiny_mce.js',
                        // General options
                        theme : "advanced",
                        plugins : "safari,style,layer,table,advhr,advimage,advlink,inlinepopups,insertdatetime,preview,media,searchreplace,print,contextmenu,paste,directionality,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras,template",
                        // Theme options
                        theme_advanced_buttons1 : "bold,italic,underline,strikethrough,|,bullist,numlist,|,justifyleft,justifycenter,justifyright,justifyfull,styleselect,formatselect,fontselect,fontsizeselect",
                        theme_advanced_buttons2 : "outdent,indent,blockquote,|,undo,redo,|,link,unlink,anchor,image,cleanup,help,code,|,insertdate,inserttime,preview,|,forecolor,backcolor",
                        theme_advanced_buttons3 : "sub,sup,|,ltr,rtl,|,fullscreen",
                        theme_advanced_toolbar_location : "top",
                        theme_advanced_toolbar_align : "left"
                        /*theme_advanced_statusbar_location : "bottom",*/
                        /*theme_advanced_resizing : true,*/
                    });
                }
                //load function to close selected tabs
                removetab($(this), ui.index);
            }
        });
        //load new tab
        $(".addtab").click(function(){
            var href=$(this).attr("href");
            var title=$(this).attr("title");
            $("#tabs").tabs( 'add' , href , title+' <span class="removetab ui-icon ui-icon-circle-close" style="float:right; margin: -2px -10px 0px 3px; cursor:pointer;"></span>');
            return false;
        });
    });
    function showEditFields(){
        $('.edit').css('display','inline');
    }
    </script>
</head>
<body>
    <form id="form1" runat="server">
    <div>
        <a class="addtab" title="Tab Label" href="HTMLPage.htm">Add Tab</a>
        <div id="tabs">
            <ul>
                <li><a href="#tabs-1">Default Tab</a></li>
            </ul>
            <div id="tabs-1">
                <p>Etiam aliquet massa et lorem. Mauris dapibus lacus auctor risus. Aenean tempor ullamcorper leo. Vivamus sed magna quis ligula eleifend adipiscing. Duis orci. Aliquam Proin elit arcu, rutrum commodo, vehicula tempus, commodo a, risus. Curabitur nec arcu. Donec sollicitudin mi sit amet mauris. Nam elementum quam ullamcorper ante.sodales tortor vitae ipsum. Aliquam nulla. Duis aliquam molestie erat.
Ut et mauris vel pede varius sollicitudin. Sed ut dolor nec orci tincidunt interdum. Phasellus ipsum. Nunc tristique tempus lectus.</p>
            </div>
        </div>
    </div>
    </form>
</body>

and the HTML of Default2.aspx looks like this:

<head>
</head>
<body>
    <form id="form1" runat="server">
    <div class="demo">
        <p>Proin elit arcu, rutrum commodo, vehicula tempus, commodo a, risus. Curabitur nec arcu. Donec sollicitudin mi sit amet mauris. Nam elementum quam ullamcorper ante. Etiam aliquet massa et lorem. Mauris dapibus lacus auctor risus. Aenean tempor ullamcorper leo. Vivamus sed magna quis ligula eleifend adipiscing. Duis orci. Aliquam sodales tortor vitae ipsum. Aliquam nulla. Duis aliquam molestie erat. Ut et mauris vel pede varius sollicitudin. Sed ut dolor nec orci tincidunt interdum. Phasellus ipsum. Nunc tristique tempus lectus.
        <div class="edit" style="display:none">
            <textarea style="height:80px; width:100%" class="tinymce" name="" rows="8" runat="server" id="txtans">answer text goes here
            </textarea>
        </div>
        <input id="Button1" type="button" value="edit" onclick="showEditFields();" />
        </p>
    </form>
</body>

So when I click on the "edit" button available on Default2.aspx, the textarea with TinyMCE should appear, and I can add as many tabs as I want from Default1.aspx by clicking on Add Tab (anchor), which loads multiple tabs with content from Default2.aspx. After adding these multiple tabs, if I check whether all the textareas have TinyMCE, I notice that only the 1st tab contains a textarea with TinyMCE; in all the other tabs TinyMCE doesn't show up and simply the normal textarea appears. Could someone please help me with this? Thanks.

    Read the article

  • Communicating between C# application and Android app via Bluetooth

    - by Akki
    The Android application acts as a server in this case. I have a main activity which creates a thread to handle the serverSocket and a different thread to handle the socket connection. I am using a UUID common to C# and Android, and I am using the 32feet Bluetooth library for C#. The errors I am facing are: 1) My logcat shows this debug log: Error while doing socket.connect()1 java.io.IOException: File descriptor in bad state Message: File descriptor in bad state Localized Message: File descriptor in bad state Received : Testing Connection Count of Thread is : 1 2) When I try to send something via the C# app the second time, this exception is thrown: A first chance exception of type 'System.InvalidOperationException' occurred in System.dll System.InvalidOperationException: BeginConnect cannot be called while another asynchronous operation is in progress on the same Socket. at System.Net.Sockets.Socket.DoBeginConnect(EndPoint endPointSnapshot, SocketAddress socketAddress, LazyAsyncResult asyncResult) at System.Net.Sockets.Socket.BeginConnect(EndPoint remoteEP, AsyncCallback callback, Object state) at InTheHand.Net.Bluetooth.Msft.SocketBluetoothClient.BeginConnect(BluetoothEndPoint remoteEP, AsyncCallback requestCallback, Object state) at InTheHand.Net.Sockets.BluetoothClient.BeginConnect(BluetoothEndPoint remoteEP, AsyncCallback requestCallback, Object state) at InTheHand.Net.Sockets.BluetoothClient.BeginConnect(BluetoothAddress address, Guid service, AsyncCallback requestCallback, Object state) at BTSyncClient.Form1.connect() in c:\users\----\documents\visual studio 2010\Projects\TestClient\TestClient\Form1.cs:line 154 I only know Android application programming and I put the C# side together by learning bits and pieces. FYI, my Android phone is a Galaxy S running ICS. Please point out my mistakes.
Source codes : C# Code using System; using System.Collections.Generic; using System.ComponentModel; using System.Data; using System.Drawing; using System.Linq; using System.Text; using System.Windows.Forms; using System.Threading; using System.Net.Sockets; using InTheHand.Net.Bluetooth; using InTheHand.Windows.Forms; using InTheHand.Net.Sockets; using InTheHand.Net; namespace BTSyncClient { public partial class Form1 : Form { BluetoothClient myself; BluetoothDeviceInfo bTServerDevice; private Guid uuid = Guid.Parse("00001101-0000-1000-8000-00805F9B34FB"); bool isConnected; public Form1() { InitializeComponent(); if (BluetoothRadio.IsSupported) { myself = new BluetoothClient(); } } private void Form1_Load(object sender, EventArgs e) { } private void button1_Click(object sender, EventArgs e) { connect(); } private void Form1_FormClosing(object sender, FormClosingEventArgs e) { try { myself.GetStream().Close(); myself.Dispose(); } catch (Exception ex) { Console.Out.WriteLine(ex); } } private void button2_Click(object sender, EventArgs e) { SelectBluetoothDeviceDialog dialog = new SelectBluetoothDeviceDialog(); DialogResult result = dialog.ShowDialog(this); if(result.Equals(DialogResult.OK)){ bTServerDevice = dialog.SelectedDevice; } } private void callback(IAsyncResult ar) { String msg = (String)ar.AsyncState; if (ar.IsCompleted) { isConnected = myself.Connected; if (myself.Connected) { UTF8Encoding encoder = new UTF8Encoding(); NetworkStream stream = myself.GetStream(); if (!stream.CanWrite) { MessageBox.Show("Stream is not Writable"); } System.IO.StreamWriter mywriter = new System.IO.StreamWriter(stream, Encoding.UTF8); mywriter.WriteLine(msg); mywriter.Flush(); } else MessageBox.Show("Damn thing isnt connected"); } } private void connect() { try { if (bTServerDevice != null) { myself.BeginConnect(bTServerDevice.DeviceAddress, uuid, new AsyncCallback(callback) , message.Text); } } catch (Exception e) { Console.Out.WriteLine(e); } } } } Server Thread import java.io.IOException; import java.util.UUID; import android.bluetooth.BluetoothAdapter; import android.bluetooth.BluetoothServerSocket; import android.bluetooth.BluetoothSocket; import android.util.Log; public class ServerSocketThread extends Thread { private static final String TAG = "TestApp"; private BluetoothAdapter btAdapter; private BluetoothServerSocket serverSocket; private boolean stopMe; private static final UUID uuid = UUID.fromString("00001101-0000-1000-8000-00805F9B34FB"); //private static final UUID uuid = UUID.fromString("6e58c9d5-b0b6-4009-ad9b-fd9481aef9b3"); private static final String SERVICE_NAME = "TestService"; public ServerSocketThread() { stopMe = false; btAdapter = BluetoothAdapter.getDefaultAdapter(); try { serverSocket = btAdapter.listenUsingRfcommWithServiceRecord(SERVICE_NAME, uuid); } catch (IOException e) { Log.d(TAG,e.toString()); } } public void signalStop(){ stopMe = true; } public void run(){ Log.d(TAG,"In ServerThread"); BluetoothSocket socket = null; while(!stopMe){ try { socket = serverSocket.accept(); } catch (IOException e) { break; } if(socket != null){ AcceptThread newClientConnection = new AcceptThread(socket); newClientConnection.start(); } } Log.d(TAG,"Server Thread now dead"); } // Will cancel the listening socket and cause the thread to finish public void cancel(){ try { serverSocket.close(); } catch (IOException e) { } } } Accept Thread import java.io.IOException; import java.io.InputStream; import java.util.Scanner; import android.bluetooth.BluetoothSocket; import android.util.Log; public 
class AcceptThread extends Thread { private BluetoothSocket socket; private String TAG = "TestApp"; static int count = 0; public AcceptThread(BluetoothSocket Socket) { socket = Socket; } volatile boolean isError; String output; String error; public void run() { Log.d(TAG, "AcceptThread Started"); isError = false; try { socket.connect(); } catch (IOException e) { Log.d(TAG,"Error while doing socket.connect()"+ ++count); Log.d(TAG, e.toString()); Log.d(TAG,"Message: "+e.getLocalizedMessage()); Log.d(TAG,"Localized Message: "+e.getMessage()); isError = true; } InputStream in = null; try { in = socket.getInputStream(); } catch (IOException e) { Log.d(TAG,"Error while doing socket.getInputStream()"); Log.d(TAG, e.toString()); Log.d(TAG,"Message: "+e.getLocalizedMessage()); Log.d(TAG,"Localized Message: "+e.getMessage()); isError = true; } Scanner istream = new Scanner(in); if (istream.hasNextLine()) { Log.d(TAG, "Received : "+istream.nextLine()); Log.d(TAG,"Count of Thread is : " + count); } istream.close(); try { in.close(); } catch (IOException e) { Log.d(TAG,"Error while doing in.close()"); Log.d(TAG, e.toString()); Log.d(TAG,"Message: "+e.getLocalizedMessage()); Log.d(TAG,"Localized Message: "+e.getMessage()); isError = true; } try { socket.close(); } catch (IOException e) { Log.d(TAG,"Error while doing socket.close()"); Log.d(TAG, e.toString()); Log.d(TAG,"Message: "+e.getLocalizedMessage()); Log.d(TAG,"Localized Message: "+e.getMessage()); isError = true; } } }

    Read the article

  • Part 2 – Load Testing In The Cloud

    - by Tarun Arora
    Welcome to Part 2. In Part 1 we discussed the advantages of creating a Test Rig in the cloud, the Azure edge and the Test Rig Topology we want to get to. In Part 2, let's start by understanding the components of Azure we'll be making use of, followed by manually putting them together to create the test rig. So… let's get down and dirty and start setting up the Test Rig.  What Components of Azure will I be using for building the Test Rig in the Cloud? To run the Test Agents we'll make use of Windows Azure Compute, and to enable communication between the Test Controller and Test Agents we'll make use of Windows Azure Connect.  Azure Connect The Test Controller is on premise and the Test Agents are in the cloud (How will they talk?). To enable communication between the two, we'll make use of Windows Azure Connect. With Windows Azure Connect, you can use a simple user interface to configure IPsec protected connections between computers or virtual machines (VMs) in your organization's network, and roles running in Windows Azure. With this you can now join Windows Azure role instances to your domain, so that you can use your existing methods for domain authentication, name resolution, or other domain-wide maintenance actions. For more details refer to an overview of Windows Azure Connect. A very useful video explains everything you wanted to know about Windows Azure Connect.  Azure Compute Windows Azure Compute provides developers a platform to host and manage applications in Microsoft's data centres across the globe. A Windows Azure application is built from one or more components called 'roles.' Roles come in three different types: Web role, Worker role, and Virtual Machine (VM) role; we'll be using the Worker role to set up the Test Agents. A very nice blog post discusses the difference between the 3 role types. Developers are free to use the .NET framework or other software that runs on Windows with the Worker role or Web role. Developers can also create applications using languages such as PHP and Java. More on Windows Azure Compute. Each Windows Azure compute instance represents a virtual server:
    Virtual Machine Size | CPU Cores | Memory   | Cost Per Hour
    Extra Small          | Shared    | 768 MB   | $0.04
    Small                | 1         | 1.75 GB  | $0.12
    Medium               | 2         | 3.50 GB  | $0.24
    Large                | 4         | 7.00 GB  | $0.48
    Extra Large          | 8         | 14.00 GB | $0.96
    You might want to review the Windows Azure Pricing FAQ. Let's Get Started building the Test Rig… Configuration:
    Machine | Role                              | Comments
    VM – 1  | Domain Controller for Playpit.com | On Premise
    VM – 2  | TFS, Test Controller              | On Premise
    VM – 3  | Test Agent                        | Cloud
    In this blog post I will assume that you have the domain, Team Foundation Server and Test Controller installed and set up already. If not, please refer to the TFS 2010 Installation Guide and this walkthrough on MSDN to set up your Test Controller. You can also download a preconfigured TFS 2010 VM from Brian Keller's blog; Brian also has some great hands on Labs on TFS 2010 that you may want to explore. I. Let's start building VM – 3: The Test Agent Download the Windows Azure SDK and Tools. Open Visual Studio and create a new Windows Azure Project using the Cloud Template. Choose the Worker Role for reasons explained in the earlier post. The WorkerRole.cs implements the Run() and OnStart() methods; no code changes are required. You should be able to compile the project and run it in the compute emulator (the compute emulator should have been installed as part of the Windows Azure Toolkit) on your local machine.
We will only be making changes to WindowsAzureProject, open ServiceDefinition.csdef. Ensure that the vmsize is small (remember the cost chart above). Import the “Connect” module. I am importing the Connect module because I need to join the Worker role VM to the Playpit domain. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect"/> </Imports> </WorkerRole> </ServiceDefinition> Go to the ServiceConfiguration.Cloud.cscfg and note that settings with key ‘Microsoft.WindowsAzure.Plugins.Connect.%%%%’ have been added to the configuration file. This is because you decided to import the connect module. See the config below. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration>             Let’s go step by step and understand all the highlighted parameters and where you can find the values for them.       osFamily – By default this is set to 1 (Windows Server 2008 SP2). Change this to 2 if you want the Windows Server 2008 R2 operating system. The Advantage of using osFamily = “2” is that you get Powershell 2.0 rather than Powershell 1.0. In Powershell 2.0 you could simply use “powershell -ExecutionPolicy Unrestricted ./myscript.ps1” and it will work while in Powershell 1.0 you will have to change the registry key by including the following in your command file “reg add HKLM\Software\Microsoft\PowerShell\1\ShellIds\Microsoft.PowerShell /v ExecutionPolicy /d Unrestricted /f” before you can execute any power shell. The other reason you might want to move to os2 is if you wanted IIS 7.5.       Activation Token – To enable communication between the on premise machine and the Windows Azure Worker role VM both need to have the same token. Log on to Windows Azure Management Portal, click on Connect, click on Get Activation Token, this should give you the activation token, copy the activation token to the clipboard and paste it in the configuration file. 
Note – Later in the blog I'll be showing you how to install Connect on the on premise machine.       EnableDomainJoin – Set the value to true; of course we want to join the Windows Azure worker role VM to the domain.       DomainFQDN, DomainControllerFQDN, DomainAccountName, DomainPassword, DomainOU, Administrators – This information is specific to your domain. I have extracted this information from the 'service manager' and 'Active Directory Users and Computers'. Also, I created a new Domain-OU, namely 'CloudInstances', so all my cloud instances joined to my domain show up here; this is optional. You can encrypt the DomainPassword – refer to the instructions here. Or hold fire, I'll be covering that when I come to certificates and encryption in the coming section.       Now once you have filled all this information in, the configuration file should look something like below, <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration> Next we will be enabling the Remote Desktop module in the ServiceDefinition.csdef; we could make the changes manually or allow a beautiful wizard to help us make them. I prefer the second option. So right click on the Windows Azure project and choose Publish.       Now once you get the publish wizard, if you haven't already, you will be asked to import your Windows Azure subscription; this is simply the MSDN subscription activation key xml. Once you have done that, click Next to go to the Settings page and check 'Enable Remote Desktop for all roles'.       As soon as you do that you get another pop up asking you for the details of the user that you would be logging in with (make sure you enter a reasonable expiry date; you do not want the user account to expire today). Notice the more information tag at the bottom; click that to get access to the certificate section. See the screen shot below.
From the drop down select the option to create a new certificate        In the pop up window enter the friendly name for your certificate. In my case I entered ‘WAC – Test Rig’ and click ok. This will create a new certificate for you. Click on the view button to see the certificate details. Do you see the Thumbprint, this is the value that will go in the config file (very important). Now click on the Copy to File button to copy the certificate, we will need to import the certificate to the windows Azure Management portal later. So, make sure you save it a safe location.                                Click Finish and enter details of the user you would like to create with permissions for remote desktop access, once you have entered the details on the ‘Remote desktop configuration’ screen click on Ok. From the Publish Windows Azure Wizard screen press Cancel. Cancel because we don’t want to publish the role just yet and Yes because we want to save all the changes in the config file.       Now if you go to the ServiceDefinition.csdef file you will see that the RemoteAccess and RemoteForwarder roles have been imported for you. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect" /> <Import moduleName="RemoteAccess" /> <Import moduleName="RemoteForwarder" /> </Imports> </WorkerRole> </ServiceDefinition> Now go to the ServiceConfiguration.Cloud.cscfg file and you see a whole bunch for setting “Microsoft.WindowsAzure.Plugins.RemoteAccess.%%%” values added for you. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" value="Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" 
value="MIIBnQYJKoZIhvcNAQcDoIIBjjCCAYoCAQAxggFOMIIBSgIBADAyMB4xHDAaBgNVBAMME1dpbmRvd 3MgQXp1cmUgVG9vbHMCEGa+B46voeO5T305N7TSG9QwDQYJKoZIhvcNAQEBBQAEggEABg4ol5Xol66Ip6QKLbAPWdmD4ae ADZ7aKj6fg4D+ATr0DXBllZHG5Umwf+84Sj2nsPeCyrg3ZDQuxrfhSbdnJwuChKV6ukXdGjX0hlowJu/4dfH4jTJC7sBWS AKaEFU7CxvqYEAL1Hf9VPL5fW6HZVmq1z+qmm4ecGKSTOJ20Fptb463wcXgR8CWGa+1w9xqJ7UmmfGeGeCHQ4QGW0IDSBU6ccg vzF2ug8/FY60K1vrWaCYOhKkxD3YBs8U9X/kOB0yQm2Git0d5tFlIPCBT2AC57bgsAYncXfHvPesI0qs7VZyghk8LVa9g5IqaM Cp6cQ7rmY/dLsKBMkDcdBHuCTAzBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECDRVifSXbA43gBApNrp40L1VTVZ1iGag+3O1" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" value="2012-11-27T23:59:59.0000000+00:00" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" value="true" /> </ConfigurationSettings> <Certificates> <Certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" thumbprint="AA23016CF0BDFC344400B5B82706B608B92E4217" thumbprintAlgorithm="sha1" /> </Certificates> </Role> </ServiceConfiguration>          Okay let’s look at them one at a time,       Enabled - Yes, we would like to enable Remote Access.       AccountUserName – This is the user name you entered while you were on the publish windows azure role screen, as detailed above.       AccountEncrytedPassword – Try and decode that, the certificate is used to encrypt the password you specified for the user account. Remember earlier i said, either use the instructions or wait and i’ll be showing you encryption, now the user account i am using for rdp has the same password as my domain password, so i can simply copy the value of the AccountEncryptedPassword to the DomainPassword as well.       AccountExpiration – This is the expiration as you specified in the wizard earlier, make sure your account does not expire today.       Remote Forwarder – Check out the documentation, below is how I understand it, -- One role in an application that implements a remote desktop connection must import the RemoteForwarder module. The two modules work together to enable the remote desktop connections to role instances. -- If you have multiple roles defined in the service model, it does not matter which role you add the RemoteForwarder module to, but you must add it to only one of the role definitions.       Certificate – Remember the certificate thumbprint from the wizard, the on premise machine and windows azure role machine that need to speak to each other must have the same thumbprint. More on that when we install Windows Azure connect Endpoints on the on premise machine. As i said earlier, in this blog post, I’ll be showing you the manual process so i won’t be scripting any star up tasks to install the test agent or register the test agent with the TFS Server. I’ll be showing you all this cool stuff in the next blog post, that’s because it’s important to understand the manual side of it, it becomes easier for you to troubleshoot in case something fails. Having said that, the changes we have made are sufficient to spin up the Windows Azure Worker Role aka Test Agent VM, have it connected with the play.pit.com domain and have remote access enabled on it. Before we deploy the Test Agent VM we need to set up Windows Azure Connect on the TFS Server. II. Windows Azure Connect: Setting up Connect on VM – 2 i.e. TFS & Test Controller Glad you made it so far, now to enable communication between the on premise TFS/Test Controller and Azure-ed Test Agent we need to enable communication. 
We have set up the Azure Connect module in the Test Agent configuration; now the Connect endpoints need to be enabled on the on premise machines. Let's have a look at how we can do this. Log on to VM – 2 running the TFS Server and Test Controller. Log on to the Windows Azure Management Portal and click on Virtual Network. Click on Virtual Network; if you already have a subscription you should see the below screen shot, if not, you will be asked to complete the subscription first.       Click on Install Local Endpoints from the top left of the panel and you get a URL with a token id appended to it – remember the token I showed you earlier; in theory the token you get here should match the token you added to the Test Agent config file.       Copy the URL to the clipboard and paste it into Internet Explorer (important: the installation at present only works out of IE and you need to have cookies enabled in order to complete the installation). As stated in the pop up, you can NOT download and run the software later; you need to run it as is, since it contains a token. Once the installation completes you should see the Windows Azure Connect icon in the system tray.       Right click the Azure Connect icon, choose Diagnostics and refer to this link for diagnostic detail terminology. NOTE – Unfortunately I could not see the Windows Azure Connect icon in the system tray; a bit of binging with Google revealed that the Azure Connect icon is only shown when the 'Windows Azure Connect Endpoint' service is started. So go to services.msc and make sure that the service is started; if not, start it. Unfortunately again, the service did not start for me on a manual start, and I realised that one of the dependent services was disabled – you can look at the service dependencies, start them, and then start Windows Azure Connect. Bottom line: you need to start the Windows Azure Connect service before you can proceed. Please refer here on MSDN for more on Troubleshooting Windows Azure Connect. (Follow the next step as well.)   Now go back to the Windows Azure Management Portal and from Groups and Roles create a new group; let's call it 'Test Rig'. Make sure you add VM – 2 (the TFS Server VM where you just installed the endpoint).       Now if you go back to the Azure Connect icon in the system tray and click 'Refresh Policy' you will notice that the disconnected status of the icon should change to ready for connection. III. Importing Certificate into Windows Azure Management Portal But before that you need to import the certificate you created in Step I into the Windows Azure Management Portal. Log on to the Windows Azure Management Portal and click on 'Hosted Services, Storage Accounts & CDN' and then 'Management Certificates', followed by Add Certificates as shown in the screen shot below.        Browse to the location where you saved the certificate earlier – remember? Refer to Step I in case you forgot.        Now you should be able to see the imported certificate here; make sure the thumbprint of the certificate matches the one you inserted in the config files.        IV. Publish Windows Azure Worker Role aka Test Agent Having completed I, II and III, you are ready to publish the Test Agent VM – 3 to the cloud. Go to Visual Studio, right click the Windows Azure project and select Publish. Verify the information in the wizard; from the advanced settings tab, you can also enable capture of IntelliTrace or profiling information.         Click Next and click Publish!
From the View menu bar select the Windows Azure Activity Log window.       Now you should be able to see the deployment progress in real time.       In the Windows Azure Management Portal, you should also be able to see the progress of the creation of a new Worker Role.       Once the deployment is complete you should be able to RDP (go to the run prompt, type mstsc, and enter the machine name in the pop up) into the Test Agent Worker Role VM from the Playpit network using the domain admin user account. In case you are unable to log in to the Test Agent using the domain admin user account, it means the process of joining the Test Agent to the domain has failed! But the good news is, because you imported the Connect module, you can connect to the Test Agent machine using the Windows Azure Management Portal and troubleshoot the reason for failure: you will be able to log in with the user name and password you specified in the config file for the keys 'RemoteAccess.AccountUsername, RemoteAccess.EncryptedPassword' (just enter the password unencrypted), then fix it or manually join the machine to the domain. Once you have managed to join the Test Agent VM to the domain, move to the next step.      So, log in to the Test Agent Worker Role VM with the Playpit Domain Administrator and verify that you can log in, that the machine is connected to the domain, and that the Connect service is successfully running. If yes, give yourself a pat on the back – you are 80% mission accomplished!         Go to the Windows Azure Management Portal, click on Virtual Network, click on Groups and Roles, click on Test Rig, then click Edit Group to edit the Test Rig group you created earlier. In the Connect to section, click on Add to select the worker role you have just deployed. Also, check 'Allow connections between endpoints in the group'; with this you enable communication between the test controller and the test agents, and between the test agents themselves. Click Save.      Now you are ready to deploy the Test Agent software on the Worker Role Test Agent VM and configure it to work with the Test Controller. V. Configuring VM – 3: Installing Test Agent and Associating Test Agent to Controller Log in to the Worker Role Test Agent VM that you have just successfully deployed; make sure you log in with the domain administrator account. Download the All Agents software from MSDN, 'en_visual_studio_agents_2010_x86_x64_dvd_509679.iso', extract the iso and navigate to where you have extracted it. In my case, I have extracted the iso to "C:\Resources\Temp\VsAgentSetup". Open the Test Agent folder and double click on setup.exe. Once you have installed the Test Agent you should reach the configuration window. If you face any issues installing the TFS Test Agent on the VM, refer to the walkthrough on MSDN.       Once you have successfully installed the Test Agent software you will need to configure the test agent. Right click the test agent configuration tool and run it as a different user, i.e. an Administrator. This is really to run the configuration wizard with elevated privileges (UAC might block some things otherwise).        In the run options you can select 'service'; you do not need to run the agent as interactive unless you are running coded UI tests. I have specified the domain administrator to connect to the TFS Test Controller. In real life, I would never do that; I would create a separate test user service account for this purpose.
But for the blog post, we are using the most powerful user so that no policies or restrictions block you.        Click the Apply Settings button and you should be all green! If not, the summary usually gives helpful error messages that you can resolve before proceeding. In my experience, you may run into either a permissions issue or a firewall blocking communication.        And now the moment of truth! Go to VM – 2, open up Visual Studio and from the Test menu select Manage Test Controller.       Mission accomplished! You should be able to see the Test Agent that you have just configured here.         VI. Creating and Running Load Tests on your brand new Azure-ed Test Rig I have various blog posts on Performance Testing with Visual Studio Ultimate; you can follow the links and videos below.
    Blog Posts:
    - Part 1 – Performance Testing using Visual Studio 2010 Ultimate
    - Part 2 – Performance Testing using Visual Studio 2010 Ultimate
    - Part 3 – Performance Testing using Visual Studio 2010 Ultimate
    Videos:
    - Test Tools Configuration & Settings in Visual Studio
    - Why & How to Record Web Performance Tests in Visual Studio Ultimate
    - Goal Driven Load Testing using Visual Studio Ultimate
    Now that you have created your load tests, there is one last change you need to make before you can run the tests on your Azure Test Rig: create a new Test settings file, change the Test Execution method to 'Remote Execution', and select the test controller you have configured the Worker Role Test Agent against, in our case VM – 2. So, go on, fire off a test run and see the results of the test being executed on the Azure-ed Test Rig. Review and What's next? A quick recap of the benefits of running the Test Rig in the cloud and what I will be covering in the next blog post. And I would love to hear your feedback! Advantages Utilizing the power of Azure compute to run a heavy virtual user load. Benefiting from the Azure flexibility: destroy Test Agents when not in use; it takes < 25 minutes to spin up a new Test Agent. Most importantly, test network latency (network latency and speed of connection are two different things – usually network latency is very hard to test): by placing the Test Agents in Microsoft data centres around the globe, one can actually test the lag in transferring the bytes, not because of a slow connection but because the page has been requested from the other side of the globe. Next Steps The process of spinning up the Test Agents in Windows Azure is not 100% automated. I am working on the worker process and PowerShell scripts to make the role deployment, unattended install of the test agent software and registration of the test agent to the test controller automated. In the next blog post I will show you how to make the complete process unattended and automated. Remember to subscribe to http://feeds.feedburner.com/TarunArora. Hope you enjoyed this post, I would love to hear your feedback! If you have any recommendations on things that I should consider or any questions or feedback, feel free to leave a comment. See you in Part III.

    Read the article

  • Help with chat server

    - by mithun1538
    I am designing a chat server in java. The communication is Http based and not socket based. In the client side I have an applet. In the server side I have a servlet. Applet: I create a new thread to listen for incoming messages(GET method). The main thread is used to send messages(POST messages). The partial code is : public void start() { System.out.println("Creating new thread"); Thread thread = new Thread(this); thread.start(); } private String getNewMessage() { System.out.println("Inside getNewMessage"); String msg = null; try { while(msg == null) { System.out.println("Trying to listen to servlet"); URL servlet = new URL(getCodeBase(), "NewServlet?mode=msg"); URLConnection con = servlet.openConnection(); con.setUseCaches(false); DataInputStream din = new DataInputStream(new BufferedInputStream(con.getInputStream())); msg = din.readUTF(); System.out.println("message read :" + msg); } } catch (Exception e) { e.printStackTrace(); } return msg + "\n"; } public void run() { System.out.println("Inside new thread"); while(true) { System.out.println("inside first while"); String newMsg = getNewMessage(); chatOutput.append(newMsg); System.out.println("Appended!!"); } } private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) { String message = chatInput.getText(); chatInput.setText(""); chatOutput.append(message + "\n"); try { System.out.println("Trying to send msg :" + message); URL url = new URL(getCodeBase(), "NewServlet"); URLConnection servletConnection = url.openConnection(); servletConnection.setDoInput(true); servletConnection.setDoOutput(true); servletConnection.setUseCaches(false); servletConnection.setRequestProperty("Content-Type", "application/octet-stream"); ObjectOutputStream out = new ObjectOutputStream(servletConnection.getOutputStream()); out.writeObject(message); out.flush(); out.close(); System.out.println("Message sent!"); } catch (Exception e) { e.printStackTrace(); } } This next code is from the servlet side. it uses the Observable interface to identify and send messages to clients. public class NewServlet extends HttpServlet { // getNextMessage() returns the next new message. // It blocks until there is one. public String getNextMessage() { // Create a message sink to wait for a new message from the // message source. System.out.println("inside getNextMessage"); return new MessageSink().getNextMessage(source);} @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { System.out.println("Inside Doget"); response.setContentType("text/plain"); PrintWriter out = response.getWriter(); out.println(getNextMessage()); } // broadcastMessage() informs all currently listening clients that there // is a new message. Causes all calls to getNextMessage() to unblock. 
public void broadcastMessage(String message) { // Send the message to all the HTTP-connected clients by giving the // message to the message source source.sendMessage(message); } @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { System.out.println("Inside DoPost"); try { ObjectInputStream din= new ObjectInputStream(request.getInputStream()); String message = (String)din.readObject(); System.out.println("received msg"); if (message != null) broadcastMessage(message); System.out.println("Called broadcast"); // Set the status code to indicate there will be no response response.setStatus(response.SC_NO_CONTENT); } catch (Exception e) { e.printStackTrace(); } } /** * Returns a short description of the servlet. * @return a String containing servlet description */ @Override public String getServletInfo() { return "Short description"; } MessageSource source = new MessageSource();} class MessageSource extends Observable { public void sendMessage(String message) { System.out.println("inside sendMsg"); setChanged(); notifyObservers(message); } } class MessageSink implements Observer { String message = null; // set by update() and read by getNextMessage() // Called by the message source when it gets a new message synchronized public void update(Observable o, Object arg) { // Get the new message message = (String)arg; // Wake up our waiting thread notify(); } // Gets the next message sent out from the message source synchronized public String getNextMessage(MessageSource source) { // Tell source we want to be told about new messages source.addObserver(this); System.out.println("AddedObserver"); // Wait until our update() method receives a message while (message == null) { try { wait(); } catch (Exception ignored) { } } // Tell source to stop telling us about new messages source.deleteObserver(this); // Now return the message we received // But first set the message instance variable to null // so update() and getNextMessage() can be called again. String messageCopy = message; message = null; System.out.println("Returning msg"); return messageCopy; } } As you can see, I have included System.out.println("Some message"); in some places; this was just for debugging purposes. In the Java console, I get the following output: Creating new thread Inside new thread. inside first while. Inside getNewMessage. Trying to listen to servlet. On the servlet side, I get the following output in the Tomcat logs: Inside Doget. inside getNextMessage. AddedObserver. After I type a message in the applet and send it, I get the following output in the Java console: Trying to send msg :you deR?? Message sent! But on the servlet side, I don't get anything in the logs. I used the O'Reilly Java Servlet Programming book as a reference (the Observer interface comes from there). But I am not getting any chat communication between two clients. As can be understood from the logs, the POST method is not called. Any reason for this?

    Read the article

  • How to change image through click - javascript

    - by Elmir Kouliev
    I have a toolbar that has 5 table cells. The first cell looks clear, and the other 4 have a shade over them. I want to make it so that clicking on the table cell will also change the image so that the shade will also change in respect to the current table cell that is selected. <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <title>X?B?RL?R V? HADIS?L?R</title> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <link rel="stylesheet" href="N&SAz.css" /> <link rel="shortcut icon" href="../../Images/favicon.ico" /> <script type="text/javascript"> var switchTo5x = true; </script> <script type="text/javascript" src="http://w.sharethis.com/button/buttons.js"></script> <script type="text/javascript"> stLight.options({ publisher: "581d0c30-ee9d-4c94-9b6f-a55e8ae3f4ae" }); </script> <script src="../../jquery-1.7.2.min.js" type="text/javascript"> </script> <script type="text/javascript"> $(document).ready(function () { $(".fade").css("display", "none"); $(".fade").fadeIn(20); $("a.transition").click(function (event) { event.preventDefault(); linkLocation = this.href; $("body").fadeOut(500, redirectPage); }); function redirectPage() { window.location = linkLocation; } }); $(document).ready(function () { $('.preview').hide(); $('#link_1').click(function () { $('#latest_story_preview1').hide(); $('#latest_story_preview2').hide(); $('#latest_story_preview3').hide(); $('#latest_story_preview4').hide(); $('#latest_story_main').fadeIn(800); }); $('#link_2').click(function () { $('#latest_story_main').hide(); $('#latest_story_preview2').hide(); $('#latest_story_preview3').hide(); $('#latest_story_preview4').hide(); $('#latest_story_preview1').fadeIn(800); }); $('#link_3').click(function () { $('#latest_story_main').hide(); $('#latest_story_preview1').hide(); $('#latest_story_preview3').hide(); $('#latest_story_preview4').hide(); $('#latest_story_preview2').fadeIn(800); }); $('#link_4').click(function () { $('#latest_story_main').hide(); $('#latest_story_preview1').hide(); $('#latest_story_preview2').hide(); $('#latest_story_preview4').hide(); $('#latest_story_preview3').fadeIn(800); }); $('#link_5').click(function () { $('#latest_story_main').hide(); $('#latest_story_preview1').hide(); $('#latest_story_preview2').hide(); $('#latest_story_preview3').hide(); $('#latest_story_preview4').fadeIn(800); }); $(".fade").css("display", "none"); $(".fade").fadeIn(1200); $("a.transition").click(function (event) { event.preventDefault(); linkLocation = this.href; $("body").fadeOut(500, redirectPage); }); }); </script> </head> <body id="body" style="background-color:#FFF;" onload="document"> <div style="margin:0px auto;width:1000px;" id="all_content"> <div id="top_content" style="background-color:transparent;"> <ul id="translation_list"> <li> <a href=""> AZ </a> </li> <li> <a href="#"> RUS </a> </li> <li> <a href="#"> ENG </a> </li> </ul> <div id="share_buttons"> <span class='st_facebook' displayText='' title="Facebook"></span> <span class='st_twitter' displayText='' title="Twitter"></span> <span class='st_linkedin' displayText='' title="Linkedin"></span> <span class='st_googleplus' displayText='' title="Google +"></span> <span class='st_email' displayText='' title="Email"></span> </div> <img src="../../Images/RasulGuliyev.png" width="330" height="80" id="top_logo"> <br /> <br /> <div class="fade" id="navigation"> <ul> <font face="Verdana, Geneva, sans-serif"> <li> <a 
href="../../index.html"> ANA S?HIF? </a> </li> <li> <a href="../biographyAZ.html"> BIOQRAFIYA </a> </li> <li style="background-color:#9C1A35;"> <a href="#"> X?B?RL?R V? HADIS?L?R </a> </li> <li> <a> PROQRAM </a> </li> <li> <a> SEÇICIL?R </a> </li> <li> <a> ?LAQ?L?R</a> </li> </font> </ul> </div> <font face="Tahoma, Geneva, sans-serif"> <br /> <div id="navigation2"> <ul> <a> <li><i> HADIS?L?R </i></li> </a> <a> <li><i>VIDEOLAR</i> </li> </a> </ul> </div> <div id="news_section" style="background-color:#FFF;"> <h3 style="font-weight:100; font-size:22px; font-style:normal; color:#7C7C7C;">Son X?b?rl?r</h3> <div class="fade" id="Latest-Stories"> <table id="stories-preview" width="330" height="598" border="0" cellpadding="0" cellspacing="0"> <tr> <td> <a id="link_1" href="#"><img src="../../Images/N&EImages/images/Article-Nav-Bar1_01.gif" width="330" height="114" alt=""></a> </td> </tr> <tr> <td> <a id="link_2" href="#"> <img src="../../Images/N&EImages/images/Article-Nav-Bar1_02.gif" width="330" height="109" alt=""> </a> </td> </tr> <tr> <td> <a id="link_3" href="#"> <img src="../../Images/N&EImages/images/Article-Nav-Bar1_03.gif" width="330" height="132" alt=""></a> </td> </tr> <tr> <td> <a id="link_4" href="#"><img src="../../Images/N&EImages/images/Article-Nav-Bar1_04.gif" width="330" height="124" alt=""></a> </td> </tr> <tr> <td> <a id="link_5" href="#"><img src="../../Images/N&EImages/images/Article-Nav-Bar1_05.gif" width="330" height="119" alt=""></a> </td> </tr> </table> <div class="fade" id="latest_story_main"> <!--START--> <img src="../../Images/N&EImages/GuliyevFace.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> "Bizim V?zif?miz Az?rbaycan Xalqinin T?zyiq? M?ruz Qalmamasini T?min Etm?kdir" </h2> </a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />IYUN 19, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">ACP-nin v? Müqavim?t H?r?katinin lideri, eks-spiker R?sul Quliyev "Yeni Müsavat"a müsahib? verib. O, son vaxtlar ACP-d? bas ver?n kadr d?yisiklikl?ri, bar?sind? dolasan söz-söhb?tl?r v? dig?r m?s?l?l?r? aydinliq g?tirib. Müsahib?ni t?qdim edirik. – Az?rbaycanda siyasi günd?mi ?hat? ed?n m?s?l?l?rd?n biri d? Sülh?ddin ?kb?rin ACP-y? s?dr g?tirilm?sidir. Ideya v? t?s?bbüs kimin idi? – ?vv?ll?r d? qeyd <a href="#"> [...]</a> </p> <!--FIRST STORY END --> </div> <div class="preview" id="latest_story_preview1"> <!--START--> <img src="../../Images/N&EImages/GuliyevFace2.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> "S?xsiyy?ti Alçaldilan Insanlarin Qisasi Amansiz Olur" </h2></a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />IYUN 12, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">R?sul Quliyev: "Az?rbaycanda müxalif?tin görün?n f?aliyy?ti ?halinin hökum?td?n naraziliq potensialini ifad? etmir" Eks-spiker Avropa görüsl?rinin yekunlarini s?rh etdi ACP lideri R?sul Quliyevin Avropa görüsl?ri basa çatib. S?f?rin yekunlari bar?d? R?sul Quliyev eksklüziv olaraq "Yeni Müsavat"a açiqlama verib. Norveçd? keçiril?n görüsl?rd? Açiq C?miyy?t v? Liberal Demokrat partiyalarinin s?drl?ri Sülh?ddin ?kb?r, Fuad ?liyev v? 
Müqavim?t H?r?kati Avropa <a href="#"> [...]</a> </p> <!--SECOND STORY END --> </div> <div class="preview" id="latest_story_preview2"> <!--START--> <img src="../../Images/N&EImages/GuliyevFace3.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> R?sul Quliyevin Iyunun 4, 2012-ci ild? Bryusseld?ki Görüsl?rl? ?laq?dar Çixisi </h2></a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />IYUN 4, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">Brüssel görüsl?ri – Az?rbaycanda xalqin malini ogurlayan korrupsioner Höküm?t liderl?rinin xarici banklarda olan qara pullari v? ?mlaklarinin dondurulmasina çox qalmayib. Camaatin hüquqlarini pozan polis, prokuratura v? m?hk?m? isçil?rin? v? onlarin r?hb?rl?rin? viza m?hdudiyy?tl?ri qoymaqda reallasacaq. R?sul Quliyevin Iyunun 4, 2012-ci ild? Bryusseld?ki Görüsl?rl? ?laq?dar Çixisi Rasul Guliyev's Speech on June 4, 2012 about Brussels Meetings <a href="#">[...]</a> </p> <!--THIRD STORY END --> </div> <div class="preview" id="latest_story_preview3"> <!--START--> <img src="../../Images/N&EImages/GuliyevGroup1.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> R?sul Quliyevin Avropa Parlamentind? v? Hakimiyy?t Qurumlarinda Görüsl?ri Baslamisdir </h2></a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />MAY 31, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">Aciq C?miyy?t Partiyasinin lideri, eks-spiker R?sul Quliyev Avropa Parlamentind? görüsl?rini davam etdirir. Bu haqda "Yeni Müsavat"a R.Quliyev özü m?lumat verib. O bildirib ki, görüsl?rd? Liberal Demokrat Partiyasinin s?dri Fuad ?liyev v? R.Quliyevin Skandinaviya ölk?l?ri üzr? müsaviri Rauf K?rimov da istirak edirl?r. Eks-spiker deyib ki, bu görüsl?r 2013-cü ild? keçiril?c?k prezident seçkil?rind? saxtalasdirmanin qarsisini almaq planinin [...]</p> <!--FOURTH STORY END --> </div> <div class="preview" id="latest_story_preview4"> <!--START--> <img src="../../Images/N&EImages/GuliyevGroup2.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> Norveçin Oslo S?h?rind? Parlament Üzvl?ri il? v? Xarici Isl?r Nazirliyind? Görüsl?r </h2></a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />MAY 30, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">R?sul Quliyev Norveçin Oslo s?h?rind? Parlament üzvl?ri v? Xarici isl?r nazirliyind? görüsl?r keçirmisdir. Bu görüsl?rd? Az?rbaycandan Liberal Demokrat Partiyasinin s?dri Fuad ?liyev, Avro-Atlantik Surasinin s?dri Sülh?ddin ?kb?r v? Milli Müqavim?t H?r?katinin Skandinaviya ölk?l?ri üzr? nümay?nd?si Rauf K?rimov istirak etmisdir. Siyasil?r ilk ?vv?l mayin 22-d? Norveç Parlamentinin Avropa Surasinda t?msil ed?n nümay?nd? hey?tinin üzvül?ri Karin S. 
[...]</a> </p> <!--FIFTH STORY END --> </div> <hr /> </div> <!--LATEST STORIES --> <div class="fade" id="article-section"> <h3 style="font-weight:100; font-size:22px; font-style:normal; color:#7C7C7C;">Çecin X?b?rl?r</h3> <div class="older-article"> <img src="../../Images/N&EImages/GuliyevGroup2.jpeg" style="padding:4px; margin-top:6px; border-style:groove; border-width:thin; margin-left:90px;" /> <a href="#"> <h2 style="font-weight:100; font-style:normal;"> Norveçin Oslo S?h?rind? Parlament Üzvl?ri il? v? Xarici Isl?r Nazirliyind? Görüsl?r </h2></a> <h5 style="font-weight:100; font-size:12px; color:#888; opacity:.9;"> <img src="../../Images/ClockImage.png" />MAY 30, 2012 BY RASUL GULIYEV - R?SUL QULIYEV</h5> <p style="font-size:14px; font-style:normal;">R?sul Quliyev Norveçin Oslo s?h?rind? Parlament üzvl?ri v? Xarici isl?r nazirliyind? görüsl?r keçirmisdir. Bu görüsl?rd? Az?rbaycandan Liberal Demokrat Partiyasinin s?dri Fuad ?liyev, Avro-Atlantik Surasinin s?dri Sülh?ddin ?kb?r v? Milli Müqavim?t H?r?katinin Skandinaviya ölk?l?ri üzr? nümay?nd?si Rauf K?rimov istirak etmisdir. Siyasil?r ilk ?vv?l mayin 22-d? Norveç Parlamentinin Avropa Surasinda t?msil ed?n nümay?nd? hey?tinin üzvül?ri Karin S. [...]</a> </p> </div> <hr /> </div> <!--NEWS SECTION--> </font> <h3 class="fade" id="footer">Rasul Guliyev 2012</h3> </div> </body> </head> </html>
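    One hedged way to make the shade follow the selected cell, assuming a second, un-shaded copy of each strip image is available (the '_clear' suffix below is hypothetical – use whatever file names the clear versions actually have): on each click, reset every cell's image to the shaded original, then swap the clicked cell's src for its clear version.
        // Sketch: swap the toolbar images so the shade follows the clicked cell.
        // Assumes clear variants exist, e.g. Article-Nav-Bar1_02_clear.gif (hypothetical).
        $('#stories-preview a').click(function () {
            // Put the shaded original back on every cell first...
            $('#stories-preview img').each(function () {
                this.src = this.src.replace('_clear.gif', '.gif');
            });
            // ...then show the clear version of the image in the clicked cell.
            var img = $(this).find('img')[0];
            img.src = img.src.replace(/\.gif$/, '_clear.gif');
        });
    This extra handler can coexist with the existing #link_1 … #link_5 click handlers that toggle the story previews.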

    Read the article

  • JSON, Ajax login and signup form problem, critique

    - by user552828
    Here is my problem; indexdeneme2.php has two forms Sign up and Login form, and there is validation.js and login.js which are handling the AJAX and JSON response, there are validate.php and login.php which are my scripts for validating and login. When you sign up, it sends the data to validate.php perfectly and validate.php response with JSON perfectly, validate.js must show the error in #error div. validation.js works perfectly if it is working alone. I use same kind of script for login form. Login.php also works perfectly it responses with JSON and login.js shows the errors are appear in #errorlogin div. But this works when login.js works alone. When I try to work login.js and validate.js together, it is not working. validate.php and login.php works perfectly but login.js and validation.js are not working together. They can't handle the responses coming from php scripts. It is not showing the errors in #errorlogin and #error div. They intercept each other I guess. By the way if you can critique my login.php and validate.php I will be really appreciated. Thank you all. this is indexdeneme2.php <?php include('functions.php')?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Untitled Document</title> <link rel="stylesheet" href="css/cssdeneme1.css" /> <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js"></script> <script type="text/javascript" src="validation.js"></script> <script type="text/javascript" src="login.js"></script> <script type="text/javascript"> var RecaptchaOptions = { theme : 'custom', custom_theme_widget: 'recaptcha_widget' }; </script> </head> <body onload="document.signup.reset()"> <div id="topbar"> <div class="wrapper"> </div> </div> <div id="middlebar"> <div class="wrapper"> <div id="middleleft"> <div id="mainformsecondcover"> <div id="mainform"> <div id="formhead"> <div id="signup">Sign Up</div> </div> <form method="post" action="validate.php" id="myform" name="signup"> <div id="form"> <table border="0" cellpadding="0" cellspacing="1"> <tbody> <tr> <td class="formlabel"> <label for="name">First Name:</label> </td> <td class="forminput"> <input type="text" name="name" id="name" /> </td> </tr> <tr> <td class="formlabel"> <label for="lastname">Last Name:</label> </td> <td class="forminput"> <input type="text" name="surname" id="lastname" /> </td> </tr> <tr> <td class="formlabel"> <label for="email">Email:</label> </td> <td class="forminput"> <input type="text" name="email" id="email" /> </td> </tr> <tr> <td class="formlabel"> <label for="remail">Re-Enter Email:</label> </td> <td class="forminput"> <input type="text" name="remail" id="remail" /> </td> </tr> <tr> <td class="formlabel"> <label for="password">Password:</label> </td> <td class="forminput"> <input type="password" name="password" id="password" maxlength="16" /> </td> </tr> <tr> <td class="formlabel"> <label for="gender">I am:</label> </td> <td class="forminput"> <select name="gender" id="gender"> <option value="0" selected="selected">-Select Sex-</option> <option value="1">Male</option> <option value="2">Female</option> </select> </td> </tr> <tr> <td class="formlabel"> <label>My Birthday:</label> </td> <td class="forminput"> <select size="1" name="day"> <option value="0" selected="selected">Day</option> <?php formDay(); ?> </select>&nbsp; <select size="1" 
name="month"> <option value="0" selected="selected">Month</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select>&nbsp; <select size="1" name="year"> <option value="0" selected="selected">Year</option> <?php formYear(); ?> </select> </td> </tr> <tr> <td class="formlabel"> <label for="recaptcha_response_field">Security Check:</label> </td> </tr> </tbody> </table> <?php require_once('captchalib.php'); ?> </div> <div id="formbottom"> <div id="error"> </div> <div id="formbottomright"> <input type="submit" id="formbutton" value="Sign Up" /> <img id="loading" src="css/images/ajax-loader.gif" height="35" width="35" alt="Processing.." style="float:right; display:block" /> </div> </div> </form> </div> </div> </div> <div id="middleright"> <div id="loginform"> <form name="login" action="login.php" method="post" id="login"> <label for="username">Email:</label> <input type="text" name="emaillogin" /> <label for="password">Password:</label> <input type="password" name="passwordlogin" maxlength="16" /> <input type="submit" value="Login" /> <img id="loading2" src="css/images/ajax-loader.gif" height="35" width="35" alt="Processing.." style="float:right; display:block" /> </form> </div> <div id="errorlogin"></div> </div> </div> </div> <div id="bottombar"> <div class="wrapper"></div> </div> </body> </html> validation.js $(document).ready(function(){ $('#myform').submit(function(e) { register(); e.preventDefault(); }); }); function register() { hideshow('loading',1); error(0); $.ajax({ type: "POST", url: "validate.php", data: $('#myform').serialize(), dataType: "json", success: function(msg){ if(parseInt(msg.status)==1) { window.location=msg.txt; } else if(parseInt(msg.status)==0) { error(1,msg.txt); Recaptcha.reload(); } hideshow('loading',0); } }); } function hideshow(el,act) { if(act) $('#'+el).css('visibility','visible'); else $('#'+el).css('visibility','hidden'); } function error(act,txt) { hideshow('error',act); if(txt) $('#error').html(txt); } login.js $(document).ready(function(){ $('#login').submit(function(e) { login(); e.preventDefault(); }); }); function login() { error(2); $.ajax({ type: "POST", url: "login.php", data: $('#login').serialize(), dataType: "json", success: function(msg){ if(parseInt(msg.status)==3) { window.location=msg.txt; } else if(parseInt(msg.status)==2) { error(3,msg.txt); } } }); } function error(act,txt) { hideshow('error',act); if(txt) $('#errorlogin').html(txt); } login.php <?php session_start(); require("connect.php"); $email = $_POST['emaillogin']; $password = $_POST['passwordlogin']; $email = mysql_real_escape_string($email); $password = mysql_real_escape_string($password); if(empty($email)) { die('{status:2,txt:"Enter your email address."}'); } if(!filter_var($email, FILTER_VALIDATE_EMAIL)) { die('{status:2,txt:"Invalid email or password"}'); } if(empty($password)) { die('{status:2,txt:"Enter your password."}'); } if(strlen($password)<6 || strlen($password)>16) { die('{status:2,txt:"Invalid email or password"}'); } $query = "SELECT password, salt FROM users WHERE Email = '$email';"; $result = mysql_query($query); if(mysql_num_rows($result) < 1) //no such user exists { die('{status:2,txt:"Invalid 
email or password"}'); } $userData = mysql_fetch_array($result, MYSQL_ASSOC); $hash = hash('sha256', $userData['salt'] . hash('sha256', $password) ); if($hash != $userData['password']) //incorrect password { die('{status:2,txt:"Invalid email or password"}'); } //////////////////////////////////////////////////////////////////////////////////// if('{status:3}') { session_regenerate_id (); //this is a security measure $getMemDetails = "SELECT * FROM users WHERE Email = '$email'"; $link = mysql_query($getMemDetails); $member = mysql_fetch_row($link); $_SESSION['valid'] = 1; $_SESSION['userid'] = $member[0]; $_SESSION['name'] = $member[1]; session_write_close(); mysql_close($con); echo '{status:3,txt:"success.php"}'; } validate.php <?php $name = $_POST['name']; $surname = $_POST['surname']; $email = $_POST['email']; $remail = $_POST['remail']; $gender = $_POST['gender']; $bdate = $_POST['year'].'-'.$_POST['month'].'-'.$_POST['day']; $bday = $_POST['day']; $bmon = $_POST['month']; $byear = $_POST['year']; $cdate = date("Y-n-j"); $password = $_POST['password']; $hash = hash('sha256', $password); $regdate = date("Y-m-d"); function createSalt() { $string = md5(uniqid(rand(), true)); return substr($string, 0, 3); } $salt = createSalt(); $hash = hash('sha256', $salt . $hash); if(empty($name) || empty($surname) || empty($email) || empty($remail) || empty($password) ) { die('{status:0,txt:"All the fields are required"}'); } if(!preg_match('/^[A-Za-z\s ]+$/', $name)) { die('{status:0,txt:"Please check your name"}'); } if(!preg_match('/^[A-Za-z\s ]+$/', $surname)) { die('{status:0,txt:"Please check your last name"}'); } if($bdate > $cdate) { die('{status:0,txt:"Please check your birthday"}'); } if(!(int)$gender) { die('{status:0,txt:"You have to select your sex"}'); } if(!(int)$bday || !(int)$bmon || !(int)$byear) { die('{status:0,txt:"You have to fill in your birthday"}'); } if(!$email == $remail) { die('{status:0,txt:"Emails doesn&sbquo;t match"}'); } if(!filter_var($email, FILTER_VALIDATE_EMAIL)) { die('{status:0,txt:"Enter a valid email"}'); } if(strlen($password)<6 || strlen($password)>16) { die('{status:0,txt:"Password must be between 6-16 characters"}'); } if (!$_POST["recaptcha_challenge_field"]===$_POST["recaptcha_response_field"]) { die('{status:0,txt:"You entered incorrect security code"}'); } if('{status:1}') { require("connect.php"); function getRealIpAddr() { if (!empty($_SERVER['HTTP_CLIENT_IP'])) { $ip=$_SERVER['HTTP_CLIENT_IP']; } elseif (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) { $ip=$_SERVER['HTTP_X_FORWARDED_FOR']; } else { $ip=$_SERVER['REMOTE_ADDR']; } return $ip; } $rip = getRealIpAddr(); $ipn = inet_pton($rip); $checkuser = mysql_query("SELECT Email FROM users WHERE Email = '$email'"); $username_exist = mysql_num_rows($checkuser); if ( $username_exist !== 0 ) { mysql_close($con); die('{status:0,txt:"This email Address is already registered!"}'); } else { $query = "INSERT INTO users (name, surname, date, Email, Gender, password, salt, RegistrationDate, IP) VALUES ('$name', '$surname', '$bdate', '$email', '$gender', '$hash', '$salt', '$cdate', '$ipn')"; $link = mysql_query($query); if(!$link) { die('Becerilemedi: ' . 
mysql_error()); } else { mysql_close($con); echo '{status:1,txt:"afterreg.php"}'; } } } ?> css of indexdeneme2.php * { padding:0; margin:0; } #topbar { width:100%; height:50px; } .wrapper { margin:0 auto; width:1000px; height:100%; } #middlebar { width:100%; height:650px; } #middleleft { width:55%; float:left; height:650px; } #middleright { width:45%; float:right; height:650px; } #mainformsecondcover { width:404px; padding:0px; margin:0px; border:4px solid #59B; border-radius: 14px; -moz-border-radius: 14px; -webkit-border-radius: 14px; } #mainform { width:400px; border:2px solid #CCC; border-radius: 11px; -moz-border-radius: 11px; -webkit-border-radius: 11px; } #formhead { margin:7px; } #signup { margin-top:13px; margin-left:13px; margin-bottom:3px; color:#333; font-size:18px; font-family:"Lucida Sans Unicode", "Lucida Grande", sans-serif; font-weight:bold } #form { margin:7px; } #form table { margin:0px; width:380px; } #form table tr{ height:28px; } #form table td{ height:18px; } .formlabel { cursor:pointer; display:table-cell; text-align:right; font-size:12px; color:#000; font-weight:normal; vertical-align:middle; font-family:"Lucida Sans Unicode", "Lucida Grande", sans-serif; letter-spacing:1px; width:120px; height:37px; padding-right:5px; } .formlabel label{ cursor:pointer } .forminput input { width:240px; font-size:13px; padding:4px; } #recaptcha_image { width:300px; height:57px; border:2px solid #CCC; } #recaptcha_widget { margin-left:35px; } #securityinfo { font-size: 11px; line-height: 16px; } #formbottom { width:360px; min-height:45px; } #error { float:left; width:200px; border:1px solid #F00; margin-left:20px; margin-top:7px; text-align:center; color:#F00; font-family:"Lucida Sans Unicode", "Lucida Grande", sans-serif; font-size:11px; line-height:16px; padding:2px; visibility:hidden; } #errorlogin { float:left; width:200px; border:1px solid #F00; margin-left:20px; margin-top:7px; text-align:center; color:#F00; font-family:"Lucida Sans Unicode", "Lucida Grande", sans-serif; font-size:11px; line-height:16px; padding:2px; visibility:hidden; } #formbottomright { float:right; height:45px; width:115px; margin-left:5px; } #loading { visibility:hidden; } #loading2 { visibility:hidden; } #formbutton { display:block; font-size:14px; color:#FFF; background: #0b85c6; /* Old browsers */ background: -moz-linear-gradient(top, #0b85c6 0%, #59b 100%); /* FF3.6+ */ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#0b85c6), color-stop(100%,#59b)); /* Chrome,Safari4+ */ background: -webkit-linear-gradient(top, #0b85c6 0%,#59b 100%); /* Chrome10+,Safari5.1+ */ background: -o-linear-gradient(top, #0b85c6 0%,#59b 100%); /* Opera11.10+ */ background: -ms-linear-gradient(top, #0b85c6 0%,#59b 100%); /* IE10+ */ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#0B85C6', endColorstr='#59B',GradientType=0 ); /* IE6-9 */ background: linear-gradient(top, #0b85c6 0%,#59b 100%); /* W3C */ font-family:"Lucida Sans Unicode", "Lucida Grande", sans-serif; height:26px; width:60px; margin:7px; text-align:center; padding-bottom:4px; padding-left:4px; padding-right:4px; float:left; margin-right:5px; } #bottombar { width:100%; height:50px; } {}
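    A likely culprit, visible in the two scripts above: validation.js and login.js each define a global function named error(), so whichever file loads second silently overwrites the first one's definition, and login.js also calls hideshow(), which only exists in validation.js. A minimal sketch (hypothetical object names, not the original code) that namespaces the helpers per form so the two handlers stop clobbering each other:

    // shared.js - sketch: one helper object per form, no colliding globals
    var SignupForm = {
        error: function (show, txt) {
            $('#error').css('visibility', show ? 'visible' : 'hidden');
            if (txt) { $('#error').html(txt); }
        }
    };
    var LoginForm = {
        error: function (show, txt) {
            $('#errorlogin').css('visibility', show ? 'visible' : 'hidden');
            if (txt) { $('#errorlogin').html(txt); }
        }
    };
    $(document).ready(function () {
        $('#myform').submit(function (e) { e.preventDefault(); register(); });
        $('#login').submit(function (e) { e.preventDefault(); login(); });
    });
    // register() then reports via SignupForm.error(...), login() via LoginForm.error(...),
    // and each form keeps its own show/hide logic instead of sharing one hideshow().

    One critique that applies to both PHP scripts: the hand-built strings like {status:2,txt:"..."} are not strictly valid JSON because the keys are unquoted; jQuery 1.3.2 happens to eval() them, but echoing json_encode(array('status' => 2, 'txt' => '...')) would be safer and would survive a jQuery upgrade.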

    Read the article

  • iText can't keep rows together: second row spans multiple pages but won't stay with the first row

    - by J2SE31
    I am having trouble keeping my first and second rows of my main PDFPTable together using IText. My first row consists of a PDFPTable with some basic search criteria. My second row consists of a PdfPTable that contains all of the tabulated results. Everytime the tabulated results becomes too big and spans multiple pages, it is kicked to the second page automatically rather than showing up directly below the search criteria and then paging to the next page. How can I avoid this problem? I have tried using setSplitRows(false), but I simply get a blank document (see commented lines 117 and 170). How can I keep my tabulated data (second row) up on the first page? An example of my code is shown below (you should be able to just copy/paste). public class TestHelper{ private TestEventHelper helper; public TestHelper(){ super(); helper = new TestEventHelper(); } public TestEventHelper getHelper() { return helper; } public void setHelper(TestEventHelper helper) { this.helper = helper; } public static void main(String[] args){ TestHelper test = new TestHelper(); TestEventHelper helper = test.getHelper(); FileOutputStream file = null; Document document = null; PdfWriter writer = null; try { file = new FileOutputStream(new File("C://Documents and Settings//All Users//Desktop//pdffile2.pdf")); document = new Document(PageSize.A4.rotate(), 36, 36, 36, 36); writer = PdfWriter.getInstance(document, file); // writer.setPageEvent(templateHelper); writer.setPdfVersion(PdfWriter.PDF_VERSION_1_7); writer.setUserunit(1f); document.open(); List<Element> pages = null; try { pages = helper.createTemplate(); } catch (Exception e) { e.printStackTrace(); } Iterator<Element> iterator = pages.iterator(); while (iterator.hasNext()) { Element element = iterator.next(); if (element instanceof Phrase) { document.newPage(); } else { document.add(element); } } } catch (Exception de) { de.printStackTrace(); // log.debug("Exception " + de + " " + de.getMessage()); } finally { if (document != null) { document.close(); } if (writer != null) { writer.close(); } } System.out.println("Done!"); } private class TestEventHelper extends PdfPageEventHelper{ // The PdfTemplate that contains the total number of pages. 
protected PdfTemplate total; protected BaseFont helv; private static final float SMALL_MARGIN = 20f; private static final float MARGIN = 36f; private final Font font = new Font(Font.HELVETICA, 12, Font.BOLD); private final Font font2 = new Font(Font.HELVETICA, 10, Font.BOLD); private final Font smallFont = new Font(Font.HELVETICA, 10, Font.NORMAL); private String[] datatableHeaderFields = new String[]{"Header1", "Header2", "Header3", "Header4", "Header5", "Header6", "Header7", "Header8", "Header9"}; public TestEventHelper(){ super(); } public List<Element> createTemplate() throws Exception { List<Element> elementList = new ArrayList<Element>(); float[] tableWidths = new float[]{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.25f, 1.25f, 1.25f, 1.25f}; // logger.debug("entering create reports template..."); PdfPTable splitTable = new PdfPTable(1); splitTable.setSplitRows(false); splitTable.setWidthPercentage(100f); PdfPTable pageTable = new PdfPTable(1); pageTable.setKeepTogether(true); pageTable.setWidthPercentage(100f); PdfPTable searchTable = generateSearchFields(); if(searchTable != null){ searchTable.setSpacingAfter(25f); } PdfPTable outlineTable = new PdfPTable(1); outlineTable.setKeepTogether(true); outlineTable.setWidthPercentage(100f); PdfPTable datatable = new PdfPTable(datatableHeaderFields.length); datatable.setKeepTogether(false); datatable.setWidths(tableWidths); generateDatatableHeader(datatable); for(int i = 0; i < 100; i++){ addCell(datatable, String.valueOf(i), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+1), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+2), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+3), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+4), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+5), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+6), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+7), 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, smallFont, true); addCell(datatable, String.valueOf(i+8), 1, Rectangle.NO_BORDER, Element.ALIGN_RIGHT, smallFont, true); } PdfPCell dataCell = new PdfPCell(datatable); dataCell.setBorder(Rectangle.BOX); outlineTable.addCell(dataCell); PdfPCell searchCell = new PdfPCell(searchTable); searchCell.setVerticalAlignment(Element.ALIGN_TOP); PdfPCell outlineCell = new PdfPCell(outlineTable); outlineCell.setVerticalAlignment(Element.ALIGN_TOP); addCell(pageTable, searchCell, 1, Rectangle.NO_BORDER, Element.ALIGN_LEFT, null, null); addCell(pageTable, outlineCell, 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, null, null); PdfPCell pageCell = new PdfPCell(pageTable); pageCell.setVerticalAlignment(Element.ALIGN_TOP); addCell(splitTable, pageCell, 1, Rectangle.NO_BORDER, Element.ALIGN_CENTER, null, null); elementList.add(pageTable); // elementList.add(splitTable); return elementList; } public void onOpenDocument(PdfWriter writer, Document document) { total = writer.getDirectContent().createTemplate(100, 100); total.setBoundingBox(new Rectangle(-20, -20, 100, 100)); try { helv = BaseFont.createFont(BaseFont.HELVETICA, BaseFont.WINANSI, BaseFont.NOT_EMBEDDED); } catch (Exception e) { throw new ExceptionConverter(e); } } public void onEndPage(PdfWriter writer, Document document) { //TODO } 
public void onCloseDocument(PdfWriter writer, Document document) { total.beginText(); total.setFontAndSize(helv, 10); total.setTextMatrix(0, 0); total.showText(String.valueOf(writer.getPageNumber() - 1)); total.endText(); } private PdfPTable generateSearchFields(){ PdfPTable searchTable = new PdfPTable(2); for(int i = 0; i < 6; i++){ addCell(searchTable, "Search Key" +i, 1, Rectangle.NO_BORDER, Element.ALIGN_RIGHT, font2, MARGIN, true); addCell(searchTable, "Search Value +i", 1, Rectangle.NO_BORDER, Element.ALIGN_LEFT, smallFont, null, true); } return searchTable; } private void generateDatatableHeader(PdfPTable datatable) { if (datatableHeaderFields != null && datatableHeaderFields.length != 0) { for (int i = 0; i < datatableHeaderFields.length; i++) { addCell(datatable, datatableHeaderFields[i], 1, Rectangle.BOX, Element.ALIGN_CENTER, font2); } } } private PdfPCell addCell(PdfPTable table, String cellContent, int colspan, int cellBorder, int horizontalAlignment, Font font) { return addCell(table, cellContent, colspan, cellBorder, horizontalAlignment, font, null, null); } private PdfPCell addCell(PdfPTable table, String cellContent, int colspan, int cellBorder, int horizontalAlignment, Font font, Boolean noWrap) { return addCell(table, cellContent, colspan, cellBorder, horizontalAlignment, font, null, noWrap); } private PdfPCell addCell(PdfPTable table, String cellContent, Integer colspan, Integer cellBorder, Integer horizontalAlignment, Font font, Float paddingLeft, Boolean noWrap) { PdfPCell cell = new PdfPCell(new Phrase(cellContent, font)); return addCell(table, cell, colspan, cellBorder, horizontalAlignment, paddingLeft, noWrap); } private PdfPCell addCell(PdfPTable table, PdfPCell cell, int colspan, int cellBorder, int horizontalAlignment, Float paddingLeft, Boolean noWrap) { cell.setColspan(colspan); cell.setBorder(cellBorder); cell.setHorizontalAlignment(horizontalAlignment); if(paddingLeft != null){ cell.setPaddingLeft(paddingLeft); } if(noWrap != null){ cell.setNoWrap(noWrap); } table.addCell(cell); return cell; } } }
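    For what it's worth, a sketch of a common workaround with this 2.x-style iText API (an untested suggestion, not the original code): drop the outer one-column wrapper tables and add the search table and the data table to the Document directly, so the data table can start on page one, repeat its header, and split overflowing rows instead of deferring them wholesale:

    // Sketch: add the two tables straight to the Document instead of nesting
    // them inside pageTable/splitTable.
    PdfPTable searchTable = generateSearchFields(); // as built above
    searchTable.setWidthPercentage(100f);
    searchTable.setSpacingAfter(25f);
    document.add(searchTable);

    PdfPTable datatable = new PdfPTable(datatableHeaderFields.length);
    datatable.setWidthPercentage(100f);
    datatable.setWidths(tableWidths);
    generateDatatableHeader(datatable);
    datatable.setHeaderRows(1);     // repeat the header row on every page
    datatable.setSplitLate(false);  // split a too-tall row in place instead of pushing it to the next page
    // ... the addCell(...) loop from above fills in the data rows ...
    document.add(datatable);

    Nesting the whole report inside a single PdfPCell is what forces iText to treat it as one row, and a single row either fits on a page or gets moved/split as a unit, which matches both symptoms described above.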

    Read the article

  • DirectShow: Video-Preview and Image (with working code)

    - by xsl
    Questions / Issues If someone can recommend me a good free hosting site I can provide the whole project file. As mentioned in the text below the TakePicture() method is not working properly on the HTC HD 2 device. It would be nice if someone could look at the code below and tell me if it is right or wrong what I'm doing. Introduction I recently asked a question about displaying a video preview, taking camera image and rotating a video stream with DirectShow. The tricky thing about the topic is, that it's very hard to find good examples and the documentation and the framework itself is very hard to understand for someone who is new to windows programming and C++ in general. Nevertheless I managed to create a class that implements most of this features and probably works with most mobile devices. Probably because the DirectShow implementation depends a lot on the device itself. I could only test it with the HTC HD and HTC HD2, which are known as quite incompatible. HTC HD Working: Video preview, writing photo to file Not working: Set video resolution (CRASH), set photo resolution (LOW quality) HTC HD 2 Working: Set video resolution, set photo resolution Problematic: Video Preview rotated Not working: Writing photo to file To make it easier for others by providing a working example, I decided to share everything I have got so far below. I removed all of the error handling for the sake of simplicity. As far as documentation goes, I can recommend you to read the MSDN documentation, after that the code below is pretty straight forward. void Camera::Init() { CreateComObjects(); _captureGraphBuilder->SetFiltergraph(_filterGraph); InitializeVideoFilter(); InitializeStillImageFilter(); } Dipslay a video preview (working with any tested handheld): void Camera::DisplayVideoPreview(HWND windowHandle) { IVideoWindow *_vidWin; _filterGraph->QueryInterface(IID_IMediaControl,(void **) &_mediaControl); _filterGraph->QueryInterface(IID_IVideoWindow, (void **) &_vidWin); _videoCaptureFilter->QueryInterface(IID_IAMVideoControl, (void**) &_videoControl); _captureGraphBuilder->RenderStream(&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video, _videoCaptureFilter, NULL, NULL); CRect rect; long width, height; GetClientRect(windowHandle, &rect); _vidWin->put_Owner((OAHWND)windowHandle); _vidWin->put_WindowStyle(WS_CHILD | WS_CLIPSIBLINGS); _vidWin->get_Width(&width); _vidWin->get_Height(&height); height = rect.Height(); _vidWin->put_Height(height); _vidWin->put_Width(rect.Width()); _vidWin->SetWindowPosition(0,0, rect.Width(), height); _mediaControl->Run(); } HTC HD2: If set SetPhotoResolution() is called FindPin will return E_FAIL. If not, it will create a file full of null bytes. HTC HD: Works void Camera::TakePicture(WCHAR *fileName) { CComPtr<IFileSinkFilter> fileSink; CComPtr<IPin> stillPin; CComPtr<IUnknown> unknownCaptureFilter; CComPtr<IAMVideoControl> videoControl; _imageSinkFilter.QueryInterface(&fileSink); fileSink->SetFileName(fileName, NULL); _videoCaptureFilter.QueryInterface(&unknownCaptureFilter); _captureGraphBuilder->FindPin(unknownCaptureFilter, PINDIR_OUTPUT, &PIN_CATEGORY_STILL, &MEDIATYPE_Video, FALSE, 0, &stillPin); _videoCaptureFilter.QueryInterface(&videoControl); videoControl->SetMode(stillPin, VideoControlFlag_Trigger); } Set resolution: Works great on HTC HD2. 
HTC HD won't allow SetVideoResolution() and only offers one low resolution photo resolution: void Camera::SetVideoResolution(int width, int height) { SetResolution(true, width, height); } void Camera::SetPhotoResolution(int width, int height) { SetResolution(false, width, height); } void Camera::SetResolution(bool video, int width, int height) { IAMStreamConfig *config; config = NULL; if (video) { _captureGraphBuilder->FindInterface(&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video, _videoCaptureFilter, IID_IAMStreamConfig, (void**) &config); } else { _captureGraphBuilder->FindInterface(&PIN_CATEGORY_STILL, &MEDIATYPE_Video, _videoCaptureFilter, IID_IAMStreamConfig, (void**) &config); } int resolutions, size; VIDEO_STREAM_CONFIG_CAPS caps; config->GetNumberOfCapabilities(&resolutions, &size); for (int i = 0; i < resolutions; i++) { AM_MEDIA_TYPE *mediaType; if (config->GetStreamCaps(i, &mediaType, reinterpret_cast<BYTE*>(&caps)) == S_OK ) { int maxWidth = caps.MaxOutputSize.cx; int maxHeigth = caps.MaxOutputSize.cy; if(maxWidth == width && maxHeigth == height) { VIDEOINFOHEADER *info = reinterpret_cast<VIDEOINFOHEADER*>(mediaType->pbFormat); info->bmiHeader.biWidth = maxWidth; info->bmiHeader.biHeight = maxHeigth; info->bmiHeader.biSizeImage = DIBSIZE(info->bmiHeader); config->SetFormat(mediaType); DeleteMediaType(mediaType); break; } DeleteMediaType(mediaType); } } } Other methods used to build the filter graph and create the COM objects: void Camera::CreateComObjects() { CoInitialize(NULL); CoCreateInstance(CLSID_CaptureGraphBuilder, NULL, CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2, (void **) &_captureGraphBuilder); CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER, IID_IGraphBuilder, (void **) &_filterGraph); CoCreateInstance(CLSID_VideoCapture, NULL, CLSCTX_INPROC, IID_IBaseFilter, (void**) &_videoCaptureFilter); CoCreateInstance(CLSID_IMGSinkFilter, NULL, CLSCTX_INPROC, IID_IBaseFilter, (void**) &_imageSinkFilter); } void Camera::InitializeVideoFilter() { _videoCaptureFilter->QueryInterface(&_propertyBag); wchar_t deviceName[MAX_PATH] = L"\0"; GetDeviceName(deviceName); CComVariant comName = deviceName; CPropertyBag propertyBag; propertyBag.Write(L"VCapName", &comName); _propertyBag->Load(&propertyBag, NULL); _filterGraph->AddFilter(_videoCaptureFilter, L"Video Capture Filter Source"); } void Camera::InitializeStillImageFilter() { _filterGraph->AddFilter(_imageSinkFilter, L"Still image filter"); _captureGraphBuilder->RenderStream(&PIN_CATEGORY_STILL, &MEDIATYPE_Video, _videoCaptureFilter, NULL, _imageSinkFilter); } void Camera::GetDeviceName(WCHAR *deviceName) { HRESULT hr = S_OK; HANDLE handle = NULL; DEVMGR_DEVICE_INFORMATION di; GUID guidCamera = { 0xCB998A05, 0x122C, 0x4166, 0x84, 0x6A, 0x93, 0x3E, 0x4D, 0x7E, 0x3C, 0x86 }; di.dwSize = sizeof(di); handle = FindFirstDevice(DeviceSearchByGuid, &guidCamera, &di); StringCchCopy(deviceName, MAX_PATH, di.szLegacyName); } Full header file: #ifndef __CAMERA_H__ #define __CAMERA_H__ class Camera { public: void Init(); void DisplayVideoPreview(HWND windowHandle); void TakePicture(WCHAR *fileName); void SetVideoResolution(int width, int height); void SetPhotoResolution(int width, int height); private: CComPtr<ICaptureGraphBuilder2> _captureGraphBuilder; CComPtr<IGraphBuilder> _filterGraph; CComPtr<IBaseFilter> _videoCaptureFilter; CComPtr<IPersistPropertyBag> _propertyBag; CComPtr<IMediaControl> _mediaControl; CComPtr<IAMVideoControl> _videoControl; CComPtr<IBaseFilter> _imageSinkFilter; void GetDeviceName(WCHAR *deviceName); void 
InitializeVideoFilter(); void InitializeStillImageFilter(); void CreateComObjects(); void SetResolution(bool video, int width, int height); }; #endif

    Read the article

  • Questions related to writing your own multi-threaded file downloader in Java

    - by Shekhar
    Hello. At my current company I am doing a PoC on how we can write a file downloader utility. We have to use socket programming (TCP/IP) for downloading the files. One of the client's requirements is that a large file should be transferred in chunks: for example, for a 5 MB file we could have 5 threads that transfer 1 MB each. I have written a small application which downloads a file. You can download the Eclipse project from http://www.fileflyer.com/view/QM1JSC0
    A brief explanation of my classes. FileSender.java - this class provides the bytes of the file. It has a method sendBytesOfFile(long start, long end, long sequenceNo) which returns that range of bytes. import java.io.File; import java.io.IOException; import java.util.zip.CRC32; import org.apache.commons.io.FileUtils; public class FileSender { private static final String FILE_NAME = "C:\\shared\\test.pdf"; public ByteArrayWrapper sendBytesOfFile(long start,long end, long sequenceNo){ try { File file = new File(FILE_NAME); byte[] fileBytes = FileUtils.readFileToByteArray(file); System.out.println("Size of file is " +fileBytes.length); System.out.println(); System.out.println("Start "+start +" end "+end); byte[] bytes = getByteArray(fileBytes, start, end); ByteArrayWrapper wrapper = new ByteArrayWrapper(bytes, sequenceNo); return wrapper; } catch (IOException e) { throw new RuntimeException(e); } } private byte[] getByteArray(byte[] bytes, long start, long end){ long arrayLength = end-start; System.out.println("Start : "+start +" end : "+end + " Arraylength : "+arrayLength +" length of source array : "+bytes.length); byte[] arr = new byte[(int)arrayLength]; for(int i = (int)start, j =0; i < end;i++,j++){ arr[j] = bytes[i]; } return arr; } public static long fileSize(){ File file = new File(FILE_NAME); return file.length(); } }
    The second class is FileReceiver.java - this class receives the file. A short explanation of what it does: it finds the size of the file to be fetched from the sender; depending on the file size it computes the start and end positions up to which each thread needs to read; it starts n threads, giving each one a start, an end, a sequence number, and a list shared by all the threads; each thread reads its bytes and creates a ByteArrayWrapper; the ByteArrayWrapper objects are added to the list; a while loop then makes sure all threads have finished their work; finally the list is sorted by sequence number, the bytes are joined into one complete array, and that array is converted to a file.
Code of File Receiver package com.filedownloader; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.zip.CRC32; import org.apache.commons.io.FileUtils; public class FileReceiver { public static void main(String[] args) { FileReceiver receiver = new FileReceiver(); receiver.receiveFile(); } public void receiveFile(){ long startTime = System.currentTimeMillis(); long numberOfThreads = 10; long filesize = FileSender.fileSize(); System.out.println("File size received "+filesize); long start = filesize/numberOfThreads; List<ByteArrayWrapper> list = new ArrayList<ByteArrayWrapper>(); for(long threadCount =0; threadCount<numberOfThreads ;threadCount++){ FileDownloaderTask task = new FileDownloaderTask(threadCount*start,(threadCount+1)*start,threadCount,list); new Thread(task).start(); } while(list.size() != numberOfThreads){ // this is done so that all the threads should complete their work before processing further. //System.out.println("Waiting for threads to complete. List size "+list.size()); } if(list.size() == numberOfThreads){ System.out.println("All bytes received "+list); Collections.sort(list, new Comparator<ByteArrayWrapper>() { @Override public int compare(ByteArrayWrapper o1, ByteArrayWrapper o2) { long sequence1 = o1.getSequence(); long sequence2 = o2.getSequence(); if(sequence1 < sequence2){ return -1; }else if(sequence1 > sequence2){ return 1; } else{ return 0; } } }); byte[] totalBytes = list.get(0).getBytes(); byte[] firstArr = null; byte[] secondArr = null; for(int i = 1;i<list.size();i++){ firstArr = totalBytes; secondArr = list.get(i).getBytes(); totalBytes = concat(firstArr, secondArr); } System.out.println(totalBytes.length); convertToFile(totalBytes,"c:\\tmp\\test.pdf"); long endTime = System.currentTimeMillis(); System.out.println("Total time taken with "+numberOfThreads +" threads is "+(endTime-startTime)+" ms" ); } } private byte[] concat(byte[] A, byte[] B) { byte[] C= new byte[A.length+B.length]; System.arraycopy(A, 0, C, 0, A.length); System.arraycopy(B, 0, C, A.length, B.length); return C; } private void convertToFile(byte[] totalBytes,String name) { try { FileUtils.writeByteArrayToFile(new File(name), totalBytes); } catch (IOException e) { throw new RuntimeException(e); } } } Code of ByteArrayWrapper package com.filedownloader; import java.io.Serializable; public class ByteArrayWrapper implements Serializable{ private static final long serialVersionUID = 3499562855188457886L; private byte[] bytes; private long sequence; public ByteArrayWrapper(byte[] bytes, long sequenceNo) { this.bytes = bytes; this.sequence = sequenceNo; } public byte[] getBytes() { return bytes; } public long getSequence() { return sequence; } } Code of FileDownloaderTask import java.util.List; public class FileDownloaderTask implements Runnable { private List<ByteArrayWrapper> list; private long start; private long end; private long sequenceNo; public FileDownloaderTask(long start,long end,long sequenceNo,List<ByteArrayWrapper> list) { this.list = list; this.start = start; this.end = end; this.sequenceNo = sequenceNo; } @Override public void run() { ByteArrayWrapper wrapper = new FileSender().sendBytesOfFile(start, end, sequenceNo); list.add(wrapper); } } Questions related to this code 1) Does file downloading becomes fast when multiple threads is used? In this code i am not able to see the benefit. 2) How should i decide how many threads should i create ? 
3) Are there any open-source libraries which do this? 4) The file the receiver produces is valid and not corrupted, but its checksum (I used FileUtils from commons-io) does not match the original. What's the problem? 5) This code runs out of memory with large files (above 100 MB) because of the byte arrays that are created. How can I avoid that? I know this is very bad code, but I had to write it in one day :-). Please suggest any other good way to do this. Thanks, Shekhar
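    Regarding questions 2 and 5, a pattern worth trying (a sketch with hypothetical names, not the original code): pre-size the target file, let every worker write its range straight into it through a shared RandomAccessFile, and wait on a CountDownLatch instead of spinning on list.size(); the full file then never has to exist as one byte[] in memory:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ChunkedDownloader {
        public static void download(long fileSize, int threads, String target) throws Exception {
            final RandomAccessFile out = new RandomAccessFile(target, "rw");
            out.setLength(fileSize);                        // pre-size so every thread can write at its own offset
            long chunk = (fileSize + threads - 1) / threads;
            final CountDownLatch done = new CountDownLatch(threads);
            ExecutorService pool = Executors.newFixedThreadPool(threads);
            for (int t = 0; t < threads; t++) {
                final long start = t * chunk;
                final long end = Math.min(start + chunk, fileSize);
                pool.submit(new Runnable() {
                    public void run() {
                        try {
                            byte[] bytes = fetchRange(start, end); // stand-in for FileSender.sendBytesOfFile(...)
                            synchronized (out) {                   // serialize seeks; each write lands at its own offset
                                out.seek(start);
                                out.write(bytes);
                            }
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        } finally {
                            done.countDown();                      // replaces the busy-wait while loop
                        }
                    }
                });
            }
            done.await();
            pool.shutdown();
            out.close();
        }

        private static byte[] fetchRange(long start, long end) throws IOException {
            return new byte[(int) (end - start)];  // hypothetical network fetch; stream in smaller buffers for real use
        }
    }

    As for question 1: reading one local file from several threads won't speed anything up by itself; multiple parallel connections only help when each individual connection is throttled or latency-bound.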

    Read the article

  • Different behavior of functors (copies, assignments) in VS2010 (compared with VS2005)

    - by Patrick
    When moving from VS2005 to VS2010 we noticed a performance decrease, which seemed to be caused by additional copies of a functor. The following code illustrates the problem. It is essential to have a map where the value itself is a set. On both the map and the set we defined a comparison functor (which is templated in the example). #include <iostream> #include <map> #include <set> class A { public: A(int i, char c) : m_i(i), m_c(c) { std::cout << "Construct object " << m_c << m_i << std::endl; } A(const A &a) : m_i(a.m_i), m_c(a.m_c) { std::cout << "Copy object " << m_c << m_i << std::endl; } ~A() { std::cout << "Destruct object " << m_c << m_i << std::endl; } void operator= (const A &a) { m_i = a.m_i; m_c = a.m_c; std::cout << "Assign object " << m_c << m_i << std::endl; } int m_i; char m_c; }; class B : public A { public: B(int i) : A(i, 'B') { } static const char s_c = 'B'; }; class C : public A { public: C(int i) : A(i, 'C') { } static const char s_c = 'C'; }; template <class X> class compareA { public: compareA() : m_i(999) { std::cout << "Construct functor " << X::s_c << m_i << std::endl; } compareA(const compareA &a) : m_i(a.m_i) { std::cout << "Copy functor " << X::s_c << m_i << std::endl; } ~compareA() { std::cout << "Destruct functor " << X::s_c << m_i << std::endl; } void operator= (const compareA &a) { m_i = a.m_i; std::cout << "Assign functor " << X::s_c << m_i << std::endl; } bool operator() (const X &x1, const X &x2) const { std::cout << "Comparing object " << x1.m_i << " with " << x2.m_i << std::endl; return x1.m_i < x2.m_i; } private: int m_i; }; typedef std::set<C, compareA<C> > SetTest; typedef std::map<B, SetTest, compareA<B> > MapTest; int main() { int i = 0; std::cout << "--- " << i++ << std::endl; MapTest mapTest; std::cout << "--- " << i++ << std::endl; SetTest &setTest = mapTest[0]; std::cout << "--- " << i++ << std::endl; } If I compile this code with VS2005 I get the following output: --- 0 Construct functor B999 Copy functor B999 Copy functor B999 Destruct functor B999 Destruct functor B999 --- 1 Construct object B0 Construct functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Copy object B0 Copy functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Copy object B0 Copy functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Destruct functor C999 Destruct object B0 Destruct functor C999 Destruct object B0 --- 2 If I compile this with VS2010, I get the following output: --- 0 Construct functor B999 Copy functor B999 Copy functor B999 Destruct functor B999 Destruct functor B999 --- 1 Construct object B0 Construct functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Copy object B0 Copy functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Copy functor C999 Assign functor C999 Assign functor C999 Destruct functor C999 Copy object B0 Copy functor C999 Copy functor C999 Copy functor C999 Destruct functor C999 Destruct functor C999 Copy functor C999 Assign functor C999 Assign functor C999 Destruct functor C999 Destruct functor C999 Destruct object B0 Destruct functor C999 Destruct object B0 --- 2 The output for the first statement (constructing the map) is identical. 
The output for the second statement (creating the first element in the map and getting a reference to it) is much bigger in the VS2010 case: the copy constructor of the functor runs 10 times vs. 8 times, assignment 2 times vs. 0 times, and the destructor 10 times vs. 8 times. My questions are: Why does the STL copy a functor at all? Isn't it enough to construct it once per instantiation of the set? Why is the functor copied more often in VS2010 than in VS2005 (I didn't check VS2008), and why is it assigned twice in VS2010 but never in VS2005? Are there any tricks to avoid these functor copies? I saw a similar question at http://stackoverflow.com/questions/2216041/prevent-unnecessary-copies-of-c-functor-objects but I'm not sure it's the same question. Thanks in advance, Patrick
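    One general mitigation, independent of which copies each compiler's map/set implementation makes (a sketch, not an explanation of the count difference itself): keep comparators empty and stateless, so however many times the containers copy or assign them, each operation is trivial; the instrumented m_i member is exactly what makes those copies cost something:

    #include <map>
    #include <set>

    struct Item { int m_i; };

    // Empty, stateless comparator: copies and assignments are free, so the
    // container may copy it as often as its implementation likes.
    struct CompareItem {
        bool operator()(const Item &a, const Item &b) const { return a.m_i < b.m_i; }
    };

    typedef std::set<Item, CompareItem> ItemSet;
    typedef std::map<Item, ItemSet, CompareItem> ItemMap;

    int main() {
        ItemMap mapTest;
        Item key = { 0 };
        ItemSet &setTest = mapTest[key]; // the comparator still gets copied, but trivially
        (void)setTest;
        return 0;
    }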

    Read the article

  • Marshalling to a native library in C#

    - by Daniel Baulig
    I'm having trouble calling functions of a native library from within managed C# code. I am developing for the 3.5 compact framework (Windows Mobile 6.x) just in case this would make any difference. I am working with the waveIn* functions from coredll.dll (these are in winmm.dll in regular Windows I believe). This is what I came up with: // namespace winmm; class winmm [StructLayout(LayoutKind.Sequential)] public struct WAVEFORMAT { public ushort wFormatTag; public ushort nChannels; public uint nSamplesPerSec; public uint nAvgBytesPerSec; public ushort nBlockAlign; public ushort wBitsPerSample; public ushort cbSize; } [StructLayout(LayoutKind.Sequential)] public struct WAVEHDR { public IntPtr lpData; public uint dwBufferLength; public uint dwBytesRecorded; public IntPtr dwUser; public uint dwFlags; public uint dwLoops; public IntPtr lpNext; public IntPtr reserved; } public delegate void AudioRecordingDelegate(IntPtr deviceHandle, uint message, IntPtr instance, ref WAVEHDR wavehdr, IntPtr reserved2); [DllImport("coredll.dll")] public static extern int waveInAddBuffer(IntPtr hWaveIn, ref WAVEHDR lpWaveHdr, uint cWaveHdrSize); [DllImport("coredll.dll")] public static extern int waveInPrepareHeader(IntPtr hWaveIn, ref WAVEHDR lpWaveHdr, uint Size); [DllImport("coredll.dll")] public static extern int waveInStart(IntPtr hWaveIn); // some other class private WinMM.WinMM.AudioRecordingDelegate waveIn; private IntPtr handle; private uint bufferLength; private void setupBuffer() { byte[] buffer = new byte[bufferLength]; GCHandle bufferPin = GCHandle.Alloc(buffer, GCHandleType.Pinned); WinMM.WinMM.WAVEHDR hdr = new WinMM.WinMM.WAVEHDR(); hdr.lpData = bufferPin.AddrOfPinnedObject(); hdr.dwBufferLength = this.bufferLength; hdr.dwFlags = 0; int i = WinMM.WinMM.waveInPrepareHeader(this.handle, ref hdr, Convert.ToUInt32(Marshal.SizeOf(hdr))); if (i != WinMM.WinMM.MMSYSERR_NOERROR) { this.Text = "Error: waveInPrepare"; return; } i = WinMM.WinMM.waveInAddBuffer(this.handle, ref hdr, Convert.ToUInt32(Marshal.SizeOf(hdr))); if (i != WinMM.WinMM.MMSYSERR_NOERROR) { this.Text = "Error: waveInAddrBuffer"; return; } } private void setupWaveIn() { WinMM.WinMM.WAVEFORMAT format = new WinMM.WinMM.WAVEFORMAT(); format.wFormatTag = WinMM.WinMM.WAVE_FORMAT_PCM; format.nChannels = 1; format.nSamplesPerSec = 8000; format.wBitsPerSample = 8; format.nBlockAlign = Convert.ToUInt16(format.nChannels * format.wBitsPerSample); format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign; this.bufferLength = format.nAvgBytesPerSec; format.cbSize = 0; int i = WinMM.WinMM.waveInOpen(out this.handle, WinMM.WinMM.WAVE_MAPPER, ref format, Marshal.GetFunctionPointerForDelegate(waveIn), 0, WinMM.WinMM.CALLBACK_FUNCTION); if (i != WinMM.WinMM.MMSYSERR_NOERROR) { this.Text = "Error: waveInOpen"; return; } setupBuffer(); WinMM.WinMM.waveInStart(this.handle); } I read alot about marshalling the last few days, nevertheless I do not get this code working. When my callback function is called (waveIn) when the buffer is full, the hdr structure passed back in wavehdr is obviously corrupted. Here is an examlpe of how the structure looks like at that point: - wavehdr {WinMM.WinMM.WAVEHDR} WinMM.WinMM.WAVEHDR dwBufferLength 0x19904c00 uint dwBytesRecorded 0x0000fa00 uint dwFlags 0x00000003 uint dwLoops 0x1990f6a4 uint + dwUser 0x00000000 System.IntPtr + lpData 0x00000000 System.IntPtr + lpNext 0x00000000 System.IntPtr + reserved 0x7c07c9a0 System.IntPtr This obiously is not what I expected to get passed. 
What concerns me here is the order of the fields in that view. I do not know whether Visual Studio .NET honors actual memory order when displaying the record in the locals view, but the fields are obviously not shown in the order I specified in the struct. There is also no data pointer, and the bufferLength field is far too high. Interestingly, the bytesRecorded field is exactly 64000; I'd expect both bufferLength and bytesRecorded to be 64000. I do not know what exactly is going wrong; maybe someone can help me out. I'm an absolute noob to managed-code programming and marshalling, so please don't be too harsh about the stupid things I've probably done. Here is the C definition of WAVEHDR which I found, in case I got the C# struct definition wrong: /* wave data block header */ typedef struct wavehdr_tag { LPSTR lpData; /* pointer to locked data buffer */ DWORD dwBufferLength; /* length of data buffer */ DWORD dwBytesRecorded; /* used for input only */ DWORD_PTR dwUser; /* for client's use */ DWORD dwFlags; /* assorted flags (see defines) */ DWORD dwLoops; /* loop control counter */ struct wavehdr_tag FAR *lpNext; /* reserved for driver */ DWORD_PTR reserved; /* reserved for driver */ } WAVEHDR, *PWAVEHDR, NEAR *NPWAVEHDR, FAR *LPWAVEHDR; If you are used to working with low-level tools like pointer arithmetic and casts, starting to write managed code is a pain in the ass. It's like trying to learn how to swim with your hands tied behind your back. Some things I tried (to no effect): the .NET Compact Framework does not seem to support the Pack = 2^x directive in [StructLayout]. I tried [StructLayout(LayoutKind.Explicit)] with both 4-byte and 8-byte alignment; 4-byte alignment gave me the same result as the code above, and 8-byte alignment only made things worse, which is what I expected. Interestingly, if I move the code from setupBuffer into setupWaveIn and declare the GCHandle not in the class context but in the local scope of setupWaveIn, the struct passed back to the callback does not seem to be corrupted. I am not sure why that is, or how I can use this knowledge to fix my code. I'd really appreciate any good links on marshalling and calling unmanaged code from C#, and I'd be very happy if someone could point out my mistakes. What am I doing wrong? Why do I not get what I'd expect?
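    A hedged guess worth testing, given that moving the GCHandle into a local scope changed the behavior: waveInAddBuffer keeps using the WAVEHDR's address long after the call returns, so a header passed by ref from a short-lived location (or whose pin has been freed or collected) leaves the driver writing into memory that has since moved. A sketch that gives the header a stable unmanaged address instead (the IntPtr-based P/Invoke signatures are alternates to the ref ones above):

    // Alternate P/Invoke: hand the driver a raw pointer that can never move.
    [DllImport("coredll.dll")]
    public static extern int waveInPrepareHeader(IntPtr hWaveIn, IntPtr lpWaveHdr, uint size);
    [DllImport("coredll.dll")]
    public static extern int waveInAddBuffer(IntPtr hWaveIn, IntPtr lpWaveHdr, uint size);

    private GCHandle bufferPin; // field, so the pin outlives setupBuffer()
    private IntPtr hdrPtr;      // unmanaged copy of the WAVEHDR

    private void setupBuffer()
    {
        byte[] buffer = new byte[bufferLength];
        bufferPin = GCHandle.Alloc(buffer, GCHandleType.Pinned);

        WinMM.WinMM.WAVEHDR hdr = new WinMM.WinMM.WAVEHDR();
        hdr.lpData = bufferPin.AddrOfPinnedObject();
        hdr.dwBufferLength = bufferLength;

        int size = Marshal.SizeOf(typeof(WinMM.WinMM.WAVEHDR));
        hdrPtr = Marshal.AllocHGlobal(size);
        Marshal.StructureToPtr(hdr, hdrPtr, false);

        waveInPrepareHeader(this.handle, hdrPtr, (uint)size);
        waveInAddBuffer(this.handle, hdrPtr, (uint)size);
        // In the callback, recover the header with Marshal.PtrToStructure, and call
        // Marshal.FreeHGlobal(hdrPtr) plus bufferPin.Free() only after recording stops.
    }

    The same lifetime rule applies to the waveIn delegate itself: the delegate instance passed through GetFunctionPointerForDelegate must stay referenced (e.g., in a field) or the GC may collect it while the driver still holds the function pointer.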

    Read the article

  • Authoritative sources about Database vs. Flatfile decision

    - by FastAl
    <tldr>looking for a reference to a book or other undeniably authoritative source that gives reasons when you should choose a database vs. when you should choose other storage methods. I have provided an un-authoritative list of reasons about 2/3 of the way down this post.</tldr> I have a situation at my company where a database is being used where it would be better to use another solution (in this case, an auto-generated piece of source code that contains a static lookup table, searched by binary sort). Normally, a database would be an OK solution even though the problem does not require a database, e.g, none of the elements of ACID are needed, as it is read-only data, updated about every 3-5 years (also requiring other sourcecode changes), and fits in memory, and can be keyed into via binary search (a tad faster than db, but speed is not an issue). The problem is that this code runs on our enterprise server, but is shared with several PC platforms (some disconnected, some use a central DB, etc.), and parts of it are managed by multiple programming units, parts by the DBAs, parts even by mathematicians in another department, etc. These hit their own platform’s version of their databases (containing their own copy of the static data). What happens is that every implementation, every little change, something different goes wrong. There are many other issues as well. I can’t even use a flatfile, because one mode of running on our enterprise server does not have permission to read files (only databases, and of course, its own literal storage, e.g., in-source table). Of course, other parts of the system use databases in proper, less obscure manners; there is no problem with those parts. So why don’t we just change it? I don’t have administrative ability to force a change. But I’m affected because sometimes I have to help fix the problems, but mostly because it causes outages and tons of extra IT time by other programmers and d*mmit that makes me mad! The reason neither management, nor the designers of the system, can see the problem is that they propose a solution that won’t work: increase communication; implement more safeguards and standards; etc. But every time, in a different part of the already-pared-down but still multi-step processes, a few different diligent, hard-working, top performing IT personnel make a unique subtle error that causes it to fail, sometimes after the last round of testing! And in general these are not single-person failures, but understandable miscommunications. And communication at our company is actually better than most. People just don't think that's the case because they haven't dug into the matter. However, I have it on very good word from somebody with extensive formal study of sociology and psychology that the relatively small amount of less-than-proper database usage in this gigantic cross-platform multi-source, multi-language project is bureaucratically un-maintainable. Impossible. No chance. At least with Human Beings in the loop, and it can’t be automated. In addition, the management and developers who could change this, though intelligent and capable, don’t understand the rigidity of this ‘how humans are’ issue, and are not convincible on the matter. 
    The reason putting the static data in sourcecode will solve the problem is that, although the solution is less sexy than a database, it would function with no technical drawbacks; and since the sharing of sourcecode already works very well, you basically erase all database-related effort from this section of the project, along with all the drawbacks that are causing problems. OK, that's the background, for the curious. I won't be able to convince management that this is an unfixable sociological problem, and that the real solution is coding around these limits of human nature, just as you would code around a bug in a 3rd-party component that you can't change. So what I have to do is exploit the unsuitableness of the database solution, and not do it using logic, but rather authority. I am aware of many reasons, and of posts on this site giving reasons for one over the other; I'm not looking for lists of reasons like these (although you can add a comment if I've missed a doozy):
    WHY USE A DATABASE? (instead of a flatfile / other) - if you need:
    - Random read / transparent search optimization
    - Advanced / varied / customizable searching and sorting capabilities
    - Transactions / rollback
    - Locks, semaphores
    - Concurrency control / shared users
    - Security
    - 1-many / many-many relationships are easier
    - Easy modification
    - Scalability
    - Load balancing
    - Random updates / inserts / deletes
    - Advanced queries
    - Administrative control of design, etc.
    - SQL / learning curve
    - Debugging / logging
    - Centralized / live backup capabilities
    - Cached queries / develop & cache execution plans
    - Interleaved update/read
    - Referential integrity; avoid redundant/missing/corrupt/out-of-sync data
    - Reporting (from an OLAP or OLTP db) / turnkey generation tools
    Disadvantages:
    - Important to get right the first time (professional design), but only because it's meant to last
    - Software and hardware cost
    - Usually over a network, so a speed issue (and even a local best-case design still requires a separate process: marshalling, network layers, inter-process communication)
    - Indices and query processing can stand in the way of simple processing (vs. a flatfile)
    WHY USE A FLATFILE - if you only need:
    - Sequential row processing only
    - Limited usage, append only (no reading, no master key/update)
    - Only updating the record you're reading (fixed-length records only)
    - Data too big to fit into memory
    - A local disk / read-ahead network connection
    - Portability / a small system
    - Email / cut & paste / storage as a document by a novice (simple format)
    - Low design learning curve, accepting a high cost later
    WHY USE IN-MEMORY TABLES (tables, arrays, etc.) - if you need:
    - Processing of a single db/flatfile record that was imported
    - Known size of data
    - Static data, if hardcoding the table
    - Narrow, unchanging use (e.g., one program or proc); includes a class that will be shared but encapsulates its data manipulation
    - Extreme speed / high transaction frequency
    - Random access (though search depends on the implementation)
    Following are some other posts about the topic:
    http://stackoverflow.com/questions/1499239/database-vs-flat-text-file-what-are-some-technical-reasons-for-choosing-one-over
    http://stackoverflow.com/questions/332825/are-flat-file-databases-any-good
    http://stackoverflow.com/questions/2356851/database-vs-flat-files
    http://stackoverflow.com/questions/514455/databases-vs-plain-text/514530
    What I'd like to know is whether anybody can recommend a hard, authoritative source containing these reasons. I'm looking for a paper book I can buy, or a reputable website with whitepapers about the issue (e.g., Microsoft, IBM), not counting the user-generated content on those sites.
    This will have a greater chance of eliciting the change I'm looking for: less wasted programmer time, and more reliable programs. Thanks very much for your help. You win a prize for reading such a large post!

    Read the article

  • Customer attribute select options not sorting

    - by Bosworth99
    Made a module that creates some customer EAV attributes. One of these attributes is a Select, and I'm dropping a bunch of options into their respective tables. Everything lines up and is accessible on both the front end and the back. Last thing before calling this part of things finished is the sort order of the options. They come out all scrambled, instead of the obvious default or alphabetical (seemingly at random... very wierd). I'm on Mage v1.11 (Pro/Enterprise). config.xml <config> <modules> <WACI_CustomerAttr> <version>0.1.0</version> </WACI_CustomerAttr> </modules> <global> <resources> <customerattr_setup> <setup> <module>WACI_CustomerAttr</module> <class>Mage_Customer_Model_Entity_Setup</class> </setup> <connection> <use>core_setup</use> </connection> </customerattr_setup> </resources> <models> <WACI_CustomerAttr> <class>WACI_CustomerAttr_Model</class> </WACI_CustomerAttr> </models> <fieldsets> <customer_account> <agency><create>1</create><update>1</update></agency> <title><create>1</create><update>1</update></title> <phone><create>1</create><update>1</update></phone> <mailing_address><create>1</create><update>1</update></mailing_address> <city><create>1</create><update>1</update></city> <state><create>1</create><update>1</update></state> <zip><create>1</create><update>1</update></zip> <fed_id><create>1</create><update>1</update></fed_id> <ubi><create>1</create><update>1</update></ubi> </customer_account> </fieldsets> </global> </config> mysql4-install-0.1.0.php <?php Mage::log('Installing WACI_CustomerAttr'); echo 'Running Upgrade: '.get_class($this)."\n <br /> \n"; //die ( 'its running' ); $installer = $this; /* @var $installer Mage_Customer_Model_Entity_Setup */ $installer->startSetup(); // bunch of attributes // State $installer->addAttribute('customer','state', array( 'type' => 'varchar', 'group' => 'Default', 'label' => 'State', 'input' => 'select', 'default' => 'Washington', 'source' => 'WACI_CustomerAttr/customer_attribute_data_select', 'global' => Mage_Catalog_Model_Resource_Eav_Attribute::SCOPE_STORE, 'required' => true, 'visible' => true, 'user_defined' => 1, 'position' => 67 ) ); $attrS = Mage::getSingleton('eav/config')->getAttribute('customer', 'state'); $attrS->addData(array('sort_order'=>67)); $attrS->setData('used_in_forms', array('adminhtml_customer','customer_account_edit','customer_account_create'))->save(); $state_list = array('Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Florida','Georgia', 'Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan', 'Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York', 'North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island','South Carolina','South Dakota', 'Tennessee','Texas','Utah','Vermont','Virginia','Washington','West Virginia','Wisconsin','Wyoming'); $aOption = array(); $aOption['attribute_id'] = $installer->getAttributeId('customer', 'state'); for($iCount=0;$iCount<sizeof($state_list);$iCount++){ $aOption['value']['option'.$iCount][0] = $state_list[$iCount]; } $installer->addAttributeOption($aOption); // a few more $installer->endSetup(); app/code/local/WACI/CustomerAttr/Model/Customer/Attribute/Data/Select.php <?php class WACI_CustomerAttr_Model_Customer_Attribute_Data_Select extends Mage_Eav_Model_Entity_Attribute_Source_Abstract{ function getAllOptions(){ if (is_null($this->_options)) { $this->_options = 
Mage::getResourceModel('eav/entity_attribute_option_collection') ->setAttributeFilter($this->getAttribute()->getId()) ->setStoreFilter($this->getAttribute()->getStoreId()) ->setPositionOrder('asc') ->load() ->toOptionArray(); } $options = $this->_options; return $options; } } theme/variation/template/persistent/customer/form/register.phtml <li> <?php $attribute = Mage::getModel('eav/config')->getAttribute('customer','state'); ?> <label for="state" class="<?php if($attribute->getIsRequired() == true){?>required<?php } ?>"><?php if($attribute->getIsRequired() == true){?><em>*</em><?php } ?><?php echo $this->__('State') ?></label> <div class="input-box"> <select name="state" id="state" class="<?php if($attribute->getIsRequired() == true){?>required-entry<?php } ?>"> <?php $options = $attribute->getSource()->getAllOptions(); foreach($options as $option){ ?> <option value='<?php echo $option['value']?>' <?php if($this->getFormData()->getState() == $option['value']){ echo 'selected="selected"';}?>><?php echo $this->__($option['label'])?></option> <?php } ?> </select> </div> </li> All options are getting loaded into table eav_attribute_option just fine (albeit without a sort_order defined), as well as table eav_attribute_option_value. In the adminhtml / customer-manage customers-account information this select is showing up fine (but its delivered automatically by the system). Seems I should be able to set the sort-order on creation of the attributeOptions, or, certainly, define the sort order in the data/select class. But nothing I've tried works. I'd rather not do a front-end hack either... Oh, and how do I set the default value of this select? (Different question, I know, but related). Setting the attributes 'default' = 'washington' seems to do nothing. There seem to be a lot of ways to set up attribute select options like this. Is there a better way that the one I've outlined here? Perhaps I'm messing something up. Cheers
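    On the sort order itself: Mage_Eav_Model_Entity_Setup::addAttributeOption() in Magento 1 also accepts an 'order' array alongside 'value' (worth confirming against your 1.11 copy of the setup model), which ends up in the sort_order column of eav_attribute_option. A sketch of the installer loop with it added:

    // Sketch: supply an explicit sort_order per option.
    $aOption = array();
    $aOption['attribute_id'] = $installer->getAttributeId('customer', 'state');
    for ($iCount = 0; $iCount < sizeof($state_list); $iCount++) {
        $aOption['value']['option'.$iCount][0] = $state_list[$iCount];
        $aOption['order']['option'.$iCount] = $iCount; // stored as sort_order
    }
    $installer->addAttributeOption($aOption);

    For the default value, 'default' => 'Washington' cannot work as written, because a select attribute stores option IDs rather than labels; the default can only be set after the options exist and their generated IDs are known (again, a hedged reading of the setup model rather than documented behavior).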

    Read the article

  • Why does this PHP script interfere with my CSS layout?

    - by CT
This page uses $_GET to grab an asset id, queries a MySQL database, and displays some information. If 'id' does not match anything, no results are displayed but the page looks fine. If 'id' is null, an error occurs at $id = $_GET["id"] or die(mysql_error()); and when this happens the page layout is not displayed correctly. How do I fix this? Bonus question: how would I display a message like "No matching results found" when the id is null or does not match any id in the database? Thank you.

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link rel="stylesheet" type="text/css" href="style.css" />
<title>Wagman IT Asset</title>
</head>
<body>
<div id="page">
  <div id="header"><img src="images/logo.png" /></div>
</div>
<div id="content">
<div id="container">
<div id="main">
<div id="menu">
  <ul>
    <table width="100%" border="0">
      <tr>
        <td><li><a href="index.php">Search Assets</a></li></td>
        <td><li><a href="browse.php">Browse Assets</a></li></td>
        <td><li><a href="add_asset.php">Add Asset</a></li></td>
        <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td>
      </tr>
    </table>
  </ul>
</div>
<div id="text">
  <ul>
    <li><h1>View Asset</h1></li>
  </ul>
  <table width="100%" border="0" cellpadding="2">
<?php
// make database connection
mysql_connect("localhost", "asset_db", "asset_db") or die(mysql_error());
mysql_select_db("asset_db") or die(mysql_error());

// get asset
$id = $_GET["id"] or die(mysql_error());

// get type of asset
$sql = "SELECT asset.type FROM asset WHERE asset.id = $id";
$result = mysql_query($sql) or die(mysql_error());
$row = mysql_fetch_assoc($result);
$type = $row['type'];

switch ($type) {
case "Server":
    $sql = "SELECT asset.id, asset.company, asset.location, asset.purchase_date,
                   asset.purchase_order, asset.value, asset.type, asset.notes,
                   server.manufacturer, server.model, server.serial_number, server.esc,
                   server.user, server.prev_user, server.warranty
            FROM asset LEFT JOIN server ON server.id = asset.id
            WHERE asset.id = $id";
    $result = mysql_query($sql);
    while ($row = mysql_fetch_assoc($result)) {
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Asset ID:</td><td>"; $id = $row['id']; setcookie('id', $id); echo "$id</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Company:</td><td>"; $company = $row['company']; setcookie('company', $company); echo "$company</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Location:</td><td>"; $location = $row['location']; setcookie('location', $location); echo "$location</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Date:</td><td>"; $purchase_date = $row['purchase_date']; setcookie('purchase_date', $purchase_date); echo "$purchase_date</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Order:</td><td>"; $purchase_order = $row['purchase_order']; setcookie('purchase_order', $purchase_order); echo "$purchase_order</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Value:</td><td>"; $value = $row['value']; setcookie('value', $value); echo "$value</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Type:</td><td>"; $type = $row['type']; setcookie('type', $type); echo "$type</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Notes:</td><td>"; $notes = $row['notes']; setcookie('notes', $notes); echo "$notes</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Manufacturer:</td><td>"; $manufacturer = $row['manufacturer']; setcookie('manufacturer', $manufacturer); echo "$manufacturer</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Model / Description:</td><td>"; $model = $row['model']; setcookie('model', $model); echo "$model</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Serial Number / Service Tag:</td><td>"; $serial_number = $row['serial_number']; setcookie('serial_number', $serial_number); echo "$serial_number</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Express Service Code:</td><td>"; $esc = $row['esc']; setcookie('esc', $esc); echo "$esc</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>User:</td><td>"; $user = $row['user']; setcookie('user', $user); echo "$user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Previous User:</td><td>"; $prev_user = $row['prev_user']; setcookie('prev_user', $prev_user); echo "$prev_user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Warranty:</td><td>"; $warranty = $row['warranty']; setcookie('warranty', $warranty); echo "$warranty</td></tr>";
    }
    break;
case "Laptop":
    $sql = "SELECT asset.id, asset.company, asset.location, asset.purchase_date,
                   asset.purchase_order, asset.value, asset.type, asset.notes,
                   laptop.manufacturer, laptop.model, laptop.serial_number, laptop.esc,
                   laptop.user, laptop.prev_user, laptop.warranty
            FROM asset LEFT JOIN laptop ON laptop.id = asset.id
            WHERE asset.id = $id";
    $result = mysql_query($sql);
    while ($row = mysql_fetch_assoc($result)) {
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Asset ID:</td><td>"; $id = $row['id']; setcookie('id', $id); echo "$id</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Company:</td><td>"; $company = $row['company']; setcookie('company', $company); echo "$company</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Location:</td><td>"; $location = $row['location']; setcookie('location', $location); echo "$location</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Date:</td><td>"; $purchase_date = $row['purchase_date']; setcookie('purchase_date', $purchase_date); echo "$purchase_date</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Order:</td><td>"; $purchase_order = $row['purchase_order']; setcookie('purchase_order', $purchase_order); echo "$purchase_order</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Value:</td><td>"; $value = $row['value']; setcookie('value', $value); echo "$value</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Type:</td><td>"; $type = $row['type']; setcookie('type', $type); echo "$type</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Notes:</td><td>"; $notes = $row['notes']; setcookie('notes', $notes); echo "$notes</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Manufacturer:</td><td>"; $manufacturer = $row['manufacturer']; setcookie('manufacturer', $manufacturer); echo "$manufacturer</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Model / Description:</td><td>"; $model = $row['model']; setcookie('model', $model); echo "$model</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Serial Number / Service Tag:</td><td>"; $serial_number = $row['serial_number']; setcookie('serial_number', $serial_number); echo "$serial_number</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Express Service Code:</td><td>"; $esc = $row['esc']; setcookie('esc', $esc); echo "$esc</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>User:</td><td>"; $user = $row['user']; setcookie('user', $user); echo "$user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Previous User:</td><td>"; $prev_user = $row['prev_user']; setcookie('prev_user', $prev_user); echo "$prev_user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Warranty:</td><td>"; $warranty = $row['warranty']; setcookie('warranty', $warranty); echo "$warranty</td></tr>";
    }
    break;
case "Desktop":
    $sql = "SELECT asset.id, asset.company, asset.location, asset.purchase_date,
                   asset.purchase_order, asset.value, asset.type, asset.notes,
                   desktop.manufacturer, desktop.model, desktop.serial_number, desktop.esc,
                   desktop.user, desktop.prev_user, desktop.warranty
            FROM asset LEFT JOIN desktop ON desktop.id = asset.id
            WHERE asset.id = $id";
    $result = mysql_query($sql);
    while ($row = mysql_fetch_assoc($result)) {
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Asset ID:</td><td>"; $id = $row['id']; setcookie('id', $id); echo "$id</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Company:</td><td>"; $company = $row['company']; setcookie('company', $company); echo "$company</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Location:</td><td>"; $location = $row['location']; setcookie('location', $location); echo "$location</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Date:</td><td>"; $purchase_date = $row['purchase_date']; setcookie('purchase_date', $purchase_date); echo "$purchase_date</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Purchase Order:</td><td>"; $purchase_order = $row['purchase_order']; setcookie('purchase_order', $purchase_order); echo "$purchase_order</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Value:</td><td>"; $value = $row['value']; setcookie('value', $value); echo "$value</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Type:</td><td>"; $type = $row['type']; setcookie('type', $type); echo "$type</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Notes:</td><td>"; $notes = $row['notes']; setcookie('notes', $notes); echo "$notes</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Manufacturer:</td><td>"; $manufacturer = $row['manufacturer']; setcookie('manufacturer', $manufacturer); echo "$manufacturer</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Model / Description:</td><td>"; $model = $row['model']; setcookie('model', $model); echo "$model</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Serial Number / Service Tag:</td><td>"; $serial_number = $row['serial_number']; setcookie('serial_number', $serial_number); echo "$serial_number</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Express Service Code:</td><td>"; $esc = $row['esc']; setcookie('esc', $esc); echo "$esc</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>User:</td><td>"; $user = $row['user']; setcookie('user', $user); echo "$user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Previous User:</td><td>"; $prev_user = $row['prev_user']; setcookie('prev_user', $prev_user); echo "$prev_user</td></tr>";
        echo "<tr><td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>Warranty:</td><td>"; $warranty = $row['warranty']; setcookie('warranty', $warranty); echo "$warranty</td></tr>";
    }
    break;
}
?>
  </table>
  <br />
  <br />
  <table width="100%" border="0">
    <tr>
      <td>&nbsp;</td>
      <td>&nbsp;</td>
      <td>&nbsp;</td>
      <td><a href="#">Add Software</a></td>
      <td><a href="#">Edit Asset</a></td>
      <td><a href="#">Delete Asset</a></td>
    </tr>
  </table>
</div>
</div>
</div>
<div class="clear"></div>
<div id="footer" align="center"><p>&nbsp;</p></div>
</div>
<div id="tagline">Wagman Construction - Bridging Generations since 1902</div>
</body>
</html>
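
One possible fix, sketched below and untested; the message text and the digits-only check are illustrative, not from the original post. The root cause is that die() aborts the script mid-page, so none of the closing HTML is emitted. Printing a table row with a message instead keeps the layout intact, and the same message covers the bonus question (an id that matches no rows):

    <?php
    // Sketch only: validate the id before querying, and print a message instead
    // of calling die(), so the closing HTML below still renders.
    $id = isset($_GET['id']) ? trim($_GET['id']) : '';
    if ($id === '' || !ctype_digit($id)) {
        echo "<tr><td colspan=\"3\">No matching results found.</td></tr>";   // null/invalid id
    } else {
        $result = mysql_query("SELECT asset.type FROM asset WHERE asset.id = $id");
        if (!$result || mysql_num_rows($result) == 0) {
            echo "<tr><td colspan=\"3\">No matching results found.</td></tr>";   // unknown id
        } else {
            $row  = mysql_fetch_assoc($result);
            $type = $row['type'];
            // ... the existing switch ($type) { ... } block goes here unchanged ...
        }
    }
    ?>

Because ctype_digit() lets only plain digits through, it also keeps stray quotes out of the interpolated SQL; mysql_real_escape_string(), or parameterized queries via mysqli/PDO, would be the more general guard.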

    Read the article

  • How do I prove I should put a table of values in source code instead of a database table?

    - by FastAl
<tldr>Looking for a reference to a book or other undeniably authoritative source that gives reasons when you should choose a database vs. when you should choose other storage methods. I have provided an un-authoritative list of reasons about 2/3 of the way down this post.</tldr>

I have a situation at my company where a database is being used where it would be better to use another solution (in this case, an auto-generated piece of source code that contains a static lookup table, searched by binary sort). Normally, a database would be an OK solution even though the problem does not require a database, e.g., none of the elements of ACID are needed, as it is read-only data, updated about every 3-5 years (also requiring other source code changes), and it fits in memory and can be keyed into via binary search (a tad faster than a db, but speed is not an issue). The problem is that this code runs on our enterprise server, but is shared with several PC platforms (some disconnected, some using a central DB, etc.), and parts of it are managed by multiple programming units, parts by the DBAs, parts even by mathematicians in another department, etc. These hit their own platform's version of their databases (containing their own copy of the static data). What happens is that with every implementation, every little change, something different goes wrong. There are many other issues as well. I can't even use a flatfile, because one mode of running on our enterprise server does not have permission to read files (only databases, and of course, its own literal storage, e.g., an in-source table). Of course, other parts of the system use databases in proper, less obscure manners; there is no problem with those parts.

So why don't we just change it? I don't have administrative ability to force a change. But I'm affected because sometimes I have to help fix the problems, but mostly because it causes outages and tons of extra IT time by other programmers and d*mmit that makes me mad! The reason neither management, nor the designers of the system, can see the problem is that they propose a solution that won't work: increase communication; implement more safeguards and standards; etc. But every time, in a different part of the already-pared-down but still multi-step process, a few different diligent, hard-working, top-performing IT personnel make a unique subtle error that causes it to fail, sometimes after the last round of testing! And in general these are not single-person failures, but understandable miscommunications. And communication at our company is actually better than most. People just don't think that's the case because they haven't dug into the matter. However, I have it on very good word from somebody with extensive formal study of sociology and psychology that the relatively small amount of less-than-proper database usage in this gigantic cross-platform, multi-source, multi-language project is bureaucratically un-maintainable. Impossible. No chance. At least with human beings in the loop, and it can't be automated. In addition, the management and developers who could change this, though intelligent and capable, don't understand the rigidity of this 'how humans are' issue, and are not convincible on the matter.

The reason putting the static data in source code will solve the problem is that, although the solution is less sexy than a database, it would function with no technical drawbacks; and since the sharing of source code already works very well, you basically erase any database-related effort from this section of the project, along with all of its drawbacks that are causing problems. OK, that's the background, for the curious. I won't be able to convince management that this is an unfixable sociological problem, and that the real solution is coding around these limits of human nature, just as you would code around a bug in a 3rd-party component that you can't change. So what I have to do is exploit the unsuitableness of the database solution, and not do it using logic, but rather authority. I am aware of many reasons, and posts on this site giving reasons, for one over the other; I'm not looking for lists of reasons like these (although you can add a comment if I've missed a doozy):

WHY USE A DATABASE? (instead of flatfile/other; DB vs. file) - if you need:
- Random read / transparent search optimization
- Advanced / varied / customizable searching and sorting capabilities
- Transaction/rollback
- Locks, semaphores
- Concurrency control / shared users
- Security
- 1-many/m-m is easier
- Easy modification
- Scalability
- Load balancing
- Random updates / inserts / deletes
- Advanced query
- Administrative control of design, etc.
- SQL / learning curve
- Debugging / logging
- Centralized / live backup capabilities
- Cached queries / develop & cache execution plans
- Interleaved update/read
- Referential integrity, avoiding redundant/missing/corrupt/out-of-sync data
- Reporting (from an OLAP or OLTP db) / turnkey generation tools

Disadvantages:
- Important to get right the first time (professional design), but only because it's meant to last
- Software & hardware cost
- Usually over a network, so speed is an issue (even the best design, run locally, requires a separate process with marshalling / network layers / inter-process comm)
- Indices and query processing can stand in the way of simple processing (vs. flatfile)

WHY USE FLATFILE - if you only need:
- Sequential row processing only
- Limited usage: append only (no reading, no master key/update)
- Only updating the record you're reading (fixed-length records only)
- Too big to fit into memory
- Local disk / read-ahead network connection
- Portability / small system
- Email / cut & paste / store-as-document by a novice (simple format)
- Low design learning curve, but high cost later

WHY USE IN-MEMORY/TABLE (tables, arrays, etc.) - if you need:
- Processing a single db/ff record that was imported
- Known size of data
- Static data, if hardcoding the table
- Narrow, unchanging use (e.g., one program or proc), including a class that will be shared but encapsulates its data manipulation
- Extreme speed / high transaction frequency
- Random access (but search is dependent on implementation)

Following are some other posts about the topic:
http://stackoverflow.com/questions/1499239/database-vs-flat-text-file-what-are-some-technical-reasons-for-choosing-one-over
http://stackoverflow.com/questions/332825/are-flat-file-databases-any-good
http://stackoverflow.com/questions/2356851/database-vs-flat-files
http://stackoverflow.com/questions/514455/databases-vs-plain-text/514530

What I'd like to know is if anybody could recommend a hard, authoritative source containing these reasons. I'm looking for a paper book I can buy, or a reputable website with whitepapers about the issue (e.g., Microsoft, IBM), not counting the user-generated content on those sites. This will have a greater chance to elicit the change I'm looking for: less wasted programmer time, and more reliable programs. Thanks very much for your help. You win a prize for reading such a large post!
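
For concreteness, the alternative being proposed above - an auto-generated source file holding a static lookup table searched by binary sort - is tiny. A minimal C++ sketch, with hypothetical names and values invented purely for illustration:

    #include <algorithm>

    // Hypothetical auto-generated file: a static, sorted, read-only table,
    // regenerated every 3-5 years along with the other source changes.
    struct RateEntry {
        int    key;    // e.g., a region or product code
        double rate;   // the static value being looked up
    };

    static const RateEntry kRates[] = {
        {101, 0.042}, {104, 0.051}, {200, 0.039}, {305, 0.047},   // ...generated rows...
    };

    // Binary search over the sorted table; returns nullptr when the key is absent.
    const RateEntry* findRate(int key)
    {
        const RateEntry* end = kRates + sizeof(kRates) / sizeof(kRates[0]);
        const RateEntry* it  = std::lower_bound(
            kRates, end, key,
            [](const RateEntry& e, int k) { return e.key < k; });
        return (it != end && it->key == key) ? it : nullptr;
    }

No connection strings, no permissions, no per-platform copies to drift out of sync - which is exactly the argument being made.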

    Read the article

  • I need help on my C++ assignment using Microsoft Visual C++

    - by krayzwytie
Ok, so I don't want you to do my homework for me, but I'm a little lost with this final assignment and need all the help I can get. Learning about programming is tough enough, but doing it online is next to impossible for me... Now, to get to the program, I am going to paste what I have so far. This includes mostly //comments and what I have written so far. If you can help me figure out where all the errors are and how to complete the assignment, I will really appreciate it. Like I said, I don't want you to do my homework for me (it's my final), but any constructive criticism is welcome. This is my final assignment for this class and it is due tomorrow (Sunday before midnight, Arizona time).

This is the assignment: Examine the following situation: Your company, Datamax, Inc., is in the process of automating its payroll systems. Your manager has asked you to create a program that calculates overtime pay for all employees. Your program must take into account the employee's salary, total hours worked, and hours worked more than 40 in a week, and then provide an output that is useful and easily understood by company management. Compile your program utilizing the following background information and the code outline in Appendix D (included in the code section). Submit your project as an attachment including the code and the output.

Company Background:
- Three employees: Mark, John, and Mary.
- The end user needs to be prompted for three specific pieces of input: name, hours worked, and hourly wage.
- Calculate overtime if input is greater than 40 hours per week.
- Provide six test plans to verify the logic within the program.
- Plan 1 must display the proper information for employee #1 with overtime pay.
- Plan 2 must display the proper information for employee #1 with no overtime pay.
- Plans 3-6 are duplicates of plans 1 and 2, but for the other employees.

Program Requirements:
- Define a base class to use for the entire program. The class holds the function calls and the variables related to the overtime pay calculations.
- Define one object per employee. Note there will be three employees.
- Your program must take the objects created and implement calculations based on total salaries, total hours, and the total number of overtime hours. See the Employee Summary Data section of the sample output.

Logic Steps to Complete Your Program:
- Define your base class.
- Define your objects from your base class.
- Prompt for user input, updating your object classes for all three users.
- Implement your overtime pay calculations.
- Display overtime or regular time pay calculations. See the sample output below.
- Implement object calculations by summarizing your employee objects and display the summary information in the example below.

And this is the code:

    // Final_Project.cpp : Defines the entry point for the console application.
    //
    #include "stdafx.h"
    #include <iostream>
    #include <string>
    #include <iomanip>
    using namespace std;

    //
    // CLASS DECLARATION SECTION
    //
    class CEmployee {
    public:
        void ImplementCalculations(string EmployeeName, double hours, double wage);
        void DisplayEmployInformation(void);
        void Addsomethingup (CEmployee, CEmployee, CEmployee);
        string EmployeeName ;
        int hours ;
        int overtime_hours ;
        int iTotal_hours ;
        int iTotal_OvertimeHours ;
        float wage ;
        float basepay ;
        float overtime_pay ;
        float overtime_extra ;
        float iTotal_salaries ;
        float iIndividualSalary ;
    };

    int main()
    {
        system("cls");
        cout << "Welcome to the Employee Pay Center";

        /* Use this section to define your objects. You will have one object per employee.
           You have only three employees. The format is your class name and your object name. */

        std::cout << "Please enter Employee's Name: ";
        std::cin >> EmployeeName;
        std::cout << "Please enter Total Hours for (EmployeeName): ";
        std::cin >> hours;
        std::cout << "Please enter Base Pay for (EmployeeName): ";
        std::cin >> basepay;

        /* Here you will prompt for the first employee's information. Prompt the employee name,
           hours worked, and the hourly wage. For each piece of information, you will update
           the appropriate class member defined above.
           Example of Prompts
           Enter the employee name =
           Enter the hours worked =
           Enter his or her hourly wage = */

        /* Here you will prompt for the second employee's information. Prompt the employee name,
           hours worked, and the hourly wage. For each piece of information, you will update
           the appropriate class member defined above.
           Enter the employee name =
           Enter the hours worked =
           Enter his or her hourly wage = */

        /* Here you will prompt for the third employee's information. Prompt the employee name,
           hours worked, and the hourly wage. For each piece of information, you will update
           the appropriate class member defined above.
           Enter the employee name =
           Enter the hours worked =
           Enter his or her hourly wage = */

        /* Here you will implement a function call to implement the employ calcuations for each
           object defined above. You will do this for each of the three employees or objects.
           The format for this step is the following:
           [(object name.function name(objectname.name, objectname.hours, objectname.wage)] ; */

        /* This section you will send all three objects to a function that will add up the
           the following information:
           - Total Employee Salaries
           - Total Employee Hours
           - Total Overtime Hours
           The format for this function is the following:
           - Define a new object.
           - Implement function call
           [objectname.functionname(object name 1, object name 2, object name 3)] /*
    } //End of Main Function

    void CEmployee::ImplementCalculations (string EmployeeName, double hours, double wage){
        //Initialize overtime variables
        overtime_hours=0;
        overtime_pay=0;
        overtime_extra=0;

        if (hours > 40) {
            /* This section is for the basic calculations for calculating overtime pay.
               - base pay = 40 hours times the hourly wage
               - overtime hours = hours worked - 40
               - overtime pay = hourly wage * 1.5
               - overtime extra pay over 40 = overtime hours * overtime pay
               - salary = overtime money over 40 hours + your base pay */

            /* Implement function call to output the employee information.
               Function is defined below. */
        } // if (hours > 40)
        else {
            /* Here you are going to calculate the hours less than 40 hours.
               - Your base pay is = your hours worked times your wage
               - Salary = your base pay */

            /* Implement function call to output the employee information.
               Function is defined below. */
        } // End of the else
    } //End of Primary Function

    void CEmployee::DisplayEmployInformation();
    {
        // This function displays all the employee output information.
        /* This is your cout statements to display the employee information:
           Employee Name ............. =
           Base Pay .................. =
           Hours in Overtime ......... =
           Overtime Pay Amount........ =
           Total Pay ................. = */
    } // END OF Display Employee Information

    void CEmployee::Addsomethingup (CEmployee Employ1, CEmployee Employ2)
    {
        // Adds two objects of class Employee passed as
        // function arguments and saves them as the calling object's data member values.
        /* Add the total hours for objects 1, 2, and 3.
           Add the salaries for each object.
           Add the total overtime hours. */

        /* Then display the information below.
           %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
           %%%% EMPLOYEE SUMMARY DATA%%%%%%%%%%%%%%%%%%%%%%%
           %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
           %%%% Total Employee Salaries ..... = 576.43
           %%%% Total Employee Hours ........ = 108
           %%%% Total Overtime Hours......... = 5
           %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
           %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */
    } // End of function
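
Without giving the whole assignment away, the pay rules quoted in the skeleton's comments reduce to a few lines. A minimal, self-contained C++ sketch of just that arithmetic (the names here are invented, not part of the assignment template):

    #include <iostream>

    // base pay = 40 * wage; overtime hours = hours - 40;
    // overtime rate = wage * 1.5; salary = base pay + overtime extra.
    double computePay(double hours, double wage)
    {
        if (hours > 40) {
            double basepay        = 40 * wage;
            double overtime_hours = hours - 40;
            double overtime_rate  = wage * 1.5;
            return basepay + overtime_hours * overtime_rate;
        }
        return hours * wage;   // 40 hours or less: no overtime
    }

    int main()
    {
        std::cout << computePay(45, 10) << "\n";   // 400 + 5 * 15 = 475
        std::cout << computePay(38, 10) << "\n";   // 38 * 10 = 380
        return 0;
    }

Separately, a few things in the skeleton as pasted will stop it from compiling: std::cin >> EmployeeName; in main() references a class member with no object; the block comment just before } //End of Main Function opens with /* but also ends with /* instead of */, which comments out main()'s closing brace; void CEmployee::DisplayEmployInformation(); has a stray semicolon before its opening brace; and Addsomethingup is declared with three CEmployee parameters but defined with two.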

    Read the article

  • Using R to Analyze G1GC Log Files

    - by user12620111
Introduction

Working in Oracle Platform Integration gives an engineer opportunities to work on a wide array of technologies. My team's goal is to make Oracle applications run best on the Solaris/SPARC platform. When looking for bottlenecks in a modern application, one needs to be aware of not only how the CPUs and operating system are executing, but also the network, storage, and, in some cases, the Java Virtual Machine. I was recently presented with about 1.5 GB of Java Garbage First Garbage Collector log file data. If you're not familiar with the subject, you might want to review Garbage First Garbage Collector Tuning by Monica Beckwith. The customer had been running Java HotSpot 1.6.0_31 to host a web application server. I was told that the Solaris/SPARC server was running a Java process launched using a command line that included the following flags:

-d64 -Xms9g -Xmx9g -XX:+UseG1GC -XX:MaxGCPauseMillis=200
-XX:InitiatingHeapOccupancyPercent=80 -XX:PermSize=256m -XX:MaxPermSize=256m
-XX:+PrintGC -XX:+PrintGCTimeStamps -XX:+PrintHeapAtGC -XX:+PrintGCDateStamps
-XX:+PrintFlagsFinal -XX:+DisableExplicitGC -XX:+UnlockExperimentalVMOptions
-XX:ParallelGCThreads=8

Several sources on the internet indicate that if I were to print out the 1.5 GB of log files, it would require enough paper to fill the bed of a pickup truck. Of course, it would be fruitless to try to scan the log files by hand. Tools will be required to summarize the contents of the log files. Others have encountered large Java garbage collection log files, and there are existing tools to analyze them:
- IBM's GC toolkit
- the chewiebug GCViewer
- gchisto
- HPjmeter

Instead of using one of the tools listed, I decided to parse the log files with standard Unix tools, and analyze the data with R.

Data Cleansing

The log files arrived in two different formats. I guess that the difference is that one set of log files was generated using a more verbose option, maybe -XX:+PrintHeapAtGC, and the other set of log files was generated without that option.

Format 1

In some of the log files (those with the less verbose format), a single trace, i.e. the report of a single garbage collection event, looks like this:

{Heap before GC invocations=12280 (full 61):
 garbage-first heap   total 9437184K, used 7499918K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000)
  region size 4096K, 1 young (4096K), 0 survivors (0K)
 compacting perm gen  total 262144K, used 144077K [0xffffffff40000000, 0xffffffff50000000, 0xffffffff50000000)
   the space 262144K, 54% used [0xffffffff40000000, 0xffffffff48cb3758, 0xffffffff48cb3800, 0xffffffff50000000)
No shared spaces configured.
2014-05-14T07:24:00.988-0700: 60586.353: [GC pause (young) 7324M->7320M(9216M), 0.1567265 secs]
Heap after GC invocations=12281 (full 61):
 garbage-first heap   total 9437184K, used 7496533K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000)
  region size 4096K, 0 young (0K), 0 survivors (0K)
 compacting perm gen  total 262144K, used 144077K [0xffffffff40000000, 0xffffffff50000000, 0xffffffff50000000)
   the space 262144K, 54% used [0xffffffff40000000, 0xffffffff48cb3758, 0xffffffff48cb3800, 0xffffffff50000000)
No shared spaces configured.
}

A simple grep can be used to extract a summary:

$ grep "\[GC pause (young" g1gc.log
2014-05-13T13:24:35.091-0700: 3.109: [GC pause (young) 20M->5029K(9216M), 0.0146328 secs]
2014-05-13T13:24:35.440-0700: 3.459: [GC pause (young) 9125K->6077K(9216M), 0.0086723 secs]
2014-05-13T13:24:37.581-0700: 5.599: [GC pause (young) 25M->8470K(9216M), 0.0203820 secs]
2014-05-13T13:24:42.686-0700: 10.704: [GC pause (young) 44M->15M(9216M), 0.0288848 secs]
2014-05-13T13:24:48.941-0700: 16.958: [GC pause (young) 51M->20M(9216M), 0.0491244 secs]
2014-05-13T13:24:56.049-0700: 24.066: [GC pause (young) 92M->26M(9216M), 0.0525368 secs]
2014-05-13T13:25:34.368-0700: 62.383: [GC pause (young) 602M->68M(9216M), 0.1721173 secs]

But that format wasn't easily read into R, so I needed to be a bit more tricky. I used the following Unix commands to create a summary file that was easy for R to read.

$ echo "SecondsSinceLaunch BeforeSize AfterSize TotalSize RealTime"
$ grep "\[GC pause (young" g1gc.log | grep -v mark | sed -e 's/[A-SU-z\(\),]/ /g' -e 's/->/ /' -e 's/: / /g' | more
SecondsSinceLaunch BeforeSize AfterSize TotalSize RealTime
2014-05-13T13:24:35.091-0700 3.109 20 5029 9216 0.0146328
2014-05-13T13:24:35.440-0700 3.459 9125 6077 9216 0.0086723
2014-05-13T13:24:37.581-0700 5.599 25 8470 9216 0.0203820
2014-05-13T13:24:42.686-0700 10.704 44 15 9216 0.0288848
2014-05-13T13:24:48.941-0700 16.958 51 20 9216 0.0491244
2014-05-13T13:24:56.049-0700 24.066 92 26 9216 0.0525368
2014-05-13T13:25:34.368-0700 62.383 602 68 9216 0.1721173

Format 2

In some of the log files (those with the more verbose format), a single trace, i.e. the report of a single garbage collection event, was more complicated than Format 1. Here is a text file with an example of a single G1GC trace in the second format. As you can see, it is quite complicated. It is nice that there is so much information available, but the level of detail can be overwhelming. I wrote this awk script (download) to summarize each trace on a single line.

#!/usr/bin/env awk -f
BEGIN {
    printf("SecondsSinceLaunch IncrementalCount FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize\n")
}

######################
# Save count data from lines that are at the start of each G1GC trace.
# Each trace starts out like this:
# {Heap before GC invocations=14 (full 0):
#  garbage-first heap   total 9437184K, used 325496K [0xfffffffd00000000, 0xffffffff40000000, 0xffffffff40000000)
######################
/{Heap.*full/{
    gsub ( "\\)" , "" );
    nf=split($0,a,"=");
    split(a[2],b," ");
    getline;
    if ( match($0, "first") ) {
        G1GC=1;
        IncrementalCount=b[1];
        FullCount=substr( b[3], 1, length(b[3])-1 );
    } else {
        G1GC=0;
    }
}

######################
# Pull out time stamps that are in lines with this format:
# 2014-05-12T14:02:06.025-0700: 94.312: [GC pause (young), 0.08870154 secs]
######################
/GC pause/ {
    DateTime=$1;
    SecondsSinceLaunch=substr($2, 1, length($2)-1);
}

######################
# Heap sizes are in lines that look like this:
# [ 4842M->4838M(9216M)]
######################
/\[ .*]$/ {
    gsub ( "\\[" , "" );
    gsub ( "\ \]" , "" );
    gsub ( "->" , " " );
    gsub ( "\\( " , " " );
    gsub ( "\ \)" , " " );
    split($0,a," ");
    if ( split(a[1],b,"M") > 1 ) {BeforeSize=b[1]*1024;}
    if ( split(a[1],b,"K") > 1 ) {BeforeSize=b[1];}
    if ( split(a[2],b,"M") > 1 ) {AfterSize=b[1]*1024;}
    if ( split(a[2],b,"K") > 1 ) {AfterSize=b[1];}
    if ( split(a[3],b,"M") > 1 ) {TotalSize=b[1]*1024;}
    if ( split(a[3],b,"K") > 1 ) {TotalSize=b[1];}
}

######################
# Emit an output line when you find input that looks like this:
# [Times: user=1.41 sys=0.08, real=0.24 secs]
######################
/\[Times/ {
    if (G1GC==1) {
        gsub ( "," , "" );
        split($2,a,"="); UserTime=a[2];
        split($3,a,"="); SysTime=a[2];
        split($4,a,"="); RealTime=a[2];
        print DateTime,SecondsSinceLaunch,IncrementalCount,FullCount,UserTime,SysTime,RealTime,BeforeSize,AfterSize,TotalSize;
        G1GC=0;
    }
}

The resulting summary is about 25X smaller than the original file, but still difficult for a human to digest.

SecondsSinceLaunch IncrementalCount FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
...
2014-05-12T18:36:34.669-0700: 3985.744 561 0 0.57 0.06 0.16 1724416 1720320 9437184
2014-05-12T18:36:34.839-0700: 3985.914 562 0 0.51 0.06 0.19 1724416 1720320 9437184
2014-05-12T18:36:35.069-0700: 3986.144 563 0 0.60 0.04 0.27 1724416 1721344 9437184
2014-05-12T18:36:35.354-0700: 3986.429 564 0 0.33 0.04 0.09 1725440 1722368 9437184
2014-05-12T18:36:35.545-0700: 3986.620 565 0 0.58 0.04 0.17 1726464 1722368 9437184
2014-05-12T18:36:35.726-0700: 3986.801 566 0 0.43 0.05 0.12 1726464 1722368 9437184
2014-05-12T18:36:35.856-0700: 3986.930 567 0 0.30 0.04 0.07 1726464 1723392 9437184
2014-05-12T18:36:35.947-0700: 3987.023 568 0 0.61 0.04 0.26 1727488 1723392 9437184
2014-05-12T18:36:36.228-0700: 3987.302 569 0 0.46 0.04 0.16 1731584 1724416 9437184

Reading the Data into R

Once the GC log data had been cleansed, either by processing the first format with the shell commands, or by processing the second format with the awk script, it was easy to read the data into R.

g1gc.df = read.csv("summary.txt", row.names = NULL, stringsAsFactors=FALSE, sep="")
str(g1gc.df)
## 'data.frame': 8307 obs. of 10 variables:
## $ row.names : chr "2014-05-12T14:00:32.868-0700:" "2014-05-12T14:00:33.179-0700:" "2014-05-12T14:00:33.677-0700:" "2014-05-12T14:00:35.538-0700:" ...
## $ SecondsSinceLaunch: num 1.16 1.47 1.97 3.83 6.1 ...
## $ IncrementalCount : int 0 1 2 3 4 5 6 7 8 9 ...
## $ FullCount : int 0 0 0 0 0 0 0 0 0 0 ...
## $ UserTime : num 0.11 0.05 0.04 0.21 0.08 0.26 0.31 0.33 0.34 0.56 ...
## $ SysTime : num 0.04 0.01 0.01 0.05 0.01 0.06 0.07 0.06 0.07 0.09 ...
## $ RealTime : num 0.02 0.02 0.01 0.04 0.02 0.04 0.05 0.04 0.04 0.06 ...
## $ BeforeSize : int 8192 5496 5768 22528 24576 43008 34816 53248 55296 93184 ...
## $ AfterSize : int 1400 1672 2557 4907 7072 14336 16384 18432 19456 21504 ...
## $ TotalSize : int 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 9437184 ...
head(g1gc.df)
## row.names SecondsSinceLaunch IncrementalCount
## 1 2014-05-12T14:00:32.868-0700: 1.161 0
## 2 2014-05-12T14:00:33.179-0700: 1.472 1
## 3 2014-05-12T14:00:33.677-0700: 1.969 2
## 4 2014-05-12T14:00:35.538-0700: 3.830 3
## 5 2014-05-12T14:00:37.811-0700: 6.103 4
## 6 2014-05-12T14:00:41.428-0700: 9.720 5
## FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
## 1 0 0.11 0.04 0.02 8192 1400 9437184
## 2 0 0.05 0.01 0.02 5496 1672 9437184
## 3 0 0.04 0.01 0.01 5768 2557 9437184
## 4 0 0.21 0.05 0.04 22528 4907 9437184
## 5 0 0.08 0.01 0.02 24576 7072 9437184
## 6 0 0.26 0.06 0.04 43008 14336 9437184

Basic Statistics

Once the data has been read into R, simple statistics are very easy to generate. All of the numbers from high school statistics are available via simple commands. For example, generate a summary of every column:

summary(g1gc.df)
## row.names SecondsSinceLaunch IncrementalCount FullCount
## Length:8307 Min. : 1 Min. : 0 Min. : 0.0
## Class :character 1st Qu.: 9977 1st Qu.:2048 1st Qu.: 0.0
## Mode :character Median :12855 Median :4136 Median : 12.0
## Mean :12527 Mean :4156 Mean : 31.6
## 3rd Qu.:15758 3rd Qu.:6262 3rd Qu.: 61.0
## Max. :55484 Max. :8391 Max. :113.0
## UserTime SysTime RealTime BeforeSize
## Min. :0.040 Min. :0.0000 Min. : 0.0 Min. : 5476
## 1st Qu.:0.470 1st Qu.:0.0300 1st Qu.: 0.1 1st Qu.:5137920
## Median :0.620 Median :0.0300 Median : 0.1 Median :6574080
## Mean :0.751 Mean :0.0355 Mean : 0.3 Mean :5841855
## 3rd Qu.:0.920 3rd Qu.:0.0400 3rd Qu.: 0.2 3rd Qu.:7084032
## Max. :3.370 Max. :1.5600 Max. :488.1 Max. :8696832
## AfterSize TotalSize
## Min. : 1380 Min. :9437184
## 1st Qu.:5002752 1st Qu.:9437184
## Median :6559744 Median :9437184
## Mean :5785454 Mean :9437184
## 3rd Qu.:7054336 3rd Qu.:9437184
## Max. :8482816 Max. :9437184

Q: What is the total amount of User CPU time spent in garbage collection?

sum(g1gc.df$UserTime)
## [1] 6236

As you can see, less than two hours of CPU time was spent in garbage collection. Is that too much? To find the percentage of time spent in garbage collection, divide the number above by total_elapsed_time * CPU_count. In this case, there are a lot of CPUs and it turns out that the overall amount of CPU time spent in garbage collection isn't a problem when viewed in isolation. (A short R sketch of this calculation appears at the end of this article.) When calculating rates, i.e. events per unit time, you need to ask yourself if the rate is homogeneous across the time period in the log file. Does the log file include spikes of high activity that should be separately analyzed? Averaging in data from nights and weekends with data from business hours may alias problems. If you have a reason to suspect that the garbage collection rates include peaks and valleys that need independent analysis, see the "Time Series" section, below.

Q: How much garbage is collected on each pass?

The amount of heap space that is recovered per GC pass is surprisingly low:
- At least one collection didn't recover any data. ("Min.=0")
- 25% of the passes recovered 3MB or less. ("1st Qu.=3072")
- Half of the GC passes recovered 4MB or less. ("Median=4096")
- The average amount recovered was 56MB. ("Mean=56390")
- 75% of the passes recovered 36MB or less. ("3rd Qu.=36860")
- At least one pass recovered 2GB. ("Max.=2121000")

g1gc.df$Delta = g1gc.df$BeforeSize - g1gc.df$AfterSize
summary(g1gc.df$Delta)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 3070 4100 56400 36900 2120000

Q: What is the maximum User CPU time for a single collection?

The worst garbage collection ("Max.") is many standard deviations away from the mean. The data appears to be right skewed.

summary(g1gc.df$UserTime)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.040 0.470 0.620 0.751 0.920 3.370
sd(g1gc.df$UserTime)
## [1] 0.3966

Basic Graphics

Once the data is in R, it is trivial to plot the data with formats including dot plots, line charts, bar charts (simple, stacked, grouped), pie charts, boxplots, scatter plots, histograms, and kernel density plots.

Histogram of User CPU Time per Collection

I don't think that this graph requires any explanation.

hist(g1gc.df$UserTime,
     main="User CPU Time per Collection",
     xlab="Seconds", ylab="Frequency")

Box plot to identify outliers

When the initial data is viewed with a box plot, you can see the one crazy outlier in the real time per GC. Save this data point for future analysis and drop the outlier so that it's not throwing off our statistics. Now the box plot shows many outliers, which will be examined later, using time series analysis. Notice that the scale of the x-axis changes drastically once the crazy outlier is removed.

par(mfrow=c(2,1))
boxplot(g1gc.df$UserTime, g1gc.df$SysTime, g1gc.df$RealTime,
        main="Box Plot of Time per GC\n(dominated by a crazy outlier)",
        names=c("usr","sys","elapsed"),
        xlab="Seconds per GC", ylab="Time (Seconds)",
        horizontal = TRUE, outcol="red")
crazy.outlier.df=g1gc.df[g1gc.df$RealTime > 400,]
g1gc.df=g1gc.df[g1gc.df$RealTime < 400,]
boxplot(g1gc.df$UserTime, g1gc.df$SysTime, g1gc.df$RealTime,
        main="Box Plot of Time per GC\n(crazy outlier excluded)",
        names=c("usr","sys","elapsed"),
        xlab="Seconds per GC", ylab="Time (Seconds)",
        horizontal = TRUE, outcol="red")
box(which = "outer", lty = "solid")

Here is the crazy outlier for future analysis:

crazy.outlier.df
## row.names SecondsSinceLaunch IncrementalCount
## 8233 2014-05-12T23:15:43.903-0700: 20741 8316
## FullCount UserTime SysTime RealTime BeforeSize AfterSize TotalSize
## 8233 112 0.55 0.42 488.1 8381440 8235008 9437184
## Delta
## 8233 146432

R Time Series Data

To analyze the garbage collection as a time series, I'll use Z's Ordered Observations (zoo).
"zoo is the creator for an S3 class of indexed totally ordered observations which includes irregular time series."

require(zoo)
## Loading required package: zoo
##
## Attaching package: 'zoo'
##
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
head(g1gc.df[,1])
## [1] "2014-05-12T14:00:32.868-0700:" "2014-05-12T14:00:33.179-0700:"
## [3] "2014-05-12T14:00:33.677-0700:" "2014-05-12T14:00:35.538-0700:"
## [5] "2014-05-12T14:00:37.811-0700:" "2014-05-12T14:00:41.428-0700:"
options("digits.secs"=3)
times=as.POSIXct( g1gc.df[,1], format="%Y-%m-%dT%H:%M:%OS%z:")
g1gc.z = zoo(g1gc.df[,-c(1)], order.by=times)
head(g1gc.z)
## SecondsSinceLaunch IncrementalCount FullCount
## 2014-05-12 17:00:32.868 1.161 0 0
## 2014-05-12 17:00:33.178 1.472 1 0
## 2014-05-12 17:00:33.677 1.969 2 0
## 2014-05-12 17:00:35.538 3.830 3 0
## 2014-05-12 17:00:37.811 6.103 4 0
## 2014-05-12 17:00:41.427 9.720 5 0
## UserTime SysTime RealTime BeforeSize AfterSize
## 2014-05-12 17:00:32.868 0.11 0.04 0.02 8192 1400
## 2014-05-12 17:00:33.178 0.05 0.01 0.02 5496 1672
## 2014-05-12 17:00:33.677 0.04 0.01 0.01 5768 2557
## 2014-05-12 17:00:35.538 0.21 0.05 0.04 22528 4907
## 2014-05-12 17:00:37.811 0.08 0.01 0.02 24576 7072
## 2014-05-12 17:00:41.427 0.26 0.06 0.04 43008 14336
## TotalSize Delta
## 2014-05-12 17:00:32.868 9437184 6792
## 2014-05-12 17:00:33.178 9437184 3824
## 2014-05-12 17:00:33.677 9437184 3211
## 2014-05-12 17:00:35.538 9437184 17621
## 2014-05-12 17:00:37.811 9437184 17504
## 2014-05-12 17:00:41.427 9437184 28672

Example of Two Benchmark Runs in One Log File

The data in the following graph is from a different log file, not the one of primary interest to this article. I'm including this image because it is an example of idle periods followed by busy periods. It would be uninteresting to average the rate of garbage collection over the entire log file period. More interesting would be the rate of garbage collection in the two busy periods. Are they the same or different? Your production data may be similar, for example, bursts when employees return from lunch and idle times on weekend evenings, etc. Once the data is in an R time series, you can analyze isolated time windows.

Clipping the Time Series data

Flashing back to our test case... Viewing the data as a time series is interesting. You can see that the work-intensive time period is between 9:00 PM and 3:00 AM. Let's clip the data to the interesting period:

par(mfrow=c(2,1))
plot(g1gc.z$UserTime, type="h",
     main="User Time per GC\nTime: Complete Log File",
     xlab="Time of Day", ylab="CPU Seconds per GC",
     col="#1b9e77")
clipped.g1gc.z=window(g1gc.z,
                      start=as.POSIXct("2014-05-12 21:00:00"),
                      end=as.POSIXct("2014-05-13 03:00:00"))
plot(clipped.g1gc.z$UserTime, type="h",
     main="User Time per GC\nTime: Limited to Benchmark Execution",
     xlab="Time of Day", ylab="CPU Seconds per GC",
     col="#1b9e77")
box(which = "outer", lty = "solid")

Cumulative Incremental and Full GC count

Here is the cumulative incremental and full GC count. When the line is very steep, it indicates that the GCs are repeating very quickly. Notice that the scale on the Y axis is different for full vs. incremental.

plot(clipped.g1gc.z[,c(2:3)],
     main="Cumulative Incremental and Full GC count",
     xlab="Time of Day",
     col="#1b9e77")

GC Analysis of Benchmark Execution using Time Series data

In the following series of 3 graphs:
- The "After Size" graph shows the amount of heap space in use after each garbage collection. Many Java objects are still referenced, i.e. alive, during each garbage collection. This may indicate that the application has a memory leak, or may indicate that the application has a very large memory footprint. Typically, an application's memory footprint plateaus in the early stage of execution, so one would expect this graph to have a flat top. The steep decline in the heap space may indicate that the application crashed after 2:00.
- The second graph shows that the outliers in real execution time, discussed above, occur near 2:00, when the Java heap seems to be quite full.
- The third graph shows that Full GCs are infrequent during the first few hours of execution. The rate of Full GCs (the slope of the cumulative Full GC line) changes near midnight.

plot(clipped.g1gc.z[,c("AfterSize","RealTime","FullCount")],
     xlab="Time of Day",
     col=c("#1b9e77","red","#1b9e77"))

GC Analysis of heap recovered

Each GC trace includes the amount of heap space in use before and after the individual GC event. During garbage collection, unreferenced objects are identified, the space holding the unreferenced objects is freed, and thus the difference in before and after usage indicates how much space has been freed. The following box plot and bar chart both demonstrate the same point - the amount of heap space freed per garbage collection is surprisingly low.

par(mfrow=c(2,1))
boxplot(as.vector(clipped.g1gc.z$Delta),
        main="Amount of Heap Recovered per GC Pass",
        xlab="Size in KB",
        horizontal = TRUE,
        col="red")
hist(as.vector(clipped.g1gc.z$Delta),
     main="Amount of Heap Recovered per GC Pass",
     xlab="Size in KB",
     breaks=100,
     col="red")
box(which = "outer", lty = "solid")

This graph is the most interesting. The dark blue area shows how much heap is occupied by referenced Java objects. This represents memory that holds live data. The red fringe at the top shows how much data was recovered after each garbage collection.

barplot(clipped.g1gc.z[,c("AfterSize","Delta")],
        col=c("#7570b3","#e7298a"),
        xlab="Time of Day",
        border=NA)
legend("topleft",
       c("Live Objects","Heap Recovered on GC"),
       fill=c("#7570b3","#e7298a"))
box(which = "outer", lty = "solid")

When I discuss the data in the log files with the customer, I will ask for an explanation for the large amount of referenced data resident in the Java heap. There are two possibilities:
- There is a memory leak, and the amount of space required to hold referenced objects will continue to grow, limited only by the maximum heap size. After the maximum heap size is reached, the JVM will throw an "Out of Memory" exception every time the application tries to allocate a new object. If this is the case, the application needs to be debugged to identify why old objects are referenced when they are no longer needed.
- The application has a legitimate requirement to keep a large amount of data in memory. The customer may want to further increase the maximum heap size. Another possible solution would be to partition the application across multiple cluster nodes, where each node has responsibility for managing a unique subset of the data.

Conclusion

In conclusion, R is a very powerful tool for the analysis of Java garbage collection log files. The primary difficulty is data cleansing so that information can be read into an R data frame. Once the data has been read into R, a rich set of tools may be used for thorough evaluation.
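
A footnote to the "percentage of time spent in garbage collection" question in the Basic Statistics section: the log supplies elapsed time (SecondsSinceLaunch) but not the CPU count, so the sketch below assumes a hypothetical 32-way server; substitute the real count for your machine.

# Hedged sketch: percent of available CPU time spent in GC.
# cpu.count is an assumption; it is not recorded in the log.
cpu.count       = 32
elapsed.seconds = max(g1gc.df$SecondsSinceLaunch)   # about 55484 s in this log
gc.cpu.seconds  = sum(g1gc.df$UserTime)             # the 6236 s computed above
100 * gc.cpu.seconds / (elapsed.seconds * cpu.count)  # roughly 0.35% with these inputs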

    Read the article

  • How can I fix my program from crashing in C++?

    - by Rachel
I'm very new to programming and I am trying to write a program that adds and subtracts polynomials. My program sometimes works, but most of the time, it randomly crashes and I have no idea why. It's very buggy and has other problems I'm trying to fix, but I am unable to really get any further coding done since it crashes. I'm completely new here but any help would be greatly appreciated. Here's the code:

    #include <iostream>
    #include <cstdlib>
    using namespace std;

    int getChoice(void);

    class Polynomial10
    {
    private:
        double* coef;
        int degreePoly;
    public:
        Polynomial10(int max);                   //Constructor for a new Polynomial10
        int getDegree(){return degreePoly;};
        void print();                            //Print the polynomial in standard form
        void read();                             //Read a polynomial from the user
        void add(const Polynomial10& pol);       //Add a polynomial
        void multc(double factor);               //Multiply the poly by scalar
        void subtract(const Polynomial10& pol);  //Subtract polynom
    };

    void Polynomial10::read()
    {
        cout << "Enter degree of a polynom between 1 and 10 : ";
        cin >> degreePoly;
        cout << "Enter space separated coefficients starting from highest degree" << endl;
        for (int i = 0; i <= degreePoly; i++) {
            cin >> coef[i];
        }
    }

    void Polynomial10::print()
    {
        for (int i = 0; i <= degreePoly; i++) {
            if (coef[i] == 0) {
                cout << "";
            }
            else if (i >= 0) {
                if (coef[i] > 0 && i != 0) { cout << "+"; }
                if ((coef[i] != 1 && coef[i] != -1) || i == degreePoly) { cout << coef[i]; }
                if ((coef[i] != 1 && coef[i] != -1) && i != degreePoly) { cout << "*"; }
                if (i != degreePoly && coef[i] == -1) { cout << "-"; }
                if (i != degreePoly) { cout << "x"; }
                if ((degreePoly - i) != 1 && i != degreePoly) { cout << "^"; cout << degreePoly - i; }
            }
        }
    }

    void Polynomial10::add(const Polynomial10& pol)
    {
        for (int i = 0; i < degreePoly; i++) {
            int degree = degreePoly;
            coef[degreePoly - i] += pol.coef[degreePoly - (i + 1)];
        }
    }

    void Polynomial10::subtract(const Polynomial10& pol)
    {
        for (int i = 0; i < degreePoly; i++) {
            coef[degreePoly - i] -= pol.coef[degreePoly - (i + 1)];
        }
    }

    void Polynomial10::multc(double factor)
    {
        //int degreePoly=0;
        //double coef[degreePoly];
        cout << "Enter the scalar multiplier : ";
        cin >> factor;
        for (int i = 0; i < degreePoly; i++) {
            coef[i] *= factor;
        }
    };

    Polynomial10::Polynomial10(int max)
    {
        degreePoly = max;
        coef = new double[degreePoly];
        for (int i; i < degreePoly; i++) {
            coef[i] = 0;
        }
    }

    int main()
    {
        int choice;
        Polynomial10 p1(1), p2(1);
        cout << endl << "CGS 2421: The Polynomial10 Class" << endl << endl << endl;
        cout << "0. Quit\n"
             << "1. Enter polynomial\n"
             << "2. Print polynomial\n"
             << "3. Add another polynomial\n"
             << "4. Subtract another polynomial\n"
             << "5. Multiply by scalar\n\n";
        int choiceFirst = getChoice();
        if (choiceFirst != 1) {
            cout << "Enter a Polynomial first!";
        }
        if (choiceFirst == 1) { choiceFirst = choice; }
        while (choice != 0) {
            switch (choice) {
            case 0: return 0;
            case 1: p1.read(); break;
            case 2: p1.print(); break;
            case 3: p2.read(); p1.add(p2); cout << "Updated Polynomial: "; p1.print(); break;
            case 4: p2.read(); p1.subtract(p2); cout << "Updated Polynomial: "; p1.print(); break;
            case 5: p1.multc(10); cout << "Updated Polynomial: "; p1.print(); break;
            }
            choice = getChoice();
        }
        return 0;
    }

    int getChoice(void)
    {
        int c;
        cout << "\nEnter your choice : ";
        cin >> c;
        return c;
    }
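
For what it's worth, the crashes look deterministic rather than random: a degree-n polynomial has n+1 coefficients, so coef = new double[degreePoly] is one slot short everywhere coef[degreePoly] is written (read(), add(), and subtract() all touch it); the constructor's for (int i; ...) never initializes i; read() raises degreePoly without resizing a buffer that was allocated for degree 1; and main() reads choice before ever assigning it (choiceFirst = choice; looks reversed). A sketch of the two memory fixes, using the same names as the post and untested against the full program:

    Polynomial10::Polynomial10(int max)
    {
        degreePoly = max;
        coef = new double[degreePoly + 1];        // a degree-n poly has n+1 coefficients
        for (int i = 0; i <= degreePoly; i++) {   // 'i' was uninitialized before
            coef[i] = 0;
        }
    }

    void Polynomial10::read()
    {
        cout << "Enter degree of a polynom between 1 and 10 : ";
        cin >> degreePoly;
        delete[] coef;                            // resize for the degree just entered
        coef = new double[degreePoly + 1];
        cout << "Enter space separated coefficients starting from highest degree" << endl;
        for (int i = 0; i <= degreePoly; i++) {
            cin >> coef[i];
        }
    }

The index arithmetic in add() and subtract() (pol.coef[degreePoly - (i + 1)]) also looks shifted by one, but that affects the math rather than the crash, and initializing choice (e.g., int choice = getChoice();) addresses the uninitialized-read in main().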

    Read the article

< Previous Page | 856 857 858 859 860 861 862 863  | Next Page >