Search Results

Search found 15403 results on 617 pages for 'request querystring'.

  • Nginx HTTPS redirects causing loop

    - by Ben Chiappetta
    I've been banging my head against the wall trying to figure this out, so if anyone can help I'd appreciate it. My nginx conf has three different redirect loops, and I haven't been able to get any of the three to work right. The three problem areas are:
    - Redirecting memcache directory to SSL
    - Redirecting accounts directory to SSL
    - Redirecting SSL to www if non-www
    nginx.conf:
        user nginx;
        worker_processes 1;
        error_log /var/log/nginx/error.log warn;
        pid /var/run/nginx.pid;
        events {
            worker_connections 1024;
        }
        http {
            include /etc/nginx/mime.types;
            default_type application/octet-stream;
            log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                            '$status $body_bytes_sent "$http_referer" '
                            '"$http_user_agent" "$http_x_forwarded_for"';
            access_log /var/log/nginx/access.log main;
            error_log /var/log/nginx/error.log notice;
            sendfile on;
            #tcp_nopush on;
            keepalive_timeout 65;
            proxy_set_header X-Url-Scheme $scheme;
            #gzip on;
            rewrite_log on;
            include /etc/nginx/conf.d/*.conf;
        }
    conf.d/default.conf:
        server {
            listen 80;
            server_name <redacted>.net;
            rewrite ^(.*) http://www.<redacted>.net$1;
        }
        server {
            listen 80;
            server_name www.<redacted>.net;
            set_real_ip_from 192.168.30.4;
            set_real_ip_from 192.168.30.5;
            set_real_ip_from 192.168.30.10;
            real_ip_header X-Forwarded-For;
            #charset koi8-r;
            access_log /var/log/nginx/host.access.log main;
            root /var/www/html;
            index index.php index.html index.htm;
            location =/memcache {
                rewrite ^/(.*)$ https://$server_name$request_uri? permanent;
            }
            location /accounts {
                rewrite ^/(.*)$ https://$server_name$request_uri? permanent;
            }
            #error_page 404 /404.html;
            # redirect server error pages to the static page /50x.html
            #
            error_page 500 502 503 504 /50x.html;
            location = /50x.html { }
            # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
            #
            location ~ \.php$ {
                fastcgi_pass 127.0.0.1:9000;
                fastcgi_index index.php;
                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
                include /etc/nginx/fastcgi_params;
                try_files $uri = 404;
            }
            # deny access to .htaccess files, if Apache's document root
            # concurs with nginx's one
            #
            location ~ /\.ht {
                deny all;
            }
        }
    conf.d/ssl.conf:
        # HTTPS server
        #
        server {
            listen 443;
            server_name <redacted>.net;
            rewrite ^(.*) https://www.<redacted>.net$1;
        }
        server {
            listen 443 default_server ssl;
            server_name www.<redacted>.net;
            set_real_ip_from 192.168.30.4;
            set_real_ip_from 192.168.30.5;
            set_real_ip_from 192.168.30.10;
            real_ip_header X-Forwarded-For;
            proxy_set_header X-Forwarded_Proto https;
            proxy_set_header Host $host;
            proxy_redirect off;
            proxy_max_temp_file_size 0;
            proxy_set_header X-Forwarded-Ssl on;
            set $https_enabled on;
            ssl_certificate <redacted>.crt;
            ssl_certificate_key <redacted>.key;
            ssl_session_timeout 5m;
            ssl_protocols SSLv2 SSLv3 TLSv1;
            ssl_ciphers HIGH:!aNULL:!MD5;
            ssl_prefer_server_ciphers on;
            root /var/www/html;
            index index.php index.html index.htm;
            location /memcache {
                auth_basic "Restricted";
                auth_basic_user_file $document_root/memcache/.htpasswd;
            }
            location ~ \.php$ {
                fastcgi_pass 127.0.0.1:9000;
                fastcgi_index index.php;
                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
                fastcgi_param HTTPS on;
                include /etc/nginx/fastcgi_params;
                try_files $uri = 404;
            }
        }
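
    A generic, loop-free pattern (a sketch, not the poster's configuration) is to make the HTTP-to-HTTPS hop depend on how the request actually arrived, for example via the X-Forwarded-Proto header that a fronting proxy or load balancer would set, rather than rewriting unconditionally. Note also that "location =/memcache" is an exact match, so it never applies to /memcache/ or anything beneath it. The domain below is a placeholder.
        # Illustrative only: example.net stands in for the redacted domain, and
        # $http_x_forwarded_proto assumes something in front of nginx sets that header.
        server {
            listen 80;
            server_name www.example.net;

            location /accounts/ {
                # Only bounce to HTTPS when the request really came in as plain HTTP,
                # so requests that already went through the SSL vhost are never re-redirected.
                if ($http_x_forwarded_proto != "https") {
                    rewrite ^ https://$host$request_uri? permanent;
                }
                # ... normal handling when already on HTTPS ...
            }
        }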

  • Multiple Graphics Cards on Fedora 15 (Gnome 3)

    - by Michael
    I have the (delightful) misfortune of having 3 graphics cards. They are XFX Radeon 5750s. Each drives 2 monitors via dvi. I am having a really hard time getting these running on fedora 15 (gnome 3). So my setup is 3 columns of 2 monitors (the upper monitor is mounted upside down in each column to reduce the bezel between monitors). When the (graphical) login screen comes up all 6 have the blue stripey background that must be the default, but then when I login, things get interesting. In the xorg.conf below, you will see only 2 of the screens in the serverlayout while the other 4 are commented out. Logging in with only 2 of the screens active works well (and it even remembers that the top one is upside down, and should be considered above the lower, i am not sure where it stores this info, but i set it using the graphical "Displays" settings) However, as soon as I uncomment a third screen, or more, it gives me an error message when I login. It's one of those friendly, less helpful messages (Oh no! Something has gone wrong. A problem has occurred and the system can't recover. Please log out and try again). If i do not use an xorg.conf, then the "Displays" prefs pane shows only the two monitors on one of my graphics cards Thanks to anyone who can help me get going! (xorg.conf and then lspci below, and xorg log) xorg.conf Section "ServerLayout" Identifier "X.org Configured" Screen "Screen0" 0 0 Screen "Screen1" Below "Screen0" # Screen "Screen2" RightOf "Screen0" # Screen "Screen3" RightOf "Screen1" # Screen "Screen4" RightOf "Screen3" # Screen "Screen5" RightOf "Screen4" InputDevice "Mouse0" "CorePointer" InputDevice "Keyboard0" "CoreKeyboard" EndSection Section "Files" ModulePath "/usr/lib/xorg/modules" FontPath "catalogue:/etc/X11/fontpath.d" FontPath "built-ins" EndSection Section "Module" Load "record" Load "dri" Load "dbe" Load "extmod" Load "dri2" Load "glx" EndSection Section "InputDevice" Identifier "Keyboard0" Driver "kbd" EndSection Section "InputDevice" Identifier "Mouse0" Driver "mouse" Option "Protocol" "auto" Option "Device" "/dev/input/mice" Option "ZAxisMapping" "4 5 6 7" EndSection Section "Monitor" Identifier "Monitor0" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Monitor" Identifier "Monitor1" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Monitor" Identifier "Monitor2" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Monitor" Identifier "Monitor3" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Monitor" Identifier "Monitor4" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Monitor" Identifier "Monitor5" VendorName "Monitor Vendor" ModelName "Monitor Model" EndSection Section "Device" Identifier "Card0" Driver "radeon" BusID "PCI:4:0:0" EndSection Section "Device" Identifier "Card1" Driver "radeon" BusID "PCI:5:0:0" EndSection Section "Device" Identifier "Card2" Driver "radeon" BusID "PCI:6:0:0" EndSection Section "Screen" Identifier "Screen0" Device "Card0" Monitor "Monitor0" SubSection "Display" Viewport 0 0 Depth 24 EndSubSection EndSection Section "Screen" Identifier "Screen1" Device "Card0" Monitor "Monitor1" SubSection "Display" Viewport 0 0 Depth 24 EndSubSection EndSection Section "Screen" Identifier "Screen2" Device "Card1" Monitor "Monitor2" SubSection "Display" Viewport 0 0 Depth 24 EndSubSection EndSection Section "Screen" Identifier "Screen3" Device "Card1" Monitor "Monitor3" SubSection "Display" Viewport 0 0 Depth 24 
EndSubSection EndSection Section "Screen" Identifier "Screen4" Device "Card2" Monitor "Monitor4" SubSection "Display" Viewport 0 0 Depth 24 EndSubSection EndSection Section "Screen" Identifier "Screen5" Device "Card2" Monitor "Monitor5" SubSection "Display" Viewport 0 0 Depth 24 EndSubSection EndSection lspci output follows [tgm@tgm ~]$ lspci 00:00.0 Host bridge: Intel Corporation 5520/5500/X58 I/O Hub to ESI Port (rev 13) 00:01.0 PCI bridge: Intel Corporation 5520/5500/X58 I/O Hub PCI Express Root Port 1 (rev 13) 00:03.0 PCI bridge: Intel Corporation 5520/5500/X58 I/O Hub PCI Express Root Port 3 (rev 13) 00:07.0 PCI bridge: Intel Corporation 5520/5500/X58 I/O Hub PCI Express Root Port 7 (rev 13) 00:14.0 PIC: Intel Corporation 5520/5500/X58 I/O Hub System Management Registers (rev 13) 00:14.1 PIC: Intel Corporation 5520/5500/X58 I/O Hub GPIO and Scratch Pad Registers (rev 13) 00:14.2 PIC: Intel Corporation 5520/5500/X58 I/O Hub Control Status and RAS Registers (rev 13) 00:14.3 PIC: Intel Corporation 5520/5500/X58 I/O Hub Throttle Registers (rev 13) 00:1a.0 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #4 00:1a.1 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #5 00:1a.2 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #6 00:1a.7 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Controller #2 00:1b.0 Audio device: Intel Corporation 82801JI (ICH10 Family) HD Audio Controller 00:1c.0 PCI bridge: Intel Corporation 82801JI (ICH10 Family) PCI Express Root Port 1 00:1c.1 PCI bridge: Intel Corporation 82801JI (ICH10 Family) PCI Express Port 2 00:1c.2 PCI bridge: Intel Corporation 82801JI (ICH10 Family) PCI Express Root Port 3 00:1d.0 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #1 00:1d.1 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #2 00:1d.2 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB UHCI Controller #3 00:1d.7 USB Controller: Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Controller #1 00:1e.0 PCI bridge: Intel Corporation 82801 PCI Bridge (rev 90) 00:1f.0 ISA bridge: Intel Corporation 82801JIR (ICH10R) LPC Interface Controller 00:1f.2 IDE interface: Intel Corporation 82801JI (ICH10 Family) 4 port SATA IDE Controller #1 00:1f.3 SMBus: Intel Corporation 82801JI (ICH10 Family) SMBus Controller 00:1f.5 IDE interface: Intel Corporation 82801JI (ICH10 Family) 2 port SATA IDE Controller #2 02:00.0 PCI bridge: nVidia Corporation NF200 PCIe 2.0 switch for mainboards (rev a3) 03:00.0 PCI bridge: nVidia Corporation NF200 PCIe 2.0 switch for mainboards (rev a3) 03:02.0 PCI bridge: nVidia Corporation NF200 PCIe 2.0 switch for mainboards (rev a3) 04:00.0 VGA compatible controller: ATI Technologies Inc Juniper [Radeon HD 5750 Series] 04:00.1 Audio device: ATI Technologies Inc Juniper HDMI Audio [Radeon HD 5700 Series] 05:00.0 VGA compatible controller: ATI Technologies Inc Juniper [Radeon HD 5750 Series] 05:00.1 Audio device: ATI Technologies Inc Juniper HDMI Audio [Radeon HD 5700 Series] 06:00.0 VGA compatible controller: ATI Technologies Inc Juniper [Radeon HD 5750 Series] 06:00.1 Audio device: ATI Technologies Inc Juniper HDMI Audio [Radeon HD 5700 Series] 07:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8111/8168B PCI Express Gigabit Ethernet controller (rev 02) 08:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. 
RTL8111/8168B PCI Express Gigabit Ethernet controller (rev 02) xorg log (posted to fpaste due to how long it is. thanks to marcusw for the request) http://www.fpaste.org/r5ww/ xorg log with all 6 monitors enabled in xorg.conf (they all turn on and have blue, but then one gets the aforementioned user-friendly error message). http://www.fpaste.org/X63H/
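
    One thing worth experimenting with (an assumption, not something from the post) is Xinerama, the classic X.Org way of presenting several Screen sections as a single desktop; without it, each additional Screen becomes a separate X screen that the desktop shell does not span. GNOME 3's compositor may or may not cooperate, so treat the fragment below as a sketch to try, not a confirmed fix. It mirrors the commented-out layout from the xorg.conf above.
        # Sketch: one ServerLayout over all six screens, merged with Xinerama.
        Section "ServerLayout"
            Identifier  "X.org Configured"
            Screen      "Screen0" 0 0
            Screen      "Screen1" Below   "Screen0"
            Screen      "Screen2" RightOf "Screen0"
            Screen      "Screen3" RightOf "Screen1"
            Screen      "Screen4" RightOf "Screen3"
            Screen      "Screen5" RightOf "Screen4"
            Option      "Xinerama" "on"
            InputDevice "Mouse0"    "CorePointer"
            InputDevice "Keyboard0" "CoreKeyboard"
        EndSection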

  • Adding multiple data importers support to web applications

    - by DigiMortal
    I’m building web application for customer and there is requirement that users must be able to import data in different formats. Today we will support XLSX and ODF as import formats and some other formats are waiting. I wanted to be able to add new importers on the fly so I don’t have to deploy web application again when I add new importer or change some existing one. In this posting I will show you how to build generic importers support to your web application. Importer interface All importers we use must have something in common so we can easily detect them. To keep things simple I will use interface here. public interface IMyImporter {     string[] SupportedFileExtensions { get; }     ImportResult Import(Stream fileStream, string fileExtension); } Our interface has the following members: SupportedFileExtensions – string array of file extensions that importer supports. This property helps us find out what import formats are available and which importer to use with given format. Import – method that does the actual importing work. Besides file we give in as stream we also give file extension so importer can decide how to handle the file. It is enough to get started. When building real importers I am sure you will switch over to abstract base class. Importer class Here is sample importer that imports data from Excel and Word documents. Importer class with no implementation details looks like this: public class MyOpenXmlImporter : IMyImporter {     public string[] SupportedFileExtensions     {         get { return new[] { "xlsx", "docx" }; }     }     public ImportResult Import(Stream fileStream, string extension)     {         // ...     } } Finding supported import formats in web application Now we have importers created and it’s time to add them to web application. Usually we have one page or ASP.NET MVC controller where we need importers. To this page or controller we add the following method that uses reflection to find all classes that implement our IMyImporter interface. private static string[] GetImporterFileExtensions() {     var types = from a in AppDomain.CurrentDomain.GetAssemblies()                 from t in a.GetTypes()                 where t.GetInterfaces().Contains(typeof(IMyImporter))                 select t;       var extensions = new Collection<string>();     foreach (var type in types)     {         var instance = (IMyImporter)type.InvokeMember(null,                        BindingFlags.CreateInstance, null, null, null);           foreach (var extension in instance.SupportedFileExtensions)         {             if (extensions.Contains(extension))                 continue;               extensions.Add(extension);         }     }       return extensions.ToArray(); } This code doesn’t look nice and is far from optimal but it works for us now. It is possible to improve performance of web application if we cache extensions and their corresponding types to some static dictionary. We have to fill it only once because our application is restarted when something changes in bin folder. Finding importer by extension When user uploads file we need to detect the extension of file and find the importer that supports given extension. We add another method to our page or controller that uses reflection to return us importer instance or null if extension is not supported. 
private static IMyImporter GetImporterForExtension(string extensionToFind) {     var types = from a in AppDomain.CurrentDomain.GetAssemblies()                 from t in a.GetTypes()                 where t.GetInterfaces().Contains(typeof(IMyImporter))                 select t;     foreach (var type in types)     {         var instance = (IMyImporter)type.InvokeMember(null,                        BindingFlags.CreateInstance, null, null, null);           if (instance.SupportedFileExtensions.Contains(extensionToFind))         {             return instance;         }     }       return null; } Here is example ASP.NET MVC controller action that accepts uploaded file, finds importer that can handle file and imports data. Again, this is sample code I kept minimal to better illustrate how things work. public ActionResult Import(MyImporterModel model) {     var file = Request.Files[0];     var extension = Path.GetExtension(file.FileName).ToLower();     var importer = GetImporterForExtension(extension.Substring(1));     var result = importer.Import(file.InputStream, extension);     if (result.Errors.Count > 0)     {         foreach (var error in result.Errors)             ModelState.AddModelError("file", error);           return Import();     }     return RedirectToAction("Index"); } Conclusion That’s it. Using couple of ugly methods and one simple interface we were able to add importers support to our web application. Example code here is not perfect but it works. It is possible to cache mappings between file extensions and importer types to some static variable because changing of these mappings means that something is changed in bin folder of web application and web application is restarted in this case anyway.
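
    The caching idea mentioned in the conclusion could look roughly like the sketch below: a hypothetical drop-in variant of the two reflection methods above that scans the loaded assemblies once, remembers which type handles which extension in a static dictionary, and builds importer instances from that map.
        // Hypothetical sketch of the caching suggested in the conclusion; requires
        // using System; using System.Collections.Generic; using System.Linq;
        private static readonly object ImporterLock = new object();
        private static Dictionary<string, Type> _importersByExtension;

        private static Dictionary<string, Type> GetImporterMap()
        {
            if (_importersByExtension == null)
            {
                lock (ImporterLock)
                {
                    if (_importersByExtension == null)
                    {
                        var map = new Dictionary<string, Type>(StringComparer.OrdinalIgnoreCase);

                        // One-time scan: concrete classes implementing IMyImporter.
                        var types = AppDomain.CurrentDomain.GetAssemblies()
                            .SelectMany(a => a.GetTypes())
                            .Where(t => t.IsClass && !t.IsAbstract
                                        && typeof(IMyImporter).IsAssignableFrom(t));

                        foreach (var type in types)
                        {
                            var importer = (IMyImporter)Activator.CreateInstance(type);
                            foreach (var extension in importer.SupportedFileExtensions)
                                map[extension] = type;   // last registration wins
                        }

                        _importersByExtension = map;
                    }
                }
            }

            return _importersByExtension;
        }

        private static IMyImporter GetImporterForExtension(string extensionToFind)
        {
            Type type;
            return GetImporterMap().TryGetValue(extensionToFind, out type)
                ? (IMyImporter)Activator.CreateInstance(type)
                : null;
        }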

  • How can I render multiple windows with DirectX 9 in C++?

    - by Friso1990
    I'm trying to render multiple windows, using DirectX 9 and swap chains, but even though I create 2 windows, I only see the first one that I've created. My RendererDX9 header is this: #include <d3d9.h> #include <Windows.h> #include <vector> #include "RAT_Renderer.h" namespace RAT_ENGINE { class RAT_RendererDX9 : public RAT_Renderer { public: RAT_RendererDX9(); ~RAT_RendererDX9(); void Init(RAT_WindowManager* argWMan); void CleanUp(); void ShowWin(); private: LPDIRECT3D9 renderInterface; // Used to create the D3DDevice LPDIRECT3DDEVICE9 renderDevice; // Our rendering device LPDIRECT3DSWAPCHAIN9* swapChain; // Swapchain to make multi-window rendering possible WNDCLASSEX wc; std::vector<HWND> hwindows; void Render(int argI); }; } And my .cpp file is this: #include "RAT_RendererDX9.h" static LRESULT CALLBACK MsgProc( HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam ); namespace RAT_ENGINE { RAT_RendererDX9::RAT_RendererDX9() : renderInterface(NULL), renderDevice(NULL) { } RAT_RendererDX9::~RAT_RendererDX9() { } void RAT_RendererDX9::Init(RAT_WindowManager* argWMan) { wMan = argWMan; // Register the window class WNDCLASSEX windowClass = { sizeof( WNDCLASSEX ), CS_CLASSDC, MsgProc, 0, 0, GetModuleHandle( NULL ), NULL, NULL, NULL, NULL, "foo", NULL }; wc = windowClass; RegisterClassEx( &wc ); for (int i = 0; i< wMan->getWindows().size(); ++i) { HWND hWnd = CreateWindow( "foo", argWMan->getWindow(i)->getName().c_str(), WS_OVERLAPPEDWINDOW, argWMan->getWindow(i)->getX(), argWMan->getWindow(i)->getY(), argWMan->getWindow(i)->getWidth(), argWMan->getWindow(i)->getHeight(), NULL, NULL, wc.hInstance, NULL ); hwindows.push_back(hWnd); } // Create the D3D object, which is needed to create the D3DDevice. renderInterface = (LPDIRECT3D9)Direct3DCreate9( D3D_SDK_VERSION ); // Set up the structure used to create the D3DDevice. Most parameters are // zeroed out. We set Windowed to TRUE, since we want to do D3D in a // window, and then set the SwapEffect to "discard", which is the most // efficient method of presenting the back buffer to the display. And // we request a back buffer format that matches the current desktop display // format. D3DPRESENT_PARAMETERS deviceConfig; ZeroMemory( &deviceConfig, sizeof( deviceConfig ) ); deviceConfig.Windowed = TRUE; deviceConfig.SwapEffect = D3DSWAPEFFECT_DISCARD; deviceConfig.BackBufferFormat = D3DFMT_UNKNOWN; deviceConfig.BackBufferHeight = 1024; deviceConfig.BackBufferWidth = 768; deviceConfig.EnableAutoDepthStencil = TRUE; deviceConfig.AutoDepthStencilFormat = D3DFMT_D16; // Create the Direct3D device. Here we are using the default adapter (most // systems only have one, unless they have multiple graphics hardware cards // installed) and requesting the HAL (which is saying we want the hardware // device rather than a software one). Software vertex processing is // specified since we know it will work on all cards. On cards that support // hardware vertex processing, though, we would see a big performance gain // by specifying hardware vertex processing. 
renderInterface->CreateDevice( D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hwindows[0], D3DCREATE_SOFTWARE_VERTEXPROCESSING, &deviceConfig, &renderDevice ); this->swapChain = new LPDIRECT3DSWAPCHAIN9[wMan->getWindows().size()]; this->renderDevice->GetSwapChain(0, &swapChain[0]); for (int i = 0; i < wMan->getWindows().size(); ++i) { renderDevice->CreateAdditionalSwapChain(&deviceConfig, &swapChain[i]); } renderDevice->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW); // Set cullmode to counterclockwise culling to save resources renderDevice->SetRenderState(D3DRS_AMBIENT, 0xffffffff); // Turn on ambient lighting renderDevice->SetRenderState(D3DRS_ZENABLE, TRUE); // Turn on the zbuffer } void RAT_RendererDX9::CleanUp() { renderDevice->Release(); renderInterface->Release(); } void RAT_RendererDX9::Render(int argI) { // Clear the backbuffer to a blue color renderDevice->Clear( 0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB( 0, 0, 255 ), 1.0f, 0 ); LPDIRECT3DSURFACE9 backBuffer = NULL; // Set draw target this->swapChain[argI]->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &backBuffer); this->renderDevice->SetRenderTarget(0, backBuffer); // Begin the scene renderDevice->BeginScene(); // End the scene renderDevice->EndScene(); swapChain[argI]->Present(NULL, NULL, hwindows[argI], NULL, 0); } void RAT_RendererDX9::ShowWin() { for (int i = 0; i < wMan->getWindows().size(); ++i) { ShowWindow( hwindows[i], SW_SHOWDEFAULT ); UpdateWindow( hwindows[i] ); // Enter the message loop MSG msg; while( GetMessage( &msg, NULL, 0, 0 ) ) { if (PeekMessage( &msg, NULL, 0U, 0U, PM_REMOVE ) ) { TranslateMessage( &msg ); DispatchMessage( &msg ); } else { Render(i); } } } } } LRESULT CALLBACK MsgProc( HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam ) { switch( msg ) { case WM_DESTROY: //CleanUp(); PostQuitMessage( 0 ); return 0; case WM_PAINT: //Render(); ValidateRect( hWnd, NULL ); return 0; } return DefWindowProc( hWnd, msg, wParam, lParam ); } I've made a sample function to make multiple windows: void RunSample1() { //Create the window manager. RAT_ENGINE::RAT_WindowManager* wMan = new RAT_ENGINE::RAT_WindowManager(); //Create the render manager. RAT_ENGINE::RAT_RenderManager* rMan = new RAT_ENGINE::RAT_RenderManager(); //Create a window. //This is currently needed to initialize the render manager and create a renderer. wMan->CreateRATWindow("Sample 1 - 1", 10, 20, 640, 480); wMan->CreateRATWindow("Sample 1 - 2", 150, 100, 480, 640); //Initialize the render manager. rMan->Init(wMan); //Show the window. rMan->getRenderer()->ShowWin(); } How do I get the multiple windows to work?
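
    One likely structural problem is that ShowWin enters a blocking GetMessage loop for the first window, so it never reaches the second one. A sketch of a restructured ShowWin is below; it reuses the member names from the code above (the rest of the class is assumed), shows every window up front, and runs a single PeekMessage loop that renders each swap chain once per idle pass.
        // Hypothetical restructuring, not the original code.
        void RAT_RendererDX9::ShowWin()
        {
            // Show all windows first instead of one at a time.
            for (size_t i = 0; i < hwindows.size(); ++i)
            {
                ShowWindow(hwindows[i], SW_SHOWDEFAULT);
                UpdateWindow(hwindows[i]);
            }

            MSG msg;
            ZeroMemory(&msg, sizeof(msg));
            while (msg.message != WM_QUIT)
            {
                if (PeekMessage(&msg, NULL, 0U, 0U, PM_REMOVE))
                {
                    TranslateMessage(&msg);
                    DispatchMessage(&msg);
                }
                else
                {
                    // Idle: draw every window's swap chain once per frame.
                    for (size_t i = 0; i < hwindows.size(); ++i)
                        Render(static_cast<int>(i));
                }
            }
        }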

  • Checking Exadata image and software versions

    - by Liu Maclean
    ??check Exadata Image & OS versions , GI & DB patches sundiag exacheck cellserv ==> imageinfo dbhost ==> /usr/local/bin/imagehistory Also check the version of the switch. Login to Switch and execute the following command [root@myswitch-1 sbin]# version [root@dmorlsw-ib2 sbin]# cd /usr/local/bin [root@dmorlsw-ib2 bin]# ls -lrt version -rwxr-xr-x 1 root root 20356 Apr 4 2011 version Output will look as below. [root@dmorlsw-ib2 ~]# version SUN DCS 36p version: 1.3.3-2 Build time: Apr 4 2011 11:15:19 SP board info: Manufacturing Date: 2009.05.05 Serial Number: "NCD3X0178" Hardware Revision: 0x0006 Firmware Revision: 0x0102 BIOS version: NOW1R112 BIOS date: 04/24/2009 ib8# cat /sys/class/infiniband/is4_0/fw_ver 7.2.300 ib8 # cat /sys/class/dmi/id/bios_version NOW1R112 ib8 # nm2version NM2-36p version: 1.0.1-1 Build time: Sep 14 2009 12:52:51 ComExpress info: Manufacturing Date: 2009.08.19 Serial Number: Hardware Revision: 0x0006 Firmware Revision: 0x0102 { case `uname` in Linux ) ILOM="/usr/bin/ipmitool sunoem cli" ;; SunOS ) ILOM="/opt/ipmitool/bin/ipmitool sunoem cli" ;; esac ; ImageInfo="/opt/oracle.cellos/imageinfo" ; uname -srm ; head -1 /etc/*release ; uptime | cut -d, -f1 ; $ILOM "show /SP system_description system_identifier" | grep = ; $ImageInfo -activated -node -status -ver | grep -v ^$ ; } | tee /tmp/ExaInfo.log $GRID_HOME/OPatch/opatch lsinv -all -oh $GRID_HOME | tee /tmp/OPatchInv.log $ORACLE_HOME/OPatch/opatch lsinv -all | tee -a /tmp/OPatchInv.log cat /tmp/ExaInfo.log Linux 2.6.18-128.1.16.0.1.el5 x86_64 ==> /etc/enterprise-release <== Enterprise Linux Enterprise Linux Server release 5.3 (Carthage) ==> /etc/redhat-release <== Enterprise Linux Enterprise Linux Server release 5.3 (Carthage) 20:37:56 up 458 days system_description = SUN FIRE X4170 SERVER, ILOM v3.0.6.10.b, r52264 system_identifier = Sun Oracle Database Machine Active image version: 11.2.1.2.3 Active image activated: XXXX-XX-XX 12:27:12 +0800 Active image status: success Active node type: COMPUTE Inactive image version: undefined FileName: OPatchInv.log ---------------- ... Oracle Home       : /u01/app/11.2.0/grid Central Inventory : /u01/app/oraInventory   from           : /etc/oraInst.loc OPatch version    : 11.2.0.1.2 OUI version       : 11.2.0.1.0 OUI location      : /u01/app/11.2.0/grid/oui ... -------------------------------------------------------------------------------- List of Oracle Homes:   Name                                       Location   Ora11g_gridinfrahome1         /u01/app/11.2.0/grid   OraDb11g_home1                  /u01/app/oracle/product/11.2.0/dbhome_1 -------------------------------------------------------------------------------- Installed Top-level Products (1): Oracle Grid Infrastructure                                           11.2.0.1.0 ... Interim patches (2) : Patch  9524394      : applied on Thu Jun 03 20:46:05 CST 2010 ... {TRACKING BUG FOR 11.2.0.1 DB MACHINE BUNDLE PATCH 3} Patch  9455587      : applied on Fri Apr 02 18:27:47 CST 2010 ... {MERGE REQUEST ON TOP OF 11.2.0.1.0 FOR BUGS 8483425 8667622 8702731 8730804} Rac system comprising of multiple nodes  Local node = dbserv01  Remote node = dbserv02  Remote node = dbserv03  Remote node = dbserv04 -------------------------------------------------------------------------------- OPatch succeeded. ... Oracle Home       : /u01/app/oracle/product/11.2.0/dbhome_1 ... Oracle Database 11g                                                  11.2.0.1.0 ... 
Interim patches (5) : Patch  8888434      : applied on Sat Jan 08 00:27:33 CST 2011 ... {AIX-ASM-CF: LMHB TERMINATE INSTANCE WHEN OFFLINE ONE FAILGROUP IN ASM DG} Patch  8730312      : applied on Thu Jun 03 21:30:03 CST 2010 ... {FWD MERGE FOR BASE BUG 8715387 FOR 12G} Patch  9502717      : applied on Thu Jun 03 21:25:54 CST 2010 ... {LMS HIT ORA-600 [KJBLDRMNEXTPKEY:SEEN] AND CRASHED THE INSTANCE} { + same 2 as GI above} ?? cell server Cache Policy cell08# MegaCli64 -LDInfo -Lall -aALL | grep 'Current Cache Policy' Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU cell09# MegaCli64 -LDInfo -Lall -aALL | grep 'Current Cache Policy' Current Cache Policy: WriteBack, ReadAheadNone, Direct, No Write Cache if Bad BBU Default Cache Policy: WriteBack, ReadAheadNone, Direct, No Write Cache if Bad BBU Current Cache Policy: WriteThrough, ReadAheadNone, Direct, No Write Cache if Bad BBU Cache policy is in WB Would recommend proactive  battery repalcement. Example : a. /opt/MegaRAID/MegaCli/MegaCli64 -LDGetProp  -Cache -LALL -aALL ####( Will list the cache policy) b. /opt/MegaRAID/MegaCli/MegaCli64 -LDSetProp  -WB  -LALL -aALL ####( Will try to change teh policy from xx to WB)     So policy Change to WB will not come into effect immediately     Set Write Policy to WriteBack on Adapter 0, VD 0 (target id: 0) success     Battery capacity is below the threshold value ??cell BBU??????: cell08# /opt/MegaRAID/MegaCli/MegaCli64 -AdpBbuCmd -GetBbuStatus -a0 BBU status for Adapter: 0 BatteryType: iBBU Voltage: 4061 mV Current: 0 mA Temperature: 36 C BBU Firmware Status: Charging Status : None Voltage : OK Temperature : OK Learn Cycle Requested : No Learn Cycle Active : No Learn Cycle Status : OK Learn Cycle Timeout : No I2c Errors Detected : No Battery Pack Missing : No Battery Replacement required : No Remaining Capacity Low : Yes Periodic Learn Required : No Battery state: GasGuageStatus: Fully Discharged : No Fully Charged : Yes Discharging : Yes Initialized : Yes Remaining Time Alarm : No Remaining Capacity Alarm: No Discharge Terminated : No Over Temperature : No Charging Terminated : No Over Charged : No Relative State of Charge: 99 % Charger System State: 49168 Charger System Ctrl: 0 Charging current: 0 mA Absolute state of charge: 21 % Max Error: 2 % Exit Code: 0x00 ????BBU ??: dcli -g ~/cell_group -l root -t '{ uname -srm ; head -1 /etc/*release ; uptime | cut -d, -f1 ; imagehistory ; ipmitool sunoem cli "show /SP system_description system_identifier" | grep = ; ipmitool sunoem cli "show /SP/policy FLASH_ACCELERATOR_CARD_INSTALLED /opt/MegaRAID/MegaCli/MegaCli64 -AdpBbuCmd -GetBbuStatus -a0 | egrep -i 'BBU|Battery|Charge:|Fully|Low|Learn' ; }' | tee /tmp/ExaInfo.log Target cells: ['cellserv01', 'cellserv02', 'cellserv03', 'cellserv04', 'cellserv05', 'cellserv06', 'cellserv07'] cellserv01: Linux 2.6.18-128.1.16.0.1.el5 x86_64 cellserv01: ==> /etc/enterprise-release <== cellserv01: Enterprise Linux Enterprise Linux Server release 5.3 (Carthage) cellserv01: cellserv01: ==> /etc/redhat-release <== cellserv01: Enterprise Linux Enterprise Linux Server release 5.3 (Carthage) cellserv01: 01:17:39 up 635 days cellserv01: Version : 11.2.1.2.1 cellserv01: Image activation date : 2011-03-25 11:59:34 -0800 cellserv01: Imaging mode : fresh cellserv01: Imaging status : success cellserv01: cellserv01: Version : 11.2.1.2.3 cellserv01: Image activation date : 2011-04-13 12:15:46 +0800 cellserv01: Imaging mode : patch cellserv01: Imaging status : success cellserv01: cellserv01: Version 
: 11.2.1.2.6 cellserv01: Image activation date : 2011-05-27 23:08:22 +0800 cellserv01: Imaging mode : patch cellserv01: Imaging status : success cellserv01: cellserv01: system_description = SUN FIRE X4275 SERVER, ILOM v3.0.6.10.b, r52264 cellserv01: system_identifier = Sun Oracle Database Machine cellserv01: Connected. Use ^D to exit. cellserv01: -> show /SP/policy FLASH_ACCELERATOR_CARD_INSTALLED cellserv01: show: No matching properties found. cellserv01: cellserv01: -> Session closed cellserv01: Disconnected cellserv01: BBU status for Adapter: 0 cellserv01: BatteryType: iBBU cellserv01: BBU Firmware Status: cellserv01: Learn Cycle Requested : No cellserv01: Learn Cycle Active : No cellserv01: Learn Cycle Status : OK cellserv01: Learn Cycle Timeout : No cellserv01: Battery Pack Missing : No cellserv01: Battery Replacement required : No cellserv01: Remaining Capacity Low : Yes cellserv01: Periodic Learn Required : No cellserv01: Battery state: cellserv01: Fully Discharged : No cellserv01: Fully Charged : Yes cellserv01: Relative State of Charge: 99 % cellserv01: Absolute state of charge: 21 % dcli -l root -g /root/all_group '/opt/MegaRAID/MegAaCli/MegaCli64 -AdpBbuCmd -a0' > BBU.out check ipmi: dcli -g ~/cell_group -l root -t '{ > ipmitool sunoem cli "show /SP/policy FLASH_ACCELERATOR_CARD_INSTALLED" | grep = ; MegaCli64 -LDInfo -Lall -aALL | grep 'Current Cache Policy' ; }' | tee /tmp/ExaCells.log
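
    The individual checks above can be gathered in one pass with dcli; the group files (~/cell_group, ~/dbs_group) and log paths in the sketch below are assumptions about the local setup rather than part of the original notes.
        # Cell storage servers: active image version and controller cache policy.
        dcli -g ~/cell_group -l root \
          "imageinfo -ver -status; /opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -Lall -aALL | grep 'Current Cache Policy'" \
          | tee /tmp/cell_versions.log

        # Database servers: image history plus OS release.
        dcli -g ~/dbs_group -l root \
          "/usr/local/bin/imagehistory; head -1 /etc/*release" \
          | tee /tmp/db_versions.log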

  • Cannot Create New Team Project TFS2010 TF249063 TF218017

    - by Kodicus
    Server: Windows 2008 R2 Standard Team Foundation Server 2010 WSS 3.0 TFS Configuration: Single Server instalation (including SharePoint) The following error occurs when trying to create a new team project from my local machine. The ://sourcecontrol site and ://sourcecontrol/sites/DefaultCollection/ site appears to be functioning fine and my user is a Site collection administrator on both. I can navigate both sites through a browser on my local machine. Thanks for your help! 2010-04-23T10:01:42 | Module: Internal | Team Foundation Server proxy retrieved | Completion time: 0 seconds 2010-04-23T10:01:42 | Module: Wizard | Retrieved IAuthorizationService proxy | Completion time: 0 seconds 2010-04-23T10:01:42 | Module: Wizard | TF30227: Project creation permissions retrieved | Completion time: 0.109382 seconds 2010-04-23T10:01:42 | Module: Internal | The template information for Team Foundation Server "sourcecontrol\DefaultCollection" was retrieved from the Team Foundation Server. | Completion time: 0.15626 seconds ---begin Exception entry--- Time: 2010-04-23T10:03:24 Module: Wizard Exception Message: TF218017: A SharePoint site could not be created for use as the team project portal. The following error occurred: TF249063: The following Web service is not available: ://sourcecontrol/_vti_bin/TeamFoundationIntegrationService.asmx. This Web service is used for the Team Foundation Server Extensions for SharePoint Products. The underlying error is: The underlying connection was closed: A connection that was expected to be kept alive was closed by the server.. Verify that the following URL points to a valid SharePoint Web application and that the application is available: ://sourcecontrol. If the URL is correct and the Web application is operating normally, verify that a firewall is not blocking access to the Web application. (type TeamFoundationServerException) Exception Stack Trace: at Microsoft.VisualStudio.TeamFoundation.WssSiteCreator.CheckCreateSite(TfsTeamProjectCollection tfsServer, Uri adminUri, Uri siteUri) at Microsoft.VisualStudio.TeamFoundation.WssSiteCreator.ValidateSettings(ProjectCreationContext context) at Microsoft.VisualStudio.TeamFoundation.PortfolioProjectForm.OnFinish() Inner Exception Details: Exception Message: TF249063: The following Web service is not available: ://sourcecontrol/_vti_bin/TeamFoundationIntegrationService.asmx. This Web service is used for the Team Foundation Server Extensions for SharePoint Products. The underlying error is: The underlying connection was closed: A connection that was expected to be kept alive was closed by the server.. Verify that the following URL points to a valid SharePoint Web application and that the application is available: ://sourcecontrol. If the URL is correct and the Web application is operating normally, verify that a firewall is not blocking access to the Web application. 
(type TeamFoundationServiceUnavailableException) Exception Stack Trace: at Microsoft.TeamFoundation.Client.SharePoint.SharePointTeamFoundationIntegrationService.HandleException(Exception e) at Microsoft.TeamFoundation.Client.SharePoint.SharePointTeamFoundationIntegrationService.CheckUrl(String absolutePath, CheckUrlOptions options, Guid configurationServerId, Guid projectCollectionId) at Microsoft.TeamFoundation.Client.SharePoint.WssUtilities.CheckUrl(ICredentials credentials, Uri adminUrl, Uri siteUrl, CheckUrlOptions options, Guid configurationServerId, Guid projectCollectionId) at Microsoft.TeamFoundation.Client.SharePoint.WssUtilities.CheckCreateSite(TfsConnection tfs, Uri adminUrl, Uri siteUrl) at Microsoft.VisualStudio.TeamFoundation.WssSiteCreator.CheckCreateSite(TfsTeamProjectCollection tfsServer, Uri adminUri, Uri siteUri) Inner Exception Details: Exception Message: The underlying connection was closed: A connection that was expected to be kept alive was closed by the server. (type WebException) Exception Stack Trace: at System.Net.WebRequest.GetResponse() at Microsoft.TeamFoundation.Client.TeamFoundationClientProxyBase.AsyncWebRequest.ExecRequest(Object obj) Inner Exception Details: Exception Message: Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host. (type IOException) Exception Stack Trace: at System.Net.Sockets.NetworkStream.Read(Byte[] buffer, Int32 offset, Int32 size) at System.Net.PooledStream.Read(Byte[] buffer, Int32 offset, Int32 size) at System.Net.Connection.SyncRead(WebRequest request, Boolean userRetrievedStream, Boolean probeRead) Inner Exception Details: Exception Message: An existing connection was forcibly closed by the remote host (type SocketException) Exception Stack Trace: at System.Net.Sockets.Socket.Receive(Byte[] buffer, Int32 offset, Int32 size, SocketFlags socketFlags) at System.Net.Sockets.NetworkStream.Read(Byte[] buffer, Int32 offset, Int32 size) --- end Exception entry ---
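
    A small stand-alone probe of the endpoint named in the error can separate a TFS-side problem from a SharePoint/IIS-side one. The sketch below is hypothetical (the URL scheme was stripped from the quoted log, so the address is a placeholder); if this request also dies with a forcibly closed connection, the fault lies with the SharePoint web application or something in front of it rather than with the project creation wizard.
        // Hypothetical probe, compiled as a small console program.
        using System;
        using System.IO;
        using System.Net;

        class VtiBinProbe
        {
            static void Main()
            {
                // Placeholder URL: substitute the real scheme and host.
                var url = "http://sourcecontrol/_vti_bin/TeamFoundationIntegrationService.asmx";
                var request = (HttpWebRequest)WebRequest.Create(url);
                request.Credentials = CredentialCache.DefaultNetworkCredentials; // current Windows account
                request.KeepAlive = true;

                try
                {
                    using (var response = (HttpWebResponse)request.GetResponse())
                    using (var reader = new StreamReader(response.GetResponseStream()))
                    {
                        var body = reader.ReadToEnd();
                        Console.WriteLine("HTTP {0}, {1} bytes", (int)response.StatusCode, body.Length);
                    }
                }
                catch (WebException ex)
                {
                    Console.WriteLine("Request failed: {0}", ex.Message);
                }
            }
        }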

  • "(401)Authorization Required" when making a web service call using Axis

    - by Arun P Johny
    Hi, I'm using apache axis to connect to my sugar crm instance. When I'm trying to connect to the instance it is throwing the following exception Exception in thread "main" AxisFault faultCode: {http://xml.apache.org/axis/}HTTP faultSubcode: faultString: (401)Authorization Required faultActor: faultNode: faultDetail: {}:return code: 401 &lt;!DOCTYPE HTML PUBLIC &quot;-//IETF//DTD HTML 2.0//EN&quot;&gt; &lt;html&gt;&lt;head&gt; &lt;title&gt;401 Authorization Required&lt;/title&gt; &lt;/head&gt;&lt;body&gt; &lt;h1&gt;Authorization Required&lt;/h1&gt; &lt;p&gt;This server could not verify that you are authorized to access the document requested. Either you supplied the wrong credentials (e.g., bad password), or your browser doesn't understand how to supply the credentials required.&lt;/p&gt; &lt;/body&gt;&lt;/html&gt; {http://xml.apache.org/axis/}HttpErrorCode:401 (401)Authorization Required at org.apache.axis.transport.http.HTTPSender.readFromSocket(HTTPSender.java:744) at org.apache.axis.transport.http.HTTPSender.invoke(HTTPSender.java:144) at org.apache.axis.strategies.InvocationStrategy.visit(InvocationStrategy.java:32) at org.apache.axis.SimpleChain.doVisiting(SimpleChain.java:118) at org.apache.axis.SimpleChain.invoke(SimpleChain.java:83) at org.apache.axis.client.AxisClient.invoke(AxisClient.java:165) at org.apache.axis.client.Call.invokeEngine(Call.java:2784) at org.apache.axis.client.Call.invoke(Call.java:2767) at org.apache.axis.client.Call.invoke(Call.java:2443) at org.apache.axis.client.Call.invoke(Call.java:2366) at org.apache.axis.client.Call.invoke(Call.java:1812) at org.beanizer.sugarcrm.SugarsoapBindingStub.get_server_info(SugarsoapBindingStub.java:1115) at com.greytip.sugarcrm.GreytipCrm.main(GreytipCrm.java:42) This basically says that I do not have the authorization to the resource. The same code is working fine in my testing environment. Sugarsoap service = new SugarsoapLocator(); SugarsoapPortType port = service.getsugarsoapPort(new java.net.URL( SUGAR_CRM_LOCATION + "/soap.php")); System.out.println(port.get_server_info().getVersion()); User_auth userAuth = new User_auth(); userAuth.setUser_name("user_name"); MessageDigest md = MessageDigest.getInstance("MD5"); String password = getHexString(md.digest("password".getBytes())); userAuth.setPassword(password); // userAuth.setVersion("0.1"); Entry_value login = port.login(userAuth, "myAppName", null); String sessionID = login.getId(); Above code is used to connect to the Sugar CRM installation. here line "System.out.println(port.get_server_info().getVersion());" is throwing the exception. One difference I noticed between the test and production environment is when I used the soap url in the browser the production site pops up a 'Authentication Required' popup. When I gives my proxy username and password in this popup, it shows the soap request details. The same is applicable for the login url also. First it will ask for the 'Authentication' then it will take to the sugar crm login page? Is it a server security setting? If it is then how to set this user name and password using java in a web service call. The authentication required popup is same as the one which comes when we try to access the tomcat manager through a browser. Thanks
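
    Since the 401 comes from an HTTP Basic challenge in front of the production site (the same prompt the browser shows), the credentials have to be attached to the transport rather than to SugarCRM's own login call. With Apache Axis 1.x the generated binding stub extends org.apache.axis.client.Stub, which exposes setUsername and setPassword for exactly this; the account values below are placeholders, and this is a sketch rather than a verified fix.
        // Hypothetical addition around the existing port creation.
        SugarsoapPortType port = service.getsugarsoapPort(
                new java.net.URL(SUGAR_CRM_LOCATION + "/soap.php"));

        // Attach the front-end Basic-auth credentials to the HTTP transport.
        org.apache.axis.client.Stub stub = (org.apache.axis.client.Stub) port;
        stub.setUsername("frontend-auth-user");   // placeholder
        stub.setPassword("frontend-auth-pass");   // placeholder

        System.out.println(port.get_server_info().getVersion());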

  • SSRS ReportViewer 2010 Iframe IE Problem

    - by Phil
    Hello all, My problem relates to trying to include an SSRS (SQL Server) Report inside my MVC application. I've settled on the hybrid solution of having a WebForm with the ReportViewer Control in and then on my MVC View pages having an iframe reference this WebForm page. The tricky part is that the iframe needs to be dynamically populated with the report rather than using src due to posting parameters to the WebForm. It works perfectly in Firefox and Chrome, however IE throws a "Sys is Undefined" javascript error. Using src on the iframe works in IE, but I can't find a way to post parameters (don't want to use something like /Report.aspx?param1=test due to the possible length). Its a ASP.NET MVC 2 project, .NET 4, Visual Studio 2010 on Windows 7 x74 if its any help. So here is the code (I could provide the VS2010 project files if anyone wants them) In my webform: <%@ Page Language="C#" AutoEventWireup="true" CodeBehind="Report.aspx.cs" Inherits="SSRSMVC.Views.Home.Report" %> <%@ Register Assembly="Microsoft.ReportViewer.WebForms, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a" Namespace="Microsoft.Reporting.WebForms" TagPrefix="rsweb" %> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <body> <form id="RSForm" runat="server"> <asp:ScriptManager ID="ScriptManager" runat="server" EnablePartialRendering="true" ScriptMode="Release"> </asp:ScriptManager> <asp:UpdatePanel ID="ReportViewerUP" runat="server"> <ContentTemplate> <rsweb:ReportViewer ID="ReportViewer" runat="server" Width="100%" Height="380px" ProcessingMode="Local" InteractivityPostBackMode="AlwaysAsynchronous" AsyncRendering="true"> <LocalReport ReportPath="Models\\TestReport.rdlc"> </LocalReport> </rsweb:ReportViewer> </ContentTemplate> </asp:UpdatePanel> </form> </body> </html> And Codebehind: using System; using System.Collections.Generic; using System.Linq; using System.Web; using System.Web.UI; using System.Web.UI.WebControls; using Microsoft.Reporting.WebForms; namespace SSRSMVC.Views.Home { public partial class Report : System.Web.UI.Page { protected void Page_Load(object sender, EventArgs e) { if (!Page.IsPostBack) { string test = Request.Params["val1"]; ReportViewer.LocalReport.DataSources.Add(new ReportDataSource("DataSet1", new SSRSMVC.Models.DataProvider().GetData())); } } } } And lastly my View page, <script type="text/javascript"> $(window).load(function () { $.post('/Report.aspx', { val1: "Hello World" }, function (data) { var rv_frame = document.getElementById('Frame1'); rv_frame = (rv_frame.contentWindow) ? rv_frame.contentWindow : (rv_frame.contentDocument.document) ? rv_frame.contentDocument.document : rv_frame.contentDocument; rv_frame.document.open(); rv_frame.document.write(data); rv_frame.document.close(); }); }); </script>
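
    One workaround for the "Sys is undefined" error (a sketch, not a verified fix) is to stop writing the fetched markup into the iframe at all and instead POST the parameters through a hidden form whose target is the iframe, so IE performs a real navigation and loads the ScriptManager scripts itself. This assumes the iframe also carries name="Frame1" in addition to its id.
        // Hypothetical replacement for the $.post + document.write block.
        $(window).load(function () {
            var form = $('<form>', {
                action: '/Report.aspx',
                method: 'post',
                target: 'Frame1'              // must match the iframe's name attribute
            }).append($('<input>', {
                type: 'hidden',
                name: 'val1',
                value: 'Hello World'
            }));

            form.appendTo('body').submit();
            // Clean up after the submission has been dispatched.
            setTimeout(function () { form.remove(); }, 0);
        });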

  • EWS - How to search for items [message] between dates?

    - by SomFred
    Hi, I am trying to search for message items between two dates from the inbox folder. I use the following restrictionType but it throws this error: firmt.RootFolder = null What am I doing wrong? There is some messages between the mentionned dates ;-) Thanks for your suggestions. using (ExchangeServiceBinding esb = new ExchangeServiceBinding()) { esb.Url = ConfigurationManager.AppSettings["ExchangeWebServicesURL"].ToString(); esb.RequestServerVersionValue = new RequestServerVersion(); esb.RequestServerVersionValue.Version = ExchangeVersionType.Exchange2007_SP1; esb.PreAuthenticate = true; esb.Credentials = new NetworkCredential(email, password); FindItemType findItemRequest = new FindItemType(); // paging IndexedPageViewType ipvt = new IndexedPageViewType(); ipvt.BasePoint = IndexBasePointType.Beginning; ipvt.MaxEntriesReturned = nombreMessage; ipvt.MaxEntriesReturnedSpecified = true; ipvt.Offset = offset; findItemRequest.Item = ipvt; // filter by dates AndType andType = new AndType(); List<SearchExpressionType> searchExps = new List<SearchExpressionType>(); RestrictionType restriction = new RestrictionType(); PathToUnindexedFieldType pteft = new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.itemDateTimeSent }; IsGreaterThanOrEqualToType IsGreaterThanOrEqualTo = new IsGreaterThanOrEqualToType { Item = pteft, FieldURIOrConstant = new FieldURIOrConstantType { Item = new ConstantValueType { Value = DateTime.Today.AddDays(-6d).ToString() } } }; searchExps.Add(IsGreaterThanOrEqualTo); IsLessThanOrEqualToType IsLessThanOrEqualTo = new IsLessThanOrEqualToType { Item = pteft, FieldURIOrConstant = new FieldURIOrConstantType { Item = new ConstantValueType { Value = DateTime.Today.AddDays(1d).ToString() } } }; searchExps.Add(IsLessThanOrEqualTo); andType.Items = searchExps.ToArray(); restriction.Item = andType; findItemRequest.Restriction = restriction; //// Define the sort order of items. FieldOrderType[] fieldsOrder = new FieldOrderType[1]; fieldsOrder[0] = new FieldOrderType(); PathToUnindexedFieldType dateOrder = new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.itemDateTimeReceived }; fieldsOrder[0].Item = dateOrder; fieldsOrder[0].Order = SortDirectionType.Descending; findItemRequest.SortOrder = fieldsOrder; findItemRequest.Traversal = ItemQueryTraversalType.Shallow; // define which item properties are returned in the response findItemRequest.ItemShape = new ItemResponseShapeType { BaseShape = DefaultShapeNamesType.IdOnly }; // identify which folder to search DistinguishedFolderIdType[] folderIDArray = new DistinguishedFolderIdType[1]; folderIDArray[0] = new DistinguishedFolderIdType { Id = DistinguishedFolderIdNameType.inbox }; // add folders to request findItemRequest.ParentFolderIds = folderIDArray; // find the messages FindItemResponseType findItemResponse = esb.FindItem(findItemRequest); //------------- ArrayOfResponseMessagesType responseMessages = findItemResponse.ResponseMessages; ResponseMessageType responseMessage = responseMessages.Items[0]; if (responseMessage is FindItemResponseMessageType) { FindItemResponseMessageType firmt = (responseMessage as FindItemResponseMessageType); *******FindItemParentType fipt = firmt.RootFolder;******** object obj = fipt.Item; // FindItem contains an array of items. 
ArrayOfRealItemsType realitems = (obj as ArrayOfRealItemsType); ItemType[] items = realitems.Items; // if no messages were found, then return null -- we're done if (items == null || items.Count() <= 0) return null; // FindItem never gets "all" the properties, so now that we've found them all, we need to get them all. BaseItemIdType[] itemIds = new BaseItemIdType[items.Count()]; for (int i = 0; i < items.Count(); i++) itemIds[i] = items[i].ItemId; GetItemType getItemType = new GetItemType { ItemIds = itemIds, ItemShape = new ItemResponseShapeType { BaseShape = DefaultShapeNamesType.AllProperties, BodyType = BodyTypeResponseType.Text, BodyTypeSpecified = true, AdditionalProperties = new BasePathToElementType[] { new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.itemDateTimeSent }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageFrom }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageIsRead }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageSender }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageToRecipients }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageCcRecipients }, new PathToUnindexedFieldType { FieldURI = UnindexedFieldURIType.messageBccRecipients } } } }; GetItemResponseType getItemResponse = esb.GetItem(getItemType); messages = ReadItems(getItemResponse, items.Count()); }
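
    Two things worth checking before assuming the restriction itself is wrong (both are suggestions adapted from the code above, not a confirmed fix): send the date constants in XSD dateTime form instead of the machine's locale format, and read the ResponseClass/MessageText of the FindItem response, because RootFolder stays null whenever the call itself returned an error.
        // Hypothetical tweaks to the code above.
        string since = DateTime.Today.AddDays(-6).ToUniversalTime()
                               .ToString("yyyy-MM-ddTHH:mm:ssZ");
        string until = DateTime.Today.AddDays(1).ToUniversalTime()
                               .ToString("yyyy-MM-ddTHH:mm:ssZ");
        // ... use `since` / `until` as the ConstantValueType values in the restriction ...

        FindItemResponseMessageType firmt =
            (FindItemResponseMessageType)findItemResponse.ResponseMessages.Items[0];

        if (firmt.ResponseClass != ResponseClassType.Success)
        {
            // The server explains here why RootFolder is empty.
            Console.WriteLine("{0}: {1}", firmt.ResponseCode, firmt.MessageText);
            return null;
        }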

  • Delayed job problem in Rails

    - by krunal shah
    My controller data_files_controller.rb def upload_balances DataFile.load_balances(params) end My model data_file.rb def self.load_balances(params) # Pull the file out of the http request, write it to file system name = params['Filename'] directory = "public/uploads" errors_table_name = "snapshot_errors" upload_file = File.join(directory, name) File.open(upload_file, "wb") { |f| f.write(params['Filedata'].read) } # Remove the old data from the table Balance.destroy_all ------ more code----- end It's working fine. Now i want to use delayed job with my controller to call my model action like .. My controller data_files_controller.rb def upload_balances DataFile.send_later(:load_balances,params) end Is it possible?? What's the other way to do it? Is it create any problem? With this send_later i am getting this error in column last_error in delayed_job table. uninitialized stream C:/cyncabc/app/models/data_file.rb:12:in read' C:/cyncabc/app/models/data_file.rb:12:inload_balances' C:/cyncabc/app/models/data_file.rb:12:in open' C:/cyncabc/app/models/data_file.rb:12:inload_balances' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/performable_method.rb:35:in send' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/performable_method.rb:35:inperform' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/backend/base.rb:66:in invoke_job' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:120:inrun' c:/ruby/lib/ruby/1.8/timeout.rb:62:in timeout' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:120:inrun' c:/ruby/lib/ruby/gems/1.8/gems/activesupport-2.3.8/lib/active_support/core_ext/benchmark.rb:10:in realtime' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:119:inrun' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:180:in reserve_and_run_one_job' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:104:inwork_off' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:103:in times' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:103:inwork_off' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:78:in start' c:/ruby/lib/ruby/gems/1.8/gems/activesupport-2.3.8/lib/active_support/core_ext/benchmark.rb:10:inrealtime' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:77:in start' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:74:inloop' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/worker.rb:74:in start' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:93:inrun' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:72:in run_process' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/application.rb:215:incall' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/application.rb:215:in start_proc' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/application.rb:225:incall' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/application.rb:225:in start_proc' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/application.rb:255:instart' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/controller.rb:72:in run' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons.rb:188:inrun_proc' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/cmdline.rb:105:in call' c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons/cmdline.rb:105:incatch_exceptions' 
c:/ruby/lib/ruby/gems/1.8/gems/daemons-1.0.10/lib/daemons.rb:187:in run_proc' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:71:inrun_process' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:65:in daemonize' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:63:intimes' c:/ruby/lib/ruby/gems/1.8/gems/delayed_job-2.0.3/lib/delayed/command.rb:63:in `daemonize' script/delayed_job:5 Without send_later it's working fine... Is there any solution?
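
    A likely culprit is that params contains the uploaded file as an open IO object, which delayed_job has to serialize to YAML and cannot rebuild in the worker (hence "uninitialized stream" at the File.open/read line). One way around it, sketched below with the same names as the code above, is to write the upload to disk in the controller and enqueue only plain data such as the file path.
        # app/controllers/data_files_controller.rb
        def upload_balances
          name        = params['Filename']
          upload_file = File.join("public/uploads", name)
          File.open(upload_file, "wb") { |f| f.write(params['Filedata'].read) }

          # Only a string goes into the job, so it serializes cleanly.
          DataFile.send_later(:load_balances, upload_file)
        end

        # app/models/data_file.rb
        def self.load_balances(upload_file)
          Balance.destroy_all
          # ------ parse the file at upload_file and load the rows ------
        end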

  • WCF timeouts are a nightmare

    - by Greg
    We have a bunch of WCF services that work almost all of the time, using various bindings, ports, max sizes, etc. The super-frustrating thing about WCF is that when it (rarely) fails, we are powerless to find out why it failed. Sometimes you will get a message that looks like this:

        System.ServiceModel.CommunicationException: The socket connection was aborted. This could be caused by an error processing your message or a receive timeout being exceeded by the remote host, or an underlying network resource issue. Local socket timeout was '01:00:00'. --- System.IO.IOException: Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host.

    The problem is that the local socket timeout it's giving you is merely an attempt to be convenient. It may or may not be the cause of the problem. But OK, sometimes networks have issues. No big deal. We can retry or something.

    But here's the huge problem. On top of failing to tell you precisely which timeout (if any) resulted in the failure ("your server-side receive timeout was exceeded," or something, would be helpful), WCF seems to have two types of timeouts.

    Timeout Type #1) A timeout that, if increased, would increase the chance of your operation's success. Say the pertinent timeout is an hour and you are uploading a huge file that will take an hour and twenty minutes. It fails. You increase the timeout, it succeeds. I have no problem with this type of timeout.

    Timeout Type #2) A timeout which merely defines how long you have to wait for the service to actually fail and give you an error, but modifying the value of this timeout has no impact on the chance of success. Basically, something happens during the first second of the service request which mucks things up. It will never recover. WCF doesn't magically retry the network connection for you. Fine, sometimes establishing a network connection doesn't go well. But if your timeout is 2 hours, you have to wait 2 whole hours, with no chance of it ever working, before it finally acknowledges that it didn't work and gives you the error.

    But the error you see in both cases looks the same. With timeout Type #2, it still looks like you are running into a timeout. Yet you could increase all of your timeouts to 4 years, and all it would do is make it take 4 years to get an error message. I know that Type #2 exists because I can do an operation that is known to complete in less than a minute when successful, and have it take 2 hours to fail. But if I kill it and retry, it succeeds quickly. (If you are wondering why there might be a 2 hour timeout on an operation that takes less than a minute, there are times I run the operation with a much larger file and it could take over an hour.)

    So, to combat the problem with Type #2, you'd want your timeout to be really quick so you immediately know there is a problem. Then you can retry. But the insurmountable problem is that because I don't know which timeouts are the cause of failure, I don't know which timeouts are Type #1 and which ones are Type #2. There may be one timeout (let's say the client-side send timeout) that acts like Type #1 in some cases and Type #2 in others. I have no idea, and I have no way of finding out.

    Does anyone know how to track down Type #2 timeouts so I can set them to low values without having to shorten actual (read: Type #1) timeouts and lower the chance of success? Thank you.
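
    One practical way to see which side gave up, and on which timeout, is WCF's built-in tracing. The standard fragment below is generic, not specific to this project; it writes a .svclog on whichever side it is added to, and Service Trace Viewer (SvcTraceViewer.exe) then shows the aborted activity and the first exception in the chain, which is usually enough to tell a real Type #1 timeout from a connection that was doomed from the first second. The log path is a placeholder.
        <!-- Add inside <configuration> of the client's or service's config file. -->
        <system.diagnostics>
          <sources>
            <source name="System.ServiceModel"
                    switchValue="Warning, ActivityTracing"
                    propagateActivity="true">
              <listeners>
                <add name="xml"
                     type="System.Diagnostics.XmlWriterTraceListener"
                     initializeData="C:\logs\wcf-client.svclog" />
              </listeners>
            </source>
          </sources>
          <trace autoflush="true" />
        </system.diagnostics>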

  • FOSUserBundle override mapping to remove need for username

    - by musoNic80
    I want to remove the need for a username in the FOSUserBundle. My users will login using an email address only and I've added real name fields as part of the user entity. I realised that I needed to redo the entire mapping as described here. I think I've done it correctly but when I try to submit the registration form I get the error: "Only field names mapped by Doctrine can be validated for uniqueness." The strange thing is that I haven't tried to assert a unique constraint to anything in the user entity. Here is my full user entity file: <?php // src/MyApp/UserBundle/Entity/User.php namespace MyApp\UserBundle\Entity; use FOS\UserBundle\Model\User as BaseUser; use Doctrine\ORM\Mapping as ORM; use Symfony\Component\Validator\Constraints as Assert; /** * @ORM\Entity * @ORM\Table(name="depbook_user") */ class User extends BaseUser { /** * @ORM\Id * @ORM\Column(type="integer") * @ORM\GeneratedValue(strategy="AUTO") */ protected $id; /** * @ORM\Column(type="string", length=255) * * @Assert\NotBlank(message="Please enter your first name.", groups={"Registration", "Profile"}) * @Assert\MaxLength(limit="255", message="The name is too long.", groups={"Registration", "Profile"}) */ protected $firstName; /** * @ORM\Column(type="string", length=255) * * @Assert\NotBlank(message="Please enter your last name.", groups={"Registration", "Profile"}) * @Assert\MaxLength(limit="255", message="The name is too long.", groups={"Registration", "Profile"}) */ protected $lastName; /** * @ORM\Column(type="string", length=255) * * @Assert\NotBlank(message="Please enter your email address.", groups={"Registration", "Profile"}) * @Assert\MaxLength(limit="255", message="The name is too long.", groups={"Registration", "Profile"}) * @Assert\Email(groups={"Registration"}) */ protected $email; /** * @ORM\Column(type="string", length=255, name="email_canonical", unique=true) */ protected $emailCanonical; /** * @ORM\Column(type="boolean") */ protected $enabled; /** * @ORM\Column(type="string") */ protected $salt; /** * @ORM\Column(type="string") */ protected $password; /** * @ORM\Column(type="datetime", nullable=true, name="last_login") */ protected $lastLogin; /** * @ORM\Column(type="boolean") */ protected $locked; /** * @ORM\Column(type="boolean") */ protected $expired; /** * @ORM\Column(type="datetime", nullable=true, name="expires_at") */ protected $expiresAt; /** * @ORM\Column(type="string", nullable=true, name="confirmation_token") */ protected $confirmationToken; /** * @ORM\Column(type="datetime", nullable=true, name="password_requested_at") */ protected $passwordRequestedAt; /** * @ORM\Column(type="array") */ protected $roles; /** * @ORM\Column(type="boolean", name="credentials_expired") */ protected $credentialsExpired; /** * @ORM\Column(type="datetime", nullable=true, name="credentials_expired_at") */ protected $credentialsExpiredAt; public function __construct() { parent::__construct(); // your own logic } /** * @return string */ public function getFirstName() { return $this->firstName; } /** * @return string */ public function getLastName() { return $this->lastName; } /** * Sets the first name. * * @param string $firstname * * @return User */ public function setFirstName($firstname) { $this->firstName = $firstname; return $this; } /** * Sets the last name. * * @param string $lastname * * @return User */ public function setLastName($lastname) { $this->lastName = $lastname; return $this; } } I've seen various suggestions about this but none of the suggestions seem to work for me. 
The FOSUserBundle docs are very sparse about what must be a very common request.
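
    A note on the error itself: FOSUserBundle ships its own validation, including a uniqueness constraint on the canonical username, so once the username mapping is dropped that constraint points at an unmapped field and the validator fails in exactly this way. A minimal sketch of one workaround — keep the username columns mapped but mirror the email into them — assuming the rest of the entity stays as posted:

        // Sketch only: goes inside MyApp\UserBundle\Entity\User from the snippet above.
        // The claim that the bundle's UniqueEntity constraint targets usernameCanonical is my
        // reading of FOSUserBundle's bundled validation, so treat it as an assumption.

        /**
         * @ORM\Column(type="string", length=255)
         */
        protected $username;

        /**
         * @ORM\Column(type="string", length=255, name="username_canonical", unique=true)
         */
        protected $usernameCanonical;

        /**
         * Keep the (hidden) username in lock-step with the email address so the
         * inherited constraints always have a mapped, non-empty field to check.
         */
        public function setEmail($email)
        {
            $this->setUsername($email);

            return parent::setEmail($email);
        }

    The alternative is to override the bundle's Registration/Profile validation groups in config, but re-mapping the two columns is usually the smaller change.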

    Read the article

  • Can't view order in magento

    - by koko
    Hi, I've been setting up a fresh magento 1.4.0.1 install, working great so far. I did some test orders just to see. Everything works fine, but when I click on "view order" under "my orders", I get a bunch of error messages: There has been an error processing your request Notice: iconv_substr() [function.iconv-substr]: Unknown error (0) in /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Helper/String.php on line 98 Trace: #0 [internal function]: mageCoreErrorHandler(8, 'iconv_substr() ...', '/data/web/A1423...', 98, Array) #1 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Helper/String.php(98): iconv_substr('1', 0, 50, 'UTF-8') #2 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Helper/String.php(173): Mage_Core_Helper_String-substr('1', 0, 50) #3 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Helper/String.php(112): Mage_Core_Helper_String-str_split('1', 50) #4 /data/web/A14237/htdocs/magento/app/design/frontend/base/default/template/sales/order/items/renderer/default.phtml(58): Mage_Core_Helper_String-splitInjection('1') #5 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(189): include('/data/web/A1423...') #6 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(225): Mage_Core_Block_Template-fetchView('frontend/base/d...') #7 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(242): Mage_Core_Block_Template-renderView() #8 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Core_Block_Template-_toHtml() #9 /data/web/A14237/htdocs/magento/app/code/core/Mage/Sales/Block/Items/Abstract.php(137): Mage_Core_Block_Abstract-toHtml() #10 /data/web/A14237/htdocs/magento/app/design/frontend/base/default/template/sales/order/items.phtml(52): Mage_Sales_Block_Items_Abstract-getItemHtml(Object(Mage_Sales_Model_Order_Item)) #11 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(189): include('/data/web/A1423...') #12 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(225): Mage_Core_Block_Template-fetchView('frontend/base/d...') #13 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(242): Mage_Core_Block_Template-renderView() #14 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Core_Block_Template-_toHtml() #15 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(516): Mage_Core_Block_Abstract-toHtml() #16 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(467): Mage_Core_Block_Abstract-_getChildHtml('order_items', true) #17 /data/web/A14237/htdocs/magento/app/design/frontend/base/default/template/sales/order/view.phtml(64): Mage_Core_Block_Abstract-getChildHtml('order_items') #18 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(189): include('/data/web/A1423...') #19 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(225): Mage_Core_Block_Template-fetchView('frontend/base/d...') #20 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(242): Mage_Core_Block_Template-renderView() #21 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Core_Block_Template-_toHtml() #22 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(516): Mage_Core_Block_Abstract-toHtml() #23 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(463): 
Mage_Core_Block_Abstract-_getChildHtml('sales.order.vie...', true) #24 /data/web/A14237/htdocs/magento/app/code/core/Mage/Page/Block/Html/Wrapper.php(52): Mage_Core_Block_Abstract-getChildHtml('', true, true) #25 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Page_Block_Html_Wrapper-_toHtml() #26 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Text/List.php(43): Mage_Core_Block_Abstract-toHtml() #27 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Core_Block_Text_List-_toHtml() #28 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(516): Mage_Core_Block_Abstract-toHtml() #29 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(467): Mage_Core_Block_Abstract-_getChildHtml('content', true) #30 /data/web/A14237/htdocs/magento/app/design/frontend/base/default/template/page/2columns-left.phtml(48): Mage_Core_Block_Abstract-getChildHtml('content') #31 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(189): include('/data/web/A1423...') #32 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(225): Mage_Core_Block_Template-fetchView('frontend/base/d...') #33 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Template.php(242): Mage_Core_Block_Template-renderView() #34 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Block/Abstract.php(674): Mage_Core_Block_Template-_toHtml() #35 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Model/Layout.php(536): Mage_Core_Block_Abstract-toHtml() #36 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Controller/Varien/Action.php(389): Mage_Core_Model_Layout-getOutput() #37 /data/web/A14237/htdocs/magento/app/code/core/Mage/Sales/controllers/OrderController.php(100): Mage_Core_Controller_Varien_Action-renderLayout() #38 /data/web/A14237/htdocs/magento/app/code/core/Mage/Sales/controllers/OrderController.php(136): Mage_Sales_OrderController-_viewAction() #39 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Controller/Varien/Action.php(418): Mage_Sales_OrderController-viewAction() #40 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Controller/Varien/Router/Standard.php(254): Mage_Core_Controller_Varien_Action-dispatch('view') #41 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Controller/Varien/Front.php(177): Mage_Core_Controller_Varien_Router_Standard-match(Object(Mage_Core_Controller_Request_Http)) #42 /data/web/A14237/htdocs/magento/app/code/core/Mage/Core/Model/App.php(304): Mage_Core_Controller_Varien_Front-dispatch() #43 /data/web/A14237/htdocs/magento/app/Mage.php(596): Mage_Core_Model_App-run(Array) #44 /data/web/A14237/htdocs/magento/index.php(78): Mage::run('', 'store') #45 {main} gtx, koko
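
    For anyone reading the trace: the failure is in Mage_Core_Helper_String::substr(), where iconv_substr() returns "Unknown error" on some PHP/iconv builds even for harmless input like the string '1'. A hedged workaround sketch — shadow the helper in the local code pool and fall back to mbstring — with the caveat that the mb_substr() fallback is my assumption, not an official Magento patch:

        // Sketch: app/code/local/Mage/Core/Helper/String.php — a full copy of the core file,
        // with only substr() adjusted so a failing iconv_substr() falls back to mbstring.
            public function substr($string, $offset, $length = null)
            {
                if (null === $length) {
                    $length = $this->strlen($string) - $offset;
                }
                $result = @iconv_substr($string, $offset, $length, self::ICONV_CHARSET);
                if (false === $result) {
                    // iconv on this PHP build fails on short strings; mbstring handles them fine
                    $result = mb_substr($string, $offset, $length, 'UTF-8');
                }
                return $result;
            }

    Upgrading the host's PHP/iconv build is the cleaner fix if that option exists.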

    Read the article

  • ASP.NET MVC File Upload Error - "The input is not a valid Base-64 string"

    - by Justin
    Hey all, I'm trying to add a file upload control to my ASP.NET MVC 2 form but after I select a jpg and click Save, it gives the following error: The input is not a valid Base-64 string as it contains a non-base 64 character, more than two padding characters, or a non-white space character among the padding characters. Here's the view: <% using (Html.BeginForm("Save", "Developers", FormMethod.Post, new {enctype = "multipart/form-data"})) { %> <%: Html.ValidationSummary(true) %> <fieldset> <legend>Fields</legend> <div class="editor-label"> Login Name </div> <div class="editor-field"> <%: Html.TextBoxFor(model => model.LoginName) %> <%: Html.ValidationMessageFor(model => model.LoginName) %> </div> <div class="editor-label"> Password </div> <div class="editor-field"> <%: Html.Password("Password") %> <%: Html.ValidationMessageFor(model => model.Password) %> </div> <div class="editor-label"> First Name </div> <div class="editor-field"> <%: Html.TextBoxFor(model => model.FirstName) %> <%: Html.ValidationMessageFor(model => model.FirstName) %> </div> <div class="editor-label"> Last Name </div> <div class="editor-field"> <%: Html.TextBoxFor(model => model.LastName) %> <%: Html.ValidationMessageFor(model => model.LastName) %> </div> <div class="editor-label"> Photo </div> <div class="editor-field"> <input id="Photo" name="Photo" type="file" /> </div> <p> <%: Html.Hidden("DeveloperID") %> <%: Html.Hidden("CreateDate") %> <input type="submit" value="Save" /> </p> </fieldset> <% } %> And the controller: //POST: /Secure/Developers/Save/ [AcceptVerbs(HttpVerbs.Post)] public ActionResult Save(Developer developer) { //get profile photo. var upload = Request.Files["Photo"]; if (upload.ContentLength > 0) { string savedFileName = Path.Combine( ConfigurationManager.AppSettings["FileUploadDirectory"], "Developer_" + developer.FirstName + "_" + developer.LastName + ".jpg"); upload.SaveAs(savedFileName); } developer.UpdateDate = DateTime.Now; if (developer.DeveloperID == 0) {//inserting new developer. DataContext.DeveloperData.Insert(developer); } else {//attaching existing developer. DataContext.DeveloperData.Attach(developer); } //save changes. DataContext.SaveChanges(); //redirect to developer list. return RedirectToAction("Index"); } Thanks, Justin
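
    This particular error usually means the DefaultModelBinder found the form field named "Photo" and tried Convert.FromBase64String() on it while binding a binary Photo property on Developer — which is an assumption about the entity, but it fits the message exactly. A hedged sketch that keeps the existing Request.Files handling and simply keeps the binder away from that property:

        // Sketch only: [Bind(Exclude = "Photo")] stops the model binder from touching the
        // assumed Developer.Photo binary column; the uploaded file is still read from Request.Files.
        [AcceptVerbs(HttpVerbs.Post)]
        public ActionResult Save([Bind(Exclude = "Photo")] Developer developer)
        {
            var upload = Request.Files["Photo"];
            if (upload != null && upload.ContentLength > 0)
            {
                string savedFileName = Path.Combine(
                    ConfigurationManager.AppSettings["FileUploadDirectory"],
                    "Developer_" + developer.FirstName + "_" + developer.LastName + ".jpg");
                upload.SaveAs(savedFileName);
            }

            // ...the rest of the original Save logic is unchanged...
            return RedirectToAction("Index");
        }

    Renaming the file input (say, to "PhotoFile") so it no longer collides with the model property has the same effect.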

    Read the article

  • Fixing predicated NSFetchedResultsController/NSFetchRequest performance with SQLite backend?

    - by Jaanus
    I have a series of NSFetchedResultsControllers powering some table views, and their performance on device was abysmal, on the order of seconds. Since it all runs on main thread, it's blocking my app at startup, which is not great. I investigated and turns out the predicate is the problem: NSPredicate *somePredicate = [NSPredicate predicateWithFormat:@"ANY somethings == %@", something]; [fetchRequest setPredicate:somePredicate]; I.e the fetch entity, call it "things", has a many-to-many relation with entity "something". This predicate is a filter that limits the results to only things that have a relation with a particular "something". When I removed the predicate for testing, fetch time (the initial performFetch: call) dropped (for some extreme cases) from 4 seconds to around 100ms or less, which is acceptable. I am troubled by this, though, as it negates a lot of the benefit I was hoping to gain with Core Data and NSFRC, which otherwise seems like a powerful tool. So, my question is, how can I optimize this performance? Am I using the predicate wrong? Should I modify the model/schema somehow? And what other ways there are to fix this? Is this kind of degraded performance to be expected? (There are on the order of hundreds of <1KB objects.) EDIT WITH DETAILS: Here's the code: [fetchRequest setFetchLimit:200]; NSLog(@"before fetch"); BOOL success = [frc performFetch:&error]; if (!success) { NSLog(@"Fetch request error: %@", error); } NSLog(@"after fetch"); Updated logs (previously, I had some application inefficiencies degrading the performance here. These are the updated logs that should be as close to optimal as you can get under my current environment): 2010-02-05 12:45:22.138 Special Ppl[429:207] before fetch 2010-02-05 12:45:22.144 Special Ppl[429:207] CoreData: sql: SELECT DISTINCT 0, t0.Z_PK, t0.Z_OPT, <model fields> FROM ZTHING t0 LEFT OUTER JOIN Z_1THINGS t1 ON t0.Z_PK = t1.Z_2THINGS WHERE t1.Z_1SOMETHINGS = ? ORDER BY t0.ZID DESC LIMIT 200 2010-02-05 12:45:22.663 Special Ppl[429:207] CoreData: annotation: sql connection fetch time: 0.5094s 2010-02-05 12:45:22.668 Special Ppl[429:207] CoreData: annotation: total fetch execution time: 0.5240s for 198 rows. 2010-02-05 12:45:22.706 Special Ppl[429:207] after fetch If I do the same fetch without predicate (by commenting out the two lines in the beginning of the question): 2010-02-05 12:44:10.398 Special Ppl[414:207] before fetch 2010-02-05 12:44:10.405 Special Ppl[414:207] CoreData: sql: SELECT 0, t0.Z_PK, t0.Z_OPT, <model fields> FROM ZTHING t0 ORDER BY t0.ZID DESC LIMIT 200 2010-02-05 12:44:10.426 Special Ppl[414:207] CoreData: annotation: sql connection fetch time: 0.0125s 2010-02-05 12:44:10.431 Special Ppl[414:207] CoreData: annotation: total fetch execution time: 0.0262s for 200 rows. 2010-02-05 12:44:10.457 Special Ppl[414:207] after fetch 20-fold difference in times. 500ms is not that great, and there does not seem to be a way to do it in background thread or otherwise optimize that I can think of. (Apart from going to a binary store where this becomes a non-issue, so I might do that. Binary store performance is consistently ~100ms for the above 200-object predicated query.) (I nested another question here previously, which I now moved away).
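
    Two fetch-side tweaks that often help with exactly this ANY-to-many predicate shape, sketched below with the entity and relationship names taken from the question (so they are assumptions about the real model): prefetch the relationship so SQLite does one join instead of firing a fault per row, and batch the results so only visible rows are materialised.

        // Sketch: MRC-era Objective-C; "context" and the entity name "Thing" are placeholders.
        NSFetchRequest *fetchRequest = [[[NSFetchRequest alloc] init] autorelease];
        [fetchRequest setEntity:[NSEntityDescription entityForName:@"Thing"
                                            inManagedObjectContext:context]];

        // Same filter as before.
        [fetchRequest setPredicate:
            [NSPredicate predicateWithFormat:@"ANY somethings == %@", something]];

        // Pull the join in up front instead of faulting each relationship lazily.
        [fetchRequest setRelationshipKeyPathsForPrefetching:
            [NSArray arrayWithObject:@"somethings"]];

        // Only materialise what the table view actually displays.
        [fetchRequest setFetchBatchSize:20];
        [fetchRequest setFetchLimit:200];

    Measuring the inverse query (starting from the "something" object's things set) is also worth a try, since it avoids the ANY join entirely.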

    Read the article

  • "Content is not allowed in prolog" when parsing perfectly valid XML on GAE

    - by Adrian Petrescu
    Hey guys, I've been beating my head against this absolutely infuriating bug for the last 48 hours, so I thought I'd finally throw in the towel and try asking here before I throw my laptop out the window. I'm trying to parse the response XML from a call I made to AWS SimpleDB. The response is coming back on the wire just fine; for example, it may look like: <?xml version="1.0" encoding="utf-8"?> <ListDomainsResponse xmlns="http://sdb.amazonaws.com/doc/2009-04-15/"> <ListDomainsResult> <DomainName>Audio</DomainName> <DomainName>Course</DomainName> <DomainName>DocumentContents</DomainName> <DomainName>LectureSet</DomainName> <DomainName>MetaData</DomainName> <DomainName>Professors</DomainName> <DomainName>Tag</DomainName> </ListDomainsResult> <ResponseMetadata> <RequestId>42330b4a-e134-6aec-e62a-5869ac2b4575</RequestId> <BoxUsage>0.0000071759</BoxUsage> </ResponseMetadata> </ListDomainsResponse> I pass in this XML to a parser with XMLEventReader eventReader = xmlInputFactory.createXMLEventReader(response.getContent()); and call eventReader.nextEvent(); a bunch of times to get the data I want. Here's the bizarre part -- it works great inside the local server. The response comes in, I parse it, everyone's happy. The problem is that when I deploy the code to Google App Engine, the outgoing request still works, and the response XML seems 100% identical and correct to me, but the response fails to parse with the following exception: com.amazonaws.http.HttpClient handleResponse: Unable to unmarshall response (ParseError at [row,col]:[1,1] Message: Content is not allowed in prolog.): <?xml version="1.0" encoding="utf-8"?> <ListDomainsResponse xmlns="http://sdb.amazonaws.com/doc/2009-04-15/"><ListDomainsResult><DomainName>Audio</DomainName><DomainName>Course</DomainName><DomainName>DocumentContents</DomainName><DomainName>LectureSet</DomainName><DomainName>MetaData</DomainName><DomainName>Professors</DomainName><DomainName>Tag</DomainName></ListDomainsResult><ResponseMetadata><RequestId>42330b4a-e134-6aec-e62a-5869ac2b4575</RequestId><BoxUsage>0.0000071759</BoxUsage></ResponseMetadata></ListDomainsResponse> javax.xml.stream.XMLStreamException: ParseError at [row,col]:[1,1] Message: Content is not allowed in prolog. at com.sun.org.apache.xerces.internal.impl.XMLStreamReaderImpl.next(Unknown Source) at com.sun.xml.internal.stream.XMLEventReaderImpl.nextEvent(Unknown Source) at com.amazonaws.transform.StaxUnmarshallerContext.nextEvent(StaxUnmarshallerContext.java:153) ... (rest of lines omitted) I have double, triple, quadruple checked this XML for 'invisible characters' or non-UTF8 encoded characters, etc. I looked at it byte-by-byte in an array for byte-order-marks or something of that nature. Nothing; it passes every validation test I could throw at it. Even stranger, it happens if I use a Saxon-based parser as well -- but ONLY on GAE, it always works fine in my local environment. It makes it very hard to trace the code for problems when I can only run the debugger on an environment that works perfectly (I haven't found any good way to remotely debug on GAE). Nevertheless, using the primitive means I have, I've tried a million approaches including: XML with and without the prolog With and without newlines With and without the "encoding=" attribute in the prolog Both newline styles With and without the chunking information present in the HTTP stream And I've tried most of these in multiple combinations where it made sense they would interact -- nothing! I'm at my wit's end. 
Has anyone seen an issue like this before that can hopefully shed some light on it? Thanks!
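
    One low-cost thing to rule out, purely as an assumption about what URLFetch hands back on App Engine: a UTF-8 byte-order mark (or stray bytes) in front of the <?xml?> declaration, which StAX reports as exactly this "content is not allowed in prolog" error even though the text looks identical when printed. A small wrapper that strips a leading BOM before the stream reaches the parser:

        import java.io.IOException;
        import java.io.InputStream;
        import java.io.PushbackInputStream;

        // Sketch: drop a UTF-8 BOM (EF BB BF) if present, otherwise pass the bytes through untouched.
        public final class BomStrippingStream {

            public static InputStream wrap(InputStream in) throws IOException {
                PushbackInputStream pushback = new PushbackInputStream(in, 3);
                byte[] head = new byte[3];
                int read = pushback.read(head, 0, 3);
                boolean isBom = read == 3
                        && (head[0] & 0xFF) == 0xEF
                        && (head[1] & 0xFF) == 0xBB
                        && (head[2] & 0xFF) == 0xBF;
                if (!isBom && read > 0) {
                    pushback.unread(head, 0, read);   // not a BOM: put the bytes back
                }
                return pushback;
            }
        }

        // Usage, with response.getContent() as in the question:
        // XMLEventReader eventReader =
        //         xmlInputFactory.createXMLEventReader(BomStrippingStream.wrap(response.getContent()));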

    Read the article

  • ASP.NET MVC : AJAX ActionLink - Persist Data

    - by Mio
    Hi guys, I'm really new at this and I was searching the web for an answer to my question and I couldn't find it, so here I am posting my question :) I'm trying to create a new record in my table Facility. For my foreign keys I'm displaying the choices in tables instead of dropdowns. When the user clicks on the select link, which is an Ajax.ActionLink(), I wanna retrieve the right record from the DB, set the foreign key of my object Facility to the one selected and replace the Div by a new Partial View. The problem is when I try to submit the form, the Facility object doesn't seem to have the foreign key that I've just set in my ajax function in my controller. And if the user has entered some data in the other fields of the create form, I don't want them to lose what they already entered. Here's my code. Model only contains a Facility. public ActionResult Create() { Model.Facility = new Facility(); return View(Model); } This is part of my Create View <div id="FacilityTypePartialView"> <% Html.RenderPartial("FacilityType"); %> </div> This is my Partial View FacilityType <% if (Model.IsNewFacility()) { %> <p> Id: <%= Html.Encode(Model.Facility.FacilityType.FId)%> </p> <p> Type: <%= Html.Encode(Model.Facility.FacilityType.FType)%> </p> <p> Description: <%= Html.Encode(Model.Facility.FacilityType.FDescription) %> </p> <% } %> <p> <%= Html.ActionLink("Manage Facility Type", "Index","FacilityType") %> </p> <table id="FacilityTypesList"> <tr> <th> Select </th> <th> FId </th> <th> FType </th> <th> FDescription </th> </tr> <% foreach (var item in Model.GetFacilityTypes()) { %> <tr> <td> <%=Ajax.ActionLink("Select", "FacilityTypeSelect", new { id = item.FId}, new AjaxOptions { UpdateTargetId = "FacilityTypePartialView" })%> </td> <td> <%= Html.Encode(item.FId) %> </td> <td> <%= Html.Encode(item.FType) %> </td> <td> <%= Html.Encode(item.FDescription) %> </td> </tr> <% } %> </table> Here's my Ajax function public PartialViewResult FacilityTypeSelect(int id) { Facility facility = new Facility(); facility.FacilityType = _repository.GetFacilityType(id); Model.Facility = facility; if (this.Request.IsAjaxRequest() == false) { return PartialView("FacilityType/FacilityType", Model); } else { return PartialView("FacilityType/FacilityTypeSelected", Model); } } Finally, my Post method [AcceptVerbs(HttpVerbs.Post)] public ActionResult Create( Facility facility) { Model.Facility = facility; if (ModelState.IsValid) { try { _repository.AddEntity(facility); _repository.Save(); return RedirectToAction("Details", new { id = facility.Id }); } catch { } } return View("Create", Model); } My Facility object coming from the View has the Facility.FacilityType set to nothing.
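
    For reference: the controller — and with it the Model field — is rebuilt on every request, so whatever FacilityTypeSelect stores is gone by the time the form posts back to Create. A hedged sketch of one way around it is to have the partial render the chosen id as a hidden input inside the form, so it travels back with everything else the user typed (the FacilityTypeSelected partial name comes from the question; the facilityTypeId parameter is my invention):

        <%-- FacilityTypeSelected partial (sketch): show the choice and post its id back --%>
        <p>Type: <%= Html.Encode(Model.Facility.FacilityType.FType) %></p>
        <%= Html.Hidden("facilityTypeId", Model.Facility.FacilityType.FId) %>

    and on the controller side:

        // Sketch: rebuild the association from the posted id instead of relying on controller state.
        [AcceptVerbs(HttpVerbs.Post)]
        public ActionResult Create(Facility facility, int facilityTypeId)
        {
            facility.FacilityType = _repository.GetFacilityType(facilityTypeId);
            // ...validation and save exactly as in the original action...
            return View("Create", Model);
        }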

    Read the article

  • Android AsyncTask testing problem with Android Test Framework

    - by Vlad
    I have a very simple AsyncTask implementation example and have a problem testing it using the Android JUnit framework. It works just fine when I instantiate and execute it in a normal application. However, when it's executed from any of the Android testing framework classes (i.e. AndroidTestCase, ActivityUnitTestCase, ActivityInstrumentationTestCase2 etc) it behaves strangely: - It executes the doInBackground() method correctly - However, it doesn't invoke any of its notification methods (onPostExecute(), onProgressUpdate(), etc) -- it just silently ignores them without showing any errors. This is a very simple AsyncTask example package kroz.andcookbook.threads.asynctask; import android.os.AsyncTask; import android.util.Log; import android.widget.ProgressBar; import android.widget.Toast; public class AsyncTaskDemo extends AsyncTask<Integer, Integer, String> { AsyncTaskDemoActivity _parentActivity; int _counter; int _maxCount; public AsyncTaskDemo(AsyncTaskDemoActivity asyncTaskDemoActivity) { _parentActivity = asyncTaskDemoActivity; } @Override protected void onPreExecute() { super.onPreExecute(); _parentActivity._progressBar.setVisibility(ProgressBar.VISIBLE); _parentActivity._progressBar.invalidate(); } @Override protected String doInBackground(Integer... params) { _maxCount = params[0]; for (_counter = 0; _counter <= _maxCount; _counter++) { try { Thread.sleep(1000); publishProgress(_counter); } catch (InterruptedException e) { // Ignore } } } @Override protected void onProgressUpdate(Integer... values) { super.onProgressUpdate(values); int progress = values[0]; String progressStr = "Counting " + progress + " out of " + _maxCount; _parentActivity._textView.setText(progressStr); _parentActivity._textView.invalidate(); } @Override protected void onPostExecute(String result) { super.onPostExecute(result); _parentActivity._progressBar.setVisibility(ProgressBar.INVISIBLE); _parentActivity._progressBar.invalidate(); } @Override protected void onCancelled() { super.onCancelled(); _parentActivity._textView.setText("Request to cancel AsyncTask"); } } This is a test case. Here AsyncTaskDemoActivity is a very simple Activity providing a UI for testing AsyncTask: package kroz.andcookbook.test.threads.asynctask; import java.util.concurrent.ExecutionException; import kroz.andcookbook.R; import kroz.andcookbook.threads.asynctask.AsyncTaskDemo; import kroz.andcookbook.threads.asynctask.AsyncTaskDemoActivity; import android.content.Intent; import android.test.ActivityUnitTestCase; import android.widget.Button; public class AsyncTaskDemoTest2 extends ActivityUnitTestCase<AsyncTaskDemoActivity> { AsyncTaskDemo _atask; private Intent _startIntent; public AsyncTaskDemoTest2() { super(AsyncTaskDemoActivity.class); } protected void setUp() throws Exception { super.setUp(); _startIntent = new Intent(Intent.ACTION_MAIN); } protected void tearDown() throws Exception { super.tearDown(); } public final void testExecute() { startActivity(_startIntent, null, null); Button btnStart = (Button) getActivity().findViewById(R.id.Button01); btnStart.performClick(); assertNotNull(getActivity()); } } All this code works just fine, except for the fact that AsyncTask doesn't invoke its notification methods when executed from within the Android testing framework. Any ideas?
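
    In case it helps: the notification callbacks are posted back to the main looper, and in an instrumentation test nothing pumps that looper for you, so doInBackground() runs but onProgressUpdate()/onPostExecute() are never delivered. A hedged sketch of the usual workaround — create and execute the task on the UI thread, then block the test until it finishes:

        // Sketch: inside the ActivityUnitTestCase subclass from the question.
        // The argument value (3) and the timeout are arbitrary assumptions.
        public final void testExecuteWaitsForTask() throws Throwable {
            startActivity(_startIntent, null, null);
            final AsyncTaskDemoActivity activity = getActivity();
            final AsyncTaskDemo[] taskHolder = new AsyncTaskDemo[1];

            // AsyncTask has to be created and executed on the UI thread so its
            // onPreExecute()/onPostExecute() callbacks have a looper to land on.
            runTestOnUiThread(new Runnable() {
                public void run() {
                    taskHolder[0] = new AsyncTaskDemo(activity);
                    taskHolder[0].execute(3);
                }
            });

            // Block the instrumentation thread until doInBackground() returns,
            // then let the queued UI callbacks drain on the main thread.
            taskHolder[0].get(30, java.util.concurrent.TimeUnit.SECONDS);
            getInstrumentation().waitForIdleSync();

            assertNotNull(getActivity());
        }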

    Read the article

  • How to tell Seam to inject a local EJB interface (SLSB) and not the remote EJB interface (SLSB)?

    - by Harshad V
    Hello, I am using Seam with JBoss AS. In my application I have a SLSB which is also declared as a seam component using the @Name annotation. I am trying to inject and use this SLSB in another seam component using the @In annotation. My problem is that sometimes Seam injects the local interface (then the code runs fine) and sometimes seam injects the remote interface (then there is an error in execution of the code). I have tried doing all the things specified on this link: http://docs.jboss.org/seam/2.2.0.GA/reference/en-US/html/configuration.html#config.integration.ejb.container The SeamInterceptor is configured, I have specified the jndi pattern in components.xml file ( < core:init debug="true" jndi-pattern="earName/#{ejbName}/local"/ ), I have also tried using the @JndiName("earName/ejbName/local") annotation for every SLSB, I have tried setting this property ( org.jboss.seam.core.init.jndiPattern=earName/#{ejbName}/local ) in the seam.properties file. I have also tried putting the text below in web.xml file <context-param> <param-name>org.jboss.seam.core.init.jndiPattern</param-name> <param-value>earName/#{ejbName}/local</param-value> </context-param> Even after doing all the above mentioned things, the seam still injects the remote interface sometimes. Am I missing something here? Can anyone tell me how to resolve this issue and tell seam to always inject the local interface? My components.xml file looks like: <?xml version="1.0" encoding="UTF-8"?> <components xmlns="http://jboss.com/products/seam/components" xmlns:core="http://jboss.com/products/seam/core" xmlns:persistence="http://jboss.com/products/seam/persistence" xmlns:drools="http://jboss.com/products/seam/drools" xmlns:bpm="http://jboss.com/products/seam/bpm" xmlns:security="http://jboss.com/products/seam/security" xmlns:mail="http://jboss.com/products/seam/mail" xmlns:web="http://jboss.com/products/seam/web" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation= "http://jboss.com/products/seam/core http://jboss.com/products/seam/core-2.1.xsd http://jboss.com/products/seam/persistence http://jboss.com/products/seam/persistence-2.1.xsd http://jboss.com/products/seam/drools http://jboss.com/products/seam/drools-2.1.xsd http://jboss.com/products/seam/bpm http://jboss.com/products/seam/bpm-2.1.xsd http://jboss.com/products/seam/security http://jboss.com/products/seam/security-2.1.xsd http://jboss.com/products/seam/mail http://jboss.com/products/seam/mail-2.1.xsd http://jboss.com/products/seam/web http://jboss.com/products/seam/web-2.1.xsd http://jboss.com/products/seam/components http://jboss.com/products/seam/components-2.1.xsd"> <core:init debug="true" jndi-pattern="myEarName/#{ejbName}/local"/> <core:manager concurrent-request-timeout="500" conversation-timeout="120000" conversation-id-parameter="cid" parent-conversation-id-parameter="pid"/> <web:hot-deploy-filter url-pattern="*.seam"/> <persistence:managed-persistence-context name="entityManager" auto-create="true" persistence-unit-jndi-name="@puJndiName@"/> <drools:rule-base name="securityRules"> <drools:rule-files> <value>/security.drl</value> </drools:rule-files> </drools:rule-base> <security:rule-based-permission-resolver security-rules="#{securityRules}"/> <security:identity authenticate-method="#{authenticator.authenticate}" remember-me="true"/> <event type="org.jboss.seam.security.notLoggedIn"> <action execute="#{redirect.captureCurrentView}"/> </event> <event type="org.jboss.seam.security.loginSuccessful"> <action 
execute="#{redirect.returnToCapturedView}"/> </event> <component name="org.jboss.seam.core.init"> <property name="jndiPattern">myEarName/#{ejbName}/local</property> </component> </components> And my EJB component looks like: @Stateless @Name("myEJBComponent") @AutoCreate public class MyEJBComponentImpl implements MyEJBComponentRemote, MyEJBComponentLocal { public void doSomething() { } }

    Read the article

  • W3WP crashes when initializing a collection

    - by asbjornu
    I've created an ASP.NET MVC application that has an initializer attached to the PreApplicationStartMethodAttribute. When initializing, a collection is instantiated that implements an interface I've defined. When I instantiate this collection, w3wp.exe crashes with the following two incomprehensible entries in the event log: Faulting application name: w3wp.exe, version: 7.5.7600.16385, time stamp: 0x4a5bd0eb Faulting module name: clr.dll, version: 4.0.30319.1, time stamp: 0x4ba21eeb Exception code: 0xc00000fd Fault offset: 0x0000000000001177 Faulting process id: 0x1348 Faulting application start time: 0x01cb0224882f4723 Faulting application path: c:\windows\system32\inetsrv\w3wp.exe Faulting module path: C:\Windows\Microsoft.NET\Framework64\v4.0.30319\clr.dll Report Id: c6a0941e-6e17-11df-864d-000acd16dcdb And: Fault bucket , type 0 Event Name: APPCRASH Response: Not available Cab Id: 0 Problem signature: P1: w3wp.exe P2: 7.5.7600.16385 P3: 4a5bd0eb P4: clr.dll P5: 4.0.30319.1 P6: 4ba21eeb P7: c00000fd P8: 0000000000001177 P9: P10: Attached files: These files may be available here: Analysis symbol: Rechecking for solution: 0 Report Id: c6a0941e-6e17-11df-864d-000acd16dcdb Report Status: 0 If I remove the instantiation of the collection, the application starts normally. If I leave the instantiation, w3wp crashes. If I modify the interface, w3wp still crashes. I've tried every variation I could come up with on the theme of keeping the instantiation but doing everything else differently, but w3wp still crashes. My biggest issue here is that I have absolutely no idea why w3wp is crashing. It's not a StackOverflowException or anything concrete like that, all I get is the unintelligent junk cited above. I've tried to use DebugDiag and IISState to debug the w3wp process, but DebugDiag is only available for post-dump analysis in x64 (I'm running on Windows 7 x64, so the w3wp process is thus 64 bit) and IISStat says the following when I try to run it: D:\Programs\iisstate>IISState.exe -p 9204 -d Symbol search path is: SRV*D:\Programs\iisstate\symbols*http://msdl.microsoft.com/download/symbols IISState is limited to processes associated with IIS. If you require a generic debugger, please use WinDBG or CDB. They are available for download from http://www.microsoft.com/ddk/debugging. This error may also occur if a debugger is already attached to the process being checked. Incorrect Process Attachment I've double-checked 10 times that the process ID of my w3wp process is correct. I'm suspecting that IISState too only can debug x86 processes. Setting a breakpoint anywhere in the application does absolutely nothing. The break point isn't hit and w3wp crashes as soon as the request comes through to IIS from the browser. Starting the application with F5 in Visual Studio 2010 or starting another application to get the w3wp process up and running and then attaching the VS2010 debugger to it and then visiting the faulting application doesn't help. I've also tried to add an HTTP module as described in KB-911816 as well as add this to my web.config file: <configuration> <runtime> <legacyUnhandledExceptionPolicy enabled="true" /> </runtime> </configuration> Needless to say, it makes absolutely no difference. So I'm left with no way to debug the w3wp process, no way to extract any information from it and complete garbage dumped in my event log. If anybody has any idea on how to debug this problem, please let me know! 
    Update: My collection was initialized based on RouteTable.Routes, which might have thrown an exception (perhaps because it was not initialized itself yet at such an early stage of the ASP.NET lifecycle). Postponing the communication with RouteTable.Routes until a later stage solved the problem. While I don't really need an answer to this question anymore, I still find it so obscure that I'll leave it for anyone to comment on and answer, because I found no existing posts on this problem anywhere, so it might be a good reference in the future.
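
    A side note on the original crash data, plus a sketch of the fix described in the update: exception code 0xc00000fd is the native STATUS_STACK_OVERFLOW, which is why no managed exception, breakpoint or debugger ever got a chance to report anything — the process dies before the CLR can surface it. Deferring anything that touches RouteTable.Routes until after routing has actually been configured avoids the problem; a hedged sketch (all names below are mine, not from the real project):

        using System;
        using System.Web.Routing;

        // Sketch: keep PreApplicationStartMethod for lightweight registration only and
        // build the route-dependent collection lazily, long after Application_Start has run.
        public static class DeferredStartup
        {
            private static readonly Lazy<RouteAwareCollection> _collection =
                new Lazy<RouteAwareCollection>(() => new RouteAwareCollection(RouteTable.Routes));

            public static RouteAwareCollection Collection
            {
                get { return _collection.Value; }
            }
        }

        // Stand-in for the collection from the question.
        public class RouteAwareCollection
        {
            public RouteAwareCollection(RouteCollection routes)
            {
                // inspect/enumerate routes here, safely after MVC has registered them
            }
        }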

    Read the article

  • Visual Basic Cryptography Question

    - by Glenn Sullivan
    I am trying to mimic the results of some C code that uses the OpenSSL library using the system.security.crytography library in the .net 3.5 world, and I can't seem to get it right. I need some help... part of the issue is my understanding of crytography in general. Here's what is supposed to happen: I send a request for authentication to a device. It returns a challenge digest, which I then need to sign with a known key and return The device returns a "success" or "Fail" message. I have the following code snippet that I am trying to "copy": //Seed the PRNG //Cheating here - the PRNG will be seeded when we create a key pair //The key pair is discarded only doing this to seed the PRNG. DSA *temp_dsa = DSA_new(); if(!temp_dsa) { printf("Error: The client had an error with the DSA API\n"); exit(0); } unsigned char seed[20] = "Our Super Secret Key"; temp_dsa = DSA_generate_parameters(128, seed, sizeof(seed), NULL, NULL, NULL, NULL); DSA_free(temp_dsa); //A pointer to the private key. p = (unsigned char *)&priv_key; //Create and allocate a DSA structure from the private key. DSA *priv_dsa = NULL; priv_dsa = d2i_DSAPrivateKey(NULL, &p, sizeof(priv_key)); if(!priv_dsa) { printf("Error: The client had an error with the DSA API\n"); exit(0); } //Allocate memory for the to be computed signature. sigret = OPENSSL_malloc(DSA_size(priv_dsa)); //Sign the challenge digest recieved from the ISC. retval = DSA_sign(0, pResp->data, pResp->data_length, sigret, &siglen, priv_dsa); A few more bits of information: priv_key is a 252 element character array of hex characters that is included. The end result is a 512 (or less) array of characters to send back for validation to the device. Rasmus asked to see the key array. Here it is: unsigned char priv_key[] = {0x30, 0x81, 0xf9, 0x02, 0x01, 0x00, 0x02, 0x41, 0x00, 0xfe, 0xca, 0x97, 0x55, 0x1f, 0xc0, 0xb7, 0x1f, 0xad, 0xf0, 0x93, 0xec, 0x4b, 0x31, 0x94, 0x78, 0x86, 0x82, 0x1b, 0xab, 0xc4, 0x9e, 0x5c, 0x40, 0xd9, 0x89, 0x7d, 0xde, 0x43, 0x38, 0x06, 0x4f, 0x1b, 0x2b, 0xef, 0x5c, 0xb7, 0xff, 0x21, 0xb1, 0x11, 0xe6, 0x9a, 0x81, 0x9a, 0x2b, 0xef, 0x3a, 0xbb, 0x5c, 0xea, 0x76, 0xae, 0x3a, 0x8b, 0x92, 0xd2, 0x7c, 0xf1, 0x89, 0x8e, 0x4d, 0x3f, 0x0d, 0x02, 0x15, 0x00, 0x88, 0x16, 0x1b, 0xf5, 0xda, 0x43, 0xee, 0x4b, 0x58, 0xbb, 0x93, 0xea, 0x4e, 0x2b, 0xda, 0xb9, 0x17, 0xd1, 0xff, 0x21, 0x02, 0x41, 0x00, 0xf6, 0xbb, 0x45, 0xea, 0xda, 0x72, 0x39, 0x4f, 0xc1, 0xdd, 0x02, 0xb4, 0xf3, 0xaa, 0xe5, 0xe2, 0x76, 0xc7, 0xdc, 0x34, 0xb2, 0x0a, 0xd8, 0x69, 0x63, 0xc3, 0x40, 0x2c, 0x58, 0xea, 0xa6, 0xbd, 0x24, 0x8b, 0x6b, 0xaa, 0x4b, 0x41, 0xfc, 0x5f, 0x21, 0x02, 0x3c, 0x27, 0xa9, 0xc7, 0x7a, 0xc8, 0x59, 0xcd, 0x5b, 0xdd, 0x6c, 0x44, 0x48, 0x86, 0xd1, 0x34, 0x46, 0xb0, 0x89, 0x55, 0x50, 0x87, 0x02, 0x41, 0x00, 0x80, 0x29, 0xc6, 0x4a, 0x08, 0x3e, 0x30, 0x54, 0x71, 0x9b, 0x95, 0x49, 0x55, 0x17, 0x70, 0xc7, 0x96, 0x65, 0xc8, 0xc2, 0xe2, 0x8a, 0xe0, 0x5d, 0x9f, 0xe4, 0xb2, 0x1f, 0x20, 0x83, 0x70, 0xbc, 0x88, 0x36, 0x03, 0x29, 0x59, 0xcd, 0xc7, 0xcd, 0xd9, 0x4a, 0xa8, 0x65, 0x24, 0x6a, 0x77, 0x8a, 0x10, 0x88, 0x0d, 0x2f, 0x15, 0x4b, 0xbe, 0xba, 0x13, 0x23, 0xa1, 0x73, 0xa3, 0x04, 0x37, 0xc9, 0x02, 0x14, 0x06, 0x8e, 0xc1, 0x41, 0x40, 0xf1, 0xf6, 0xe1, 0xfa, 0xfb, 0x64, 0x28, 0x02, 0x15, 0xce, 0x47, 0xaa, 0xce, 0x6e, 0xfe}; Can anyone help me translate this code to it's VB.net crypto equivalent? TIA, Glenn

    Read the article

  • Creating a Reverse Proxy using Jpcap

    - by Ramon Marco Navarro
    I need to create a program that receives HTTP request and forwards those requests to the web servers. I have successfully made this using only Java Sockets but the client needed the program to be implemented in Jpcap. I'd like to know if this is possible and what literature I should be reading for this project. This is what I have now by stitching together pieces from the Jpcap tutorial: import java.net.InetAddress; import java.io.*; import jpcap.*; import jpcap.packet.*; public class Router { public static void main(String args[]) { //Obtain the list of network interfaces NetworkInterface[] devices = JpcapCaptor.getDeviceList(); //for each network interface for (int i = 0; i < devices.length; i++) { //print out its name and description System.out.println(i+": "+devices[i].name + "(" + devices[i].description+")"); //print out its datalink name and description System.out.println(" datalink: "+devices[i].datalink_name + "(" + devices[i].datalink_description+")"); //print out its MAC address System.out.print(" MAC address:"); for (byte b : devices[i].mac_address) System.out.print(Integer.toHexString(b&0xff) + ":"); System.out.println(); //print out its IP address, subnet mask and broadcast address for (NetworkInterfaceAddress a : devices[i].addresses) System.out.println(" address:"+a.address + " " + a.subnet + " "+ a.broadcast); } int index = 1; // set index of the interface that you want to open. //Open an interface with openDevice(NetworkInterface intrface, int snaplen, boolean promics, int to_ms) JpcapCaptor captor = null; try { captor = JpcapCaptor.openDevice(devices[index], 65535, false, 20); captor.setFilter("port 80 and host 192.168.56.1", true); } catch(java.io.IOException e) { System.err.println(e); } //call processPacket() to let Jpcap call PacketPrinter.receivePacket() for every packet capture. captor.loopPacket(-1,new PacketPrinter()); captor.close(); } } class PacketPrinter implements PacketReceiver { //this method is called every time Jpcap captures a packet public void receivePacket(Packet p) { JpcapSender sender = null; try { NetworkInterface[] devices = JpcapCaptor.getDeviceList(); sender = JpcapSender.openDevice(devices[1]); } catch(IOException e) { System.err.println(e); } IPPacket packet = (IPPacket)p; try { // IP Address of machine sending HTTP requests (the client) // It's still on the same LAN as the servers for testing purposes. packet.dst_ip = InetAddress.getByName("192.168.56.2"); } catch(java.net.UnknownHostException e) { System.err.println(e); } //create an Ethernet packet (frame) EthernetPacket ether=new EthernetPacket(); //set frame type as IP ether.frametype=EthernetPacket.ETHERTYPE_IP; //set source and destination MAC addresses // MAC Address of machine running reverse proxy server ether.src_mac = new MacAddress("08:00:27:00:9C:80").getAddress(); // MAC Address of machine running web server ether.dst_mac = new MacAddress("08:00:27:C7:D2:4C").getAddress(); //set the datalink frame of the packet as ether packet.datalink=ether; //send the packet sender.sendPacket(packet); sender.close(); //just print out a captured packet System.out.println(packet); } } Any help would be greatly appreciated. Thank you.

    Read the article

  • Partial Trust in WPF 4

    - by Hadi Eskandari
    I've started a new project in WPF 4 (.NET 4) and trying to see if I can run it in xbap mode. I need to run the application in Full Trust with the new mode made available in .NET 4 which asks the end user if the full trust application should be run. I've set the "Security" mode to "Full Trust" application, and it builds just fine. When I run it, an exception is thrown and IE error message shows the following error. Any ways around it?? Startup URI: T:\projects\Hightech Sources\PayRoll\PayRoll.Web\publish\PayRoll.Web.xbap Application Identity: file:///T:/projects/Hightech%20Sources/PayRoll/PayRoll.Web/publish/PayRoll.Web.xbap#PayRoll.Web.xbap, Version=1.0.0.0, Culture=neutral, PublicKeyToken=1d910f49755d2c97, processorArchitecture=msil/PayRoll.Web.exe, Version=1.0.0.0, Culture=neutral, PublicKeyToken=1d910f49755d2c97, processorArchitecture=msil, type=win32 System.Security.SecurityException: Request for the permission of type 'System.Security.Permissions.FileIOPermission, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' failed. at System.Security.CodeAccessSecurityEngine.Check(Object demand, StackCrawlMark& stackMark, Boolean isPermSet) at System.Security.CodeAccessSecurityEngine.Check(CodeAccessPermission cap, StackCrawlMark& stackMark) at System.Security.CodeAccessPermission.Demand() at System.Reflection.RuntimeAssembly.InternalLoadAssemblyName(AssemblyName assemblyRef, Evidence assemblySecurity, StackCrawlMark& stackMark, Boolean forIntrospection, Boolean suppressSecurityChecks) at System.Reflection.RuntimeAssembly.InternalLoadFrom(String assemblyFile, Evidence securityEvidence, Byte[] hashValue, AssemblyHashAlgorithm hashAlgorithm, Boolean forIntrospection, Boolean suppressSecurityChecks, StackCrawlMark& stackMark) at System.Reflection.Assembly.LoadFrom(String assemblyFile) at PayRoll.Web.App.SelectAssemblies() at Caliburn.PresentationFramework.ApplicationModel.CaliburnApplication..ctor() at PayRoll.Web.App..ctor() at PayRoll.Web.App.Main() at System.AppDomain._nExecuteAssembly(RuntimeAssembly assembly, String[] args) at System.AppDomain.nExecuteAssembly(RuntimeAssembly assembly, String[] args) at System.Runtime.Hosting.ManifestRunner.Run(Boolean checkAptModel) at System.Runtime.Hosting.ManifestRunner.ExecuteAsAssembly() at System.Runtime.Hosting.ApplicationActivator.CreateInstance(ActivationContext activationContext, String[] activationCustomData) at System.Runtime.Hosting.ApplicationActivator.CreateInstance(ActivationContext activationContext) at System.Windows.Interop.PresentationApplicationActivator.CreateInstance(ActivationContext actCtx) at System.Activator.CreateInstance(ActivationContext activationContext) at System.AppDomain.Setup(Object arg) at System.AppDomain.nCreateInstance(String friendlyName, AppDomainSetup setup, Evidence providedSecurityInfo, Evidence creatorsSecurityInfo, IntPtr parentSecurityDescriptor) at System.Runtime.Hosting.ApplicationActivator.CreateInstanceHelper(AppDomainSetup adSetup) at System.Runtime.Hosting.ApplicationActivator.CreateInstance(ActivationContext activationContext, String[] activationCustomData) at System.Windows.Interop.PresentationApplicationActivator.CreateInstance(ActivationContext actCtx) at System.Activator.CreateInstance(ActivationContext activationContext) at System.Deployment.Application.DeploymentManager.ExecuteNewDomain() at System.Deployment.Application.InPlaceHostingManager.Execute() at MS.Internal.AppModel.XappLauncherApp.ExecuteDownloadedApplication() at 
System.Windows.Interop.DocObjHost.RunApplication(ApplicationRunner runner) at MS.Internal.AppModel.XappLauncherApp.XappLauncherApp_Exit(Object sender, ExitEventArgs e) at System.Windows.Application.OnExit(ExitEventArgs e) at System.Windows.Application.DoShutdown() at System.Windows.Application.ShutdownImpl() at System.Windows.Application.ShutdownCallback(Object arg) at System.Windows.Threading.ExceptionWrapper.InternalRealCall(Delegate callback, Object args, Int32 numArgs) at MS.Internal.Threading.ExceptionFilterHelper.TryCatchWhen(Object source, Delegate method, Object args, Int32 numArgs, Delegate catchHandler) at System.Windows.Threading.DispatcherOperation.InvokeImpl() at System.Windows.Threading.DispatcherOperation.InvokeInSecurityContext(Object state) at System.Threading.ExecutionContext.runTryCode(Object userData) at System.Runtime.CompilerServices.RuntimeHelpers.ExecuteCodeWithGuaranteedCleanup(TryCode code, CleanupCode backoutCode, Object userData) at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state) at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean ignoreSyncCtx) at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state) at System.Windows.Threading.DispatcherOperation.Invoke() at System.Windows.Threading.Dispatcher.ProcessQueue() at System.Windows.Threading.Dispatcher.WndProcHook(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled) at MS.Win32.HwndWrapper.WndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled) at MS.Win32.HwndSubclass.DispatcherCallbackOperation(Object o) at System.Windows.Threading.ExceptionWrapper.InternalRealCall(Delegate callback, Object args, Int32 numArgs) at MS.Internal.Threading.ExceptionFilterHelper.TryCatchWhen(Object source, Delegate method, Object args, Int32 numArgs, Delegate catchHandler) at System.Windows.Threading.Dispatcher.InvokeImpl(DispatcherPriority priority, TimeSpan timeout, Delegate method, Object args, Int32 numArgs) at MS.Win32.HwndSubclass.SubclassWndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam) at MS.Win32.UnsafeNativeMethods.DispatchMessage(MSG& msg) at System.Windows.Threading.Dispatcher.PushFrameImpl(DispatcherFrame frame) at System.Windows.Threading.Dispatcher.PushFrame(DispatcherFrame frame) at System.Windows.Threading.Dispatcher.Run() at System.Windows.Application.RunDispatcher(Object ignore) at System.Windows.Application.StartDispatcherInBrowser(Object unused) at System.Windows.Threading.ExceptionWrapper.InternalRealCall(Delegate callback, Object args, Int32 numArgs) at MS.Internal.Threading.ExceptionFilterHelper.TryCatchWhen(Object source, Delegate method, Object args, Int32 numArgs, Delegate catchHandler) at System.Windows.Threading.DispatcherOperation.InvokeImpl() at System.Windows.Threading.DispatcherOperation.InvokeInSecurityContext(Object state) at System.Threading.ExecutionContext.runTryCode(Object userData) at System.Runtime.CompilerServices.RuntimeHelpers.ExecuteCodeWithGuaranteedCleanup(TryCode code, CleanupCode backoutCode, Object userData) at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state) at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean ignoreSyncCtx) at System.Threading.ExecutionContext.Run(ExecutionContext 
executionContext, ContextCallback callback, Object state) at System.Windows.Threading.DispatcherOperation.Invoke() at System.Windows.Threading.Dispatcher.ProcessQueue() at System.Windows.Threading.Dispatcher.WndProcHook(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled) at MS.Win32.HwndWrapper.WndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled) at MS.Win32.HwndSubclass.DispatcherCallbackOperation(Object o) at System.Windows.Threading.ExceptionWrapper.InternalRealCall(Delegate callback, Object args, Int32 numArgs) at MS.Internal.Threading.ExceptionFilterHelper.TryCatchWhen(Object source, Delegate method, Object args, Int32 numArgs, Delegate catchHandler) at System.Windows.Threading.Dispatcher.InvokeImpl(DispatcherPriority priority, TimeSpan timeout, Delegate method, Object args, Int32 numArgs) at MS.Win32.HwndSubclass.SubclassWndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam) The action that failed was: Demand The type of the first permission that failed was: System.Security.Permissions.FileIOPermission

    Read the article

  • Need help with setting up comet code

    - by Saif Bechan
    Does anyone know of a way, or maybe thinks it's possible, to connect Node.js with the Nginx http push module to maintain a persistent connection between the server and the browser? I am new to comet, so I just don't understand the publishing side; maybe someone can help me with this. What I have set up so far is the following. I downloaded the jQuery.comet plugin and set up the following basic code: Client JavaScript <script type="text/javascript"> function updateFeed(data) { $('#time').text(data); } function catchAll(data, type) { console.log(data); console.log(type); } $.comet.connect('/broadcast/sub?channel=getIt'); $.comet.bind(updateFeed, 'feed'); $.comet.bind(catchAll); $('#kill-button').click(function() { $.comet.unbind(updateFeed, 'feed'); }); </script> What I understand from this is that the client will keep on listening to the url ending in /broadcast/sub?channel=getIt. When there is a message it will fire updateFeed. Pretty basic and understandable IMO. Nginx http push module config default_type application/octet-stream; sendfile on; keepalive_timeout 65; push_authorized_channels_only off; server { listen 80; location /broadcast { location = /broadcast/sub { set $push_channel_id $arg_channel; push_subscriber; push_subscriber_concurrency broadcast; push_channel_group broadcast; } location = /broadcast/pub { set $push_channel_id $arg_channel; push_publisher; push_min_message_buffer_length 5; push_max_message_buffer_length 20; push_message_timeout 5s; push_channel_group broadcast; } } } OK, now this tells nginx to listen on port 80 for any calls to /broadcast/sub and it will give back any responses sent to /broadcast/pub. Pretty basic also. This part is not so hard to understand, and is well documented over the internet. Most of the time there is a ruby or a php file behind this that does the broadcasting. My idea is to have node.js broadcasting to /broadcast/pub. I think this will let me have persistent streaming data from the server to the client without breaking the connection. I tried the long-polling approach with looping the request, but I think this will be more efficient. Or is this not going to work? Node.js file Now, to create the Node.js file, I'm lost. First of all, I don't know how to get Node.js to work in this way. The setup I used for long polling is as follows: var sys = require('sys'), http = require('http'); http.createServer(function (req, res) { res.writeHead(200, {'Content-Type': 'text/html'}); res.write(new Date()); res.close(); seTimeout('',1000); }).listen(8000); This listens on port 8000 and just writes on the response variable. For long polling my nginx.config looked something like this: server { listen 80; server_name _; location / { proxy_pass http://mydomain.com:8080$request_uri; include /etc/nginx/proxy.conf; } } This just redirected port 80 to 8000 and this worked fine. Does anyone have an idea of how to make Node.js act in a way Comet understands? That would be really nice and would help me out a lot. Resources used An example where this is done with ruby instead of Node.js jQuery.comet Nginx HTTP push module homepage Faye: a Comet client and server for Node.js and Rack To use faye I have to install the comet client, but I want to use the one supplied with Nginx. That's why I don't just use faye. The one nginx uses is much more optimized. Extra Persistent connections Going evented with Node.js
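
    On the missing piece — publishing from Node.js — the push module only needs an ordinary HTTP POST to /broadcast/pub?channel=getIt, so Node can act as the publisher with nothing but its built-in http module. A hedged sketch (host, port and channel are assumptions based on the config above):

        // Sketch: POST a message to the nginx http push module once a second.
        // Browsers long-polling /broadcast/sub?channel=getIt via jQuery.comet receive each message.
        var http = require('http');

        function publish(message) {
            var req = http.request({
                host: '127.0.0.1',                 // where nginx listens (assumption)
                port: 80,
                method: 'POST',
                path: '/broadcast/pub?channel=getIt',
                headers: {
                    'Content-Type': 'text/plain',
                    'Content-Length': Buffer.byteLength(message)
                }
            }, function (res) {
                console.log('push module answered with status ' + res.statusCode);
            });
            req.on('error', function (err) {
                console.log('publish failed: ' + err.message);
            });
            req.end(message);
        }

        setInterval(function () {
            publish('feed: ' + new Date().toString());
        }, 1000);

    The nginx proxy in front of Node is then only needed for whatever else Node serves; the comet traffic itself never has to pass through the Node process.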

    Read the article

  • Android - Custom Adapter Problem

    - by Ryan
    Hello, I seem to be having a problem with my Custom Adapter view. When I display the list, it only displays a white screen. Here is how it works: 1.) I send a JSON request 2.) populate the ArrayList with the returned results 3.) create a custom adapter 4.) then bind the adapter. Here is steps 2-4 private void updateUI() { ListView myList = (ListView) findViewById(android.R.id.list); itemList = new ArrayList(); Iterator it = data.entrySet().iterator(); while (it.hasNext()) { //Get the key name and value for it Map.Entry pair = (Map.Entry)it.next(); String keyName = (String) pair.getKey(); String value = pair.getValue().toString(); if (value != null) { ListItem li = new ListItem(keyName, value, false); itemList.add(li); } } CustomAdapter mAdapter = new CustomAdapter( mContext, itemList); myList.setAdapter(mAdapter); //Bind the adapter to the list //Tell the dialog it's cool now. dismissDialog(0); //Show next screen flipper.setInAnimation(inFromRightAnimation()); flipper.setOutAnimation(outToRightAnimation()); flipper.showNext(); } And here is my CustomAdapter class: import java.util.List; import android.R.color; import android.content.Context; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.RelativeLayout; import android.widget.TextView; class MyAdapterView extends RelativeLayout { public MyAdapterView(Context c, ListItem li) { super( c ); RelativeLayout rL = new RelativeLayout(c); RelativeLayout.LayoutParams containerParams = new RelativeLayout.LayoutParams( ViewGroup.LayoutParams.FILL_PARENT, ViewGroup.LayoutParams.FILL_PARENT); rL.setLayoutParams(containerParams); rL.setBackgroundColor(color.white); ImageView img = new ImageView (c); img.setImageResource(li.getImage()); img.setPadding(5, 5, 10, 5); rL.addView(img, 48, 48); TextView top = new TextView(c); top.setText(li.getTopText()); top.setTextColor(color.black); top.setTextSize(20); top.setPadding(0, 20, 0, 0); rL.addView(top,ViewGroup.LayoutParams.FILL_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT); TextView bot = new TextView( c ); bot.setText(li.getBottomText()); bot.setTextColor(color.black); bot.setTextSize(12); bot.setPadding(0, 0, 0, 10); bot.setAutoLinkMask(1); rL.addView(bot,ViewGroup.LayoutParams.FILL_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT); } } public class CustomAdapter extends BaseAdapter { private Context context; private List itemList; public CustomAdapter(Context c, List itemL ) { this.context = c; this.itemList = itemL; } public int getCount() { return itemList.size(); } public Object getItem(int position) { return itemList.get(position); } public long getItemId(int position) { return position; } @Override public View getView(int position, View convertView, ViewGroup parent) { ListItem li = itemList.get(position); return new MyAdapterView(this.context, li); } } Does anyone have any idea why this displays a white screen upon completion?? Thanks in advance!
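
    Two things in MyAdapterView stand out and would each explain a blank white list: the inner RelativeLayout rL is built and filled but never attached to the custom view itself, so nothing added to it is ever drawn; and color.black / color.white are resource ids from android.R.color, not ARGB values, so setTextColor()/setBackgroundColor() end up with effectively invisible colours. A hedged sketch of the constructor with both points fixed (the ImageView and second TextView would follow the same pattern):

        import android.content.Context;
        import android.graphics.Color;
        import android.view.ViewGroup;
        import android.widget.RelativeLayout;
        import android.widget.TextView;

        // Sketch: same structure as MyAdapterView, trimmed to one TextView for brevity.
        class MyAdapterViewFixed extends RelativeLayout {

            public MyAdapterViewFixed(Context c, ListItem li) {
                super(c);

                RelativeLayout rL = new RelativeLayout(c);
                rL.setLayoutParams(new RelativeLayout.LayoutParams(
                        ViewGroup.LayoutParams.FILL_PARENT,
                        ViewGroup.LayoutParams.WRAP_CONTENT));
                rL.setBackgroundColor(Color.WHITE);      // an ARGB value, not a resource id

                TextView top = new TextView(c);
                top.setText(li.getTopText());
                top.setTextColor(Color.BLACK);           // black text on the white row
                top.setTextSize(20);
                rL.addView(top, ViewGroup.LayoutParams.FILL_PARENT,
                        ViewGroup.LayoutParams.WRAP_CONTENT);

                addView(rL);                             // without this, nothing is drawn
            }
        }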

    Read the article

< Previous Page | 592 593 594 595 596 597 598 599 600 601 602 603  | Next Page >