Search Results

Search found 46833 results on 1874 pages for 'distributed system'.


  • Map refresh problem: mapview.invalidate() method is not working

    - by RockOn
    Hi friends! In my application I try to show different map locations using different latitude and longitude values. The first time, the application shows the map, but when I change the lat/long and try to invalidate the MapView, the map is not refreshed. Below is my code; please have a look and suggest accordingly:

        // Source code
        protected void onCreate(Bundle icicle) {
            super.onCreate(icicle);
            setContentView(R.layout.main);
            infoTextView = (TextView) findViewById(R.id.infoTextView);

            // Finding current location
            locationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);
            locationManager.requestLocationUpdates(LocationManager.GPS_PROVIDER, 1l, 1l, this);
            Location location = locationManager.getLastKnownLocation("gps");
            // Mock location by hard-code if DDMS has not sent a fake loc to the emulator.
            if (location == null) {
                lat = 13.6972 * 1E6;
                lng = 100.5150 * 1E6;
            } else {
                // Get the real location if we can retrieve the location sent by DDMS or GPS
                lat = location.getLatitude() * 1E6;
                lng = location.getLongitude() * 1E6;
            }
            setGPSLocation(lat, lng);
        }

        // This is the function which I am calling with different lat and long
        public void setGPSLocation(Double lati, Double longi) {
            lat = lati;
            lng = longi;
            System.out.println("Latitude :" + lat + " Longitude :" + lng);

            // Prepare text being shown
            String tmpLoc = LOC_INFO_TEMPLATE;
            tmpLoc = tmpLoc.replace("lg", String.valueOf(lng));
            tmpLoc = tmpLoc.replace("lt", String.valueOf(lat));
            infoTextView.setText(tmpLoc);

            // Setup zoom/hide buttons
            linearLayout = (LinearLayout) findViewById(R.id.zoomview);
            mapView = (MapView) findViewById(R.id.mapview);
            mapView.invalidate();
            mapView.setBuiltInZoomControls(true); // new by me
            mapView.setSatellite(true);           // set satellite view
            mZoom = (ZoomControls) mapView.getZoomControls();
            linearLayout.addView(mZoom);

            // Setup marker
            mapOverlays = mapView.getOverlays();
            drawable = this.getResources().getDrawable(R.drawable.marker);
            itemizedOverlay = new MyItemizedOverlay(drawable);
            GeoPoint point = new GeoPoint(lat.intValue(), lng.intValue());
            OverlayItem overlayitem = new OverlayItem(point, "", "");
            itemizedOverlay.addOverlay(overlayitem);
            mapOverlays.add(itemizedOverlay);

            // Centralize current location
            myMapController = mapView.getController();
            myMapController.setZoom(DEFAULT_ZOOM_NUM);
            centerlizeCurrentLocation(point);
        }

    Any suggestion is truly appreciated.
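
    A hedged observation on the code above: invalidate() only schedules a redraw, it does not clear what is drawn. Because setGPSLocation() fetches the overlay list and adds a new marker on every call, the old overlays (and the old map center) are still in place when the map redraws. A minimal sketch of clearing stale overlays and re-centering first, using the fields and classes from the question (animateTo is the standard MapController call):

        // Sketch only: clear old markers, add the new one, move the map, redraw.
        private void showLocation(GeoPoint point) {
            List<Overlay> overlays = mapView.getOverlays();
            overlays.clear();                          // drop the previous marker(s)
            itemizedOverlay = new MyItemizedOverlay(drawable);
            itemizedOverlay.addOverlay(new OverlayItem(point, "", ""));
            overlays.add(itemizedOverlay);
            mapView.getController().animateTo(point);  // re-center on the new lat/long
            mapView.postInvalidate();                  // also safe off the UI thread
        }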

  • PyOpenGL - passing transformation matrix into shader

    - by M-V
    I am having trouble passing projection and modelview matrices into the GLSL shader from my PyOpenGL code. My understanding is that OpenGL matrices are column major, but when I pass in projection and modelview matrices as shown, I don't see anything. I tried the transpose of the matrices, and it worked for the modelview matrix, but the projection matrix doesn't work either way. Here is the code:

        import OpenGL
        from OpenGL.GL import *
        from OpenGL.GL.shaders import *
        from OpenGL.GLU import *
        from OpenGL.GLUT import *
        from OpenGL.GLUT.freeglut import *
        from OpenGL.arrays import vbo
        import numpy, math, sys

        strVS = """
        attribute vec3 aVert;
        uniform mat4 uMVMatrix;
        uniform mat4 uPMatrix;
        uniform vec4 uColor;
        varying vec4 vCol;
        void main() {
            // option #1 - fails
            gl_Position = uPMatrix * uMVMatrix * vec4(aVert, 1.0);
            // option #2 - works
            gl_Position = vec4(aVert, 1.0);
            // set color
            vCol = vec4(uColor.rgb, 1.0);
        }
        """
        strFS = """
        varying vec4 vCol;
        void main() {
            // use vertex color
            gl_FragColor = vCol;
        }
        """

        # particle system class
        class Scene:
            # initialization
            def __init__(self):
                # create shader
                self.program = compileProgram(compileShader(strVS, GL_VERTEX_SHADER),
                                              compileShader(strFS, GL_FRAGMENT_SHADER))
                glUseProgram(self.program)
                self.pMatrixUniform = glGetUniformLocation(self.program, 'uPMatrix')
                self.mvMatrixUniform = glGetUniformLocation(self.program, "uMVMatrix")
                self.colorU = glGetUniformLocation(self.program, "uColor")
                # attributes
                self.vertIndex = glGetAttribLocation(self.program, "aVert")
                # color
                self.col0 = [1.0, 1.0, 0.0, 1.0]
                # define quad vertices
                s = 0.2
                quadV = [
                    -s, s, 0.0,
                    -s, -s, 0.0,
                    s, s, 0.0,
                    s, s, 0.0,
                    -s, -s, 0.0,
                    s, -s, 0.0
                ]
                # vertices
                self.vertexBuffer = glGenBuffers(1)
                glBindBuffer(GL_ARRAY_BUFFER, self.vertexBuffer)
                vertexData = numpy.array(quadV, numpy.float32)
                glBufferData(GL_ARRAY_BUFFER, 4*len(vertexData), vertexData, GL_STATIC_DRAW)

            # render
            def render(self, pMatrix, mvMatrix):
                # use shader
                glUseProgram(self.program)
                # set proj matrix
                glUniformMatrix4fv(self.pMatrixUniform, 1, GL_FALSE, pMatrix)
                # set modelview matrix
                glUniformMatrix4fv(self.mvMatrixUniform, 1, GL_FALSE, mvMatrix)
                # set color
                glUniform4fv(self.colorU, 1, self.col0)
                # enable arrays
                glEnableVertexAttribArray(self.vertIndex)
                # set buffers
                glBindBuffer(GL_ARRAY_BUFFER, self.vertexBuffer)
                glVertexAttribPointer(self.vertIndex, 3, GL_FLOAT, GL_FALSE, 0, None)
                # draw
                glDrawArrays(GL_TRIANGLES, 0, 6)
                # disable arrays
                glDisableVertexAttribArray(self.vertIndex)

        class Renderer:
            def __init__(self):
                pass

            def reshape(self, width, height):
                self.width = width
                self.height = height
                self.aspect = width/float(height)
                glViewport(0, 0, self.width, self.height)
                glEnable(GL_DEPTH_TEST)
                glDisable(GL_CULL_FACE)
                glClearColor(0.8, 0.8, 0.8, 1.0)
                glutPostRedisplay()

            def keyPressed(self, *args):
                sys.exit()

            def draw(self):
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
                # build projection matrix
                fov = math.radians(45.0)
                f = 1.0/math.tan(fov/2.0)
                zN, zF = (0.1, 100.0)
                a = self.aspect
                pMatrix = numpy.array([f/a, 0.0, 0.0, 0.0,
                                       0.0, f, 0.0, 0.0,
                                       0.0, 0.0, (zF+zN)/(zN-zF), -1.0,
                                       0.0, 0.0, 2.0*zF*zN/(zN-zF), 0.0], numpy.float32)
                # modelview matrix
                mvMatrix = numpy.array([1.0, 0.0, 0.0, 0.0,
                                        0.0, 1.0, 0.0, 0.0,
                                        0.0, 0.0, 1.0, 0.0,
                                        0.5, 0.0, -5.0, 1.0], numpy.float32)
                # render
                self.scene.render(pMatrix, mvMatrix)
                # swap buffers
                glutSwapBuffers()

            def run(self):
                glutInitDisplayMode(GLUT_RGBA)
                glutInitWindowSize(400, 400)
                self.window = glutCreateWindow("Minimal")
                glutReshapeFunc(self.reshape)
                glutDisplayFunc(self.draw)
                glutKeyboardFunc(self.keyPressed)  # checks for key strokes
                self.scene = Scene()
                glutMainLoop()

        glutInit(sys.argv)
        prog = Renderer()
        prog.run()

    When I use option #2 in the shader without either matrix, I get the following output: [screenshot omitted]. What am I doing wrong?
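
    Two hedged things stand out in the code as posted. First, the vertex shader assigns gl_Position twice, and the second assignment (option #2) wins, so the matrices can never appear to have any effect while both lines are active. Second, both pMatrix and mvMatrix above are already laid out in column-major order (the translation 0.5, 0.0, -5.0 sits in elements 12-14), so GL_FALSE is the right transpose flag and no numpy transpose should be needed. A minimal sketch, with a hypothetical CPU-side helper to sanity-check that a vertex lands inside clip space:

        # Keep exactly one gl_Position assignment in the shader.
        strVS = """
        attribute vec3 aVert;
        uniform mat4 uMVMatrix;
        uniform mat4 uPMatrix;
        uniform vec4 uColor;
        varying vec4 vCol;
        void main() {
            gl_Position = uPMatrix * uMVMatrix * vec4(aVert, 1.0);  // option #1 only
            vCol = vec4(uColor.rgb, 1.0);
        }
        """

        # Hypothetical helper (not part of the app): reshape the flat
        # column-major arrays and project a vertex to normalized device
        # coordinates, which should lie in [-1, 1] if the matrices are right.
        import numpy
        def check_projection(pMatrix, mvMatrix, vert=(0.0, 0.0, 0.0)):
            P  = pMatrix.reshape(4, 4).T   # .T because numpy reshapes row-major
            MV = mvMatrix.reshape(4, 4).T
            v  = numpy.array([vert[0], vert[1], vert[2], 1.0])
            clip = numpy.dot(P, numpy.dot(MV, v))
            return clip / clip[3]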

  • WCF: HTTPS on basicHttpBinding

    - by Andrew Kalashnikov
    Hello, colleagues. I've written a WCF service. Unfortunately I have to use basicHttpBinding for PHP callers, but I need security, so I've decided to use transport security with HTTPS. I host the service on IIS 6.0, where I enabled SSL and assigned a certificate. But when I try to open the service through a browser, I get a standard error. What's wrong? Please help; I haven't been able to solve this for several hours.

        <system.serviceModel>
          <bindings>
            <basicHttpBinding>
              <binding name="BindingConfiguration1" maxBufferPoolSize="2147483647" maxReceivedMessageSize="2147483647">
                <readerQuotas maxDepth="2147483647" maxStringContentLength="2147483647" maxArrayLength="2147483647"
                              maxBytesPerRead="2147483647" maxNameTableCharCount="2147483647"/>
                <security mode="Transport">
                  <transport clientCredentialType="None" />
                </security>
              </binding>
            </basicHttpBinding>
          </bindings>
          <services>
            <service name="RegistratorService.Registrator" behaviorConfiguration="RegistratorService.Service1Behavior">
              <endpoint address="https://192.168.0.8/MyService.svc" binding="basicHttpBinding"
                        contract="RegistratorService.IRegistrator" bindingConfiguration="BindingConfiguration1">
                <identity>
                  <dns value="localhost" />
                </identity>
              </endpoint>
              <endpoint address="mex" binding="mexHttpBinding" contract="IMetadataExchange"/>
            </service>
          </services>
          <behaviors>
            <serviceBehaviors>
              <behavior name="RegistratorService.Service1Behavior">
                <serviceMetadata httpsGetEnabled="true" />
                <serviceDebug includeExceptionDetailInFaults="true" />
              </behavior>
            </serviceBehaviors>
          </behaviors>
        </system.serviceModel>
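
    A hedged guess based on the config above: once the binding runs in Transport mode the site is effectively HTTPS-only, but the metadata endpoint still uses mexHttpBinding, and browsing the .svc commonly fails on exactly that mismatch. A minimal sketch of the HTTPS variant (the binding name is the standard WCF one):

        <!-- metadata over HTTPS to match security mode="Transport" -->
        <endpoint address="mex" binding="mexHttpsBinding" contract="IMetadataExchange"/>

    It is also worth confirming that the IIS site binding actually listens on port 443 for the address used in the endpoint (https://192.168.0.8/...), since a certificate issued to a host name will warn when the service is browsed by IP.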

  • Migrating an embedded Jetty server from v6 to v7

    - by Ceilingfish
    Hi chaps, I have an embedded servlet which I use in unit tests; it looks like this:

        public class UnitTestWebservices extends AbstractHandler {
            private Server server;
            private Map<Route,String> data = new HashMap<Route,String>();

            public UnitTestWebservices(int port) throws Exception {
                server = new Server(port);
                server.setHandler(this);
                server.start();
            }

            public void handle(String url, HttpServletRequest request, HttpServletResponse response, int arg3)
                    throws IOException, ServletException {
                final Route route = Route.valueOf(request.getMethod(), url);
                final String content = data.get(route);
                if (content != null) {
                    final ServletOutputStream stream = response.getOutputStream();
                    stream.print(content);
                    stream.flush();
                    stream.close();
                } else {
                    response.sendError(HttpServletResponse.SC_NOT_FOUND);
                }
            }
            ....
        }

    That's written using version 6.1.24 of Jetty. I tried switching over to Jetty 7.1.1.v20100517, and updated the code to this:

        public class UnitTestWebservices extends AbstractHandler {
            private Server server;
            private Map<Route,String> data = new HashMap<Route,String>();

            public UnitTestWebservices(int port) throws Exception {
                server = new Server(port);
                server.setHandler(this);
                server.start();
            }

            public void handle(String url, Request request, HttpServletRequest servletRequest, HttpServletResponse response)
                    throws IOException, ServletException {
                final Route route = Route.valueOf(request.getMethod(), url);
                final String content = data.get(route);
                request.setHandled(true);
                response.setContentType("application/json");
                if (content != null) {
                    response.setStatus(HttpServletResponse.SC_OK);
                    final Writer stream = response.getWriter();
                    stream.append(content);
                } else {
                    response.sendError(HttpServletResponse.SC_NOT_FOUND);
                }
            }
        }

    But whenever I try to make a request to the server, it hangs indefinitely. Has anyone experienced anything similar? It also printed this into the log:

        log4j:WARN No appenders could be found for logger (org.eclipse.jetty.util.log).
        log4j:WARN Please initialize the log4j system properly.
        org.eclipse.jetty.server.Server@670655dd STOPPED
         +-UnitTestWebservices@50ef5502 started
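
    Two hedged observations on the 7.x version: the Jetty 6 handler flushed and closed its output stream, while the new one never commits the writer, which can leave a client waiting on a response that is never finished; and the dump at the end shows the Server itself in STOPPED state while the handler is "started", so it is worth confirming that server.start() completed without throwing. A sketch of the handler with an explicit content length and flush:

        public void handle(String url, Request baseRequest, HttpServletRequest request,
                           HttpServletResponse response) throws IOException, ServletException {
            final Route route = Route.valueOf(request.getMethod(), url);
            final String content = data.get(route);
            baseRequest.setHandled(true);
            response.setContentType("application/json");
            if (content != null) {
                response.setStatus(HttpServletResponse.SC_OK);
                response.setContentLength(content.length());
                final Writer writer = response.getWriter();
                writer.write(content);
                writer.flush(); // commit the response so the client sees it complete
            } else {
                response.sendError(HttpServletResponse.SC_NOT_FOUND);
            }
        }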

  • Error after installing Django (supposed PATH or PYTHONPATH "error")

    - by illuminated
    Hi all, I guess this is a PATH/PYTHONPATH error, but my attempts to fix it have failed so far and Django is still not working. The system is Ubuntu 10.04, 64-bit:

        mx:~/webapps$ cat /etc/lsb-release
        DISTRIB_ID=Ubuntu
        DISTRIB_RELEASE=10.04
        DISTRIB_CODENAME=lucid
        DISTRIB_DESCRIPTION="Ubuntu 10.04 LTS"

    Python version 2.6.5:

        mx:~/webapps$ python -V
        Python 2.6.5

    When I run django-admin.py, the following happens:

        mx:~/webapps$ django-admin.py
        Traceback (most recent call last):
          File "/usr/local/bin/django-admin.py", line 2, in <module>
            from django.core import management
        ImportError: No module named django.core

    Similarly when I import django in a Python shell:

        mx:~/webapps$ python
        Python 2.6.5 (r265:79063, Apr 16 2010, 13:09:56)
        [GCC 4.4.3] on linux2
        Type "help", "copyright", "credits" or "license" for more information.
        >>> import django
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ImportError: No module named django
        >>> quit()

    More details:

        mx:~/webapps$ python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()"
        /usr/lib/python2.6/dist-packages

    Within the Python shell:

        >>> import sys
        >>> print sys.path
        ['', '/usr/lib/python2.6/dist-packages/django', '/usr/local/lib/python2.6/dist-packages/django/bin',
         '/usr/local/lib/python2.6/dist-packages/django', '/home/petra/webapps', '/usr/lib/python2.6',
         '/usr/lib/python2.6/plat-linux2', '/usr/lib/python2.6/lib-tk', '/usr/lib/python2.6/lib-old',
         '/usr/lib/python2.6/lib-dynload', '/usr/lib/python2.6/dist-packages',
         '/usr/lib/python2.6/dist-packages/PIL', '/usr/lib/pymodules/python2.6']

    django-admin.py can be found here:

        mx:~/webapps$ locate django-admin.py
        ~/install/sources/Django-1.2.1/build/lib.linux-i686-2.6/django/bin/django-admin.py
        ~/install/sources/Django-1.2.1/build/scripts-2.6/django-admin.py
        ~/install/sources/Django-1.2.1/django/bin/django-admin.py
        /usr/local/bin/django-admin.py
        /usr/local/lib/python2.6/dist-packages/django/bin/django-admin.py
        /usr/local/lib/python2.6/dist-packages/django/bin/django-admin.pyc

    and in the end this doesn't help:

        export PYTHONPATH="/usr/lib/python2.6/dist-packages/django:$PYTHONPATH"

    nor does this:

        export PYTHONPATH="/usr/local/lib/python2.6/dist-packages/django:$PYTHONPATH"

    How do I solve this? Thanks all in advance! :)
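
    A hedged reading of the output above: every path being added points *into* the django package (.../dist-packages/django), but Python needs the directory that *contains* the package on sys.path for "import django" to resolve. Since locate shows Django installed under /usr/local/lib/python2.6/dist-packages/django, this should work:

        # point PYTHONPATH at the parent of the "django" directory
        export PYTHONPATH="/usr/local/lib/python2.6/dist-packages:$PYTHONPATH"
        python -c "import django; print django.__file__"   # quick verification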

  • Change password code error

    - by shimaTun
    I've written some code to change a password, and it seems to contain an error. The error appears even before I fill in the form to change the password:

        Warning: Cannot modify header information - headers already sent by
        (output started at C:\Program Files\xampp\htdocs\e-Complaint(FYP)\userChangePass.php:7)
        in C:\Program Files\xampp\htdocs\e-Complaint(FYP)\userChangePass.php on line 126

    The code:

        <?php # userChangePass.php
        // This page allows a logged-in user to change their password.
        $page_title = 'Change Your Password';

        // If no userid session variable exists, redirect the user.
        if (!isset($_SESSION['userid'])) {
            header("Location: http://" . $_SERVER['HTTP_HOST'] . dirname($_SERVER['PHP_SELF']) . "/index.php");
            ob_end_clean();
            exit();
        } else {
            if (isset($_POST['submit'])) { // Handle the form.
                require_once('connectioncomplaint.php'); // Connect to the database.

                // Check for a new password and match it against the confirmed password.
                if (eregi("^[[:alnum:]]{4,20}$", stripslashes(trim($_POST['password1'])))) {
                    if ($_POST['password1'] == $_POST['password2']) {
                        $p = escape_data($_POST['password1']);
                    } else {
                        $p = FALSE;
                        echo '<p><font color="red" size="+1">Your password did not match the confirmed password!</font></p>';
                    }
                } else {
                    $p = FALSE;
                    echo '<p><font color="red" size="+1">Please enter a valid password!</font></p>';
                }

                if ($p) { // If everything is OK, make the query.
                    $query = "UPDATE access SET password=PASSWORD('$p') WHERE userid={$_SESSION['userid']}";
                    $result = @mysql_query($query); // Run the query.
                    if (mysql_affected_rows() == 1) { // If it ran OK.
                        // Send an email, if desired.
                        echo '<p><b>Your password has been changed.</b></p>';
                        //include('templates/footer.inc'); // Include the HTML footer.
                        exit();
                    } else { // If it did not run OK.
                        $message = '<p>Your password could not be changed due to a system error. We apologize for any inconvenience.</p><p>' . mysql_error() . '</p>';
                    }
                    mysql_close(); // Close the database connection.
                } else { // Failed the validation test.
                    echo '<p><font color="red" size="+1">Please try again.</font></p>';
                }
            } // End of the main submit conditional.
        }
        ?>

    The error points at this line:

        header("Location: http://" . $_SERVER['HTTP_HOST'] . dirname($_SERVER['PHP_SELF']) . "/index.php");

    Please help me, guys...
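
    The warning itself explains the failure, as a general PHP rule: header() can only be sent before any output, and something on line 7 of userChangePass.php (HTML, an echo, or even whitespace before <?php) has already produced output. A hedged sketch of the usual fix, buffering output from the very first line so the redirect can still be issued (this assumes nothing is printed by an include before this point):

        <?php
        // First statements of the script: buffer all output until the
        // headers are decided.
        ob_start();
        session_start();

        if (!isset($_SESSION['userid'])) {
            header('Location: http://' . $_SERVER['HTTP_HOST']
                 . dirname($_SERVER['PHP_SELF']) . '/index.php');
            ob_end_clean();
            exit();
        }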

  • Button control subclass: click event does not fire when using UpdatePanel

    - by nw
    Using suggestions from this thread, I created a ModernButton control that uses the HTML button tag instead of input. This control works great except when embedded within an UpdatePanel control. In that case, it does trigger the partial postback, but its click event does not fire. The control is defined thus:

        [ParseChildren(false)]
        [PersistChildren(true)]
        public class ModernButton : Button
        {
            private string _iconURL = "";

            protected override string TagName
            {
                get { return "button"; }
            }

            protected override HtmlTextWriterTag TagKey
            {
                get { return HtmlTextWriterTag.Button; }
            }

            public new string Text
            {
                get { return ViewState["NewText"] as string; }
                set { ViewState["NewText"] = HttpUtility.HtmlDecode(value); }
            }

            public string IconURL
            {
                get { return this._iconURL; }
                set { this._iconURL = value.Trim(); }
            }

            protected override void OnPreRender(System.EventArgs e)
            {
                base.OnPreRender(e);
                LiteralControl textCtrl = new LiteralControl(this.Text);
                if (this._iconURL != "")
                {
                    LiteralControl openDiv = new LiteralControl(
                        string.Format(
                            "<div style=\"background-image:url({0}); background-position:left center; background-repeat:no-repeat; line-height:16px; padding:3px 0 3px 22px;\">",
                            ResolveClientUrl(this._iconURL)));
                    LiteralControl closeDiv = new LiteralControl("</div>");
                    Controls.AddAt(0, openDiv);
                    Controls.Add(textCtrl);
                    Controls.Add(closeDiv);
                }
                else
                {
                    Controls.Add(textCtrl);
                }
                base.Text = UniqueID;
            }

            protected override void RenderContents(HtmlTextWriter writer)
            {
                RenderChildren(writer);
            }
        }

    I use it so:

        <asp:UpdatePanel runat="server" ID="uxSearchPanel" ChildrenAsTriggers="true" UpdateMode="Conditional">
            <Triggers>
                <asp:AsyncPostBackTrigger ControlID="uxSearchButton" />
            </Triggers>
            <ContentTemplate>
                ...
                <ctrl:ModernButton ID="uxSearchButton" runat="server" Text="Search"
                    IconURL="Icons/magnifier.png" OnClick="uxSearchButton_Click"></ctrl:ModernButton>
            </ContentTemplate>
        </asp:UpdatePanel>

    Which renders:

        <div id="uxBody_uxSearchPanel">
            ...
            <button type="submit" name="ctl00$uxBody$uxSearchButton" value="ctl00$uxBody$uxSearchButton" id="uxBody_uxSearchButton">
                <div style="background-image:url(Icons/magnifier.png); background-position:left center; background-repeat:no-repeat; line-height:16px; padding:3px 0 3px 22px;">Search</div>
            </button>
        </div>

    The ModernButton generates a postback in every case (whether partial or full), but the server-side click event (uxSearchButton_Click) does not fire in the partial-postback scenario.
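
    A hedged avenue to try, assuming the partial-postback machinery resolves the event source from the posted name/value pair or from __EVENTTARGET: make the postback explicit instead of relying on the <button> element's submit behavior, whose value can be posted differently than an <input type="submit">. With the inherited Button.UseSubmitBehavior set to false, the rendered element posts back through __doPostBack, so __EVENTTARGET carries the button's UniqueID even inside an UpdatePanel:

        <ctrl:ModernButton ID="uxSearchButton" runat="server" Text="Search"
            IconURL="Icons/magnifier.png" UseSubmitBehavior="false"
            OnClick="uxSearchButton_Click" />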

  • memcpy segmentation fault on Linux but not OS X

    - by Andre
    I'm working on implementing a log-based file system for a file as a class project. I have a good amount of it working on my 64-bit OS X laptop, but when I try to run the code on the CS department's 32-bit Linux machines, I get a seg fault.

    The API we're given allows writing DISK_SECTOR_SIZE (512) bytes at a time. Our log record consists of the 512 bytes the user wants to write as well as some metadata (which sector he wants to write to, the type of operation, etc). All in all, the size of the "record" object is 528 bytes, which means each log record spans 2 sectors on the disk. The first record writes 0-512 on sector 0, and 0-15 on sector 1. The second record writes 16-512 on sector 1, and 0-31 on sector 2. The third record writes 32-512 on sector 2, and 0-47 on sector 3. Etc.

    So what I do is read the two sectors I'll be modifying into 2 freshly allocated buffers, then copy starting at record into buf1+offset for 512-offset bytes. This works correctly on both machines. However, the second memcpy fails. Specifically, "record+DISK_SECTOR_SIZE-offset" in the code below segfaults, but only on the Linux machine.

    Running some random tests, it gets more curious. The Linux machine reports sizeof(Record) to be 528. Therefore, if I tried to memcpy from record+500 into buf for 1 byte, it shouldn't have a problem. In fact, the biggest offset I can get from record is 254. That is, memcpy(buf1, record+254, 1) works, but memcpy(buf1, record+255, 1) segfaults. Does anyone know what I'm missing?

        Record *record = malloc(sizeof(Record));
        record->tid = tid;
        record->opType = OP_WRITE;
        record->opArg = sector;
        int i;
        for (i = 0; i < DISK_SECTOR_SIZE; i++) {
            record->data[i] = buf[i]; // *buf is passed into this function
        }
        char* buf1 = malloc(DISK_SECTOR_SIZE);
        char* buf2 = malloc(DISK_SECTOR_SIZE);
        d_read(ad->disk, ad->curLogSector, buf1);
        d_read(ad->disk, ad->curLogSector+1, buf2);
        memcpy(buf1+offset, record, DISK_SECTOR_SIZE-offset);
        memcpy(buf2, record+DISK_SECTOR_SIZE-offset, offset+sizeof(Record)-sizeof(record->data));
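
    What's being missed, almost certainly: record is a Record*, so record + n advances by n * sizeof(Record) (528 bytes here), not by n bytes. record + (DISK_SECTOR_SIZE - offset) can therefore point hundreds of kilobytes past the 528-byte allocation; whether that faults depends entirely on heap layout, which is why the 64-bit OS X run survives and the 32-bit Linux run does not (and why record + 254 happens to be readable while record + 255 is not). A sketch of the byte-wise arithmetic that was intended:

        /* Cast to char* so the offset is counted in bytes, not in Records. */
        memcpy(buf1 + offset, record, DISK_SECTOR_SIZE - offset);
        memcpy(buf2,
               (const char *)record + (DISK_SECTOR_SIZE - offset),
               offset + sizeof(Record) - sizeof(record->data));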

  • GWT styles not applying

    - by sernaferna
    I'm creating a GWT application that uses UiBinder, and I've come across a bizarre problem where styles aren't applied to my elements--until I refresh the browser, and then the styles briefly get applied, in that fraction of a second before the page refreshes. In other words:

        1. Open the page; none of my defined styles are used.
        2. Hit Refresh.
        3. For a fraction of a second the styles are used, before the page goes blank.
        4. The page reloads, without the styles again.

    I'm going to include my entire *.ui.xml file, because it's not too big:

        <!DOCTYPE ui:UiBinder SYSTEM "http://dl.google.com/gwt/DTD/xhtml.ent">
        <ui:UiBinder xmlns:ui="urn:ui:com.google.gwt.uibinder"
                     xmlns:g="urn:import:com.google.gwt.user.client.ui">
            <ui:style>
                .idLabelStyle {
                    font-weight: bold;
                    text-align: center;
                    width: 100px;
                    border-style: solid;
                    border-width: 1px;
                    margin-right: 5px;
                }
                .nameLabelStyle {
                    font-weight: bold;
                    text-align: center;
                    width: 500px;
                    border-style: solid;
                    border-width: 1px;
                    margin-right: 5px;
                }
                .addressLabelStyle {
                    font-weight: bold;
                    text-align: center;
                    width: 500px;
                    border-style: solid;
                    border-width: 1px;
                }
            </ui:style>
            <g:HTMLPanel>
                <g:HorizontalPanel>
                    <g:Label addStyleNames="{style.idLabelStyle}">ID</g:Label>
                    <g:Label addStyleNames="{style.nameLabelStyle}">Name</g:Label>
                    <g:Label addStyleNames="{style.addressLabelStyle}">Address</g:Label>
                </g:HorizontalPanel>
            </g:HTMLPanel>
        </ui:UiBinder>

    I really hope I'm missing something simple.

  • How do I get a screenshot of a given website using C#

    - by Ender
    I'm writing a specialised crawler and parser for internal use, and I require the ability to take a screenshot of a web page in order to check what colours are being used throughout. The program will take in around ten web addresses and will save them as bitmap images; from there I plan to use LockBits to create a list of the five most-used colours within each image. To my knowledge it's the easiest way to get the colours used within a web page, but if there is an easier way to do it, please chime in with your suggestions. Anyway, I was going to use this program until I saw the price tag. I'm also fairly new to C#, having only used it for a few months. Can anyone provide me with a solution to my problem of taking a screenshot of a web page in order to extract the colour scheme?

    EDIT: Sorry for not getting back to this sooner, but I've been busy with some other things. Anyway, the code seems to work well, but the problem I am having right now is that I am running it within a form, and naturally, with Application.Run() being called, I cannot run two instances of the same form at once. It was recommended to use Form.ShowDialog(), but that broke everything. Can anyone give me a hand with this code?

        public static void buildScreenshotFromURL(string url)
        {
            int width = 800;
            int height = 600;
            using (WebBrowser browser = new WebBrowser())
            {
                browser.Width = width;
                browser.Height = height;
                browser.ScrollBarsEnabled = true;
                // This will be called when the page finishes loading
                browser.DocumentCompleted += new System.Windows.Forms.WebBrowserDocumentCompletedEventHandler(OnDocumentCompleted);
                //browser.DocumentCompleted += OnDocumentCompleted;
                browser.Navigate(url);
                // This prevents the application from exiting until Application.Exit
                // is called. Application.Run() does not work as it cannot be called
                // twice; Form.ShowDialog() was recommended, but still has issues.
                Application.Run();
            }
        }

        public static void OnDocumentCompleted(object sender, WebBrowserDocumentCompletedEventArgs e)
        {
            // Define size of thumbnail needed
            int thumbSize = 50;
            // Now that the page is loaded, save it to a bitmap
            WebBrowser browser = (WebBrowser)sender;
            using (Graphics graphics = browser.CreateGraphics())
            {
                using (Bitmap bitmap = new Bitmap(browser.Width, browser.Height, graphics))
                {
                    Rectangle bounds = new Rectangle(0, 0, bitmap.Width, bitmap.Height);
                    browser.DrawToBitmap(bitmap, bounds);
                    Bitmap thumbBitmap = new Bitmap(bitmap.GetThumbnailImage(thumbSize, thumbSize, thumbCall, IntPtr.Zero));
                    thumbBitmap.Save("screenshot.png", ImageFormat.Png);
                    handleImage(thumbBitmap);
                }
            }
        }
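
    A hedged sketch of one way around the single-Application.Run() limitation described in the edit: give each capture its own STA thread with its own message loop, ending the loop from DocumentCompleted. The WebBrowser control requires an STA thread, and Application.Run()/Application.ExitThread() are per-thread, so captures can then run one after another without touching the main form's loop:

        // Sketch only: wraps the question's capture logic in a per-thread loop.
        using System.Threading;
        using System.Windows.Forms;

        public static void CaptureUrl(string url)
        {
            var t = new Thread(() =>
            {
                var browser = new WebBrowser { Width = 800, Height = 600 };
                browser.DocumentCompleted += (s, e) =>
                {
                    // ... existing bitmap/thumbnail code from OnDocumentCompleted ...
                    Application.ExitThread();   // end this thread's message loop only
                };
                browser.Navigate(url);
                Application.Run();              // pump messages until ExitThread
            });
            t.SetApartmentState(ApartmentState.STA);  // WebBrowser needs STA
            t.Start();
            t.Join();                                 // wait for the capture to finish
        }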

  • Wondering why DisplayName attribute is ignored in LabelFor on an overridden property

    - by Lasse Krantz
    Hi, today I got confused when doing a couple of <%= Html.LabelFor(m => m.MyProperty) %> calls in ASP.NET MVC 2 and using the [DisplayName("Show this instead of MyProperty")] attribute from System.ComponentModel. As it turned out, when I put the attribute on an overridden property, LabelFor didn't seem to notice it. However, the [Required] attribute works fine on the overridden property, and the generated error message actually uses the DisplayNameAttribute.

    This is some trivial example code; the more realistic scenario is that I have a database model separate from the view model, but for convenience I'd like to inherit from the database model, adding view-only properties and decorating the view model with the attributes for the UI.

        public class POCOWithoutDataAnnotations
        {
            public virtual string PleaseOverrideMe { get; set; }
        }

        public class EditModel : POCOWithoutDataAnnotations
        {
            [Required]
            [DisplayName("This should be as label for please override me!")]
            public override string PleaseOverrideMe
            {
                get { return base.PleaseOverrideMe; }
                set { base.PleaseOverrideMe = value; }
            }

            [Required]
            [DisplayName("This property exists only in EditModel")]
            public string NonOverriddenProp { get; set; }
        }

    The strongly typed ViewPage<EditModel> contains:

        <div class="editor-label">
            <%= Html.LabelFor(model => model.PleaseOverrideMe) %>
        </div>
        <div class="editor-field">
            <%= Html.TextBoxFor(model => model.PleaseOverrideMe) %>
            <%= Html.ValidationMessageFor(model => model.PleaseOverrideMe) %>
        </div>
        <div class="editor-label">
            <%= Html.LabelFor(model => model.NonOverriddenProp) %>
        </div>
        <div class="editor-field">
            <%= Html.TextBoxFor(model => model.NonOverriddenProp) %>
            <%= Html.ValidationMessageFor(model => model.NonOverriddenProp) %>
        </div>

    The labels are then displayed as "PleaseOverrideMe" (not using the DisplayNameAttribute) and "This property exists only in EditModel" (using the DisplayNameAttribute) when viewing the page. If I post with empty values, triggering the validation with this action method:

        [HttpPost]
        public ActionResult Edit(EditModel model)
        {
            if (!ModelState.IsValid)
                return View(model);
            return View("Thanks");
        }

    then <%= Html.ValidationMessageFor(model => model.PleaseOverrideMe) %> actually uses the [DisplayName("This should be as label for please override me!")] attribute and produces the default error text "The This should be as label for please override me! field is required."

    Would some friendly soul shed some light on this?
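
    A hedged explanation, for what it's worth: MVC 2's default metadata provider discovers attributes through TypeDescriptor, which is known not to surface attributes placed on overrides of virtual properties, while the validation pipeline aggregates attributes differently; that would match the symptoms exactly. One workaround sketch under that assumption is to hide the base property with new instead of overriding it, so the annotated property is the one declared directly on the view model:

        public class EditModel : POCOWithoutDataAnnotations
        {
            [Required]
            [DisplayName("This should be as label for please override me!")]
            public new string PleaseOverrideMe
            {
                get { return base.PleaseOverrideMe; }
                set { base.PleaseOverrideMe = value; }
            }
        }

    The cost is that code holding a POCOWithoutDataAnnotations reference no longer sees the property polymorphically, so this only fits if the model is bound and rendered as EditModel throughout.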

  • ld: symbol(s) not found with OpenSSL (libssl)

    - by Benjamin
    Hi all, I'm trying to build TorTunnel on my Mac. I've successfully installed the Boost library and its development files. TorTunnel also requires OpenSSL and its development files; I've got them installed in /usr/lib/libssl.dylib and /usr/include/openssl/. When I run the make command, this is the error I'm getting:

        g++ -ggdb -g -O2 -lssl -lboost_system-xgcc42-mt-1_38 -o torproxy TorProxy.o HybridEncryption.o Connection.o Cell.o Directory.o ServerListing.o Util.o Circuit.o CellEncrypter.o RelayCellDispatcher.o CellConsumer.o ProxyShuffler.o CreateCell.o CreatedCell.o TorTunnel.o SocksConnection.o Network.o
        Undefined symbols:
          "_BN_hex2bn", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
          "_BN_free", referenced from:
              Circuit::~Circuit() in Circuit.o
              Circuit::~Circuit() in Circuit.o
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
          "_DH_generate_key", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
          "_PEM_read_bio_RSAPublicKey", referenced from:
              ServerListing::getOnionKey() in ServerListing.o
          "_BIO_s_mem", referenced from:
              Connection::initializeSSL() in Connection.o
              Connection::initializeSSL() in Connection.o
          "_DH_free", referenced from:
              Circuit::~Circuit() in Circuit.o
          "_BIO_ctrl_pending", referenced from:
              Connection::writeFromBuffer(boost::function) in Connection.o
          "_RSA_size", referenced from:
              HybridEncryption::encryptInSingleChunk(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
              HybridEncryption::encryptInHybridChunk(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
              HybridEncryption::encrypt(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
          "_RSA_public_encrypt", referenced from:
              HybridEncryption::encryptInSingleChunk(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
              HybridEncryption::encryptInHybridChunk(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
          "_BN_num_bits", referenced from:
              CreateCell::CreateCell(unsigned short, dh_st*, rsa_st*) in CreateCell.o
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
              CreatedCell::isValid() in CreatedCell.o
          "_SHA1", referenced from:
              CellEncrypter::expandKeyMaterial(unsigned char*, int, unsigned char*, int) in CellEncrypter.o
          "_BN_bn2bin", referenced from:
              CreateCell::CreateCell(unsigned short, dh_st*, rsa_st*) in CreateCell.o
          "_BN_bin2bn", referenced from:
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
          "_DH_compute_key", referenced from:
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
          "_BIO_new", referenced from:
              Connection::initializeSSL() in Connection.o
              Connection::initializeSSL() in Connection.o
          "_BIO_new_mem_buf", referenced from:
              ServerListing::getOnionKey() in ServerListing.o
          "_AES_ctr128_encrypt", referenced from:
              HybridEncryption::AES_encrypt(unsigned char*, int, unsigned char*, unsigned char*, int) in HybridEncryption.o
              CellEncrypter::aesOperate(Cell&, aes_key_st*, unsigned char*, unsigned char*, unsigned int*) in CellEncrypter.o
          "_BIO_read", referenced from:
              Connection::writeFromBuffer(boost::function) in Connection.o
          "_SHA1_Update", referenced from:
              CellEncrypter::calculateDigest(SHAstate_st*, RelayCell&, unsigned char*) in CellEncrypter.o
              CellEncrypter::initKeyMaterial(unsigned char*) in CellEncrypter.o
              CellEncrypter::initKeyMaterial(unsigned char*) in CellEncrypter.o
          "_SHA1_Final", referenced from:
              CellEncrypter::calculateDigest(SHAstate_st*, RelayCell&, unsigned char*) in CellEncrypter.o
          "_DH_size", referenced from:
              CreatedCell::getKeyMaterial(unsigned char**, unsigned char**) in CreatedCell.o
          "_DH_new", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
          "_BIO_write", referenced from:
              Connection::readIntoBufferComplete(boost::function, boost::system::error_code const&, unsigned long) in Connection.o
          "_RSA_free", referenced from:
              Circuit::~Circuit() in Circuit.o
          "_BN_dup", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
              Circuit::initializeDhParameters() in Circuit.o
          "_BN_new", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
              Circuit::initializeDhParameters() in Circuit.o
          "_SHA1_Init", referenced from:
              CellEncrypter::CellEncrypter() in CellEncrypter.o
              CellEncrypter::CellEncrypter() in CellEncrypter.o
          "_RAND_bytes", referenced from:
              HybridEncryption::encryptInHybridChunk(unsigned char*, int, unsigned char**, int*, rsa_st*) in HybridEncryption.o
              Util::getRandomId() in Util.o
          "_AES_set_encrypt_key", referenced from:
              HybridEncryption::AES_encrypt(unsigned char*, int, unsigned char*, unsigned char*, int) in HybridEncryption.o
              CellEncrypter::initKeyMaterial(unsigned char*) in CellEncrypter.o
              CellEncrypter::initKeyMaterial(unsigned char*) in CellEncrypter.o
          "_BN_set_word", referenced from:
              Circuit::initializeDhParameters() in Circuit.o
          "_RSA_new", referenced from:
              ServerListing::getOnionKey() in ServerListing.o
        ld: symbol(s) not found
        collect2: ld returned 1 exit status
        make: *** [torproxy] Error 1

    Any idea how I could fix it?
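
    Hedged, but this pattern is characteristic: every missing symbol (BN_*, DH_*, RSA_*, BIO_*, SHA1*, AES_*, RAND_bytes) is defined in OpenSSL's libcrypto, not in libssl, and the link line above never mentions -lcrypto. Adding it (and, as general good practice, putting libraries after the object files) should resolve these:

        g++ -ggdb -g -O2 -o torproxy TorProxy.o HybridEncryption.o Connection.o \
            Cell.o Directory.o ServerListing.o Util.o Circuit.o CellEncrypter.o \
            RelayCellDispatcher.o CellConsumer.o ProxyShuffler.o CreateCell.o \
            CreatedCell.o TorTunnel.o SocksConnection.o Network.o \
            -lssl -lcrypto -lboost_system-xgcc42-mt-1_38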

  • Is the Scala 2.8 collections library a case of "the longest suicide note in history" ?

    - by oxbow_lakes
    First, note that the inflammatory subject title is a quotation made about the manifesto of a UK political party in the early 1980s. This question is subjective, but it is a genuine question; I've made it CW and I'd like some opinions on the matter.

    Despite whatever my wife and coworkers keep telling me, I don't think I'm an idiot: I have a good degree in mathematics from the University of Oxford and I've been programming commercially for almost 12 years, and in Scala for about a year (also commercially).

    I have just started to look at the Scala collections library re-implementation which is coming in the imminent 2.8 release. Those familiar with the library from 2.7 will notice that the library, from a usage perspective, has changed little. For example...

        > List("Paris", "London").map(_.length)
        res0: List[Int] = List(5, 6)

    ...would work in either version. The library is eminently usable: in fact it's fantastic. However, those previously unfamiliar with Scala and poking around to get a feel for the language now have to make sense of method signatures like:

        def map[B, That](f: A => B)(implicit bf: CanBuildFrom[Repr, B, That]): That

    For such simple functionality, this is a daunting signature and one which I find myself struggling to understand. Not that I think Scala was ever likely to be the next Java (or C/C++/C#) - I don't believe its creators were aiming it at that market - but I think it is/was certainly feasible for Scala to become the next Ruby or Python (i.e. to gain a significant commercial user-base).

      • Is this going to put people off coming to Scala?
      • Is this going to give Scala a bad name in the commercial world as an academic plaything that only dedicated PhD students can understand? Are CTOs and heads of software going to get scared off?
      • Was the library re-design a sensible idea?
      • If you're using Scala commercially, are you worried about this? Are you planning to adopt 2.8 immediately or wait to see what happens?

    Steve Yegge once attacked Scala (mistakenly in my opinion) for what he saw as its overcomplicated type system. I worry that someone is going to have a field day spreading FUD with this API (similarly to how Josh Bloch scared the JCP out of adding closures to Java).

    Note - I should be clear that, whilst I believe that Josh Bloch was influential in the rejection of the BGGA closures proposal, I don't ascribe this to anything other than his honestly-held belief that the proposal represented a mistake.

  • How do I get ELMAH to work with SQL Server (permission problems)

    - by Gary McGill
    I've got ELMAH working on my (Cassini) development server, and was quite happy with it, but now that I'm trying to move everything to my production server (IIS7), the honeymoon looks like being over. I've got past the "gotcha" with IIS7, which frankly could have been better highlighted in the documentation, and if I just use the in-memory log, then it works. However, I'm trying to get it to use the SQL Server log (as I do on my development system), and I'm getting an error along the lines of:

        The EXECUTE permission was denied on the object ELMAH_GetErrorsXml

    Well, fine. I know how to grant database permissions, but I'm really struggling to understand which user and which stored procs/tables I need to grant access to. The thing that's really confusing me is that I didn't have to do anything like this to get it to work on my development server. The only difference I can see is that on my development server it seems to connect as NT AUTHORITY\IUSR, whereas on my production server it seems to connect as NT AUTHORITY\NETWORK SERVICE. (It's just using a trusted connection, so I've not explicitly configured it to do that - I presume it's to do with the web server.)

    UPDATE: I've since established that because I'm using Cassini, it was actually logging in as me (an admin) and not IUSR, which explains why I didn't get any permission problems.

    On my development server, the IUSR account is a member of the public database role, and has access to the required database (again as "public"). There's no explicit granting of object-level permissions. [See update above - this is irrelevant.]

    On my production server, I've added NETWORK SERVICE in exactly the same way (public database role, explicit access to the database as "public"). Yet I get this permission error. Why?! [See update above - the only reason I don't get a permission error locally is because I'm running as an admin.]

    And, of course, if the fact that it works locally is just "luck", I will need to know which SPs/tables to grant access to. My guess would be all 3 SPs and not the table, but it would be good (again) to see some documentation that makes this explicit.
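
    For the record, a hedged sketch of the object-level grants, assuming the standard ELMAH SQL script was used (it creates three stored procedures, and the table is only touched through them). The account must first exist as a user in the ELMAH database:

        -- map the service account into the database, then grant execute
        CREATE LOGIN [NT AUTHORITY\NETWORK SERVICE] FROM WINDOWS; -- if not already present
        USE ElmahDb; -- hypothetical database name
        CREATE USER [NT AUTHORITY\NETWORK SERVICE] FOR LOGIN [NT AUTHORITY\NETWORK SERVICE];
        GRANT EXECUTE ON [dbo].[ELMAH_LogError]     TO [NT AUTHORITY\NETWORK SERVICE];
        GRANT EXECUTE ON [dbo].[ELMAH_GetErrorXml]  TO [NT AUTHORITY\NETWORK SERVICE];
        GRANT EXECUTE ON [dbo].[ELMAH_GetErrorsXml] TO [NT AUTHORITY\NETWORK SERVICE];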

  • How should Application.Run() be called for the main presenter of a MVP WinForms app?

    - by Mr Roys
    I'm learning to apply MVP to a simple WinForms app (only one form) in C# and encountered an issue while creating the main presenter in static void Main(). Is it a good idea to expose a View from the Presenter in order to supply it as a parameter to Application.Run()?

    Currently, I've implemented an approach which allows me not to expose the View as a property of the Presenter:

        static void Main()
        {
            IView view = new View();
            Model model = new Model();
            Presenter presenter = new Presenter(view, model);
            presenter.Start();
            Application.Run();
        }

    The Start and Stop methods in Presenter:

        public void Start()
        {
            view.Start();
        }

        public void Stop()
        {
            view.Stop();
        }

    The Start and Stop methods in View (a Windows Form):

        public void Start()
        {
            this.Show();
        }

        public void Stop()
        {
            // The only way to close a message loop started via
            // Application.Run() without a Form parameter
            Application.Exit();
        }

    The Application.Exit() call seems like an inelegant way to close the form (and the application). The other alternative would be to expose the View as a public property of the Presenter in order to call Application.Run() with a Form parameter:

        static void Main()
        {
            IView view = new View();
            Model model = new Model();
            Presenter presenter = new Presenter(view, model);
            Application.Run(presenter.View);
        }

    The Start and Stop methods in Presenter remain the same. An additional property is added to return the View as a Form:

        public void Start()
        {
            view.Start();
        }

        public void Stop()
        {
            view.Stop();
        }

        // New property to return the view as a Form for Application.Run(Form form)
        public System.Windows.Forms.Form View
        {
            get { return view as Form; }
        }

    The Start and Stop methods in View (a Windows Form) would then be written as below:

        public void Start()
        {
            this.Show();
        }

        public void Stop()
        {
            this.Close();
        }

    Could anyone suggest which is the better approach and why? Or are there even better ways to resolve this issue?
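
    A hedged third option that avoids both Application.Exit() and exposing the form: run an ApplicationContext and let the presenter signal when it is done. The Stopped event below is hypothetical (it is not part of the question's Presenter), but anything equivalent works:

        static void Main()
        {
            IView view = new View();
            Model model = new Model();
            Presenter presenter = new Presenter(view, model);

            var context = new ApplicationContext();
            presenter.Stopped += (s, e) => context.ExitThread(); // hypothetical event raised by Stop()
            presenter.Start();
            Application.Run(context);   // the message loop ends when ExitThread is called
        }

    This keeps Main() ignorant of the concrete Form while still giving the message loop a clean, local shutdown path.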

  • Which management tools would you recommend for software development?

    - by Robert Schneider
    What would you generally recommend for software development? Which combination do you use or would you recommend? I assume that tools are needed for version control, issues/bugs/tasks, release management, and requirement management. Tools for test management and project management should be accounted for as well, I guess. Did I forget anything? Maybe tools for continuous integration.

    I'm not interested in a halfway combination of one or two tools like Subversion + Bugzilla (I know they are good, but for a company they might not be sufficient). And tools like make/ant shouldn't be taken into account either. I'd like to know a combination that covers everything important for professional software development. However, it could be a single tool, of course, if it covers all the management issues. What do you think would be a good combination? I assume a combination should be regarded as good if the tools themselves are good, but also if they have good integrations.

    Update: Something like Ant is just a script, not a management tool. Okay: we do already have Perforce. But this is somehow generic. We have different projects that use C, VS/.NET, Python, PHP, and we will be starting new projects in Java. Plenty of languages and frameworks, though some are going to be legacy. So the tools should be generic. Obviously we don't want to use Bugzilla for one project and Jira for another; the management tools have to be generic.

    Further thoughts: Think of SourceForge: for each project they offer a version control system, an issue tracker, a wiki, ... totally independent of what the project is about. Those are some management tools that a project usually needs. I would say that most companies need such a set of tools. But I could imagine, if a company has several projects, they need further tools too: for project management, quality management, ... And I could imagine there are some recommendable combinations of tools.

    A not really appropriate comparison to this could be LAMP (Linux, Apache, MySQL, PHP): a combination of software that has evolved since its parts work very well together, and that is pretty useful for a lot of applications (or rather web sites). So maybe there are some recommendable management tool combinations that also fit together like LAMP. The integration is important.

    Thank you for the incredibly fast and helpful responses!!!

  • How to get Alfresco login ticket without user password, but with impersonating user with user principal name (UPN)

    - by dok
    I'm writing a DLL that has a function for getting an Alfresco login ticket without using the user's password, using only a user principal name (UPN). I'm calling the Alfresco REST API service /wcservice. I use NTLM in Alfresco. I'm impersonating users using the WindowsIdentity constructor, as explained here: http://msdn.microsoft.com/en-us/library/ms998351.aspx#paght000023_impersonatingbyusingwindowsidentity. I checked, and the user is properly impersonated (I checked the WindowsIdentity.GetCurrent().Name property). After impersonating a user, I try to make an HttpWebRequest and set its credentials with CredentialCache.DefaultNetworkCredentials. I get the error:

        The remote server returned an error: (401) Unauthorized.
            at System.Net.HttpWebRequest.GetResponse()

    When I use new NetworkCredential("username", "P@ssw0rd") to set the request credentials, I get an Alfresco login ticket (HttpStatusCode.OK, 200). Is there any way that I can get an Alfresco login ticket without the user's password? Here is the code that I'm using:

        private string GetTicket(string UPN)
        {
            WindowsIdentity identity = new WindowsIdentity(UPN);
            WindowsImpersonationContext context = null;
            try
            {
                context = identity.Impersonate();
                return MakeWebRequest();
            }
            catch (Exception e)
            {
                return e.Message + Environment.NewLine + e.StackTrace;
            }
            finally
            {
                if (context != null)
                {
                    context.Undo();
                }
            }
        }

        private string MakeWebRequest()
        {
            string URI = "http://alfrescoserver/alfresco/wcservice/mg/util/login";
            HttpWebRequest request = WebRequest.Create(URI) as HttpWebRequest;
            request.CookieContainer = new CookieContainer(1);
            //request.Credentials = new NetworkCredential("username", "p@ssw0rd"); // It works with this
            request.Credentials = CredentialCache.DefaultNetworkCredentials;       // It doesn't work with this
            //request.Credentials = CredentialCache.DefaultCredentials;            // It doesn't work with this either
            try
            {
                using (HttpWebResponse response = request.GetResponse() as HttpWebResponse)
                {
                    StreamReader sr = new StreamReader(response.GetResponseStream());
                    return sr.ReadToEnd();
                }
            }
            catch (Exception e)
            {
                return (e.Message + Environment.NewLine + e.StackTrace);
            }
        }

    Here are records from Alfresco stdout.log (if it helps in any way):

        17:18:04,550 DEBUG [app.servlet.NTLMAuthenticationFilter] Processing request: /alfresco/wcservice/mg/util/login SID:7453F7BD4FD2E6A61AD40A31A37733A5
        17:18:04,550 DEBUG [web.scripts.DeclarativeRegistry] Web Script index lookup for uri /mg/util/login took 0.526239ms
        17:18:04,550 DEBUG [app.servlet.NTLMAuthenticationFilter] New NTLM auth request from 10.**.**.** (10.**.**.**:1229)
        17:18:04,566 DEBUG [app.servlet.NTLMAuthenticationFilter] Processing request: /alfresco/wcservice/mg/util/login SID:7453F7BD4FD2E6A61AD40A31A37733A5
        17:18:04,566 DEBUG [web.scripts.DeclarativeRegistry] Web Script index lookup for uri /mg/util/login took 0.400909ms
        17:18:04,566 DEBUG [app.servlet.NTLMAuthenticationFilter] Received type1 [Type1:0xe20882b7,Domain:<NotSet>,Wks:<NotSet>]
        17:18:04,566 DEBUG [app.servlet.NTLMAuthenticationFilter] Client domain null
        17:18:04,675 DEBUG [app.servlet.NTLMAuthenticationFilter] Sending NTLM type2 to client - [Type2:0x80000283,Target:AlfrescoServerA,Ch:197e2631cc3f9e0a]
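
    A hedged explanation of why the impersonated DefaultNetworkCredentials fail: new WindowsIdentity(UPN) performs a Kerberos S4U ("protocol transition") logon, and unless the process account is trusted for delegation, the resulting token carries only identification/impersonation rights on the local box; it cannot answer an NTLM challenge from another server on the user's behalf. A quick check, treated as a sketch (the ImpersonationLevel property exists on .NET 4's WindowsIdentity):

        WindowsIdentity identity = new WindowsIdentity(UPN);
        // Needs to reach TokenImpersonationLevel.Delegation (or the account
        // must be configured for constrained delegation to the target SPN)
        // before a network hop can authenticate as the user.
        Console.WriteLine(identity.ImpersonationLevel);

    With NTLM on the Alfresco side this dead-ends, since NTLM has no protocol transition; Kerberos SSO plus constrained delegation, or an Alfresco-side trusted/external authentication mechanism, are the usual ways out.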

  • Finally, upgrade from Nokia X3 to Samsung Galaxy S III

    This time, something slightly different but nonetheless not less interesting, hopefully. Living on a remote island like Mauritius, the ill-praised 'Cyber Island' in the Indian Ocean, has its advantages in lifestyle and a relaxed environment to live in, but in terms of technology it can be quite a nightmare. Well, I guess this might be a different story to report about... one day.

    Cyber Island Mauritius

    Despite its shiny advertisement as Cyber Island and business ICT hub to Africa, Mauritius is not on the latest track of available models in computer hardware or, in the context of this article, cellulars or smart-phones, or communication technology in general. Okay, I have to admit that this statement is only partly true. Money can buy, even here in Mauritius. Luckily, there are ways and ways to deal with this outcry of modern, read: technological, civilisation issues.

    Online shopping, you might think? Yes, for sure, until you discover in your checkout procedure that a small island in the Indian Ocean isn't a preferred destination for delivery, and the precious time you spent on putting your items into your cart and feeding your personal level of anticipation gets ruined on the last stint.

    Ordering from abroad saves you money

    Anyway, I got in touch with my personal courier, and luckily there were some extra kilos left in the luggage. First obstacle sorted, we have a Transporter! Okay, on the next occasion off to Amazon online, using their Prime service for fast delivery. Actually, the order was placed on Saturday evening and everything got delivered on Tuesday morning - nice job in less than 72 hours. Okay, among the items of that shopping rush I ordered a shiny Samsung Galaxy S III 16GB in oceanic blue - did I mention that you hardly get a blue model in Mauritius? - for my BWE.

    Interesting side-notes: First, Amazon Germany dropped the price by roughly 30% on the S3, and we got the 16GB model for less than 500 Euro (or approx. Rs. 19.500,-) compared to the usual Rs. 27.000,- on the local market. It even varies whether the local price is inclusive or exclusive of VAT (15%). Second, for a while she had been bothering me to get an iPhone and an iPad for her; fair enough, I thought - decent hardware, posh design and reliable services. Until we watched the 'magical' introduction of Samsung's new models at the IFA exhibition, she read the bashing comments on Google+ on the iPhone 5, and I gave her a brief summary of the lawsuit between Apple and Samsung in the USA. So, yes, Samsung USA is right, the next big thing is already here - literally. My BWE loves the look and touch of the Galaxy S3. And for me it was more cost-effective in terms of purchases done at the App Store, oops, Play Store.

    Transfer of contacts, text messages and media files

    Okay, now that the hardware is in place, how to transfer all those contacts, text messages, media files, etc. between the two devices? In the past, I used the Nokia Communication Suite between various models, but now for Android? Well, as usual Google and Bing are reliable friends, and among the first hits I came across an article about how to transfer contacts from Nokia to Android. Couldn't be easier, right? Well, sort of... my main Windows systems are already running on Windows 8, and this actually caused problems with the mobile/smart-phone device drivers. The article provides the download for an older version 1.10 which upgrades to 2.11 (as of the time of writing this entry), but neither could get the Galaxy S3 and the Nokia connected. Shame on me... the product page clearly doesn't mention Windows 8 (for now), and Windows 8 isn't available to the general audience at all... After I took a spare machine running on Windows Vista, everything went smoothly. Software installed, upgrade done, device drivers for Android automatically downloaded and installed, and the same painless routine for the Nokia part. I think I rebooted the system twice during the whole setup procedure, but hey, it was more or less a distraction while coding some stuff in ASP.NET MVC and Telerik Kendo UI. The transfer of contacts and text messages was done via Wondershare MobileGo for Android, and all media files by moving the additional microSD card from one device to the other. But even without an external SD card, it would have been very easy to copy the files via Windows Explorer directly.

    Little catch and excellent service

    Fine, we are almost done, and the only step left is to shift the SIM card... Ouch, gotcha! The X3 uses a standard-size SIM card, while the S III only accepts the microSIM form factor. What an irony: the bigger smartphone needs the smaller SIM card. Luckily, the nearest showroom of Emtel is just 5 mins away up the road, and the service staff over there know their job. Finally, after roughly 10 mins of paperwork, activation and small chit-chat, the S3 came to life on the mobile network. Owning a smart-phone now, and knowing that my BWE would like to interact more on social networks away from home, especially to upload pictures and provide local 'check-ins', I activated a data package for her in advance, too. Even though it was Saturday, everything was already done and ready to be used. Nice bonus: the Emtel clerk directly offered to set up the configuration for the Emtel data services - yes, sure, go ahead, this saves me searching for it in the settings. Okay, spoiler alert here: setting a static APN to access the Emtel network and the internet wouldn't have been a challenge. But hey, she already had the phone in her hands and I could keep my eyes on the children. Well done, Emtel!

    Resume

    Thanks to the useful software package by Wondershare, it was a hands-free experience to transfer all the data from a Nokia mobile on Symbian S60 to a Samsung Galaxy S III on Android Ice Cream Sandwich (ICS). In the future, this won't be a serious issue at all anymore, thanks to synchronisation services and cloud storage. And for now, I'm only waiting for the official upgrade to Jelly Bean.

  • Azure Diagnostics: The Bad, The Ugly, and a Better Way

    - by jasont
    If you're a .Net web developer today, no doubt you've enjoyed watching Windows Azure grow up over the past couple of years. The platform has scaled, stabilized (mostly), and added on a slew of great (and sometimes overdue) features. What was once just an endpoint to host a solution now gives developers tremendous flexibility and options in the platform. Organizations are building new solutions and offerings on the platform, and others have, or are in the process of, migrating existing applications out of their own data centers into the Azure cloud. Whether it's new application development or migrating legacy, every development shop and IT organization needs to monitor their applications in the cloud, the same as they do on premises. Azure Diagnostics has some capabilities, but what I constantly hear from users is that it's either (a) not enough, or (b) too cumbersome to set up. Today, Stackify is happy to announce that we fully support Azure deployments, just the same as your on-premises deployments. Let's take a look below and compare and contrast the options.

    Azure Diagnostics

    Let's crack open the Windows Azure documentation on Azure Diagnostics and see just how easy it is to use. The high-level steps are:

    Step 1: Import the Diagnostics
    Oh, I've already deployed my app without the diagnostics module. Guess I can't do anything until I do this and re-deploy.

    Step 2: Configure the Diagnostics (and multiple sub-steps)
    Do I want it all? Or just pieces of it? Whoops, forgot to include a specific performance counter; I guess I'll have to deploy again. Wait a minute... I have to specifically code these performance counters into my role's OnStart() method, compile and deploy again? And query and consume it myself?

    Step 3: (Optional) Permanently store diagnostic data
    Lucky for me, Azure storage has gotten pretty cheap. But how often should I move the data into storage? I want to see real-time data, so I guess that's out now as well.

    Step 4: (Optional) View stored diagnostic data
    Optional? Of course I want to see it. Conveniently, Microsoft recommends 3 tools to do this with. Un-conveniently, none of these are web based, and they all just give you access to raw data, and very little charting or real-time intelligence. Just... data. Never mind that one product seems to have gotten stale since a recent acquisition, and doesn't even have screenshots!

    So, let's summarize: lots of diagnostics data is available, but think realistically. Think DevOps. What happens when you are in the middle of a major production performance issue and you don't have the diagnostics you need? You are redeploying an application (and thankfully you have a great branching strategy, so you feel perfectly safe just willy-nilly launching code into prod, don't you?) to get data, then shipping it to storage, and then digging through that data to find a needle in a haystack. Would you like to be able to troubleshoot a performance issue in the middle of the night, or on a weekend, from your iPad or home computer's web browser? Forget it: the best you get is a spark line in the Azure portal. If it's real pointy, you probably have an issue; but since there is no alert based on a threshold, your customers have likely already let you know. And high CPU, memory, I/O, or network doesn't tell you anything about where the problem is.

    The Better Way – Stackify

    Stackify supports application and server monitoring in real time, all through a great web interface. All of the things that Azure Diagnostics provides, Stackify provides for your on-premises deployments, and you don't need to know ahead of time that you'll need it. It's always there, it's always on. Azure deployments are essentially no different than on-premises. It's a Windows Server (or Linux) in the cloud. It's behind a different firewall than your corporate servers. That's it. Stackify can provide the same powerful tools to your Azure deployments in two simple steps.

    Step 1
    Add a startup task to your web or worker role and deploy. If you can't deploy and need it right now, no worries! Remote Desktop to the Azure instance and you can execute a PowerShell script to download / install Stackify.

    Step 2
    Log in to your account at www.stackify.com and begin monitoring as much as you want, as often as you want, and see the results instantly. WMI? It's there. Event Viewer? You've got it. File system access? Yes, please! I'd love to make sure my web.config is correct. IIS / app pool info? Yep. You can even restart it. Running services? All of them. Start and stop them to your heart's content. SQL database access? You betcha. Alerts and notifications? Of course! You should know before your customers let you know. ... and so much more.

    Conclusion

    Microsoft has shown, consistently, that they love developers, developers, developers. What every developer needs to realize from this is that they've given you a canvas, which is exactly what Azure is. It's great infrastructure that is readily available, easy to manage, and fairly cost effective. However, the tooling is your responsibility. What you get, at best, is bare bones. App and server diagnostics should be available when you need them. While we, as developers, try to plan for and think of everything ahead of time, there will come times where we need to get data that just isn't available. And having to go through a lot of cumbersome steps to get that data, and then have to find a friendlier way to consume it... well, that just doesn't make a lot of sense to me. I'd rather spend my time writing and developing features and completing bug fixes for my applications than writing code to monitor and diagnose.
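
    For context, a hedged sketch of what "add a startup task to your web or worker role" looks like in the service definition (the Startup/Task elements are the standard Azure ServiceDefinition.csdef schema; the script name is assumed):

        <!-- ServiceDefinition.csdef -->
        <WebRole name="MyWebRole">
          <Startup>
            <!-- runs elevated before the role starts; the .cmd would download
                 and install the monitoring agent -->
            <Task commandLine="InstallMonitoring.cmd" executionContext="elevated" taskType="simple" />
          </Startup>
        </WebRole>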

  • Merge replication stopping without errors in SQL 2008 R2

    - by Rob Farley
    A non-SQL MVP friend of mine, who also happens to be a client, asked me for some help again last week. I was planning on writing this up even before Rob Volk (@sql_r) listed his T-SQL Tuesday topic for this month.

    Earlier in the year, I (well, LobsterPot Solutions, although I'd been the person mostly involved) had helped out with a merge replication problem. The Merge Agent on the subscriber was just stopping every time, shortly after it started. With no errors anywhere - not in the Windows Event Log, the SQL Agent logs, not anywhere. We'd managed to get the system working again, but didn't have a good reason about what had happened, and last week, the problem occurred again.

    I asked him about writing up the experience in a blog post, largely because of the red herrings that we encountered. It was an interesting experience for me, also because I didn't end up touching my computer the whole time - just tapping on my phone via Twitter and Live Msgr.

    You see, the thing with replication is that a useful troubleshooting option is to reinitialise the thing. We'd done that last time, and it had started to work again - eventually. I say eventually, because the link being used between the sites is relatively slow, and it took a long while for the initialisation to finish. Meanwhile, we'd been doing some investigation into what the problem could be, and were suitably pleased when the problem disappeared.

    So I got a message saying that a replication problem had occurred again. Reinitialising wasn't going to be an option this time either.

    In this scenario, the subscriber having the problem happened to be in a different domain to the publisher. The other subscribers (within the domain) were fine; just this one in a different domain had the problem.

    Part of the problem seemed to be a log file that wasn't being backed up properly. They'd been trying to back up to a backup device that had a corruption, and the log file was growing. Turned out, this wasn't related to the problem, but of course, any time you're troubleshooting and you see something untoward, you wonder.

    Having got past that problem, my next thought was that perhaps there was a problem with the account being used. But the other subscribers were using the same account, without any problems. The client pointed out that it was almost exactly six months since the last failure (later shown to be a complete red herring). It sounded like something might've expired. Checking through certificates and trusts showed no sign of anything, and besides, there wasn't a problem running a command-prompt window using the account in question, from the subscriber box.

    ...except that when he ran the sqlcmd -E -S servername command I recommended, it failed with a Named Pipes error. I've seen problems with firewalls rejecting connections via Named Pipes but letting TCP/IP through, so I got him to look into SQL Configuration Manager to see what kind of connection was being preferred... Everything seemed fine. And strangely, he could connect via Management Studio. Turned out, he had a typo in the servername of the sqlcmd command. That particular red herring must've been reflected in his cheeks as he told me.

    During the time, I also pinged a friend of mine to find out who I should ask, and Ted Kruger (@onpnt)'s name came up. Ted (and thanks again, Ted - really) reconfirmed some of my thoughts around the idea of an account expiring, and also suggested bumping up the logging to level 4 (2 is Verbose, 4 is undocumented ridiculousness). I'd just told the client to push the logging up to level 2, but the log file wasn't appearing. Checking permissions showed that the user did have permission on the folder, but still no file was appearing. Then it was noticed that the user had been switched earlier as part of the troubleshooting, and switching it back to the real user caused the log file to appear.

    Still no errors. A lot more information being pushed out, but still no errors. Ted suggested making sure the FQDNs were okay from both ends, in case the servers were unable to talk to each other. DNS problems can lead to hassles which can stop replication from working. No luck there either - it was all working fine.

    Another server started to report a problem as well. These two boxes were both SQL 2008 R2 (SP1), while the others, still working, were SQL 2005.

    Around this time, the client tried an idea that I'd shown him a few years ago - using a Profiler trace to see what was being called on the servers. It turned out that the last call being made on the publisher was sp_MSenumschemachange. A quick interwebs search on that showed a problem that exists in SQL Server 2008 R2, when stored procedures have more than 4000 characters. Running that stored procedure (with the same parameters) manually on SQL 2005 listed three stored procedures, the first of which did indeed have more than 4000 characters. Still no error though, and the problem as listed at http://support.microsoft.com/kb/2539378 describes an error that should occur in the Event log. However, this problem is the type of thing that is fixed by a reinitialisation (because it doesn't need to send the procedure change across as a transaction). And a look in the change history of the long stored procs (you all keep them, right?) showed that the problem from six months earlier could well have been down to this too.

    Applying SP2 (with sufficient paranoia about backups and how to get back out again if necessary) fixed the problem. The stored proc changes went through immediately after the service pack was applied, and it's been running happily since.

    The funny thing is that I didn't solve the problem. He had put the Profiler trace on the server, and had done the search that found a forum post pointing at this particular problem. I'd asked Ted too, and although he'd given some useful information, nothing that he'd come up with had actually been the solution either. Sometimes, asking for help is the most useful thing you can do. Often though, you don't end up getting the help from the person you asked - the sounding board is actually what you need.

    @rob_farley


  • Exclude one or more elements from being connected (using connectWith) in jQuery's sortable lists

    - by Lev
    I have two lists, one with an ID of "vlist" and one with an ID of "hlist". The "vlist" holds elements which should be visible, while the "hlist" holds items that should remain hidden. The idea here is to allow the administrator of the system to specify which elements/fields should be shown on a sign-up page, and which shouldn't. The two lists are connected using "connectWith", so the administrator can drag items from the visible list to the hidden list (or vice versa). My dilemma is that there are a few fields I want locked into the visible list, but still sortable within that one list. For example, the "username", "email" and "password" fields should be locked within the visible list (as they always need to be used for registration). Is this even possible? Perhaps it is a no-brainer that I simply haven't discovered yet. I've looked through jQuery's documentation for a while and can't seem to find anything related to this scenario. I have found how you can "cancel" specific elements in the list from being sortable altogether, or even disable them as droppable targets, but this doesn't do it. The user should still have the ability to drag these items within the "visible" list, in case they want to adjust the ordering of the locked fields. I'm also aware that you can contain sortable elements within a specific element or DOM object, but this can't be used either, as it only seems to apply to the whole sortable list and not to specific elements of that list. I've even tried to see if something like this would work after I built the sortable listing(s):

    $('#vlist > #slist-li-username').sortable('option', 'containment', '#vlist');

    Obviously, that didn't work or I wouldn't be posting this. In case it might help, I thought I'd throw in the code I'm using now; here is the jQuery code:

    $(function() {
        $('#vlist, #hlist').sortable({
            connectWith: '.signup-set_flist',
            forcePlaceholderSize: true,
            receive: function (event, ui) {
                var itemID = ui.item.attr('id');
                var fID = itemID.replace(/slist-li-/g, '');
                var hID = 'slist-' + fID;
                if (ui.sender.attr('id') == 'vlist') {
                    $('#' + hID).val('');
                } else {
                    $('#' + hID).val(fID);
                }
            }
        }).disableSelection();

        $('#vlist > #slist-li-username').sortable('option', 'containment', '#vlist');
    });

    And as for the HTML, I'll upload it here (since StackOverflow seems to break when I paste it in here - even in code mode): http://sikosoft.net/jquery-sort-connect.html

    Any help would greatly be appreciated! :) Oh, and be gentle, as this is my first question here. ;)
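    One approach that might be worth trying (a sketch only, not tested against the markup above): jQuery UI's sortable lets the receive event of the target list cancel an incoming drag by calling sortable("cancel") on the sending list, which snaps the item back while leaving it fully sortable where it was. In TypeScript, assuming a hypothetical "locked" class has been added to the username/email/password items:

    // Sketch: bounce "locked" items back to the visible list while still
    // allowing them to be reordered within it. `$` is declared loosely
    // to avoid pulling in @types/jquery for this illustration.
    declare const $: any;

    $(() => {
        $('#vlist, #hlist').sortable({
            connectWith: '.signup-set_flist',
            forcePlaceholderSize: true,
            receive: (event: unknown, ui: any) => {
                // A locked item arriving in this list was dragged out of the
                // other one; cancel the drag so it returns where it started.
                if (ui.item.hasClass('locked')) {
                    $(ui.sender).sortable('cancel');
                    return;
                }
                // ...the original hidden-field bookkeeping would go here.
            }
        }).disableSelection();
    });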


  • Securing WebSocket applications on Glassfish

    - by Pavel Bucek
    Today we are going to cover deploying secured WebSocket applications on Glassfish and accessing these services using the WebSocket Client API.

    WebSocket server application setup

    Our server endpoint might look as simple as this:

    @ServerEndpoint("/echo")
    public class EchoEndpoint {

        @OnMessage
        public String echo(String message) {
            return message + " (from your server)";
        }
    }

    Everything else must be configured at the container level. We can start with enabling SSL, which will require a web.xml to be added to your project. For starters, it might look as follows:

    <web-app version="3.0" xmlns="http://java.sun.com/xml/ns/javaee">
      <security-constraint>
        <web-resource-collection>
          <web-resource-name>Protected resource</web-resource-name>
          <url-pattern>/*</url-pattern>
          <http-method>GET</http-method>
        </web-resource-collection>
        <!-- https -->
        <user-data-constraint>
          <transport-guarantee>CONFIDENTIAL</transport-guarantee>
        </user-data-constraint>
      </security-constraint>
    </web-app>

    This is a minimal web.xml for this task - web-resource-collection just defines the URL pattern and HTTP method(s) we want to put a constraint on, and user-data-constraint defines that constraint, which in our case is transport-guarantee. More information about these properties and security settings for web applications can be found in the Oracle Java EE 7 Tutorial.

    I have a simple webpage attached as well, so I can test my endpoint right away. You can find it (along with the complete project) in the Tyrus workspace: [webpage] [whole project]. After deploying this application to Glassfish Application Server, you should be able to hit it using your favorite browser. The URL where my application resides is https://localhost:8181/sample-echo-https/ (yours may be different, depending on other configuration). My browser warns me about an untrusted certificate (I use what a freshly built Glassfish provides - self-signed certificates), and after adding an exception for this site, I can see my webpage and I am able to securely connect to wss://localhost:8181/sample-echo-https/echo.

    WebSocket client

    The already mentioned demo application also contains a test client, but its execution is skipped for a normal build. The reason is that Glassfish uses these self-signed "random" untrusted certificates, and you are (in most cases) not able to connect to these services without additional settings. Creating a test WebSocket client is actually quite similar to the server side; the only difference is that you have to create a client container somewhere and invoke connect with some additional info. The Java API for WebSocket allows you to use annotated and programmatic ways to construct endpoints. The server side shows the annotated case, so let's see how the programmatic approach looks.

    final WebSocketContainer client = ContainerProvider.getWebSocketContainer();
    client.connectToServer(new Endpoint() {

        @Override
        public void onOpen(Session session, EndpointConfig endpointConfig) {
            try {
                // register message handler - will just print out the
                // received message on standard output.
                session.addMessageHandler(new MessageHandler.Whole<String>() {
                    @Override
                    public void onMessage(String message) {
                        System.out.println("### Received: " + message);
                    }
                });

                // send a message
                session.getBasicRemote().sendText("Do or do not, there is no try.");
            } catch (IOException e) {
                // do nothing
            }
        }
    }, ClientEndpointConfig.Builder.create().build(),
       URI.create("wss://localhost:8181/sample-echo-https/echo"));

    This client should work with any secured endpoint that has a valid certificate signed by a trusted certificate authority (you can try it with wss://echo.websocket.org). Accessing our Glassfish instance will require some additional settings. You can tell Java which certificates you trust by adding the -Djavax.net.ssl.trustStore property (and a few others, in case you are using the linked sample). The complete command line when you are testing your service might need to look something like:

    mvn clean test -Djavax.net.ssl.trustStore=$AS_MAIN/domains/domain1/config/cacerts.jks \
        -Djavax.net.ssl.trustStorePassword=changeit -Dtyrus.test.host=localhost \
        -DskipTests=false

    where AS_MAIN points to your Glassfish instance.

    Note: you might need to set up the keyStore and trustStore per client instead of per JVM; there is a way to do it, but it is a Tyrus proprietary feature: http://tyrus.java.net/documentation/1.2.1/user-guide.html#d0e1128.

    And that's it! Now nobody is able to "hear" what you are sending to or receiving from your WebSocket endpoint. There is always room for improvement, so the next step you might want to take is to introduce some authentication mechanism (like HTTP Basic or Digest). This topic is more about container configuration, so I'm not going to go into details, but there is one thing worth mentioning: to access services which require authorization, you might need to put this additional information into the HTTP headers of the first (Upgrade) request (there is not (yet) any direct support even for these fundamental mechanisms, so users need to register a Configurator and add the headers in the beforeRequest method invocation). I filed a related feature request as TYRUS-228; feel free to comment/vote if you need this functionality.
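    For what it's worth, the same secured endpoint can also be exercised straight from a browser, where certificate trust is handled by the browser itself rather than by JVM trustStore flags. A minimal TypeScript sketch (the endpoint URL is the one from this post; with Glassfish's self-signed certificate you would first need to accept it in the browser, e.g. by visiting the https:// page):

    // Browser-side client for the secured echo endpoint (sketch).
    const socket = new WebSocket("wss://localhost:8181/sample-echo-https/echo");

    socket.onopen = () => {
        // send a message once the TLS handshake and upgrade complete
        socket.send("Do or do not, there is no try.");
    };

    socket.onmessage = (event: MessageEvent) => {
        // print whatever the echo service sends back
        console.log("### Received: " + event.data);
    };

    socket.onerror = () => {
        console.error("Connection failed - is the server certificate trusted?");
    };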


  • How to configure TATA Photon+ EC1261 HUAWEI

    - by user3215
    I'm running Ubuntu 10.04 and have a newly purchased TATA Photon+ Internet connection which supports Windows and Mac. On the Internet I found an article saying that it could be configured on Linux, and I followed the steps to install it on Ubuntu from this link. I am still not able to get online, and need some help. Also, it is very slow, but I was told that I would see speeds up to 3.1MB. I don't have wvdial installed and cannot install it from apt, as I'm not connected to the Internet. Booting into Windows, I downloaded the wvdial .deb package and tried to install it on Ubuntu, but it ended with a dependency problem. Somehow (I don't know how) I got connected to the Internet just once, and I immediately installed the wvdial package and then followed the tutorials (I could not browse and upload the files here). Since then, the device shows as connected in the network connections, but there is no Internet connection. Once I disable the device, it won't show as connected again and I'll have to restart my system. Sometimes the device itself is not detected (is there a command to re-read all the devices?).

    Output of wvdialconf /etc/wvdial.conf:

    #wvdialconf /etc/wvdial.conf
    Editing `/etc/wvdial.conf'.
    Scanning your serial ports for a modem.
    ttyS0<*1>: ATQ0 V1 E1 -- failed with 2400 baud, next try: 9600 baud
    ttyS0<*1>: ATQ0 V1 E1 -- failed with 9600 baud, next try: 115200 baud
    ttyS0<*1>: ATQ0 V1 E1 -- and failed too at 115200, giving up.
    Modem Port Scan<*1>: S1 S2 S3
    WvModem<*1>: Cannot get information for serial port.
    ttyUSB0<*1>: ATQ0 V1 E1 -- failed with 2400 baud, next try: 9600 baud
    ttyUSB0<*1>: ATQ0 V1 E1 -- failed with 9600 baud, next try: 9600 baud
    ttyUSB0<*1>: ATQ0 V1 E1 -- and failed too at 115200, giving up.
    WvModem<*1>: Cannot get information for serial port.
    ttyUSB1<*1>: ATQ0 V1 E1 -- failed with 2400 baud, next try: 9600 baud
    ttyUSB1<*1>: ATQ0 V1 E1 -- failed with 9600 baud, next try: 9600 baud
    ttyUSB1<*1>: ATQ0 V1 E1 -- and failed too at 115200, giving up.
    WvModem<*1>: Cannot get information for serial port.
    ttyUSB2<*1>: ATQ0 V1 E1 -- OK
    ttyUSB2<*1>: ATQ0 V1 E1 Z -- OK
    ttyUSB2<*1>: ATQ0 V1 E1 S0=0 -- OK
    ttyUSB2<*1>: ATQ0 V1 E1 S0=0 &C1 -- OK
    ttyUSB2<*1>: ATQ0 V1 E1 S0=0 &C1 &D2 -- OK
    ttyUSB2<*1>: ATQ0 V1 E1 S0=0 &C1 &D2 +FCLASS=0 -- OK
    ttyUSB2<*1>: Modem Identifier: ATI -- Manufacturer: +GMI: HUAWEI TECHNOLOGIES CO., LTD
    ttyUSB2<*1>: Speed 9600: AT -- OK
    ttyUSB2<*1>: Max speed is 9600; that should be safe.
    ttyUSB2<*1>: ATQ0 V1 E1 S0=0 &C1 &D2 +FCLASS=0 -- OK
    Found a modem on /dev/ttyUSB2.
    Modem configuration written to /etc/wvdial.conf.
    ttyUSB2<Info>: Speed 9600; init "ATQ0 V1 E1 S0=0 &C1 &D2 +FCLASS=0"

    Output of wvdial:

    #wvdial
    --> WvDial: Internet dialer version 1.60
    --> Cannot get information for serial port.
    --> Initializing modem.
    --> Sending: ATZ
    ATZ
    OK
    --> Sending: ATQ0 V1 E1 S0=0 &C1 &D2 +FCLASS=0
    ATQ0 V1 E1 S0=0 &C1 &D2 +FCLASS=0
    OK
    --> Sending: AT+CRM=1
    AT+CRM=1
    OK
    --> Modem initialized.
    --> Sending: ATDT#777
    --> Waiting for carrier.
    ATDT#777
    CONNECT
    --> Carrier detected. Starting PPP immediately.
    --> Starting pppd at Sat Oct 16 15:30:47 2010
    --> Pid of pppd: 5681
    --> Using interface ppp0
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> local IP address 14.96.147.104
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> remote IP address 172.29.161.223
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> primary DNS address 121.40.152.90
    --> pppd: (u;[08]@s;[08]`{;[08]
    --> secondary DNS address 121.40.152.100
    --> pppd: (u;[08]@s;[08]`{;[08]

    Output of /var/log/messages:

    Oct 16 15:29:44 avyakta-desktop pppd[5119]: secondary DNS address 121.242.190.180
    Oct 16 15:29:58 desktop pppd[5119]: Terminating on signal 15
    Oct 16 15:29:58 desktop pppd[5119]: Connect time 0.3 minutes.
    Oct 16 15:29:58 desktop pppd[5119]: Sent 0 bytes, received 177 bytes.
    Oct 16 15:29:58 desktop pppd[5119]: Connection terminated.
    Oct 16 15:30:47 desktop pppd[5681]: pppd 2.4.5 started by root, uid 0
    Oct 16 15:30:47 desktop pppd[5681]: Using interface ppp0
    Oct 16 15:30:47 desktop pppd[5681]: Connect: ppp0 <--> /dev/ttyUSB2
    Oct 16 15:30:47 desktop pppd[5681]: CHAP authentication succeeded
    Oct 16 15:30:47 desktop pppd[5681]: CHAP authentication succeeded
    Oct 16 15:30:48 desktop pppd[5681]: local IP address 14.96.147.104
    Oct 16 15:30:48 desktop pppd[5681]: remote IP address 172.29.161.223
    Oct 16 15:30:48 desktop pppd[5681]: primary DNS address 121.40.152.90
    Oct 16 15:30:48 desktop pppd[5681]: secondary DNS address 121.40.152.100

    EDIT 1: I tried the following:

    sudo stop network-manager
    sudo killall modem-manager
    sudo /usr/sbin/modem-manager --debug > ~/mm.log 2>&1 &
    sudo /usr/sbin/NetworkManager --no-daemon > ~/nm.log 2>&1 &

    Output of mm.log (#vim ~/mm.log):

    ** Message: Loaded plugin Option High-Speed
    ** Message: Loaded plugin Option
    ** Message: Loaded plugin Huawei
    ** Message: Loaded plugin Longcheer
    ** Message: Loaded plugin AnyData
    ** Message: Loaded plugin ZTE
    ** Message: Loaded plugin Ericsson MBM
    ** Message: Loaded plugin Sierra
    ** Message: Loaded plugin Generic
    ** Message: Loaded plugin Gobi
    ** Message: Loaded plugin Novatel
    ** Message: Loaded plugin Nokia
    ** Message: Loaded plugin MotoC

    Output of nm.log (#vim ~/nm.log):

    NetworkManager: <info> starting...
    NetworkManager: <info> modem-manager is now available
    NetworkManager: SCPlugin-Ifupdown: init!
    NetworkManager: SCPlugin-Ifupdown: update_system_hostname
    NetworkManager: SCPluginIfupdown: guessed connection type (eth0) = 802-3-ethernet
    NetworkManager: SCPlugin-Ifupdown: update_connection_setting_from_if_block: name:eth0, type:802-3-ethernet, id:Ifupdown (eth0), uuid: 681b428f-beaf-8932-dce4-678ed5bae28e
    NetworkManager: SCPlugin-Ifupdown: addresses count: 1
    NetworkManager: SCPlugin-Ifupdown: No dns-nameserver configured in /etc/network/interfaces
    NetworkManager: nm-ifupdown-connection.c.119 - invalid connection read from /etc/network/interfaces: (1) addresses
    NetworkManager: SCPluginIfupdown: management mode: unmanaged
    NetworkManager: SCPlugin-Ifupdown: devices added (path: /sys/devices/pci0000:00/0000:00:14.4/0000:02:02.0/net/eth1, iface: eth1)
    NetworkManager: SCPlugin-Ifupdown: device added (path: /sys/devices/pci0000:00/0000:00:14.4/0000:02:02.0/net/eth1, iface: eth1): no ifupdown configuration found.
    NetworkManager: SCPlugin-Ifupdown: devices added (path: /sys/devices/virtual/net/lo, iface: lo)


  • ASP.Net JSON Web Service Post Form Data

    - by Will D
    I have an ASP.NET web service decorated with System.Web.Script.Services.ScriptService() so it can return JSON-formatted data. This much is working for me, but ASP.NET has a requirement that parameters to the web service must be in JSON in order to get JSON out. I'm using jQuery to run my AJAX calls, and there doesn't seem to be an easy way to create a nice JavaScript object from the form elements. I have looked at serializeArray (with the json2 library), but it doesn't encode the field names as property names in the object. If you have two form elements like this:

    <input type="text" name="namefirst" id="namefirst" value="John"/>
    <input type="text" name="namelast" id="namelast" value="Doe"/>

    calling $("form").serialize() will get you the standard query string:

    namefirst=John&namelast=Doe

    while calling JSON.stringify($("form").serializeArray()) will get you the (bulky) JSON representation:

    [{"name":"namefirst","value":"John"},{"name":"namelast","value":"Doe"}]

    This will work when passing to the web service, but it's ugly, as you have to have code like this to read it in:

    Public Class NameValuePair
        Public name As String
        Public value As String
    End Class

    <WebMethod()> _
    Public Function GetQuote(ByVal nvp As NameValuePair()) As String
    End Function

    You would also have to wrap that JSON text inside another object named nvp to make the web service happy. Then it's more work, as all you have is an array of NameValuePair when you want an associative array. I might be kidding myself, but I imagined something more elegant when I started this project - more like this:

    Public Class Person
        Public namefirst As String
        Public namelast As String
    End Class

    which would require the JSON to look something like this:

    {"namefirst":"John","namelast":"Doe"}

    Is there an easy way to do this? Obviously it is simple for a form with two parameters, but when you have a very large form, concatenating strings gets ugly. Having nested objects would also complicate things.

    The kludge I have settled on for the moment is to use the standard name/value pair format stuffed inside a JSON object. This is compact and fast:

    {"q":"namefirst=John&namelast=Doe"}

    I then have a web method like this on the server that parses the query string into an associative array:

    <WebMethod()> _
    Public Function AjaxForm(ByVal q As String) As String
        Dim params As NameValueCollection = HttpUtility.ParseQueryString(q)
        ' do stuff
        Return "Hello"
    End Function

    As far as kludges go, this one seems reasonably elegant in terms of the amount of code, but my question is: is there a better way? Is there a generally accepted way of passing form data to ASP.NET web/script services?
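    For what it's worth, a small client-side fold over serializeArray() can produce the compact {"namefirst":"John","namelast":"Doe"} shape directly, avoiding the query-string kludge. A TypeScript sketch (the person parameter name is hypothetical; the outer property just has to match the web method's parameter name, as noted above); it does not handle repeated field names or nested structures:

    // Sketch: turn <form> fields into a plain { name: value } object.
    declare const $: any; // loose typing to keep the sketch self-contained

    interface FormField {
        name: string;
        value: string;
    }

    function formToObject(selector: string): Record<string, string> {
        const fields: FormField[] = $(selector).serializeArray();
        return fields.reduce((obj: Record<string, string>, f: FormField) => {
            obj[f.name] = f.value;
            return obj;
        }, {});
    }

    // Wrap it under the web method's parameter name before POSTing:
    const payload = JSON.stringify({ person: formToObject("form") });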


  • Decompressing a very large serialized object and managing memory

    - by Mike_G
    I have an object that contains tons of data used for reports. In order to get this object from the server to the client, I first serialize the object into a memory stream, then compress it using the GZip stream of .NET. I then send the compressed object as a byte[] to the client. The problem is that on some clients, when they get the byte[] and try to decompress and deserialize the object, a System.OutOfMemoryException is thrown. I've read that this exception can be caused by new()-ing a bunch of objects, or by holding on to a bunch of strings. Both of these happen during the deserialization process. So my question is: how do I prevent the exception (any good strategies)? The client needs all of the data, and I've trimmed down the number of strings as much as I can.

    Edit: here is the code I am using to serialize/compress (implemented as extension methods):

    public static byte[] SerializeObject<T>(this object obj, T serializer) where T : XmlObjectSerializer
    {
        Type t = obj.GetType();
        if (!Attribute.IsDefined(t, typeof(DataContractAttribute)))
            return null;

        byte[] initialBytes;
        using (MemoryStream stream = new MemoryStream())
        {
            serializer.WriteObject(stream, obj);
            initialBytes = stream.ToArray();
        }
        return initialBytes;
    }

    public static byte[] CompressObject<T>(this object obj, T serializer) where T : XmlObjectSerializer
    {
        Type t = obj.GetType();
        if (!Attribute.IsDefined(t, typeof(DataContractAttribute)))
            return null;

        byte[] initialBytes = obj.SerializeObject(serializer);
        byte[] compressedBytes;
        using (MemoryStream stream = new MemoryStream(initialBytes))
        {
            using (MemoryStream output = new MemoryStream())
            {
                using (GZipStream zipper = new GZipStream(output, CompressionMode.Compress))
                {
                    Pump(stream, zipper);
                }
                compressedBytes = output.ToArray();
            }
        }
        return compressedBytes;
    }

    internal static void Pump(Stream input, Stream output)
    {
        byte[] bytes = new byte[4096];
        int n;
        while ((n = input.Read(bytes, 0, bytes.Length)) != 0)
        {
            output.Write(bytes, 0, n);
        }
    }

    And here is my code for decompress/deserialize:

    public static T DeSerializeObject<T, TU>(this byte[] serializedObject, TU deserializer) where TU : XmlObjectSerializer
    {
        using (MemoryStream stream = new MemoryStream(serializedObject))
        {
            return (T)deserializer.ReadObject(stream);
        }
    }

    public static T DecompressObject<T, TU>(this byte[] compressedBytes, TU deserializer) where TU : XmlObjectSerializer
    {
        byte[] decompressedBytes;
        using (MemoryStream stream = new MemoryStream(compressedBytes))
        {
            using (MemoryStream output = new MemoryStream())
            {
                using (GZipStream zipper = new GZipStream(stream, CompressionMode.Decompress))
                {
                    ObjectExtensions.Pump(zipper, output);
                }
                decompressedBytes = output.ToArray();
            }
        }
        return decompressedBytes.DeSerializeObject<T, TU>(deserializer);
    }

    The object that I am passing is a wrapper object; it just contains all the relevant objects that hold the data. The number of objects can be large (depending on the report's date range), and I've seen as many as 25k strings. One thing I forgot to mention is that I am using WCF, and since the inner objects are passed individually through other WCF calls, I am using the DataContract serializer, and all my objects are marked with the DataContract attribute.
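    As a side note on the memory question (this is not the poster's code, and the .NET-specific answer would be to let the deserializer read directly from the decompression stream rather than calling output.ToArray() first): the general way out of materializing the whole decompressed payload as a byte[] is streaming, so only one buffer-sized chunk lives in memory at a time. The same idea expressed as a Node.js/TypeScript sketch, purely to illustrate the streaming pattern the Pump() helper above approximates:

    // Sketch: streaming gzip decompression - no full in-memory copy is made.
    import { createReadStream, createWriteStream } from "node:fs";
    import { createGunzip } from "node:zlib";
    import { pipeline } from "node:stream/promises";

    async function decompressToFile(src: string, dest: string): Promise<void> {
        // pipeline() moves fixed-size chunks end to end and propagates
        // errors and backpressure automatically.
        await pipeline(
            createReadStream(src),
            createGunzip(),
            createWriteStream(dest)
        );
    }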

