Search Results

Search found 180 results on 8 pages for 'norm'.

Page 8/8 | < Previous Page | 4 5 6 7 8 

  • python- scipy optimization

    - by pear
    In scipy fmin_slsqp (Sequential Least Squares Quadratic Programming), I tried reading the code 'slsqp.py' provided with the scipy package, to find what are the criteria to get the exit_modes 0? I cannot find which statements in the code produce this exit mode? Please help me 'slsqp.py' code as follows, exit_modes = { -1 : "Gradient evaluation required (g & a)", 0 : "Optimization terminated successfully.", 1 : "Function evaluation required (f & c)", 2 : "More equality constraints than independent variables", 3 : "More than 3*n iterations in LSQ subproblem", 4 : "Inequality constraints incompatible", 5 : "Singular matrix E in LSQ subproblem", 6 : "Singular matrix C in LSQ subproblem", 7 : "Rank-deficient equality constraint subproblem HFTI", 8 : "Positive directional derivative for linesearch", 9 : "Iteration limit exceeded" } def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None, bounds = [], fprime = None, fprime_eqcons=None, fprime_ieqcons=None, args = (), iter = 100, acc = 1.0E-6, iprint = 1, full_output = 0, epsilon = _epsilon ): # Now do a lot of function wrapping # Wrap func feval, func = wrap_function(func, args) # Wrap fprime, if provided, or approx_fprime if not if fprime: geval, fprime = wrap_function(fprime,args) else: geval, fprime = wrap_function(approx_fprime,(func,epsilon)) if f_eqcons: # Equality constraints provided via f_eqcons ceval, f_eqcons = wrap_function(f_eqcons,args) if fprime_eqcons: # Wrap fprime_eqcons geval, fprime_eqcons = wrap_function(fprime_eqcons,args) else: # Wrap approx_jacobian geval, fprime_eqcons = wrap_function(approx_jacobian, (f_eqcons,epsilon)) else: # Equality constraints provided via eqcons[] eqcons_prime = [] for i in range(len(eqcons)): eqcons_prime.append(None) if eqcons[i]: # Wrap eqcons and eqcons_prime ceval, eqcons[i] = wrap_function(eqcons[i],args) geval, eqcons_prime[i] = wrap_function(approx_fprime, (eqcons[i],epsilon)) if f_ieqcons: # Inequality constraints provided via f_ieqcons ceval, f_ieqcons = wrap_function(f_ieqcons,args) if fprime_ieqcons: # Wrap fprime_ieqcons geval, fprime_ieqcons = wrap_function(fprime_ieqcons,args) else: # Wrap approx_jacobian geval, fprime_ieqcons = wrap_function(approx_jacobian, (f_ieqcons,epsilon)) else: # Inequality constraints provided via ieqcons[] ieqcons_prime = [] for i in range(len(ieqcons)): ieqcons_prime.append(None) if ieqcons[i]: # Wrap ieqcons and ieqcons_prime ceval, ieqcons[i] = wrap_function(ieqcons[i],args) geval, ieqcons_prime[i] = wrap_function(approx_fprime, (ieqcons[i],epsilon)) # Transform x0 into an array. 
x = asfarray(x0).flatten() # Set the parameters that SLSQP will need # meq = The number of equality constraints if f_eqcons: meq = len(f_eqcons(x)) else: meq = len(eqcons) if f_ieqcons: mieq = len(f_ieqcons(x)) else: mieq = len(ieqcons) # m = The total number of constraints m = meq + mieq # la = The number of constraints, or 1 if there are no constraints la = array([1,m]).max() # n = The number of independent variables n = len(x) # Define the workspaces for SLSQP n1 = n+1 mineq = m - meq + n1 + n1 len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + 2*meq + n1 +(n+1)*n/2 + 2*m + 3*n + 3*n1 + 1 len_jw = mineq w = zeros(len_w) jw = zeros(len_jw) # Decompose bounds into xl and xu if len(bounds) == 0: bounds = [(-1.0E12, 1.0E12) for i in range(n)] elif len(bounds) != n: raise IndexError, \ 'SLSQP Error: If bounds is specified, len(bounds) == len(x0)' else: for i in range(len(bounds)): if bounds[i][0] > bounds[i][1]: raise ValueError, \ 'SLSQP Error: lb > ub in bounds[' + str(i) +'] ' + str(bounds[4]) xl = array( [ b[0] for b in bounds ] ) xu = array( [ b[1] for b in bounds ] ) # Initialize the iteration counter and the mode value mode = array(0,int) acc = array(acc,float) majiter = array(iter,int) majiter_prev = 0 # Print the header if iprint >= 2 if iprint >= 2: print "%5s %5s %16s %16s" % ("NIT","FC","OBJFUN","GNORM") while 1: if mode == 0 or mode == 1: # objective and constraint evaluation requird # Compute objective function fx = func(x) # Compute the constraints if f_eqcons: c_eq = f_eqcons(x) else: c_eq = array([ eqcons[i](x) for i in range(meq) ]) if f_ieqcons: c_ieq = f_ieqcons(x) else: c_ieq = array([ ieqcons[i](x) for i in range(len(ieqcons)) ]) # Now combine c_eq and c_ieq into a single matrix if m == 0: # no constraints c = zeros([la]) else: # constraints exist if meq > 0 and mieq == 0: # only equality constraints c = c_eq if meq == 0 and mieq > 0: # only inequality constraints c = c_ieq if meq > 0 and mieq > 0: # both equality and inequality constraints exist c = append(c_eq, c_ieq) if mode == 0 or mode == -1: # gradient evaluation required # Compute the derivatives of the objective function # For some reason SLSQP wants g dimensioned to n+1 g = append(fprime(x),0.0) # Compute the normals of the constraints if fprime_eqcons: a_eq = fprime_eqcons(x) else: a_eq = zeros([meq,n]) for i in range(meq): a_eq[i] = eqcons_prime[i](x) if fprime_ieqcons: a_ieq = fprime_ieqcons(x) else: a_ieq = zeros([mieq,n]) for i in range(mieq): a_ieq[i] = ieqcons_prime[i](x) # Now combine a_eq and a_ieq into a single a matrix if m == 0: # no constraints a = zeros([la,n]) elif meq > 0 and mieq == 0: # only equality constraints a = a_eq elif meq == 0 and mieq > 0: # only inequality constraints a = a_ieq elif meq > 0 and mieq > 0: # both equality and inequality constraints exist a = vstack((a_eq,a_ieq)) a = concatenate((a,zeros([la,1])),1) # Call SLSQP slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw) # Print the status of the current iterate if iprint > 2 and the # major iteration has incremented if iprint >= 2 and majiter > majiter_prev: print "%5i %5i % 16.6E % 16.6E" % (majiter,feval[0], fx,linalg.norm(g)) # If exit mode is not -1 or 1, slsqp has completed if abs(mode) != 1: break majiter_prev = int(majiter) # Optimization loop complete. 
Print status if requested if iprint >= 1: print exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')' print " Current function value:", fx print " Iterations:", majiter print " Function evaluations:", feval[0] print " Gradient evaluations:", geval[0] if not full_output: return x else: return [list(x), float(fx), int(majiter), int(mode), exit_modes[int(mode)] ]

    Read the article

  • NoSQL with RavenDB and ASP.NET MVC - Part 1

    - by shiju
     A while back, I have blogged NoSQL with MongoDB, NoRM and ASP.NET MVC Part 1 and Part 2 on how to use MongoDB with an ASP.NET MVC application. The NoSQL movement is getting big attention and RavenDB is the latest addition to the NoSQL and document database world. RavenDB is an Open Source (with a commercial option) document database for the .NET/Windows platform developed  by Ayende Rahien.  Raven stores schema-less JSON documents, allow you to define indexes using Linq queries and focus on low latency and high performance. RavenDB is .NET focused document database which comes with a fully functional .NET client API  and supports LINQ. RavenDB comes with two components, a server and a client API. RavenDB is a REST based system, so you can write your own HTTP cleint API. As a .NET developer, RavenDB is becoming my favorite document database. Unlike other document databases, RavenDB is supports transactions using System.Transactions. Also it's supports both embedded and server mode of database. You can access RavenDB site at http://ravendb.netA demo App with ASP.NET MVCLet's create a simple demo app with RavenDB and ASP.NET MVC. To work with RavenDB, do the following steps. Go to http://ravendb.net/download and download the latest build.Unzip the downloaded file.Go to the /Server directory and run the RavenDB.exe. This will start the RavenDB server listening on localhost:8080You can change the port of RavenDB  by modifying the "Raven/Port" appSetting value in the RavenDB.exe.config file.When running the RavenDB, it will automatically create a database in the /Data directory. You can change the directory name data by modifying "Raven/DataDirt" appSetting value in the RavenDB.exe.config file.RavenDB provides a browser based admin tool. When the Raven server is running, You can be access the browser based admin tool and view and edit documents and index using your browser admin tool. The web admin tool available at http://localhost:8080The below is the some screen shots of web admin tool     Working with ASP.NET MVC  To working with RavenDB in our demo ASP.NET MVC application, do the following steps Step 1 - Add reference to Raven Cleint API In our ASP.NET MVC application, Add a reference to the Raven.Client.Lightweight.dll from the Client directory. Step 2 - Create DocumentStoreThe document store would be created once per application. Let's create a DocumentStore on application start-up in the Global.asax.cs. documentStore = new DocumentStore { Url = "http://localhost:8080/" }; documentStore.Initialise(); The above code will create a Raven DB document store and will be listening the server locahost at port 8080    Step 3 - Create DocumentSession on BeginRequest   Let's create a DocumentSession on BeginRequest event in the Global.asax.cs. We are using the document session for every unit of work. In our demo app, every HTTP request would be a single Unit of Work (UoW). 
BeginRequest += (sender, args) =>   HttpContext.Current.Items[RavenSessionKey] = documentStore.OpenSession(); Step 4 - Destroy the DocumentSession on EndRequest  EndRequest += (o, eventArgs) => {     var disposable = HttpContext.Current.Items[RavenSessionKey] as IDisposable;     if (disposable != null)         disposable.Dispose(); };  At the end of HTTP request, we are destroying the DocumentSession  object.The below  code block shown all the code in the Global.asax.cs  private const string RavenSessionKey = "RavenMVC.Session"; private static DocumentStore documentStore;   protected void Application_Start() { //Create a DocumentStore in Application_Start //DocumentStore should be created once per application and stored as a singleton. documentStore = new DocumentStore { Url = "http://localhost:8080/" }; documentStore.Initialise(); AreaRegistration.RegisterAllAreas(); RegisterRoutes(RouteTable.Routes); //DI using Unity 2.0 ConfigureUnity(); }   public MvcApplication() { //Create a DocumentSession on BeginRequest   //create a document session for every unit of work BeginRequest += (sender, args) =>     HttpContext.Current.Items[RavenSessionKey] = documentStore.OpenSession(); //Destroy the DocumentSession on EndRequest EndRequest += (o, eventArgs) => { var disposable = HttpContext.Current.Items[RavenSessionKey] as IDisposable; if (disposable != null) disposable.Dispose(); }; }   //Getting the current DocumentSession public static IDocumentSession CurrentSession {   get { return (IDocumentSession)HttpContext.Current.Items[RavenSessionKey]; } }  We have setup all necessary code in the Global.asax.cs for working with RavenDB. For our demo app, Let’s write a domain class  public class Category {       public string Id { get; set; }       [Required(ErrorMessage = "Name Required")]     [StringLength(25, ErrorMessage = "Must be less than 25 characters")]     public string Name { get; set;}     public string Description { get; set; }   } We have created simple domain entity Category. Let's create repository class for performing CRUD operations against our domain entity Category.  
public interface ICategoryRepository {     Category Load(string id);     IEnumerable<Category> GetCategories();     void Save(Category category);     void Delete(string id);       }    public class CategoryRepository : ICategoryRepository {     private IDocumentSession session;     public CategoryRepository()     {             session = MvcApplication.CurrentSession;     }     //Load category based on Id     public Category Load(string id)     {         return session.Load<Category>(id);     }     //Get all categories     public IEnumerable<Category> GetCategories()     {         var categories= session.LuceneQuery<Category>()                 .WaitForNonStaleResults()             .ToArray();         return categories;       }     //Insert/Update category     public void Save(Category category)     {         if (string.IsNullOrEmpty(category.Id))         {             //insert new record             session.Store(category);         }         else         {             //edit record             var categoryToEdit = Load(category.Id);             categoryToEdit.Name = category.Name;             categoryToEdit.Description = category.Description;         }         //save the document session         session.SaveChanges();     }     //delete a category     public void Delete(string id)     {         var category = Load(id);         session.Delete<Category>(category);         session.SaveChanges();     }        } For every CRUD operations, we are taking the current document session object from HttpContext object. session = MvcApplication.CurrentSession; We are calling the static method CurrentSession from the Global.asax.cs public static IDocumentSession CurrentSession {     get { return (IDocumentSession)HttpContext.Current.Items[RavenSessionKey]; } }  Retrieve Entities  The Load method get the single Category object based on the Id. RavenDB is working based on the REST principles and the Id would be like categories/1. The Id would be created by automatically when a new object is inserted to the document store. The REST uri categories/1 represents a single category object with Id representation of 1.   public Category Load(string id) {    return session.Load<Category>(id); } The GetCategories method returns all the categories calling the session.LuceneQuery method. RavenDB is using a lucen query syntax for querying. I will explain more details about querying and indexing in my future posts.   public IEnumerable<Category> GetCategories() {     var categories= session.LuceneQuery<Category>()             .WaitForNonStaleResults()         .ToArray();     return categories;   } Insert/Update entityFor insert/Update a Category entity, we have created Save method in repository class. If  the Id property of Category is null, we call Store method of Documentsession for insert a new record. For editing a existing record, we load the Category object and assign the values to the loaded Category object. The session.SaveChanges() will save the changes to document store.  
//Insert/Update category public void Save(Category category) {     if (string.IsNullOrEmpty(category.Id))     {         //insert new record         session.Store(category);     }     else     {         //edit record         var categoryToEdit = Load(category.Id);         categoryToEdit.Name = category.Name;         categoryToEdit.Description = category.Description;     }     //save the document session     session.SaveChanges(); }  Delete Entity  In the Delete method, we call the document session's delete method and call the SaveChanges method to reflect changes in the document store.  public void Delete(string id) {     var category = Load(id);     session.Delete<Category>(category);     session.SaveChanges(); }  Let’s create ASP.NET MVC controller and controller actions for handling CRUD operations for the domain class Category  public class CategoryController : Controller { private ICategoryRepository categoyRepository; //DI enabled constructor public CategoryController(ICategoryRepository categoyRepository) {     this.categoyRepository = categoyRepository; } public ActionResult Index() {         var categories = categoyRepository.GetCategories();     if (categories == null)         return RedirectToAction("Create");     return View(categories); }   [HttpGet] public ActionResult Edit(string id) {     var category = categoyRepository.Load(id);         return View("Save",category); } // GET: /Category/Create [HttpGet] public ActionResult Create() {     var category = new Category();     return View("Save", category); } [HttpPost] public ActionResult Save(Category category) {     if (!ModelState.IsValid)     {         return View("Save", category);     }           categoyRepository.Save(category);         return RedirectToAction("Index");     }        [HttpPost] public ActionResult Delete(string id) {     categoyRepository.Delete(id);     var categories = categoyRepository.GetCategories();     return PartialView("CategoryList", categories);      }        }  RavenDB is an awesome document database and I hope that it will be the winner in .NET space of document database world.  The source code of demo application available at http://ravenmvc.codeplex.com/

    Read the article

  • A way of doing real-world test-driven development (and some thoughts about it)

    - by Thomas Weller
    Lately, I exchanged some arguments with Derick Bailey about some details of the red-green-refactor cycle of the Test-driven development process. In short, the issue revolved around the fact that it’s not enough to have a test red or green, but it’s also important to have it red or green for the right reasons. While for me, it’s sufficient to initially have a NotImplementedException in place, Derick argues that this is not totally correct (see these two posts: Red/Green/Refactor, For The Right Reasons and Red For The Right Reason: Fail By Assertion, Not By Anything Else). And he’s right. But on the other hand, I had no idea how his insights could have any practical consequence for my own individual interpretation of the red-green-refactor cycle (which is not really red-green-refactor, at least not in its pure sense, see the rest of this article). This made me think deeply for some days now. In the end I found out that the ‘right reason’ changes in my understanding depending on what development phase I’m in. To make this clear (at least I hope it becomes clear…) I started to describe my way of working in some detail, and then something strange happened: The scope of the article slightly shifted from focusing ‘only’ on the ‘right reason’ issue to something more general, which you might describe as something like  'Doing real-world TDD in .NET , with massive use of third-party add-ins’. This is because I feel that there is a more general statement about Test-driven development to make:  It’s high time to speak about the ‘How’ of TDD, not always only the ‘Why’. Much has been said about this, and me myself also contributed to that (see here: TDD is not about testing, it's about how we develop software). But always justifying what you do is very unsatisfying in the long run, it is inherently defensive, and it costs time and effort that could be used for better and more important things. And frankly: I’m somewhat sick and tired of repeating time and again that the test-driven way of software development is highly preferable for many reasons - I don’t want to spent my time exclusively on stating the obvious… So, again, let’s say it clearly: TDD is programming, and programming is TDD. Other ways of programming (code-first, sometimes called cowboy-coding) are exceptional and need justification. – I know that there are many people out there who will disagree with this radical statement, and I also know that it’s not a description of the real world but more of a mission statement or something. But nevertheless I’m absolutely sure that in some years this statement will be nothing but a platitude. Side note: Some parts of this post read as if I were paid by Jetbrains (the manufacturer of the ReSharper add-in – R#), but I swear I’m not. Rather I think that Visual Studio is just not production-complete without it, and I wouldn’t even consider to do professional work without having this add-in installed... The three parts of a software component Before I go into some details, I first should describe my understanding of what belongs to a software component (assembly, type, or method) during the production process (i.e. the coding phase). Roughly, I come up with the three parts shown below:   First, we need to have some initial sort of requirement. This can be a multi-page formal document, a vague idea in some programmer’s brain of what might be needed, or anything in between. In either way, there has to be some sort of requirement, be it explicit or not. 
– At the C# micro-level, the best way that I found to formulate that is to define interfaces for just about everything, even for internal classes, and to provide them with exhaustive xml comments. The next step then is to re-formulate these requirements in an executable form. This is specific to the respective programming language. - For C#/.NET, the Gallio framework (which includes MbUnit) in conjunction with the ReSharper add-in for Visual Studio is my toolset of choice. The third part then finally is the production code itself. It’s development is entirely driven by the requirements and their executable formulation. This is the delivery, the two other parts are ‘only’ there to make its production possible, to give it a decent quality and reliability, and to significantly reduce related costs down the maintenance timeline. So while the first two parts are not really relevant for the customer, they are very important for the developer. The customer (or in Scrum terms: the Product Owner) is not interested at all in how  the product is developed, he is only interested in the fact that it is developed as cost-effective as possible, and that it meets his functional and non-functional requirements. The rest is solely a matter of the developer’s craftsmanship, and this is what I want to talk about during the remainder of this article… An example To demonstrate my way of doing real-world TDD, I decided to show the development of a (very) simple Calculator component. The example is deliberately trivial and silly, as examples always are. I am totally aware of the fact that real life is never that simple, but I only want to show some development principles here… The requirement As already said above, I start with writing down some words on the initial requirement, and I normally use interfaces for that, even for internal classes - the typical question “intf or not” doesn’t even come to mind. I need them for my usual workflow and using them automatically produces high componentized and testable code anyway. To think about their usage in every single situation would slow down the production process unnecessarily. So this is what I begin with: namespace Calculator {     /// <summary>     /// Defines a very simple calculator component for demo purposes.     /// </summary>     public interface ICalculator     {         /// <summary>         /// Gets the result of the last successful operation.         /// </summary>         /// <value>The last result.</value>         /// <remarks>         /// Will be <see langword="null" /> before the first successful operation.         /// </remarks>         double? LastResult { get; }       } // interface ICalculator   } // namespace Calculator So, I’m not beginning with a test, but with a sort of code declaration - and still I insist on being 100% test-driven. There are three important things here: Starting this way gives me a method signature, which allows to use IntelliSense and AutoCompletion and thus eliminates the danger of typos - one of the most regular, annoying, time-consuming, and therefore expensive sources of error in the development process. In my understanding, the interface definition as a whole is more of a readable requirement document and technical documentation than anything else. So this is at least as much about documentation than about coding. The documentation must completely describe the behavior of the documented element. I normally use an IoC container or some sort of self-written provider-like model in my architecture. 
In either case, I need my components defined via service interfaces anyway. - I will use the LinFu IoC framework here, for no other reason as that is is very simple to use. The ‘Red’ (pt. 1)   First I create a folder for the project’s third-party libraries and put the LinFu.Core dll there. Then I set up a test project (via a Gallio project template), and add references to the Calculator project and the LinFu dll. Finally I’m ready to write the first test, which will look like the following: namespace Calculator.Test {     [TestFixture]     public class CalculatorTest     {         private readonly ServiceContainer container = new ServiceContainer();           [Test]         public void CalculatorLastResultIsInitiallyNull()         {             ICalculator calculator = container.GetService<ICalculator>();               Assert.IsNull(calculator.LastResult);         }       } // class CalculatorTest   } // namespace Calculator.Test       This is basically the executable formulation of what the interface definition states (part of). Side note: There’s one principle of TDD that is just plain wrong in my eyes: I’m talking about the Red is 'does not compile' thing. How could a compiler error ever be interpreted as a valid test outcome? I never understood that, it just makes no sense to me. (Or, in Derick’s terms: this reason is as wrong as a reason ever could be…) A compiler error tells me: Your code is incorrect, but nothing more.  Instead, the ‘Red’ part of the red-green-refactor cycle has a clearly defined meaning to me: It means that the test works as intended and fails only if its assumptions are not met for some reason. Back to our Calculator. When I execute the above test with R#, the Gallio plugin will give me this output: So this tells me that the test is red for the wrong reason: There’s no implementation that the IoC-container could load, of course. So let’s fix that. With R#, this is very easy: First, create an ICalculator - derived type:        Next, implement the interface members: And finally, move the new class to its own file: So far my ‘work’ was six mouse clicks long, the only thing that’s left to do manually here, is to add the Ioc-specific wiring-declaration and also to make the respective class non-public, which I regularly do to force my components to communicate exclusively via interfaces: This is what my Calculator class looks like as of now: using System; using LinFu.IoC.Configuration;   namespace Calculator {     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         public double? LastResult         {             get             {                 throw new NotImplementedException();             }         }     } } Back to the test fixture, we have to put our IoC container to work: [TestFixture] public class CalculatorTest {     #region Fields       private readonly ServiceContainer container = new ServiceContainer();       #endregion // Fields       #region Setup/TearDown       [FixtureSetUp]     public void FixtureSetUp()     {        container.LoadFrom(AppDomain.CurrentDomain.BaseDirectory, "Calculator.dll");     }       ... Because I have a R# live template defined for the setup/teardown method skeleton as well, the only manual coding here again is the IoC-specific stuff: two lines, not more… The ‘Red’ (pt. 2) Now, the execution of the above test gives the following result: This time, the test outcome tells me that the method under test is called. 
And this is the point, where Derick and I seem to have somewhat different views on the subject: Of course, the test still is worthless regarding the red/green outcome (or: it’s still red for the wrong reasons, in that it gives a false negative). But as far as I am concerned, I’m not really interested in the test outcome at this point of the red-green-refactor cycle. Rather, I only want to assert that my test actually calls the right method. If that’s the case, I will happily go on to the ‘Green’ part… The ‘Green’ Making the test green is quite trivial. Just make LastResult an automatic property:     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         public double? LastResult { get; private set; }     }         One more round… Now on to something slightly more demanding (cough…). Let’s state that our Calculator exposes an Add() method:         ...   /// <summary>         /// Adds the specified operands.         /// </summary>         /// <param name="operand1">The operand1.</param>         /// <param name="operand2">The operand2.</param>         /// <returns>The result of the additon.</returns>         /// <exception cref="ArgumentException">         /// Argument <paramref name="operand1"/> is &lt; 0.<br/>         /// -- or --<br/>         /// Argument <paramref name="operand2"/> is &lt; 0.         /// </exception>         double Add(double operand1, double operand2);       } // interface ICalculator A remark: I sometimes hear the complaint that xml comment stuff like the above is hard to read. That’s certainly true, but irrelevant to me, because I read xml code comments with the CR_Documentor tool window. And using that, it looks like this:   Apart from that, I’m heavily using xml code comments (see e.g. here for a detailed guide) because there is the possibility of automating help generation with nightly CI builds (using MS Sandcastle and the Sandcastle Help File Builder), and then publishing the results to some intranet location.  This way, a team always has first class, up-to-date technical documentation at hand about the current codebase. (And, also very important for speeding up things and avoiding typos: You have IntelliSense/AutoCompletion and R# support, and the comments are subject to compiler checking…).     Back to our Calculator again: Two more R# – clicks implement the Add() skeleton:         ...           public double Add(double operand1, double operand2)         {             throw new NotImplementedException();         }       } // class Calculator As we have stated in the interface definition (which actually serves as our requirement document!), the operands are not allowed to be negative. So let’s start implementing that. Here’s the test: [Test] [Row(-0.5, 2)] public void AddThrowsOnNegativeOperands(double operand1, double operand2) {     ICalculator calculator = container.GetService<ICalculator>();       Assert.Throws<ArgumentException>(() => calculator.Add(operand1, operand2)); } As you can see, I’m using a data-driven unit test method here, mainly for these two reasons: Because I know that I will have to do the same test for the second operand in a few seconds, I save myself from implementing another test method for this purpose. Rather, I only will have to add another Row attribute to the existing one. From the test report below, you can see that the argument values are explicitly printed out. 
This can be a valuable documentation feature even when everything is green: One can quickly review what values were tested exactly - the complete Gallio HTML-report (as it will be produced by the Continuous Integration runs) shows these values in a quite clear format (see below for an example). Back to our Calculator development again, this is what the test result tells us at the moment: So we’re red again, because there is not yet an implementation… Next we go on and implement the necessary parameter verification to become green again, and then we do the same thing for the second operand. To make a long story short, here’s the test and the method implementation at the end of the second cycle: // in CalculatorTest:   [Test] [Row(-0.5, 2)] [Row(295, -123)] public void AddThrowsOnNegativeOperands(double operand1, double operand2) {     ICalculator calculator = container.GetService<ICalculator>();       Assert.Throws<ArgumentException>(() => calculator.Add(operand1, operand2)); }   // in Calculator: public double Add(double operand1, double operand2) {     if (operand1 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand1");     }     if (operand2 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand2");     }     throw new NotImplementedException(); } So far, we have sheltered our method from unwanted input, and now we can safely operate on the parameters without further caring about their validity (this is my interpretation of the Fail Fast principle, which is regarded here in more detail). Now we can think about the method’s successful outcomes. First let’s write another test for that: [Test] [Row(1, 1, 2)] public void TestAdd(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Add(operand1, operand2);       Assert.AreEqual(expectedResult, result); } Again, I’m regularly using row based test methods for these kinds of unit tests. The above shown pattern proved to be extremely helpful for my development work, I call it the Defined-Input/Expected-Output test idiom: You define your input arguments together with the expected method result. There are two major benefits from that way of testing: In the course of refining a method, it’s very likely to come up with additional test cases. In our case, we might add tests for some edge cases like ‘one of the operands is zero’ or ‘the sum of the two operands causes an overflow’, or maybe there’s an external test protocol that has to be fulfilled (e.g. an ISO norm for medical software), and this results in the need of testing against additional values. In all these scenarios we only have to add another Row attribute to the test. Remember that the argument values are written to the test report, so as a side-effect this produces valuable documentation. (This can become especially important if the fulfillment of some sort of external requirements has to be proven). 
So your test method might look something like that in the end: [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 2)] [Row(0, 999999999, 999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, double.MaxValue)] [Row(4, double.MaxValue - 2.5, double.MaxValue)] public void TestAdd(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Add(operand1, operand2);       Assert.AreEqual(expectedResult, result); } And this will produce the following HTML report (with Gallio):   Not bad for the amount of work we invested in it, huh? - There might be scenarios where reports like that can be useful for demonstration purposes during a Scrum sprint review… The last requirement to fulfill is that the LastResult property is expected to store the result of the last operation. I don’t show this here, it’s trivial enough and brings nothing new… And finally: Refactor (for the right reasons) To demonstrate my way of going through the refactoring portion of the red-green-refactor cycle, I added another method to our Calculator component, namely Subtract(). Here’s the code (tests and production): // CalculatorTest.cs:   [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 0)] [Row(0, 999999999, -999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, -double.MaxValue)] [Row(4, double.MaxValue - 2.5, -double.MaxValue)] public void TestSubtract(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       double result = calculator.Subtract(operand1, operand2);       Assert.AreEqual(expectedResult, result); }   [Test, Description("Arguments: operand1, operand2, expectedResult")] [Row(1, 1, 0)] [Row(0, 999999999, -999999999)] [Row(0, 0, 0)] [Row(0, double.MaxValue, -double.MaxValue)] [Row(4, double.MaxValue - 2.5, -double.MaxValue)] public void TestSubtractGivesExpectedLastResult(double operand1, double operand2, double expectedResult) {     ICalculator calculator = container.GetService<ICalculator>();       calculator.Subtract(operand1, operand2);       Assert.AreEqual(expectedResult, calculator.LastResult); }   ...   // ICalculator.cs: /// <summary> /// Subtracts the specified operands. /// </summary> /// <param name="operand1">The operand1.</param> /// <param name="operand2">The operand2.</param> /// <returns>The result of the subtraction.</returns> /// <exception cref="ArgumentException"> /// Argument <paramref name="operand1"/> is &lt; 0.<br/> /// -- or --<br/> /// Argument <paramref name="operand2"/> is &lt; 0. /// </exception> double Subtract(double operand1, double operand2);   ...   // Calculator.cs:   public double Subtract(double operand1, double operand2) {     if (operand1 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand1");     }       if (operand2 < 0.0)     {         throw new ArgumentException("Value must not be negative.", "operand2");     }       return (this.LastResult = operand1 - operand2).Value; }   Obviously, the argument validation stuff that was produced during the red-green part of our cycle duplicates the code from the previous Add() method. So, to avoid code duplication and minimize the number of code lines of the production code, we do an Extract Method refactoring. 
One more time, this is only a matter of a few mouse clicks (and giving the new method a name) with R#: Having done that, our production code finally looks like that: using System; using LinFu.IoC.Configuration;   namespace Calculator {     [Implements(typeof(ICalculator))]     internal class Calculator : ICalculator     {         #region ICalculator           public double? LastResult { get; private set; }           public double Add(double operand1, double operand2)         {             ThrowIfOneOperandIsInvalid(operand1, operand2);               return (this.LastResult = operand1 + operand2).Value;         }           public double Subtract(double operand1, double operand2)         {             ThrowIfOneOperandIsInvalid(operand1, operand2);               return (this.LastResult = operand1 - operand2).Value;         }           #endregion // ICalculator           #region Implementation (Helper)           private static void ThrowIfOneOperandIsInvalid(double operand1, double operand2)         {             if (operand1 < 0.0)             {                 throw new ArgumentException("Value must not be negative.", "operand1");             }               if (operand2 < 0.0)             {                 throw new ArgumentException("Value must not be negative.", "operand2");             }         }           #endregion // Implementation (Helper)       } // class Calculator   } // namespace Calculator But is the above worth the effort at all? It’s obviously trivial and not very impressive. All our tests were green (for the right reasons), and refactoring the code did not change anything. It’s not immediately clear how this refactoring work adds value to the project. Derick puts it like this: STOP! Hold on a second… before you go any further and before you even think about refactoring what you just wrote to make your test pass, you need to understand something: if your done with your requirements after making the test green, you are not required to refactor the code. I know… I’m speaking heresy, here. Toss me to the wolves, I’ve gone over to the dark side! Seriously, though… if your test is passing for the right reasons, and you do not need to write any test or any more code for you class at this point, what value does refactoring add? Derick immediately answers his own question: So why should you follow the refactor portion of red/green/refactor? When you have added code that makes the system less readable, less understandable, less expressive of the domain or concern’s intentions, less architecturally sound, less DRY, etc, then you should refactor it. I couldn’t state it more precise. From my personal perspective, I’d add the following: You have to keep in mind that real-world software systems are usually quite large and there are dozens or even hundreds of occasions where micro-refactorings like the above can be applied. It’s the sum of them all that counts. And to have a good overall quality of the system (e.g. in terms of the Code Duplication Percentage metric) you have to be pedantic on the individual, seemingly trivial cases. My job regularly requires the reading and understanding of ‘foreign’ code. 
So code quality/readability really makes a HUGE difference for me – sometimes it can be even the difference between project success and failure… Conclusions The above described development process emerged over the years, and there were mainly two things that guided its evolution (you might call it eternal principles, personal beliefs, or anything in between): Test-driven development is the normal, natural way of writing software, code-first is exceptional. So ‘doing TDD or not’ is not a question. And good, stable code can only reliably be produced by doing TDD (yes, I know: many will strongly disagree here again, but I’ve never seen high-quality code – and high-quality code is code that stood the test of time and causes low maintenance costs – that was produced code-first…) It’s the production code that pays our bills in the end. (Though I have seen customers these days who demand an acceptance test battery as part of the final delivery. Things seem to go into the right direction…). The test code serves ‘only’ to make the production code work. But it’s the number of delivered features which solely counts at the end of the day - no matter how much test code you wrote or how good it is. With these two things in mind, I tried to optimize my coding process for coding speed – or, in business terms: productivity - without sacrificing the principles of TDD (more than I’d do either way…).  As a result, I consider a ratio of about 3-5/1 for test code vs. production code as normal and desirable. In other words: roughly 60-80% of my code is test code (This might sound heavy, but that is mainly due to the fact that software development standards only begin to evolve. The entire software development profession is very young, historically seen; only at the very beginning, and there are no viable standards yet. If you think about software development as a kind of casting process, where the test code is the mold and the resulting production code is the final product, then the above ratio sounds no longer extraordinary…) Although the above might look like very much unnecessary work at first sight, it’s not. With the aid of the mentioned add-ins, doing all the above is a matter of minutes, sometimes seconds (while writing this post took hours and days…). The most important thing is to have the right tools at hand. Slow developer machines or the lack of a tool or something like that - for ‘saving’ a few 100 bucks -  is just not acceptable and a very bad decision in business terms (though I quite some times have seen and heard that…). Production of high-quality products needs the usage of high-quality tools. This is a platitude that every craftsman knows… The here described round-trip will take me about five to ten minutes in my real-world development practice. I guess it’s about 30% more time compared to developing the ‘traditional’ (code-first) way. But the so manufactured ‘product’ is of much higher quality and massively reduces maintenance costs, which is by far the single biggest cost factor, as I showed in this previous post: It's the maintenance, stupid! (or: Something is rotten in developerland.). In the end, this is a highly cost-effective way of software development… But on the other hand, there clearly is a trade-off here: coding speed vs. code quality/later maintenance costs. The here described development method might be a perfect fit for the overwhelming majority of software projects, but there certainly are some scenarios where it’s not - e.g. 
if time-to-market is crucial for a software project. So this is a business decision in the end. It’s just that you have to know what you’re doing and what consequences this might have… Some last words First, I’d like to thank Derick Bailey again. His two aforementioned posts (which I strongly recommend for reading) inspired me to think deeply about my own personal way of doing TDD and to clarify my thoughts about it. I wouldn’t have done that without this inspiration. I really enjoy that kind of discussions… I agree with him in all respects. But I don’t know (yet?) how to bring his insights into the described production process without slowing things down. The above described method proved to be very “good enough” in my practical experience. But of course, I’m open to suggestions here… My rationale for now is: If the test is initially red during the red-green-refactor cycle, the ‘right reason’ is: it actually calls the right method, but this method is not yet operational. Later on, when the cycle is finished and the tests become part of the regular, automated Continuous Integration process, ‘red’ certainly must occur for the ‘right reason’: in this phase, ‘red’ MUST mean nothing but an unfulfilled assertion - Fail By Assertion, Not By Anything Else!

    Read the article

  • value types in the vm

    - by john.rose
    value types in the vm p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Times} p.p2 {margin: 0.0px 0.0px 14.0px 0.0px; font: 14.0px Times} p.p3 {margin: 0.0px 0.0px 12.0px 0.0px; font: 14.0px Times} p.p4 {margin: 0.0px 0.0px 15.0px 0.0px; font: 14.0px Times} p.p5 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Courier} p.p6 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Courier; min-height: 17.0px} p.p7 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Times; min-height: 18.0px} p.p8 {margin: 0.0px 0.0px 0.0px 36.0px; text-indent: -36.0px; font: 14.0px Times; min-height: 18.0px} p.p9 {margin: 0.0px 0.0px 12.0px 0.0px; font: 14.0px Times; min-height: 18.0px} p.p10 {margin: 0.0px 0.0px 12.0px 0.0px; font: 14.0px Times; color: #000000} li.li1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Times} li.li7 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px Times; min-height: 18.0px} span.s1 {font: 14.0px Courier} span.s2 {color: #000000} span.s3 {font: 14.0px Courier; color: #000000} ol.ol1 {list-style-type: decimal} Or, enduring values for a changing world. Introduction A value type is a data type which, generally speaking, is designed for being passed by value in and out of methods, and stored by value in data structures. The only value types which the Java language directly supports are the eight primitive types. Java indirectly and approximately supports value types, if they are implemented in terms of classes. For example, both Integer and String may be viewed as value types, especially if their usage is restricted to avoid operations appropriate to Object. In this note, we propose a definition of value types in terms of a design pattern for Java classes, accompanied by a set of usage restrictions. We also sketch the relation of such value types to tuple types (which are a JVM-level notion), and point out JVM optimizations that can apply to value types. This note is a thought experiment to extend the JVM’s performance model in support of value types. The demonstration has two phases.  Initially the extension can simply use design patterns, within the current bytecode architecture, and in today’s Java language. But if the performance model is to be realized in practice, it will probably require new JVM bytecode features, changes to the Java language, or both.  We will look at a few possibilities for these new features. An Axiom of Value In the context of the JVM, a value type is a data type equipped with construction, assignment, and equality operations, and a set of typed components, such that, whenever two variables of the value type produce equal corresponding values for their components, the values of the two variables cannot be distinguished by any JVM operation. Here are some corollaries: A value type is immutable, since otherwise a copy could be constructed and the original could be modified in one of its components, allowing the copies to be distinguished. Changing the component of a value type requires construction of a new value. The equals and hashCode operations are strictly component-wise. If a value type is represented by a JVM reference, that reference cannot be successfully synchronized on, and cannot be usefully compared for reference equality. A value type can be viewed in terms of what it doesn’t do. We can say that a value type omits all value-unsafe operations, which could violate the constraints on value types.  
These operations, which are ordinarily allowed for Java object types, are pointer equality comparison (the acmp instruction), synchronization (the monitor instructions), all the wait and notify methods of class Object, and non-trivial finalize methods. The clone method is also value-unsafe, although for value types it could be treated as the identity function. Finally, and most importantly, any side effect on an object (however visible) also counts as an value-unsafe operation. A value type may have methods, but such methods must not change the components of the value. It is reasonable and useful to define methods like toString, equals, and hashCode on value types, and also methods which are specifically valuable to users of the value type. Representations of Value Value types have two natural representations in the JVM, unboxed and boxed. An unboxed value consists of the components, as simple variables. For example, the complex number x=(1+2i), in rectangular coordinate form, may be represented in unboxed form by the following pair of variables: /*Complex x = Complex.valueOf(1.0, 2.0):*/ double x_re = 1.0, x_im = 2.0; These variables might be locals, parameters, or fields. Their association as components of a single value is not defined to the JVM. Here is a sample computation which computes the norm of the difference between two complex numbers: double distance(/*Complex x:*/ double x_re, double x_im,         /*Complex y:*/ double y_re, double y_im) {     /*Complex z = x.minus(y):*/     double z_re = x_re - y_re, z_im = x_im - y_im;     /*return z.abs():*/     return Math.sqrt(z_re*z_re + z_im*z_im); } A boxed representation groups component values under a single object reference. The reference is to a ‘wrapper class’ that carries the component values in its fields. (A primitive type can naturally be equated with a trivial value type with just one component of that type. In that view, the wrapper class Integer can serve as a boxed representation of value type int.) The unboxed representation of complex numbers is practical for many uses, but it fails to cover several major use cases: return values, array elements, and generic APIs. The two components of a complex number cannot be directly returned from a Java function, since Java does not support multiple return values. The same story applies to array elements: Java has no ’array of structs’ feature. (Double-length arrays are a possible workaround for complex numbers, but not for value types with heterogeneous components.) By generic APIs I mean both those which use generic types, like Arrays.asList and those which have special case support for primitive types, like String.valueOf and PrintStream.println. Those APIs do not support unboxed values, and offer some problems to boxed values. Any ’real’ JVM type should have a story for returns, arrays, and API interoperability. The basic problem here is that value types fall between primitive types and object types. Value types are clearly more complex than primitive types, and object types are slightly too complicated. Objects are a little bit dangerous to use as value carriers, since object references can be compared for pointer equality, and can be synchronized on. Also, as many Java programmers have observed, there is often a performance cost to using wrapper objects, even on modern JVMs. Even so, wrapper classes are a good starting point for talking about value types. 
If there were a set of structural rules and restrictions which would prevent value-unsafe operations on value types, wrapper classes would provide a good notation for defining value types. This note attempts to define such rules and restrictions. Let’s Start Coding Now it is time to look at some real code. Here is a definition, written in Java, of a complex number value type. @ValueSafe public final class Complex implements java.io.Serializable {     // immutable component structure:     public final double re, im;     private Complex(double re, double im) {         this.re = re; this.im = im;     }     // interoperability methods:     public String toString() { return "Complex("+re+","+im+")"; }     public List<Double> asList() { return Arrays.asList(re, im); }     public boolean equals(Complex c) {         return re == c.re && im == c.im;     }     public boolean equals(@ValueSafe Object x) {         return x instanceof Complex && equals((Complex) x);     }     public int hashCode() {         return 31*Double.valueOf(re).hashCode()                 + Double.valueOf(im).hashCode();     }     // factory methods:     public static Complex valueOf(double re, double im) {         return new Complex(re, im);     }     public Complex changeRe(double re2) { return valueOf(re2, im); }     public Complex changeIm(double im2) { return valueOf(re, im2); }     public static Complex cast(@ValueSafe Object x) {         return x == null ? ZERO : (Complex) x;     }     // utility methods and constants:     public Complex plus(Complex c)  { return new Complex(re+c.re, im+c.im); }     public Complex minus(Complex c) { return new Complex(re-c.re, im-c.im); }     public double abs() { return Math.sqrt(re*re + im*im); }     public static final Complex PI = valueOf(Math.PI, 0.0);     public static final Complex ZERO = valueOf(0.0, 0.0); } This is not a minimal definition, because it includes some utility methods and other optional parts.  The essential elements are as follows: The class is marked as a value type with an annotation. The class is final, because it does not make sense to create subclasses of value types. The fields of the class are all non-private and final.  (I.e., the type is immutable and structurally transparent.) From the supertype Object, all public non-final methods are overridden. The constructor is private. Beyond these bare essentials, we can observe the following features in this example, which are likely to be typical of all value types: One or more factory methods are responsible for value creation, including a component-wise valueOf method. There are utility methods for complex arithmetic and instance creation, such as plus and changeIm. There are static utility constants, such as PI. The type is serializable, using the default mechanisms. There are methods for converting to and from dynamically typed references, such as asList and cast. The Rules In order to use value types properly, the programmer must avoid value-unsafe operations.  A helpful Java compiler should issue errors (or at least warnings) for code which provably applies value-unsafe operations, and should issue warnings for code which might be correct but does not provably avoid value-unsafe operations.  No such compilers exist today, but to simplify our account here, we will pretend that they do exist. A value-safe type is any class, interface, or type parameter marked with the @ValueSafe annotation, or any subtype of a value-safe type.  If a value-safe class is marked final, it is in fact a value type.  
All other value-safe classes must be abstract.  The non-static fields of a value class must be non-public and final, and all its constructors must be private. Under the above rules, a standard interface could be helpful to define value types like Complex.  Here is an example: @ValueSafe public interface ValueType extends java.io.Serializable {     // All methods listed here must get redefined.     // Definitions must be value-safe, which means     // they may depend on component values only.     List<? extends Object> asList();     int hashCode();     boolean equals(@ValueSafe Object c);     String toString(); } //@ValueSafe inherited from supertype: public final class Complex implements ValueType { … The main advantage of such a conventional interface is that (unlike an annotation) it is reified in the runtime type system.  It could appear as an element type or parameter bound, for facilities which are designed to work on value types only.  More broadly, it might assist the JVM to perform dynamic enforcement of the rules for value types. Besides types, the annotation @ValueSafe can mark fields, parameters, local variables, and methods.  (This is redundant when the type is also value-safe, but may be useful when the type is Object or another supertype of a value type.)  Working forward from these annotations, an expression E is defined as value-safe if it satisfies one or more of the following: The type of E is a value-safe type. E names a field, parameter, or local variable whose declaration is marked @ValueSafe. E is a call to a method whose declaration is marked @ValueSafe. E is an assignment to a value-safe variable, field reference, or array reference. E is a cast to a value-safe type from a value-safe expression. E is a conditional expression E0 ? E1 : E2, and both E1 and E2 are value-safe. Assignments to value-safe expressions and initializations of value-safe names must take their values from value-safe expressions. A value-safe expression may not be the subject of a value-unsafe operation.  In particular, it cannot be synchronized on, nor can it be compared with the “==” operator, not even with a null or with another value-safe type. In a program where all of these rules are followed, no value-type value will be subject to a value-unsafe operation.  Thus, the prime axiom of value types will be satisfied, that no two value type will be distinguishable as long as their component values are equal. 
Besides types, the annotation @ValueSafe can mark fields, parameters, local variables, and methods. (This is redundant when the type is also value-safe, but may be useful when the type is Object or another supertype of a value type.) Working forward from these annotations, an expression E is defined as value-safe if it satisfies one or more of the following:

- The type of E is a value-safe type.
- E names a field, parameter, or local variable whose declaration is marked @ValueSafe.
- E is a call to a method whose declaration is marked @ValueSafe.
- E is an assignment to a value-safe variable, field reference, or array reference.
- E is a cast to a value-safe type from a value-safe expression.
- E is a conditional expression E0 ? E1 : E2, and both E1 and E2 are value-safe.

Assignments to value-safe expressions and initializations of value-safe names must take their values from value-safe expressions. A value-safe expression may not be the subject of a value-unsafe operation. In particular, it cannot be synchronized on, nor can it be compared with the "==" operator, not even with a null or with another value-safe type.

In a program where all of these rules are followed, no value-type value will be subject to a value-unsafe operation. Thus, the prime axiom of value types will be satisfied: no two value-type values will be distinguishable as long as their component values are equal.

More Code

To illustrate these rules, here are some usage examples for Complex:

Complex pi = Complex.valueOf(Math.PI, 0);
Complex zero = pi.changeRe(0);  //zero = pi; zero.re = 0;
ValueType vtype = pi;
@SuppressWarnings("value-unsafe")
Object obj = pi;
@ValueSafe Object obj2 = pi;
obj2 = new Object();  // ok
List<Complex> clist = new ArrayList<Complex>();
clist.add(pi);  // (ok assuming List.add param is @ValueSafe)
List<ValueType> vlist = new ArrayList<ValueType>();
vlist.add(pi);  // (ok)
List<Object> olist = new ArrayList<Object>();
olist.add(pi);  // warning: "value-unsafe"
boolean z = pi.equals(zero);
boolean z1 = (pi == zero);  // error: reference comparison on value type
boolean z2 = (pi == null);  // error: reference comparison on value type
boolean z3 = (pi == obj2);  // error: reference comparison on value type
synchronized (pi) { }  // error: synch of value, unpredictable result
synchronized (obj2) { }  // unpredictable result
Complex qq = pi;
qq = null;  // possible NPE; warning: "null-unsafe"
qq = (Complex) obj;  // warning: "null-unsafe"
qq = Complex.cast(obj);  // OK
@SuppressWarnings("null-unsafe")
Complex empty = null;  // possible NPE
qq = empty;  // possible NPE (null pollution)

The Payoffs

It follows from this that either the JVM or the Java compiler can replace boxed value-type values with unboxed ones, without affecting normal computations. Fields and variables of value types can be split into their unboxed components. Non-static methods on value types can be transformed into static methods which take the components as value parameters.
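For instance, the transformation might, in spirit, turn Complex.plus into something like the following static form. This is my own illustration, not a prescribed JVM rewrite; the names and the double[] out-parameter (standing in for the multiple return values the JVM cannot yet express) are assumptions:

// Sketch: Complex.plus rewritten over unboxed components.
static void complexPlus(double aRe, double aIm,
                        double bRe, double bIm, double[] out) {
    out[0] = aRe + bRe;  // real component of the sum
    out[1] = aIm + bIm;  // imaginary component of the sum
}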
Some common questions arise around this point in any discussion of value types. Why burden the programmer with all these extra rules? Why not detect such programs automagically and perform unboxing transparently? The answer is that it is easy to break the rules accidentally unless they are agreed to by the programmer and enforced. Automatic unboxing optimizations are a tantalizing but (so far) unreachable ideal. In the current state of the art, it is possible to exhibit benchmarks in which automatic unboxing provides the desired effects, but it is not possible to provide a JVM with a performance model that assures the programmer when unboxing will occur. This is why I'm writing this note: to enlist help from, and provide assurances to, the programmer. Basically, I'm shooting for a good set of user-supplied "pragmas" to frame the desired optimization.

Again, the important thing is that the unboxing must be done reliably, or else programmers will have no reason to work with the extra complexity of the value-safety rules. There must be a reasonably stable performance model, wherein using a value type has approximately the same performance characteristics as writing the unboxed components as separate Java variables.

There are some rough corners to the present scheme. Since Java fields and array elements are initialized to null, value-type computations which incorporate uninitialized variables can produce null pointer exceptions. One workaround for this is to require such variables to be null-tested, and the result replaced with a suitable all-zero value of the value type. That is what the "cast" method does above.

Generically typed APIs like List<T> will continue to manipulate boxed values always, at least until we figure out how to do reification of generic type instances. Use of such APIs will elicit warnings until their type parameters (and/or relevant members) are annotated or typed as value-safe. Retrofitting List<T> is likely to expose flaws in the present scheme, which we will need to engineer around. Here are a couple of first approaches:

public interface java.util.List<@ValueSafe T> extends Collection<T> { …

public interface java.util.List<T extends Object|ValueType> extends Collection<T> { …

(The second approach would require disjunctive types, in which value-safety is "contagious" from the constituent types.)

With more transformations, the return value types of methods can also be unboxed. This may require significant bytecode-level transformations, and would work best in the presence of a bytecode representation for multiple value groups, which I have proposed elsewhere under the title "Tuples in the VM". But for starters, the JVM can apply this transformation under the covers, to internally compiled methods. This would give a way to express multiple return values and structured return values, which is a significant pain-point for Java programmers, especially those who work with low-level structure types favored by modern vector and graphics processors. The lack of multiple return values has a strong distorting effect on many Java APIs.

Even if the JVM fails to unbox a value, there is still potential benefit to the value type. Clustered computing systems sometimes have copy operations (serialization or something similar) which apply implicitly to command operands. When copying JVM objects, it is extremely helpful to know whether an object's identity is important or not. If an object reference is a copied operand, the system may have to create a proxy handle which points back to the original object, so that side effects are visible. Proxies must be managed carefully, and this can be expensive. On the other hand, value types are exactly those types which a JVM can "copy and forget" with no downside.

Array types are crucial to bulk data interfaces. (As data sizes and rates increase, bulk data becomes more important than scalar data, so arrays are definitely accompanying us into the future of computing.) Value types are very helpful for adding structure to bulk data, so a successful value type mechanism will make it easier for us to express richer forms of bulk data. Unboxing arrays (i.e., arrays containing unboxed values) will provide better cache and memory density, and more direct data movement within clustered or heterogeneous computing systems. They require the deepest transformations, relative to today's JVM. There is an impedance mismatch between value-type arrays and Java's covariant array typing, so compromises will need to be struck with existing Java semantics. It is probably worth the effort, since arrays of unboxed value types are inherently more memory-efficient than standard Java arrays, which rely on dependent pointer chains.

It may be sufficient to extend the "value-safe" concept to array declarations, and allow low-level transformations to change value-safe array declarations from the standard boxed form into an unboxed tuple-based form. Such value-safe arrays would not be convertible to Object[] arrays. Certain connection points, such as Arrays.copyOf and System.arraycopy, might need additional input/output combinations, to allow smooth conversion between arrays with boxed and unboxed elements.

Alternatively, the correct solution may have to wait until we have enough reification of generic types, and enough operator overloading, to enable an overhaul of Java arrays.
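In the meantime, a hand-rolled approximation gives some intuition for what an unboxed Complex array buys: one flat double array, components interleaved, with no per-element object headers or pointer chains. This wrapper is my own stand-in, not a proposed mechanism:

// Stand-in for an unboxed Complex[]: a single double[] of twice the
// length, with re/im interleaved at indices 2i and 2i+1.
final class ComplexArray {
    private final double[] data;
    ComplexArray(int length)   { data = new double[2 * length]; }
    Complex get(int i)         { return Complex.valueOf(data[2*i], data[2*i + 1]); }
    void set(int i, Complex c) { data[2*i] = c.re; data[2*i + 1] = c.im; }
}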
Implicit Method Definitions

The example of class Complex above may be unattractively complex. I believe most or all of the elements of the example class are required by the logic of value types. If this is true, a programmer who writes a value type will have to write lots of error-prone boilerplate code. On the other hand, I think nearly all of the code (except for the domain-specific parts like plus and minus) can be implicitly generated.

Java has a rule for implicitly defining a class's constructor, if it defines no constructors explicitly. Likewise, there are rules for providing default access modifiers for interface members. Because of the highly regular structure of value types, it might be reasonable to perform similar implicit transformations on value types. Here's an example of a "highly implicit" definition of a complex number type:

public class Complex implements ValueType {  // implicitly final
    public double re, im;  // implicitly public final
    //implicit methods are defined elementwise from the fields:
    //  toString, asList, equals(2), hashCode, valueOf, cast
    //optionally, explicit methods (plus, abs, etc.) would go here
}

In other words, with the right defaults, a simple value type definition can be a one-liner. The observant reader will have noticed the similarities (and suitable differences) between the explicit methods above and the corresponding methods for List<T>.

Another way to abbreviate such a class would be to make an annotation the primary trigger of the functionality, and to add the interface(s) implicitly:

public @ValueType class Complex { … // implicitly final, implements ValueType

(But to me it seems better to communicate the "magic" via an interface, even if it is rooted in an annotation.)

Implicitly Defined Value Types

So far we have been working with nominal value types, which is to say that the sequence of typed components is associated with a name and additional methods that convey the intention of the programmer. A simple ordered pair of floating point numbers can be variously interpreted as (to name a few possibilities) a rectangular or polar complex number or a Cartesian point. The name and the methods convey the intended meaning.

But what if we need a truly simple ordered pair of floating point numbers, without any further conceptual baggage? Perhaps we are writing a method (like "divideAndRemainder") which naturally returns a pair of numbers instead of a single number. Wrapping the pair of numbers in a nominal type (like "QuotientAndRemainder") makes as little sense as wrapping a single return value in a nominal type (like "Quotient"). What we need here are structural value types, commonly known as tuples.

For the present discussion, let us assign a conventional, JVM-friendly name to tuples, roughly as follows:

public class java.lang.tuple.$DD extends java.lang.tuple.Tuple {
    double $1, $2;
}

Here the component names are fixed and all the required methods are defined implicitly. The supertype is an abstract class which has suitable shared declarations. The name itself mentions a JVM-style method parameter descriptor, which may be "cracked" to determine the number and types of the component fields.
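To make the intended use concrete, here is a hypothetical caller. The valueOf factory on $DD is my assumption; the note only says that the required methods are defined implicitly:

// Hypothetical: returning a structural pair of doubles via $DD.
static java.lang.tuple.$DD divideAndRemainder(double num, double div) {
    double q = Math.floor(num / div);                   // floored quotient
    return java.lang.tuple.$DD.valueOf(q, num - q * div);  // assumed factory
}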
The odd thing about such a tuple type (and structural types in general) is that it must be instantiated lazily, in response to linkage requests from one or more classes that need it. The JVM and/or its class loaders must be prepared to spin a tuple type on demand, given a simple name reference, $xyz, where the xyz is cracked into a series of component types. (Specifics of naming and name mangling need some tasteful engineering.)

Tuples also seem to demand, even more than nominal types, some support from the language. (This is probably because notations for non-nominal types work best as combinations of punctuation and type names, rather than named constructors like Function3 or Tuple2.) At a minimum, languages with tuples usually (I think) have some sort of simple bracket notation for creating tuples, and a corresponding pattern-matching syntax (or "destructuring bind") for taking tuples apart, at least when they are parameter lists. Designing such a syntax is no simple thing, because it ought to play well with nominal value types, and also with pre-existing Java features, such as method parameter lists, implicit conversions, generic types, and reflection. That is a task for another day.

Other Use Cases

Besides complex numbers and simple tuples there are many use cases for value types. Many tuple-like types have natural value-type representations. These include rational numbers, point locations and pixel colors, and various kinds of dates and addresses.

Other types have a variable-length 'tail' of internal values. The most common example of this is String, which is (mathematically) a sequence of UTF-16 character values. Similarly, bit vectors, multiple-precision numbers, and polynomials are composed of sequences of values. Such types include, in their representation, a reference to a variable-sized data structure (often an array) which (somehow) represents the sequence of values. The value type may also include 'header' information. Variable-sized values often have a length distribution which favors short lengths. In that case, the design of the value type can make the first few values in the sequence be direct 'header' fields of the value type. In the common case where the header is enough to represent the whole value, the tail can be a shared null value, or even just a null reference. Note that the tail need not be an immutable object, as long as the header type encapsulates it well enough. This is the case with String, where the tail is a mutable (but never mutated) character array.

Field types and their order must be a globally visible part of the API. The structure of the value type must be transparent enough to have a globally consistent unboxed representation, so that all callers and callees agree about the type and order of components that appear as parameters, return types, and array elements. This is a trade-off between efficiency and encapsulation, which is forced on us when we remove an indirection enjoyed by boxed representations. A JVM-only transformation would not care about such visibility, but a bytecode transformation would need to take care that (say) the components of complex numbers would not get swapped after a redefinition of Complex and a partial recompile. Perhaps constant pool references to value types need to declare the field order as assumed by each API user.

This brings up the delicate status of private fields in a value type. It must always be possible to load, store, and copy value types as coordinated groups, and the JVM performs those movements by moving individual scalar values between locals and stack.
If a component field is not public, what is to prevent hostile code from plucking it out of the tuple using a rogue aload or astore instruction? Nothing but the verifier, so we may need to give it more smarts, so that it treats value types as inseparable groups of stack slots or locals (something like long or double).

My initial thought was to make the fields always public, which would make the security problem moot. But public is not always the right answer; consider the case of String, where the underlying mutable character array must be encapsulated to prevent security holes. I believe we can win back both sides of the tradeoff, by training the verifier never to split up the components in an unboxed value. Just as the verifier encapsulates the two halves of a 64-bit primitive, it can encapsulate the header and body of an unboxed String, so that no code other than that of class String itself can take apart the values.

Similar to String, we could build an efficient multi-precision decimal type along these lines:

public final class DecimalValue implements ValueType {
    protected final long header;
    protected final BigInteger digits;
    private DecimalValue(long header, BigInteger digits) {
        this.header = header; this.digits = digits;
    }
    public static DecimalValue valueOf(int value, int scale) {
        assert(scale >= 0);
        return new DecimalValue(((long)value << 32) + scale, null);
    }
    public static DecimalValue valueOf(long value, int scale) {
        if (value == (int) value)
            return valueOf((int)value, scale);
        return new DecimalValue(-scale, BigInteger.valueOf(value));
    }
}

Values of this type would be passed between methods as two machine words. Small values (those with a significand which fits into 32 bits) would be represented without any heap data at all, unless the DecimalValue itself were boxed.

(Note the tension between encapsulation and unboxing in this case. It would be better if the header and digits fields were private, but depending on where the unboxing information must "leak", it is probably safer to make a public revelation of the internal structure.)

Note that, although an array of Complex can be faked with a double-length array of double, there is no easy way to fake an array of unboxed DecimalValues. (Either an array of boxed values or a transposed pair of homogeneous arrays would be reasonable fallbacks, in a current JVM.) Getting the full benefit of unboxing and arrays will require some new JVM magic.
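For illustration, the "transposed pair of homogeneous arrays" fallback could look like the following sketch. All names here are mine, and this is only an approximation achievable on a current JVM:

import java.math.BigInteger;

// One homogeneous array per component; index i across both arrays
// holds the two words of one logical DecimalValue.
final class DecimalColumns {
    final long[] headers;            // packed (significand << 32) + scale
    final BigInteger[] digitTails;   // null where the header alone suffices
    DecimalColumns(int length) {
        headers = new long[length];
        digitTails = new BigInteger[length];
    }
}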
Although the JVM emphasizes portability, system-dependent code will benefit from using machine-level types larger than 64 bits. For example, the back end of a linear algebra package might benefit from value types like Float4 which map to stock vector types. This is probably only worthwhile if the unboxing arrays can be packed with such values.

More Daydreams

A more finely-divided design for dynamic enforcement of value safety could feature separate marker interfaces for each invariant. An empty marker interface Unsynchronizable could cause suitable exceptions for monitor instructions on objects in marked classes. More radically, an Interchangeable marker interface could cause JVM primitives that are sensitive to object identity to raise exceptions; the strangest result would be that the acmp instruction would have to be specified as raising an exception.

@ValueSafe
public interface ValueType extends java.io.Serializable,
        Unsynchronizable, Interchangeable { …

public class Complex implements ValueType {
    // inherits Serializable, Unsynchronizable, Interchangeable, @ValueSafe
    …

It seems possible that Integer and the other wrapper types could be retrofitted as value-safe types. This is a major change, since wrapper objects would be unsynchronizable and their references interchangeable. It is likely that code which violates value-safety for wrapper types exists but is uncommon. It is less plausible to retrofit String, since the prominent operation String.intern is often used with value-unsafe code.

We should also reconsider the distinction between boxed and unboxed values in code. The design presented above obscures that distinction. As another thought experiment, we could imagine making a first-class distinction in the type system between boxed and unboxed representations. Since only primitive types are named with a lower-case initial letter, we could define that the capitalized version of a value type name always refers to the boxed representation, while the initial lower-case variant always refers to the unboxed one. For example:

complex pi = complex.valueOf(Math.PI, 0);
Complex boxPi = pi;  // convert to boxed
myList.add(boxPi);
complex z = myList.get(0);  // unbox

Such a convention could perhaps absorb the current difference between int and Integer, double and Double. It might also allow the programmer to express a helpful distinction among array types.

As said above, array types are crucial to bulk data interfaces, but are limited in the JVM. Extending arrays beyond the present limitations is worth thinking about; for example, the Maxine JVM implementation has a hybrid object/array type. Something like this which can also accommodate value type components seems worthwhile. On the other hand, does it make sense for value types to contain short arrays? And why should random-access arrays be the end of our design process, when bulk data is often sequentially accessed, and it might make sense to have heterogeneous streams of data as the natural "jumbo" data structure? These considerations must wait for another day and another note.

More Work

It seems to me that a good sequence for introducing such value types would be as follows:

- Add the value-safety restrictions to an experimental version of javac.
- Code some sample applications with value types, including Complex and DecimalValue.
- Create an experimental JVM which internally unboxes value types but does not require new bytecodes to do so. Ensure the feasibility of the performance model for the sample applications.
- Add tuple-like bytecodes (with or without generic type reification) to a major revision of the JVM, and teach the Java compiler to switch in the new bytecodes without code changes.

A staggered roll-out like this would decouple language changes from bytecode changes, which is always a convenient thing.

A similar investigation should be applied (concurrently) to array types. In this case, it seems to me that the starting point is in the JVM:

- Add an experimental unboxing array data structure to a production JVM, perhaps along the lines of Maxine hybrids. No bytecode or language support is required at first; everything can be done with encapsulated unsafe operations and/or method handles.
- Create an experimental JVM which internally unboxes value types but does not require new bytecodes to do so.
  Ensure the feasibility of the performance model for the sample applications.
- Add tuple-like bytecodes (with or without generic type reification) to a major revision of the JVM, and teach the Java compiler to switch in the new bytecodes without code changes.

That's enough musing for now. Back to work!


  • SPARC T5-4 LDoms for RAC and WebLogic Clusters

    - by Jeff Taylor-Oracle
I wanted to use two Oracle SPARC T5-4 servers to simultaneously host both Oracle RAC and a WebLogic Server Cluster. I chose to use Oracle VM Server for SPARC to create a cluster like this:

There are plenty of trade-offs and decisions that need to be made, for example:

- Rather than configuring the system by hand, you might want to use an Oracle SuperCluster T5-8.
- My configuration is similar to jsavit's: Availability Best Practices - Example configuring a T5-8, but I chose to ignore some of the advice.
- Maybe I should have included an alternate service domain, but I decided that I already had enough redundancy.

Both Oracle SPARC T5-4 servers were to be configured like this:

    Domain     CPUs   Cores   Memory
    Cntl       0.25       4    64 GB
    App LDom   2.75      44   704 GB
    DB LDom    1         16   256 GB

The systems started with everything in the primary domain:

# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  NORM  UPTIME
primary          active     -n-c--  UART    512   1023G    0.0%  0.0%  11m

# ldm list-spconfig
factory-default [current]
primary

# ldm list -o core,memory,physio
NAME
primary

CORE
    CID    CPUSET
    0      (0, 1, 2, 3, 4, 5, 6, 7)
    1      (8, 9, 10, 11, 12, 13, 14, 15)
    2      (16, 17, 18, 19, 20, 21, 22, 23)
-- SNIP
    62     (496, 497, 498, 499, 500, 501, 502, 503)
    63     (504, 505, 506, 507, 508, 509, 510, 511)

MEMORY
    RA               PA               SIZE
    0x30000000       0x30000000       255G
    0x80000000000    0x80000000000    256G
    0x100000000000   0x100000000000   256G
    0x180000000000   0x180000000000   256G   # Give this memory block to the DB LDom

IO
    DEVICE                           PSEUDONYM        OPTIONS
    pci@300                          pci_0
    pci@340                          pci_1
    pci@380                          pci_2
    pci@3c0                          pci_3
    pci@400                          pci_4
    pci@440                          pci_5
    pci@480                          pci_6
    pci@4c0                          pci_7
    pci@300/pci@1/pci@0/pci@6        /SYS/RCSA/PCIE1
    pci@300/pci@1/pci@0/pci@c        /SYS/RCSA/PCIE2
    pci@300/pci@1/pci@0/pci@4/pci@0/pci@c /SYS/MB/SASHBA0
    pci@300/pci@1/pci@0/pci@4/pci@0/pci@8 /SYS/RIO/NET0
    pci@340/pci@1/pci@0/pci@6        /SYS/RCSA/PCIE3
    pci@340/pci@1/pci@0/pci@c        /SYS/RCSA/PCIE4
    pci@380/pci@1/pci@0/pci@a        /SYS/RCSA/PCIE9
    pci@380/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE10
    pci@3c0/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE11
    pci@3c0/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE12
    pci@400/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE5
    pci@400/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE6
    pci@440/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE7
    pci@440/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE8
    pci@480/pci@1/pci@0/pci@a        /SYS/RCSA/PCIE13
    pci@480/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE14
    pci@4c0/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE15
    pci@4c0/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE16
    pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c /SYS/MB/SASHBA1
    pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@4 /SYS/RIO/NET2

Added an additional service processor configuration:

# ldm add-spconfig split
# ldm list-spconfig
factory-default
primary
split [current]
And removed many of the resources from the primary domain:

# ldm start-reconf primary
# ldm set-core 4 primary
# ldm set-memory 32G primary
# ldm rm-io pci@340 primary
# ldm rm-io pci@380 primary
# ldm rm-io pci@3c0 primary
# ldm rm-io pci@400 primary
# ldm rm-io pci@440 primary
# ldm rm-io pci@480 primary
# ldm rm-io pci@4c0 primary
# init 6

Needed to add resources to the guest domains:

# ldm add-domain db
# ldm set-core cid=`seq -s"," 48 63` db
# ldm add-memory mblock=0x180000000000:256G db
# ldm add-io pci@480 db
# ldm add-io pci@4c0 db
# ldm add-domain app
# ldm set-core 44 app
# ldm set-memory 704G app
# ldm add-io pci@340 app
# ldm add-io pci@380 app
# ldm add-io pci@3c0 app
# ldm add-io pci@400 app
# ldm add-io pci@440 app

Needed to set up services:

# ldm add-vds primary-vds0 primary
# ldm add-vcc port-range=5000-5100 primary-vcc0 primary

Needed to add a virtual network port for the WebLogic application domain:

# ipadm
NAME              CLASS/TYPE STATE        UNDER      ADDR
lo0               loopback   ok           --         --
   lo0/v4         static     ok           --         ...
   lo0/v6         static     ok           --         ...
net0              ip         ok           --         ...
   net0/v4        static     ok           --         xxx.xxx.xxx.xxx/24
   net0/v6        addrconf   ok           --         ....
   net0/v6        addrconf   ok           --         ...
net8              ip         ok           --         --
   net8/v4        static     ok           --         ...

# dladm show-phys
LINK              MEDIA                STATE      SPEED  DUPLEX    DEVICE
net1              Ethernet             unknown    0      unknown   ixgbe1
net0              Ethernet             up         1000   full      ixgbe0
net8              Ethernet             up         10     full      usbecm2

# ldm add-vsw net-dev=net0 primary-vsw0 primary
# ldm add-vnet vnet1 primary-vsw0 app

Needed to add a virtual disk to the WebLogic application domain:

# format
Searching for disks...done

AVAILABLE DISK SELECTIONS:
       0. c0t5000CCA02505F874d0 <HITACHI-H106060SDSUN600G-A2B0-558.91GB>
          /scsi_vhci/disk@g5000cca02505f874
          /dev/chassis/SPARC_T5-4.AK00084038/SYS/SASBP0/HDD0/disk
       1. c0t5000CCA02506C468d0 <HITACHI-H106060SDSUN600G-A2B0-558.91GB>
          /scsi_vhci/disk@g5000cca02506c468
          /dev/chassis/SPARC_T5-4.AK00084038/SYS/SASBP0/HDD1/disk
       2. c0t5000CCA025067E5Cd0 <HITACHI-H106060SDSUN600G-A2B0-558.91GB>
          /scsi_vhci/disk@g5000cca025067e5c
          /dev/chassis/SPARC_T5-4.AK00084038/SYS/SASBP0/HDD2/disk
       3. c0t5000CCA02506C258d0 <HITACHI-H106060SDSUN600G-A2B0-558.91GB>
          /scsi_vhci/disk@g5000cca02506c258
          /dev/chassis/SPARC_T5-4.AK00084038/SYS/SASBP0/HDD3/disk
Specify disk (enter its number): ^C

# ldm add-vdsdev /dev/dsk/c0t5000CCA02506C468d0s2 HDD1@primary-vds0
# ldm add-vdisk HDD1 HDD1@primary-vds0 app

Add some additional spice to the pot:

# ldm set-variable auto-boot\\?=false db
# ldm set-variable auto-boot\\?=false app
# ldm set-var boot-device=HDD1 app

Bind the logical domains:

# ldm bind db
# ldm bind app

At the end of the process, the system is set up like this:

# ldm list -o core,memory,physio
NAME
primary

CORE
    CID    CPUSET
    0      (0, 1, 2, 3, 4, 5, 6, 7)
    1      (8, 9, 10, 11, 12, 13, 14, 15)
    2      (16, 17, 18, 19, 20, 21, 22, 23)
    3      (24, 25, 26, 27, 28, 29, 30, 31)

MEMORY
    RA               PA               SIZE
    0x30000000       0x30000000       32G

IO
    DEVICE                           PSEUDONYM        OPTIONS
    pci@300                          pci_0
    pci@300/pci@1/pci@0/pci@6        /SYS/RCSA/PCIE1
    pci@300/pci@1/pci@0/pci@c        /SYS/RCSA/PCIE2
    pci@300/pci@1/pci@0/pci@4/pci@0/pci@c /SYS/MB/SASHBA0
    pci@300/pci@1/pci@0/pci@4/pci@0/pci@8 /SYS/RIO/NET0
------------------------------------------------------------------------------
NAME
app

CORE
    CID    CPUSET
    4      (32, 33, 34, 35, 36, 37, 38, 39)
    5      (40, 41, 42, 43, 44, 45, 46, 47)
    6      (48, 49, 50, 51, 52, 53, 54, 55)
    7      (56, 57, 58, 59, 60, 61, 62, 63)
    8      (64, 65, 66, 67, 68, 69, 70, 71)
    9      (72, 73, 74, 75, 76, 77, 78, 79)
    10     (80, 81, 82, 83, 84, 85, 86, 87)
    11     (88, 89, 90, 91, 92, 93, 94, 95)
    12     (96, 97, 98, 99, 100, 101, 102, 103)
    13     (104, 105, 106, 107, 108, 109, 110, 111)
    14     (112, 113, 114, 115, 116, 117, 118, 119)
    15     (120, 121, 122, 123, 124, 125, 126, 127)
    16     (128, 129, 130, 131, 132, 133, 134, 135)
    17     (136, 137, 138, 139, 140, 141, 142, 143)
    18     (144, 145, 146, 147, 148, 149, 150, 151)
    19     (152, 153, 154, 155, 156, 157, 158, 159)
    20     (160, 161, 162, 163, 164, 165, 166, 167)
    21     (168, 169, 170, 171, 172, 173, 174, 175)
    22     (176, 177, 178, 179, 180, 181, 182, 183)
    23     (184, 185, 186, 187, 188, 189, 190, 191)
    24     (192, 193, 194, 195, 196, 197, 198, 199)
    25     (200, 201, 202, 203, 204, 205, 206, 207)
    26     (208, 209, 210, 211, 212, 213, 214, 215)
    27     (216, 217, 218, 219, 220, 221, 222, 223)
    28     (224, 225, 226, 227, 228, 229, 230, 231)
    29     (232, 233, 234, 235, 236, 237, 238, 239)
    30     (240, 241, 242, 243, 244, 245, 246, 247)
    31     (248, 249, 250, 251, 252, 253, 254, 255)
    32     (256, 257, 258, 259, 260, 261, 262, 263)
    33     (264, 265, 266, 267, 268, 269, 270, 271)
    34     (272, 273, 274, 275, 276, 277, 278, 279)
    35     (280, 281, 282, 283, 284, 285, 286, 287)
    36     (288, 289, 290, 291, 292, 293, 294, 295)
    37     (296, 297, 298, 299, 300, 301, 302, 303)
    38     (304, 305, 306, 307, 308, 309, 310, 311)
    39     (312, 313, 314, 315, 316, 317, 318, 319)
    40     (320, 321, 322, 323, 324, 325, 326, 327)
    41     (328, 329, 330, 331, 332, 333, 334, 335)
    42     (336, 337, 338, 339, 340, 341, 342, 343)
    43     (344, 345, 346, 347, 348, 349, 350, 351)
    44     (352, 353, 354, 355, 356, 357, 358, 359)
    45     (360, 361, 362, 363, 364, 365, 366, 367)
    46     (368, 369, 370, 371, 372, 373, 374, 375)
    47     (376, 377, 378, 379, 380, 381, 382, 383)

MEMORY
    RA               PA               SIZE
    0x30000000       0x830000000      192G
    0x4000000000     0x80000000000    256G
    0x8080000000     0x100000000000   256G

IO
    DEVICE                           PSEUDONYM        OPTIONS
    pci@340                          pci_1
    pci@380                          pci_2
    pci@3c0                          pci_3
    pci@400                          pci_4
    pci@440                          pci_5
    pci@340/pci@1/pci@0/pci@6        /SYS/RCSA/PCIE3
    pci@340/pci@1/pci@0/pci@c        /SYS/RCSA/PCIE4
    pci@380/pci@1/pci@0/pci@a        /SYS/RCSA/PCIE9
    pci@380/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE10
    pci@3c0/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE11
    pci@3c0/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE12
    pci@400/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE5
    pci@400/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE6
    pci@440/pci@1/pci@0/pci@e        /SYS/RCSA/PCIE7
    pci@440/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE8
------------------------------------------------------------------------------
NAME
db

CORE
    CID    CPUSET
    48     (384, 385, 386, 387, 388, 389, 390, 391)
    49     (392, 393, 394, 395, 396, 397, 398, 399)
    50     (400, 401, 402, 403, 404, 405, 406, 407)
    51     (408, 409, 410, 411, 412, 413, 414, 415)
    52     (416, 417, 418, 419, 420, 421, 422, 423)
    53     (424, 425, 426, 427, 428, 429, 430, 431)
    54     (432, 433, 434, 435, 436, 437, 438, 439)
    55     (440, 441, 442, 443, 444, 445, 446, 447)
    56     (448, 449, 450, 451, 452, 453, 454, 455)
    57     (456, 457, 458, 459, 460, 461, 462, 463)
    58     (464, 465, 466, 467, 468, 469, 470, 471)
    59     (472, 473, 474, 475, 476, 477, 478, 479)
    60     (480, 481, 482, 483, 484, 485, 486, 487)
    61     (488, 489, 490, 491, 492, 493, 494, 495)
    62     (496, 497, 498, 499, 500, 501, 502, 503)
    63     (504, 505, 506, 507, 508, 509, 510, 511)

MEMORY
    RA               PA               SIZE
    0x80000000       0x180000000000   256G

IO
    DEVICE                           PSEUDONYM        OPTIONS
    pci@480                          pci_6
    pci@4c0                          pci_7
    pci@480/pci@1/pci@0/pci@a        /SYS/RCSA/PCIE13
    pci@480/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE14
    pci@4c0/pci@1/pci@0/pci@8        /SYS/RCSA/PCIE15
    pci@4c0/pci@1/pci@0/pci@4        /SYS/RCSA/PCIE16
    pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c /SYS/MB/SASHBA1
    pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@4 /SYS/RIO/NET2

Start the domains:

# ldm start app
LDom app started
# ldm start db
LDom db started

Make sure to start the vntsd service that was created, above.

# svcs -a | grep ldo
disabled        8:38:38 svc:/ldoms/vntsd:default
online          8:38:58 svc:/ldoms/agents:default
online          8:39:25 svc:/ldoms/ldmd:default
# svcadm enable vntsd

Now use the MAC address to configure the Solaris 11 Automated Installation.
Database Logical Domain

# telnet localhost 5000

{0} ok devalias
screen                   /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@7/display@0
disk7                    /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c/scsi@0/disk@p3
disk6                    /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c/scsi@0/disk@p2
disk5                    /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c/scsi@0/disk@p1
disk4                    /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c/scsi@0/disk@p0
scsi1                    /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@c/scsi@0
net3                     /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@4/network@0,1
net2                     /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@4/network@0
virtual-console          /virtual-devices/console@1
name                     aliases

{0} ok boot net2
Boot device: /pci@4c0/pci@1/pci@0/pci@c/pci@0/pci@4/network@0  File and args:
1000 Mbps full duplex Link up
Requesting Internet Address for xx:xx:xx:xx:xx:xx
Requesting Internet Address for xx:xx:xx:xx:xx:xx

WLS Logical Domain

# telnet localhost 5001

{0} ok devalias
hdd1                     /virtual-devices@100/channel-devices@200/disk@0
vnet1                    /virtual-devices@100/channel-devices@200/network@0
net                      /virtual-devices@100/channel-devices@200/network@0
disk                     /virtual-devices@100/channel-devices@200/disk@0
virtual-console          /virtual-devices/console@1
name                     aliases

{0} ok boot net
Boot device: /virtual-devices@100/channel-devices@200/network@0  File and args:
Requesting Internet Address for xx:xx:xx:xx:xx:xx
Requesting Internet Address for xx:xx:xx:xx:xx:xx

Repeat the process for the second SPARC T5-4, install Solaris, RAC, and the WebLogic Cluster, and you are ready to go. Maybe buying a SuperCluster would have been easier.

