Search Results

The search found 43,978 results on 1,760 pages for 'select case'.


  • WPF TreeView MouseDown

    - by imekon
    I've got something like this in a TreeView: <DataTemplate x:Key="myTemplate"> <StackPanel MouseDown="OnItemMouseDown"> ... </StackPanel> </DataTemplate> Using this I get the mouse down events if I click on items in the stack panel. However... there seems to be another item behind the stack panel that is the TreeViewItem - it's very hard to hit, but not impossible, and that's when the problems start to occur. I had a go at handling PreviewMouseDown on TreeViewItem, however that seems to require e.Handled = false otherwise standard tree view behaviour stops working. Ok, Here's the source code... MainWindow.xaml <Window x:Class="WPFMultiSelectTree.MainWindow" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:local="clr-namespace:WPFMultiSelectTree" Title="Multiple Selection Tree" Height="300" Width="300"> <Window.Resources> <!-- Declare the classes that convert bool to Visibility --> <local:VisibilityConverter x:Key="visibilityConverter"/> <local:VisibilityInverter x:Key="visibilityInverter"/> <!-- Set the style for any tree view item --> <Style TargetType="TreeViewItem"> <Style.Triggers> <DataTrigger Binding="{Binding Selected}" Value="True"> <Setter Property="Background" Value="DarkBlue"/> <Setter Property="Foreground" Value="White"/> </DataTrigger> </Style.Triggers> <EventSetter Event="PreviewMouseDown" Handler="OnTreePreviewMouseDown"/> </Style> <!-- Declare a hierarchical data template for the tree view items --> <HierarchicalDataTemplate x:Key="RecursiveTemplate" ItemsSource="{Binding Children}"> <StackPanel Margin="2" Orientation="Horizontal" MouseDown="OnTreeMouseDown"> <Ellipse Width="12" Height="12" Fill="Green"/> <TextBlock Margin="2" Text="{Binding Name}" Visibility="{Binding Editing, Converter={StaticResource visibilityInverter}}"/> <TextBox Margin="2" Text="{Binding Name}" KeyDown="OnTextBoxKeyDown" IsVisibleChanged="OnTextBoxIsVisibleChanged" Visibility="{Binding Editing, Converter={StaticResource visibilityConverter}}"/> <TextBlock Margin="2" Text="{Binding Index, StringFormat=({0})}"/> </StackPanel> </HierarchicalDataTemplate> <!-- Declare a simple template for a list box --> <DataTemplate x:Key="ListTemplate"> <TextBlock Text="{Binding Name}"/> </DataTemplate> </Window.Resources> <Grid> <!-- Declare the rows in this grid --> <Grid.RowDefinitions> <RowDefinition Height="Auto"/> <RowDefinition/> <RowDefinition Height="Auto"/> <RowDefinition/> </Grid.RowDefinitions> <!-- The first header --> <TextBlock Grid.Row="0" Margin="5" Background="PowderBlue">Multiple selection tree view</TextBlock> <!-- The tree view --> <TreeView Name="m_tree" Margin="2" Grid.Row="1" ItemsSource="{Binding Children}" ItemTemplate="{StaticResource RecursiveTemplate}"/> <!-- The second header --> <TextBlock Grid.Row="2" Margin="5" Background="PowderBlue">The currently selected items in the tree</TextBlock> <!-- The list box --> <ListBox Name="m_list" Margin="2" Grid.Row="3" ItemsSource="{Binding .}" ItemTemplate="{StaticResource ListTemplate}"/> </Grid> </Window> MainWindow.xaml.cs /// <summary> /// Interaction logic for MainWindow.xaml /// </summary> public partial class MainWindow : Window { private Container m_root; private Container m_first; private ObservableCollection<Container> m_selection; private string m_current; /// <summary> /// Constructor /// </summary> public MainWindow() { InitializeComponent(); m_selection = new ObservableCollection<Container>(); m_root = new Container("root"); for (int parents = 0; parents < 
50; parents++) { Container parent = new Container(String.Format("parent{0}", parents + 1)); for (int children = 0; children < 1000; children++) { parent.Add(new Container(String.Format("child{0}", children + 1))); } m_root.Add(parent); } m_tree.DataContext = m_root; m_list.DataContext = m_selection; m_first = null; } /// <summary> /// Has the shift key been pressed? /// </summary> private bool ShiftPressed { get { return Keyboard.IsKeyDown(Key.LeftShift) || Keyboard.IsKeyDown(Key.RightShift); } } /// <summary> /// Has the control key been pressed? /// </summary> private bool CtrlPressed { get { return Keyboard.IsKeyDown(Key.LeftCtrl) || Keyboard.IsKeyDown(Key.RightCtrl); } } /// <summary> /// Clear down the selection list /// </summary> private void DeselectAndClear() { foreach(Container container in m_selection) { container.Selected = false; } m_selection.Clear(); } /// <summary> /// Add the container to the list (if not already present), /// mark as selected /// </summary> /// <param name="container"></param> private void AddToSelection(Container container) { if (container == null) { return; } foreach (Container child in m_selection) { if (child == container) { return; } } container.Selected = true; m_selection.Add(container); } /// <summary> /// Remove container from list, mark as not selected /// </summary> /// <param name="container"></param> private void RemoveFromSelection(Container container) { m_selection.Remove(container); container.Selected = false; } /// <summary> /// Process single click on a tree item /// /// Normally just select an item /// /// SHIFT-Click extends selection /// CTRL-Click toggles a selection /// </summary> /// <param name="sender"></param> private void OnTreeSingleClick(object sender) { FrameworkElement element = sender as FrameworkElement; if (element != null) { Container container = element.DataContext as Container; if (container != null) { if (CtrlPressed) { if (container.Selected) { RemoveFromSelection(container); } else { AddToSelection(container); } } else if (ShiftPressed) { if (container.Parent == m_first.Parent) { if (container.Index < m_first.Index) { Container item = container; for (int i = container.Index; i < m_first.Index; i++) { AddToSelection(item); item = item.Next; if (item == null) { break; } } } else if (container.Index > m_first.Index) { Container item = m_first; for (int i = m_first.Index; i <= container.Index; i++) { AddToSelection(item); item = item.Next; if (item == null) { break; } } } } } else { DeselectAndClear(); m_first = container; AddToSelection(container); } } } } /// <summary> /// Process double click on tree item /// </summary> /// <param name="sender"></param> private void OnTreeDoubleClick(object sender) { FrameworkElement element = sender as FrameworkElement; if (element != null) { Container container = element.DataContext as Container; if (container != null) { container.Editing = true; m_current = container.Name; } } } /// <summary> /// Clicked on the stack panel in the tree view /// /// Double left click: /// /// Switch to editing mode (flips visibility of textblock and textbox) /// </summary> /// <param name="sender"></param> /// <param name="e"></param> private void OnTreeMouseDown(object sender, MouseButtonEventArgs e) { Debug.WriteLine("StackPanel mouse down"); switch(e.ChangedButton) { case MouseButton.Left: switch (e.ClickCount) { case 2: OnTreeDoubleClick(sender); e.Handled = true; break; } break; } } /// <summary> /// Clicked on tree view item in tree /// </summary> /// <param name="sender"></param> /// <param 
name="e"></param> private void OnTreePreviewMouseDown(object sender, MouseButtonEventArgs e) { Debug.WriteLine("TreeViewItem preview mouse down"); switch (e.ChangedButton) { case MouseButton.Left: switch (e.ClickCount) { case 1: { // We've had a single click on a tree view item // Unfortunately this is the WHOLE tree item, including the +/- // symbol to the left. The tree doesn't do a selection, so we // have to filter this out... MouseDevice device = e.Device as MouseDevice; Debug.WriteLine(String.Format("Tree item clicked on: {0}", device.DirectlyOver.GetType().ToString())); // This is bad. The whole point of WPF is for the code // not to know what the UI has - yet here we are testing for // it as a workaround. Sigh... if (device.DirectlyOver.GetType() != typeof(Path)) { OnTreeSingleClick(sender); } // Cannot say handled - if we do it stops the tree working! //e.Handled = true; } break; } break; } } /// <summary> /// Key press in text box /// /// Return key finishes editing /// Escape key finishes editing, restores original value (this doesn't work!) /// </summary> /// <param name="sender"></param> /// <param name="e"></param> private void OnTextBoxKeyDown(object sender, KeyEventArgs e) { switch(e.Key) { case Key.Return: { TextBox box = sender as TextBox; if (box != null) { Container container = box.DataContext as Container; if (container != null) { container.Editing = false; e.Handled = true; } } } break; case Key.Escape: { TextBox box = sender as TextBox; if (box != null) { Container container = box.DataContext as Container; if (container != null) { container.Editing = false; container.Name = m_current; e.Handled = true; } } } break; } } /// <summary> /// When text box becomes visible, grab focus and select all text in it. /// </summary> /// <param name="sender"></param> /// <param name="e"></param> private void OnTextBoxIsVisibleChanged(object sender, DependencyPropertyChangedEventArgs e) { bool visible = (bool)e.NewValue; if (visible) { TextBox box = sender as TextBox; if (box != null) { box.Focus(); box.SelectAll(); } } } } Here's the Container class public class Container : INotifyPropertyChanged { private string m_name; private ObservableCollection<Container> m_children; private Container m_parent; private bool m_selected; private bool m_editing; /// <summary> /// Constructor /// </summary> /// <param name="name">name of object</param> public Container(string name) { m_name = name; m_children = new ObservableCollection<Container>(); m_parent = null; m_selected = false; m_editing = false; } /// <summary> /// Name of object /// </summary> public string Name { get { return m_name; } set { if (m_name != value) { m_name = value; OnPropertyChanged("Name"); } } } /// <summary> /// Index of object in parent's children /// /// If there's no parent, the index is -1 /// </summary> public int Index { get { if (m_parent != null) { return m_parent.Children.IndexOf(this); } return -1; } } /// <summary> /// Get the next item, assuming this is parented /// /// Returns null if end of list reached, or no parent /// </summary> public Container Next { get { if (m_parent != null) { int index = Index + 1; if (index < m_parent.Children.Count) { return m_parent.Children[index]; } } return null; } } /// <summary> /// List of children /// </summary> public ObservableCollection<Container> Children { get { return m_children; } } /// <summary> /// Selected status /// </summary> public bool Selected { get { return m_selected; } set { if (m_selected != value) { m_selected = value; OnPropertyChanged("Selected"); 
} } } /// <summary> /// Editing status /// </summary> public bool Editing { get { return m_editing; } set { if (m_editing != value) { m_editing = value; OnPropertyChanged("Editing"); } } } /// <summary> /// Parent of this object /// </summary> public Container Parent { get { return m_parent; } set { m_parent = value; } } /// <summary> /// WPF Property Changed event /// </summary> public event PropertyChangedEventHandler PropertyChanged; /// <summary> /// Handler to inform WPF that a property has changed /// </summary> /// <param name="name"></param> private void OnPropertyChanged(string name) { if (PropertyChanged != null) { PropertyChanged(this, new PropertyChangedEventArgs(name)); } } /// <summary> /// Add a child to this container /// </summary> /// <param name="child"></param> public void Add(Container child) { m_children.Add(child); child.m_parent = this; } /// <summary> /// Remove a child from this container /// </summary> /// <param name="child"></param> public void Remove(Container child) { m_children.Remove(child); child.m_parent = null; } } The two classes VisibilityConverter and VisibilityInverter are implementations of IValueConverter that translates bool to Visibility. They make sure the TextBlock is displayed when not editing, and the TextBox is displayed when editing.
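
    A less template-dependent alternative to the device.DirectlyOver.GetType() != typeof(Path) test in the code above is to walk up the visual tree from e.OriginalSource and decide from which template element the click actually landed in. A minimal sketch, assuming the default TreeViewItem template (where the expander glyph is a Path) and the StackPanel data template shown above; the helper name is illustrative:

        using System.Windows;
        using System.Windows.Controls;
        using System.Windows.Media;
        using System.Windows.Shapes;

        static class TreeViewHitTesting
        {
            // Walk from the element under the mouse up the visual tree.
            // Hitting a Path means the click landed on the +/- expander
            // glyph; reaching the data template's StackPanel means it
            // landed on real item content.
            public static bool IsOnItemContent(DependencyObject source)
            {
                while (source is Visual)
                {
                    if (source is Path) return false;       // expander glyph
                    if (source is StackPanel) return true;  // template content
                    source = VisualTreeHelper.GetParent(source);
                }
                return false;
            }
        }

    Inside OnTreePreviewMouseDown this would be called as TreeViewHitTesting.IsOnItemContent(e.OriginalSource as DependencyObject) in place of the DirectlyOver type check.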

  • Reading input from a text file omits the first entry and adds a nonsense value to the end?

    - by Greenhouse Gases
    Hi there When I input locations from a txt file I am getting a peculiar error where it seems to miss off the first entry, yet add a garbage entry to the end of the link list (it is designed to take the name, latitude and longitude for each location you will notice). I imagine this to be an issue with where it starts collecting the inputs and where it stops but I cant find the error!! It reads the first line correctly but then skips to the next before adding it because during testing for the bug it had no record of the first location Lisbon though whilst stepping into the method call it was reading it. Very bizarre but hopefully someone knows the issue. Here is firstly my header file: #include <string> struct locationNode { char nodeCityName [35]; double nodeLati; double nodeLongi; locationNode* Next; void CorrectCase() // Correct upper and lower case letters of input { int MAX_SIZE = 35; int firstLetVal = this->nodeCityName[0], letVal; int n = 1; // variable for name index from second letter onwards if((this->nodeCityName[0] >90) && (this->nodeCityName[0] < 123)) // First letter is lower case { firstLetVal = firstLetVal - 32; // Capitalise first letter this->nodeCityName[0] = firstLetVal; } while(n <= MAX_SIZE - 1) { if((this->nodeCityName[n] >= 65) && (this->nodeCityName[n] <= 90)) { letVal = this->nodeCityName[n] + 32; this->nodeCityName[n] = letVal; } n++; } //cityNameInput = this->nodeCityName; } }; class Locations { private: int size; public: Locations(){ }; // constructor for the class locationNode* Head; //int Add(locationNode* Item); }; And here is the file containing main: // U08221.cpp : main project file. #include "stdafx.h" #include "Locations.h" #include <iostream> #include <string> #include <fstream> using namespace std; int n = 0,x, locationCount = 0, MAX_SIZE = 35; string cityNameInput; char targetCity[35]; bool acceptedInput = false, userInputReq = true, match = false, nodeExists = false;// note: addLocation(), set to true to enable user input as opposed to txt file locationNode *start_ptr = NULL; // pointer to first entry in the list locationNode *temp, *temp2; // Part is a pointer to a new locationNode we can assign changing value followed by a call to Add locationNode *seek, *bridge; void setElementsNull(char cityParam[]) { int y=0, count =0; while(cityParam[y] != NULL) { y++; } while(y < MAX_SIZE) { cityParam[y] = NULL; y++; } } void addLocation() { temp = new locationNode; // declare the space for a pointer item and assign a temporary pointer to it if(!userInputReq) // bool that determines whether user input is required in adding the node to the list { cout << endl << "Enter the name of the location: "; cin >> temp->nodeCityName; temp->CorrectCase(); setElementsNull(temp->nodeCityName); cout << endl << "Please enter the latitude value for this location: "; cin >> temp->nodeLati; cout << endl << "Please enter the longitude value for this location: "; cin >> temp->nodeLongi; cout << endl; } temp->Next = NULL; //set to NULL as when one is added it is currently the last in the list and so can not point to the next if(start_ptr == NULL){ // if list is currently empty, start_ptr will point to this node start_ptr = temp; } else { temp2 = start_ptr; // We know this is not NULL - list not empty! while (temp2->Next != NULL) { temp2 = temp2->Next; // Move to next link in chain until reach end of list } temp2->Next = temp; } ++locationCount; // increment counter for number of records in list if(!userInputReq){ cout << "Location sucessfully added to the database! 
There are " << locationCount << " location(s) stored" << endl; } } void populateList(){ ifstream inputFile; inputFile.open ("locations.txt", ios::in); userInputReq = true; temp = new locationNode; // declare the space for a pointer item and assign a temporary pointer to it do { inputFile.get(temp->nodeCityName, 35, ' '); setElementsNull(temp->nodeCityName); inputFile >> temp->nodeLati; inputFile >> temp->nodeLongi; setElementsNull(temp->nodeCityName); if(temp->nodeCityName[0] == 10) //remove linefeed from input { for(int i = 0; temp->nodeCityName[i] != NULL; i++) { temp->nodeCityName[i] = temp->nodeCityName[i + 1]; } } addLocation(); } while(!inputFile.eof()); userInputReq = false; cout << "Successful!" << endl << "List contains: " << locationCount << " entries" << endl; cout << endl; inputFile.close(); } bool nodeExistTest(char targetCity[]) // see if entry is present in the database { match = false; seek = start_ptr; int letters = 0, letters2 = 0, x = 0, y = 0; while(targetCity[y] != NULL) { letters2++; y++; } while(x <= locationCount) // locationCount is number of entries currently in list { y=0, letters = 0; while(seek->nodeCityName[y] != NULL) // count letters in the current name { letters++; y++; } if(letters == letters2) // same amount of letters in the name { y = 0; while(y <= letters) // compare each letter against one another { if(targetCity[y] == seek->nodeCityName[y]) { match = true; y++; } else { match = false; y = letters + 1; // no match, terminate comparison } } } if(match) { x = locationCount + 1; //found match so terminate loop } else{ if(seek->Next != NULL) { bridge = seek; seek = seek->Next; x++; } else { x = locationCount + 1; // end of list so terminate loop } } } return match; } void deleteRecord() // complete this { int junction = 0; locationNode *place; cout << "Enter the name of the city you wish to remove" << endl; cin >> targetCity; setElementsNull(targetCity); if(nodeExistTest(targetCity)) //if this node does exist { if(seek == start_ptr) // if it is the first in the list { junction = 1; } if(seek != start_ptr && seek->Next == NULL) // if it is last in the list { junction = 2; } switch(junction) // will alter list accordingly dependant on where the searched for link is { case 1: start_ptr = start_ptr->Next; delete seek; --locationCount; break; case 2: place = seek; seek = bridge; delete place; --locationCount; break; default: bridge->Next = seek->Next; delete seek; --locationCount; break; } } else { cout << targetCity << "That entry does not currently exist" << endl << endl << endl; } } void searchDatabase() { char choice; cout << "Enter search term..." << endl; cin >> targetCity; if(nodeExistTest(targetCity)) { cout << "Entry: " << endl << endl; } else { cout << "Sorry, that city is not currently present in the list." << endl << "Would you like to add this city now Y/N?" << endl; cin >> choice; /*while(choice != ('Y' || 'N')) { cout << "Please enter a valid choice..." 
<< endl; cin >> choice; }*/ switch(choice) { case 'Y': addLocation(); break; case 'N': break; default : cout << "Invalid choice" << endl; break; } } } void printDatabase() { temp = start_ptr; // set temp to the start of the list do { if (temp == NULL) { cout << "You have reached the end of the database" << endl; } else { // Display details for what temp points to at that stage cout << "Location : " << temp->nodeCityName << endl; cout << "Latitude : " << temp->nodeLati << endl; cout << "Longitude : " << temp->nodeLongi << endl; cout << endl; // Move on to next locationNode if one exists temp = temp->Next; } } while (temp != NULL); } void nameValidation(string name) { n = 0; // start from first letter x = name.size(); while(!acceptedInput) { if((name[n] >= 65) && (name[n] <= 122)) // is in the range of letters { while(n <= x - 1) { while((name[n] >=91) && (name[n] <=97)) // ERROR!! { cout << "Please enter a valid city name" << endl; cin >> name; } n++; } } else { cout << "Please enter a valid city name" << endl; cin >> name; } if(n <= x - 1) { acceptedInput = true; } } cityNameInput = name; } int main(array<System::String ^> ^args) { //main contains test calls to functions at present cout << "Populating list..."; populateList(); printDatabase(); deleteRecord(); printDatabase(); cin >> cityNameInput; } The text file contains this (ignore the names, they are just for testing!!): Lisbon 45 47 Fattah 45 47 Darius 42 49 Peter 45 27 Sarah 85 97 Michelle 45 47 John 25 67 Colin 35 87 Shiron 40 57 George 34 45 Sean 22 33 The output omits Lisbon, but adds on a garbage entry with nonsense values. Any ideas why? Thank you in advance.
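
    A likely cause of both symptoms, for reference: addLocation() starts with temp = new locationNode, which replaces the node that populateList() has just filled in, so each appended node only receives its data on the following iteration. The first record (Lisbon) is read into a node that is never appended, and the last appended node never gets a successful read, which is the garbage entry. Looping on !inputFile.eof() compounds this, because eof() only becomes true after a read has already failed. A minimal standalone sketch of the usual read-then-check idiom, using std::string and std::vector instead of the fixed-size char arrays and hand-rolled list above:

        #include <fstream>
        #include <iostream>
        #include <string>
        #include <vector>

        struct Location {
            std::string name;
            double lati = 0.0;
            double longi = 0.0;
        };

        int main() {
            std::ifstream in("locations.txt");
            std::vector<Location> list;
            Location rec;
            // Test the stream state after each extraction: the body runs
            // only for records that were actually read, so the first line
            // is kept and no junk record is appended after the last one.
            while (in >> rec.name >> rec.lati >> rec.longi) {
                list.push_back(rec);
            }
            for (const Location& l : list) {
                std::cout << l.name << " " << l.lati << " " << l.longi << "\n";
            }
        }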

  • Example: Communication between Activity and Service using Messaging

    - by Lance Lefebure
    I couldn't find any examples of how to send messages between an activity and a service, and spent far too many hours figuring this out. Here is an example project for others to reference. This example allows you to start or stop a service directly, and separately bind/unbind from the service. When the service is running, it increments a number at 10Hz. If the activity is bound to the service, it will display the current value. Data is transferred as an Integer and as a String so you can see how to do that two different ways. There are also buttons in the activity to send messages to the service (changes the increment-by value). Screenshot: AndroidManifest.xml: <?xml version="1.0" encoding="utf-8"?> <manifest xmlns:android="http://schemas.android.com/apk/res/android" package="com.exampleservice" android:versionCode="1" android:versionName="1.0"> <application android:icon="@drawable/icon" android:label="@string/app_name"> <activity android:name=".MainActivity" android:label="@string/app_name"> <intent-filter> <action android:name="android.intent.action.MAIN" /> <category android:name="android.intent.category.LAUNCHER" /> </intent-filter> </activity> <service android:name=".MyService"></service> </application> <uses-sdk android:minSdkVersion="8" /> </manifest> res\values\strings.xml: <?xml version="1.0" encoding="utf-8"?> <resources> <string name="app_name">ExampleService</string> <string name="service_started">Example Service started</string> <string name="service_label">Example Service Label</string> </resources> res\layout\main.xml: <?xml version="1.0" encoding="utf-8"?> <LinearLayout xmlns:android="http://schemas.android.com/apk/res/android" android:orientation="vertical" android:layout_width="fill_parent" android:layout_height="fill_parent" > <RelativeLayout android:id="@+id/RelativeLayout01" android:layout_width="fill_parent" android:layout_height="wrap_content"> <Button android:id="@+id/btnStart" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Start Service"></Button> <Button android:id="@+id/btnStop" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Stop Service" android:layout_alignParentRight="true"></Button> </RelativeLayout> <RelativeLayout android:id="@+id/RelativeLayout02" android:layout_width="fill_parent" android:layout_height="wrap_content"> <Button android:id="@+id/btnBind" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Bind to Service"></Button> <Button android:id="@+id/btnUnbind" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Unbind from Service" android:layout_alignParentRight="true"></Button> </RelativeLayout> <TextView android:id="@+id/textStatus" android:textSize="24sp" android:layout_width="fill_parent" android:layout_height="wrap_content" android:text="Status Goes Here" /> <TextView android:id="@+id/textIntValue" android:textSize="24sp" android:layout_width="fill_parent" android:layout_height="wrap_content" android:text="Integer Value Goes Here" /> <TextView android:id="@+id/textStrValue" android:textSize="24sp" android:layout_width="fill_parent" android:layout_height="wrap_content" android:text="String Value Goes Here" /> <RelativeLayout android:id="@+id/RelativeLayout03" android:layout_width="fill_parent" android:layout_height="wrap_content"> <Button android:id="@+id/btnUpby1" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Increment by 1"></Button> <Button 
android:id="@+id/btnUpby10" android:layout_width="wrap_content" android:layout_height="wrap_content" android:text="Increment by 10" android:layout_alignParentRight="true"></Button> </RelativeLayout> </LinearLayout> src\com.exampleservice\MainActivity.java: package com.exampleservice; import android.app.Activity; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.content.ServiceConnection; import android.os.Bundle; import android.os.Handler; import android.os.IBinder; import android.os.Message; import android.os.Messenger; import android.os.RemoteException; import android.util.Log; import android.view.View; import android.view.View.OnClickListener; import android.widget.Button; import android.widget.TextView; public class MainActivity extends Activity { Button btnStart, btnStop, btnBind, btnUnbind, btnUpby1, btnUpby10; TextView textStatus, textIntValue, textStrValue; Messenger mService = null; boolean mIsBound; final Messenger mMessenger = new Messenger(new IncomingHandler()); class IncomingHandler extends Handler { @Override public void handleMessage(Message msg) { switch (msg.what) { case MyService.MSG_SET_INT_VALUE: textIntValue.setText("Int Message: " + msg.arg1); break; case MyService.MSG_SET_STRING_VALUE: String str1 = msg.getData().getString("str1"); textStrValue.setText("Str Message: " + str1); break; default: super.handleMessage(msg); } } } private ServiceConnection mConnection = new ServiceConnection() { public void onServiceConnected(ComponentName className, IBinder service) { mService = new Messenger(service); textStatus.setText("Attached."); try { Message msg = Message.obtain(null, MyService.MSG_REGISTER_CLIENT); msg.replyTo = mMessenger; mService.send(msg); } catch (RemoteException e) { // In this case the service has crashed before we could even do anything with it } } public void onServiceDisconnected(ComponentName className) { // This is called when the connection with the service has been unexpectedly disconnected - process crashed. 
mService = null; textStatus.setText("Disconnected."); } }; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); btnStart = (Button)findViewById(R.id.btnStart); btnStop = (Button)findViewById(R.id.btnStop); btnBind = (Button)findViewById(R.id.btnBind); btnUnbind = (Button)findViewById(R.id.btnUnbind); textStatus = (TextView)findViewById(R.id.textStatus); textIntValue = (TextView)findViewById(R.id.textIntValue); textStrValue = (TextView)findViewById(R.id.textStrValue); btnUpby1 = (Button)findViewById(R.id.btnUpby1); btnUpby10 = (Button)findViewById(R.id.btnUpby10); btnStart.setOnClickListener(btnStartListener); btnStop.setOnClickListener(btnStopListener); btnBind.setOnClickListener(btnBindListener); btnUnbind.setOnClickListener(btnUnbindListener); btnUpby1.setOnClickListener(btnUpby1Listener); btnUpby10.setOnClickListener(btnUpby10Listener); restoreMe(savedInstanceState); CheckIfServiceIsRunning(); } @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); outState.putString("textStatus", textStatus.getText().toString()); outState.putString("textIntValue", textIntValue.getText().toString()); outState.putString("textStrValue", textStrValue.getText().toString()); } private void restoreMe(Bundle state) { if (state!=null) { textStatus.setText(state.getString("textStatus")); textIntValue.setText(state.getString("textIntValue")); textStrValue.setText(state.getString("textStrValue")); } } private void CheckIfServiceIsRunning() { //If the service is running when the activity starts, we want to automatically bind to it. if (MyService.isRunning()) { doBindService(); } } private OnClickListener btnStartListener = new OnClickListener() { public void onClick(View v){ startService(new Intent(MainActivity.this, MyService.class)); } }; private OnClickListener btnStopListener = new OnClickListener() { public void onClick(View v){ doUnbindService(); stopService(new Intent(MainActivity.this, MyService.class)); } }; private OnClickListener btnBindListener = new OnClickListener() { public void onClick(View v){ doBindService(); } }; private OnClickListener btnUnbindListener = new OnClickListener() { public void onClick(View v){ doUnbindService(); } }; private OnClickListener btnUpby1Listener = new OnClickListener() { public void onClick(View v){ sendMessageToService(1); } }; private OnClickListener btnUpby10Listener = new OnClickListener() { public void onClick(View v){ sendMessageToService(10); } }; private void sendMessageToService(int intvaluetosend) { if (mIsBound) { if (mService != null) { try { Message msg = Message.obtain(null, MyService.MSG_SET_INT_VALUE, intvaluetosend, 0); msg.replyTo = mMessenger; mService.send(msg); } catch (RemoteException e) { } } } } void doBindService() { bindService(new Intent(this, MyService.class), mConnection, Context.BIND_AUTO_CREATE); mIsBound = true; textStatus.setText("Binding."); } void doUnbindService() { if (mIsBound) { // If we have received the service, and hence registered with it, then now is the time to unregister. if (mService != null) { try { Message msg = Message.obtain(null, MyService.MSG_UNREGISTER_CLIENT); msg.replyTo = mMessenger; mService.send(msg); } catch (RemoteException e) { // There is nothing special we need to do if the service has crashed. } } // Detach our existing connection. 
unbindService(mConnection); mIsBound = false; textStatus.setText("Unbinding."); } } @Override protected void onDestroy() { super.onDestroy(); try { doUnbindService(); } catch (Throwable t) { Log.e("MainActivity", "Failed to unbind from the service", t); } } } src\com.exampleservice\MyService.java: package com.exampleservice; import java.util.ArrayList; import java.util.Timer; import java.util.TimerTask; import android.app.Notification; import android.app.NotificationManager; import android.app.PendingIntent; import android.app.Service; import android.content.Intent; import android.os.Bundle; import android.os.Handler; import android.os.IBinder; import android.os.Message; import android.os.Messenger; import android.os.RemoteException; import android.util.Log; public class MyService extends Service { private NotificationManager nm; private Timer timer = new Timer(); private int counter = 0, incrementby = 1; private static boolean isRunning = false; ArrayList<Messenger> mClients = new ArrayList<Messenger>(); // Keeps track of all current registered clients. int mValue = 0; // Holds last value set by a client. static final int MSG_REGISTER_CLIENT = 1; static final int MSG_UNREGISTER_CLIENT = 2; static final int MSG_SET_INT_VALUE = 3; static final int MSG_SET_STRING_VALUE = 4; final Messenger mMessenger = new Messenger(new IncomingHandler()); // Target we publish for clients to send messages to IncomingHandler. @Override public IBinder onBind(Intent intent) { return mMessenger.getBinder(); } class IncomingHandler extends Handler { // Handler of incoming messages from clients. @Override public void handleMessage(Message msg) { switch (msg.what) { case MSG_REGISTER_CLIENT: mClients.add(msg.replyTo); break; case MSG_UNREGISTER_CLIENT: mClients.remove(msg.replyTo); break; case MSG_SET_INT_VALUE: incrementby = msg.arg1; break; default: super.handleMessage(msg); } } } private void sendMessageToUI(int intvaluetosend) { for (int i=mClients.size()-1; i>=0; i--) { try { // Send data as an Integer mClients.get(i).send(Message.obtain(null, MSG_SET_INT_VALUE, intvaluetosend, 0)); //Send data as a String Bundle b = new Bundle(); b.putString("str1", "ab" + intvaluetosend + "cd"); Message msg = Message.obtain(null, MSG_SET_STRING_VALUE); msg.setData(b); mClients.get(i).send(msg); } catch (RemoteException e) { // The client is dead. Remove it from the list; we are going through the list from back to front so this is safe to do inside the loop. mClients.remove(i); } } } @Override public void onCreate() { super.onCreate(); Log.i("MyService", "Service Started."); showNotification(); timer.scheduleAtFixedRate(new TimerTask(){ public void run() {onTimerTick();}}, 0, 100L); isRunning = true; } private void showNotification() { nm = (NotificationManager)getSystemService(NOTIFICATION_SERVICE); // In this sample, we'll use the same text for the ticker and the expanded notification CharSequence text = getText(R.string.service_started); // Set the icon, scrolling text and timestamp Notification notification = new Notification(R.drawable.icon, text, System.currentTimeMillis()); // The PendingIntent to launch our activity if the user selects this notification PendingIntent contentIntent = PendingIntent.getActivity(this, 0, new Intent(this, MainActivity.class), 0); // Set the info for the views that show in the notification panel. notification.setLatestEventInfo(this, getText(R.string.service_label), text, contentIntent); // Send the notification. // We use a layout id because it is a unique number. 
We use it later to cancel. nm.notify(R.string.service_started, notification); } @Override public int onStartCommand(Intent intent, int flags, int startId) { Log.i("MyService", "Received start id " + startId + ": " + intent); return START_STICKY; // run until explicitly stopped. } public static boolean isRunning() { return isRunning; } private void onTimerTick() { Log.i("TimerTick", "Timer doing work." + counter); try { counter += incrementby; sendMessageToUI(counter); } catch (Throwable t) { //you should always ultimately catch all exceptions in timer tasks. Log.e("TimerTick", "Timer Tick Failed.", t); } } @Override public void onDestroy() { super.onDestroy(); if (timer != null) {timer.cancel();} counter=0; nm.cancel(R.string.service_started); // Cancel the persistent notification. Log.i("MyService", "Service Stopped."); isRunning = false; } }
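
    One caveat on the notification code above: the Notification(int, CharSequence, long) constructor and setLatestEventInfo() are the API-8-era calls and were deprecated in later Android releases. A hedged equivalent for the body of showNotification() using NotificationCompat.Builder, assuming the AndroidX core library is available; the channel id string is illustrative, and API 26+ additionally requires creating that channel via NotificationManager.createNotificationChannel():

        // import androidx.core.app.NotificationCompat;
        PendingIntent contentIntent = PendingIntent.getActivity(
                this, 0, new Intent(this, MainActivity.class), 0);
        Notification notification =
                new NotificationCompat.Builder(this, "example_channel") // channel id is illustrative
                        .setSmallIcon(R.drawable.icon)
                        .setTicker(getText(R.string.service_started))
                        .setContentTitle(getText(R.string.service_label))
                        .setContentText(getText(R.string.service_started))
                        .setContentIntent(contentIntent)
                        .build();
        nm.notify(R.string.service_started, notification);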

  • XML failing to deserialise

    - by Carnotaurus
    I call a method to get my pages [see GetPages(String xmlFullFilePath)]. The FromXElement method is supposed to deserialise the LitePropertyData elements to strongly type LitePropertyData objects. Instead it fails on the following line: return (T)xmlSerializer.Deserialize(memoryStream); and gives the following error: <LitePropertyData xmlns=''> was not expected. What am I doing wrong? I have included the methods that I call and the xml data: public static T FromXElement<T>(this XElement xElement) { using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(xElement.ToString()))) { var xmlSerializer = new XmlSerializer(typeof(T)); return (T)xmlSerializer.Deserialize(memoryStream); } } public static List<LitePageData> GetPages(String xmlFullFilePath) { XDocument document = XDocument.Load(xmlFullFilePath); List<LitePageData> results = (from record in document.Descendants("row") select new LitePageData { Guid = IsValid(record, "Guid") ? record.Element("Guid").Value : null, ParentID = IsValid(record, "ParentID") ? Convert.ToInt32(record.Element("ParentID").Value) : (Int32?)null, Created = Convert.ToDateTime(record.Element("Created").Value), Changed = Convert.ToDateTime(record.Element("Changed").Value), Name = record.Element("Name").Value, ID = Convert.ToInt32(record.Element("ID").Value), LitePageTypeID = IsValid(record, "ParentID") ? Convert.ToInt32(record.Element("ParentID").Value) : (Int32?)null, Html = record.Element("Html").Value, FriendlyName = record.Element("FriendlyName").Value, Properties = record.Element("Properties") != null ? record.Element("Properties").Element("LitePropertyData").FromXElement<List<LitePropertyData>>() : new List<LitePropertyData>() }).ToList(); return results; } Here is the xml: <?xml version="1.0" encoding="utf-8"?> <root> <rows> <row> <ID>1</ID> <ImageUrl></ImageUrl> <Html>Home page</Html> <Created>01-01-2012</Created> <Changed>01-01-2012</Changed> <Name>Home page</Name> <FriendlyName>home-page</FriendlyName> </row> <row xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <Guid>edeaf468-f490-4271-bf4d-be145bc6a1fd</Guid> <ID>8</ID> <Name>Unused</Name> <ParentID>1</ParentID> <Created>2006-03-25T10:57:17</Created> <Changed>2012-07-17T12:24:30.0984747+01:00</Changed> <ChangedBy /> <LitePageTypeID xsi:nil="true" /> <Html> What is the purpose of this option? This option checks the current document for accessibility issues. It uses Bobby to provide details of whether the current web page conforms to W3C's WCAG criteria for web content accessibility. Issues with Bobby and Cynthia Bobby and Cynthia are free services that supposedly allow a user to expose web page accessibility barriers. It is something of a guide but perhaps a blunt instrument. I tested a few of the webpages that I have designed. Sure enough, my pages fall short and for good reason. I am not about to claim that Bobby and Cynthia are useless. Although it is useful and commendable tool, it project appears to be overly ambitious. Nevertheless, let me explain my issues with Bobby and Cynthia: First, certain W3C standards for designing web documents are often too strict and unworkable. For instance, in some versions W3C standards for HTML, certain tags should not include a particular attribute, whereas in others they are requisite if the document is to be ???well-formed???. The standard that a designer chooses is determined usually by the requirements specification document. 
This specifies which browsers and versions of those browsers that the web page is expected to correctly display. Forcing a hypertext document to conform strictly to a specific W3C standard for HTML is often no simple task. In the worst case, it cannot conform without losing some aesthetics or accessibility functionality. Second, the case of HTML documents is not an isolated case. Standards for XML, XSL, JavaScript, VBScript, are analogous. Therefore, you might imagine the problems when you begin to combine these languages and formats in an HTML document. Third, there is always more than one way to skin a cat. For example, Bobby and Cynthia may flag those IMG tags that do not contain a TITLE attribute. There might be good reason that a web developer chooses not to include the title attribute. The title attribute has a limited numbers of characters and does not support carriage returns. This is a major defect in the design of this tag. In fact, before the TITLE attribute was supported, there was the ALT attribute. Most browsers support both, yet they both perform a similar function. However, both attributes share the same deficiencies. In practice, there are instances where neither attribute would be used. Instead, for example, the developer would write some JavaScript or VBScript to circumvent these deficiencies. The concern is that Bobby and Cynthia would not notice this because it does not ???understand??? what the JavaScript does. </Html> <FriendlyName>unused</FriendlyName> <IsDeleted>false</IsDeleted> <Properties> <LitePropertyData> <Description>Image for the page</Description> <DisplayEditUI>true</DisplayEditUI> <OwnerTab>1</OwnerTab> <DisplayName>Image Url</DisplayName> <FieldOrder>1</FieldOrder> <IsRequired>false</IsRequired> <Name>ImageUrl</Name> <IsModified>false</IsModified> <ParentPageID>3</ParentPageID> <Type>String</Type> <Value xsi:type="xsd:string">smarter.jpg</Value> </LitePropertyData> <LitePropertyData> <Description>WebItemApplicationEnum</Description> <DisplayEditUI>true</DisplayEditUI> <OwnerTab>1</OwnerTab> <DisplayName>WebItemApplicationEnum</DisplayName> <FieldOrder>1</FieldOrder> <IsRequired>false</IsRequired> <Name>WebItemApplicationEnum</Name> <IsModified>false</IsModified> <ParentPageID>3</ParentPageID> <Type>Number</Type> <Value xsi:type="xsd:string">1</Value> </LitePropertyData> </Properties> <Seo> <Author>Phil Carney</Author> <Classification /> <Copyright>Carnotaurus</Copyright> <Description> What is the purpose of this option? This option checks the current document for accessibility issues. It uses Bobby to provide details of whether the current web page conforms to W3C's WCAG criteria for web content accessibility. Issues with Bobby and Cynthia Bobby and Cynthia are free services that supposedly allow a user to expose web page accessibility barriers. It is something of a guide but perhaps a blunt instrument. I tested a few of the webpages that I have designed. Sure enough, my pages fall short and for good reason. I am not about to claim that Bobby and Cynthia are useless. Although it is useful and commendable tool, it project appears to be overly ambitious. Nevertheless, let me explain my issues with Bobby and Cynthia: First, certain W3C standards for designing web documents are often too strict and unworkable. For instance, in some versions W3C standards for HTML, certain tags should not include a particular attribute, whereas in others they are requisite if the document is to be ???well-formed???. 
The standard that a designer chooses is determined usually by the requirements specification document. This specifies which browsers and versions of those browsers that the web page is expected to correctly display. Forcing a hypertext document to conform strictly to a specific W3C standard for HTML is often no simple task. In the worst case, it cannot conform without losing some aesthetics or accessibility functionality. Second, the case of HTML documents is not an isolated case. Standards for XML, XSL, JavaScript, VBScript, are analogous. Therefore, you might imagine the problems when you begin to combine these languages and formats in an HTML document. Third, there is always more than one way to skin a cat. For example, Bobby and Cynthia may flag those IMG tags that do not contain a TITLE attribute. There might be good reason that a web developer chooses not to include the title attribute. The title attribute has a limited numbers of characters and does not support carriage returns. This is a major defect in the design of this tag. In fact, before the TITLE attribute was supported, there was the ALT attribute. Most browsers support both, yet they both perform a similar function. However, both attributes share the same deficiencies. In practice, there are instances where neither attribute would be used. Instead, for example, the developer would write some JavaScript or VBScript to circumvent these deficiencies. The concern is that Bobby and Cynthia would not notice this because it does not ???understand??? what the JavaScript does. </Description> <Keywords>unused</Keywords> <Title>unused</Title> </Seo> </row> </rows> </root> EDIT Here are my entities: public class LitePropertyData { public virtual string Description { get; set; } public virtual bool DisplayEditUI { get; set; } public int OwnerTab { get; set; } public virtual string DisplayName { get; set; } public int FieldOrder { get; set; } public bool IsRequired { get; set; } public string Name { get; set; } public virtual bool IsModified { get; set; } public virtual int ParentPageID { get; set; } public LiteDataType Type { get; set; } public object Value { get; set; } } [Serializable] public class LitePageData { public String Guid { get; set; } public Int32 ID { get; set; } public String Name { get; set; } public Int32? ParentID { get; set; } public DateTime Created { get; set; } public String CreatedBy { get; set; } public DateTime Changed { get; set; } public String ChangedBy { get; set; } public Int32? LitePageTypeID { get; set; } public String Html { get; set; } public String FriendlyName { get; set; } public Boolean IsDeleted { get; set; } public List<LitePropertyData> Properties { get; set; } public LiteSeoPageData Seo { get; set; } /// <summary> /// Saves the specified XML full file path. /// </summary> /// <param name="xmlFullFilePath">The XML full file path.</param> public void Save(String xmlFullFilePath) { XDocument doc = XDocument.Load(xmlFullFilePath); XElement demoNode = this.ToXElement<LitePageData>(); demoNode.Name = "row"; doc.Descendants("rows").Single().Add(demoNode); doc.Save(xmlFullFilePath); } }
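
    For reference, the "<LitePropertyData xmlns=''> was not expected" error is XmlSerializer's standard complaint when the root element of the XML it is given does not match the root it expects for the requested type: GetPages passes a single <LitePropertyData> element while asking for List<LitePropertyData>, whose expected root is <ArrayOfLitePropertyData>. A minimal sketch of one common workaround: deserialize each <LitePropertyData> child of <Properties> individually and collect the results (this assumes LitePropertyData itself round-trips through XmlSerializer; the helper name is illustrative):

        using System.Collections.Generic;
        using System.Linq;
        using System.Xml.Linq;
        using System.Xml.Serialization;

        static class LitePropertyXml
        {
            // Deserialize the <LitePropertyData> elements one at a time so
            // the serializer's expected root always matches what it is given.
            public static List<LitePropertyData> ReadProperties(XElement properties)
            {
                var serializer = new XmlSerializer(typeof(LitePropertyData));
                return properties
                    .Elements("LitePropertyData")
                    .Select(e => (LitePropertyData)serializer.Deserialize(e.CreateReader()))
                    .ToList();
            }
        }

    In GetPages the Properties assignment would then read: Properties = record.Element("Properties") != null ? LitePropertyXml.ReadProperties(record.Element("Properties")) : new List<LitePropertyData>().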

    Read the article

  • XML\Jquery create listings based on user selection

    - by Sirius Mane
    Alright, so what I need to try and accomplish is having a static web page that will display information pulled from an XML document and render it to the screen without refreshing. Basic AJAX stuff I guess. The trick is, as I'm trying to think this through I keep coming into 'logical' barriers mentally. Objectives: -Have a chart which displays baseball team names, wins, losses, ties. In my XML doc there is a 'pending' status, so games not completed should not be displayed.(Need help here) -Have a selection list which allows you to select a team which is populated from XML doc. (done) -Upon selecting a particular team from the aforementioned selection list the page should display in a separate area all of the planned games for that team. Including pending. Basically all of the games associated with that team and the dates (which is included in the XML file). (Need help here) What I have so far: HTML\JS <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <link rel="stylesheet" href="batty.css" type="text/css" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Little Batty League</title> <script type="text/javascript" src="library.js"></script> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript"> var IE = window.ActiveXObject ? true: false; var MOZ = document.implementation.createDocument ? true: false; $(document).ready(function(){ $.ajax({ type: "GET", url: "schedule.xml", dataType: "xml", success: function(xml) { var select = $('#mySelect'); $(xml).find('Teams').each(function(){ var title = $(this).find('Team').text(); select.append("<option/><option class='ddheader'>"+title+"</option>"); }); select.children(":first").text("please make a selection").attr("selected",true); } }); }); </script> </script> </head> <body onLoad="init()"> <!-- container start --> <div id="container"> <!-- banner start --> <div id="banner"> <img src="images/mascot.jpg" width="324" height="112" alt="Mascot" /> <!-- buttons start --> <table width="900" border="0" cellpadding="0" cellspacing="0"> <tr> <td><div class="menuButton"><a href="index.html">Home</a></div></td> <td><div class="menuButton"><a href="schedule.html">Schedule</a></div></td> <td><div class="menuButton"><a href="contact.html">Contact</a></div></td> <td><div class="menuButton"><a href="about.html">About</a></div></td> </tr> </table> <!-- buttons end --> </div> <!-- banner end --> <!-- content start --> <div id="content"> <br /> <form> <select id="mySelect"> <option>please make a selection</option> </select> </form> </div> <!-- content end --> <!-- footer start --> <div id="footer"> &copy; 2012 Batty League </div> <!-- footer end --> </div> <!-- container end --> </body> </html> And the XML is: <?xml version="1.0" encoding="utf-8"?> <Schedule season="1"> <Teams> <Team>Bluejays</Team> </Teams> <Teams> <Team>Chickens</Team> </Teams> <Teams> <Team>Lions</Team> </Teams> <Teams> <Team>Pixies</Team> </Teams> <Teams> <Team>Zombies</Team> </Teams> <Teams> <Team>Wombats</Team> </Teams> <Game status="Played"> <Home_Team>Chickens</Home_Team> <Away_Team>Bluejays</Away_Team> <Date>2012-01-10T09:00:00</Date> </Game> <Game status="Pending"> <Home_Team>Bluejays </Home_Team> <Away_Team>Chickens</Away_Team> <Date>2012-01-11T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Bluejays</Home_Team> <Away_Team>Lions</Away_Team> <Date>2012-01-18T09:00:00</Date> </Game> <Game 
status="Played"> <Home_Team>Lions</Home_Team> <Away_Team>Bluejays</Away_Team> <Date>2012-01-19T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Bluejays</Home_Team> <Away_Team>Pixies</Away_Team> <Date>2012-01-21T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Pixies</Home_Team> <Away_Team>Bluejays</Away_Team> <Date>2012-01-23T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Bluejays</Home_Team> <Away_Team>Zombies</Away_Team> <Date>2012-01-25T09:00:00</Date> </Game> <Game status="Pending"> <Home_Team>Zombies</Home_Team> <Away_Team>Bluejays</Away_Team> <Date>2012-01-27T09:00:00</Date> </Game> <Game status="Pending"> <Home_Team>Bluejays</Home_Team> <Away_Team>Wombats</Away_Team> <Date>2012-01-28T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Wombats</Home_Team> <Away_Team>Bluejays</Away_Team> <Date>2012-01-30T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Chickens</Home_Team> <Away_Team>Lions</Away_Team> <Date>2012-01-31T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Lions</Home_Team> <Away_Team>Chickens</Away_Team> <Date>2012-02-04T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Chickens</Home_Team> <Away_Team>Pixies</Away_Team> <Date>2012-02-05T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Pixies</Home_Team> <Away_Team>Chickens</Away_Team> <Date>2012-02-07T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Chickens</Home_Team> <Away_Team>Zombies</Away_Team> <Date>2012-02-08T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Zombies</Home_Team> <Away_Team>Chickens</Away_Team> <Date>2012-02-10T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Lions</Home_Team> <Away_Team>Pixies</Away_Team> <Date>2012-02-12T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Pixies </Home_Team> <Away_Team>Lions</Away_Team> <Date>2012-02-14T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Lions</Home_Team> <Away_Team>Zombies</Away_Team> <Date>2012-02-15T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Zombies</Home_Team> <Away_Team>Lions</Away_Team> <Date>2012-02-16T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Lions</Home_Team> <Away_Team>Wombats</Away_Team> <Date>2012-01-23T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Wombats</Home_Team> <Away_Team>Lions</Away_Team> <Date>2012-02-24T09:00:00</Date> </Game> <Game status="Pending"> <Home_Team>Pixies</Home_Team> <Away_Team>Zombies</Away_Team> <Date>2012-02-25T09:00:00</Date> </Game> <Game status="Pending"> <Home_Team>Zombies</Home_Team> <Away_Team>Pixies</Away_Team> <Date>2012-02-26T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Pixies</Home_Team> <Away_Team>Wombats</Away_Team> <Date>2012-02-27T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Wombats</Home_Team> <Away_Team>Pixies</Away_Team> <Date>2012-02-28T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Zombies</Home_Team> <Away_Team>Wombats</Away_Team> <Date>2012-02-04T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Wombats</Home_Team> <Away_Team>Zombies</Away_Team> <Date>2012-02-05T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Wombats</Home_Team> <Away_Team>Chickens</Away_Team> <Date>2012-02-07T09:00:00</Date> </Game> <Game status="Played"> <Home_Team>Chickens</Home_Team> <Away_Team>Wombats</Away_Team> <Date>2012-02-08T09:00:00</Date> </Game> </Schedule> If anybody can point me to Jquery code\modules that would greatly help me with this I'd be appreciate. 
Any help right now would be great; I'm just banging my head against a wall. I'm trying to avoid using XSLT transforms because I absolutely despise XML and I'm not good with it. So I'd like to just use JavaScript/PHP/etc. with only a sprinkling of the necessary XML where possible.
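Since the poster asks for pointers rather than a finished solution, here is a minimal jQuery sketch of the two missing pieces. Element IDs such as #games and the loadedXml variable are hypothetical placeholders, not part of the original markup:

// Keep the parsed XML from the ajax success callback for later use,
// e.g. assign it inside success: function(xml) { loadedXml = xml; ... }
var loadedXml = null;

// List every game (including Pending) for the chosen team.
function showGamesFor(team) {
  var out = "";
  $(loadedXml).find("Game").each(function () {
    var home = $.trim($(this).find("Home_Team").text());
    var away = $.trim($(this).find("Away_Team").text());
    if (home === team || away === team) {
      out += home + " vs " + away + " on " + $(this).find("Date").text() +
             " [" + $(this).attr("status") + "]<br/>";
    }
  });
  $("#games").html(out);
}

// For the standings chart, only count completed games.
function tallyPlayed() {
  $(loadedXml).find("Game[status='Played']").each(function () {
    // accumulate wins/losses/ties per team here
  });
}

$("#mySelect").change(function () { showGamesFor($(this).val()); });

The attribute selector Game[status='Played'] is what keeps pending games out of the wins/losses/ties table.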

    Read the article

  • Streamed mp3 only plays for 1 second

    - by angel6
    Hi, I'm using the plaympeg.c (modified) code of smpeg as a media player. I've got ffserver running as a streaming server. I'm a streaming an mp3 file over http. But when I run plaympeg.c, it plays the streamed file only for a second. When I run plaympeg again, it starts off from where it left and plays for 1 second. Does anyone know why this happens an how to fix it? I've tested it out on WMP and it plays the entire file in one go. So, i guess it's not a problem with the streaming or ffserver.conf include include include include /* #ifdef unix */ include include include include include include include define NET_SUPPORT /* General network support */ define HTTP_SUPPORT /* HTTP support */ ifdef NET_SUPPORT include include include include endif include "smpeg.h" ifdef NET_SUPPORT int tcp_open(char * address, int port) { struct sockaddr_in stAddr; struct hostent * host; int sock; struct linger l; memset(&stAddr,0,sizeof(stAddr)); stAddr.sin_family = AF_INET ; stAddr.sin_port = htons(port); if((host = gethostbyname(address)) == NULL) return(0); stAddr.sin_addr = *((struct in_addr *) host-h_addr_list[0]) ; if((sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) return(0); l.l_onoff = 1; l.l_linger = 5; if(setsockopt(sock, SOL_SOCKET, SO_LINGER, (char*) &l, sizeof(l)) < 0) return(0); if(connect(sock, (struct sockaddr *) &stAddr, sizeof(stAddr)) < 0) return(0); return(sock); } ifdef HTTP_SUPPORT int http_open(char * arg) { char * host; int port; char * request; int tcp_sock; char http_request[1024]; char c; printf("\nin http_open passed parameter = %s\n",arg); /* Check for URL syntax */ if(strncmp(arg, "http://", strlen("http://"))) return(0); /* Parse URL */ port = 80; host = arg + strlen("http://"); if((request = strchr(host, '/')) == NULL) return(0); request++ = 0; if(strchr(host, ':') != NULL) / port is specified */ { port = atoi(strchr(host, ':') + 1); *strchr(host, ':') = 0; } /* Open a TCP socket */ if(!(tcp_sock = tcp_open(host, port))) { perror("http_open"); return(0); } /* Send HTTP GET request */ sprintf(http_request, "GET /%s HTTP/1.0\r\n" "User-Agent: Mozilla/2.0 (Win95; I)\r\n" "Pragma: no-cache\r\n" "Host: %s\r\n" "Accept: /\r\n" "\r\n", request, host); send(tcp_sock, http_request, strlen(http_request), 0); /* Parse server reply */ do read(tcp_sock, &c, sizeof(char)); while(c != ' '); read(tcp_sock, http_request, 4*sizeof(char)); http_request[4] = 0; if(strcmp(http_request, "200 ")) { fprintf(stderr, "http_open: "); do { read(tcp_sock, &c, sizeof(char)); fprintf(stderr, "%c", c); } while(c != '\r'); fprintf(stderr, "\n"); return(0); } return(tcp_sock); } endif endif void update(SDL_Surface *screen, Sint32 x, Sint32 y, Uint32 w, Uint32 h) { if ( screen-flags & SDL_DOUBLEBUF ) { SDL_Flip(screen); } } /* Flag telling the UI that the movie or song should be skipped */ int done; void next_movie(int sig) { done = 1; } int main(int argc, char *argv[]) { int use_audio, use_video; int fullscreen; int scalesize; int scale_width, scale_height; int loop_play; int i, pause; int volume; Uint32 seek; float skip; int bilinear_filtering; SDL_Surface *screen; SMPEG *mpeg; SMPEG_Info info; char *basefile; SDL_version sdlver; SMPEG_version smpegver; int fd; char buf[32]; int status; printf("\nchecking command line options "); /* Get the command line options */ use_audio = 1; use_video = 1; fullscreen = 0; scalesize = 1; scale_width = 0; scale_height = 0; loop_play = 0; volume = 100; seek = 0; skip = 0; bilinear_filtering = 0; fd = 0; for ( i=1; argv[i] && (argv[i][0] == '-') && (argv[i][1] != 
0); ++i ) { if ( strcmp(argv[i], "--fullscreen") == 0 ) { fullscreen = 1; } else if ((strcmp(argv[i], "--seek") == 0)||(strcmp(argv[i], "-S") == 0)) { ++i; if ( argv[i] ) { seek = atol(argv[i]); } } else if ((strcmp(argv[i], "--volume") == 0)||(strcmp(argv[i], "-v") == 0)) { ++i; if (i >= argc) { fprintf(stderr, "Please specify volume when using --volume or -v\n"); return(1); } if ( argv[i] ) { volume = atoi(argv[i]); } if ( ( volume < 0 ) || ( volume 100 ) ) { fprintf(stderr, "Volume must be between 0 and 100\n"); volume = 100; } } else { fprintf(stderr, "Warning: Unknown option: %s\n", argv[i]); } } printf("\nuse video = %d, use audio = %d\n",use_video, use_audio); printf("\ngoing to check input parameters\n"); if defined(linux) || defined(FreeBSD) /* Plaympeg doesn't need a mouse */ putenv("SDL_NOMOUSE=1"); endif /* Play the mpeg files! */ status = 0; for ( ; argv[i]; ++i ) { /* Initialize SDL */ if ( use_video ) { if ((SDL_Init(SDL_INIT_VIDEO) < 0) || !SDL_VideoDriverName(buf, 1)) { fprintf(stderr, "Warning: Couldn't init SDL video: %s\n", SDL_GetError()); fprintf(stderr, "Will ignore video stream\n"); use_video = 0; } printf("\ninitialised video\n"); } if ( use_audio ) { if ((SDL_Init(SDL_INIT_AUDIO) < 0) || !SDL_AudioDriverName(buf, 1)) { fprintf(stderr, "Warning: Couldn't init SDL audio: %s\n", SDL_GetError()); fprintf(stderr, "Will ignore audio stream\n"); use_audio = 0; } } /* Allow Ctrl-C when there's no video output */ signal(SIGINT, next_movie); printf("\nchecking defined supports\n"); /* Create the MPEG stream */ ifdef NET_SUPPORT printf("\ndefined NET_SUPPORT\n"); ifdef HTTP_SUPPORT printf("\ndefined HTTP_SUPPORT\n"); /* Check if source is an http URL */ printf("\nabout to call http_open\n"); printf("\nhere we go\n"); if((fd = http_open(argv[i])) != 0) mpeg = SMPEG_new_descr(fd, &info, use_audio); else endif endif { if(strcmp(argv[i], "-") == 0) /* Use stdin for input */ mpeg = SMPEG_new_descr(0, &info, use_audio); else mpeg = SMPEG_new(argv[i], &info, use_audio); } if ( SMPEG_error(mpeg) ) { fprintf(stderr, "%s: %s\n", argv[i], SMPEG_error(mpeg)); SMPEG_delete(mpeg); status = -1; continue; } SMPEG_enableaudio(mpeg, use_audio); SMPEG_enablevideo(mpeg, use_video); SMPEG_setvolume(mpeg, volume); /* Print information about the video */ basefile = strrchr(argv[i], '/'); if ( basefile ) { ++basefile; } else { basefile = argv[i]; } if ( info.has_audio && info.has_video ) { printf("%s: MPEG system stream (audio/video)\n", basefile); } else if ( info.has_audio ) { printf("%s: MPEG audio stream\n", basefile); } else if ( info.has_video ) { printf("%s: MPEG video stream\n", basefile); } if ( info.has_video ) { printf("\tVideo %dx%d resolution\n", info.width, info.height); } if ( info.has_audio ) { printf("\tAudio %s\n", info.audio_string); } if ( info.total_size ) { printf("\tSize: %d\n", info.total_size); } if ( info.total_time ) { printf("\tTotal time: %f\n", info.total_time); } /* Set up video display if needed */ if ( info.has_video && use_video ) { const SDL_VideoInfo *video_info; Uint32 video_flags; int video_bpp; int width, height; /* Get the "native" video mode */ video_info = SDL_GetVideoInfo(); switch (video_info->vfmt->BitsPerPixel) { case 16: case 24: case 32: video_bpp = video_info->vfmt->BitsPerPixel; break; default: video_bpp = 16; break; } if ( scale_width ) { width = scale_width; } else { width = info.width; } width *= scalesize; if ( scale_height ) { height = scale_height; } else { height = info.height; } height *= scalesize; video_flags = SDL_SWSURFACE; if ( 
fullscreen ) { video_flags = SDL_FULLSCREEN|SDL_DOUBLEBUF|SDL_HWSURFACE; } video_flags |= SDL_ASYNCBLIT; video_flags |= SDL_RESIZABLE; screen = SDL_SetVideoMode(width, height, video_bpp, video_flags); if ( screen == NULL ) { fprintf(stderr, "Unable to set %dx%d video mode: %s\n", width, height, SDL_GetError()); continue; } SDL_WM_SetCaption(argv[i], "plaympeg"); if ( screen->flags & SDL_FULLSCREEN ) { SDL_ShowCursor(0); } SMPEG_setdisplay(mpeg, screen, NULL, update); SMPEG_scaleXY(mpeg, screen->w, screen->h); } else { SDL_QuitSubSystem(SDL_INIT_VIDEO); } /* Set any special playback parameters */ if ( loop_play ) { SMPEG_loop(mpeg, 1); } /* Seek starting position */ if(seek) SMPEG_seek(mpeg, seek); /* Skip seconds to starting position */ if(skip) SMPEG_skip(mpeg, skip); /* Play it, and wait for playback to complete */ SMPEG_play(mpeg); done = 0; pause = 0; while ( ! done && ( pause || (SMPEG_status(mpeg) == SMPEG_PLAYING) ) ) { SDL_Event event; while ( use_video && SDL_PollEvent(&event) ) { switch (event.type) { case SDL_VIDEORESIZE: { SDL_Surface *old_screen = screen; SMPEG_pause(mpeg); screen = SDL_SetVideoMode(event.resize.w, event.resize.h, screen->format->BitsPerPixel, screen->flags); if ( old_screen != screen ) { SMPEG_setdisplay(mpeg, screen, NULL, update); } SMPEG_scaleXY(mpeg, screen-w, screen-h); SMPEG_pause(mpeg); } break; case SDL_KEYDOWN: if ( (event.key.keysym.sym == SDLK_ESCAPE) || (event.key.keysym.sym == SDLK_q) ) { // Quit done = 1; } else if ( event.key.keysym.sym == SDLK_RETURN ) { // toggle fullscreen if ( event.key.keysym.mod & KMOD_ALT ) { SDL_WM_ToggleFullScreen(screen); fullscreen = (screen-flags & SDL_FULLSCREEN); SDL_ShowCursor(!fullscreen); } } else if ( event.key.keysym.sym == SDLK_UP ) { // Volume up if ( volume < 100 ) { if ( event.key.keysym.mod & KMOD_SHIFT ) { // 10+ volume += 10; } else if ( event.key.keysym.mod & KMOD_CTRL ) { // 100+ volume = 100; } else { // 1+ volume++; } if ( volume 100 ) volume = 100; SMPEG_setvolume(mpeg, volume); } } else if ( event.key.keysym.sym == SDLK_DOWN ) { // Volume down if ( volume 0 ) { if ( event.key.keysym.mod & KMOD_SHIFT ) { volume -= 10; } else if ( event.key.keysym.mod & KMOD_CTRL ) { volume = 0; } else { volume--; } if ( volume < 0 ) volume = 0; SMPEG_setvolume(mpeg, volume); } } else if ( event.key.keysym.sym == SDLK_PAGEUP ) { // Full volume volume = 100; SMPEG_setvolume(mpeg, volume); } else if ( event.key.keysym.sym == SDLK_PAGEDOWN ) { // Volume off volume = 0; SMPEG_setvolume(mpeg, volume); } else if ( event.key.keysym.sym == SDLK_SPACE ) { // Toggle play / pause if ( SMPEG_status(mpeg) == SMPEG_PLAYING ) { SMPEG_pause(mpeg); pause = 1; } else { SMPEG_play(mpeg); pause = 0; } } else if ( event.key.keysym.sym == SDLK_RIGHT ) { // Forward if ( event.key.keysym.mod & KMOD_SHIFT ) { SMPEG_skip(mpeg, 100); } else if ( event.key.keysym.mod & KMOD_CTRL ) { SMPEG_skip(mpeg, 50); } else { SMPEG_skip(mpeg, 5); } } else if ( event.key.keysym.sym == SDLK_LEFT ) { // Reverse if ( event.key.keysym.mod & KMOD_SHIFT ) { } else if ( event.key.keysym.mod & KMOD_CTRL ) { } else { } } else if ( event.key.keysym.sym == SDLK_KP_MINUS ) { // Scale minus if ( scalesize > 1 ) { scalesize--; } } else if ( event.key.keysym.sym == SDLK_KP_PLUS ) { // Scale plus scalesize++; } else if ( event.key.keysym.sym == SDLK_f ) { // Toggle filtering on/off if ( bilinear_filtering ) { SMPEG_Filter *filter = SMPEGfilter_null(); filter = SMPEG_filter( mpeg, filter ); filter-destroy(filter); bilinear_filtering = 0; } else { SMPEG_Filter *filter = 
SMPEGfilter_bilinear(); filter = SMPEG_filter( mpeg, filter ); filter-destroy(filter); bilinear_filtering = 1; } } break; case SDL_QUIT: done = 1; break; default: break; } } SDL_Delay(1000/2); } SMPEG_delete(mpeg); } SDL_Quit(); if defined(HTTP_SUPPORT) if(fd) close(fd); endif return(status); }
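One thing worth ruling out (an assumption, not a confirmed diagnosis): http_open above returns as soon as it has read the "200 " status code, so the rest of the HTTP response headers may still be sitting in the stream when the descriptor is handed to SMPEG_new_descr. A small sketch that drains the headers up to the blank line first:

/* Sketch: consume the remaining HTTP headers (everything up to CRLF CRLF)
 * so only the mp3 payload is left on the socket. Call this right before
 * "return(tcp_sock);" in http_open(). */
static void skip_http_headers(int sock)
{
    char c;
    int newlines = 0;
    while (newlines < 2 && read(sock, &c, sizeof(char)) == 1) {
        if (c == '\n')
            newlines++;      /* two LFs in a row (CRs ignored) end the headers */
        else if (c != '\r')
            newlines = 0;    /* any other byte resets the count */
    }
}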

    Read the article

  • The program is executing properly on Dev C++ but is giving problems in Linux. The movement is becoming

    - by srinija
    #include<stdio.h> #include<GL/glut.h> GLfloat v[3][24]={{100.0,300.0,350.0,50.0,100.0,120.0,120.0,100.0,260.0,280.0, 280.0,260.0,140.0,160.0,160.0,140.0,180.0,200.0,200.0,180.0, 220.0,240.0,240.0,220.0},{100.0,100.0,200.0,200.0,160.0, 160.0,180.0,180.0,160.0,160.0,180.0,180.0,160.0,160.0,180.0, 180.0,160.0,160.0,180.0,180.0,160.0,160.0,180.0,180.0}, {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}}; GLfloat v1[3][16]={{50.0,350.0,350.0,50.0,100.0,300.0,300.0,100.0,125.0,175.0, 175.0,125.0,225.0,275.0,275.0,225.0},{200.0,200.0,210.0, 210.0,210.0,210.0,240.0,240.0,240.0,240.0,310.0,310.0,240.0, 240.0,310.0,310.0},{1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, 1.0,1.0,1.0,1.0,1.0,1.0}}; GLfloat colors[4][3]={{0.0,0.0,1.0},{0.9961,0.9961,0.65625},{1.0,0.0,1.0}, {1.0,.0,1.0}}; static float q,w,e; static float fq,fw,fe; static GLfloat wa=0,wb=0,wc=0,ba,bb,bc; int flag; void myinit(void) { glClearColor(0.506,.7,1,0.0); glPointSize(2.0); glLoadIdentity(); glOrtho(0.0,499.0,0.0,499.0,-300.0,300.0); } void draw_top_boxes(GLint i,GLint j) { glColor3f(1.0,0.0,0.0); glBegin(GL_POLYGON); glColor3fv(colors[j]); // to draw the boat glVertex2f(v1[0][i+0],v1[1][i+0]); glColor3fv(colors[j+1]); glVertex2f(v1[0][i+1],v1[1][i+1]); glColor3fv(colors[j+2]); glVertex2f(v1[0][i+2],v1[1][i+2]); glColor3fv(colors[j+3]); glVertex2f(v1[0][i+3],v1[1][i+3]); glEnd(); } void draw_polygon(GLint i) { glBegin(GL_POLYGON); // to draw the boat glColor3f(0.0,0.0,0.0); glColor3fv(colors[0]); glVertex2f(v[0][i+0],v[1][i+0]); glColor3fv(colors[1]); glVertex2f(v[0][i+1],v[1][i+1]); glColor3fv(colors[2]); glVertex2f(v[0][i+2],v[1][i+2]); glColor3fv(colors[3]); glVertex2f(v[0][i+3],v[1][i+3]); glEnd(); } void draw_boat() { draw_polygon(0); draw_polygon(4); draw_polygon(8); draw_polygon(12); draw_polygon(16); draw_polygon(20); draw_top_boxes(0,0); draw_top_boxes(4,0); draw_top_boxes(8,0); draw_top_boxes(12,0); glFlush(); glPopMatrix(); glPopMatrix(); } void draw_water() { GLfloat i; GLfloat x=0,y=103,j=0; GLfloat k; glPushMatrix(); glTranslatef(wa,wb,wc); glPushMatrix(); glColor3f(0,0,1); for(k=y;k>0;k-=6) { for(i=1;i<30;i++) { glBegin(GL_LINES); glVertex2f(j,k); glVertex2f(j+10,k); glEnd(); j=j+20; } j=0; } glPopMatrix(); glPopMatrix(); } void draw_fishes() { glPushMatrix(); glTranslatef(fq,12.0,fe); glPushMatrix(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(100,80); glVertex2f(100,60); glVertex2f(85,70); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(100,70); glVertex2f(110,75); glVertex2f(110,65); glEnd(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(90,71); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(200,80); glVertex2f(200,60); glVertex2f(185,70); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(200,70); glVertex2f(210,75); glVertex2f(210,65); glEnd(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(190,71); glEnd(); glPopMatrix(); glPopMatrix(); glFlush(); } void draw_cloud() { GLfloat m=100,n=400,o=10; for(int i=0;i<7;i++) { glPushMatrix(); glColor3f(1.0,1.0,1.0); if(i==1) glTranslated(125,415,10); else if(i==3||i==5) glTranslated(m,n+5,o); else glTranslated(m,n,o); glutSolidSphere(20.0,5000,150); glPopMatrix(); m+=10; } } void draw_square() { glColor3f(0,0.5,0.996); glBegin(GL_POLYGON); glVertex2f(0,0); glVertex2f(1000,0); glVertex2f(0,300); glVertex2f(1000,300); glEnd(); glFlush(); } void draw_brotate() { glPushMatrix(); 
glColor3f(0.96,0.5,0.25); //to draw body of the bird glTranslated(300,400,10); glScalef(3,1,1); glutSolidSphere(6,50000,15); glPopMatrix(); glPushMatrix(); glTranslated(323,400,10); glutSolidSphere(5,50000,15); glPopMatrix(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(325,401); glEnd(); glColor3f(0.96,0.5,0.25); //to draw wings glBegin(GL_LINES); glVertex2f(294,394); glVertex2f(286,389); glEnd(); glBegin(GL_LINES); glVertex2f(286,389); glVertex2f(295,391); glEnd(); glBegin(GL_LINES); glVertex2f(295,391); glVertex2f(285,385); glEnd(); glBegin(GL_LINES); glVertex2f(285,385); glVertex2f(309,395); glEnd(); glBegin(GL_LINES); glVertex2f(294,406); glVertex2f(286,411); glEnd(); glBegin(GL_LINES); glVertex2f(286,411); glVertex2f(295,409); glEnd(); glBegin(GL_LINES); glVertex2f(295,409); glVertex2f(285,415); glEnd(); glBegin(GL_LINES); glVertex2f(285,415); glVertex2f(309,406); glEnd(); glColor3f(0.96,0.5,0.25); } void draw_bird() { GLfloat x=200,y=400,z=10; draw_brotate(); glBegin(GL_LINES); //draw legs of the bird glVertex2f(285,402); glVertex2f(275,402); glEnd(); glBegin(GL_LINES); glVertex2f(285,398); glVertex2f(275,398); glEnd(); glBegin(GL_LINES); glVertex2f(275,402); glVertex2f(270,405); glEnd(); glBegin(GL_LINES); glVertex2f(275,402); glVertex2f(270,398); glEnd(); glBegin(GL_LINES); glVertex2f(275,398); glVertex2f(273,400); glEnd(); glBegin(GL_LINES); glVertex2f(275,398); glVertex2f(270,395); glEnd(); glBegin(GL_LINES); glVertex2f(323,405); glVertex2f(323,407); glEnd(); glPushMatrix(); glTranslatef(323,409,10); glutSolidSphere(2,200,20); glPopMatrix(); glBegin(GL_TRIANGLES); glVertex2f(328,400); glVertex2f(331,397); glVertex2f(327,398.5); glEnd(); glFlush(); } void drawstars() { glColor3f(1.0,1.0,1.0); glBegin(GL_POINTS); glVertex3f(300.0,400.0,10.0); glVertex3f(200,400.0,10.0); glVertex3f(150,450.0,10.0); glVertex3f(100,470.0,10.0); glVertex3f(50,450.0,10.0); glVertex3f(50,350.0,10.0); glVertex3f(90,365.0,10.0); glVertex3f(350,450.0,10.0); glVertex3f(275,470.0,10.0); glVertex3f(280,430.0,10.0); glVertex3f(250,400.0,10.0); glVertex3f(450,450.0,10.0); glVertex3f(430,430.0,10.0); glVertex3f(430,470.0,10.0); glVertex3f(300,450.0,10.0); glVertex3f(265,380.0,10.0); glVertex3f(235,450.0,10.0); glEnd(); } void draw_all() { glClear(GL_COLOR_BUFFER_BIT); if(flag==0) { glDisable(GL_LIGHTING); //immp one draw_square(); draw_cloud(); glClearColor(0.506,.7,1,0.0); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glPushMatrix(); glColor3f(1.0,1.0,0.0); glTranslated(400,400,10); glutSolidSphere(20.0,5000,150); glPopMatrix(); } if(flag==1) { glDisable(GL_LIGHTING); //imp one draw_square(); draw_cloud(); glClearColor(0.9960,0.7070,0.3164,0.0); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glPushMatrix(); glColor3f(1.0,1.0,0.0); glTranslated(400,400,10); glutSolidSphere(20.0,500,100); glPopMatrix(); } if(flag==2) { // just try and change values in these arrays, specially the position array drawstars(); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); // GLfloat emission[]={0.1,0.1,0.1,0.0}; GLfloat diffuse[] = { 0.40, 0.40,0.40, 1.0 }; GLfloat ambiance[] = { 0.5, 0.5,0.5, 1.0 }; GLfloat specular[] = { 1.3, 1.3,.3, 1.0 }; GLfloat intensity[]={500.0}; GLfloat position[] = { 10,30,-30,1.0 }; glLightfv (GL_LIGHT0, GL_POSITION, position); glLightfv (GL_LIGHT0, GL_DIFFUSE,diffuse); glLightfv (GL_LIGHT0, GL_AMBIENT,ambiance); glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER,GL_TRUE); glLightfv (GL_LIGHT0, GL_SPECULAR,specular); glLightfv 
(GL_LIGHT0, GL_INTENSITY,intensity); glColor3f(0,0.5,0.996); glBegin(GL_POLYGON); glVertex2f(0,0); glVertex2f(1000,0); glVertex2f(0,150); glVertex2f(1000,150); glEnd(); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glDisable(GL_LIGHTING); glDisable(GL_LIGHT0); draw_cloud(); glClearColor(0.0,0.0,0.0,0.0); glPushMatrix(); glColor3f(1.0,1.0,1.0); glTranslated(400,400,10); glutSolidSphere(20.0,500,100); glPopMatrix(); glColor3f(1.0,1.0,1.0); glBegin(GL_POINTS); glVertex3f(300.0,400.0,10.0); glEnd(); } glPushMatrix(); glTranslatef(ba,bb,bc); glPushMatrix(); draw_bird(); glPopMatrix(); glPopMatrix(); GLfloat i; glPushMatrix(); GLfloat x=0,y=100,j=0; int k; //draw_water(); Sleep(60); q+=5; fq-=3.5; if(q>=440.0) //470 q=-390.0; //400 if(fq<=-300) //500 fq=400.0; //400 wa-=1; if(wa<=(-20)) wa=-0.5; ba+=6; if(ba>=500) ba=-400; glFlush(); glutSwapBuffers(); } void display(void) { draw_all(); } void color_menu(int id) { switch(id) { case 1: flag=0;break; case 2: flag=1;break; case 3: flag=2;break; case 4: exit(0); break; } glutPostRedisplay(); } void main_menu(int id) { switch(id) { case 1: break; case 2:exit(0);break; glutPostRedisplay(); } } int main(int argc,char **argv) { int sub_menu; glutInit(&argc,argv); glutInitDisplayMode(GLUT_RGB|GLUT_DOUBLE); glutInitWindowSize(1000,1000); glutInitWindowPosition(0,0); glutCreateWindow("Ship"); sub_menu=glutCreateMenu(color_menu); glutAddMenuEntry("Morning",1); glutAddMenuEntry("Evening",2); glutAddMenuEntry("Night",3); glutAddMenuEntry("Quit",4); glutCreateMenu(main_menu); glutAddSubMenu("View",sub_menu); glutAddMenuEntry("Quit",2); glutAttachMenu(GLUT_RIGHT_BUTTON); glutDisplayFunc(display); glutIdleFunc(display); myinit(); glutMainLoop(); glFlush(); }
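A likely suspect for the Dev-C++ vs. Linux difference (my assumption from reading the code, not a verified fix) is the Sleep(60) call inside draw_all: Sleep is a Windows API, so a Linux build either fails to compile or runs unthrottled, and the idle-driven animation then moves as fast as the machine allows. A portable sketch using glutTimerFunc instead:

/* Sketch: drive the animation from a GLUT timer rather than Sleep() in the
 * idle function, so the speed is identical on Windows and Linux. Remove the
 * Sleep(60) and the q/fq increments from draw_all() and put them here. */
void on_timer(int value)
{
    q += 5;                          /* same per-tick increments as before */
    fq -= 3.5;
    if (q >= 440.0)  q = -390.0;
    if (fq <= -300)  fq = 400.0;
    glutPostRedisplay();             /* schedule one call to display() */
    glutTimerFunc(60, on_timer, 0);  /* re-arm: roughly one tick per 60 ms */
}

/* in main(), replace glutIdleFunc(display); with glutTimerFunc(60, on_timer, 0); */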

    Read the article

  • This code is working properly in Dev C++. But on the Linux platform it is giving problems with the moveme

    - by srinija
    #include<stdio.h> #include<GL/glut.h> #include<stdlib.h> GLfloat v[3][24]={{100.0,300.0,350.0,50.0,100.0,120.0,120.0,100.0,260.0,280.0, 280.0,260.0,140.0,160.0,160.0,140.0,180.0,200.0,200.0,180.0, 220.0,240.0,240.0,220.0},{100.0,100.0,200.0,200.0,160.0, 160.0,180.0,180.0,160.0,160.0,180.0,180.0,160.0,160.0,180.0, 180.0,160.0,160.0,180.0,180.0,160.0,160.0,180.0,180.0}, {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}}; GLfloat v1[3][16]={{50.0,350.0,350.0,50.0,100.0,300.0,300.0,100.0,125.0,175.0, 175.0,125.0,225.0,275.0,275.0,225.0},{200.0,200.0,210.0, 210.0,210.0,210.0,240.0,240.0,240.0,240.0,310.0,310.0,240.0, 240.0,310.0,310.0},{1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, 1.0,1.0,1.0,1.0,1.0,1.0}}; GLfloat colors[4][3]={{0.0,0.0,1.0},{0.9961,0.9961,0.65625},{1.0,0.0,1.0}, {1.0,.0,1.0}}; static float q,w,e; static float fq,fw,fe; static GLfloat wa=0,wb=0,wc=0,ba,bb,bc; int flag; void myinit(void) { glClearColor(0.506,.7,1,0.0); glPointSize(2.0); glLoadIdentity(); glOrtho(0.0,499.0,0.0,499.0,-300.0,300.0); } void draw_top_boxes(GLint i,GLint j) { glColor3f(1.0,0.0,0.0); glBegin(GL_POLYGON); glColor3fv(colors[j]); // to draw the boat glVertex2f(v1[0][i+0],v1[1][i+0]); glColor3fv(colors[j+1]); glVertex2f(v1[0][i+1],v1[1][i+1]); glColor3fv(colors[j+2]); glVertex2f(v1[0][i+2],v1[1][i+2]); glColor3fv(colors[j+3]); glVertex2f(v1[0][i+3],v1[1][i+3]); glEnd(); } void draw_polygon(GLint i) { glBegin(GL_POLYGON); // to draw the boat glColor3f(0.0,0.0,0.0); glColor3fv(colors[0]); glVertex2f(v[0][i+0],v[1][i+0]); glColor3fv(colors[1]); glVertex2f(v[0][i+1],v[1][i+1]); glColor3fv(colors[2]); glVertex2f(v[0][i+2],v[1][i+2]); glColor3fv(colors[3]); glVertex2f(v[0][i+3],v[1][i+3]); glEnd(); } void draw_boat() { draw_polygon(0); draw_polygon(4); draw_polygon(8); draw_polygon(12); draw_polygon(16); draw_polygon(20); draw_top_boxes(0,0); draw_top_boxes(4,0); draw_top_boxes(8,0); draw_top_boxes(12,0); glFlush(); glPopMatrix(); glPopMatrix(); } void draw_water() { GLfloat i; GLfloat x=0,y=103,j=0; GLfloat k; glPushMatrix(); glTranslatef(wa,wb,wc); glPushMatrix(); glColor3f(0,0,1); for(k=y;k>0;k-=6) { for(i=1;i<30;i++) { glBegin(GL_LINES); glVertex2f(j,k); glVertex2f(j+10,k); glEnd(); j=j+20; } j=0; } glPopMatrix(); glPopMatrix(); } void draw_fishes() { glPushMatrix(); glTranslatef(fq,12.0,fe); glPushMatrix(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(100,80); glVertex2f(100,60); glVertex2f(85,70); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(100,70); glVertex2f(110,75); glVertex2f(110,65); glEnd(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(90,71); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(200,80); glVertex2f(200,60); glVertex2f(185,70); glEnd(); glColor3f(.99609375,0.2578125,0.2578125); glBegin(GL_TRIANGLES); glVertex2f(200,70); glVertex2f(210,75); glVertex2f(210,65); glEnd(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(190,71); glEnd(); glPopMatrix(); glPopMatrix(); glFlush(); } void draw_cloud() { GLfloat m=100,n=400,o=10; for(int i=0;i<7;i++) { glPushMatrix(); glColor3f(1.0,1.0,1.0); if(i==1) glTranslated(125,415,10); else if(i==3||i==5) glTranslated(m,n+5,o); else glTranslated(m,n,o); glutSolidSphere(20.0,5000,150); glPopMatrix(); m+=10; } } void draw_square() { glColor3f(0,0.5,0.996); glBegin(GL_POLYGON); glVertex2f(0,0); glVertex2f(1000,0); glVertex2f(0,300); glVertex2f(1000,300); glEnd(); glFlush(); } void draw_brotate() { 
glPushMatrix(); glColor3f(0.96,0.5,0.25); //to draw body of the bird glTranslated(300,400,10); glScalef(3,1,1); glutSolidSphere(6,50000,15); glPopMatrix(); glPushMatrix(); glTranslated(323,400,10); glutSolidSphere(5,50000,15); glPopMatrix(); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex2f(325,401); glEnd(); glColor3f(0.96,0.5,0.25); //to draw wings glBegin(GL_LINES); glVertex2f(294,394); glVertex2f(286,389); glEnd(); glBegin(GL_LINES); glVertex2f(286,389); glVertex2f(295,391); glEnd(); glBegin(GL_LINES); glVertex2f(295,391); glVertex2f(285,385); glEnd(); glBegin(GL_LINES); glVertex2f(285,385); glVertex2f(309,395); glEnd(); glBegin(GL_LINES); glVertex2f(294,406); glVertex2f(286,411); glEnd(); glBegin(GL_LINES); glVertex2f(286,411); glVertex2f(295,409); glEnd(); glBegin(GL_LINES); glVertex2f(295,409); glVertex2f(285,415); glEnd(); glBegin(GL_LINES); glVertex2f(285,415); glVertex2f(309,406); glEnd(); glColor3f(0.96,0.5,0.25); } void draw_bird() { GLfloat x=200,y=400,z=10; draw_brotate(); glBegin(GL_LINES); //draw legs of the bird glVertex2f(285,402); glVertex2f(275,402); glEnd(); glBegin(GL_LINES); glVertex2f(285,398); glVertex2f(275,398); glEnd(); glBegin(GL_LINES); glVertex2f(275,402); glVertex2f(270,405); glEnd(); glBegin(GL_LINES); glVertex2f(275,402); glVertex2f(270,398); glEnd(); glBegin(GL_LINES); glVertex2f(275,398); glVertex2f(273,400); glEnd(); glBegin(GL_LINES); glVertex2f(275,398); glVertex2f(270,395); glEnd(); glBegin(GL_LINES); glVertex2f(323,405); glVertex2f(323,407); glEnd(); glPushMatrix(); glTranslatef(323,409,10); glutSolidSphere(2,200,20); glPopMatrix(); glBegin(GL_TRIANGLES); glVertex2f(328,400); glVertex2f(331,397); glVertex2f(327,398.5); glEnd(); glFlush(); } void drawstars() { glColor3f(1.0,1.0,1.0); glBegin(GL_POINTS); glVertex3f(300.0,400.0,10.0); glVertex3f(200,400.0,10.0); glVertex3f(150,450.0,10.0); glVertex3f(100,470.0,10.0); glVertex3f(50,450.0,10.0); glVertex3f(50,350.0,10.0); glVertex3f(90,365.0,10.0); glVertex3f(350,450.0,10.0); glVertex3f(275,470.0,10.0); glVertex3f(280,430.0,10.0); glVertex3f(250,400.0,10.0); glVertex3f(450,450.0,10.0); glVertex3f(430,430.0,10.0); glVertex3f(430,470.0,10.0); glVertex3f(300,450.0,10.0); glVertex3f(265,380.0,10.0); glVertex3f(235,450.0,10.0); glEnd(); } void draw_all() { glClear(GL_COLOR_BUFFER_BIT); if(flag==0) { glDisable(GL_LIGHTING); //immp one draw_square(); draw_cloud(); glClearColor(0.506,.7,1,0.0); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glPushMatrix(); glColor3f(1.0,1.0,0.0); glTranslated(400,400,10); glutSolidSphere(20.0,5000,150); glPopMatrix(); } if(flag==1) { glDisable(GL_LIGHTING); //imp one draw_square(); draw_cloud(); glClearColor(0.9960,0.7070,0.3164,0.0); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glPushMatrix(); glColor3f(1.0,1.0,0.0); glTranslated(400,400,10); glutSolidSphere(20.0,500,100); glPopMatrix(); } if(flag==2) { // just try and change values in these arrays, specially the position array drawstars(); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); // GLfloat emission[]={0.1,0.1,0.1,0.0}; GLfloat diffuse[] = { 0.40, 0.40,0.40, 1.0 }; GLfloat ambiance[] = { 0.5, 0.5,0.5, 1.0 }; GLfloat specular[] = { 1.3, 1.3,.3, 1.0 }; GLfloat intensity[]={500.0}; GLfloat position[] = { 10,30,-30,1.0 }; glLightfv (GL_LIGHT0, GL_POSITION, position); glLightfv (GL_LIGHT0, GL_DIFFUSE,diffuse); glLightfv (GL_LIGHT0, GL_AMBIENT,ambiance); glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER,GL_TRUE); glLightfv (GL_LIGHT0, 
GL_SPECULAR,specular); glLightfv (GL_LIGHT0, GL_INTENSITY,intensity); glColor3f(0,0.5,0.996); glBegin(GL_POLYGON); glVertex2f(0,0); glVertex2f(1000,0); glVertex2f(0,150); glVertex2f(1000,150); glEnd(); glTranslatef(q,w,e); glPushMatrix(); glColor3f(1.0,0.0,0.0); draw_boat(); draw_fishes(); glDisable(GL_LIGHTING); glDisable(GL_LIGHT0); draw_cloud(); glClearColor(0.0,0.0,0.0,0.0); glPushMatrix(); glColor3f(1.0,1.0,1.0); glTranslated(400,400,10); glutSolidSphere(20.0,500,100); glPopMatrix(); glColor3f(1.0,1.0,1.0); glBegin(GL_POINTS); glVertex3f(300.0,400.0,10.0); glEnd(); } glPushMatrix(); glTranslatef(ba,bb,bc); glPushMatrix(); draw_bird(); glPopMatrix(); glPopMatrix(); GLfloat i; glPushMatrix(); GLfloat x=0,y=100,j=0; int k; //draw_water(); q+=25; fq-=3.5; if(q>=440.0) //470 q=-390.0; //400 if(fq<=-300) //500 fq=400.0; //400 wa-=1; if(wa<=(-20)) wa=-0.5; ba+=6; if(ba>=500) ba=-400; glFlush(); glutSwapBuffers(); } void display(void) { draw_all(); } void color_menu(int id) { switch(id) { case 1: flag=0;break; case 2: flag=1;break; case 3: flag=2;break; case 4: exit(0); break; } glutPostRedisplay(); } void main_menu(int id) { switch(id) { case 1: break; case 2:exit(0);break; glutPostRedisplay(); } } int main(int argc,char **argv) { int sub_menu; glutInit(&argc,argv); glutInitDisplayMode(GLUT_RGB|GLUT_DOUBLE); glutInitWindowSize(1000,1000); glutInitWindowPosition(0,0); glutCreateWindow("Ship"); sub_menu=glutCreateMenu(color_menu); glutAddMenuEntry("Morning",1); glutAddMenuEntry("Evening",2); glutAddMenuEntry("Night",3); glutAddMenuEntry("Quit",4); glutCreateMenu(main_menu); glutAddSubMenu("View",sub_menu); glutAddMenuEntry("Quit",2); glutAttachMenu(GLUT_RIGHT_BUTTON); glutDisplayFunc(display); glutIdleFunc(display); myinit(); glutMainLoop(); glFlush(); }
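This second version drops the Sleep call entirely and bumps q by 25 per idle callback, so movement speed depends on how fast the idle function fires, which can be very fast on a Linux box. A hedged alternative (the per-second speeds below are illustrative, not taken from the original) is to scale movement by elapsed time via glutGet(GLUT_ELAPSED_TIME):

/* Sketch: frame-rate independent movement. Speeds become units-per-second
 * instead of units-per-idle-call. Register with glutIdleFunc(animate) and
 * keep display() purely for drawing. */
static int last_ms = 0;

void animate(void)
{
    int now = glutGet(GLUT_ELAPSED_TIME);    /* milliseconds since glutInit */
    float dt = (now - last_ms) / 1000.0f;    /* seconds since last frame */
    last_ms = now;

    q  += 100.0f * dt;                       /* boat */
    fq -= 14.0f * dt;                        /* fishes */
    if (q >= 440.0f)   q = -390.0f;
    if (fq <= -300.0f) fq = 400.0f;

    glutPostRedisplay();
}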

    Read the article

  • TFS 2010 SDK: Connecting to TFS 2010 Programmatically&ndash;Part 1

    - by Tarun Arora
    Technorati Tags: Team Foundation Server 2010,TFS 2010 SDK,TFS API,TFS Programming,TFS ALM   Download Working Demo Great! You have reached that point where you would like to extend TFS 2010. The first step is to connect to TFS programmatically. 1. Download TFS 2010 SDK => http://visualstudiogallery.msdn.microsoft.com/25622469-19d8-4959-8e5c-4025d1c9183d?SRC=VSIDE 2. Alternatively you can also download this from the visual studio extension manager 3. Create a new Windows Forms Application project and add reference to TFS Common and client dlls Note - If Microsoft.TeamFoundation.Client and Microsoft.TeamFoundation.Common do not appear on the .NET tab of the References dialog box, use the Browse tab to add the assemblies. You can find them at %ProgramFiles%\Microsoft Visual Studio 10.0\Common7\IDE\ReferenceAssemblies\v2.0. using Microsoft.TeamFoundation.Client; using Microsoft.TeamFoundation.Framework.Client; using Microsoft.TeamFoundation.Framework.Common;   4. There are several ways to connect to TFS, the two classes of interest are, Option 1 – Class – TfsTeamProjectCollectionClass namespace Microsoft.TeamFoundation.Client { public class TfsTeamProjectCollection : TfsConnection { public TfsTeamProjectCollection(RegisteredProjectCollection projectCollection); public TfsTeamProjectCollection(Uri uri); public TfsTeamProjectCollection(RegisteredProjectCollection projectCollection, IdentityDescriptor identityToImpersonate); public TfsTeamProjectCollection(Uri uri, ICredentials credentials); public TfsTeamProjectCollection(Uri uri, ICredentialsProvider credentialsProvider); public TfsTeamProjectCollection(Uri uri, IdentityDescriptor identityToImpersonate); public TfsTeamProjectCollection(RegisteredProjectCollection projectCollection, ICredentials credentials, ICredentialsProvider credentialsProvider); public TfsTeamProjectCollection(Uri uri, ICredentials credentials, ICredentialsProvider credentialsProvider); public TfsTeamProjectCollection(RegisteredProjectCollection projectCollection, ICredentials credentials, ICredentialsProvider credentialsProvider, IdentityDescriptor identityToImpersonate); public TfsTeamProjectCollection(Uri uri, ICredentials credentials, ICredentialsProvider credentialsProvider, IdentityDescriptor identityToImpersonate); public override CatalogNode CatalogNode { get; } public TfsConfigurationServer ConfigurationServer { get; internal set; } public override string Name { get; } public static Uri GetFullyQualifiedUriForName(string name); protected override object GetServiceInstance(Type serviceType, object serviceInstance); protected override object InitializeTeamFoundationObject(string fullName, object instance); } } Option 2 – Class – TfsConfigurationServer namespace Microsoft.TeamFoundation.Client { public class TfsConfigurationServer : TfsConnection { public TfsConfigurationServer(RegisteredConfigurationServer application); public TfsConfigurationServer(Uri uri); public TfsConfigurationServer(RegisteredConfigurationServer application, IdentityDescriptor identityToImpersonate); public TfsConfigurationServer(Uri uri, ICredentials credentials); public TfsConfigurationServer(Uri uri, ICredentialsProvider credentialsProvider); public TfsConfigurationServer(Uri uri, IdentityDescriptor identityToImpersonate); public TfsConfigurationServer(RegisteredConfigurationServer application, ICredentials credentials, ICredentialsProvider credentialsProvider); public TfsConfigurationServer(Uri uri, ICredentials credentials, ICredentialsProvider credentialsProvider); public 
TfsConfigurationServer(RegisteredConfigurationServer application, ICredentials credentials, ICredentialsProvider credentialsProvider, IdentityDescriptor identityToImpersonate); public TfsConfigurationServer(Uri uri, ICredentials credentials, ICredentialsProvider credentialsProvider, IdentityDescriptor identityToImpersonate); public override CatalogNode CatalogNode { get; } public override string Name { get; } protected override object GetServiceInstance(Type serviceType, object serviceInstance); public TfsTeamProjectCollection GetTeamProjectCollection(Guid collectionId); protected override object InitializeTeamFoundationObject(string fullName, object instance); } }   Note – the TeamFoundationServer class is obsolete. Use the TfsTeamProjectCollection or TfsConfigurationServer classes to talk to a 2010 Team Foundation Server. In order to talk to a 2005 or 2008 Team Foundation Server, use the TfsTeamProjectCollection class. 5. Sample code for programmatically connecting to TFS 2010 using the TFS 2010 API. How do I know what the URI of my TFS server is? Note – you need to have the Team Project Collection view details permission in order to connect; expect to receive an authorization failure message if you do not have sufficient permissions. Case 1: Connect by Uri string _myUri = @"https://tfs.codeplex.com:443/tfs/tfs30"; TfsConfigurationServer configurationServer = TfsConfigurationServerFactory.GetConfigurationServer(new Uri(_myUri)); Case 2: Connect by Uri, prompt for credentials string _myUri = @"https://tfs.codeplex.com:443/tfs/tfs30"; TfsConfigurationServer configurationServer = TfsConfigurationServerFactory.GetConfigurationServer(new Uri(_myUri), new UICredentialsProvider()); configurationServer.EnsureAuthenticated(); Case 3: Connect by Uri, custom credentials In order to use this method of connectivity you need to implement the interface ICredentialsProvider: public class ConnectByImplementingCredentialsProvider : ICredentialsProvider { public ICredentials GetCredentials(Uri uri, ICredentials iCredentials) { return new NetworkCredential("UserName", "Password", "Domain"); } public void NotifyCredentialsAuthenticated(Uri uri) { throw new ApplicationException("Unable to authenticate"); } } And now consume the implementation of the interface: string _myUri = @"https://tfs.codeplex.com:443/tfs/tfs30"; ConnectByImplementingCredentialsProvider connect = new ConnectByImplementingCredentialsProvider(); ICredentials iCred = new NetworkCredential("UserName", "Password", "Domain"); connect.GetCredentials(new Uri(_myUri), iCred); TfsConfigurationServer configurationServer = TfsConfigurationServerFactory.GetConfigurationServer(new Uri(_myUri), connect); configurationServer.EnsureAuthenticated();   6. Programmatically query TFS 2010 using the TFS SDK for all Team Project Collections, retrieve all Team Projects, and output the display name and description of each team project.
CatalogNode catalogNode = configurationServer.CatalogNode; ReadOnlyCollection<CatalogNode> tpcNodes = catalogNode.QueryChildren( new Guid[] { CatalogResourceTypes.ProjectCollection }, false, CatalogQueryOptions.None); // tpc = Team Project Collection foreach (CatalogNode tpcNode in tpcNodes) { Guid tpcId = new Guid(tpcNode.Resource.Properties["InstanceId"]); TfsTeamProjectCollection tpc = configurationServer.GetTeamProjectCollection(tpcId); // Get catalog of tp = 'Team Projects' for the tpc = 'Team Project Collection' var tpNodes = tpcNode.QueryChildren( new Guid[] { CatalogResourceTypes.TeamProject }, false, CatalogQueryOptions.None); foreach (var p in tpNodes) { Debug.Write(Environment.NewLine + " Team Project : " + p.Resource.DisplayName + " - " + p.Resource.Description + Environment.NewLine); } } Output: you can download a working demo that uses the TFS 2010 SDK to programmatically connect to TFS 2010. Screenshots of the demo application accompany the original post.
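If you already know the collection URL and just want its team projects, rather than walking the catalog from the configuration server, a shorter route is TfsTeamProjectCollectionFactory plus ICommonStructureService. A sketch (the URL is the codeplex one from above; treat the exact service usage as an assumption to verify against your server):

using System;
using Microsoft.TeamFoundation.Client;
using Microsoft.TeamFoundation.Server;

class ListTeamProjects
{
    static void Main()
    {
        // Connect straight to a known team project collection.
        TfsTeamProjectCollection tpc = TfsTeamProjectCollectionFactory
            .GetTeamProjectCollection(new Uri("https://tfs.codeplex.com:443/tfs/tfs30"));
        tpc.EnsureAuthenticated();

        // ICommonStructureService enumerates the team projects in the collection.
        var css = tpc.GetService<ICommonStructureService>();
        foreach (ProjectInfo project in css.ListProjects())
        {
            Console.WriteLine(project.Name);
        }
    }
}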

    Read the article

  • Aggregate SharePoint Event/Items into your Calendar view using Calendar Overlay

    - by eJugnoo
    One of the most common features I have seen in use for SharePoint (prior to 2010) in intranet environments for team sites is Calendars. Not only the Calendar list type, but also the ability to add a Calendar view to any list that has the desired columns to construct a calendar – such as Start, End, Title etc. While this was all great for a single site/calendar, the problem of having to track numerous calendars remained. The introduction of Outlook 2007's bi-directional integration with SharePoint, and particularly the ability of Outlook to overlay calendars, helped bridge the gap. Now one could connect to a number of team sites and set up calendar overlays in Outlook using varying colours, to easily identify the event source and yet benefit from the plotting of events on a single calendar view. This was all good, but each user in your enterprise had to set this up in a “pull” fashion. That is good for flexibility, not so good when you need to “push” consistency and productivity (re-use). So, what was missing in SharePoint was the ability to have server-side overlays that everyone can see – in a single place, aggregating multiple sources. Until SharePoint 2010 arrived!
    Calendar Overlays in SharePoint 2010
    There are Calendar lists and Calendar views. A view can be created for almost any list, as long as you have the desired columns in the list – such as Start, End, Title etc. – to be able to describe and plot an item in a calendar format. In SharePoint 2010, create a new Calendar list. Go to the Calendar ribbon tab and click Calendar Overlay. You get a screen listing the existing overlays associated with the current calendar (a list, in our case). Click on “New Calendar”… Notice the breadcrumb! You are adding an overlay to an existing list (Team Calendar, in our case). You have the choice of “pulling” calendar info from an existing calendar (list/view) in SharePoint or even from Exchange! Set standard info like a name and description, and decide the colour you want for the items in the aggregated calendar overlay. Select the source site/list/view, anywhere in the farm. When you select Exchange as the source of the calendar, you get the option to add the OWA and Exchange Web Service URLs. I will cover the details of connecting with Exchange in another post, and focus on overlays with SharePoint for this one. Once you have added a new calendar overlay to an existing Calendar view, you get something like the screenshots below for the Day view, Week view and Month view respectively. Notice the overlay colours. Now, if you decide to connect this calendar to Outlook to sync the items, it will only sync items from the main view, and not from the overlay sources. So such an overlay of calendars is server-side aggregation only. That raised my curiosity, so I tried adding the Calendar list view as a web part on a new page. As you can see, this instance of the view didn't include items from the source that we had added to the default Calendar view. This is – probably – due to the fact that this is a new web-part view for the page. If you want to add an overlay to this one, you have to redo that from the Ribbon. This also means that, subject to purpose and context, you get the flexibility to decide which overlay is suited. Also, you can only add 10 overlays to an existing view instance.
    Conclusion
    Calendar Overlay is clearly a very useful feature that fills the gap of not being able to aggregate information from multiple sources into a calendar view within the context of current items. The source of items can be existing SharePoint calendar views on any site, or even Exchange (via OWA/Exchange web services).
The list type of the source doesn't matter; it just needs a Calendar view type available. You can have up to 10 overlays. Overlays are for the specific view only, and are server-side only – which means they do not get synced in Outlook. While you can drag-drop current list items, you cannot edit overlay items, as they are read-only within the scope of the current calendar view. You can of course click on a source overlay item to edit it at the source. I'd like to hear how you think overlays will help you in your case, or how you are already using them... Enjoy SharePoint! --Sharad

    Read the article

  • Getting TF215097 error after modifying a build process template in TFS Team Build 2010

    - by Jakob Ehn
    When embracing Team Build 2010, you typically want to define several different build process templates for different scenarios. Common examples here are CI builds, QA builds and release builds. For example, in a continuous build you often have no interest in publishing to the symbol store, and you might or might not want to associate changesets and work items etc. The build server is often heavily occupied as it is, so you don't want to have it doing more than necessary. Try to define a set of build process templates that are used across your company. In previous versions of TFS Team Build, there was no easy way to do this. But in TFS 2010 it is very easy, so there is no excuse not to do it! :-)   I ran into a scenario today where I had an existing build definition that was based on our release build process template. In this template, we have defined several different build process parameters that control the release build. These are placed in their own section in the Build Process Parameters editor. This is done using the ProcessParameterMetadataCollection element; I will explain how this works in a future post.   I won't go into details on these parameters; the issue for this blog post is what happens when you modify a build process template so that it is no longer compatible with the build definition, i.e. a breaking change. In this case, I removed a parameter that was no longer necessary. After merging the new build process template to one of the projects and queueing a new release build, I got this error:   TF215097: An error occurred while initializing a build for build definition <Build Definition Name>: The values provided for the root activity's arguments did not satisfy the root activity's requirements: 'DynamicActivity': The following keys from the input dictionary do not map to arguments and must be removed: <Parameter Name>.  Please note that argument names are case sensitive. Parameter name: rootArgumentValues   <Parameter Name> was the parameter that I removed, so it was pretty easy to understand why the error had occurred. However, it is not entirely obvious how to fix the problem. When I open the build definition everything looks OK: the removed build process parameter is not there, and I can open the build process template without any validation warnings. The problem here is that all settings specific to a particular build definition are stored in the TFS database. In TFS 2005, everything that was related to a build was stored in TFS source control in files (TFSBuild.proj, WorkspaceMapping.xml...). In TFS 2008, many of these settings were moved into the database. Still, lots of things were stored in TFSBuild.proj, such as the solution and configuration to build, and whether to execute tests or not. In TFS 2010, all settings for a build definition are stored in the database. If we look inside the database we can see what this looks like. The table tbl_BuildDefinition contains all information for a build definition. One of the columns is called ProcessParameters and contains a serialized representation of a Dictionary, which is the underlying object where these settings are stored.
Here is an example:

<Dictionary x:TypeArguments="x:String, x:Object" xmlns="clr-namespace:System.Collections.Generic;assembly=mscorlib" xmlns:mtbwa="clr-namespace:Microsoft.TeamFoundation.Build.Workflow.Activities;assembly=Microsoft.TeamFoundation.Build.Workflow" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml">
  <mtbwa:BuildSettings x:Key="BuildSettings" ProjectsToBuild="$/PathToProject.sln">
    <mtbwa:BuildSettings.PlatformConfigurations>
      <mtbwa:PlatformConfigurationList Capacity="4">
        <mtbwa:PlatformConfiguration Configuration="Release" Platform="Any CPU" />
      </mtbwa:PlatformConfigurationList>
    </mtbwa:BuildSettings.PlatformConfigurations>
  </mtbwa:BuildSettings>
  <mtbwa:AgentSettings x:Key="AgentSettings" Tags="Agent1" />
  <x:Boolean x:Key="DisableTests">True</x:Boolean>
  <x:String x:Key="ReleaseRepositorySolution">ERP</x:String>
  <x:Int32 x:Key="Major">2</x:Int32>
  <x:Int32 x:Key="Minor">3</x:Int32>
</Dictionary>

Here we can see that it is really only the non-default values that are persisted into the database. So, the problem in my case was that I had removed one of the parameters from the build process template, but the parameter and its value still existed in the build definition in the database. The solution to the problem is to refresh the build definition and save it. In the Process tab, there is a Refresh button that will reload the build definition and the process template and synchronize them. After refreshing the build definition and saving it, the build ran successfully again.
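If many build definitions share the now-removed parameter, clicking Refresh on each one gets tedious. A sketch of doing the same cleanup programmatically: the WorkflowHelpers calls are from the 2010 API, but the server URL and definition names here are hypothetical placeholders:

using System;
using Microsoft.TeamFoundation.Build.Client;
using Microsoft.TeamFoundation.Build.Workflow;
using Microsoft.TeamFoundation.Client;

class StripStaleParameter
{
    static void Main()
    {
        var tpc = TfsTeamProjectCollectionFactory.GetTeamProjectCollection(
            new Uri("http://tfsserver:8080/tfs/DefaultCollection")); // hypothetical URL
        var buildServer = tpc.GetService<IBuildServer>();
        IBuildDefinition definition =
            buildServer.GetBuildDefinition("MyTeamProject", "MyReleaseBuild"); // hypothetical names

        // ProcessParameters holds the serialized dictionary shown above.
        var parameters = WorkflowHelpers.DeserializeProcessParameters(definition.ProcessParameters);
        if (parameters.Remove("ReleaseRepositorySolution")) // the parameter removed from the template
        {
            definition.ProcessParameters = WorkflowHelpers.SerializeProcessParameters(parameters);
            definition.Save();
        }
    }
}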

    Read the article

  • Upload File to Windows Azure Blob in Chunks through ASP.NET MVC, JavaScript and HTML5

    - by Shaun
    Originally posted on: http://geekswithblogs.net/shaunxu/archive/2013/07/01/upload-file-to-windows-azure-blob-in-chunks-through-asp.net.aspx
    Many people are using Windows Azure Blob Storage to store their data in the cloud. Blob storage provides 99.9% availability with an easy-to-use API through the .NET SDK and HTTP REST. For example, we can store JavaScript files, images and documents in blob storage when we are building an ASP.NET web application on a Web Role in Windows Azure. Or we can store our VHD files in blob storage and mount them as hard drives in our cloud service. If you are familiar with Windows Azure, you should know that there are two kinds of blob: page blobs and block blobs. The page blob is optimized for random read and write, which is very useful when you need to store VHD files. The block blob is optimized for sequential/chunked read and write, which is the more common usage. Since we can upload a block blob in blocks through BlockBlob.PutBlock, and then commit them as a whole blob by invoking BlockBlob.PutBlockList, it is a very powerful way to upload large files, as we can upload blocks in parallel and provide a pause-resume feature. There are many documents, articles and blog posts that describe how to upload a block blob. Most of them focus on the server side: once you have received a big file, stream or binary, how to upload it into blob storage in blocks through the .NET SDK. But the problem is: how can we upload these large files from the client side, for example a browser? This question came to me when I was working with a Chinese customer to help them build a network disk product on top of Azure. The end users upload their files from the web portal, and then the files are stored in blob storage by the Web Role. My goal was to find the best way to transfer the file from the client (the end user's machine) to the server (the Web Role) through the browser. In this post I will demonstrate and describe what I did to upload large files in chunks at high speed, and save them as blocks in Windows Azure Blob Storage.
    Traditional Upload, Works with Limitations
    The simplest way to implement this requirement is to create a web page with a form that contains a file input element and a submit button. 1: @using (Html.BeginForm("About", "Index", FormMethod.Post, new { enctype = "multipart/form-data" })) 2: { 3: <input type="file" name="file" /> 4: <input type="submit" value="upload" /> 5: } And then in the backend controller, we retrieve the whole content of this file and upload it into blob storage through the .NET SDK. We can split the file into blocks, upload them in parallel and commit them. The code has been well blogged in the community.
1: [HttpPost] 2: public ActionResult About(HttpPostedFileBase file) 3: { 4: var container = _client.GetContainerReference("test"); 5: container.CreateIfNotExists(); 6: var blob = container.GetBlockBlobReference(file.FileName); 7: var blockDataList = new Dictionary<string, byte[]>(); 8: using (var stream = file.InputStream) 9: { 10: var blockSizeInKB = 1024; 11: var offset = 0; 12: var index = 0; 13: while (offset < stream.Length) 14: { 15: var readLength = Math.Min(1024 * blockSizeInKB, (int)stream.Length - offset); 16: var blockData = new byte[readLength]; 17: offset += stream.Read(blockData, 0, readLength); 18: blockDataList.Add(Convert.ToBase64String(BitConverter.GetBytes(index)), blockData); 19:  20: index++; 21: } 22: } 23:  24: Parallel.ForEach(blockDataList, (bi) => 25: { 26: blob.PutBlock(bi.Key, new MemoryStream(bi.Value), null); 27: }); 28: blob.PutBlockList(blockDataList.Select(b => b.Key).ToArray()); 29:  30: return RedirectToAction("About"); 31: } This works perfectly if we select an image, a piece of music or a small video to upload. But if I select a large file, let's say a 6 GB HD movie, then after uploading for a few minutes the page is shown as below and the upload is terminated. In ASP.NET there is a limit on the request length; the maximum request length is defined in the web.config file, and it is a number that must be less than about 4 GB. So if we want to upload a really big file, we cannot simply implement it in this way. Also, in Windows Azure, the cloud service network load balancer will terminate the connection if it exceeds the timeout period. From my tests the timeout looks like 2 - 3 minutes. Hence, when we need to upload a large file we cannot just use the basic HTML elements. Besides the limitations mentioned above, the simple HTML file upload cannot provide a rich upload experience such as chunked upload, pause and pause-resume. So we need to find a better way to upload large files from the client to the server.
    Upload in Chunks through HTML5 and JavaScript
    In order to break the limitations mentioned above, we will try to upload the large file in chunks. This brings us some benefits, such as: - No request size limitation: since we upload in chunks, we can define the request size for each chunk regardless of how big the entire file is. - No timeout problem: the size of the chunks is controlled by us, which means we can make sure the request for each chunk upload will not exceed the timeout period of either ASP.NET or the Windows Azure load balancer. Uploading a big file in chunks was a big challenge until we got HTML5. There are some new features and improvements introduced in HTML5 and we will use them to implement our solution.   In HTML5, the File interface has been improved with a new method called "slice". It can be used to read part of the file by specifying the start byte index and the end byte index. For example, if the entire file is 1024 bytes, file.slice(512, 768) will read the part of this file from the 512th byte to the 768th byte, and return a new object of an interface called "Blob", which you can treat as an array of bytes. In fact, a Blob object represents a file-like object of immutable, raw data. The File interface is based on Blob, inheriting blob functionality and expanding it to support files on the user's system. For more information about the Blob please refer here. File and Blob are very useful for implementing the chunked upload.
We will use the File interface to represent the file the user selected in the browser, and then use File.slice to read the file in chunks of the size we want. For example, if we want to upload a 10MB file with 512KB chunks, we can read it in 512KB blobs by using File.slice in a loop.

Assume we have a web page as below: the user can select a file, specify the block size in KB in an input box, and click a button to start the upload.

<div>
    <input type="file" id="upload_files" name="files[]" /><br />
    Block Size: <input type="number" id="block_size" value="512" name="block_size" />KB<br />
    <input type="button" id="upload_button_blob" name="upload" value="upload (blob)" />
</div>

Then we can have a JavaScript function to upload the file in chunks when the user clicks the button.

<script type="text/javascript">
    $(function () {
        $("#upload_button_blob").click(function () {
        });
    });
</script>

First we need to ensure the client browser supports the interfaces we are going to use. Just try to invoke File, Blob and FormData from the "window" object. If any of them is "undefined" the condition result will be "false", which means your browser doesn't support these features and it's time for you to get your browser updated. FormData is another new feature we are going to use later. It lets us build a temporary form programmatically. We will use this interface to create a form with the chunk and its associated metadata when we invoke the service through ajax.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    if (window.File && window.Blob && window.FormData) {
        alert("Your browser is awesome, let's rock!");
    }
    else {
        alert("Oh man, please update to a modern browser before trying this cool stuff out.");
        return;
    }
});

Each browser supports these interfaces with its own implementation; currently Blob, File and File.slice are supported by Chrome 21, Firefox 13, IE 10, Opera 12 and Safari 5.1 or higher. After that we work on the files the user selected one by one, since in HTML5 the user can select multiple files in one file input box.

var files = $("#upload_files")[0].files;
for (var i = 0; i < files.length; i++) {
    var file = files[i];
    var fileSize = file.size;
    var fileName = file.name;
}

Next, we calculate the start index and end index for each chunk based on the size the user specified in the browser. We put them into an array together with the file name and the index, which will be used when we upload the chunks into Windows Azure Blob Storage as blocks, since we need to specify the target blob name and the block index. At the same time we store the list of all indexes in another variable, which will be used to commit the blocks into a blob in Azure Storage once all chunks have been uploaded successfully.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...
    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;

        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        var blockSizeInKB = $("#block_size").val();
        var blockSize = blockSizeInKB * 1024;
        var blocks = [];
        var offset = 0;
        var index = 0;
        var list = "";
        while (offset < fileSize) {
            var start = offset;
            var end = Math.min(offset + blockSize, fileSize);

            blocks.push({
                name: fileName,
                index: index,
                start: start,
                end: end
            });
            list += index + ",";

            offset = end;
            index++;
        }
    }
});

Now we have all the chunks' information ready. The next step is to upload them one by one to the server side; on the server side, when a chunk is received, it will be uploaded as a block into Blob Storage, and finally all blocks will be committed with the index list through BlockBlob.PutBlockList. But all of these invocations are ajax calls, which are asynchronous, so we need to introduce a JavaScript library to help us coordinate the asynchronous operations, named "async.js". You can download this JavaScript library here, and you can find the documentation here. I will not explain this library in depth in this post. We will put all the procedures we want to execute into a function array and pass it into the proper function defined in async.js to let it control the execution sequence, in series or in parallel. Hence we define an array and push the function for each chunk upload into this array.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...

    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;
        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        ... ...

        // define the function array and push all chunk upload operations into this array
        blocks.forEach(function (block) {
            putBlocks.push(function (callback) {
            });
        });
    }
});

As you can see, I use the File.slice method to read each chunk based on the start and end byte indexes we calculated previously, and construct a temporary HTML form with the file name, chunk index and chunk data through another new HTML5 feature named FormData. Then I post this form to the backend server through jQuery.ajax. This is the key part of our solution.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...
    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;
        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        ... ...
        // define the function array and push all chunk upload operations into this array
        blocks.forEach(function (block) {
            putBlocks.push(function (callback) {
                // load the blob based on the start and end index of each chunk
                var blob = file.slice(block.start, block.end);
                // put the file name, index and blob into a temporary form
                var fd = new FormData();
                fd.append("name", block.name);
                fd.append("index", block.index);
                fd.append("file", blob);
                // post the form to the backend service (asp.net mvc controller action)
                $.ajax({
                    url: "/Home/UploadInFormData",
                    data: fd,
                    processData: false,
                    contentType: "multipart/form-data",
                    type: "POST",
                    success: function (result) {
                        if (!result.success) {
                            alert(result.error);
                        }
                        callback(null, block.index);
                    }
                });
            });
        });
    }
});

Then we invoke these functions one by one using async.js. And once all functions have executed successfully, I invoke another ajax call to the backend service to commit all these chunks (blocks) as the blob in Windows Azure Storage.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...
    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;
        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        ... ...
        // define the function array and push all chunk upload operations into this array
        ... ...
        // invoke the functions one by one
        // then invoke the commit ajax call to put the blocks into a blob in azure storage
        async.series(putBlocks, function (error, result) {
            var data = {
                name: fileName,
                list: list
            };
            $.post("/Home/Commit", data, function (result) {
                if (!result.success) {
                    alert(result.error);
                }
                else {
                    alert("done!");
                }
            });
        });
    }
});

That's all on the client side. The outline of our logic is:
- Calculate the start and end byte index for each chunk based on the block size.
- Define the functions that read the chunks from the file and upload the content to the backend service through ajax.
- Execute the functions defined in the previous step with "async.js".
- Finally, commit the chunks into a blob in Windows Azure Storage by invoking the backend service.

Save Chunks as Blocks into Blob Storage

Above, we finished the client side JavaScript code. It uploads the file in chunks to the backend service, which we are going to implement in this step. We will use ASP.NET MVC as our backend service; it will receive the chunks, upload them into Windows Azure Blob Storage as blocks, then finally commit them as one blob. Since on the client side we upload chunks by invoking an ajax call to the URL "/Home/UploadInFormData", I created a new action on the Home controller which only accepts HTTP POST requests.

[HttpPost]
public JsonResult UploadInFormData()
{
    var error = string.Empty;
    try
    {
    }
    catch (Exception e)
    {
        error = e.ToString();
    }

    return new JsonResult()
    {
        Data = new
        {
            success = string.IsNullOrWhiteSpace(error),
            error = error
        }
    };
}

Then I retrieved the file name, index and the chunk content from the Request.Form object, which was passed from our client side.
Then I used the Windows Azure SDK to create a blob container (in this case the container named "test") and a blob reference with the blob name (the same as the file name), and uploaded the chunk as a block of this blob with the index. In Blob Storage each block must have an ID associated with it, so that finally we can put all blocks together as one blob by specifying the block ID list.

[HttpPost]
public JsonResult UploadInFormData()
{
    var error = string.Empty;
    try
    {
        var name = Request.Form["name"];
        var index = int.Parse(Request.Form["index"]);
        var file = Request.Files[0];
        var id = Convert.ToBase64String(BitConverter.GetBytes(index));

        var container = _client.GetContainerReference("test");
        container.CreateIfNotExists();
        var blob = container.GetBlockBlobReference(name);
        blob.PutBlock(id, file.InputStream, null);
    }
    catch (Exception e)
    {
        error = e.ToString();
    }

    return new JsonResult()
    {
        Data = new
        {
            success = string.IsNullOrWhiteSpace(error),
            error = error
        }
    };
}

Next, I created another action to commit the blocks into the blob once all chunks have been uploaded. Similarly, I retrieved the blob name from the Request.Form. I also retrieved the chunk ID list — the block ID list — from the Request.Form as a string, split it into a list, then invoked the BlockBlob.PutBlockList method. After that our blob will be shown in the container, ready to be downloaded.

[HttpPost]
public JsonResult Commit()
{
    var error = string.Empty;
    try
    {
        var name = Request.Form["name"];
        var list = Request.Form["list"];
        var ids = list
            .Split(',')
            .Where(id => !string.IsNullOrWhiteSpace(id))
            .Select(id => Convert.ToBase64String(BitConverter.GetBytes(int.Parse(id))))
            .ToArray();

        var container = _client.GetContainerReference("test");
        container.CreateIfNotExists();
        var blob = container.GetBlockBlobReference(name);
        blob.PutBlockList(ids);
    }
    catch (Exception e)
    {
        error = e.ToString();
    }

    return new JsonResult()
    {
        Data = new
        {
            success = string.IsNullOrWhiteSpace(error),
            error = error
        }
    };
}

Now we have finished all the code we need. Below is the full client side JavaScript code.
1: <script type="text/javascript" src="~/Scripts/async.js"></script> 2: <script type="text/javascript"> 3: $(function () { 4: $("#upload_button_blob").click(function () { 5: // assert the browser support html5 6: if (window.File && window.Blob && window.FormData) { 7: alert("Your brwoser is awesome, let's rock!"); 8: } 9: else { 10: alert("Oh man plz update to a modern browser before try is cool stuff out."); 11: return; 12: } 13:  14: // start to upload each files in chunks 15: var files = $("#upload_files")[0].files; 16: for (var i = 0; i < files.length; i++) { 17: var file = files[i]; 18: var fileSize = file.size; 19: var fileName = file.name; 20:  21: // calculate the start and end byte index for each blocks(chunks) 22: // with the index, file name and index list for future using 23: var blockSizeInKB = $("#block_size").val(); 24: var blockSize = blockSizeInKB * 1024; 25: var blocks = []; 26: var offset = 0; 27: var index = 0; 28: var list = ""; 29: while (offset < fileSize) { 30: var start = offset; 31: var end = Math.min(offset + blockSize, fileSize); 32:  33: blocks.push({ 34: name: fileName, 35: index: index, 36: start: start, 37: end: end 38: }); 39: list += index + ","; 40:  41: offset = end; 42: index++; 43: } 44:  45: // define the function array and push all chunk upload operation into this array 46: var putBlocks = []; 47: blocks.forEach(function (block) { 48: putBlocks.push(function (callback) { 49: // load blob based on the start and end index for each chunks 50: var blob = file.slice(block.start, block.end); 51: // put the file name, index and blob into a temporary from 52: var fd = new FormData(); 53: fd.append("name", block.name); 54: fd.append("index", block.index); 55: fd.append("file", blob); 56: // post the form to backend service (asp.net mvc controller action) 57: $.ajax({ 58: url: "/Home/UploadInFormData", 59: data: fd, 60: processData: false, 61: contentType: "multipart/form-data", 62: type: "POST", 63: success: function (result) { 64: if (!result.success) { 65: alert(result.error); 66: } 67: callback(null, block.index); 68: } 69: }); 70: }); 71: }); 72:  73: // invoke the functions one by one 74: // then invoke the commit ajax call to put blocks into blob in azure storage 75: async.series(putBlocks, function (error, result) { 76: var data = { 77: name: fileName, 78: list: list 79: }; 80: $.post("/Home/Commit", data, function (result) { 81: if (!result.success) { 82: alert(result.error); 83: } 84: else { 85: alert("done!"); 86: } 87: }); 88: }); 89: } 90: }); 91: }); 92: </script> And below is the full ASP.NET MVC controller code. 
public class HomeController : Controller
{
    private CloudStorageAccount _account;
    private CloudBlobClient _client;

    public HomeController()
        : base()
    {
        _account = CloudStorageAccount.Parse(CloudConfigurationManager.GetSetting("DataConnectionString"));
        _client = _account.CreateCloudBlobClient();
    }

    public ActionResult Index()
    {
        ViewBag.Message = "Modify this template to jump-start your ASP.NET MVC application.";

        return View();
    }

    [HttpPost]
    public JsonResult UploadInFormData()
    {
        var error = string.Empty;
        try
        {
            var name = Request.Form["name"];
            var index = int.Parse(Request.Form["index"]);
            var file = Request.Files[0];
            var id = Convert.ToBase64String(BitConverter.GetBytes(index));

            var container = _client.GetContainerReference("test");
            container.CreateIfNotExists();
            var blob = container.GetBlockBlobReference(name);
            blob.PutBlock(id, file.InputStream, null);
        }
        catch (Exception e)
        {
            error = e.ToString();
        }

        return new JsonResult()
        {
            Data = new
            {
                success = string.IsNullOrWhiteSpace(error),
                error = error
            }
        };
    }

    [HttpPost]
    public JsonResult Commit()
    {
        var error = string.Empty;
        try
        {
            var name = Request.Form["name"];
            var list = Request.Form["list"];
            var ids = list
                .Split(',')
                .Where(id => !string.IsNullOrWhiteSpace(id))
                .Select(id => Convert.ToBase64String(BitConverter.GetBytes(int.Parse(id))))
                .ToArray();

            var container = _client.GetContainerReference("test");
            container.CreateIfNotExists();
            var blob = container.GetBlockBlobReference(name);
            blob.PutBlockList(ids);
        }
        catch (Exception e)
        {
            error = e.ToString();
        }

        return new JsonResult()
        {
            Data = new
            {
                success = string.IsNullOrWhiteSpace(error),
                error = error
            }
        };
    }
}

And if we select a file in the browser, we will see our application upload chunks of the size we specified to the server through ajax calls in the background, and then commit all chunks into one blob. Then we can find the blob in our Windows Azure Blob Storage.

Optimized by Parallel Upload

In the previous example we just uploaded our file in chunks. This solved the ASP.NET MVC request content size limitation as well as the Windows Azure load balancer timeout. But it might introduce a performance problem, since we uploaded the chunks in sequence. In order to improve the upload performance we can modify our client side code a bit to invoke the upload operations in parallel. The good news is that the "async.js" library provides a parallel execution function. If you remember the code that invokes the service to upload the chunks, it used "async.series", which means all functions are executed in sequence. Now we will change this code to "async.parallel", which invokes all functions in parallel.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...
    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;
        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        ... ...
        // define the function array and push all chunk upload operations into this array
        ... ...
        // invoke the functions in parallel
        // then invoke the commit ajax call to put the blocks into a blob in azure storage
        async.parallel(putBlocks, function (error, result) {
            var data = {
                name: fileName,
                list: list
            };
            $.post("/Home/Commit", data, function (result) {
                if (!result.success) {
                    alert(result.error);
                }
                else {
                    alert("done!");
                }
            });
        });
    }
});

In this way all chunks will be uploaded to the server at the same time to maximize the bandwidth usage. This works well if the file is not very large and the chunk size is not very small. But for a large file this might introduce another problem: too many ajax calls are sent to the server at the same time. So the best solution is to upload the chunks in parallel with a maximum concurrency limit. The code below sets the concurrency limit to 4, which means at most 4 ajax calls can be in flight at the same time.

$("#upload_button_blob").click(function () {
    // assert the browser supports html5
    ... ...
    // start to upload each file in chunks
    var files = $("#upload_files")[0].files;
    for (var i = 0; i < files.length; i++) {
        var file = files[i];
        var fileSize = file.size;
        var fileName = file.name;
        // calculate the start and end byte index for each block (chunk)
        // along with the index, file name and index list for future use
        ... ...
        // define the function array and push all chunk upload operations into this array
        ... ...
        // invoke the functions in parallel with a concurrency limit of 4
        // then invoke the commit ajax call to put the blocks into a blob in azure storage
        async.parallelLimit(putBlocks, 4, function (error, result) {
            var data = {
                name: fileName,
                list: list
            };
            $.post("/Home/Commit", data, function (result) {
                if (!result.success) {
                    alert(result.error);
                }
                else {
                    alert("done!");
                }
            });
        });
    }
});

Summary

In this post we discussed how to upload files in chunks to the backend service and then upload them into Windows Azure Blob Storage in blocks. We focused on the frontend side and leveraged three new features introduced in HTML5:
- File.slice: read part of the file by specifying the start and end byte index.
- Blob: a file-like interface which contains part of the file content.
- FormData: a temporary form element that lets us pass a chunk along with some metadata to the backend service.

Then we discussed the performance considerations of chunked uploading. Sequential upload cannot provide the maximum upload speed, while unlimited parallel upload might overwhelm the browser and the server if there are too many chunks. So we finally came up with the solution of uploading chunks in parallel with a concurrency limit. We also demonstrated how to utilize the "async.js" JavaScript library to help us control the asynchronous calls and the parallelism limit.

Regarding the chunk size and the concurrency limit there is no single "best" value. You need to test various combinations and find the best one for your particular scenario. It depends on the local bandwidth, the client machine cores, and the cores, memory and bandwidth of the server side (the Windows Azure Cloud Service virtual machine). Below is one of my performance test results. The client machine was Windows 8 with IE 10 and 4 cores, on the Microsoft corporate network. The web site was hosted in the Windows Azure China North data center (in Beijing) with one small web role (1 core 1.7GHz CPU, 1.75GB memory, 100Mbps bandwidth).
The test cases were:
- Chunk size: 512KB, 1MB, 2MB, 4MB.
- Upload mode: sequential, parallel (unlimited), parallel with limit (4 threads, 8 threads).
- Chunk format: base64 string, binaries.
- Target file: 100MB.
- Each case was tested 3 times.

Below is the test result chart. Some thoughts, but not guidance or best practice:
- Parallel gets better performance than sequential.
- There is no significant performance difference between parallel with 4 threads and with 8 threads.
- Transferring binaries provides better performance than base64 strings.
- In all cases, a chunk size of 1MB - 2MB gets the best performance.

Hope this helps, Shaun

All documents and related graphics, codes are provided "AS IS" without warranty of any kind. Copyright © Shaun Ziyan Xu. This work is licensed under the Creative Commons License.

    Read the article

  • Node.js Adventure - When Node Flying in Wind

    - by Shaun
In the first post of this series I mentioned some popular modules in the community, such as underscore, async, etc. I also listed a module named "Wind (zh-CN)", which was created by a friend of mine, Jeff Zhao (zh-CN). Now I would like to use a separate post to introduce this module, since I feel it brings a new async programming style to not only Node.js but the whole JavaScript world. If you know or have heard about the new feature in C# 5.0 called "async and await", or you have learnt F#, you will find that "Wind" brings a similar async programming experience to JavaScript. By using "Wind", we can write async code that looks like sync code. The callbacks, async state and exceptions are handled by "Wind" automatically and transparently.

What's the Problem: Dense "Callback" Phobia

Let's first go back to my second post in this series. As I mentioned in that post, when we want to read some records from SQL Server we need to open the database connection and then execute the query. In Node.js all IO operations are designed around the async callback pattern, which means that when the operation is done, it invokes a function that was passed as its last parameter. For example, the database connection opening code would be like this.

sql.open(connectionString, function(error, conn) {
    if(error) {
        // some error handling code
    }
    else {
        // connection opened successfully
    }
});

And then if we need to query the database, the code would be like this, nested inside the previous function.

sql.open(connectionString, function(error, conn) {
    if(error) {
        // some error handling code
    }
    else {
        // connection opened successfully
        conn.queryRaw(command, function(error, results) {
            if(error) {
                // failed to execute this command
            }
            else {
                // records retrieved successfully
            }
        });
    }
});

Assume we need to copy some data from this database to another: then we need to open another connection and execute the command within the function nested under the query function.

sql.open(connectionString, function(error, conn) {
    if(error) {
        // some error handling code
    }
    else {
        // connection opened successfully
        conn.queryRaw(command, function(error, results) {
            if(error) {
                // failed to execute this command
            }
            else {
                // records retrieved successfully
                target.open(targetConnectionString, function(error, t_conn) {
                    if(error) {
                        // connect failed
                    }
                    else {
                        t_conn.queryRaw(copy_command, function(error, results) {
                            if(error) {
                                // copy failed
                            }
                            else {
                                // and then, what do you want to do now...
                            }
                        });
                    }
                });
            }
        });
    }
});

This is just an example; in a real project the logic would be more complicated. It means our application can become messed up, with the business process fragmented across many callback functions. I would like to call this "Dense Callback Phobia". The challenge is to keep the code straightforward and easy to read, something like below.
try
{
    // open source connection
    var s_conn = sqlConnect(s_connectionString);
    // retrieve data
    var results = sqlExecuteCommand(s_conn, s_command);

    // open target connection
    var t_conn = sqlConnect(t_connectionString);
    // prepare the copy command
    var t_command = getCopyCommand(results);
    // execute the copy command
    sqlExecuteCommand(t_conn, t_command);
}
catch (ex)
{
    // error handling
}

What's the Problem: Sync-styled Async Programming

Similar to the previous problem, the callback-styled async programming model makes the next operation part of the current operation, mixed in with the error handling code, so it's very hard to understand what on earth the code will do. And since Node.js uses non-blocking IO, we cannot simply invoke these operations one after another, as they will be executed concurrently. For example, in this post, when I tried to copy the records from Windows Azure SQL Database (a.k.a. WASD) to Windows Azure Table Storage, if I just inserted the data into table storage one by one and then printed the "Finished" message, I would see the message shown before the data had been copied. This is because all operations were executed at the same time. In order to make the copy operation and the print operation execute in order, I introduced a module named "async" and the code was changed as below.

async.forEach(results.rows,
    function (row, callback) {
        var resource = {
            "PartitionKey": row[1],
            "RowKey": row[0],
            "Value": row[2]
        };
        client.insertEntity(tableName, resource, function (error) {
            if (error) {
                callback(error);
            }
            else {
                console.log("entity inserted.");
                callback(null);
            }
        });
    },
    function (error) {
        if (error) {
            error["target"] = "insertEntity";
            res.send(500, error);
        }
        else {
            console.log("all done.");
            res.send(200, "Done!");
        }
    });

This ensured that the "Finished" message is printed after all table entities have been inserted. But it cannot promise that the records are inserted in sequence. It is another challenge to make the code look sync-styled, like this:

try
{
    forEach(row in rows) {
        var entity = { /* ... */ };
        tableClient.insert(tableName, entity);
    }

    console.log("Finished");
}
catch (ex) {
    console.log(ex);
}

How "Wind" Helps

"Wind" is a JavaScript library which provides control flow in plain JavaScript for asynchronous programming (and more) without additional pre-compiling steps. It's available on NPM, so we can install it through "npm install wind". Now let's create a very simple Node.js application as an example. This application takes some website URLs from the command arguments, tries to retrieve the body length of each, prints the lengths in the console, and at the end prints "Finished". I'm going to use the "request" module to keep the HTTP calls simple, so I need to install it as well with "npm install request". The code would be like this.
1: var request = require("request"); 2:  3: // get the urls from arguments, the first two arguments are `node.exe` and `fetch.js` 4: var args = process.argv.splice(2); 5:  6: // main function 7: var main = function() { 8: for(var i = 0; i < args.length; i++) { 9: // get the url 10: var url = args[i]; 11: // send the http request and try to get the response and body 12: request(url, function(error, response, body) { 13: if(!error && response.statusCode == 200) { 14: // log the url and the body length 15: console.log( 16: "%s: %d.", 17: response.request.uri.href, 18: body.length); 19: } 20: else { 21: // log error 22: console.log(error); 23: } 24: }); 25: } 26: 27: // finished 28: console.log("Finished"); 29: }; 30:  31: // execute the main function 32: main(); Let’s execute this application. (I made them in multi-lines for better reading.) 1: node fetch.js 2: "http://www.igt.com/us-en.aspx" 3: "http://www.igt.com/us-en/games.aspx" 4: "http://www.igt.com/us-en/cabinets.aspx" 5: "http://www.igt.com/us-en/systems.aspx" 6: "http://www.igt.com/us-en/interactive.aspx" 7: "http://www.igt.com/us-en/social-gaming.aspx" 8: "http://www.igt.com/support.aspx" Below is the output. As you can see the finish message was printed at the beginning, and the pages’ length retrieved in a different order than we specified. This is because in this code the request command, console logging command are executed asynchronously and concurrently. Now let’s introduce “Wind” to make them executed in order, which means it will request the websites one by one, and print the message at the end.   First of all we need to import the “Wind” package and make sure the there’s only one global variant named “Wind”, and ensure it’s “Wind” instead of “wind”. 1: var Wind = require("wind");   Next, we need to tell “Wind” which code will be executed asynchronously so that “Wind” can control the execution process. In this case the “request” operation executed asynchronously so we will create a “Task” by using a build-in helps function in “Wind” named Wind.Async.Task.create. 1: var requestBodyLengthAsync = function(url) { 2: return Wind.Async.Task.create(function(t) { 3: request(url, function(error, response, body) { 4: if(error || response.statusCode != 200) { 5: t.complete("failure", error); 6: } 7: else { 8: var data = 9: { 10: uri: response.request.uri.href, 11: length: body.length 12: }; 13: t.complete("success", data); 14: } 15: }); 16: }); 17: }; The code above created a “Task” from the original request calling code. In “Wind” a “Task” means an operation will be finished in some time in the future. A “Task” can be started by invoke its start() method, but no one knows when it actually will be finished. The Wind.Async.Task.create helped us to create a task. The only parameter is a function where we can put the actual operation in, and then notify the task object it’s finished successfully or failed by using the complete() method. In the code above I invoked the request method. If it retrieved the response successfully I set the status of this task as “success” with the URL and body length. If it failed I set this task as “failure” and pass the error out.   Next, we will change the main() function. In “Wind” if we want a function can be controlled by Wind we need to mark it as “async”. This should be done by using the code below. 
var main = eval(Wind.compile("async", function() {
}));

When the application is running, Wind detects "eval(Wind.compile("async", function" and generates anonymous code from the body of the original function. The application then runs the anonymous code instead of the original one. In our example the main function will be like this.

var main = eval(Wind.compile("async", function() {
    for(var i = 0; i < args.length; i++) {
        try
        {
            var result = $await(requestBodyLengthAsync(args[i]));
            console.log(
                "%s: %d.",
                result.uri,
                result.length);
        }
        catch (ex) {
            console.log(ex);
        }
    }

    console.log("Finished");
}));

As you can see, when I try to request the URL I use a new command named "$await". It tells Wind that the operation next to $await will be executed asynchronously, and that execution should pause until it has finished (or failed). So in this case my application will pause until the first response is received, then print its body length, then try the next one, and at the end print the finish message.

Finally, execute the main function. The full code would be like this.

var request = require("request");
var Wind = require("wind");

var args = process.argv.splice(2);

var requestBodyLengthAsync = function(url) {
    return Wind.Async.Task.create(function(t) {
        request(url, function(error, response, body) {
            if(error || response.statusCode != 200) {
                t.complete("failure", error);
            }
            else {
                var data =
                {
                    uri: response.request.uri.href,
                    length: body.length
                };
                t.complete("success", data);
            }
        });
    });
};

var main = eval(Wind.compile("async", function() {
    for(var i = 0; i < args.length; i++) {
        try
        {
            var result = $await(requestBodyLengthAsync(args[i]));
            console.log(
                "%s: %d.",
                result.uri,
                result.length);
        }
        catch (ex) {
            console.log(ex);
        }
    }

    console.log("Finished");
}));

main().start();

Run our new application. At the beginning we see the code compiled and generated by Wind. Then we see the pages requested one by one, and at the end the finish message. Below is the code Wind generated for us; both the original code and the generated output are shown.
// Original:
function () {
    for(var i = 0; i < args.length; i++) {
        try
        {
            var result = $await(requestBodyLengthAsync(args[i]));
            console.log(
                "%s: %d.",
                result.uri,
                result.length);
        }
        catch (ex) {
            console.log(ex);
        }
    }

    console.log("Finished");
}

// Compiled:
/* async << function () { */ (function () {
    var _builder_$0 = Wind.builders["async"];
    return _builder_$0.Start(this,
        _builder_$0.Combine(
            _builder_$0.Delay(function () {
                /* var i = 0; */ var i = 0;
                /* for ( */ return _builder_$0.For(function () {
                    /* ; i < args.length */ return i < args.length;
                }, function () {
                    /* ; i ++) { */ i ++;
                },
                /* try { */ _builder_$0.Try(
                    _builder_$0.Delay(function () {
                        /* var result = $await(requestBodyLengthAsync(args[i])); */ return _builder_$0.Bind(requestBodyLengthAsync(args[i]), function (result) {
                            /* console.log("%s: %d.", result.uri, result.length); */ console.log("%s: %d.", result.uri, result.length);
                            return _builder_$0.Normal();
                        });
                    }),
                    /* } catch (ex) { */ function (ex) {
                        /* console.log(ex); */ console.log(ex);
                        return _builder_$0.Normal();
                    /* } */ },
                    null
                )
                /* } */ );
            }),
            _builder_$0.Delay(function () {
                /* console.log("Finished"); */ console.log("Finished");
                return _builder_$0.Normal();
            })
        )
    );
/* } */ })

How Wind Works

Some people may raise a big concern on finding that I used "eval" in my code, assuming that Wind uses "eval" to execute code dynamically — and "eval" performs very poorly. But I would say: Wind does NOT use "eval" to run the code. It only uses "eval" as a flag to know which code should be compiled at runtime. When the code is first executed, Wind checks for and finds "eval(Wind.compile("async", function", so it knows this function should be compiled. It then uses parse-js to analyze the inner JavaScript, generates the anonymous code in memory, and rewrites the original code so that when the application runs it uses the anonymous function instead of the original one. Since the code generation is done once, when the application starts, it doesn't matter how long our application runs or how many times the async function is invoked — the generated code is reused, with no need to generate it again. So there's no significant performance hit when using Wind.

Wind in My Previous Demo

Let's adopt Wind in one of my previous demonstrations to see how it helps us make our code simple, straightforward and easy to read and understand. In this post, when I implemented the functionality that copied the records from my WASD to table storage, the logic was: 1, Open the database connection. 2, Execute a query to select all records from the table. 3, Recreate the table in Windows Azure table storage. 4, Create entities from each of the records retrieved previously, and insert them into table storage. 5, Finally, show a message as the HTTP response. But as the image below shows, with so many callbacks and async operations it's very hard to understand the logic from the code. Now let's use Wind to rewrite our code. First of all, of course, we need the Wind package. Then we need to include the package files in the project and mark them as "Copy always". Add the Wind package to the source code. Pay attention to the variable name: you must use "Wind" instead of "wind".
1: var express = require("express"); 2: var async = require("async"); 3: var sql = require("node-sqlserver"); 4: var azure = require("azure"); 5: var Wind = require("wind"); Now we need to create some async functions by using Wind. All async functions should be wrapped so that it can be controlled by Wind which are open database, retrieve records, recreate table (delete and create) and insert entity in table. Below are these new functions. All of them are created by using Wind.Async.Task.create. 1: sql.openAsync = function (connectionString) { 2: return Wind.Async.Task.create(function (t) { 3: sql.open(connectionString, function (error, conn) { 4: if (error) { 5: t.complete("failure", error); 6: } 7: else { 8: t.complete("success", conn); 9: } 10: }); 11: }); 12: }; 13:  14: sql.queryAsync = function (conn, query) { 15: return Wind.Async.Task.create(function (t) { 16: conn.queryRaw(query, function (error, results) { 17: if (error) { 18: t.complete("failure", error); 19: } 20: else { 21: t.complete("success", results); 22: } 23: }); 24: }); 25: }; 26:  27: azure.recreateTableAsync = function (tableName) { 28: return Wind.Async.Task.create(function (t) { 29: client.deleteTable(tableName, function (error, successful, response) { 30: console.log("delete table finished"); 31: client.createTableIfNotExists(tableName, function (error, successful, response) { 32: console.log("create table finished"); 33: if (error) { 34: t.complete("failure", error); 35: } 36: else { 37: t.complete("success", null); 38: } 39: }); 40: }); 41: }); 42: }; 43:  44: azure.insertEntityAsync = function (tableName, entity) { 45: return Wind.Async.Task.create(function (t) { 46: client.insertEntity(tableName, entity, function (error, entity, response) { 47: if (error) { 48: t.complete("failure", error); 49: } 50: else { 51: t.complete("success", null); 52: } 53: }); 54: }); 55: }; Then in order to use these functions we will create a new function which contains all steps for data copying. 1: var copyRecords = eval(Wind.compile("async", function (req, res) { 2: try { 3: } 4: catch (ex) { 5: console.log(ex); 6: res.send(500, "Internal error."); 7: } 8: })); Let’s execute steps one by one with the “$await” keyword introduced by Wind so that it will be invoked in sequence. First is to open the database connection. 1: var copyRecords = eval(Wind.compile("async", function (req, res) { 2: try { 3: // connect to the windows azure sql database 4: var conn = $await(sql.openAsync(connectionString)); 5: console.log("connection opened"); 6: } 7: catch (ex) { 8: console.log(ex); 9: res.send(500, "Internal error."); 10: } 11: })); Then retrieve all records from the database connection. 1: var copyRecords = eval(Wind.compile("async", function (req, res) { 2: try { 3: // connect to the windows azure sql database 4: var conn = $await(sql.openAsync(connectionString)); 5: console.log("connection opened"); 6: // retrieve all records from database 7: var results = $await(sql.queryAsync(conn, "SELECT * FROM [Resource]")); 8: console.log("records selected. count = %d", results.rows.length); 9: } 10: catch (ex) { 11: console.log(ex); 12: res.send(500, "Internal error."); 13: } 14: })); After recreated the table, we need to create the entities and insert them into table storage. 
var copyRecords = eval(Wind.compile("async", function (req, res) {
    try {
        // connect to the windows azure sql database
        var conn = $await(sql.openAsync(connectionString));
        console.log("connection opened");
        // retrieve all records from database
        var results = $await(sql.queryAsync(conn, "SELECT * FROM [Resource]"));
        console.log("records selected. count = %d", results.rows.length);
        if (results.rows.length > 0) {
            // recreate the table
            $await(azure.recreateTableAsync(tableName));
            console.log("table created");
            // insert records into table storage one by one
            for (var i = 0; i < results.rows.length; i++) {
                var entity = {
                    "PartitionKey": results.rows[i][1],
                    "RowKey": results.rows[i][0],
                    "Value": results.rows[i][2]
                };
                $await(azure.insertEntityAsync(tableName, entity));
                console.log("entity inserted");
            }
        }
    }
    catch (ex) {
        console.log(ex);
        res.send(500, "Internal error.");
    }
}));

Finally, send the response back to the browser.

var copyRecords = eval(Wind.compile("async", function (req, res) {
    try {
        // connect to the windows azure sql database
        var conn = $await(sql.openAsync(connectionString));
        console.log("connection opened");
        // retrieve all records from database
        var results = $await(sql.queryAsync(conn, "SELECT * FROM [Resource]"));
        console.log("records selected. count = %d", results.rows.length);
        if (results.rows.length > 0) {
            // recreate the table
            $await(azure.recreateTableAsync(tableName));
            console.log("table created");
            // insert records into table storage one by one
            for (var i = 0; i < results.rows.length; i++) {
                var entity = {
                    "PartitionKey": results.rows[i][1],
                    "RowKey": results.rows[i][0],
                    "Value": results.rows[i][2]
                };
                $await(azure.insertEntityAsync(tableName, entity));
                console.log("entity inserted");
            }
            // send response
            console.log("all done");
            res.send(200, "All done!");
        }
    }
    catch (ex) {
        console.log(ex);
        res.send(500, "Internal error.");
    }
}));

If we compare this with the previous code, we will find it has become more readable and much easier to understand. It's very easy to know what this function does, even without any comments. When the user goes to the URL "/was/copyRecords" we execute the function above. The code would be like this.

app.get("/was/copyRecords", function (req, res) {
    copyRecords(req, res).start();
});

And below are the logs printed in the local compute emulator console. As we can see, the functions executed one by one, and finally the response went back to my browser.

Scaffold Functions in Wind

Wind provides not only the async flow control and compile functions, but many scaffold methods as well, which let us build our async code more easily. I'm going to introduce some basic scaffold functions here. In the code above I created some functions wrapped around the original async functions, such as open database, create table, etc. All of them are very similar: create a task by using Wind.Async.Task.create, and return the error or result object through the Task.complete function. In fact, Wind provides some functions that create the task object from the original async function for us. If the original async function only has a callback parameter, we can use the Wind.Async.Binding.fromCallback method to get the task object directly. For example, the code below returns a task object wrapping the file existence check function.
1: var Wind = require("wind"); 2: var fs = require("fs"); 3:  4: fs.existsAsync = Wind.Async.Binding.fromCallback(fs.exists); In Node.js a very popular async function pattern is that, the first parameter in the callback function represent the error object, and the other parameters is the return values. In this case we can use another build-in function in Wind named Wind.Async.Binding.fromStandard. For example, the open database function can be created from the code below. 1: sql.openAsync = Wind.Async.Binding.fromStandard(sql.open); 2:  3: /* 4: sql.openAsync = function (connectionString) { 5: return Wind.Async.Task.create(function (t) { 6: sql.open(connectionString, function (error, conn) { 7: if (error) { 8: t.complete("failure", error); 9: } 10: else { 11: t.complete("success", conn); 12: } 13: }); 14: }); 15: }; 16: */ When I was testing the scaffold functions under Wind.Async.Binding I found for some functions, such as the Azure SDK insert entity function, cannot be processed correctly. So I personally suggest writing the wrapped method manually.   Another scaffold method in Wind is the parallel tasks coordination. In this example, the steps of open database, retrieve records and recreated table should be invoked one by one, but it can be executed in parallel when copying data from database to table storage. In Wind there’s a scaffold function named Task.whenAll which can be used here. Task.whenAll accepts a list of tasks and creates a new task. It will be returned only when all tasks had been completed, or any errors occurred. For example in the code below I used the Task.whenAll to make all copy operation executed at the same time. 1: var copyRecordsInParallel = eval(Wind.compile("async", function (req, res) { 2: try { 3: // connect to the windows azure sql database 4: var conn = $await(sql.openAsync(connectionString)); 5: console.log("connection opened"); 6: // retrieve all records from database 7: var results = $await(sql.queryAsync(conn, "SELECT * FROM [Resource]")); 8: console.log("records selected. count = %d", results.rows.length); 9: if (results.rows.length > 0) { 10: // recreate the table 11: $await(azure.recreateTableAsync(tableName)); 12: console.log("table created"); 13: // insert records in table storage in parallal 14: var tasks = new Array(results.rows.length); 15: for (var i = 0; i < results.rows.length; i++) { 16: var entity = { 17: "PartitionKey": results.rows[i][1], 18: "RowKey": results.rows[i][0], 19: "Value": results.rows[i][2] 20: }; 21: tasks[i] = azure.insertEntityAsync(tableName, entity); 22: } 23: $await(Wind.Async.Task.whenAll(tasks)); 24: // send response 25: console.log("all done"); 26: res.send(200, "All done!"); 27: } 28: } 29: catch (ex) { 30: console.log(ex); 31: res.send(500, "Internal error."); 32: } 33: })); 34:  35: app.get("/was/copyRecordsInParallel", function (req, res) { 36: copyRecordsInParallel(req, res).start(); 37: });   Besides the task creation and coordination, Wind supports the cancellation solution so that we can send the cancellation signal to the tasks. It also includes exception solution which means any exceptions will be reported to the caller function.   Summary In this post I introduced a Node.js module named Wind, which created by my friend Jeff Zhao. As you can see, different from other async library and framework, adopted the idea from F# and C#, Wind utilizes runtime code generation technology to make it more easily to write async, callback-based functions in a sync-style way. 
By using Wind there are almost no callbacks, and the code is very easy to understand. Wind is still being developed and improved. There might be some problems, but the author, Jeff, will be very happy and enthusiastic to hear your problems, feedback, suggestions and comments. You can contact Jeff by:
- Email: [email protected]
- Group: https://groups.google.com/d/forum/windjs
- GitHub: https://github.com/JeffreyZhao/wind/issues

Source code can be downloaded here.

Hope this helps, Shaun

All documents and related graphics, codes are provided "AS IS" without warranty of any kind. Copyright © Shaun Ziyan Xu. This work is licensed under the Creative Commons License.

    Read the article

  • Creating packages in code – Execute SQL Task

The Execute SQL Task is for obvious reasons very well used, so I thought if you are building packages in code the chances are you will be using it. The basic features of the task are quite straightforward to use: add the task and set some properties, just like any other. When you start interacting with variables, though, it can be a little harder to grasp, so these samples should see you through. Some of the more advanced features are explained in much more detail in our ever popular post The Execute SQL Task; here I'll just be showing you how to implement them in code. The abbreviated code blocks below demonstrate the different features of the task. The complete code has been encapsulated into a sample class which you can download (ExecSqlPackage.cs). Each feature described has its own method in the sample class, which is mentioned after the code block. This first sample just shows adding the task and setting the basic properties for a connection and, of course, an SQL statement.

Package package = new Package();
// Add the SQL OLE-DB connection
ConnectionManager sqlConnection = AddSqlConnection(package, "localhost", "master");
// Add the SQL Task
package.Executables.Add("STOCK:SQLTask");
// Get the task host wrapper
TaskHost taskHost = package.Executables[0] as TaskHost;
// Set required properties
taskHost.Properties["Connection"].SetValue(taskHost, sqlConnection.ID);
taskHost.Properties["SqlStatementSource"].SetValue(taskHost, "SELECT * FROM sysobjects");

For the full version of this code, see the CreatePackage method in the sample class. The AddSqlConnection method is a helper method that adds an OLE-DB connection to the package; it is of course in the sample class file too.

Returning a single value with a Result Set

The following sample takes a different approach, getting a reference to the ExecuteSQLTask object itself, rather than just using the non-specific TaskHost as above. Whilst it means we need to add an extra reference to our project (Microsoft.SqlServer.SQLTask), it makes coding much easier, as we have compile time validation of any properties and types we use. For the more complex properties that is very valuable and saves a lot of time during development. The query has also been changed to return a single value: one row and one column. The sample shows how we can return that value into a variable, which we also add to our package in the code. To do this manually you would set the Result Set property on the General page to Single Row and map the variable on the Result Set page in the editor.
Package package = new Package();
// Add the SQL OLE-DB connection
ConnectionManager sqlConnection = AddSqlConnection(package, "localhost", "master");
// Add the SQL Task
package.Executables.Add("STOCK:SQLTask");
// Get the task host wrapper
TaskHost taskHost = package.Executables[0] as TaskHost;
// Add variable to hold result value
package.Variables.Add("Variable", false, "User", 0);
// Get the task object
ExecuteSQLTask task = taskHost.InnerObject as ExecuteSQLTask;
// Set core properties
task.Connection = sqlConnection.Name;
task.SqlStatementSource = "SELECT id FROM sysobjects WHERE name = 'sysrowsets'";
// Set single row result set
task.ResultSetType = ResultSetType.ResultSetType_SingleRow;
// Add result set binding, map the id column to the variable
task.ResultSetBindings.Add();
IDTSResultBinding resultBinding = task.ResultSetBindings.GetBinding(0);
resultBinding.ResultName = "id";
resultBinding.DtsVariableName = "User::Variable";

For the full version of this code, see the CreatePackageResultVariable method in the sample class. The other types of Result Set behaviour are just a variation on this theme: set the property and map the result binding as required.

Parameter Mapping for SQL Statements

This final example uses a parameterised SQL statement, with the parameter value coming from a variable. The syntax varies slightly between connection types, as explained in the Working with Parameters and Return Codes in the Execute SQL Task help topic, but OLE-DB is the most commonly used, for which a question mark is the parameter value placeholder.

Package package = new Package();
// Add the SQL OLE-DB connection
ConnectionManager sqlConnection = AddSqlConnection(package, ".", "master");
// Add the SQL Task
package.Executables.Add("STOCK:SQLTask");
// Get the task host wrapper
TaskHost taskHost = package.Executables[0] as TaskHost;
// Get the task object
ExecuteSQLTask task = taskHost.InnerObject as ExecuteSQLTask;
// Set core properties
task.Connection = sqlConnection.Name;
task.SqlStatementSource = "SELECT id FROM sysobjects WHERE name = ?";
// Add variable to hold the parameter value
package.Variables.Add("Variable", false, "User", "sysrowsets");
// Add input parameter binding
task.ParameterBindings.Add();
IDTSParameterBinding parameterBinding = task.ParameterBindings.GetBinding(0);
parameterBinding.DtsVariableName = "User::Variable";
parameterBinding.ParameterDirection = ParameterDirections.Input;
parameterBinding.DataType = (int)OleDBDataTypes.VARCHAR;
parameterBinding.ParameterName = "0";
parameterBinding.ParameterSize = 255;

For the full version of this code, see the CreatePackageParameterVariable method in the sample class. You'll notice the data type has to be specified for the parameter via the IDTSParameterBinding.DataType property, and these type codes are connection specific too. The enumeration I wrote several years ago, shown below, was probably done by reverse engineering a package and the API header file, but I recently found a very handy post that covers more connection types for exactly this: Setting the DataType of IDTSParameterBinding objects (Execute SQL Task).

/// <summary>
/// Enumeration of OLE-DB types, used when mapping OLE-DB parameters.
/// </summary>
private enum OleDBDataTypes
{
    BYTE = 0x11,
    CURRENCY = 6,
    DATE = 7,
    DB_VARNUMERIC = 0x8b,
    DBDATE = 0x85,
    DBTIME = 0x86,
    DBTIMESTAMP = 0x87,
    DECIMAL = 14,
    DOUBLE = 5,
    FILETIME = 0x40,
    FLOAT = 4,
    GUID = 0x48,
    LARGE_INTEGER = 20,
    LONG = 3,
    NULL = 1,
    NUMERIC = 0x83,
    NVARCHAR = 130,
    SHORT = 2,
    SIGNEDCHAR = 0x10,
    ULARGE_INTEGER = 0x15,
    ULONG = 0x13,
    USHORT = 0x12,
    VARCHAR = 0x81,
    VARIANT_BOOL = 11
}

Download Sample code ExecSqlPackage.cs (10KB)

    Read the article

  • New MySQL Cluster 7.3 Previews: Foreign Keys, NoSQL Node.js API and Auto-Tuned Clusters

    - by Mat Keep
At this week's MySQL Connect conference, Oracle previewed an exciting new wave of developments for MySQL Cluster, further extending its simplicity and flexibility by expanding the range of use-cases, adding new NoSQL options, and automating configuration. What's new:
- Development Release 1: MySQL Cluster 7.3 with Foreign Keys
- Early Access "Labs" Preview: MySQL Cluster NoSQL API for Node.js
- Early Access "Labs" Preview: MySQL Cluster GUI-Based Auto-Installer

In this blog, I'll introduce you to the features being previewed. Review the blogs listed below for more detail on each of the specific features discussed. Save the date! A live webinar is scheduled for Thursday 25th October at 0900 Pacific Time / 1600 UTC where we will discuss each of these enhancements in more detail. Registration will open soon and be published to the MySQL webinars page.

MySQL Cluster 7.3: Development Release 1

The first MySQL Cluster 7.3 Development Milestone Release (DMR) previews Foreign Keys, bringing powerful new functionality to MySQL Cluster while eliminating development complexity. Foreign Key support has been one of the most requested enhancements to MySQL Cluster – enabling users to simplify their data models and application logic – while extending the range of use-cases for both custom projects requiring referential integrity and packaged applications, such as eCommerce, CRM, CMS, etc.

Implementation

The Foreign Key functionality is implemented directly within the MySQL Cluster data nodes, allowing any client API accessing the cluster to benefit from it – whether SQL or one of the NoSQL interfaces (Memcached, C++, Java, JPA, HTTP/REST or the new Node.js API, discussed later). The core referential actions defined in the SQL:2003 standard are implemented: CASCADE, RESTRICT, NO ACTION, SET NULL. In addition, the MySQL Cluster implementation supports online adding and dropping of Foreign Keys, ensuring the cluster continues to serve both read and write requests during the operation. This represents a further enhancement to MySQL Cluster's support for online schema changes, i.e. adding and dropping indexes, adding columns, etc. Read this blog for a demonstration of using Foreign Keys with MySQL Cluster.

Getting Started with MySQL Cluster 7.3 DMR1: Users can download either the source or binary and evaluate the MySQL Cluster 7.3 DMR with Foreign Keys now! (Select the Development Release tab.)

MySQL Cluster NoSQL API for Node.js

Node.js is hot! In a little over 3 years, it has become one of the most popular environments for developing next generation web, cloud, mobile and social applications. Bringing JavaScript from the browser to the server, the design goal of Node.js is to build new real-time applications supporting millions of client connections, serviced by a single CPU core. Making it simple to further extend the flexibility and power of Node.js to the database layer, we are previewing the Node.js JavaScript API for MySQL Cluster as an Early Access release, available for download now from http://labs.mysql.com/. Select the following build: MySQL-Cluster-NoSQL-Connector-for-Node-js. Alternatively, you can clone the project at the MySQL GitHub page. Implemented as a module for the V8 engine, the new API provides Node.js with a native, asynchronous JavaScript interface that can be used to both query and receive result sets directly from MySQL Cluster, without transformations to SQL.
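To give a flavour of the API, here is a rough sketch based on the early access material — not an official snippet; the table name, mapped class and exact call signatures are my own assumptions and may differ in the preview build you download:

// hypothetical sketch of the mysql-js connector usage
var nosql = require('mysql-js');

// connection properties for the native NDB adapter; assumes a running cluster
var dbProperties = nosql.ConnectionProperties('ndb');
dbProperties.database = 'test';

// plain constructor mapped onto an assumed existing table named 'towns'
function Town(name, county) {
    this.town = name;
    this.county = county;
}
var mapping = new nosql.TableMapping('towns').applyToClass(Town);

nosql.openSession(dbProperties, mapping, function (err, session) {
    if (err) { console.log(err); return; }
    // persist one row, then read it back by primary key — no SQL involved
    session.persist(new Town('Maidenhead', 'Berkshire'), function (err) {
        session.find(Town, 'Maidenhead', function (err, row) {
            console.log(row);
            session.close();
        });
    });
});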
Figure 1: MySQL Cluster NoSQL API for Node.js enables end-to-end JavaScript development. Rather than just presenting a simple interface to the database, the Node.js module integrates the MySQL Cluster native API library directly within the web application itself, enabling developers to seamlessly couple their high performance, distributed applications with a high performance, distributed persistence layer delivering 99.999% availability. The new Node.js API joins a rich array of NoSQL interfaces available for MySQL Cluster. Whichever API is chosen for an application, SQL and NoSQL can be used concurrently across the same data set, providing the ultimate in developer flexibility.  Get started with the MySQL Cluster NoSQL API for Node.js tutorial. MySQL Cluster GUI-Based Auto-Installer Compatible with both MySQL Cluster 7.2 and 7.3, the Auto-Installer makes it simple for DevOps teams to quickly configure and provision highly optimized MySQL Cluster deployments – whether on-premise or in the cloud. Implemented with a standard HTML GUI and Python-based web server back-end, the Auto-Installer intelligently configures MySQL Cluster based on application requirements and auto-discovered hardware resources. Figure 2: Automated Tuning and Configuration of MySQL Cluster. Developed by the same engineering team responsible for the MySQL Cluster database, the installer provides standardized configurations that make it simple, quick and easy to build stable and high performance clustered environments. The auto-installer is previewed as an Early Access release, available for download now from http://labs.mysql.com/, by selecting the MySQL-Cluster-Auto-Installer build. You can read more about getting started with the MySQL Cluster auto-installer here. Watch the YouTube video for a demonstration of using the MySQL Cluster auto-installer. Getting Started with MySQL Cluster If you are new to MySQL Cluster, the Getting Started guide will walk you through installing an evaluation cluster on a single host (these guides reflect MySQL Cluster 7.2, but apply equally well to 7.3 and the Early Access previews). Or use the new MySQL Cluster Auto-Installer! Download the Guide to Scaling Web Databases with MySQL Cluster (to learn more about its architecture, design and ideal use-cases). Post any questions to the MySQL Cluster forum where our Engineering team and the MySQL Cluster community will attempt to assist you. Post any bugs you find to the MySQL bug tracking system (select MySQL Cluster from the Category drop-down menu). And if you have any feedback, please post it to the Comments section here or in the blogs referenced in this article. Summary MySQL Cluster 7.2 is the GA, production-ready release of MySQL Cluster. The first Development Release of MySQL Cluster 7.3 and the Early Access previews give you the opportunity to preview and evaluate future developments in the MySQL Cluster database, and we are very excited to be able to share that with you. Let us know how you get along with MySQL Cluster 7.3, and other features that you want to see in future releases, by using the comments section of this blog.
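To make the Foreign Key preview concrete, here is a minimal sketch (mine, not from the article) that creates two NDB tables linked by a foreign key from a .NET client, assuming the MySQL Connector/NET library. The connection details and table names are hypothetical, and the same DDL can equally be run from the mysql command line client.

using System;
using MySql.Data.MySqlClient;

class ClusterForeignKeyDemo
{
    static void Main()
    {
        // Hypothetical connection details; point this at a SQL node of the cluster.
        using (var connection = new MySqlConnection(
            "Server=localhost;Port=3306;Database=test;Uid=root;Pwd=;"))
        {
            connection.Open();

            // Parent table on the NDB (MySQL Cluster) storage engine.
            Execute(connection,
                "CREATE TABLE customers (" +
                "  id INT NOT NULL PRIMARY KEY," +
                "  name VARCHAR(100)) ENGINE=NDB");

            // Child table; ON DELETE CASCADE and ON UPDATE RESTRICT are two of
            // the SQL:2003 referential actions listed above.
            Execute(connection,
                "CREATE TABLE orders (" +
                "  id INT NOT NULL PRIMARY KEY," +
                "  customer_id INT," +
                "  INDEX (customer_id)," +
                "  FOREIGN KEY (customer_id) REFERENCES customers (id)" +
                "    ON DELETE CASCADE ON UPDATE RESTRICT) ENGINE=NDB");
        }
    }

    static void Execute(MySqlConnection connection, string sql)
    {
        using (var command = new MySqlCommand(sql, connection))
        {
            command.ExecuteNonQuery();
        }
    }
}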

    Read the article

  • Creating packages in code – Execute SQL Task

    The Execute SQL Task is, for obvious reasons, very well used, so I thought that if you are building packages in code the chances are you will be using it. The basic features of the task are quite straightforward to use: add the task and set some properties, just like any other. When you start interacting with variables though it can be a little harder to grasp, so these samples should see you through. Some of these more advanced features are explained in much more detail in our ever popular post The Execute SQL Task; here I’ll just be showing you how to implement them in code. The abbreviated code blocks below demonstrate the different features of the task. The complete code has been encapsulated into a sample class which you can download (ExecSqlPackage.cs). Each feature described has its own method in the sample class which is mentioned after the code block. This first sample just shows adding the task, setting the basic properties for a connection and of course an SQL statement. Package package = new Package(); // Add the SQL OLE-DB connection ConnectionManager sqlConnection = AddSqlConnection(package, "localhost", "master"); // Add the SQL Task package.Executables.Add("STOCK:SQLTask"); // Get the task host wrapper TaskHost taskHost = package.Executables[0] as TaskHost; // Set required properties taskHost.Properties["Connection"].SetValue(taskHost, sqlConnection.ID); taskHost.Properties["SqlStatementSource"].SetValue(taskHost, "SELECT * FROM sysobjects"); For the full version of this code, see the CreatePackage method in the sample class. The AddSqlConnection method is a helper method that adds an OLE-DB connection to the package; it is of course in the sample class file too, and a minimal sketch of it is shown at the end of this post. Returning a single value with a Result Set The following sample takes a different approach, getting a reference to the ExecuteSQLTask object itself, rather than just using the non-specific TaskHost as above. Whilst it means we need to add an extra reference to our project (Microsoft.SqlServer.SQLTask), it makes coding much easier as we have compile time validation of any properties and types we use. For the more complex properties that is very valuable and saves a lot of time during development. The query has also been changed to return a single value, one row and one column. The sample shows how we can return that value into a variable, which we also add to our package in the code. To do this manually you would set the Result Set property on the General page to Single Row and map the variable on the Result Set page in the editor. 
Package package = new Package(); // Add the SQL OLE-DB connection ConnectionManager sqlConnection = AddSqlConnection(package, "localhost", "master"); // Add the SQL Task package.Executables.Add("STOCK:SQLTask"); // Get the task host wrapper TaskHost taskHost = package.Executables[0] as TaskHost; // Add variable to hold result value package.Variables.Add("Variable", false, "User", 0); // Get the task object ExecuteSQLTask task = taskHost.InnerObject as ExecuteSQLTask; // Set core properties task.Connection = sqlConnection.Name; task.SqlStatementSource = "SELECT id FROM sysobjects WHERE name = 'sysrowsets'"; // Set single row result set task.ResultSetType = ResultSetType.ResultSetType_SingleRow; // Add result set binding, map the id column to variable task.ResultSetBindings.Add(); IDTSResultBinding resultBinding = task.ResultSetBindings.GetBinding(0); resultBinding.ResultName = "id"; resultBinding.DtsVariableName = "User::Variable"; For the full version of this code, see the CreatePackageResultVariable method in the sample class. The other types of Result Set behaviour are just a variation on this theme: set the property and map the result binding as required. Parameter Mapping for SQL Statements This final example uses a parameterised SQL statement, with the parameter value coming from a variable. The syntax varies slightly between connection types, as explained in the Working with Parameters and Return Codes in the Execute SQL Task help topic, but OLE-DB is the most commonly used, for which a question mark is the parameter value placeholder. Package package = new Package(); // Add the SQL OLE-DB connection ConnectionManager sqlConnection = AddSqlConnection(package, ".", "master"); // Add the SQL Task package.Executables.Add("STOCK:SQLTask"); // Get the task host wrapper TaskHost taskHost = package.Executables[0] as TaskHost; // Get the task object ExecuteSQLTask task = taskHost.InnerObject as ExecuteSQLTask; // Set core properties task.Connection = sqlConnection.Name; task.SqlStatementSource = "SELECT id FROM sysobjects WHERE name = ?"; // Add variable to hold parameter value package.Variables.Add("Variable", false, "User", "sysrowsets"); // Add input parameter binding task.ParameterBindings.Add(); IDTSParameterBinding parameterBinding = task.ParameterBindings.GetBinding(0); parameterBinding.DtsVariableName = "User::Variable"; parameterBinding.ParameterDirection = ParameterDirections.Input; parameterBinding.DataType = (int)OleDBDataTypes.VARCHAR; parameterBinding.ParameterName = "0"; parameterBinding.ParameterSize = 255; For the full version of this code, see the CreatePackageParameterVariable method in the sample class. You’ll notice the data type has to be specified for the parameter via the IDTSParameterBinding.DataType property, and these type codes are connection specific too. The enumeration shown below, which I wrote several years ago, was probably done by reverse engineering a package and the API header file, but I recently found a very handy post that covers more connection types for exactly this: Setting the DataType of IDTSParameterBinding objects (Execute SQL Task). /// <summary> /// Enumeration of OLE-DB types, used when mapping OLE-DB parameters. 
/// </summary> private enum OleDBDataTypes { BYTE = 0x11, CURRENCY = 6, DATE = 7, DB_VARNUMERIC = 0x8b, DBDATE = 0x85, DBTIME = 0x86, DBTIMESTAMP = 0x87, DECIMAL = 14, DOUBLE = 5, FILETIME = 0x40, FLOAT = 4, GUID = 0x48, LARGE_INTEGER = 20, LONG = 3, NULL = 1, NUMERIC = 0x83, NVARCHAR = 130, SHORT = 2, SIGNEDCHAR = 0x10, ULARGE_INTEGER = 0x15, ULONG = 0x13, USHORT = 0x12, VARCHAR = 0x81, VARIANT_BOOL = 11 } Download Sample code ExecSqlPackage.cs (10KB)
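For reference, a helper along the lines of the AddSqlConnection method used in the samples above can be as simple as the sketch below. This is not the exact code from the downloadable sample class, just a minimal stand-in showing the idea; "OLEDB" is the creation moniker for an OLE-DB connection manager, and the provider string is a typical SQL Server OLE-DB connection string.

using Microsoft.SqlServer.Dts.Runtime;

static class PackageHelper
{
    public static ConnectionManager AddSqlConnection(Package package, string server, string database)
    {
        // Add an OLE-DB connection manager to the package
        ConnectionManager connectionManager = package.Connections.Add("OLEDB");
        connectionManager.Name = "SQL Connection";
        connectionManager.ConnectionString = string.Format(
            "Provider=SQLOLEDB.1;Data Source={0};Initial Catalog={1};Integrated Security=SSPI;",
            server, database);
        return connectionManager;
    }
}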

    Read the article

  • Connecting to DB2 from SSIS

    - by Christopher House
    The project I'm currently working on involves moving various pieces of data from a legacy DB2 environment to some SQL Server and flat file locations.  Most of the data flows are real time, so they were a natural fit for the client's MQSeries on their iSeries servers and BizTalk to handle the messaging.  Some of the data flows, however, are daily batch type transmissions.  For the daily batch transmissions, it was decided that we'd use SSIS to pull the data direct from DB2 to either a SQL Server or flat file.  I'm not at all an SSIS guy; I've done a bit here and there, but mainly for situations where we needed to move data from a dev environment to QA, mostly informal stuff like that.  And, as much as I'm not an SSIS guy, I'm even less a DB2/iSeries guy.  Prior to this engagement, my knowledge of DB2 was limited to the fact that it's an IBM product and that it was probably a DBMS platform (that's what the DB in DB2 means, right?).   One of my first goals when I came onto this project was to develop a POC SSIS package to pull some data from DB2 and dump it to a flat file.  It sounded like a pretty straightforward task.  As always, the devil is in the details.  Configuring the DB2 connection manager took a bit of trial and error.  As such, I thought I'd post my experiences here in hopes that they might save someone the efforts I went through.  That being said, please keep in mind, as I pointed out, I'm not at all a DB2 guy, so my terminology and explanations may not be 100% spot on. Before you get started, you need to figure out how you're going to connect to DB2.  From the research I did, it looks like there are a few options.  IBM has both an OLE DB and a .Net data provider, which can be found here.  I installed their client access tools and tried to use both the .Net and OLE DB providers, but I received an error message from both when attempting to connect to the iSeries that indicated I needed a license for a product called DB2 Connect.  I inquired with one of my client's iSeries resources about a license for this product and it appears they didn't have one, so that meant the IBM drivers were out.  The other option that I found quite a bit of discussion around was Microsoft's OLE DB Provider for DB2.  This driver is part of the feature pack for SQL Server 2008 Enterprise Edition and can be downloaded here. As it turns out, I already had Microsoft's driver installed on my dev VM, which struck me as odd since I hadn't installed it.  I discovered that the driver is installed with the BizTalk adapter pack for host systems, which was also installed on my VM.  However, it looks like the version used by the adapter pack is newer than the version provided in the SQL Server feature pack.   Once you get the driver installed, create a connection manager in your package just like you normally would and select the Microsoft OLE DB Provider for DB2 from the list of available drivers. After you select the driver, you'll need to enter your host name, login credentials and initial catalog. A couple of things to note here.  First, the Initial catalog needs to be the same as your host name.  Not sure why that is, but trust me, it just does.  Second, for credentials: in my environment, we're using what the client's iSeries people refer to as "profiles".  I guess this is similar to SQL auth in the SQL Server world.  In other words, they've given me a username and password for connecting to DB2, so I've entered it here. Next, click the Data Links button.  
On the Data Links screen, enter your package collection on the first tab. Package collection is one of those DB2 concepts I'm still trying to figure out.  From the little bit I've read, packages are used to control SQL compilation and each DB2 connection needs one.  The package collection, I believe, controls where your package is created.  One of the iSeries folks I've been working with told me that I should always use QGPL for my package collection, as QGPL is "general purpose" and doesn't require any additional authority. Next click the ellipsis next to the Network drop-down.  Here you'll want to enter your host name again. Again, not sure why you need to do this, but trust me, my connection wouldn't work until I entered my host name here. Finally, go to the Advanced tab, select your DBMS platform and check Process binary as character. My environment is DB2 on the iSeries, and iSeries is the replacement for AS/400, so I selected DB2/AS400 for my platform.  Process binary as character was necessary to handle some of the DB2 data types.  I had a few columns that showed all their data as "System.Byte[]".  Checking Process binary as character resolved this. At this point, you should be good to go.  You can go back to the Connection tab on the Data Links dialog to perform a couple of tests to validate your configuration.  The Test Connection button is obvious; this just verifies you can connect to the host using the configuration data you've entered.  The Packages button will attempt to connect to the host and create the packages required to execute queries. This isn't meant to be a comprehensive look at SSIS and DB2; these are just some of the notes I've come up with since I started working with DB2 and SSIS.  I'm sure as I continue developing my packages, I'll find more quirks and will post them here.
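If you later need to create this same connection from code, for example when generating SSIS packages programmatically, the sketch below shows roughly what the resulting connection manager would look like. The host name, profile credentials and even some of the key names are illustrative only (remember, I'm not a DB2 guy); the most reliable approach is to configure the connection once through the Data Links dialog and copy the connection string it generates.

using Microsoft.SqlServer.Dts.Runtime;

static class Db2ConnectionSample
{
    public static ConnectionManager AddDb2Connection(Package package)
    {
        ConnectionManager connection = package.Connections.Add("OLEDB");
        connection.Name = "DB2 Connection";
        connection.ConnectionString =
            "Provider=DB2OLEDB;" +                  // Microsoft OLE DB Provider for DB2
            "Network Address=MYISERIES;" +          // host name (hypothetical)
            "Initial Catalog=MYISERIES;" +          // must match the host name
            "Package Collection=QGPL;" +            // "general purpose", no extra authority
            "User ID=MYPROFILE;Password=secret;" +  // iSeries "profile" credentials
            "Process Binary as Character=True;";    // avoids System.Byte[] columns
        return connection;
    }
}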

    Read the article

  • How To Replace Notepad in Windows 7

    - by Trevor Bekolay
    It used to be that Notepad was a necessary evil because it started up quickly and let us catch a quick glimpse of plain text files. Now, there are a bevy of capable Notepad replacements that are just as fast, but also have great feature sets. Before following the rest of this how-to, ensure that you’re logged into an account with Administrator access. Note: The following instructions involve modifying some Windows system folders. Don’t mess anything up while you’re in there! If you follow our instructions closely, you’ll be fine. Choose your replacement There are a ton of great Notepad replacements, including Notepad2, Metapad, and Notepad++. The best one for you will depend on what types of text files you open and what you do with them. We’re going to use Notepad++ in this how-to. The first step is to find the executable file that you’ll replace Notepad with. Usually this will be the only file with the .exe file extension in the folder where you installed your text editor. Copy the executable file to your desktop and try to open it, to make sure that it works when opened from a different folder. In the Notepad++ case, a special little .exe file is available for the explicit purpose of replacing Notepad. If we run it from the desktop, it opens up Notepad++ in all its glory. Back up Notepad You will probably never go back once you switch, but you never know. You can back up Notepad to a special location if you’d like, but we find it’s easiest to just keep a backed-up copy of Notepad in the folders where it was originally located. In Windows 7, Notepad resides in: C:\Windows C:\Windows\System32 C:\Windows\SysWOW64 in 64-bit versions only Navigate to each of those directories and copy Notepad. Paste it into the same folder. If prompted, choose to Copy, but keep both files. You can keep your backup as “notepad (2).exe”, but we prefer to rename it to “notepad.exe.bak”. Do this for all of the folders that have Notepad (2 total for 32-bit Windows 7, 3 total for 64-bit). Take control of Notepad and delete it Even if you’re on an administrator account, you can’t just delete Notepad – Microsoft has made some security gains in this respect. Fortunately for us, it’s still possible to take control of a file and delete it without resorting to nasty hacks like disabling UAC. Navigate to one of the directories that contain Notepad. Right-click on it and select Properties.   Switch to the Security tab, then click on the Advanced button. Note that the owner of the file is a user called “TrustedInstaller”. You can’t do much with files owned by TrustedInstaller, so let’s take control of it. Click the Edit… button. Select the desired owner (you could choose your own account, but we’re going to give any Administrator control) and click OK. You’ll get a message that you need to close and reopen the Properties window to edit permissions. Before doing that, confirm that the owner has changed to what you selected. Click OK, then OK again to close the Properties window. Right-click on Notepad and click on Properties again. Switch to the Security tab. Click on Edit…. Select the appropriate group or user name in the list at the top, then add a checkmark in the checkbox beside Full control in the Allow column. Click OK, then Yes to the dialog box that pops up. Click OK again to close the Properties window. Now you can delete Notepad, by either selecting it and pressing Delete on the keyboard, or right-clicking on it and clicking Delete.   You’re now free from Notepad’s foul clutches! 
Repeat this procedure for the remaining folders (or folder, on 32-bit Windows 7). Drop in your replacement Copy your Notepad replacement’s executable, which should still be on your desktop. Browse to the two or three folders listed above and copy your .exe to those locations. If prompted for Administrator permission, click Continue. If your executable file was named something other than “notepad.exe”, rename it to “notepad.exe”. Don’t be alarmed if the thumbnail still shows the old Notepad icon. Double click on Notepad and your replacement should open. To make doubly sure that it works, press Win+R to bring up the Run dialog box and enter “notepad” into the text field. Press Enter or click OK. Now you can allow Windows to open files with Notepad by default with little to no shame! All without restarting or having to disable UAC!
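If you need to repeat this on several machines, the whole procedure can be scripted. The sketch below (ours, not part of the original walkthrough) automates the backup, take-ownership, delete and drop-in steps using the stock takeown.exe and icacls.exe tools. It must be run from an elevated process, the path to your replacement editor is an assumption, and on non-English systems the Administrators group name will differ.

using System.Diagnostics;
using System.IO;

class ReplaceNotepad
{
    static void Run(string fileName, string arguments)
    {
        // Launch a console tool and wait for it to finish
        using (var process = Process.Start(new ProcessStartInfo
        {
            FileName = fileName,
            Arguments = arguments,
            UseShellExecute = false
        }))
        {
            process.WaitForExit();
        }
    }

    static void Main()
    {
        // Hypothetical location of the replacement editor's executable
        string replacement = @"C:\Users\You\Desktop\notepad.exe";

        string[] folders =
        {
            @"C:\Windows",
            @"C:\Windows\System32",
            @"C:\Windows\SysWOW64" // 64-bit Windows only
        };

        foreach (string folder in folders)
        {
            string notepad = Path.Combine(folder, "notepad.exe");
            if (!File.Exists(notepad)) continue;        // e.g. no SysWOW64 on 32-bit

            File.Copy(notepad, notepad + ".bak", true); // back up the original
            Run("takeown.exe", "/f \"" + notepad + "\"");                     // take ownership
            Run("icacls.exe", "\"" + notepad + "\" /grant Administrators:F"); // grant full control
            File.Delete(notepad);                       // remove the original
            File.Copy(replacement, notepad);            // drop in the replacement
        }
    }
}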

    Read the article

  • Why should you choose Oracle WebLogic 12c instead of JBoss EAP 6?

    - by Ricardo Ferreira
    In this post, I will cover some technical differences between Oracle WebLogic 12c and JBoss EAP 6, which was released a couple of days ago by Red Hat. This article aims to help you evaluate key points that you should consider when choosing a Java EE application server. In the following sections, I will present some important aspects that most customers ask us about when they are seriously evaluating a middleware infrastructure, especially if you are considering JBoss for some reason. I would suggest that you keep the following question in mind while you are reading these points: "Why should I choose JBoss instead of WebLogic?" 1) Multi Datacenter Deployment and Clustering - D/R ("Disaster & Recovery") architecture support is embedded in the WebLogic Server 12c product. JBoss EAP 6 on the other hand has no direct D/R support included; Red Hat relies on third-party tools with higher prices. When you consider a middleware solution to host your business critical application, you should worry about every architectural aspect related to the solution. Fail-over support is one little aspect of a truly reliable solution. If you do not worry about D/R, your solution will not be reliable. Having said that, with Red Hat and JBoss EAP 6, you have this extra cost that will considerably increase the total cost of ownership of the solution. As we commonly hear from analysts, open source is not so cheap once you start seeing the big picture. - WebLogic Server 12c supports advanced LAN clustering, detection of dead servers and has a common alert framework. JBoss EAP 6 on the other hand has limited LAN clustering support with no server death detection. It does not generate any alerts when servers go down (only if you buy JBoss ON, which is a separate technology that until now does not support JBoss EAP 6) and manual intervention is required when servers go down. In most cases, admin people must rely on "kill -9", "tail -f someFile.log" and "ps ax | grep java" commands to manage failures and clustering anomalies. - WebLogic Server 12c supports the concept of Node Manager, which is a separate process that runs on the physical or virtual servers and allows the administration of the cluster to be extended to WebLogic managed servers that are often distributed across multiple machines and geographic locations. JBoss EAP 6 on the other hand has no equivalent technology. Whole server instances must be managed individually. - WebLogic Server 12c Node Manager supports Coherence to boost performance when managing servers. JBoss EAP 6 on the other hand has no similar technology. There is no way to coordinate JBoss instances using high throughput and low latency protocols like InfiniBand. The Node Manager feature also enables another very important capability that JBoss EAP lacks: secured administration. When using WebLogic Node Manager, all the administration tasks are sent to the managed servers in a secure tunnel protected by a certificate, which means that the transport layer separating the WebLogic administration console from the managed servers is secured by SSL. - WebLogic Server 12c is now integrated with OTD ("Oracle Traffic Director"), a web server technology derived from the former Sun iPlanet Web Server. This software complements the web server support offered by OHS ("Oracle HTTP Server"). 
Using OTD, WebLogic instances are load-balanced by highly capable software that knows how to handle SDP ("Socket Direct Protocol") over InfiniBand, which boosts performance when used with engineered systems technologies like Oracle Exalogic Elastic Cloud. JBoss EAP 6 on the other hand only offers support for Apache Web Server with custom modules created to deal with JBoss clusters, but only across standard TCP/IP networks.  2) Application and Runtime Diagnostics - WebLogic Server 12c has diagnostics capabilities embedded in the server, called WLDF ("WebLogic Diagnostic Framework"), so there is no need to rely on third-party tools. JBoss EAP 6 on the other hand has no diagnostics capabilities. Its only diagnostics tool is the log generated by the application server. Admin people are encouraged to analyse thousands of log lines to find out what is going on. - WebLogic Server 12c complements WLDF with JRockit MC ("Mission Control"), which provides administrators and developers with complete insight into JVM performance, behavior and possible bottlenecks. WebLogic Server 12c also has a classloader analysis tool embedded, and even a log analyzer tool that enables administrators and developers to view logs of multiple servers at the same time. JBoss EAP 6 on the other hand relies on third-party tools to do something similar. Again, only log searching is offered to find out what's going on. - WebLogic Server 12c offers end-to-end traceability and monitoring available through Oracle EM ("Enterprise Manager"), including monitoring of business transactions that flow through web servers, ESBs, application servers and database servers, all of this with deep JVM analysis and diagnostics. JBoss EAP 6 on the other hand, even using JBoss ON ("Operations Network"), which is a separate technology, does not support those features. Red Hat relies on third-party tools to provide direct Oracle database traceability across JVMs. One of those tools is Oracle EM for non-Oracle middleware, which manages JBoss, Tomcat, Websphere and IIS transparently. - WebLogic Server 12c with its JRockit support offers a tool called JRockit Flight Recorder, which can give developers complete visibility into a certain period of application production monitoring with zero extra overhead. This automatic recording allows you to deeply analyse thread latency, memory leaks, thread contention, resource utilization, stack overflow damage and GC ("Garbage Collection") cycles; to observe in real time stop-the-world phenomena and generational, reference count and parallel collections; and to analyse mutator threads. JBoss EAP 6 cannot even dream of supporting something similar, not least because Red Hat does not have its own JVM. 3) Application Server Administration - WebLogic Server 12c offers a complete administration console complemented with scripting and macro-like recording capabilities. A single WebLogic console can manage up to hundreds of WebLogic servers belonging to the same domain. JBoss EAP 6 on the other hand has a limited console and provides XML centric administration. JBoss, after ten years, started the development of a rudimentary centralized administration that still leaves a lot of administration tasks aside, so admin people and developers must touch scripts and XML configuration files for most advanced and even simple administration tasks. This leads applications to error-prone and risky deployments. 
Even using JBoss ON, JBoss EAP is not able to offer decent administration features; admin people must be highly skilled in JBoss internal architecture and its management capabilities. - Oracle EM is available to manage multiple domains, databases, application servers, operating systems and virtualization, with complete end-to-end visibility. JBoss ON does not provide management capabilities across the complete architecture, only basic monitoring. Even deployment must be done outside JBoss ON, which does not integrate well with software other than JBoss. Until now, JBoss ON does not support JBoss EAP 6, so even this minimal support for JBoss is not available for JBoss EAP 6, leaving customers uncovered and dependent on highly skilled JBoss admin people. - WebLogic Server 12c has the same administration model whatever the topology selected by the customer. JBoss EAP 6 on the other hand differentiates between two operational models, standalone-mode and domain-mode, which are not consistent with each other. Depending on the mode used, the administration skills required are different. - WebLogic Server 12c has no point-of-failure processes, and it does not need to define any specialized server. The domain model in WebLogic has been available for years (at least ten years or more) and is production proven. JBoss EAP 6 on the other hand needs special processes to guarantee JBoss integrity, the PC ("Process-Controller") and the HC ("Host-Controller"). Different from WebLogic, the domain model in JBoss is quite new (one year at most) in maturity, and needs to mature considerably before it does things like the WebLogic domain model does. - WebLogic Server 12c supports a parallel deployment model which enables multiple artifacts to be deployed at the same time. JBoss EAP 6 on the other hand does not have any similar feature. Every deployment is done atomically in the containers. This means that if you have a huge EAR (an EAR of 120 MB in size, for instance) and deploy it onto JBoss EAP 6, this EAR will take some minutes before it starts accepting requests. The same EAR deployed onto WebLogic Server 12c will reduce the deployment time by at least 2X compared to JBoss. 4) Support and Upgrades - WebLogic Server 12c has patch management available. JBoss EAP 6 on the other hand has no patch management available; each JBoss EAP instance must be patched manually. To get such a feature, you need to buy a separate technology called JBoss ON ("Operations Network") that manages this type of stuff. But until now, JBoss ON does not support JBoss EAP 6 so, in practice, JBoss EAP 6 does not have this feature. - WebLogic Server 12c supports previous WebLogic domains without any reconfiguration, since its kernel has been robust and mature since its creation in 1995. JBoss EAP 6 on the other hand has a proven lack of supportability between JBoss AS 4, 5, 6 and 7. Different kernels and messaging engines were implemented in the JBoss stack in the last five years, revealing their incapacity to create a well architected and proven middleware technology. - WebLogic Server 12c has patch prescription based on customer configuration. JBoss EAP 6 on the other hand has no such capability. People need to create support tickets and have their installations reviewed by Red Hat support guys to gain some patch prescription from them. - Oracle WebLogic Server, independent of the version, has 8 years of support for new patches and lifetime availability of existing patches beyond that. 
JBoss EAP 6 on the other hand provides patches for a specific application server version up to 5 years after the release date. JBoss EAP 4 and previous versions had only 4 years. A good question that Red Hat will struggle to answer is: "what happens when you find issues after year 5?"  5) RAC ("Real Application Clusters") Support - WebLogic Server 12c ships with a specific JDBC driver to leverage Oracle RAC clustering capabilities (Fast-Application-Notification, Transaction Affinity, Fast-Connection-Failover, etc). The Oracle JDBC thin driver is also available. JBoss EAP 6 on the other hand ships only the standard Oracle JDBC thin driver. Load balancing with Oracle RAC is not supported. Manual intervention in case of planned or unplanned RAC downtime is necessary. In JBoss EAP 6, the situation does not reestablish itself automatically after downtime. - WebLogic Server 12c has a feature called Active GridLink for Oracle RAC which provides up to 3X performance on OLTP applications. This seamless integration between WebLogic and the Oracle database adds more value to critical business applications, leveraging their investments in Oracle database technology and Oracle middleware. JBoss EAP 6 on the other hand has no performance gains at all, even when admin people implement some kind of connection-pooling tuning. - WebLogic Server 12c also supports transaction and web session affinity to Oracle RAC, which provides additional performance gains. This is particularly interesting if you are creating a reliable solution that is distributed not only in a LAN cluster, but across different data centers. JBoss EAP 6 on the other hand has no such support. 6) Standards and Technology Support - WebLogic Server 12c has been fully Java EE 6 compatible and production ready since December 2011. JBoss EAP 6 on the other hand became fully compatible with Java EE 6 only in the community version three months later, and production ready only a few days before this article was written in June 2012. Red Hat says that they are the masters of innovation and technology proliferation, but compared with Oracle and even other proprietary vendors like IBM, historically speaking they are slow to deliver the newest technologies and standards adherence. - Oracle is the steward of Java, driving innovation into the platform from commercial and open-source vendors. Red Hat on the other hand does not have its own JVM and relies on third-party JVMs to complete its application server offer. 95% of Red Hat customers are using Oracle HotSpot as the JVM, which means that without Oracle involvement, their support is limited exclusively to the application server layer, and we all know that most problems happen in the JVM layer. - WebLogic Server 12c natively supports JDK 7, which empowers developers to explore the maximum of the Java platform's productivity when writing code. This feature differentiates WebLogic from other application servers (except GlassFish, which is also managed by Oracle) because the usage of JDK 7 introduces such remarkable productivity features as the "try-with-resources" enhancement, catching multiple exceptions with one try block, Strings in switch statements, and JVM improvements in terms of JDBC, I/O, networking, security, concurrency and of course, the most important feature of Java 7: native support for multiple non-Java languages. More features regarding JDK 7 can be found here. 
JBoss EAP 6 on the other hand does not support JDK 7 officially; they comment in their community version that "Java SE 7 can be used with JBoss 7", which does not give you any guarantee of enterprise support for JDK 7. - Oracle WebLogic Server 12c supports integration with the Spring framework, allowing Spring applications to use the WebLogic special transaction manager and exposing bean interfaces to WebLogic MBeans to take advantage of all WebLogic monitoring and administration advantages. JBoss EAP 6 on the other hand has no special integration with Spring. In fact, Red Hat offers a suspicious package called "JBoss Web Platform" that in theory supports Spring, but in practice this package does not offer any special integration. It is just a facility for Red Hat customers to have support for both JBoss and Spring technology using the same customer support. 7) Lightweight Development - Oracle WebLogic Server 12c and Oracle GlassFish are completely integrated and can share applications without any modifications. Starting with the 12c version, WebLogic natively understands GlassFish deployment descriptors and specific configurations in order to offer you a true and reliable migration path from a community Java EE application server to an enterprise middleware product like WebLogic. JBoss EAP 6 on the other hand has no support for natively reusing an existing (or still in development) application from the JBoss AS community server. Users of JBoss suffer from critical issues at deployment time, including: changing the libraries and dependencies of the application, patching the DTD or XSD deployment descriptors, refactoring of the application layers due to classloading issues and anomalies, and rebuilding of persistence, business and web layers due to issues with "usage of the certified version of a certain dependency" or "frameworks that Red Hat potentially does not recommend", etc. If you have the culture or enterprise IT directive of developing Java EE applications using community middleware and then, at some point in the future, transitioning to enterprise (vendor-supported) middleware, Oracle WebLogic plus Oracle GlassFish offers you a more sustainable solution. - WebLogic Server 12c has a very light ZIP distribution (less than 165 MB). The JBoss EAP 6 ZIP size is around 130 MB; together with JBoss ON you have another 100 MB, resulting in a higher download footprint. This is particularly interesting if you plan to use automated setup of application server instances (for example, to rapidly set up a development or staging environment) using Maven or Hudson. - WebLogic Server 12c has complete integration with Maven, allowing developers to set up WebLogic domains with few commands. Tasks like downloading WebLogic, installation, domain creation and data source deployment are completely integrated. JBoss EAP 6 on the other hand has limited integration with those tools.  - WebLogic Server 12c has a startup mode called WLX that turns off the EJB, JMS and JCA containers, leaving enabled only the web container with the Java EE 6 web profile. JBoss EAP 6 on the other hand has no such feature; you need to disable manually the containers that you do not want to use. - WebLogic Server 12c supports FastSwap, which enables you to change classes without redeployment. This is particularly interesting if you are developing patches for an application that is already deployed and you do not want to redeploy the entire application. 
This is the same behavior that most application servers offer for JSP pages, but with WebLogic Server 12c, you have the same feature for Java classes in general. JBoss EAP 6 on the other hand has no such support. Even JBoss EAP 5 does not support this until now. 8) JMS and Messaging - WebLogic Server 12c has had a proven and highly scalable JMS implementation since its initial release in 1995. JBoss EAP 6 on the other hand has a still immature technology called HornetQ, which was introduced in JBoss EAP 5, replacing everything that was implemented in the previous versions. Red Hat loves to introduce new technologies across JBoss versions, playing around with customers and their investments. And when they are asked why they have changed the implementation and caused such a mess, their answer is always: "the previous implementation was inadequate and not aligned with the community strategy, so we are creating a new and improved one". This Red Hat practice leads to uncomfortable investments that in the near future (sometimes less than a year) will be affected in some way. - WebLogic Server 12c has troubleshooting and monitoring features included in the WebLogic console and WLDF. JBoss EAP 6 on the other hand has no direct monitoring in the console; activity is reflected only in the logs, and no debug logs are available in case of JMS issues. - WebLogic Server 12c has extremely good performance and scalability. JBoss EAP 6 on the other hand has a JMS storage mechanism relying on an Oracle database or MySQL. This means that if an issue happens in production and Red Hat affirms that a performance issue is happening due to database problems, they will not support you on the performance issue. They will direct you to call Oracle instead. - WebLogic Server 12c supports messaging enterprise features like SAF ("Store and Forward"), Distributed Queues/Topics and Foreign JMS provider support, which leverage JMS implementations without compromising developer code, making things completely transparent. JBoss EAP 6 on the other hand cannot even dream of supporting such features. 9) Caching and Grid - Coherence, which is the leading and most mature data grid technology from Oracle, has been available since early 2000 and was integrated with WebLogic in 2009. Coherence and WebLogic clusters can both be managed from the WebLogic administrative console. Even Node Manager supports Coherence. JBoss on the other hand discontinued JBoss Cache, which was its caching implementation, just like it did with the messaging implementation (JBossMQ), which was an issue for long-term customers. JBoss EAP 6 ships Infinispan version 1.0, which is immature and lacks a proven record of successful cases and reliability. - WebLogic Server 12c has a feature called ActiveCache which uses Coherence to, without any code changes, replicate HTTP sessions from both WebLogic and other application servers like JBoss, Tomcat, Websphere, GlassFish and even Microsoft IIS. JBoss EAP 6 on the other hand does not have such support, and even when it does in the future, it will probably support only its own application server. - Coherence can be used to manage both L1 and L2 cache levels, providing support for Oracle TopLink and other JPA compliant implementations, even Hibernate. JBoss EAP 6 and Infinispan on the other hand support only Hibernate. And most important of all: Infinispan does not have any successful case of L1 or L2 caching level support using Hibernate, which leads us to reflect on its viability. 
10) Performance - WebLogic Server 12c is certified with Oracle Exalogic Elastic Cloud and can run unchanged applications on this engineered system. This approach lets customers benefit from Exalogic optimizations of both the kernel and JVM layers to boost performance by as much as 10X for web, OLTP, JMS and grid applications. JBoss EAP 6 on the other hand has no investment in engineered systems: customers do not have the choice to deploy on an ultra fast Java system if their project becomes relevant and performance issues are detected. - WebLogic Server 12c maintains a performance gain across each new release: starting with WebLogic 5.1, the overall performance gain has been close to 4X, which is close to a 20% gain release by release. JBoss on the other hand does not provide SPECjAppServer or SPECjEnterprise performance benchmarks. Their so-called "performance gains" remain hidden in their customer environments, which leads us to wonder whether they are real or not, since we will never get access to those environments. - WebLogic Server 12c has industry performance benchmarks with submissions across platforms and configurations leading SPECj. Oracle WebLogic leads SPECjAppServer performance in multiple categories, fitting all customer topologies: dual-node, single-node, multi-node and multi-node with RAC. JBoss... again, does not provide any SPECjAppServer performance benchmarks. - WebLogic Server 12c has a feature called work managers which allows your application to embrace new performance levels based on critical resource utilization such as CPU usage. Work managers prioritize work and allocate threads based on an execution model that takes into account administrator-defined parameters and actual run-time performance and throughput. JBoss EAP 6 on the other hand has no comparable feature and probably never will. Not supporting a feature like work managers, JBoss EAP 6 forces admin people and especially developers to uncover performance gains in an intrusive way, rewriting the code and doing performance refactorings. 11) Professional Services Support - WebLogic Server 12c and any other technology sold by Oracle give customers the possibility of hiring OCS ("Oracle Consulting Services") to manage critical scenarios, assist in the deployment of new applications, and provide highly skilled consultancy on architecture and best practices, with people allocated together with customer teams. All OCS services are available without any restrictions, whether the customer has bought software from Oracle or is just starting their implementation before any acquisition. JBoss EAP 6, or Red Hat to be more specific, only offers professional services if you buy subscriptions from them. If you are developing a new critical application for your business and need the help of Red Hat for a serious issue or architecture decision, they will probably say: "OK... I can help you, but only after you buy subscriptions from me". Red Hat also does not allow its professional services consultants to manage environments that use community based software. They will probably force you to first buy a subscription, download their "enterprise" version and then, optionally, hire their consultants. - Oracle provides its university to educate your team in our technologies, including of course specialized training on the WebLogic application server. At any time and location, you can hire Oracle to train your team so you get trustworthy knowledge according to your specific needs. 
Certifications for the products are also available if your technical people desire to differentiate themselves as professionals. Red Hat on the other hand has a limited pool of resources to train your team in its technologies. Basically they are selling training and certification for RHEL ("Red Hat Enterprise Linux"), but if you demand more specialized training in JBoss middleware, they will probably connect you to some "certified" partner for localized training, since they are apparently discontinuing their education center, at least here in Brazil. They were not able to reproduce their success with RHEL education in their middleware division, since they need to first sell the subscriptions and only then give you specialized training. And again, they only offer you specialized training based on their enterprise version (EAP in the case of JBoss), which means that the courses will be quite outdated. There are reports of developers who took official training from Red Hat this year (2012) and, in a certain JBoss advanced course, Red Hat supposedly covered JBossMQ as the messaging subsystem, and even the printed material provided was based on JBossMQ since the training was created for JBoss EAP 4.3. 12) Encouraging Transparency without Ulterior Motives - WebLogic Server 12c, like any other software from Oracle, can be downloaded any time from anywhere; you only need an OTN ("Oracle Technology Network") credential and you can download any enterprise software as many times as you want. And it is not some kind of "trial" version. It is the official binaries that will be running forever in your data center. Oracle does not encourage the usage of "specific versions" of our software. The binaries you buy from Oracle are the same binaries anyone in the world can download and use for testing and personal education. JBoss EAP 6 on the other hand is not available for download unless you buy a subscription and get access to the Red Hat enterprise repositories. If you need to test, learn or just start creating your application using Red Hat's middleware software, you must download it from the community website. You are not allowed to download the enterprise version that, according to Red Hat, is more secure, reliable and robust. But none of us wants to start the development of software on an unsecured, unreliable and non-scalable middleware, right? So what do you do? You are "invited" by Red Hat to buy subscriptions from them to get access to the "cool" version of the software. - WebLogic Server 12c prices are publicly available on the Oracle website. If you want to know right now how much WebLogic will cost your organization, just click here and get access to our price list. In the case of WebLogic, check out the "US Oracle Technology Commercial Price List". Oracle also encourages you to get in touch with a sales representative to discuss discounts that would make the investment in our technology possible. But you are not required to do this, only if you are interested in buying our technology or maybe want to discuss some discount scenarios. JBoss EAP 6 on the other hand does not have its cost publicly available on Red Hat's website or in any other media; at least it is not so easy to get such information. The only link you will possibly find on their website is a "Contact a Sales Representative" link. This is not a very good relationship between a customer and a vendor. This is not an example of transparency, especially when the software is sold as open. 
In these situations, customers expect to see software prices publicly available, so they can have the chance to decide, based on the existing features of the software, whether the cost is fair or not. Conclusion Oracle WebLogic is the most mature, secure, reliable and scalable Java EE application server on the market, and has a proven record of success around the globe to prove its maturity. Don't lose the chance to discover today how WebLogic could fit your needs and sustain your global IT middleware strategy, whether your strategy is completely based on the Cloud or not.

    Read the article

  • Enabling Http caching and compression in IIS 7 for asp.net websites

    - by anil.kasalanati
    Caching – There are two ways to set HTTP caching: 1. the max-age property, or 2. the Expires header. Doing the changes via the IIS Console – 1. Select the website for which you want to enable caching and then select HTTP Response Headers in the features tab. 2. Select Expire Web content and, by changing the After setting, you can generate the max-age property for the cache control. 3. Following is the screenshot of the headers. Then you can use a tool like Fiddler and see the 304 response coming from the server. Doing it the web.config way – We can add a static content section in the system.webServer section: <system.webServer> <staticContent> <clientCache cacheControlMode="UseMaxAge" cacheControlMaxAge="365.00:00:00" /> </staticContent> </system.webServer> Compression – By default static compression is enabled on IIS 7.0, but the only thing which falls under that category is CSS, and this is not enough for most websites, which use lots of javascript. If you thought that just enabling dynamic compression would fix this, then you are wrong, so please follow the steps below. On some machines dynamic compression is not enabled; these are the steps to enable it: Open Server Manager > Roles > Web Server (IIS) > Role Services (scroll down) > Add Role Services, add the desired role (Web Server > Performance > Dynamic Content Compression), then Next, Install, Wait… Done! Enable – Open Server Manager > Roles > Web Server (IIS) > Internet Information Services (IIS) Manager. Next pane: Sites > Default Web Site > Your Web Site. Main pane: IIS > Compression. Then comes the custom configuration for compressing javascript resources. The problem is that compression in IIS 7 works entirely on mime types, and by default there is a mismatch in the mime types. Go to the following location C:\Windows\System32\inetsrv\config and open applicationHost.config. The mime map is as follows: <mimeMap fileExtension=".js" mimeType="application/javascript" /> So the matching entry in the staticTypes section should be changed to: <add mimeType="application/javascript" enabled="true" /> Doing it the web.config way – We can add the following section in the system.webServer section (see the assembled example below): <system.webServer> <urlCompression doDynamicCompression="false" doStaticCompression="true"/> </system.webServer> More Information/References – · http://weblogs.asp.net/owscott/archive/2009/02/22/iis-7-compression-good-bad-how-much.aspx · http://www.west-wind.com/weblog/posts/98538.aspx
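    Putting the pieces together, the complete web.config section would look roughly like the sketch below (my consolidation, not from the steps above). Note that doDynamicCompression is set to true here, on the assumption that you want the Dynamic Content Compression role feature you just installed to actually be used; leave it false if you only want static compression:

    <configuration>
      <system.webServer>
        <staticContent>
          <!-- Cache static content for one year using the max-age header -->
          <clientCache cacheControlMode="UseMaxAge" cacheControlMaxAge="365.00:00:00" />
        </staticContent>
        <!-- Apply static and (assumed desired) dynamic compression -->
        <urlCompression doStaticCompression="true" doDynamicCompression="true" />
      </system.webServer>
    </configuration>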

    Read the article

  • Multitask Like a Pro with AquaSnap

    - by Matthew Guay
    Are you tired of shuffling back and forth between windows?  Here’s a handy app that can help you keep all of your windows organized and accessible. AquaSnap is a great free utility that helps you use multiple windows at the same time easily and efficiently.  One of Windows 7’s greatest new features is Aero Snap, which lets you easily view windows side by side by simply dragging windows to the side of your screen.  After using Windows 7 for the past year, we find Aero Snap is one of the features we really miss when using older versions of Windows. With AquaSnap, you now have all of the features of Aero Snap and more in Windows 2000, XP, Vista, and of course Windows 7.  Not only does it give you Aero Snap features, but AquaSnap also gives you more control over your windows to make you more productive. Getting Started AquaSnap is a free download for Windows 2000, XP, Vista, and 7.  Download the small installer (link below) and install it with the default settings. AquaSnap automatically runs as soon as it is installed, and you will notice a new icon in your system tray. Now you can go ahead and put it to use.  Drag a window to any edge or corner of your desktop, and you will see an icon showing what part of the screen the window will cover. Dragging it to the side of the screen expands the window to fill the right half of the screen, just like the default Aero Snap in Windows 7.  You can drag the window away to restore it to its former size. AquaSnap works on any corner of the screen too, so you can have 4 windows side-by-side.  We already have 3 windows snapped to the corners, and notice that we’re dragging a fourth window to the bottom right corner. You can also snap windows to the bottom and top of the screen.  Here we have Word snapped to the bottom half of the screen, and we’re dragging Chrome to the top. You can even snap internal windows in Multiple Document Interface (MDI) programs such as Excel.  Here we are snapping a workbook in Excel to the left to view 2 workbooks side-by-side.   Additionally, AquaSnap lets you keep any window always on top.  Simply shake any window, and it will turn semi-transparent and stay on top of all other windows.  Notice the transparent calculator here on top of Excel. All of AquaSnap’s features work great in Windows 2000, XP, and Vista too.  Here we are snapping IE6 to the left of the screen in XP. Here are 3 windows snapped to the sides in XP.  You can mix the snap modes, and have, for instance, two windows on the right side and one window on the left.  This is a great way to maximize productivity if you need more space in one of the windows. Even AquaShake works to keep a window transparent and on top in XP. Settings AquaSnap has a detailed settings dialog where you can tweak it to work exactly like you want.  Simply right-click on its icon in the system tray, and select Settings. From the first screen, you can choose if you want AquaSnap to start with Windows, and if you want it to show an icon in the system tray.  If you turn off the system tray icon, you can access the AquaSnap settings from Start > All Programs > AquaSnap > Configuration (or simply search for Configuration in Vista or Windows 7). The second tab in settings lets you choose what you want each snapping region to do.  You can also choose two other presets, including AeroSnap (which works just like the default Aero Snap in Windows 7) and AquaSnap simple (which only snaps at the edges of the screen, not the corners). 
The third tab lets you increase or decrease the opacity of pinned windows when using AquaShake, and also lets you increase or decrease the shaking sensitivity.  Additionally, if you prefer the standard AeroShake functionality, which minimizes all other open windows when you shake a window, you can choose that too. The fourth tab lets you activate an optional feature, AquaGlass.  If you activate this, it will make windows turn transparent when you drag them across the screen.   Finally, the last tab lets you change the color and opacity of the preview rectangle, or simply turn it off. Or, if you want to temporarily turn AquaSnap off, simply right-click on its icon and select Off.  In Windows 7, turning off AquaSnap will restore your standard Windows Aero Snap functionality, and in other versions of Windows it will stop letting you snap windows at all.  You can then repeat the steps and select On when you want to use AquaSnap again. Conclusion AquaSnap is a handy tool to make you more productive at your computer.  With a wide variety of useful features, there’s something here for everyone.  Download AquaSnap

    Read the article

  • Big Data – Buzz Words: Importance of Relational Database in Big Data World – Day 9 of 21

    - by Pinal Dave
    In yesterday’s blog post we learned what HDFS is. In this article we will take a quick look at the importance of the relational database in the Big Data world.

    A Big Question? Here are a few questions I have often received since the beginning of this Big Data series: Does the relational database have no place in the Big Data story? Is the relational database no longer relevant as Big Data evolves? Is the relational database not capable of handling Big Data? Is it true that one no longer has to learn about relational data if Big Data is the final destination? Every single time I hear that someone wants to learn about Big Data and is no longer interested in learning about relational databases, I find it a bit far-fetched. I am not here to give the ambiguous answer of “it depends.” I am personally very clear that anyone aspiring to become a Big Data scientist or Big Data expert should learn about relational databases.

    NoSQL Movement. The NoSQL movement of recent years was driven by two important advantages of NoSQL databases: performance and a flexible schema. In my personal experience I have seen both of these advantages when using NoSQL databases. There are instances where I have found the relational database too restrictive, when my data is unstructured or uses datatypes my relational database does not support, and likewise cases where a NoSQL solution performs much better than a relational database. I must say that I am a big fan of NoSQL solutions these days, but I have also seen occasions and situations where a relational database is still the perfect fit, even though the database keeps growing and shows all the symptoms of Big Data.

    Situations Where the Relational Database Outperforms. Ad-hoc reporting is one of the most common scenarios where NoSQL does not have an optimal solution. For example, reporting queries often need to aggregate on columns that are not indexed and that are chosen only while the report is being built; in this kind of scenario NoSQL databases (document stores, distributed key-value stores) often do not perform well. For ad-hoc reporting I have often found it much easier to work with relational databases. SQL is the most popular computer language of all time. I have been using it for over 10 years and I feel that I will be using it for a long time to come. There are plenty of tools, connectors, and general awareness of the SQL language in the industry. Pretty much every programming language has drivers written for SQL, and most developers learned the language during their school or college days. In many cases, writing a query in SQL is much easier than writing it in a NoSQL query language. I believe this is the current situation, though it could reverse in the future if NoSQL query languages become equally popular. ACID (Atomicity, Consistency, Isolation, Durability) – not all NoSQL solutions offer ACID compliance. There are always situations (for example, banking transactions or eCommerce shopping carts) where, without ACID guarantees, operations can be invalid and database integrity can be at risk. Even when the data volume genuinely qualifies as Big Data, there are always operations in the application that absolutely need an ACID-compliant, mature language.
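    To make the ACID point concrete, here is a minimal T-SQL sketch of the classic bank transfer example. The Accounts table, column names, and amounts are hypothetical illustrations, not objects from this article.

    BEGIN TRY
        BEGIN TRANSACTION;
        -- Debit one account and credit another as a single atomic unit.
        UPDATE Accounts SET Balance = Balance - 100 WHERE AccountId = 1;
        UPDATE Accounts SET Balance = Balance + 100 WHERE AccountId = 2;
        COMMIT TRANSACTION;
    END TRY
    BEGIN CATCH
        -- Any failure rolls back both updates, keeping the data consistent.
        IF @@TRANCOUNT > 0
            ROLLBACK TRANSACTION;
    END CATCH;

    Without this all-or-nothing behavior, a crash between the two updates would leave money unaccounted for – exactly the integrity risk described above.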
    The Mixed Bag. I have often heard the argument that all the big social media sites have nowadays moved away from relational databases. Actually, this is not entirely true. While researching Big Data and relational databases, I have found that many of the popular social media sites use Big Data solutions along with relational databases. Many use relational databases to deliver results to end users at run time, and many still use a relational database as their major backbone. Here are a few examples: Facebook uses MySQL to display the timeline. (Reference Link) Twitter uses MySQL. (Reference Link) Tumblr uses sharded MySQL. (Reference Link) Wikipedia uses MySQL for data storage. (Reference Link) There are many prominent organizations running large-scale applications that use a relational database along with various Big Data frameworks to satisfy their various business needs.

    Summary. I believe that the RDBMS is like vanilla ice cream. Everybody loves it and everybody has it. NoSQL and other solutions are like chocolate ice cream or custom ice cream – there is a huge base which loves them and wants them, but not every ice cream maker can make them just right for everyone’s taste. No matter how fancy an ice cream store is, plain vanilla ice cream is always available there. In the same way, there are always cases and situations in the Big Data story where the traditional relational database is part of the whole story. In real-world scenarios there will always be cases that call for relational database concepts and their ideology. It is extremely important to accept the relational database as one of the key components of Big Data instead of treating it as a substandard technology.

    Ray of Hope – NewSQL. In this module we discussed that there are places where we need ACID compliance from our Big Data application, and NoSQL does not support that out of the box. A new term has been coined for applications/tools that support most of the properties of a traditional RDBMS while also supporting Big Data infrastructure – NewSQL.

    Tomorrow. In tomorrow’s blog post we will discuss NewSQL. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: Big Data, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL

    Read the article

  • SQL SERVER – SSMS: Backup and Restore Events Report

    - by Pinal Dave
    A DBA wears multiple hats and in fact does more than the eye can see. One of the core tasks of a DBA is taking backups. This looks so trivial that most developers shrug it off as the only activity a DBA might be doing. I have huge respect for DBAs all around the world: they handle scripting, automation, and maintenance around the clock to keep the business running almost 365 days, 24×7, and their worth shows on the day a system or HDD crashes while an important delivery is due. That is when those backup tasks and maintenance jobs come in handy and are no longer as trivial as many consider them to be. Important questions like “When was the last backup taken?”, “How much time did the last backup take?”, and “What type of backup was taken last?” are tricky, and this report delivers the answers in a jiffy. The SSMS report we are talking about can be used to find the backup and restore operations done for the selected database. Whenever we perform any backup or restore operation, the information is stored in the msdb database. This report utilizes that information to show the size, time taken, and file location for those operations. Here is how this report can be launched.

    Once we launch this report, we can see 4 major sections, listed below:
    · Average Time Taken For Backup Operations
    · Successful Backup Operations
    · Backup Operation Errors
    · Successful Restore Operations
    Let us look at each section next.

    Average Time Taken For Backup Operations. The information shown in this section is taken from the backupset table in the msdb database. Here is the query behind that particular section:

    USE msdb;
    SELECT (ROW_NUMBER() OVER (ORDER BY t1.TYPE)) % 2 AS l1,
           1 AS l2,
           1 AS l3,
           t1.TYPE AS [type],
           (AVG(DATEDIFF(ss, backup_start_date, backup_finish_date))) / 60.0 AS AverageBackupDuration
    FROM backupset t1
    INNER JOIN sys.databases t3 ON (t1.database_name = t3.name)
    WHERE t3.name = N'AdventureWorks2014'
    GROUP BY t1.TYPE
    ORDER BY t1.TYPE

    On my small database the time taken for the differential backup was less than a minute, hence a value of zero is displayed. This is an important piece of backup information which might help you in planning maintenance windows.

    Successful Backup Operations. This information is derived from various backup tracking tables in the msdb database. Here is a simplified version of the query, which can be used separately as well:

    SELECT *
    FROM sys.databases t1
    INNER JOIN backupset t3 ON (t3.database_name = t1.name)
    LEFT OUTER JOIN backupmediaset t5 ON (t3.media_set_id = t5.media_set_id)
    LEFT OUTER JOIN backupmediafamily t6 ON (t6.media_set_id = t5.media_set_id)
    WHERE (t1.name = N'AdventureWorks2014')
    ORDER BY backup_start_date DESC, t3.backup_set_id, t6.physical_device_name;

    The report does some calculations to show the data in a more readable format; for example, the backup size is shown in KB, MB or GB. I have expanded the first row by clicking the (+) in the “Device type” column, which reveals the path of the physical backup file. Looking at this section, the Backup Size, Device Type and Backup Name columns are critical and worth noting. As mentioned in the previous section, this section also has the Duration embedded inside it.

    Backup Operation Errors. This section of the report gets its data from the default trace. You might wonder how.
    One of the events tracked by the default trace is “ErrorLog”. This means that whatever message is written to the ERRORLOG also gets written to the default trace file. Interestingly, whenever there is a backup failure, an error message is written to the ERRORLOG and hence to the default trace. This section takes advantage of that and shows the information. We can read the message below under this section, which confirms the logic above: “No backup operations errors occurred for (AdventureWorks2014) database in the recent past or default trace is not enabled.”

    Successful Restore Operations. This section may not be very useful on a production server (how often do you restore a database there?) but can be useful in development and log shipping secondary environments, where we might be interested in the restore operations for a particular database. To fill this section of the report, I restored the same backups which were taken to populate the earlier sections. Here is a simplified version of the query used to populate this output:

    USE msdb;
    SELECT *
    FROM restorehistory t1
    LEFT OUTER JOIN restorefile t2 ON (t1.restore_history_id = t2.restore_history_id)
    LEFT OUTER JOIN backupset t3 ON (t1.backup_set_id = t3.backup_set_id)
    WHERE t1.destination_database_name = N'AdventureWorks2014'
    ORDER BY restore_date DESC, t1.restore_history_id, t2.destination_phys_name

    Have you ever looked at the backup strategy of your key databases? Is it in sync with your needs, and is there scope for improvement? Then this is the report to analyze after a week or a month of maintenance plans running on your database. Do chime in with the strategies you are using in your environments. Reference: Pinal Dave (http://blog.sqlauthority.com) Filed under: PostADay, SQL, SQL Authority, SQL Backup and Restore, SQL Query, SQL Server, SQL Server Management Studio, SQL Tips and Tricks, T SQL Tagged: SQL Reports
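    If you want to answer the report’s opening questions directly in T-SQL, a minimal sketch against the same msdb history table is shown below. This is not one of the report’s own queries; it is an illustrative sketch against the backupset table described above.

    USE msdb;
    -- Most recent backup and average duration per database and backup type
    -- (type: D = full, I = differential, L = log).
    SELECT t1.database_name,
           t1.type,
           MAX(t1.backup_finish_date) AS last_backup_finished,
           AVG(DATEDIFF(ss, t1.backup_start_date, t1.backup_finish_date)) / 60.0 AS avg_duration_minutes
    FROM backupset t1
    GROUP BY t1.database_name, t1.type
    ORDER BY t1.database_name, t1.type;

    This is essentially the same information the report surfaces, which is why the msdb history tables are worth knowing even when SSMS is not at hand.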

    Read the article

  • Backup options in SharePoint 2007

    - by sreejukg
    It is very important to make sure the server farm backup is taken properly, so that in case of any disaster the administrator has a recent backup that can be used to restore. This article addresses some of the options available for backup/restore in SharePoint 2007.

    Backup. There are two options that can be utilized to take a backup of SharePoint sites.

    Using the SharePoint Central Administration website. Using the SharePoint Central Administration website, you can do backup/restore from the user interface. Using the Central Administration website you can back up the following:
    · Server farm
    · Web application
    · Content databases
    Follow these steps to take a backup of the server farm using Central Administration:
    1. Open the Central Administration website.
    2. Navigate to Operations -> Backup and Restore -> Perform a backup.
    3. Here you will have options to choose the items to back up. Select Farm (the top-most item in the list).
    4. Once you select the items to back up, click on “Continue to backup options”.
    5. Select “Full” as the type of backup.
    6. In the backup file location, enter the path where you need to store the backup. The path should be a UNC path; e.g., for the C drive you may use \\server\c$\mybackupFolder
    7. Click OK.
    8. Now you will be redirected to the Backup and Restore Status page. This page shows the progress of the backup operation. You can use the refresh button to update the status of the backup (this page automatically refreshes every 30 seconds). Once completed, you can find the files in the specified folder.

    Using the STSADM command line tool. SharePoint comes with the STSADM command line tool. STSADM provides a lot of administrative operations that can be performed on SharePoint 2007 sites. You can find the STSADM command in the following location: C:\Program Files\Common Files\Microsoft shared\web server extensions\12\bin (you may change the drive letter according to your installation). STSADM provides a method for performing the Office SharePoint Server 2007 administration tasks at the command line or by using batch files or scripts, and it provides access to operations not available through the Central Administration site. The general syntax for STSADM is as follows:
    STSADM -operation OperationName -parameter1 value1 -parameter2 value2 ...
    Using STSADM you can back up the following:
    · Server farm
    · Web application
    · Content databases
    To perform any STSADM operation you need to be a member of the administrators group. Follow these steps to take a backup of the SharePoint server farm using the STSADM tool. (Note: make sure you are logged in to the computer where the Central Administration website is installed.)
    1. Open the command prompt (you should run the command prompt with administrator privileges).
    2. Change the working directory to C:\Program Files\Common Files\Microsoft shared\web server extensions\12\bin
    3. Enter the following command, then press Enter:
    stsadm -o backup -directory <UNC path> -backupmethod full
    4. You will get a success/failure message once the command finishes.

    How to schedule the backup. There is no option to schedule a backup using the Central Administration site, and there is no operation provided by STSADM to automate the backup, so farm administrators need to take backups at regular intervals. To achieve this, you can write a batch file that includes the STSADM command to take a full backup of the server. This batch file can then be scheduled using the Windows Task Scheduler to execute at certain intervals. Sample of the batch file:
    1. Open Notepad (or any other text editor).
    2. Enter the following commands:

    @echo off
    echo ===============================================================
    echo Back up the farm to <C:\backup>
    echo ===============================================================
    cd %COMMONPROGRAMFILES%\Microsoft Shared\web server extensions\12\BIN
    stsadm.exe -o backup -directory "<C:\backup>" -backupmethod full
    echo completed

    3. Save the file with a .bat extension. You can then schedule this batch file as you require.

    Other Options. Using the STSADM tool you can also take a backup of an individual site collection. The syntax for this is:
    stsadm -o backup -url <URL name for site collection> -filename <file name> [-overwrite]
    The explanations for the parameters are as follows:
    -url: the URL of the site collection you need to back up
    -filename: the name of the backup file, e.g. c:\backup.bak
    -overwrite: optional; indicates whether to overwrite the file if the specified filename already exists
    If you are creating a batch file to schedule the backup of a site collection, you may want the backup filename to be generated automatically. One option is to generate the filename with the date, so that you keep a backup for each day. For example, the following commands can be used to create a site collection backup:

    @echo off
    echo ===============================================================
    echo Back up the site collection to <C:\backup>
    echo ===============================================================
    echo getting today's date into a variable
    echo ===============================================================
    @For /F "tokens=1,2,3 delims=/ " %%A in ('Date /t') do @(
        Set Day=%%A
        Set Month=%%B
        Set Year=%%C
        Set todayDate=%%C%%B%%A
    )
    cd %COMMONPROGRAMFILES%\Microsoft Shared\web server extensions\12\BIN
    stsadm -o backup -url <sitecollection url> -filename \\ServerName\ShareName\Backup_%todayDate%.bak -overwrite
    echo completed

    To read more about the STSADM backup operation, see http://technet.microsoft.com/en-us/library/cc263441.aspx
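    Since SharePoint 2007 content databases are ordinary SQL Server databases, a DBA can also complement the two methods above with a native SQL Server backup of a content database. The sketch below is an addition to this article, not one of its commands; the database name and backup path are hypothetical examples.

    -- Native SQL Server backup of a single SharePoint content database.
    -- [WSS_Content] and the UNC path are placeholder names.
    BACKUP DATABASE [WSS_Content]
    TO DISK = N'\\server\backupshare\WSS_Content.bak'
    WITH INIT, STATS = 10;

    Note that a native database backup does not capture farm configuration the way the STSADM farm backup does, so it complements rather than replaces the methods described above.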

    Read the article

< Previous Page | 393 394 395 396 397 398 399 400 401 402 403 404  | Next Page >