Search Results

Search found 14679 results on 588 pages for 'index merge'.

  • Can this PHP version of SPICE be improved?

    - by Noctis Skytower
    I do not know much about PHP's standard library of functions and was wondering if the following code can be improved in any way. The implementation should yield the same results and the API should remain as it is, but ways to make it more PHP-ish would be greatly appreciated. This is a custom encryption library.

    Code

        <?php
        /***************************************
        Create random major and minor SPICE key.
        ***************************************/
        function crypt_major() {
            $all = range("\x00", "\xFF");
            shuffle($all);
            $major_key = implode("", $all);
            return $major_key;
        }

        function crypt_minor() {
            $sample = array();
            do {
                array_push($sample, 0, 1, 2, 3);
            } while (count($sample) != 256);
            shuffle($sample);
            $list = array();
            for ($index = 0; $index < 64; $index++) {
                $b12 = $sample[$index * 4] << 6;
                $b34 = $sample[$index * 4 + 1] << 4;
                $b56 = $sample[$index * 4 + 2] << 2;
                $b78 = $sample[$index * 4 + 3];
                array_push($list, $b12 + $b34 + $b56 + $b78);
            }
            $minor_key = implode("", array_map("chr", $list));
            return $minor_key;
        }

        /***************************************
        Create the SPICE key via the given name.
        ***************************************/
        function named_major($name) {
            srand(crc32($name));
            return crypt_major();
        }

        function named_minor($name) {
            srand(crc32($name));
            return crypt_minor();
        }

        /***************************************
        Check validity for major and minor keys.
        ***************************************/
        function _check_major($key) {
            if (is_string($key) && strlen($key) == 256) {
                foreach (range("\x00", "\xFF") as $char) {
                    if (substr_count($key, $char) == 0) {
                        return FALSE;
                    }
                }
                return TRUE;
            }
            return FALSE;
        }

        function _check_minor($key) {
            if (is_string($key) && strlen($key) == 64) {
                $indexs = array();
                foreach (array_map("ord", str_split($key)) as $byte) {
                    foreach (range(6, 0, 2) as $shift) {
                        array_push($indexs, ($byte >> $shift) & 3);
                    }
                }
                $dict = array_count_values($indexs);
                foreach (range(0, 3) as $index) {
                    if ($dict[$index] != 64) {
                        return FALSE;
                    }
                }
                return TRUE;
            }
            return FALSE;
        }

        /***************************************
        Create encode maps for encode functions.
        ***************************************/
        function _encode_map_1($major) {
            return array_map("ord", str_split($major));
        }

        function _encode_map_2($minor) {
            $map_2 = array(array(), array(), array(), array());
            $list = array();
            foreach (array_map("ord", str_split($minor)) as $byte) {
                foreach (range(6, 0, 2) as $shift) {
                    array_push($list, ($byte >> $shift) & 3);
                }
            }
            for ($byte = 0; $byte < 256; $byte++) {
                array_push($map_2[$list[$byte]], chr($byte));
            }
            return $map_2;
        }

        /***************************************
        Create decode maps for decode functions.
        ***************************************/
        function _decode_map_1($minor) {
            $map_1 = array();
            foreach (array_map("ord", str_split($minor)) as $byte) {
                foreach (range(6, 0, 2) as $shift) {
                    array_push($map_1, ($byte >> $shift) & 3);
                }
            }
            return $map_1;
        }

        function _decode_map_2($major) {
            $map_2 = array();
            $temp = array_map("ord", str_split($major));
            for ($byte = 0; $byte < 256; $byte++) {
                $map_2[$temp[$byte]] = chr($byte);
            }
            return $map_2;
        }

        /***************************************
        Encrypt or decrypt the string with maps.
        ***************************************/
        function _encode($string, $map_1, $map_2) {
            $cache = "";
            foreach (str_split($string) as $char) {
                $byte = $map_1[ord($char)];
                foreach (range(6, 0, 2) as $shift) {
                    $cache .= $map_2[($byte >> $shift) & 3][mt_rand(0, 63)];
                }
            }
            return $cache;
        }

        function _decode($string, $map_1, $map_2) {
            $cache = "";
            $temp = str_split($string);
            for ($iter = 0; $iter < strlen($string) / 4; $iter++) {
                $b12 = $map_1[ord($temp[$iter * 4])] << 6;
                $b34 = $map_1[ord($temp[$iter * 4 + 1])] << 4;
                $b56 = $map_1[ord($temp[$iter * 4 + 2])] << 2;
                $b78 = $map_1[ord($temp[$iter * 4 + 3])];
                $cache .= $map_2[$b12 + $b34 + $b56 + $b78];
            }
            return $cache;
        }

        /***************************************
        This is the public interface for coding.
        ***************************************/
        function encode_string($string, $major, $minor) {
            if (is_string($string)) {
                if (_check_major($major) && _check_minor($minor)) {
                    $map_1 = _encode_map_1($major);
                    $map_2 = _encode_map_2($minor);
                    return _encode($string, $map_1, $map_2);
                }
            }
            return FALSE;
        }

        function decode_string($string, $major, $minor) {
            if (is_string($string) && strlen($string) % 4 == 0) {
                if (_check_major($major) && _check_minor($minor)) {
                    $map_1 = _decode_map_1($minor);
                    $map_2 = _decode_map_2($major);
                    return _decode($string, $map_1, $map_2);
                }
            }
            return FALSE;
        }
        ?>

    This is a sample showing how the code is being used. Hex editors may be of help with the input / output.

    Example

        <?php
        # get and process all of the form data
        @ $input = htmlspecialchars($_POST["input"]);
        @ $majorname = htmlspecialchars($_POST["majorname"]);
        @ $minorname = htmlspecialchars($_POST["minorname"]);
        @ $majorkey = htmlspecialchars($_POST["majorkey"]);
        @ $minorkey = htmlspecialchars($_POST["minorkey"]);
        @ $output = htmlspecialchars($_POST["output"]);

        # process the submissions by operation
        # CREATE
        @ $operation = $_POST["operation"];
        if ($operation == "Create") {
            if (strlen($_POST["majorname"]) == 0) { $majorkey = bin2hex(crypt_major()); }
            if (strlen($_POST["minorname"]) == 0) { $minorkey = bin2hex(crypt_minor()); }
            if (strlen($_POST["majorname"]) != 0) { $majorkey = bin2hex(named_major($_POST["majorname"])); }
            if (strlen($_POST["minorname"]) != 0) { $minorkey = bin2hex(named_minor($_POST["minorname"])); }
        }

        # ENCRYPT or DECRYPT
        function is_hex($char) {
            if ($char == "0"): return TRUE;
            elseif ($char == "1"): return TRUE;
            elseif ($char == "2"): return TRUE;
            elseif ($char == "3"): return TRUE;
            elseif ($char == "4"): return TRUE;
            elseif ($char == "5"): return TRUE;
            elseif ($char == "6"): return TRUE;
            elseif ($char == "7"): return TRUE;
            elseif ($char == "8"): return TRUE;
            elseif ($char == "9"): return TRUE;
            elseif ($char == "a"): return TRUE;
            elseif ($char == "b"): return TRUE;
            elseif ($char == "c"): return TRUE;
            elseif ($char == "d"): return TRUE;
            elseif ($char == "e"): return TRUE;
            elseif ($char == "f"): return TRUE;
            else: return FALSE;
            endif;
        }

        function hex2bin($str) {
            if (strlen($str) % 2 == 0):
                $string = strtolower($str);
            else:
                $string = strtolower("0" . $str);
            endif;
            $cache = "";
            $temp = str_split($str);
            for ($index = 0; $index < count($temp) / 2; $index++) {
                $h1 = $temp[$index * 2];
                if (is_hex($h1)) {
                    $h2 = $temp[$index * 2 + 1];
                    if (is_hex($h2)) {
                        $cache .= chr(hexdec($h1 . $h2));
                    } else {
                        return FALSE;
                    }
                } else {
                    return FALSE;
                }
            }
            return $cache;
        }

        if ($operation == "Encrypt" || $operation == "Decrypt") {
            # CHECK FOR ANY ERROR
            $errors = array();
            if (strlen($_POST["input"]) == 0) { $output = ""; }
            $binmajor = hex2bin($_POST["majorkey"]);
            if (strlen($_POST["majorkey"]) == 0) {
                array_push($errors, "There must be a major key.");
            } elseif ($binmajor == FALSE) {
                array_push($errors, "The major key must be in hex.");
            } elseif (_check_major($binmajor) == FALSE) {
                array_push($errors, "The major key is corrupt.");
            }
            $binminor = hex2bin($_POST["minorkey"]);
            if (strlen($_POST["minorkey"]) == 0) {
                array_push($errors, "There must be a minor key.");
            } elseif ($binminor == FALSE) {
                array_push($errors, "The minor key must be in hex.");
            } elseif (_check_minor($binminor) == FALSE) {
                array_push($errors, "The minor key is corrupt.");
            }
            if ($_POST["operation"] == "Decrypt") {
                $bininput = hex2bin(str_replace("\r", "", str_replace("\n", "", $_POST["input"])));
                if ($bininput == FALSE) {
                    if (strlen($_POST["input"]) != 0) {
                        array_push($errors, "The input data must be in hex.");
                    }
                } elseif (strlen($bininput) % 4 != 0) {
                    array_push($errors, "The input data is corrupt.");
                }
            }
            if (count($errors) != 0) {
                # ERRORS ARE FOUND
                $output = "ERROR:";
                foreach ($errors as $error) { $output .= "\n" . $error; }
            } elseif (strlen($_POST["input"]) != 0) {
                # CONTINUE WORKING
                if ($_POST["operation"] == "Encrypt") {
                    # ENCRYPT
                    $output = substr(chunk_split(bin2hex(encode_string($_POST["input"], $binmajor, $binminor)), 58), 0, -2);
                } else {
                    # DECRYPT
                    $output = htmlspecialchars(decode_string($bininput, $binmajor, $binminor));
                }
            }
        }

        # echo the form with the values filled
        echo "<P><TEXTAREA class=maintextarea name=input rows=25 cols=25>" . $input . "</TEXTAREA></P>\n";
        echo "<P>Major Name:</P>\n";
        echo "<P><INPUT id=textbox1 name=majorname value=\"" . $majorname . "\"></P>\n";
        echo "<P>Minor Name:</P>\n";
        echo "<P><INPUT id=textbox1 name=minorname value=\"" . $minorname . "\"></P>\n";
        echo "<DIV style=\"TEXT-ALIGN: center\"><INPUT class=submit type=submit value=Create name=operation>\n";
        echo "</DIV>\n";
        echo "<P>Major Key:</P>\n";
        echo "<P><INPUT id=textbox1 name=majorkey value=\"" . $majorkey . "\"></P>\n";
        echo "<P>Minor Key:</P>\n";
        echo "<P><INPUT id=textbox1 name=minorkey value=\"" . $minorkey . "\"></P>\n";
        echo "<DIV style=\"TEXT-ALIGN: center\"><INPUT class=submit type=submit value=Encrypt name=operation> \n";
        echo "<INPUT class=submit type=submit value=Decrypt name=operation> </DIV>\n";
        echo "<P>Result:</P>\n";
        echo "<P><TEXTAREA class=maintextarea name=output rows=25 readOnly cols=25>" . $output . "</TEXTAREA></P></DIV></FORM>\n";
        ?>
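    As one illustration of a more PHP-ish direction (a sketch only, and note that PHP 5.4+ ships a native hex2bin(), which is why the helper below uses a different name): the sixteen-branch is_hex() and the manual hex-to-binary loop in the example can lean on built-ins such as ctype_xdigit() and pack():

        <?php
        function is_hex($char) {
            // ctype_xdigit() accepts only hexadecimal digits,
            // replacing the sixteen-branch elseif chain with one call
            return ctype_xdigit($char);
        }

        function hex_to_bin_sketch($str) {
            // reject anything that is not pure hex up front
            if (!ctype_xdigit($str)) {
                return FALSE;
            }
            // left-pad odd-length input with "0", as the original does
            if (strlen($str) % 2 != 0) {
                $str = "0" . $str;
            }
            // pack("H*", ...) converts a hex string to its binary form
            return pack("H*", strtolower($str));
        }
        ?>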

  • Voxel Face Crawling (Mesh simplification, possibly using greedy)

    - by Tim Winter
    This is in regards to a Minecraft-like terrain engine. I store blocks in chunks (16x256x16 blocks in a chunk). When I generate a chunk, I use multiple procedural techniques to set the terrain and to place objects. While generating, I keep one 1D array for the full chunk (solid or not) and a separate 1D array of solid blocks. After generation, I iterate through the solid blocks checking their neighbors so I only generate block faces that don't have solid neighbors. I store which faces to generate in their own list (that's 6 lists, one per possible face). When rendering a chunk, I render all lists in the camera's current chunk and only the lists facing the camera in all other chunks. Using a 2D atlas with this little shader trick Andrew Russell suggested, I want to merge similar faces together completely. That is, if they are in the same list (same normal), are adjacent to each other, have the same light level, etc. My assumption would be to have each of the 6 lists sorted by the axis they rest on, then by the other two axes (the list for the top of a block would be sorted by its Y value, then X, then Z). With this alone, I could quite easily merge strips of faces, but I'm looking to merge more than just strips together when possible. I've read up on this greedy meshing algorithm, but I am having a lot of trouble understanding it. To even use it, I would think I'd need to perform a type of flood-fill per sorted list to get the groups of merge-able faces. Then, per group, perform the greedy algorithm. It all sounds awfully expensive if I would ever want dynamic terrain/lighting after initial generation. So, my question: To perform merging of faces as described (ignoring whether it's a bad idea for dynamic terrain/lighting), is there perhaps an algorithm that is simpler to implement? I would also quite happily accept an answer that walks me through the greedy algorithm in a much simpler way (a link or explanation). I don't mind a slight performance decrease if it's easier to implement, or even if it's only a little better than just doing strips. I worry that most algorithms focus on triangles rather than quads, and since I'm using a 2D atlas the way I am, I don't know that I could implement something triangle-based with my current skills. PS: I already frustum cull per chunk and, as described, I also cull faces between solid blocks. I don't occlusion cull yet and may never.
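    For context, the "strips, then widen the strip" pass is the heart of the greedy algorithm once you look at one plane of same-direction faces at a time, and it is simpler to implement than the general description suggests. Below is a C# sketch (not from any engine): each cell of a 16x16 mask holds an int id, where equal non-zero ids mean "same texture, same light level, merge-able" and 0 means "no face here". It grows a strip along one axis, then widens the whole strip along the other, emitting one quad per rectangle. Run it once per Y-layer for the top/bottom lists (and per X/Z slice for the side lists); dynamic updates then only re-run it for the slices that changed.

        using System.Collections.Generic;

        struct Quad
        {
            public int X, Z, W, D;
            public Quad(int x, int z, int w, int d) { X = x; Z = z; W = w; D = d; }
        }

        static class GreedyMesher
        {
            // Greedy rectangle merge over one face plane of a chunk slice.
            static List<Quad> Merge(int[,] mask, int w, int h)
            {
                var quads = new List<Quad>();
                var used = new bool[w, h];
                for (int z = 0; z < h; z++)
                for (int x = 0; x < w; x++)
                {
                    if (mask[x, z] == 0 || used[x, z]) continue;
                    int id = mask[x, z];

                    // 1. grow a strip along +x while the face id matches
                    int width = 1;
                    while (x + width < w && mask[x + width, z] == id && !used[x + width, z])
                        width++;

                    // 2. widen the whole strip along +z while every cell matches
                    int depth = 1;
                    while (z + depth < h)
                    {
                        bool rowOk = true;
                        for (int k = 0; k < width; k++)
                            if (mask[x + k, z + depth] != id || used[x + k, z + depth])
                            { rowOk = false; break; }
                        if (!rowOk) break;
                        depth++;
                    }

                    // 3. mark the rectangle as consumed and emit one quad
                    for (int dz = 0; dz < depth; dz++)
                        for (int dx = 0; dx < width; dx++)
                            used[x + dx, z + dz] = true;
                    quads.Add(new Quad(x, z, width, depth));
                }
                return quads;
            }
        }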

  • SortedDictionary and SortedList

    - by Simon Cooper
    Apart from Dictionary<TKey, TValue>, there are two other dictionaries in the BCL - SortedDictionary<TKey, TValue> and SortedList<TKey, TValue>. On the face of it, these two classes do the same thing - provide an IDictionary<TKey, TValue> interface where the iterator returns the items sorted by the key. So what's the difference between them, and when should you use one rather than the other? (As in my previous post, I'll assume you have some basic algorithm & data structure knowledge.)

    SortedDictionary

    We'll first cover SortedDictionary. This is implemented as a special sort of binary tree called a red-black tree. Essentially, it's a binary tree that uses various constraints on how the nodes of the tree can be arranged to ensure the tree is always roughly balanced (for more gory algorithmic details, see the Wikipedia link above). What I'm concerned about in this post is how the .NET SortedDictionary is actually implemented. In .NET 4, behind the scenes, the actual implementation of the tree is delegated to a SortedSet<KeyValuePair<TKey, TValue>>. One example tree might look like this: Each node in the above tree is stored as a separate SortedSet<T>.Node object (remember, in a SortedDictionary, T is instantiated to KeyValuePair<TKey, TValue>):

        class Node
        {
            public bool IsRed;
            public T Item;
            public SortedSet<T>.Node Left;
            public SortedSet<T>.Node Right;
        }

    The SortedSet only stores a reference to the root node; all the data in the tree is accessed by traversing the Left and Right node references until you reach the node you're looking for. Each individual node can be physically stored anywhere in memory; what's important is the relationship between the nodes. This is also why there is no constructor to SortedDictionary or SortedSet that takes an integer representing the capacity; there are no internal arrays that need to be created and resized. This may seem trivial, but it's an important distinction between SortedDictionary and SortedList that I'll cover later on.

    And that's pretty much it; it's a standard red-black tree. Plenty of webpages and data structure books cover the algorithms behind the tree itself far better than I could. What's interesting is the comparison between SortedDictionary and SortedList, which I'll cover at the end. As a side point, SortedDictionary has existed in the BCL ever since .NET 2. That means that, all through .NET 2, 3, and 3.5, there has been a bona-fide sorted set class in the BCL (called TreeSet). However, it was internal, so it couldn't be used outside System.dll. Only in .NET 4 was this class exposed as SortedSet.

    SortedList

    Whereas SortedDictionary didn't use any backing arrays, SortedList does. It is implemented just as the name suggests; two arrays, one containing the keys, and one the values (I've just used random letters for the values): The items in the keys array are always guaranteed to be stored in sorted order, and the value corresponding to each key is stored in the same index as the key in the values array. In this example, the value for key item 5 is 'z', and for key item 8 is 'm'. Whenever an item is inserted or removed from the SortedList, a binary search is run on the keys array to find the correct index, then all the items in the arrays are shifted to accommodate the new or removed item.

    For example, if the key 3 was removed, a binary search would be run to find the array index the item was at, then everything above that index would be moved down by one; and then if the key/value pair {7, 'f'} was added, a binary search would be run on the keys to find the index to insert the new item, and everything above that index would be moved up to accommodate the new item. If another item was then added, both arrays would be resized (to a length of 10) before the new item was added to the arrays.

    As you can see, any insertions or removals in the middle of the list require a proportion of the array contents to be moved; an O(n) operation. However, if the insertion or removal is at the end of the array (i.e. the largest key), then it's only O(log n); the cost of the binary search to determine it does actually need to be added to the end (excluding the occasional O(n) cost of resizing the arrays to fit more items).

    As a side effect of using backing arrays, SortedList offers IList Keys and Values views that simply use the backing keys or values arrays, as well as various methods utilising the array index of stored items, which SortedDictionary does not (and cannot) offer.

    The Comparison

    So, when should you use one and not the other? Well, here are the important differences:

    Memory usage

    SortedDictionary and SortedList have very different memory profiles. SortedDictionary...
    - has a memory overhead of one object instance, a bool, and two references per item. On 64-bit systems, this adds up to ~40 bytes, not including the stored item and the reference to it from the Node object.
    - stores the items in separate objects that can be spread all over the heap. This helps to keep memory fragmentation low, as the individual node objects can be allocated wherever there's a spare 60 bytes.

    In contrast, SortedList...
    - has no additional overhead per item (only the reference to it in the array entries); however, the backing arrays can be significantly larger than you need; every time the arrays are resized they double in size. That means that if you add 513 items to a SortedList, the backing arrays will each have a length of 1024. To counteract this, the TrimExcess method resizes the arrays back down to the actual size needed, or you can simply assign list.Capacity = list.Count.
    - stores its items in a contiguous block in memory. If the list stores thousands of items, this can cause significant problems with Large Object Heap memory fragmentation as the array resizes, which SortedDictionary doesn't have.

    Performance

    Operations on a SortedDictionary always have O(log n) performance, regardless of where in the collection you're adding or removing items. In contrast, SortedList has O(n) performance when you're altering the middle of the collection. If you're adding or removing from the end (i.e. the largest item), then performance is O(log n), same as SortedDictionary (in practice, it will likely be slightly faster, due to the array items all being in the same area in memory, also called locality of reference).

    So, when should you use one and not the other? As always with these sorts of things, there are no hard-and-fast rules. But generally, if you:
    - need to access items using their index within the collection
    - are populating the dictionary all at once from sorted data
    - aren't adding or removing keys once it's populated
    then use a SortedList.

    But if you:
    - don't know how many items are going to be in the dictionary
    - are populating the dictionary from random, unsorted data
    - are adding & removing items randomly
    then use a SortedDictionary. The default (again, there are no definite rules on these sorts of things!) should be to use SortedDictionary, unless there's a good reason to use SortedList, due to the bad performance of SortedList when altering the middle of the collection.
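    As a quick illustration of the practical differences described above (a minimal sketch, not from the original article):

        using System;
        using System.Collections.Generic;

        class Demo
        {
            static void Main()
            {
                // SortedList: backing arrays, so index-based access is available
                var list = new SortedList<int, string> { { 5, "z" }, { 8, "m" }, { 3, "a" } };
                Console.WriteLine(list.Keys[0]);    // 3  - smallest key, by array index
                Console.WriteLine(list.Values[0]);  // "a"
                list.TrimExcess();                  // shrink the doubled backing arrays

                // SortedDictionary: red-black tree, no indexer, no capacity to trim
                var dict = new SortedDictionary<int, string> { { 5, "z" }, { 8, "m" }, { 3, "a" } };
                foreach (var pair in dict)          // still iterates in key order
                    Console.WriteLine(pair.Key + " = " + pair.Value);
            }
        }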

  • When is a Seek not a Seek?

    - by Paul White
    The following script creates a single-column clustered table containing the integers from 1 to 1,000 inclusive.

        IF OBJECT_ID(N'tempdb..#Test', N'U') IS NOT NULL
            DROP TABLE #Test;
        GO
        CREATE TABLE #Test
        (
            id INTEGER PRIMARY KEY CLUSTERED
        );

        INSERT #Test (id)
        SELECT V.number
        FROM master.dbo.spt_values AS V
        WHERE V.[type] = N'P'
        AND V.number BETWEEN 1 AND 1000;

    Let’s say we need to find the rows with values from 100 to 170, excluding any values that divide exactly by 10.  One way to write that query would be:

        SELECT T.id
        FROM #Test AS T
        WHERE T.id IN
        (
            101,102,103,104,105,106,107,108,109,
            111,112,113,114,115,116,117,118,119,
            121,122,123,124,125,126,127,128,129,
            131,132,133,134,135,136,137,138,139,
            141,142,143,144,145,146,147,148,149,
            151,152,153,154,155,156,157,158,159,
            161,162,163,164,165,166,167,168,169
        );

    That query produces a pretty efficient-looking query plan. Knowing that the source column is defined as an INTEGER, we could also express the query this way:

        SELECT T.id
        FROM #Test AS T
        WHERE T.id >= 101
        AND T.id <= 169
        AND T.id % 10 > 0;

    We get a similar-looking plan. If you look closely, you might notice that the line connecting the two icons is a little thinner than before.  The first query is estimated to produce 61.9167 rows – very close to the 63 rows we know the query will return.  The second query presents a tougher challenge for SQL Server because it doesn’t know how to predict the selectivity of the modulo expression (T.id % 10 > 0).  Without that last line, the second query is estimated to produce 68.1667 rows – a slight overestimate.  Adding the opaque modulo expression results in SQL Server guessing at the selectivity.  As you may know, the selectivity guess for a greater-than operation is 30%, so the final estimate is 30% of 68.1667, which comes to 20.45 rows.

    The second difference is that the Clustered Index Seek is costed at 99% of the estimated total for the statement.  For some reason, the final SELECT operator is assigned a small cost of 0.0000484 units; I have absolutely no idea why this is so, or what it models.  Nevertheless, we can compare the total cost for both queries: the first one comes in at 0.0033501 units, and the second at 0.0034054.  The important point is that the second query is costed very slightly higher than the first, even though it is expected to produce many fewer rows (20.45 versus 61.9167).

    If you run the two queries, they produce exactly the same results, and both complete so quickly that it is impossible to measure CPU usage for a single execution.  We can, however, compare the I/O statistics for a single run by running the queries with STATISTICS IO ON:

        Table '#Test'. Scan count 63, logical reads 126, physical reads 0.
        Table '#Test'. Scan count 01, logical reads 002, physical reads 0.

    The query with the IN list uses 126 logical reads (and has a ‘scan count’ of 63), while the second query form completes with just 2 logical reads (and a ‘scan count’ of 1).  It is no coincidence that 126 = 63 * 2, by the way.  It is almost as if the first query is doing 63 seeks, compared to one for the second query. In fact, that is exactly what it is doing.  There is no indication of this in the graphical plan, or the tool-tip that appears when you hover your mouse over the Clustered Index Seek icon.
    To see the 63 seek operations, you have to click on the Seek icon and look in the Properties window (press F4, or right-click and choose from the menu): The Seek Predicates list shows a total of 63 seek operations – one for each of the values from the IN list contained in the first query.  I have expanded the first seek node to show the details; it is seeking down the clustered index to find the entry with the value 101.  Each of the other 62 nodes expands similarly, and the same information is contained (even more verbosely) in the XML form of the plan.

    Each of the 63 seek operations starts at the root of the clustered index B-tree and navigates down to the leaf page that contains the sought key value.  Our table is just large enough to need a separate root page, so each seek incurs 2 logical reads (one for the root, and one for the leaf).  We can see the index depth using the INDEXPROPERTY function, or by using a DMV:

        SELECT S.index_type_desc, S.index_depth
        FROM sys.dm_db_index_physical_stats
        (
            DB_ID(N'tempdb'),
            OBJECT_ID(N'tempdb..#Test', N'U'),
            1, 1, DEFAULT
        ) AS S;

    Let’s look now at the Properties window when the Clustered Index Seek from the second query is selected: There is just one seek operation, which starts at the root of the index and navigates the B-tree looking for the first key that matches the Start range condition (id >= 101).  It then continues to read records at the leaf level of the index (following links between leaf-level pages if necessary) until it finds a row that does not meet the End range condition (id <= 169).  Every row that meets the seek range condition is also tested against the Residual Predicate highlighted above (id % 10 > 0), and is only returned if it matches that as well.

    You will not be surprised that the single seek (with a range scan and residual predicate) is much more efficient than 63 singleton seeks.  It is not 63 times more efficient (as the logical reads comparison would suggest), but it is around three times faster.  Let’s run both query forms 10,000 times and measure the elapsed time:

        DECLARE @i INTEGER, @n INTEGER = 10000, @s DATETIME = GETDATE();
        SET NOCOUNT ON;
        SET STATISTICS XML OFF;

        WHILE @n > 0
        BEGIN
            SELECT @i = T.id
            FROM #Test AS T
            WHERE T.id IN
            (
                101,102,103,104,105,106,107,108,109,
                111,112,113,114,115,116,117,118,119,
                121,122,123,124,125,126,127,128,129,
                131,132,133,134,135,136,137,138,139,
                141,142,143,144,145,146,147,148,149,
                151,152,153,154,155,156,157,158,159,
                161,162,163,164,165,166,167,168,169
            );
            SET @n -= 1;
        END;
        PRINT DATEDIFF(MILLISECOND, @s, GETDATE());
        GO
        DECLARE @i INTEGER, @n INTEGER = 10000, @s DATETIME = GETDATE();
        SET NOCOUNT ON;

        WHILE @n > 0
        BEGIN
            SELECT @i = T.id
            FROM #Test AS T
            WHERE T.id >= 101
            AND T.id <= 169
            AND T.id % 10 > 0;
            SET @n -= 1;
        END;
        PRINT DATEDIFF(MILLISECOND, @s, GETDATE());

    On my laptop, running SQL Server 2008 build 4272 (SP2 CU2), the IN form of the query takes around 830ms and the range query about 300ms.  The main point of this post is not performance, however – it is meant as an introduction to the next few parts in this mini-series that will continue to explore scans and seeks in detail.

    When is a seek not a seek?  When it is 63 seeks.

    © Paul White 2011 email: [email protected] twitter: @SQL_kiwi
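    For reference, the INDEXPROPERTY route mentioned above might look like the following sketch; it resolves the auto-generated name of the clustered primary key through sys.indexes rather than hard-coding it:

        SELECT  I.[name] AS index_name,
                INDEXPROPERTY(I.[object_id], I.[name], 'IndexDepth') AS index_depth
        FROM    tempdb.sys.indexes AS I
        WHERE   I.[object_id] = OBJECT_ID(N'tempdb..#Test', N'U');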

  • Execution plan warnings–The final chapter

    - by Dave Ballantyne
    In my previous posts (here and here), I showed examples of some of the execution plan warnings that have been added to SQL Server 2012.  There is one other warning that is of interest to me: “Unmatched Indexes”. Firstly, how do I know this is the final one?  The plan is an XML document, right? So that means that it can have an accompanying XSD.  As an XSD is a schema definition, we can poke around inside it to find interesting things that *could* be in the final XML file. The showplan schema is stored in the folder Microsoft SQL Server\110\Tools\Binn\schemas\sqlserver\2004\07\showplan, and by comparing schemas over releases you can get a really good idea of any new functionality that has been added. Here is the section of the Sql Server 2012 showplan schema that has been interesting me so far:

        <xsd:complexType name="AffectingConvertWarningType">
          <xsd:annotation>
            <xsd:documentation>Warning information for plan-affecting type conversion</xsd:documentation>
          </xsd:annotation>
          <xsd:sequence>
            <!-- Additional information may go here when available -->
          </xsd:sequence>
          <xsd:attribute name="ConvertIssue" use="required">
            <xsd:simpleType>
              <xsd:restriction base="xsd:string">
                <xsd:enumeration value="Cardinality Estimate" />
                <xsd:enumeration value="Seek Plan" />
                <!-- to be extended here -->
              </xsd:restriction>
            </xsd:simpleType>
          </xsd:attribute>
          <xsd:attribute name="Expression" type="xsd:string" use="required" />
        </xsd:complexType>

        <xsd:complexType name="WarningsType">
          <xsd:annotation>
            <xsd:documentation>List of all possible iterator or query specific warnings (e.g. hash spilling, no join predicate)</xsd:documentation>
          </xsd:annotation>
          <xsd:choice minOccurs="1" maxOccurs="unbounded">
            <xsd:element name="ColumnsWithNoStatistics" type="shp:ColumnReferenceListType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="SpillToTempDb" type="shp:SpillToTempDbType" minOccurs="0" maxOccurs="unbounded" />
            <xsd:element name="Wait" type="shp:WaitWarningType" minOccurs="0" maxOccurs="unbounded" />
            <xsd:element name="PlanAffectingConvert" type="shp:AffectingConvertWarningType" minOccurs="0" maxOccurs="unbounded" />
          </xsd:choice>
          <xsd:attribute name="NoJoinPredicate" type="xsd:boolean" use="optional" />
          <xsd:attribute name="SpatialGuess" type="xsd:boolean" use="optional" />
          <xsd:attribute name="UnmatchedIndexes" type="xsd:boolean" use="optional" />
          <xsd:attribute name="FullUpdateForOnlineIndexBuild" type="xsd:boolean" use="optional" />
        </xsd:complexType>

    I especially like the “to be extended here” comment; high hopes that we will see more of these in the future.

    So “Unmatched Indexes” was a warning that I couldn’t get, and many thanks must go to Fabiano Amorim (b|t) for showing me the way.  Filtered indexes were introduced in Sql Server 2008 and are really useful if you only need to index a portion of the data within a table.  However, if your SQL code uses a variable as a predicate on the filtered data that matches the filtered condition, then the filtered index cannot be used as, naturally, the value in the variable may (and probably will) change and therefore the query will need to read data outside the index.  As an aside, you could use OPTION (RECOMPILE) here, in which case the optimizer will build a plan specific to the variable values and use the filtered index, but that can bring about other problems.
    To demonstrate this warning, we need to generate some test data:

        DROP TABLE #TestTab1
        GO
        CREATE TABLE #TestTab1
        (
            Col1 Int NOT NULL,
            Col2 Char(7500) NOT NULL,
            Quantity Int NOT NULL
        )
        GO
        INSERT INTO #TestTab1
        VALUES (1,1,1),(1,2,5),(1,2,10),(1,3,20),
               (2,1,101),(2,2,105),(2,2,110),(2,3,120)
        GO

    and then add a filtered index:

        CREATE INDEX ixFilter ON #TestTab1 (Col1)
        WHERE Quantity = 122

    Now if we execute

        SELECT COUNT(*) FROM #TestTab1 WHERE Quantity = 122

    we will see the filtered index being scanned. But if we parameterize the query:

        DECLARE @i INT = 122
        SELECT COUNT(*) FROM #TestTab1 WHERE Quantity = @i

    The plan is very different: a table scan, as the value of the variable used in the predicate can change at run time, and we also see the familiar warning triangle. If we now look at the properties pane, we will see two pieces of information, “Warnings” and “UnmatchedIndexes”. So, handily, we are being told which filtered index is not being used due to parameterization.
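    For completeness, here is a sketch of the OPTION (RECOMPILE) variant mentioned in the aside above; the hint lets the optimizer see the run-time value of @i, so the filtered index can match again, at the cost of a compile on every execution:

        DECLARE @i INT = 122;
        SELECT COUNT(*)
        FROM #TestTab1
        WHERE Quantity = @i
        OPTION (RECOMPILE);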

  • Git commit messages with nvie branching model

    - by eykanal
    This Git branching model recommends branching for all development efforts and merging when complete:

    1. Branch
    2. Develop
    3. Merge when complete

    I'm wondering how this works in practice, given that performing a merge off this model will simply add a commit to the develop branch with whatever commit message happened to be the last one in line. Do people using this model do an interactive rebase on the feature branch before committing? If not, how do you ensure that the commits make sense on the main branch?
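    For reference, the merge step in this model is conventionally run with --no-ff, which forces a real merge commit, and the -m message on that commit is where the feature-level summary goes (branch and message below are illustrative):

        git checkout develop
        git merge --no-ff -m "Merge feature/login: add OAuth2 login flow" feature/login
        git branch -d feature/login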

  • Is it OK to repeat code for unit tests?

    - by Pete
    I wrote some sorting algorithms for a class assignment and I also wrote a few tests to make sure the algorithms were implemented correctly. My tests are only like 10 lines long and there are 3 of them, but only 1 line changes between the 3, so there is a lot of repeated code. Is it better to refactor this code into another method that is then called from each test? Wouldn't I then need to write another test to test the refactoring? Some of the variables can even be moved up to the class level. Should testing classes and methods follow the same rules as regular classes/methods? Here's an example:

        [TestMethod]
        public void MergeSortAssertArrayIsSorted()
        {
            int[] a = new int[1000];
            Random rand = new Random(DateTime.Now.Millisecond);
            for (int i = 0; i < a.Length; i++)
            {
                a[i] = rand.Next(Int16.MaxValue);
            }
            int[] b = new int[1000];
            a.CopyTo(b, 0);
            List<int> temp = b.ToList();
            temp.Sort();
            b = temp.ToArray();
            MergeSort merge = new MergeSort();
            merge.mergeSort(a, 0, a.Length - 1);
            CollectionAssert.AreEqual(a, b);
        }

        [TestMethod]
        public void InsertionSortAssertArrayIsSorted()
        {
            int[] a = new int[1000];
            Random rand = new Random(DateTime.Now.Millisecond);
            for (int i = 0; i < a.Length; i++)
            {
                a[i] = rand.Next(Int16.MaxValue);
            }
            int[] b = new int[1000];
            a.CopyTo(b, 0);
            List<int> temp = b.ToList();
            temp.Sort();
            b = temp.ToArray();
            InsertionSort merge = new InsertionSort();
            merge.insertionSort(a);
            CollectionAssert.AreEqual(a, b);
        }
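    For illustration, a sketch of that refactoring (the sort classes and method signatures are taken from the question): the arrange/assert plumbing moves into a helper, and each test passes in the one line that differs. The helper is exercised by every test that calls it, so it usually doesn't need a dedicated test of its own:

        [TestMethod]
        public void MergeSortAssertArrayIsSorted()
        {
            AssertSortsCorrectly(a => new MergeSort().mergeSort(a, 0, a.Length - 1));
        }

        [TestMethod]
        public void InsertionSortAssertArrayIsSorted()
        {
            AssertSortsCorrectly(a => new InsertionSort().insertionSort(a));
        }

        // Shared arrange/assert logic; only the sort call varies per test.
        private static void AssertSortsCorrectly(Action<int[]> sort)
        {
            int[] a = new int[1000];
            Random rand = new Random(DateTime.Now.Millisecond);
            for (int i = 0; i < a.Length; i++)
                a[i] = rand.Next(Int16.MaxValue);

            int[] expected = (int[])a.Clone();
            Array.Sort(expected);   // trusted reference sort

            sort(a);                // sort under test

            CollectionAssert.AreEqual(expected, a);
        }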

  • How to cleanly add after-the-fact commits from the same feature into git tree

    - by Dennis
    I am one of two developers on a system. I make most of the commits at this time period. My current git workflow is as such: there is a master branch only (no develop/release). I make a new branch when I want to do a feature, do lots of commits, and then when I'm done, I merge that branch back into master, and usually push it to remote. ...except, I am usually not done. I often come back to alter one thing or another, and every time I think it is done, but it can be 3-4 commits before I am really done and move onto something else. Problem: The problem I have now is that my feature branch tree is merged and pushed into master and remote master, and then I realize that I am not really done with that feature, as in I have finishing touches I want to add, where finishing touches may be cosmetic only, or may be significant, but they still belong to that one feature I just worked on. What I do now: Currently, when I have extra after-the-fact commits like this, I solve this problem by rolling back my merge, and re-merging my feature branch into master with my new commits, and I do that so that the git tree looks clean. One clean feature branch branched out of master and merged back into it. I then push --force my changes to origin, since my origin doesn't see much traffic at the moment, so I can almost count on things being safe, or I can even talk to the other dev if I have to coordinate. But I know it is not a good way to do this in general, as it rewrites what others may have already pulled, causing potential issues. And it did happen even with my dev, where git had to do an extra weird merge when our trees diverged. Other ways to solve this which I deem to be not so great: The next best way is to just make those extra commits to the master branch directly, be it a fast-forward merge, or not. It doesn't make the tree look as pretty as my current way of solving this, but then it's not rewriting history. Yet another way is to wait. Maybe wait 24 hours and not push things to origin. That way I can rewrite things as I see fit. The con of this approach is time wasted waiting, when people may be waiting for a fix now. Yet another way is to make a "new" feature branch every time I realize I need to fix something extra. I may end up with things like feature-branch, feature-branch-html-fix, feature-branch-checkbox-fix, and so on, kind of polluting the git tree somewhat. Is there a way to manage what I am trying to do without the drawbacks I described? I'm going for clean-looking history here, but maybe I need to drop this goal, if technically it is not a possibility.
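    For the "wait" option, Git has tooling that makes the waiting cheap (a sketch; abc1234 stands for whatever commit is being touched up): record each afterthought as a fixup commit on the still-unpushed feature branch, then fold them all into their targets with one autosquash rebase before merging and pushing:

        # on the feature branch, before it is merged/pushed
        git commit --fixup abc1234

        # later, fold every fixup commit into the commit it amends
        git rebase -i --autosquash master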

  • Routing to the actions with same names but different parameters

    - by zerkms
    I have this set of routes:

        routes.MapRoute(
            "IssueType",
            "issue/{type}",
            new { controller = "Issue", action = "Index" }
        );

        routes.MapRoute(
            "Default", // Route name
            "{controller}/{action}/{id}", // URL with parameters
            new { controller = "Home", action = "Index", id = UrlParameter.Optional } // Parameter defaults
        );

    Here is the controller class:

        public class IssueController : Controller
        {
            public ActionResult Index()
            {
                // todo: redirect to concrete type
                return View();
            }

            public ActionResult Index(string type)
            {
                return View();
            }
        }

    Why, when I request http://host/issue, do I get "The current request for action 'Index' on controller type 'IssueController' is ambiguous between the following action methods:"? I expect the first method to act when there are no parameters, and the second one when a parameter is specified. Where did I make a mistake?

    UPD: possible duplicate: http://stackoverflow.com/questions/436866/can-you-overload-controller-methods-in-asp-net-mvc
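    One common fix (a sketch): ASP.NET MVC does not choose between overloads by route values alone, so collapse the two actions into one and branch on a null parameter, which is what the route supplies when the URL omits {type}:

        public class IssueController : Controller
        {
            // Handles both /issue and /issue/{type};
            // 'type' arrives as null when the route supplies no value.
            public ActionResult Index(string type)
            {
                if (string.IsNullOrEmpty(type))
                {
                    // no type given: show the overview, or redirect to a concrete type
                    return View();
                }
                return View();
            }
        }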

  • Codeigniter benchmarking, where are these ms coming from?

    - by ropstah
    I'm in the process of benchmarking my website.

        class Home extends Controller {

            function Home() {
                parent::Controller();
                $this->benchmark->mark('Constructor_start');
                $this->output->enable_profiler(TRUE);
                $this->load->library('MasterPage');
                $this->benchmark->mark('Constructor_end');
            }

            function index() {
                $this->benchmark->mark('Index_start');
                $this->masterpage->setMasterPage('master/home');
                $this->masterpage->addContent('home/index', 'page');
                $this->masterpage->show();
                $this->benchmark->mark('Index_start');
            }
        }

    These are the results:

        Loading Time Base Classes: 0.0076
        Constructor: 0.0007
        Index: 0.0440
        Controller Execution Time ( Home/ Index ): 0.4467
        Total Execution Time: 0.4545

    I understand the following:

        Loading Time Base Classes (0.0076)
        Constructor (0.0007)
        Index (0.0440)

    But where is the rest of the time coming from?
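    Two things worth checking, sketched below: the second mark in index() is 'Index_start' again, and CodeIgniter's profiler only times pairs named X_start/X_end, so that pair is suspect; and wrapping $this->masterpage->show() in its own marks will show whether view loading and rendering inside show() accounts for the missing ~0.4s (it happens within the controller's execution window):

        function index() {
            $this->benchmark->mark('Index_start');
            $this->masterpage->setMasterPage('master/home');
            $this->masterpage->addContent('home/index', 'page');
            $this->benchmark->mark('show_start');
            $this->masterpage->show();   // view loading + rendering happens in here
            $this->benchmark->mark('show_end');
            $this->benchmark->mark('Index_end');  // was 'Index_start' in the original
        }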

  • Rails route error? uninitialized constant ActiveResource::Base

    - by Marco
    I'm following the Getting Started with Rails guide but ran into an issue opening http://localhost:3000.

    Shell output:

        [2010-03-23 19:19:14] ERROR NameError: uninitialized constant ActiveResource::Base

    Error in the browser:

        Internal Server Error
        uninitialized constant ActiveResource::Base
        WEBrick/1.3.1 (Ruby/1.8.7/2009-06-12) at localhost:3000

    I followed the directions exactly as they were specified in the guide:

    1. Ran rails generate controller home index
    2. Removed index.html
    3. Added root :to => "home#index" to config/routes.rb

    I checked app/views/home/index.html.erb and it is indeed there. I then used rails server to launch the server. At first attempt the browser loads a blank page, but afterwards it starts showing the browser error above. Why is it that Rails can't locate the index.html.erb file? Or is the error something different? (Running Rails 3.0 beta with Ruby 1.8.7.)
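    For reference, a minimal config/routes.rb for this step under Rails 3.0 might look like the following sketch (Blog stands in for whatever application name rails new generated):

        Blog::Application.routes.draw do
          get "home/index"
          root :to => "home#index"
        end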

  • window.focus(), self.focus() not working in firefox

    - by Nisanth
    Hi all. I am developing a chat application. I have multiple chat windows, and I want to know which window contains a new message. I have the following code:

        function getCount() {
            $.ajax({
                type: "POST",
                url: baseUrl + '/Chat/count',
                data: "chat_id=" + document.ajax.chat_id.value,
                success: function(msg) {
                    if (msg == 'new1') {
                        self.focus();
                        //window.focus();
                    }
                }
            });
        }

    An operator may be attending both chats. For example, the URLs are like:

        http://localhost/nisanth/admin/Chat/index/chatId/15
        http://localhost/nisanth/admin/Chat/index/chatId/16
        http://localhost/nisanth/user/Chat/index/chatId/15
        http://localhost/nisanth/user/Chat/index/chatId/16

    If user 16 enters a message, I need to focus http://localhost/nisanth/admin/Chat/index/chatId/16. This code works fine in IE but not in Firefox. Please give me a solution; the above code is in the same HTML.
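    One caveat worth knowing: Firefox by default does not let scripts raise windows via window.focus() (it is governed by a browser preference), so chat apps commonly fall back to flashing the page title until the user clicks back. A hedged sketch of that fallback, which could be called in place of self.focus():

        var baseTitle = document.title;
        var flasher = null;

        function flashTitle() {
            if (flasher) return;  // already flashing
            flasher = setInterval(function () {
                document.title = (document.title == baseTitle) ? "New message!" : baseTitle;
            }, 1000);
        }

        window.onfocus = function () {
            // user came back: stop flashing and restore the title
            if (flasher) { clearInterval(flasher); flasher = null; }
            document.title = baseTitle;
        };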

  • There is insufficient system memory to run this query when creating temporary table

    - by phenevo
        StringBuilder query = new StringBuilder();
        query.Append("CREATE TABLE #Codes (Code nvarchar(100) collate database_default ) ");
        query.Append("Insert into #Codes (Code) ");
        int lengthOfCodesArray = targetCodes.Length;
        for (int index = 0; index < lengthOfCodesArray; index++)
        {
            string targetCode = targetCodes[index];
            query.Append("Select N'" + targetCode + "' ");
            if (index != lengthOfCodesArray - 1)
            {
                query.Append("Union All ");
            }
        }
        query.Append("drop table #Codes ");

    On cmd.ExecuteReader() I get "There is insufficient system memory to run this query" when creating the temporary table. The weird thing is that with 25k codes it is OK, but with 5k I get this error. The initial database size is 262 MB. The length of each code is 15 on average.
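    One common workaround (a sketch, assuming the real code reads from #Codes between the insert and the drop): very long SELECT ... UNION ALL chains are expensive for the parser and optimizer, and capping how many arms go into each INSERT keeps the statements small. Batched INSERTs or a table-valued/parameterized approach would also avoid building SQL from strings:

        // Build INSERT statements in batches of, say, 500 codes each.
        const int batchSize = 500;
        var query = new StringBuilder();
        query.Append("CREATE TABLE #Codes (Code nvarchar(100) collate database_default); ");
        for (int start = 0; start < targetCodes.Length; start += batchSize)
        {
            query.Append("INSERT INTO #Codes (Code) ");
            int end = Math.Min(start + batchSize, targetCodes.Length);
            for (int i = start; i < end; i++)
            {
                query.Append("SELECT N'" + targetCodes[i] + "' ");
                if (i != end - 1) query.Append("UNION ALL ");
            }
        }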

  • SQL SERVER – Weekly Series – Memory Lane – #005

    - by pinaldave
    Here is the list of curated articles of SQLAuthority.com across all these years. Instead of just listing all the articles, I have selected a few of my most favorite articles and have listed them here with additional notes below them. Let me know which one of the following is your favorite article from memory lane.

    2006

    SQL SERVER – Cursor to Kill All Process in Database
    I indeed wrote this cursor, and when I look back, I wonder how naive I was to write it. The reason for writing this cursor was to free up my database from any existing connections so I could do database operations. This worked fine, but there could be a potentially big issue if an important transaction was killed by this process. There is another way to achieve the same thing, where we can use the ALTER syntax to take the database into single-user mode. Read more about that over here and here.

    2007

    Rules of Third Normal Form and Normalization Advantage – 3NF
    The rules of 3NF are mentioned here:
    - Make a separate table for each set of related attributes, and give each table a primary key.
    - If an attribute depends on only part of a multi-valued key, remove it to a separate table.
    - If attributes do not contribute to a description of the key, remove them to a separate table.

    Correct Syntax for Stored Procedure SP
    Sometimes a simple question is the most important question. I often see incorrectly written stored procedures in industry. Some write code after the outermost BEGIN…END and some write code after the GO statement. In this brief blog post, I have attempted to explain the same.

    2008

    Switch Between Result Pane and Query Pane – SQL Shortcut
    Many times when I am writing a query I have to scroll the result displayed in the result set. Most developers use the mouse to switch between the Query Pane and the Result Pane. There are a few developers who are crazy about keyboard shortcuts. F6 is the key which can be used to switch between the query pane and the tabs of the result pane.

    Interesting Observation – Use of Index and Execution Plan
    Query optimization is a complex game and it has its own rules. From the example in the article we discovered that the Query Optimizer does not always use the clustered index to retrieve data; sometimes a non-clustered index provides optimal performance for retrieving the Primary Key. When all the rows and columns are selected, the Primary Key should be used to select data as it provides optimal performance.

    2009

    Interesting Observation – TOP 100 PERCENT and ORDER BY
    If you pull up any application or system where more than 100 SQL Server views are created, I am very confident that in one or two places you will notice the scenario wherein, in a view, the ORDER BY clause is used with TOP 100 PERCENT. SQL Server 2008 does not throw an error for a VIEW with an ORDER BY clause; moreover, it does not acknowledge its presence either. In this article we have taken three perfect examples and demonstrated which clause we should use when.

    Comma Separated Values (CSV) from Table Column
    A very common question: how do you create comma separated values from a table in the database? The answer is also very common if we use XML. Check out this article for quick learning on the same subject (a sketch of the trick appears below, after the Azure note).

    Azure Start Guide – Step by Step Installation Guide
    Though the Azure portal has changed quite a bit since I wrote this article, the concepts used in this article are not old. They are still valid, and many of the functions still work as mentioned in the article. I believe this one article will put you on track to use Azure!
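    As an aside, the XML trick behind the CSV article above is usually some variant of the following (a sketch against a hypothetical dbo.Customers table):

        SELECT STUFF(
            (SELECT ',' + C.[Name]
             FROM dbo.Customers AS C
             ORDER BY C.[Name]
             FOR XML PATH('')), 1, 1, '') AS csv;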
    Size of Index Table for Each Index – Solution
    Earlier I posted a small question on this blog and requested help from readers to participate here and provide a solution. The puzzle was to write a query that returns the size of each index on a particular table. We needed a query that returns an additional column in the listed query containing the size of the index. This article presents two of the best solutions from the puzzle.

    2010

    Well, this week in 2010 was the week of puzzles, as I posted three interesting puzzles. Till today I notice pretty good interest in the puzzles. They are tricky, but they surely bring great value if you have been a database developer for a long time. I suggest you go over these puzzles and their answers. Did you really know all of the answers? I am confident that reading the following three blog posts will surely help you enhance your experience with T-SQL.
    - SQL SERVER – Challenge – Puzzle – Usage of FAST Hint
    - SQL SERVER – Puzzle – Challenge – Error While Converting Money to Decimal
    - SQL SERVER – Challenge – Puzzle – Why does RIGHT JOIN Exists

    2011

    DMV sys.dm_os_sys_info Column Name Changed in SQL Server 2012
    Have you ever faced a situation where something does not work? When you try to fix it, you enjoy fixing it and start to appreciate the breaking changes. Well, this is exactly how I felt yesterday. Before I begin my story, I want to candidly state that I do not encourage anybody to use * in the SELECT statement. Now the disclaimer is over – I suggest you read the original story – you will love it!

    Get Directory Structure using Extended Stored Procedure xp_dirtree
    Here is a question for you: why would you do something in SQL Server when you can do the same task in the command prompt much more easily? Well, the answer is that sometimes there are real use cases where we have to do such things. This is a similar example, where I have demonstrated how in SQL Server 2012 we can use an extended stored procedure to retrieve the directory structure.

    Reference: Pinal Dave (http://blog.sqlauthority.com)

    Filed under: Memory Lane, PostADay, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, T SQL, Technology

  • Getting template metaprogramming compile-time constants at runtime

    - by GMan - Save the Unicorns
    Background

    Consider the following:

        template <unsigned N>
        struct Fibonacci
        {
            enum { value = Fibonacci<N-1>::value + Fibonacci<N-2>::value };
        };

        template <>
        struct Fibonacci<1>
        {
            enum { value = 1 };
        };

        template <>
        struct Fibonacci<0>
        {
            enum { value = 0 };
        };

    This is a common example and we can get the value of a Fibonacci number as a compile-time constant:

        int main(void)
        {
            std::cout << "Fibonacci(15) = ";
            std::cout << Fibonacci<15>::value;
            std::cout << std::endl;
        }

    But you obviously cannot get the value at runtime:

        int main(void)
        {
            std::srand(static_cast<unsigned>(std::time(0)));

            // ensure the table exists up to a certain size
            // (even though the rest of the code won't work)
            static const unsigned fibbMax = 20;
            Fibonacci<fibbMax>::value;

            // get index into sequence
            unsigned fibb = std::rand() % fibbMax;

            std::cout << "Fibonacci(" << fibb << ") = ";
            std::cout << Fibonacci<fibb>::value;
            std::cout << std::endl;
        }

    Because fibb is not a compile-time constant.

    Question

    So my question is: what is the best way to peek into this table at run-time? The most obvious solution (and "solution" should be taken lightly) is to have a large switch statement:

        unsigned fibonacci(unsigned index)
        {
            switch (index)
            {
            case 0: return Fibonacci<0>::value;
            case 1: return Fibonacci<1>::value;
            case 2: return Fibonacci<2>::value;
            // ...
            case 20: return Fibonacci<20>::value;
            default: return fibonacci(index - 1) + fibonacci(index - 2);
            }
        }

        int main(void)
        {
            std::srand(static_cast<unsigned>(std::time(0)));

            static const unsigned fibbMax = 20;

            // get index into sequence
            unsigned fibb = std::rand() % fibbMax;

            std::cout << "Fibonacci(" << fibb << ") = ";
            std::cout << fibonacci(fibb);
            std::cout << std::endl;
        }

    But now the size of the table is very hard-coded and it wouldn't be easy to expand it to, say, 40. The only one I came up with that has a similar method of query is this:

        template <int TableSize = 40>
        class FibonacciTable
        {
        public:
            enum { max = TableSize };

            static unsigned get(unsigned index)
            {
                if (index == TableSize)
                {
                    return Fibonacci<TableSize>::value;
                }
                else
                {
                    // too far, pass downwards
                    return FibonacciTable<TableSize - 1>::get(index);
                }
            }
        };

        template <>
        class FibonacciTable<0>
        {
        public:
            enum { max = 0 };

            static unsigned get(unsigned)
            {
                // doesn't matter, nowhere else to go.
                // must be 0, or the original value was
                // not in table
                return 0;
            }
        };

        int main(void)
        {
            std::srand(static_cast<unsigned>(std::time(0)));

            // get index into sequence
            unsigned fibb = std::rand() % FibonacciTable<>::max;

            std::cout << "Fibonacci(" << fibb << ") = ";
            std::cout << FibonacciTable<>::get(fibb);
            std::cout << std::endl;
        }

    Which seems to work great. The only two problems I see are:
    - Potentially large call stack, since calculating Fibonacci<2> requires we go through TableMax all the way to 2, and:
    - If the value is outside of the table, it returns zero as opposed to calculating it.

    So is there something I am missing? It seems there should be a better way to pick out these values at runtime. A template metaprogramming version of a switch statement, perhaps, that generates a switch statement up to a certain number? Thanks in advance.
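    For what it's worth, here is a sketch of one commonly suggested alternative in the same C++03 style: have a recursive template fill a plain static array from the Fibonacci<N> constants once, then index it at runtime. It keeps the table size in one place and avoids the deep per-lookup recursion, though it still returns 0 outside the table:

        static const unsigned fibbMax = 20;

        // Recursively store Fibonacci<N>::value into table[N], then recurse down.
        template <unsigned N>
        struct FibFiller
        {
            static void fill(unsigned* table)
            {
                table[N] = Fibonacci<N>::value;
                FibFiller<N - 1>::fill(table);
            }
        };

        template <>
        struct FibFiller<0>
        {
            static void fill(unsigned* table)
            {
                table[0] = Fibonacci<0>::value;
            }
        };

        unsigned fibonacci(unsigned index)
        {
            static unsigned table[fibbMax + 1];
            static bool filled = false;
            if (!filled)  // one-time fill on the first call
            {
                FibFiller<fibbMax>::fill(table);
                filled = true;
            }
            return index <= fibbMax ? table[index] : 0;  // same out-of-range behavior as above
        }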

    Read the article

  • How to set Flex combobox cursor position

    - by crazy horse
    I have a combobox implementation as follows: based on user input (minimum 2 chars) in the editable combobox, the data provider is refreshed and the drop-down opened, showing different data sets as the user input varies. The problem is that after the drop-down opens, the cursor moves back to the beginning. So for instance, the user types in "ab" and wants to type "c" to form the search string "abc". Due to the cursor resetting its position to 0, the search string instead ends up as "cab". Here's what I tried already (it doesn't work): textInput.mx_internal::getTextField().setSelection(index, index); where index = the length of the user input. This selects text from index to index (which effectively un-selects text) and is supposed to place the cursor at the end. Any thoughts?

    Read the article

  • Change the Views location

    - by Vinni
    I am developing a website in MVC 2.0 and I want to change the View folder location. I wanted to keep the Views folder inside other folders, but when I try to do so I get the following errors: The view 'Index' or its master was not found. The following locations were searched: ~/Views/Search/Index.aspx ~/Views/Search/Index.ascx ~/Views/Shared/Index.aspx ~/Views/Shared/Index.ascx Description: An unhandled exception occurred during the execution of the current web request. Please review the stack trace for more information about the error and where it originated in the code. My Views folder will be in ~/XYZ/ABC/Views instead of ~/Views. Will I get any problems if I change the default Views folder location? Do I need to change anything in the HTML Helper classes? I don't know much about MVC, as this is my first project, and I don't want to risk it. Please help me out.

    Read the article

  • How to split a string into two parts in VB.NET

    - by amol kadam
    Hi, I'm Amol Kadam. I want to know how to split a string into two parts. My string is in time format (12:12) and I want to separate it into hour and minute parts; the datatype of all variables is String. For the hour variable I used strTimeHr and for the minute strTimeMin. I tried the code below, but there was an exception: "Index and length must refer to a location within the string. Parameter name: length". If Not (objDS.Tables(0).Rows(0)("TimeOfAccident") Is Nothing Or objDS.Tables(0).Rows(0)("TimeOfAccident") Is System.DBNull.Value) Then strTime = objDS.Tables(0).Rows(0)("TimeOfAccident") 'strTime takes the value 12:12 index = strTime.IndexOf(":") 'index takes the value 2 lastIndex = strTime.Length 'lastIndex takes the value 5 strTimeHr = strTime.Substring(0, index) 'strTimeHr correctly takes the value 12 strTimeMin = strTime.Substring(index + 1, lastIndex) 'BUT HERE THE PROBLEM OCCURS: strTimeMin doesn't take any value Me.NumUpDwHr.Text = strTimeHr Me.NumUpDwMin.Text = strTimeMin End If
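
    The exception arises because VB.NET's Substring takes (startIndex, length), not (startIndex, endIndex): Substring(index + 1, lastIndex) asks for 5 characters starting at position 3 of a 5-character string. Dropping the second argument – Substring(index + 1) – or splitting on ":" fixes it. A minimal sketch of the equivalent slicing, written in TypeScript for illustration only (note that JavaScript's substring takes an end index rather than a length, so omitting it likewise reads to the end of the string):

      // "12:12" -> hour and minute parts; same shape as the corrected VB.NET call
      const strTime = "12:12";
      const sep = strTime.indexOf(":");              // 2
      const strTimeHr = strTime.substring(0, sep);   // "12"
      const strTimeMin = strTime.substring(sep + 1); // "12" (reads to the end)
      console.log(strTimeHr, strTimeMin);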

    Read the article

  • Global Handler in IIS7 (Classic Mode) gives "Failed to Execute" error

    - by Akash Kava
    I have made a custom handler called MyIndexHandler and I want it to handle *.index requests. As I want this handler to be executed by any website, I installed it with the following steps in IIS Manager: I added MyIndexHandler.dll to the GAC; I added a managed module for *.index (the drop-down in IIS displays my index handler correctly, so it did find it in the GAC); and I added a script map for aspnet_isapi.dll for *.index (this is required for classic mode). Now in any of my websites, if I try xyz.index, my handler does not get called and IIS returns "Failed to execute URL". This normally happens only when IIS is unable to find the mapped handler, but I have already defined the mapped handler at the IIS level, and when I try to add an entry in web.config manually for this handler, it tells me I cannot define multiple handlers. It means that IIS finds my handler mapping, but for some reason it does not execute it.

    Read the article

  • Zipping Rx IObservable with infinite number set

    - by Toni Kielo
    I have an IObservable [named rows in the sample below] from the Reactive Extensions framework and I want to add index numbers to each object it observes. I've tried to implement this using the Zip function: rows.Zip(Enumerable.Range(1, int.MaxValue), (row, index) => new { Row = row, Index = index }) .Subscribe(a => ProcessRow(a.Row, a.Index), () => Completed()); ...but unfortunately this throws an ArgumentOutOfRangeException: "Specified argument was out of the range of valid values. Parameter name: disposables". Am I misunderstanding the Zip function, or is there a problem with my code? The Range part of the code doesn't seem to be the problem, and the IObservable isn't receiving any events yet.
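
    In Rx for .NET, one common way to sidestep zipping with an eager Enumerable.Range altogether is the Select overload that passes an element index, e.g. rows.Select((row, i) => new { Row = row, Index = i + 1 }). A sketch of the same idea in RxJS/TypeScript (assuming RxJS 7), where the map operator also supplies a zero-based index; rows$ here is a hypothetical stand-in for the real stream:

      import { of, map } from "rxjs";

      // hypothetical source standing in for `rows`
      const rows$ = of("row-a", "row-b", "row-c");

      rows$
        .pipe(map((row, index) => ({ row, index: index + 1 })))
        .subscribe({
          next: ({ row, index }) => console.log(index, row), // 1 row-a, 2 row-b, ...
          complete: () => console.log("completed"),
        });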

    Read the article

  • Question about function returning array data

    - by Doug
    var grossBrackets = new Array( '300', '400', '500', '600', '700', '800', '900', '1000' ); function bracketSort( itemToSort ) { for( index = 0 ; index < grossBrackets.length ; index++ ) { if ( itemToSort < grossBrackets[index] ) { bracketData[index]++; } else if ( itemToSort > grossBrackets[7] ) { grossBrackets[7]++; } } return bracketData; } This is my current code, and I basically want to sort the data into their proper brackets. My source code is really long, but when I input these numbers into the function: 200.18 200.27 200.36 200.45 200.54, bracketData prints 5,5,5,5,5,5,5,5. What is going wrong, or is there a better way to do this? Brackets: <300, <400, <500, <600, <700, <800, <900, <1000, greater than 1000
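
    The 5,5,5,5,5,5,5,5 output comes from the loop continuing after a match – every bracket above the item also gets incremented – and from the else-if branch bumping grossBrackets[7] itself instead of a counter. A sketch of corrected bucketing logic, written in TypeScript for illustration (bracketData here includes a ninth slot for the "greater than 1000" bucket):

      const grossBrackets = [300, 400, 500, 600, 700, 800, 900, 1000];
      // one counter per bracket, plus a final slot for items above all brackets
      const bracketData: number[] = new Array(grossBrackets.length + 1).fill(0);

      function bracketSort(itemToSort: number): void {
        for (let index = 0; index < grossBrackets.length; index++) {
          if (itemToSort < grossBrackets[index]) {
            bracketData[index]++; // first bracket the item fits under
            return;               // stop here so each item is counted exactly once
          }
        }
        bracketData[grossBrackets.length]++; // greater than 1000
      }

      [200.18, 200.27, 200.36, 200.45, 200.54].forEach(bracketSort);
      console.log(bracketData); // [5, 0, 0, 0, 0, 0, 0, 0, 0]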

    Read the article

  • How to define an Integer bean in Struts 1.x

    - by ian_scho_es
    Hi. How do you instantiate an Integer bean, assigning a value, in the Struts 1.x framework? <bean:define id="index" type="java.lang.Integer" value="0"/> or <bean:define id="index" type="java.lang.Integer" value="${0}"/> Results in a: java.lang.ClassCastException: java.lang.String <bean:define id="index" type="java.lang.Integer" value="<%=0%>"/> Results in: The method setValue(String) in the type DefineTag is not applicable for the arguments (int) <% java.lang.Integer index = new java.lang.Integer(0); %> Works, but makes my eyes bleed. Note that I had to refactor iterating over a list but am now applying a filter within the iteration. This was the cleanest solution of all! <logic:equal name="aplicacion" property="generico" value="false" indexId="index"> Maybe I need to go about this completely differently. Many thanks.

    Read the article

  • ASP.NET MVC 2 RC 2 returns Area-specific controller when no area specified

    - by Bryan
    I have a basic MVC 2 (RC2) site with one base-level controller ("Home") and one area ("Admin") with one controller ("Abstract"). When I call http://website/Abstract, the Abstract controller in the Admin area gets called even though I haven't specified the area in the URL. To make matters worse, it doesn't seem to know it's under Admin, because it can't find the associated view and just returns: The view 'Index' or its master was not found. The following locations were searched: ~/Views/Abstract/Index.aspx ~/Views/Abstract/Index.ascx ~/Views/Shared/Index.aspx ~/Views/Shared/Index.ascx Am I doing something wrong? Is this a bug? A feature?

    Read the article

  • Stop loading detail grid at page load.

    - by pirzada
    How can I stop the detail grid from loading at page load? I have a button in the master grid to load the detail grid. Below is the code for the detail grid. jQuery().ready(function () { jQuery("#list10_d").jqGrid({ height: 100, url: '/JQSandbox/MyFullSubGridData?id=0', datatype: "json", mtype: 'POST', colNames: ['Index', 'Name', 'Department', 'Hire Date', 'Supervisor'], colModel: [ { name: 'employeeID', index: 'employeeID', width: 10 }, { width: 30 }, { name: 'employeeDepartment', index: 'employeeDepartment', width: 30 }, { name: 'employeeHiredate', index: 'employeeHiredate', width: 40, sortable: false }, { name: 'employeeSup', index: 'employeeSup', formatter: 'checkbox', align: 'center', width: 30, search: false } ], loadtext: "", loadui: "block", width: 500, rowNum: 5, rowList: [5, 10, 20], pager: '#pager10_d', sortname: 'employeeID', viewrecords: true, sortorder: "asc", multiselect: true, caption: "Detail Grid: 1" }).navGrid('#pager10_d', { add: false, edit: false, del: false }); });
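
    One common approach, sketched here under the assumption that the rest of the options stay as posted, is to initialize the grid with datatype: "local" so no request fires at page load, then switch the datatype to "json" and reload when the master-grid button is clicked. "#loadDetail" is a hypothetical button id; setGridParam and reloadGrid are standard jqGrid calls:

      declare const jQuery: any; // jqGrid rides on jQuery; typings assumed unavailable

      // At setup time, keep the options above but start inert:
      // jQuery("#list10_d").jqGrid({ /* ...same options as above... */ datatype: "local" });

      jQuery("#loadDetail").click(function () {
        jQuery("#list10_d")
          .jqGrid("setGridParam", { datatype: "json", page: 1 }) // re-arm the ajax source
          .trigger("reloadGrid");                                // now fetch the data
      });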

    Read the article

< Previous Page | 113 114 115 116 117 118 119 120 121 122 123 124  | Next Page >