Search Results

Search found 54202 results on 2169 pages for 'jqgrid asp net'.


  • Trying to find a USB device on iPhone with IOKit.framework

    - by HuGeek
    Hi all, I'm working on a project where I need the USB port to communicate with an external device. I have been looking at examples on the net (Apple's /developer/IOKit/usb samples) and trying some others, but I can't even find the device. My code blocks at the point where it asks the iterator for the next entry (a pointer, in fact): IOIteratorNext never returns a good value, so the code stalls. I am using the toolchain and have added IOKit.framework to my project. All I want right now is to communicate, or do something like a ping to a device on the USB bus. I am stuck in FindDevice: I can't manage to enter the while loop because the variable usbDevice is always 0. I have tested my code in a small Mac program and there it works. Thanks. Here is my code: IOReturn ConfigureDevice(IOUSBDeviceInterface **dev) { UInt8 numConfig; IOReturn result; IOUSBConfigurationDescriptorPtr configDesc; //Get the number of configurations result = (*dev)->GetNumberOfConfigurations(dev, &numConfig); if (!numConfig) { return -1; } // Get the configuration descriptor result = (*dev)->GetConfigurationDescriptorPtr(dev, 0, &configDesc); if (result) { NSLog(@"Couldn't get configuration descriptor for index %d (err=%08x)\n", 0, result); return -1; } #ifdef OSX_DEBUG NSLog(@"Number of Configurations: %d\n", numConfig); #endif // Configure the device result = (*dev)->SetConfiguration(dev, configDesc->bConfigurationValue); if (result) { NSLog(@"Unable to set configuration to value %d (err=%08x)\n", 0, result); return -1; } return kIOReturnSuccess; } IOReturn FindInterfaces(IOUSBDeviceInterface *dev, IOUSBInterfaceInterface **itf) { IOReturn kr; IOUSBFindInterfaceRequest request; io_iterator_t iterator; io_service_t usbInterface; IOUSBInterfaceInterface **intf = NULL; IOCFPlugInInterface **plugInInterface = NULL; HRESULT res; SInt32 score; UInt8 intfClass; UInt8 intfSubClass; UInt8 intfNumEndpoints; int pipeRef; CFRunLoopSourceRef runLoopSource; NSLog(@"Debut FindInterfaces \n"); request.bInterfaceClass = kIOUSBFindInterfaceDontCare; request.bInterfaceSubClass = kIOUSBFindInterfaceDontCare; request.bInterfaceProtocol = kIOUSBFindInterfaceDontCare; request.bAlternateSetting = kIOUSBFindInterfaceDontCare; kr = (*dev)->CreateInterfaceIterator(dev, &request, &iterator); usbInterface = IOIteratorNext(iterator); IOObjectRelease(iterator); NSLog(@"Interface found.\n"); kr = IOCreatePlugInInterfaceForService(usbInterface, kIOUSBInterfaceUserClientTypeID, kIOCFPlugInInterfaceID, &plugInInterface, &score); kr = IOObjectRelease(usbInterface); // done with the usbInterface object now that I have the plugin if ((kIOReturnSuccess != kr) || !plugInInterface) { NSLog(@"unable to create a plugin (%08x)\n", kr); return -1; } // I have the interface plugin. I need the interface interface res = (*plugInInterface)->QueryInterface(plugInInterface, CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID), (LPVOID*) &intf); (*plugInInterface)->Release(plugInInterface); // done with this if (res || !intf) { NSLog(@"couldn't create an IOUSBInterfaceInterface (%08x)\n", (int) res); return -1; } // Now open the interface. This will cause the pipes to be instantiated that are // associated with the endpoints defined in the interface descriptor. 
kr = (*intf)->USBInterfaceOpen(intf); if (kIOReturnSuccess != kr) { NSLog(@"unable to open interface (%08x)\n", kr); (void) (*intf)->Release(intf); return -1; } kr = (*intf)->CreateInterfaceAsyncEventSource(intf, &runLoopSource); if (kIOReturnSuccess != kr) { NSLog(@"unable to create async event source (%08x)\n", kr); (void) (*intf)->USBInterfaceClose(intf); (void) (*intf)->Release(intf); return -1; } CFRunLoopAddSource(CFRunLoopGetCurrent(), runLoopSource, kCFRunLoopDefaultMode); if (!intf) { NSLog(@"Interface is NULL!\n"); } else { *itf = intf; } NSLog(@"End of FindInterface \n \n"); return kr; } unsigned int FindDevice(void *refCon, io_iterator_t iterator) { kern_return_t kr; io_service_t usbDevice; IOCFPlugInInterface **plugInInterface = NULL; HRESULT result; SInt32 score; UInt16 vendor; UInt16 product; UInt16 release; unsigned int count = 0; NSLog(@"Searching Device....\n"); while (usbDevice = IOIteratorNext(iterator)) { // create intermediate plug-in NSLog(@"Found a device!\n"); kr = IOCreatePlugInInterfaceForService(usbDevice, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &plugInInterface, &score); kr = IOObjectRelease(usbDevice); if ((kIOReturnSuccess != kr) || !plugInInterface) { NSLog(@"Unable to create a plug-in (%08x)\n", kr); continue; } // Now create the device interface result = (*plugInInterface)->QueryInterface(plugInInterface, CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID), (LPVOID)&dev); // Don't need intermediate Plug-In Interface (*plugInInterface)->Release(plugInInterface); if (result || !dev) { NSLog(@"Couldn't create a device interface (%08x)\n", (int)result); continue; } // check these values for confirmation kr = (*dev)->GetDeviceVendor(dev, &vendor); kr = (*dev)->GetDeviceProduct(dev, &product); //kr = (*dev)->GetDeviceReleaseNumber(dev, &release); //if ((vendor != LegoUSBVendorID) || (product != LegoUSBProductID) || (release != LegoUSBRelease)) { if ((vendor != LegoUSBVendorID) || (product != LegoUSBProductID)) { NSLog(@"Found unwanted device (vendor = %d != %d, product = %d != %d, release = %d)\n", vendor, kUSBVendorID, product, LegoUSBProductID, release); (void) (*dev)-Release(dev); continue; } // Open the device to change its state kr = (*dev)->USBDeviceOpen(dev); if (kr == kIOReturnSuccess) { count++; } else { NSLog(@"Unable to open device: %08x\n", kr); (void) (*dev)->Release(dev); continue; } // Configure device kr = ConfigureDevice(dev); if (kr != kIOReturnSuccess) { NSLog(@"Unable to configure device: %08x\n", kr); (void) (*dev)->USBDeviceClose(dev); (void) (*dev)->Release(dev); continue; } break; } return count; } // USB rcx Init IOUSBInterfaceInterface** osx_usb_rcx_init (void) { CFMutableDictionaryRef matchingDict; kern_return_t result; IOUSBInterfaceInterface **intf = NULL; unsigned int device_count = 0; // Create master handler result = IOMasterPort(MACH_PORT_NULL, &gMasterPort); if (result || !gMasterPort) { NSLog(@"ERR: Couldn't create master I/O Kit port(%08x)\n", result); return NULL; } else { NSLog(@"Created Master Port.\n"); NSLog(@"Master port 0x:08X \n \n", gMasterPort); } // Set up the matching dictionary for class IOUSBDevice and its subclasses matchingDict = IOServiceMatching(kIOUSBDeviceClassName); if (!matchingDict) { NSLog(@"Couldn't create a USB matching dictionary \n"); mach_port_deallocate(mach_task_self(), gMasterPort); return NULL; } else { NSLog(@"USB matching dictionary : %08X \n", matchingDict); } CFDictionarySetValue(matchingDict, CFSTR(kUSBVendorID), CFNumberCreate(kCFAllocatorDefault, kCFNumberShortType, 
&LegoUSBVendorID)); CFDictionarySetValue(matchingDict, CFSTR(kUSBProductID), CFNumberCreate(kCFAllocatorDefault, kCFNumberShortType, &LegoUSBProductID)); result = IOServiceGetMatchingServices(gMasterPort, matchingDict, &gRawAddedIter); matchingDict = 0; // this was consumed by the above call // Iterate over matching devices to access already present devices NSLog(@"RawAddedIter : 0x:%08X \n", &gRawAddedIter); device_count = FindDevice(NULL, gRawAddedIter); if (device_count == 1) { result = FindInterfaces(dev, &intf); if (kIOReturnSuccess != result) { NSLog(@"unable to find interfaces on device: %08x\n", result); (*dev)-USBDeviceClose(dev); (*dev)-Release(dev); return NULL; } // osx_usb_rcx_wakeup(intf); return intf; } else if (device_count 1) { NSLog(@"too many matching devices (%d) !\n", device_count); } else { NSLog(@"no matching devices found\n"); } return NULL; } int main(int argc, char *argv[]) { int returnCode; NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; NSLog(@"Debut du programme \n \n"); osx_usb_rcx_init(); NSLog(@"Fin du programme \n \n"); return 0; // returnCode = UIApplicationMain(argc, argv, @"Untitled1App", @"Untitled1App"); // [pool release]; // return returnCode; }


  • Getting SIGSEGV in std::_List_const_iterator<Exiv2::Exifdatum>::operator++ whilst using JNI

    - by HJED
    Hi I'm using jni to access the exiv2 API in my Java project and I'm getting a SIGSEGV error in std::_List_const_iterator::operator++. I'm uncertain how to fix this error. I've tried using high -Xmx values as well as running on both jdk1.6.0 (server and cacao JVMs) and 1.7.0 (server JVM). gdb traceback: #0 0x00007fffa36f2363 in std::_List_const_iterator<Exiv2::Exifdatum>::operator++ (this=0x7ffff7fd3500) at /usr/include/c++/4.4/bits/stl_list.h:223 #1 0x00007fffa36f2310 in std::__distance<std::_List_const_iterator<Exiv2::Exifdatum> > (__first=..., __last=...) at /usr/include/c++/4.4/bits/stl_iterator_base_funcs.h:79 #2 0x00007fffa36f224d in std::distance<std::_List_const_iterator<Exiv2::Exifdatum> > (__first=..., __last=...) at /usr/include/c++/4.4/bits/stl_iterator_base_funcs.h:114 #3 0x00007fffa36f1f27 in std::list<Exiv2::Exifdatum, std::allocator<Exiv2::Exifdatum> >::size (this=0x7fffa4030910) at /usr/include/c++/4.4/bits/stl_list.h:805 #4 0x00007fffa36f1d50 in Exiv2::ExifData::count (this=0x7fffa4030910) at /usr/local/include/exiv2/exif.hpp:518 #5 0x00007fffa36f1d30 in Exiv2::ExifData::empty (this=0x7fffa4030910) at /usr/local/include/exiv2/exif.hpp:516 #6 0x00007fffa36f1763 in getVars (path=0x7fffa401d2f0 "/home/hjed/PC100001.JPG", env=0x6131c8, obj=0x7ffff7fd37a8) at src/main.cpp:146 #7 0x00007fffa36f19d8 in Java_photo_exiv2_Exiv2MetaDataStore_impl_1loadFromExiv (env=0x6131c8, obj=0x7ffff7fd37a8, path=0x7ffff7fd37a0, obj2=0x7ffff7fd3798) at src/main.cpp:160 #8 0x00007ffff21d9cc8 in ?? () #9 0x00000000fffffffe in ?? () #10 0x00007ffff7fd3740 in ?? () #11 0x0000000000613000 in ?? () #12 0x00007ffff7fd3738 in ?? () #13 0x00007fffaa1076e0 in ?? () #14 0x00007ffff7fd37a8 in ?? () #15 0x00007fffaa108d10 in ?? () #16 0x0000000000000000 in ?? () Java error: # A fatal error has been detected by the Java Runtime Environment: # # SIGSEGV (0xb) at pc=0x00007fac11223363, pid=11905, tid=140378349111040 # # JRE version: 6.0_20-b20 # Java VM: OpenJDK 64-Bit Server VM (19.0-b09 mixed mode linux-amd64 ) # Derivative: IcedTea6 1.9.2 # Distribution: Ubuntu 10.10, package 6b20-1.9.2-0ubuntu2 # Problematic frame: # C [libExiff2-binding.so+0x4363] _ZNSt20_List_const_iteratorIN5Exiv29ExifdatumEEppEv+0xf # # If you would like to submit a bug report, please include # instructions how to reproduce the bug and visit: # https://bugs.launchpad.net/ubuntu/+source/openjdk-6/ # The crash happened outside the Java Virtual Machine in native code. # See problematic frame for where to report the bug. # --------------- T H R E A D --------------- Current thread (0x0000000000dbf000): JavaThread "main" [_thread_in_native, id=11909, stack(0x00007fac61920000,0x00007fac61a21000)] siginfo:si_signo=SIGSEGV: si_errno=0, si_code=128 (), si_addr=0x0000000000000000 Registers: ... 
Register to memory mapping: RAX=0x6c8948f0245c8948 0x6c8948f0245c8948 is pointing to unknown location RBX=0x00007fac0c042c00 0x00007fac0c042c00 is pointing to unknown location RCX=0x0000000000000000 0x0000000000000000 is pointing to unknown location RDX=0x6c8948f0245c8948 0x6c8948f0245c8948 is pointing to unknown location RSP=0x00007fac61a1f4e0 0x00007fac61a1f4e0 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE RBP=0x00007fac61a1f4e0 0x00007fac61a1f4e0 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE RSI=0x00007fac61a1f4f0 0x00007fac61a1f4f0 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE RDI=0x00007fac61a1f500 0x00007fac61a1f500 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE R8 =0x00007fac0c054630 0x00007fac0c054630 is pointing to unknown location R9 =0x00007fac61a1f358 0x00007fac61a1f358 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE R10=0x00007fac61a1f270 0x00007fac61a1f270 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE R11=0x00007fac11223354 0x00007fac11223354: _ZNSt20_List_const_iteratorIN5Exiv29ExifdatumEEppEv+0 in /home/hjed/libExiff2-binding.so at 0x00007fac1121f000 R12=0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE R13=0x00007fac13ad1be8 {method} - klass: {other class} R14=0x00007fac61a1f7a8 0x00007fac61a1f7a8 is pointing into the stack for thread: 0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE R15=0x0000000000dbf000 "main" prio=10 tid=0x0000000000dbf000 nid=0x2e85 runnable [0x00007fac61a1f000] java.lang.Thread.State: RUNNABLE Top of Stack: (sp=0x00007fac61a1f4e0) ... Instructions: (pc=0x00007fac11223363) ... 
Stack: [0x00007fac61920000,0x00007fac61a21000], sp=0x00007fac61a1f4e0, free space=1021k Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code) C [libExiff2-binding.so+0x4363] _ZNSt20_List_const_iteratorIN5Exiv29ExifdatumEEppEv+0xf C [libExiff2-binding.so+0x4310] _ZSt10__distanceISt20_List_const_iteratorIN5Exiv29ExifdatumEEENSt15iterator_traitsIT_E15difference_typeES5_S5_St18input_iterator_tag+0x26 C [libExiff2-binding.so+0x424d] _ZSt8distanceISt20_List_const_iteratorIN5Exiv29ExifdatumEEENSt15iterator_traitsIT_E15difference_typeES5_S5_+0x36 C [libExiff2-binding.so+0x3f27] _ZNKSt4listIN5Exiv29ExifdatumESaIS1_EE4sizeEv+0x33 C [libExiff2-binding.so+0x3d50] _ZNK5Exiv28ExifData5countEv+0x18 C [libExiff2-binding.so+0x3d30] _ZNK5Exiv28ExifData5emptyEv+0x18 C [libExiff2-binding.so+0x3763] _Z7getVarsPKcP7JNIEnv_P8_jobject+0x3e3 C [libExiff2-binding.so+0x39d8] Java_photo_exiv2_Exiv2MetaDataStore_impl_1loadFromExiv+0x4b j photo.exiv2.Exiv2MetaDataStore.impl_loadFromExiv(Ljava/lang/String;Lphoto/exiv2/Exiv2MetaDataStore;)V+0 j photo.exiv2.Exiv2MetaDataStore.loadFromExiv2()V+9 j photo.exiv2.Exiv2MetaDataStore.loadData()V+1 j photo.exiv2.Exiv2MetaDataStore.<init>(Lphoto/ImageFile;)V+10 j photo.ImageFile.<init>(Ljava/lang/String;)V+11 j test.Main.main([Ljava/lang/String;)V+67 v ~StubRoutines::call_stub V [libjvm.so+0x428698] V [libjvm.so+0x4275c8] V [libjvm.so+0x432943] V [libjvm.so+0x447f91] C [java+0x3495] JavaMain+0xd75 Java frames: (J=compiled Java code, j=interpreted, Vv=VM code) j photo.exiv2.Exiv2MetaDataStore.impl_loadFromExiv(Ljava/lang/String;Lphoto/exiv2/Exiv2MetaDataStore;)V+0 j photo.exiv2.Exiv2MetaDataStore.loadFromExiv2()V+9 j photo.exiv2.Exiv2MetaDataStore.loadData()V+1 j photo.exiv2.Exiv2MetaDataStore.<init>(Lphoto/ImageFile;)V+10 j photo.ImageFile.<init>(Ljava/lang/String;)V+11 j test.Main.main([Ljava/lang/String;)V+67 v ~StubRoutines::call_stub --------------- P R O C E S S --------------- Java Threads: ( => current thread ) 0x00007fac0c028000 JavaThread "Low Memory Detector" daemon [_thread_blocked, id=11924, stack(0x00007fac11532000,0x00007fac11633000)] 0x00007fac0c025800 JavaThread "CompilerThread1" daemon [_thread_blocked, id=11923, stack(0x00007fac11633000,0x00007fac11734000)] 0x00007fac0c022000 JavaThread "CompilerThread0" daemon [_thread_blocked, id=11922, stack(0x00007fac11734000,0x00007fac11835000)] 0x00007fac0c01f800 JavaThread "Signal Dispatcher" daemon [_thread_blocked, id=11921, stack(0x00007fac11835000,0x00007fac11936000)] 0x00007fac0c001000 JavaThread "Finalizer" daemon [_thread_blocked, id=11920, stack(0x00007fac11e2d000,0x00007fac11f2e000)] 0x0000000000e36000 JavaThread "Reference Handler" daemon [_thread_blocked, id=11919, stack(0x00007fac11f2e000,0x00007fac1202f000)] =>0x0000000000dbf000 JavaThread "main" [_thread_in_native, id=11909, stack(0x00007fac61920000,0x00007fac61a21000)] Other Threads: 0x0000000000e2f800 VMThread [stack: 0x00007fac1202f000,0x00007fac12130000] [id=11918] 0x00007fac0c02b000 WatcherThread [stack: 0x00007fac11431000,0x00007fac11532000] [id=11925] ... 
Heap PSYoungGen total 18432K, used 632K [0x00007fac47210000, 0x00007fac486a0000, 0x00007fac5bc10000) eden space 15808K, 4% used [0x00007fac47210000,0x00007fac472ae188,0x00007fac48180000) from space 2624K, 0% used [0x00007fac48410000,0x00007fac48410000,0x00007fac486a0000) to space 2624K, 0% used [0x00007fac48180000,0x00007fac48180000,0x00007fac48410000) PSOldGen total 42240K, used 0K [0x00007fac1de10000, 0x00007fac20750000, 0x00007fac47210000) object space 42240K, 0% used [0x00007fac1de10000,0x00007fac1de10000,0x00007fac20750000) PSPermGen total 21248K, used 2831K [0x00007fac13810000, 0x00007fac14cd0000, 0x00007fac1de10000) object space 21248K, 13% used [0x00007fac13810000,0x00007fac13ad3d80,0x00007fac14cd0000) Dynamic libraries: ... VM Arguments: jvm_args: -Dfile.encoding=UTF-8 java_command: test.Main Launcher Type: SUN_STANDARD Environment Variables: PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games USERNAME=hjed LD_LIBRARY_PATH=/usr/lib/jvm/java-6-openjdk/jre/lib/amd64/server:/usr/lib/jvm/java-6-openjdk/jre/lib/amd64:/usr/lib/jvm/java-6-openjdk/jre/../lib/amd64 SHELL=/bin/bash DISPLAY=:0.0 Signal Handlers: ... --------------- S Y S T E M --------------- OS:Ubuntu 10.10 (maverick) uname:Linux 2.6.35-24-generic #42-Ubuntu SMP Thu Dec 2 02:41:37 UTC 2010 x86_64 libc:glibc 2.12.1 NPTL 2.12.1 rlimit: STACK 8192k, CORE 0k, NPROC infinity, NOFILE 1024, AS infinity load average:0.27 0.31 0.30 /proc/meminfo: MemTotal: 4048200 kB MemFree: 106552 kB Buffers: 838212 kB Cached: 1172496 kB SwapCached: 0 kB Active: 1801316 kB Inactive: 1774880 kB Active(anon): 1224708 kB Inactive(anon): 355012 kB Active(file): 576608 kB Inactive(file): 1419868 kB Unevictable: 64 kB Mlocked: 64 kB SwapTotal: 7065596 kB SwapFree: 7065596 kB Dirty: 20 kB Writeback: 0 kB AnonPages: 1565608 kB Mapped: 213424 kB Shmem: 14216 kB Slab: 164812 kB SReclaimable: 102576 kB SUnreclaim: 62236 kB KernelStack: 4784 kB PageTables: 44908 kB NFS_Unstable: 0 kB Bounce: 0 kB WritebackTmp: 0 kB CommitLimit: 9089696 kB Committed_AS: 3676872 kB VmallocTotal: 34359738367 kB VmallocUsed: 332952 kB VmallocChunk: 34359397884 kB HardwareCorrupted: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 2048 kB DirectMap4k: 48704 kB DirectMap2M: 4136960 kB CPU:total 8 (4 cores per cpu, 2 threads per core) family 6 model 26 stepping 5, cmov, cx8, fxsr, mmx, sse, sse2, sse3, ssse3, sse4.1, sse4.2, popcnt, ht Memory: 4k page, physical 4048200k(106552k free), swap 7065596k(7065596k free) vm_info: OpenJDK 64-Bit Server VM (19.0-b09) for linux-amd64 JRE (1.6.0_20-b20), built on Dec 10 2010 19:45:55 by "buildd" with gcc 4.4.5 main.cpp: jobject toJava(std::auto_ptr<Exiv2::Value> v, const char * type, JNIEnv * env) { jclass stringClass; jmethodID cid; jobject result; stringClass = env->FindClass("photo/exiv2/Value"); cid = env->GetMethodID(stringClass, "<init>", "(Ljava/lang/String;Ljava/lang/Object;)V"); jvalue val; if ((strcmp(type, "String") == 0) || (strcmp(type, "String") == 0)) { val.l = env->NewStringUTF(v->toString().c_str()); } else if (strcmp(type, "Short") == 0) { val.s = v->toLong(0); } else if (strcmp(type, "Long") == 0) { val.j = v->toLong(0); } result = env->NewObject(stringClass, cid, env->NewStringUTF(v->toString().c_str()), val); return result; } void inLoop(std::auto_ptr<MetadataContainer> md, JNIEnv * env, jmethodID mid, jobject obj) { jvalue values[2]; const char* key = md->key().c_str(); values[0].l = env->NewStringUTF(key); /** md->value().toString().c_str(); const char* 
value = md->typeName(); values[1].l = env->NewStringUTF(value); TODO: do type conversions */ //std::cout << md->typeName() << std::endl; /** const char* type = md->value().toString().c_str(); values[1].l = env->NewStringUTF(type);*/ values[1].l = toJava(md->getValue(), md->typeName(), env); env->CallVoidMethodA(obj, mid, values); } void getVars(const char* path, JNIEnv * env, jobject obj) { //Load image Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(path); assert(image.get() != 0); image->readMetadata(); //load method jclass cls = env->GetObjectClass(obj); jmethodID mid = env->GetMethodID(cls, "exiv2_reciveElement", "(Ljava/lang/String;Lphoto/exiv2/Value;)V"); //Load IPTC data /**loadIPTC(image, path, env, obj, mid); loadEXIF(image, path, env, obj, mid);*/ Exiv2::IptcData &iptcData = image->iptcData(); if (mid != NULL) { //is there any IPTC data AND check that method exists if (iptcData.empty()) { std::string error(path); error += ": failed loading IPTC data, there may not be any data"; } else { Exiv2::IptcData::iterator end = iptcData.end(); for (Exiv2::IptcData::iterator md = iptcData.begin(); md != end; ++md) { std::auto_ptr<MetadataContainer> meta(new MetadataContainer(md)); inLoop(meta, env, mid, obj); } } Exiv2::ExifData &exifData = image->exifData(); //is there any Exif data AND check that method exists if (exifData.empty()) { //error occurs here (main.cpp:146) std::string error(path); error += ": failed loading Exif data, there may not be any data"; } else { Exiv2::ExifData::iterator end = exifData.end(); for (Exiv2::ExifData::iterator md = exifData.begin(); md != end; ++md) { std::auto_ptr<MetadataContainer> meta(new MetadataContainer(md)); inLoop(meta, env, mid, obj); } } } else { std::string error(path); error += ": failed to load method"; } } JNIEXPORT void JNICALL Java_photo_exiv2_Exiv2MetaDataStore_impl_1loadFromExiv(JNIEnv * env, jobject obj, jstring path, jobject obj2) { const char* path2 = env->GetStringUTFChars(path, NULL); getVars(path2, env, obj); env->ReleaseStringUTFChars(path, path2); } Thanks for any help, HJED EDIT This is the output when runing the jvm with the -cacao option: run: null:/usr/local/lib Error: Directory Olympus2 with 1536 entries considered invalid; not read. LOG: [0x00007ff005376700] We received a SIGSEGV and tried to handle it, but we were LOG: [0x00007ff005376700] unable to find a Java method at: LOG: [0x00007ff005376700] LOG: [0x00007ff005376700] PC=0x00007feffe4ee67d LOG: [0x00007ff005376700] LOG: [0x00007ff005376700] Dumping the current stacktrace: at photo.exiv2.Exiv2MetaDataStore.impl_loadFromExiv(Ljava/lang/String;Lphoto/exiv2/Exiv2MetaDataStore;)V(Native Method) at photo.exiv2.Exiv2MetaDataStore.loadFromExiv2()V(Exiv2MetaDataStore.java:38) at photo.exiv2.Exiv2MetaDataStore.loadData()V(Exiv2MetaDataStore.java:29) at photo.exiv2.MetaDataStore.<init>(Lphoto/ImageFile;)V(MetaDataStore.java:33) at photo.exiv2.Exiv2MetaDataStore.<init>(Lphoto/ImageFile;)V(Exiv2MetaDataStore.java:20) at photo.ImageFile.<init>(Ljava/lang/String;)V(ImageFile.java:22) at test.Main.main([Ljava/lang/String;)V(Main.java:28) LOG: [0x00007ff005376700] vm_abort: WARNING, port me to C++ and use os::abort() instead. LOG: [0x00007ff005376700] Exiting... 
LOG: [0x00007ff005376700] Backtrace (15 stack frames): LOG: [0x00007ff005376700] /usr/lib/jvm/java-6-openjdk/jre/lib/amd64/cacao/libjvm.so(+0x4ff54) [0x7ff004306f54] LOG: [0x00007ff005376700] /usr/lib/jvm/java-6-openjdk/jre/lib/amd64/cacao/libjvm.so(+0x5ac01) [0x7ff004311c01] LOG: [0x00007ff005376700] /usr/lib/jvm/java-6-openjdk/jre/lib/amd64/cacao/libjvm.so(+0x66e9a) [0x7ff00431de9a] LOG: [0x00007ff005376700] /usr/lib/jvm/java-6-openjdk/jre/lib/amd64/cacao/libjvm.so(+0x76408) [0x7ff00432d408] LOG: [0x00007ff005376700] /usr/lib/jvm/java-6-openjdk/jre/lib/amd64/cacao/libjvm.so(+0x79a4c) [0x7ff004330a4c] LOG: [0x00007ff005376700] /lib/libpthread.so.0(+0xfb40) [0x7ff004d53b40] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZNSt20_List_const_iteratorIN5Exiv29ExifdatumEEppEv+0xf) [0x7feffe4ee67d] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZSt10__distanceISt20_List_const_iteratorIN5Exiv29ExifdatumEEENSt15iterator_traitsIT_E15difference_typeES5_S5_St18input_iterator_tag+0x26) [0x7feffe4ee62a] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZSt8distanceISt20_List_const_iteratorIN5Exiv29ExifdatumEEENSt15iterator_traitsIT_E15difference_typeES5_S5_+0x36) [0x7feffe4ee567] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZNKSt4listIN5Exiv29ExifdatumESaIS1_EE4sizeEv+0x33) [0x7feffe4ee22b] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZNK5Exiv28ExifData5countEv+0x18) [0x7feffe4ee054] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_ZNK5Exiv28ExifData5emptyEv+0x18) [0x7feffe4ee034] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(_Z7getVarsPKcP7JNIEnv_P8_jobject+0x3d7) [0x7feffe4ed947] LOG: [0x00007ff005376700] /home/hjed/libExiff2-binding.so(Java_photo_exiv2_Exiv2MetaDataStore_impl_1loadFromExiv+0x4b) [0x7feffe4edcdc] LOG: [0x00007ff005376700] [0x7feffe701ccd] Java Result: 134 BUILD SUCCESSFUL (total time: 0 seconds)


  • Conflict between two JavaScript files (MailChimp validation scripts and the jQuery hSlides.js plugin)

    - by Brian
    I have two scripts running on the same page: one is the jQuery hSlides.js script (http://www.jesuscarrera.info/demos/hslides/) and the other is a custom script used for MailChimp list-signup integration. The hSlides panel can be seen in effect here: http://theatricalbellydance.com. I've turned off the MailChimp script because it was conflicting with the hSlides script, causing it to fail completely (as seen here: http://theatricalbellydance.com/home2/). Can someone tell me what could be done to the hSlides script to stop the conflict with the MailChimp script? The MailChimp script: var fnames = new Array(); var ftypes = new Array(); fnames[0] = 'EMAIL'; ftypes[0] = 'email'; fnames[3] = 'MMERGE3'; ftypes[3] = 'text'; fnames[1] = 'FNAME'; ftypes[1] = 'text'; fnames[2] = 'LNAME'; ftypes[2] = 'text'; fnames[4] = 'MMERGE4'; ftypes[4] = 'address'; fnames[6] = 'MMERGE6'; ftypes[6] = 'number'; fnames[9] = 'MMERGE9'; ftypes[9] = 'text'; fnames[5] = 'MMERGE5'; ftypes[5] = 'text'; fnames[7] = 'MMERGE7'; ftypes[7] = 'text'; fnames[8] = 'MMERGE8'; ftypes[8] = 'text'; fnames[10] = 'MMERGE10'; ftypes[10] = 'text'; fnames[11] = 'MMERGE11'; ftypes[11] = 'text'; fnames[12] = 'MMERGE12'; ftypes[12] = 'text'; var err_style = ''; try { err_style = mc_custom_error_style; } catch (e) { err_style = 'margin: 1em 0 0 0; padding: 1em 0.5em 0.5em 0.5em; background: rgb(255, 238, 238) none repeat scroll 0% 0%; font-weight: bold; float: left; z-index: 1; width: 80%; -moz-background-clip: -moz-initial; -moz-background-origin: -moz-initial; -moz-background-inline-policy: -moz-initial; color: rgb(255, 0, 0);'; } var mce_jQuery = jQuery.noConflict(); mce_jQuery(document).ready(function ($) { var options = { errorClass: 'mce_inline_error', errorElement: 'div', errorStyle: err_style, onkeyup: function () {}, onfocusout: function () {}, onblur: function () {} }; var mce_validator = mce_jQuery("#mc-embedded-subscribe-form").validate(options); options = { url: 'http://theatricalbellydance.us1.list-manage.com/subscribe/post-json?u=1d127e7630ced825cb1a8b5a9&id=9f12d2a6bb&c=?', type: 'GET', dataType: 'json', contentType: "application/json; charset=utf-8", beforeSubmit: function () { mce_jQuery('#mce_tmp_error_msg').remove(); mce_jQuery('.datefield', '#mc_embed_signup').each(function () { var txt = 'filled'; var fields = new Array(); var i = 0; mce_jQuery(':text', this).each(function () { fields[i] = this; i++; }); mce_jQuery(':hidden', this).each(function () { if (fields[0].value == 'MM' && fields[1].value == 'DD' && fields[2].value == 'YYYY') { this.value = ''; } else if (fields[0].value == '' && fields[1].value == '' && fields[2].value == '') { this.value = ''; } else { this.value = fields[0].value + '/' + fields[1].value + '/' + fields[2].value; } }); }); return mce_validator.form(); }, success: mce_success_cb }; mce_jQuery('#mc-embedded-subscribe-form').ajaxForm(options); }); function mce_success_cb(resp) { mce_jQuery('#mce-success-response').hide(); mce_jQuery('#mce-error-response').hide(); if (resp.result == "success") { mce_jQuery('#mce-' + resp.result + '-response').show(); mce_jQuery('#mce-' + resp.result + '-response').html(resp.msg); mce_jQuery('#mc-embedded-subscribe-form').each(function () { this.reset(); }); } else { var index = -1; var msg; try { var parts = resp.msg.split(' - ', 2); if (parts[1] == undefined) { msg = resp.msg; } else { i = parseInt(parts[0]); if (i.toString() == parts[0]) { index = parts[0]; msg = parts[1]; } else { index = -1; msg = resp.msg; } } } catch (e) { index = -1; msg = 
resp.msg; } try { if (index == -1) { mce_jQuery('#mce-' + resp.result + '-response').show(); mce_jQuery('#mce-' + resp.result + '-response').html(msg); } else { err_id = 'mce_tmp_error_msg'; html = '<div id="' + err_id + '" style="' + err_style + '"> ' + msg + '</div>'; var input_id = '#mc_embed_signup'; var f = mce_jQuery(input_id); if (ftypes[index] == 'address') { input_id = '#mce-' + fnames[index] + '-addr1'; f = mce_jQuery(input_id).parent().parent().get(0); } else if (ftypes[index] == 'date') { input_id = '#mce-' + fnames[index] + '-month'; f = mce_jQuery(input_id).parent().parent().get(0); } else { input_id = '#mce-' + fnames[index]; f = mce_jQuery().parent(input_id).get(0); } if (f) { mce_jQuery(f).append(html); mce_jQuery(input_id).focus(); } else { mce_jQuery('#mce-' + resp.result + '-response').show(); mce_jQuery('#mce-' + resp.result + '-response').html(msg); } } } catch (e) { mce_jQuery('#mce-' + resp.result + '-response').show(); mce_jQuery('#mce-' + resp.result + '-response').html(msg); } } } The hslides script: /* * hSlides (1.0) // 2008.02.25 // <http://plugins.jquery.com/project/hslides> * * REQUIRES jQuery 1.2.3+ <http://jquery.com/> * * Copyright (c) 2008 TrafficBroker <http://www.trafficbroker.co.uk> * Licensed under GPL and MIT licenses * * hSlides is an horizontal accordion navigation, sliding the panels around to reveal one of interest. * * Sample Configuration: * // this is the minimum configuration needed * $('#accordion').hSlides({ * totalWidth: 730, * totalHeight: 140, * minPanelWidth: 87, * maxPanelWidth: 425 * }); * * Config Options: * // Required configuration * totalWidth: Total width of the accordion // default: 0 * totalHeight: Total height of the accordion // default: 0 * minPanelWidth: Minimum width of the panel (closed) // default: 0 * maxPanelWidth: Maximum width of the panel (opened) // default: 0 * // Optional configuration * midPanelWidth: Middle width of the panel (centered) // default: 0 * speed: Speed for the animation // default: 500 * easing: Easing effect for the animation. Other than 'swing' or 'linear' must be provided by plugin // default: 'swing' * sensitivity: Sensitivity threshold (must be 1 or higher) // default: 3 * interval: Milliseconds for onMouseOver polling interval // default: 100 * timeout: Milliseconds delay before onMouseOut // default: 300 * eventHandler: Event to open panels: click or hover. For the hover option requires hoverIntent plugin <http://cherne.net/brian/resources/jquery.hoverIntent.html> // default: 'click' * panelSelector: HTML element storing the panels // default: 'li' * activeClass: CSS class for the active panel // default: none * panelPositioning: Accordion panelPositioning: top -> first panel on the bottom and next on the top, other value -> first panel on the top and next to the bottom // default: 'top' * // Callback funtctions. Inside them, we can refer the panel with $(this). * onEnter: Funtion raised when the panel is activated. // default: none * onLeave: Funtion raised when the panel is deactivated. 
// default: none * * We can override the defaults with: * $.fn.hSlides.defaults.easing = 'easeOutCubic'; * * @param settings An object with configuration options * @author Jesus Carrera <[email protected]> */ (function($) { $.fn.hSlides = function(settings) { // override default configuration settings = $.extend({}, $.fn.hSlides.defaults, settings); // for each accordion return this.each(function(){ var wrapper = this; var panelLeft = 0; var panels = $(settings.panelSelector, wrapper); var panelPositioning = 1; if (settings.panelPositioning != 'top'){ panelLeft = ($(settings.panelSelector, wrapper).length - 1) * settings.minPanelWidth; panels = $(settings.panelSelector, wrapper).reverse(); panelPositioning = -1; } // necessary styles for the wrapper $(this).css('position', 'relative').css('overflow', 'hidden').css('width', settings.totalWidth).css('height', settings.totalHeight); // set the initial position of the panels var zIndex = 0; panels.each(function(){ // necessary styles for the panels $(this).css('position', 'absolute').css('left', panelLeft).css('zIndex', zIndex).css('height', settings.totalHeight).css('width', settings.maxPanelWidth); zIndex ++; // if this panel is the activated by default, set it as active and move the next (to show this one) if ($(this).hasClass(settings.activeClass)){ $.data($(this)[0], 'active', true); if (settings.panelPositioning != 'top'){ panelLeft = ($(settings.panelSelector, wrapper).index(this) + 1) * settings.minPanelWidth - settings.maxPanelWidth; }else{ panelLeft = panelLeft + settings.maxPanelWidth; } }else{ // check if we are centering and some panel is active // this is why we can't add/remove the active class in the callbacks: positioning the panels if we have one active if (settings.midPanelWidth && $(settings.panelSelector, wrapper).hasClass(settings.activeClass) == false){ panelLeft = panelLeft + settings.midPanelWidth * panelPositioning; }else{ panelLeft = panelLeft + settings.minPanelWidth * panelPositioning; } } }); // iterates through the panels setting the active and changing the position var movePanels = function(){ // index of the new active panel var activeIndex = $(settings.panelSelector, wrapper).index(this); // iterate all panels panels.each(function(){ // deactivate if is the active if ( $.data($(this)[0], 'active') == true ){ $.data($(this)[0], 'active', false); $(this).removeClass(settings.activeClass).each(settings.onLeave); } // set position of current panel var currentIndex = $(settings.panelSelector, wrapper).index(this); panelLeft = settings.minPanelWidth * currentIndex; // if the panel is next to the active, we need to add the opened width if ( (currentIndex * panelPositioning) > (activeIndex * panelPositioning)){ panelLeft = panelLeft + (settings.maxPanelWidth - settings.minPanelWidth) * panelPositioning; } // animate $(this).animate({left: panelLeft}, settings.speed, settings.easing); }); // activate the new active panel $.data($(this)[0], 'active', true); $(this).addClass(settings.activeClass).each(settings.onEnter); }; // center the panels if configured var centerPanels = function(){ var panelLeft = 0; if (settings.panelPositioning != 'top'){ panelLeft = ($(settings.panelSelector, wrapper).length - 1) * settings.minPanelWidth; } panels.each(function(){ $(this).removeClass(settings.activeClass).animate({left: panelLeft}, settings.speed, settings.easing); if ($.data($(this)[0], 'active') == true){ $.data($(this)[0], 'active', false); $(this).each(settings.onLeave); } panelLeft = panelLeft + settings.midPanelWidth * 
panelPositioning ; }); }; // event handling if(settings.eventHandler == 'click'){ $(settings.panelSelector, wrapper).click(movePanels); }else{ var configHoverPanel = { sensitivity: settings.sensitivity, interval: settings.interval, over: movePanels, timeout: settings.timeout, out: function() {} } var configHoverWrapper = { sensitivity: settings.sensitivity, interval: settings.interval, over: function() {}, timeout: settings.timeout, out: centerPanels } $(settings.panelSelector, wrapper).hoverIntent(configHoverPanel); if (settings.midPanelWidth != 0){ $(wrapper).hoverIntent(configHoverWrapper); } } }); }; // invert the order of the jQuery elements $.fn.reverse = function(){ return this.pushStack(this.get().reverse(), arguments); }; // default settings $.fn.hSlides.defaults = { totalWidth: 0, totalHeight: 0, minPanelWidth: 0, maxPanelWidth: 0, midPanelWidth: 0, speed: 500, easing: 'swing', sensitivity: 3, interval: 100, timeout: 300, eventHandler: 'click', panelSelector: 'li', activeClass: false, panelPositioning: 'top', onEnter: function() {}, onLeave: function() {} }; })(jQuery);


  • Android - BroadcastReceiver CONNECTIVITY_ACTION

    - by Marc Ortiz
    In my project there's a service that listens for connectivity changes. When Wi-Fi is switched from off to on, the service gets called. The problem is that I'm using a fragment, and inside the fragment there's a button with setOnClickListener() and onClick(); SOMETIMES, when I touch the button, the service receives an intent saying that the connectivity has changed (the method gets called for no apparent reason). Here's the code of my fragment for the ViewPager layout: public static class FragmentSelection extends Fragment { int mNum; static FragmentSelection newInstance(int num) { FragmentSelection f = new FragmentSelection(); // Supply num input as an argument. Bundle args = new Bundle(); args.putInt("num", num); f.setArguments(args); return f; } /** * When creating, retrieve this instance's number from its arguments. */ @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); mNum = (getArguments() != null ? getArguments().getInt("num") : 1) + 1; } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { final View v = inflater.inflate(R.layout.fragment_intro_contenido, container, false); Button btStart = (Button) v.findViewById(R.id.btChanged); final CheckBox cbSMS = (CheckBox) v.findViewById(R.id.checkBoxSms); final CheckBox cbCalls = (CheckBox) v .findViewById(R.id.checkBoxLlamadas); final CheckBox cbApps = (CheckBox) v .findViewById(R.id.checkBoxApps); final CheckBox cbPosition = (CheckBox) v .findViewById(R.id.checkBoxPosicion); final CheckBox cbContacts = (CheckBox) v .findViewById(R.id.checkBoxContactos); final CheckBox cbAll = (CheckBox) v.findViewById(R.id.checkBoxAll); cbSMS.setChecked(true); cbCalls.setChecked(true); cbApps.setChecked(true); cbPosition.setChecked(true); cbContacts.setChecked(true); cbAll.setChecked(true); btStart.setOnClickListener(new View.OnClickListener() { public void onClick(View c) { Intent i = new Intent(); i.setClass(v.getContext(), AnonymeActivity.class); if (!cbSMS.isChecked()) { i.putExtra("sms", 0); } else { i.putExtra("sms", 1); } if (!cbCalls.isChecked()) { i.putExtra("calls", 0); } else { i.putExtra("calls", 1); } if (cbContacts.isChecked()) { i.putExtra("contacts", 1); } else { i.putExtra("contacts", 0); } if (cbApps.isChecked()) { i.putExtra("apps", 1); } else { i.putExtra("apps", 0); } if (!cbPosition.isChecked()) { i.putExtra("gps", 0); } else { i.putExtra("gps", 1); } if (!cbAll.isChecked()) { if (cbSMS.isChecked() && cbCalls.isChecked() && cbContacts.isChecked() && cbApps.isChecked() && cbPosition.isChecked()) { i.putExtra("all", 1); } else { i.putExtra("all", 0); } } else { i.putExtra("all", 1); } startActivity(i); } }); return v; } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); } } My BroadcastReceiver class: class Broadcast_Reciver extends BroadcastReceiver implements Variables { CheckConexion cc; @Override public void onReceive(Context contxt, Intent intent) { // When an event comes in, we work out which one it is and take the corresponding action. 
if (intent.getAction().equals(SMS_RECEIVED)) { Sms sms = new Sms(null, contxt); sms.uploadNewSms(intent); } else if (intent.getAction().equals(Intent.ACTION_BATTERY_LOW)) { // st.batterylow(contxt); } else if (intent.getAction().equals(Intent.ACTION_POWER_CONNECTED)) { // st.power(1, contxt); } else if (intent.getAction().equals(Intent.ACTION_POWER_DISCONNECTED)) { // st.power(0, contxt); } else if (intent.getAction().equals(Intent.ACTION_CALL_BUTTON)) { // Notify } else if (intent.getAction().equals(Intent.ACTION_CAMERA_BUTTON)) { // Notify } else if (intent.getAction().equals(Intent.ACTION_PACKAGE_ADDED) || intent.getAction().equals(Intent.ACTION_PACKAGE_CHANGED) || intent.getAction().equals(Intent.ACTION_PACKAGE_REMOVED)) { Database db = new Database(contxt); if (db.open().Preferences(4)) { Uri data = intent.getData(); new ListApps(contxt).import_app(intent, contxt, data, intent.getAction()); } db.close(); } else if (intent.getAction().equals( ConnectivityManager.CONNECTIVITY_ACTION)) { cc = new CheckConexion(contxt); if (cc.isOnline()) { Database db = new Database(contxt); if (db.open().move() == 1) { new UploadOffline(contxt); } db.close(); } } } } And the errors: 9-03 23:20:37.887: E/SqliteDatabaseCpp(2715): CREATE TABLE android_metadata failed 09-03 23:20:37.887: E/SQLiteDatabase(2715): Failed to open the database. closing it. 09-03 23:20:37.887: E/SQLiteDatabase(2715): android.database.sqlite.SQLiteDatabaseLockedException: database is locked 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.native_setLocale(Native Method) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.setLocale_(SQLiteDatabase.java:2211) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.setLocale(SQLiteDatabase.java:2199) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.openDatabase(SQLiteDatabase.java:1130) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.openDatabase(SQLiteDatabase.java:1081) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteDatabase.openOrCreateDatabase(SQLiteDatabase.java:1167) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.app.ContextImpl.openOrCreateDatabase(ContextImpl.java:833) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.content.ContextWrapper.openOrCreateDatabase(ContextWrapper.java:221) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.database.sqlite.SQLiteOpenHelper.getWritableDatabase(SQLiteOpenHelper.java:157) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at com.background.Database.open(Database.java:127) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at <b><b><b><h3>com.background.Broadcast_Reciver.onReceive(BroadcastService.java:100)</h3> 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.app.LoadedApk$ReceiverDispatcher$Args.run(LoadedApk.java:728) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.os.Handler.handleCallback(Handler.java:605) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.os.Handler.dispatchMessage(Handler.java:92) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.os.Looper.loop(Looper.java:137) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at android.app.ActivityThread.main(ActivityThread.java:4507) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at java.lang.reflect.Method.invokeNative(Native Method) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at java.lang.reflect.Method.invoke(Method.java:511) 09-03 23:20:37.887: 
E/SQLiteDatabase(2715): at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:790) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:557) 09-03 23:20:37.887: E/SQLiteDatabase(2715): at dalvik.system.NativeStart.main(Native Method) 09-03 23:20:37.887: D/AndroidRuntime(2715): Shutting down VM 09-03 23:20:37.887: W/dalvikvm(2715): threadid=1: thread exiting with uncaught exception (group=0x40c3b1f8) 09-03 23:20:37.902: E/AndroidRuntime(2715): FATAL EXCEPTION: main 09-03 23:20:37.902: E/AndroidRuntime(2715): java.lang.RuntimeException: Error receiving broadcast Intent { act=android.net.conn.CONNECTIVITY_CHANGE flg=0x10000010 (has extras) } in com.background.Broadcast_Reciver@415203f8 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.app.LoadedApk$ReceiverDispatcher$Args.run(LoadedApk.java:737) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.os.Handler.handleCallback(Handler.java:605) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.os.Handler.dispatchMessage(Handler.java:92) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.os.Looper.loop(Looper.java:137) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.app.ActivityThread.main(ActivityThread.java:4507) 09-03 23:20:37.902: E/AndroidRuntime(2715): at java.lang.reflect.Method.invokeNative(Native Method) 09-03 23:20:37.902: E/AndroidRuntime(2715): at java.lang.reflect.Method.invoke(Method.java:511) 09-03 23:20:37.902: E/AndroidRuntime(2715): at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:790) 09-03 23:20:37.902: E/AndroidRuntime(2715): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:557) 09-03 23:20:37.902: E/AndroidRuntime(2715): at dalvik.system.NativeStart.main(Native Method) 09-03 23:20:37.902: E/AndroidRuntime(2715): Caused by: android.database.sqlite.SQLiteDatabaseLockedException: database is locked 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.native_setLocale(Native Method) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.setLocale_(SQLiteDatabase.java:2211) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.setLocale(SQLiteDatabase.java:2199) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.openDatabase(SQLiteDatabase.java:1130) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.openDatabase(SQLiteDatabase.java:1081) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteDatabase.openOrCreateDatabase(SQLiteDatabase.java:1167) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.app.ContextImpl.openOrCreateDatabase(ContextImpl.java:833) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.content.ContextWrapper.openOrCreateDatabase(ContextWrapper.java:221) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.database.sqlite.SQLiteOpenHelper.getWritableDatabase(SQLiteOpenHelper.java:157) 09-03 23:20:37.902: E/AndroidRuntime(2715): at com.background.Database.open(Database.java:127) 09-03 23:20:37.902: E/AndroidRuntime(2715): at com.background.Broadcast_Reciver.onReceive(BroadcastService.java:100) 09-03 23:20:37.902: E/AndroidRuntime(2715): at android.app.LoadedApk$ReceiverDispatcher$Args.run(LoadedApk.java:728) 09-03 23:20:37.902: E/AndroidRuntime(2715): ... 9 more Checkout that the program goes into BroadcastReceiver class and i don't understand why!
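    A note on the SQLiteDatabaseLockedException in the log above: it is typically raised when two components (here the BroadcastReceiver and something else in the app) each create their own database helper and try to open the same file at the same time. One common workaround is to share a single SQLiteOpenHelper across the whole process. The sketch below is not from the original post; the names (DbHelper, the placeholder table) are hypothetical and only illustrate the singleton pattern.

    import android.content.Context;
    import android.database.sqlite.SQLiteDatabase;
    import android.database.sqlite.SQLiteOpenHelper;

    // Hypothetical shared helper: every caller gets the same instance, so the
    // receiver and the UI never race to open the same database file.
    public class DbHelper extends SQLiteOpenHelper {
        private static DbHelper instance;

        // Use the application context so the singleton cannot leak an Activity.
        public static synchronized DbHelper getInstance(Context context) {
            if (instance == null) {
                instance = new DbHelper(context.getApplicationContext());
            }
            return instance;
        }

        private DbHelper(Context context) {
            super(context, "app.db", null, 1);
        }

        @Override
        public void onCreate(SQLiteDatabase db) {
            // Placeholder schema; the real project defines its own tables.
            db.execSQL("CREATE TABLE IF NOT EXISTS pending (id INTEGER PRIMARY KEY, payload TEXT)");
        }

        @Override
        public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
            // No migrations in this sketch.
        }
    }

    With something like this in place, onReceive() would call DbHelper.getInstance(contxt).getWritableDatabase() instead of constructing a new Database(contxt) each time, and would not close the shared helper after every query.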


  • Displaying emails in a JTable (Java Swing)

    - by Paul
    Hi, I'm new to all this. I have been trying to display fetched emails in a JTable using the JavaMail add-on, but when I ask the program to set the value it never does. I have been working in NetBeans, if that is any help. The fetchMail class finds all mail on a server. The gui class is used to display all emails in a table as well as to create mail. You will probably think that I have gone at it like a bull in a china shop; I am new to Java and trying to give myself a challenge. Any help or advice would be greatly appreciated. fetchMail: package mail; import java.util.*; import java.io.*; import java.text.DateFormat; import java.text.SimpleDateFormat; import javax.mail.*; import javax.mail.internet.*; import javax.mail.search.*; import javax.activation.*; public class fetchMail { public void fetch(String username, String pass, String search){ MessagesTableModel tableModel = new MessagesTableModel(); String complete; DateFormat df = new SimpleDateFormat("dd/MM/yyyy"); gui gui = new gui(); // SUBSTITUTE YOUR ISP's POP3 SERVER HERE!!! String host = "imap.gmail.com"; // SUBSTITUTE YOUR USERNAME AND PASSWORD TO ACCESS E-MAIL HERE!!! String user = username; String password = pass; // SUBSTITUTE YOUR SUBJECT SUBSTRING TO SEARCH HERE!!! String subjectSubstringToSearch = search; // Get a session. Use a blank Properties object. Session session = Session.getInstance(new Properties()); Properties props = System.getProperties(); props.setProperty("mail.store.protocol", "imaps"); props.setProperty("mail.imap.socketFactory.class", "javax.net.ssl.SSLSocketFactory"); props.setProperty("mail.imap.socketFactory.fallback", "false"); try { // Get a Store object Store store = session.getStore("imaps"); store.connect(host, user, password); // Get "INBOX" Folder fldr = store.getFolder("INBOX"); fldr.open(Folder.READ_WRITE); int count = fldr.getMessageCount(); System.out.println(count + " total messages"); // Message numbers start at 1 for(int i = 1; i <= count; i++) { // Get a message by its sequence number Message m = fldr.getMessage(i); // Get some headers Date date = m.getSentDate(); int pos = i - 1; String d = df.format(date); Address [] from = m.getFrom(); String subj = m.getSubject(); String mimeType = m.getContentType(); complete = date + "\t" + from[0] + "\t" + subj + "\t" + mimeType; //tableModel.setMessages(m); gui.setDate(d, pos); // System.out.println(d + " " + i); } // Search for e-mails by some subject substring String pattern = subjectSubstringToSearch; SubjectTerm st = new SubjectTerm(pattern); // Get some message references Message [] found = fldr.search(st); System.out.println(found.length + " messages matched Subject pattern \"" + pattern + "\""); for (int i = 0; i < found.length; i++) { Message m = found[i]; // Get some headers Date date = m.getSentDate(); Address [] from = m.getFrom(); String subj = m.getSubject(); String mimeType = m.getContentType(); //System.out.println(date + "\t" + from[0] + "\t" + // subj + "\t" + mimeType); Object o = m.getContent(); if (o instanceof String) { // System.out.println("**This is a String Message**"); // System.out.println((String)o); } else if (o instanceof Multipart) { // System.out.print("**This is a Multipart Message. 
"); Multipart mp = (Multipart)o; int count3 = mp.getCount(); // System.out.println("It has " + count3 + // " BodyParts in it**"); for (int j = 0; j < count3; j++) { // Part are numbered starting at 0 BodyPart b = mp.getBodyPart(j); String mimeType2 = b.getContentType(); // System.out.println( "BodyPart " + (j + 1) + // " is of MimeType " + mimeType); Object o2 = b.getContent(); if (o2 instanceof String) { // System.out.println("**This is a String BodyPart**"); // System.out.println((String)o2); } else if (o2 instanceof Multipart) { // System.out.print( // "**This BodyPart is a nested Multipart. "); Multipart mp2 = (Multipart)o2; int count2 = mp2.getCount(); // System.out.println("It has " + count2 + // "further BodyParts in it**"); } else if (o2 instanceof InputStream) { // System.out.println( // "**This is an InputStream BodyPart**"); } } //End of for } else if (o instanceof InputStream) { // System.out.println("**This is an InputStream message**"); InputStream is = (InputStream)o; // Assumes character content (not binary images) int c; while ((c = is.read()) != -1) { // System.out.write(c); } } // Uncomment to set "delete" flag on the message //m.setFlag(Flags.Flag.DELETED,true); } //End of for // "true" actually deletes flagged messages from folder fldr.close(true); store.close(); } catch (MessagingException mex) { // Prints all nested (chained) exceptions as well mex.printStackTrace(); } catch (IOException ioex) { ioex.printStackTrace(); } } } gui: /* * gui.java * * Created on 13-May-2010, 18:29:30 */ package mail; import java.text.DateFormat; import java.text.FieldPosition; import java.text.ParsePosition; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Vector; import javax.mail.Address; import javax.swing.JScrollPane; import javax.swing.JTable; import javax.swing.ListSelectionModel; import javax.swing.event.ListSelectionEvent; import javax.swing.event.ListSelectionListener; import javax.swing.event.TableModelListener; import javax.swing.table.DefaultTableModel; import javax.swing.table.TableModel; public class gui extends javax.swing.JFrame { private MessagesTableModel tableModel; // Table listing messages. 
private JTable table; String date; /** Creates new form gui */ public gui() { initComponents(); } @SuppressWarnings("unchecked") private void initComponents() { recieve = new javax.swing.JButton(); jButton1 = new javax.swing.JButton(); jScrollPane1 = new javax.swing.JScrollPane(); inboxTable = new javax.swing.JTable(); setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE); recieve.setText("Receve"); recieve.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { recieveActionPerformed(evt); } }); jButton1.setText("new"); jButton1.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { jButton1ActionPerformed(evt); } }); inboxTable.setModel(new javax.swing.table.DefaultTableModel( new Object [][] { {null, null, null}, {null, null, null}, {null, null, null}, {null, null, null} }, new String [] { "Date", "subject", "sender" } ) { Class[] types = new Class [] { java.lang.String.class, java.lang.String.class, java.lang.String.class }; public Class getColumnClass(int columnIndex) { return types [columnIndex]; } }); jScrollPane1.setViewportView(inboxTable); inboxTable.getColumnModel().getColumn(0).setResizable(false); inboxTable.getColumnModel().getColumn(1).setResizable(false); inboxTable.getColumnModel().getColumn(2).setResizable(false); javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane()); getContentPane().setLayout(layout); layout.setHorizontalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addGap(39, 39, 39) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 558, javax.swing.GroupLayout.PREFERRED_SIZE) .addGroup(layout.createSequentialGroup() .addComponent(recieve) .addGap(18, 18, 18) .addComponent(jButton1, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE))) .addGap(73, 73, 73)) ); layout.setVerticalGroup( layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING) .addGroup(layout.createSequentialGroup() .addGap(31, 31, 31) .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE) .addComponent(recieve) .addComponent(jButton1)) .addGap(18, 18, 18) .addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 258, javax.swing.GroupLayout.PREFERRED_SIZE) .addContainerGap(179, Short.MAX_VALUE)) ); pack(); }// </editor-fold> private void recieveActionPerformed(java.awt.event.ActionEvent evt) { fetchMail fetch = new fetchMail(); fetch.fetch(email goes here, password goes here, search goes here); } private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) { createMail create = new createMail(); centerW center = new centerW(); //create.attVis(); center.center(create); create.setVisible(true); } public void setDate(String Date, int pos){ //pos = pos + 1; String [] s = new String [5]; s[pos] = Date; inboxTable.setValueAt(Date, pos, 0); } public String getDate(){ return date; } /** * @param args the command line arguments */ public static void main(String args[]) { java.awt.EventQueue.invokeLater(new Runnable() { public void run() { new gui().setVisible(true); } }); } // Variables declaration - do not modify private javax.swing.JTable inboxTable; private javax.swing.JButton jButton1; private javax.swing.JScrollPane jScrollPane1; private javax.swing.JButton recieve; // End of variables 
declaration }
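    One thing that stands out in the code above: fetchMail.fetch() constructs a brand-new gui() and calls setDate() on it, so the rows land in a frame that is never shown, while the visible table keeps its four placeholder rows. A common pattern is to hand the visible table's model to the fetch routine and append rows to it on the Event Dispatch Thread. The sketch below is not from the original post; the class name InboxUpdater is hypothetical.

    import javax.swing.SwingUtilities;
    import javax.swing.table.DefaultTableModel;

    // Hypothetical adapter: the fetch loop calls addMessage() for every message,
    // and each row is appended to the model used by the on-screen JTable.
    public class InboxUpdater {
        private final DefaultTableModel model;

        public InboxUpdater(DefaultTableModel model) {
            this.model = model;
        }

        public void addMessage(final String sentDate, final String subject, final String from) {
            // Swing models must only be touched on the Event Dispatch Thread.
            SwingUtilities.invokeLater(new Runnable() {
                public void run() {
                    model.addRow(new Object[] { sentDate, subject, from });
                }
            });
        }
    }

    In recieveActionPerformed(), the existing frame would pass new InboxUpdater((DefaultTableModel) inboxTable.getModel()) into fetch() instead of letting fetch() build its own gui; starting the table with zero placeholder rows keeps the appended messages at the top.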


  • Repaint problem

    - by user357816
    I have a problem with repaint in my Mover method. I don't know what to do; the code is below:

    import java.awt.*;
    import java.io.*;
    import java.text.*;
    import java.util.*;
    import javax.sound.sampled.*;
    import javax.swing.*;
    import javax.swing.Timer;
    import java.awt.event.*;
    import java.lang.*;

    public class bbb extends JPanel {
        public Stack<Integer> stacks[];
        public JButton auto, jugar, nojugar;
        public JButton ok, ok2;
        public JLabel info = new JLabel("Numero de Discos: ");
        public JLabel instruc = new JLabel("Presiona la base de las torres para mover las fichas");
        public JLabel instruc2 = new JLabel("No puedes poner una pieza grande sobre una pequenia!");
        public JComboBox numeros = new JComboBox();
        public JComboBox velocidad = new JComboBox();
        public boolean seguir = false, parar = false, primera = true;
        public int n1, n2, n3;
        public int click1 = 0;
        public int opcion = 1, tiempo = 50;
        public int op = 1, continuar = 0, cont = 0;
        public int piezas = 0;
        public int posx, posy;
        public int no;

        public bbb() throws IOException {
            stacks = new Stack[3];
            stacks[0] = new Stack<Integer>();
            stacks[1] = new Stack<Integer>();
            stacks[2] = new Stack<Integer>();
            setPreferredSize(new Dimension(1366, 768));
            ok = new JButton("OK");
            ok.setBounds(new Rectangle(270, 50, 70, 25));
            ok.addActionListener(new okiz());
            ok2 = new JButton("OK");
            ok2.setBounds(new Rectangle(270, 50, 70, 25));
            ok2.addActionListener(new vel());
            add(ok2); ok2.setVisible(false);
            auto = new JButton("Automatico");
            auto.setBounds(new Rectangle(50, 80, 100, 25));
            auto.addActionListener(new a());
            jugar = new JButton("PLAY");
            jugar.setBounds(new Rectangle(100, 100, 70, 25));
            jugar.addActionListener(new play());
            nojugar = new JButton("PAUSE");
            nojugar.setBounds(new Rectangle(100, 150, 70, 25));
            nojugar.addActionListener(new stop());
            setLayout(null);
            info.setBounds(new Rectangle(50, 50, 170, 25)); info.setForeground(Color.white);
            instruc.setBounds(new Rectangle(970, 50, 570, 25)); instruc.setForeground(Color.white);
            instruc2.setBounds(new Rectangle(970, 70, 570, 25)); instruc2.setForeground(Color.white);
            add(instruc); add(instruc2);
            add(jugar); add(nojugar); jugar.setVisible(false); nojugar.setVisible(false);
            add(info); info.setVisible(false);
            add(ok); ok.setVisible(false);
            add(auto);
            numeros.setBounds(new Rectangle(210, 50, 50, 25));
            numeros.addItem(1); numeros.addItem(2); numeros.addItem(3); numeros.addItem(4); numeros.addItem(5);
            numeros.addItem(6); numeros.addItem(7); numeros.addItem(8); numeros.addItem(9); numeros.addItem(10);
            add(numeros); numeros.setVisible(false);
            velocidad.setBounds(new Rectangle(150, 50, 100, 25));
            velocidad.addItem("Lenta"); velocidad.addItem("Intermedia"); velocidad.addItem("Rapida");
            add(velocidad); velocidad.setVisible(false);
        }

        public void Mover(int origen, int destino) {
            for (int i = 0; i < 3; i++) {
                System.out.print("stack " + i + ": ");
                for (int n : stacks[i]) System.out.print(n + ";");
                System.out.println("");
            }
            System.out.println("de <" + origen + "> a <" + destino + ">");
            stacks[destino].push(stacks[origen].pop());
            System.out.println("");
            this.validate();
            this.repaint();
        }

        // NOTE: when hanoi() is triggered from the PLAY button it runs on the Swing event
        // dispatch thread, so the repaint() requested in Mover() cannot actually be painted
        // until actionPerformed() returns; the busy-wait on "parar" below freezes the UI as
        // well. Running the recursion on a worker thread, or driving one move at a time from
        // a javax.swing.Timer, lets each move become visible.
        public void hanoi(int origen, int destino, int cuantas) {
            while (parar) {}
            if (cuantas <= 1) Mover(origen, destino);
            else {
                hanoi(origen, 3 - (origen + destino), cuantas - 1);
                Mover(origen, destino);
                hanoi(3 - (origen + destino), destino, cuantas - 1);
            }
        }

        public void paintComponent(Graphics g) {
            ImageIcon fondo = new ImageIcon("fondo.jpg");
            g.drawImage(fondo.getImage(), 0, 0, 1366, 768, null);
            g.setColor(new Color((int) (Math.random() * 254), (int) (Math.random() * 255), (int) (Math.random() * 255)));
            g.fillRect(0, 0, 100, 100);
            g.setColor(Color.white);
            g.fillRect(150, 600, 250, 25);
            g.fillRect(550, 600, 250, 25);
            g.fillRect(950, 600, 250, 25);
            g.setColor(Color.red);
            g.fillRect(270, 325, 10, 275);
            g.fillRect(270 + 400, 325, 10, 275);
            g.fillRect(270 + 800, 325, 10, 275);
            int x, y, top = 0;
            g.setColor(Color.yellow);
            x = 150; y = 580;
            for (int ii : stacks[0]) { g.fillRect(x + ((ii * 125) / 10), y - ((ii * 250) / 10), ((10 - ii) * 250) / 10, 20); }
            x = 550; y = 580;
            for (int ii : stacks[1]) { g.fillRect(x + ((ii * 125) / 10), y - ((ii * 250) / 10), ((10 - ii) * 250) / 10, 20); }
            x = 950; y = 580;
            for (int ii : stacks[2]) { g.fillRect(x + ((ii * 125) / 10), y - ((ii * 250) / 10), ((10 - ii) * 250) / 10, 20); }
            System.out.println("ENTRO");
            setOpaque(false);
        }

        private class play implements ActionListener { // manual
            public void actionPerformed(ActionEvent algo) {
                parar = false;
                if (primera) { // was "if(primera=true)", an assignment that is always true
                    hanoi(0, 2, no);
                    primera = false;
                }
            }
        }

        private class stop implements ActionListener { // manual
            public void actionPerformed(ActionEvent algo) {
                parar = true;
            }
        }

        private class vel implements ActionListener { // manual
            public void actionPerformed(ActionEvent algo) {
                // compare strings with equals(), not ==
                if (velocidad.getSelectedItem().equals("Lenta")) { tiempo = 150; }
                else if (velocidad.getSelectedItem().equals("Intermedia")) { tiempo = 75; }
                else tiempo = 50;
                ok2.setVisible(false);
                jugar.setVisible(true);
                nojugar.setVisible(true);
            }
        }

        private class a implements ActionListener { // auto
            public void actionPerformed(ActionEvent algo) {
                auto.setVisible(false);
                info.setVisible(true);
                numeros.setVisible(true);
                ok.setVisible(true);
                op = 3;
            }
        }

        private class okiz implements ActionListener { // ok
            public void actionPerformed(ActionEvent algo) {
                no = Integer.parseInt(numeros.getSelectedItem().toString());
                piezas = no;
                if (no > 0 && no < 11) {
                    info.setVisible(false);
                    numeros.setVisible(false);
                    ok.setVisible(false);
                    for (int i = no; i > 0; i--) stacks[0].push(i);
                    opcion = 2;
                    if (op == 3) {
                        info.setText("Velocidad: "); info.setVisible(true);
                        velocidad.setVisible(true);
                        ok2.setVisible(true);
                    }
                } else { }
                repaint();
            }
        }
    }

    The code of the other class, which calls the one above, is below:

    import java.awt.*;
    import java.io.*;
    import java.net.URL;
    import javax.imageio.*;
    import javax.swing.*;
    import javax.swing.border.*;
    import java.lang.*;
    import java.awt.event.*;

    public class aaa extends JPanel {
        private ImageIcon Background;
        private JLabel fondo;

        public static void main(String[] args) throws IOException {
            JFrame.setDefaultLookAndFeelDecorated(true);
            final JPanel cp = new JPanel(new BorderLayout());
            JFrame frame = new JFrame("Torres de Hanoi");
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            frame.setSize(550, 550);
            frame.setVisible(true);
            bbb panel = new bbb();
            frame.getContentPane().add(panel);
            frame.pack();
            frame.setVisible(true);
        }
    }

    Read the article

  • Problems installing Memcache (PECL extension)

    - by Petrus
    I have installed memcached fine, and now I will need to install PECL extension memcache. Im running RedHat x86_64 es5. The installation gives me this: downloading memcache-2.2.6.tgz ... Starting to download memcache-2.2.6.tgz (35,957 bytes) ..........done: 35,957 bytes 11 source files, building running: phpize Configuring for: PHP Api Version: 20090626 Zend Module Api No: 20090626 Zend Extension Api No: 220090626 Enable memcache session handler support? [yes] : Notice: Use of undefined constant STDIN - assumed 'STDIN' in PEAR/Frontend/CLI.php on line 304 Warning: fgets() expects parameter 1 to be resource, string given in PEAR/Frontend/CLI.php on line 304 Warning: fgets() expects parameter 1 to be resource, string given in /usr/lib/php/PEAR/Frontend/CLI.php on line 304 building in /root/tmp/pear-build-root/memcache-2.2.6 running: /root/tmp/pear/memcache/configure --enable-memcache-session=yes checking for egrep... grep -E checking for a sed that does not truncate output... /bin/sed checking for cc... cc checking for C compiler default output file name... a.out checking whether the C compiler works... yes checking whether we are cross compiling... no checking for suffix of executables... checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether cc accepts -g... yes checking for cc option to accept ANSI C... none needed checking how to run the C preprocessor... cc -E checking for icc... no checking for suncc... no checking whether cc understands -c and -o together... yes checking for system library directory... lib checking if compiler supports -R... no checking if compiler supports -Wl,-rpath,... yes checking build system type... x86_64-unknown-linux-gnu checking host system type... x86_64-unknown-linux-gnu checking target system type... x86_64-unknown-linux-gnu checking for PHP prefix... /usr checking for PHP includes... -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib checking for PHP extension directory... /usr/lib/php/extensions/no-debug-non-zts-20090626 checking for PHP installed headers prefix... /usr/include/php checking if debug is enabled... no checking if zts is enabled... no checking for re2c... re2c checking for re2c version... invalid configure: WARNING: You will need re2c 0.13.4 or later if you want to regenerate PHP parsers. checking for gawk... gawk checking whether to enable memcache support... yes, shared checking whether to enable memcache session handler support... yes checking for the location of ZLIB... no checking for the location of zlib... /usr checking for session includes... /usr/include/php checking for memcache session support... enabled checking for ld used by cc... /usr/bin/ld checking if the linker (/usr/bin/ld) is GNU ld... yes checking for /usr/bin/ld option to reload object files... -r checking for BSD-compatible nm... /usr/bin/nm -B checking whether ln -s works... yes checking how to recognize dependent libraries... pass_all checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking dlfcn.h usability... yes checking dlfcn.h presence... yes checking for dlfcn.h... yes checking the maximum length of command line arguments... 
98304 checking command to parse /usr/bin/nm -B output from cc object... ok checking for objdir... .libs checking for ar... ar checking for ranlib... ranlib checking for strip... strip checking if cc supports -fno-rtti -fno-exceptions... no checking for cc option to produce PIC... -fPIC checking if cc PIC flag -fPIC works... yes checking if cc static flag -static works... yes checking if cc supports -c -o file.o... yes checking whether the cc linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking whether -lc should be explicitly linked in... no checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... yes checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... no creating libtool appending configuration tag "CXX" to libtool configure: creating ./config.status config.status: creating config.h running: make /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=compile cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache.c -o memcache.lo mkdir .libs cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache.c -fPIC -DPIC -o .libs/memcache.o /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=compile cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_queue.c -o memcache_queue.lo cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_queue.c -fPIC -DPIC -o .libs/memcache_queue.o /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=compile cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_standard_hash.c -o memcache_standard_hash.lo cc -I/usr/include/php -I. 
-I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_standard_hash.c -fPIC -DPIC -o .libs/memcache_standard_hash.o /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=compile cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_consistent_hash.c -o memcache_consistent_hash.lo cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_consistent_hash.c -fPIC -DPIC -o .libs/memcache_consistent_hash.o /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=compile cc -I/usr/include/php -I. -I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_session.c -o memcache_session.lo cc -I/usr/include/php -I. 
-I/root/tmp/pear/memcache -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -c /root/tmp/pear/memcache/memcache_session.c -fPIC -DPIC -o .libs/memcache_session.o /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=link cc -DPHP_ATOM_INC -I/root/tmp/pear-build-root/memcache-2.2.6/include -I/root/tmp/pear-build-root/memcache-2.2.6/main -I/root/tmp/pear/memcache -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -o memcache.la -export-dynamic -avoid-version -prefer-pic -module -rpath /root/tmp/pear-build-root/memcache-2.2.6/modules memcache.lo memcache_queue.lo memcache_standard_hash.lo memcache_consistent_hash.lo memcache_session.lo cc -shared .libs/memcache.o .libs/memcache_queue.o .libs/memcache_standard_hash.o .libs/memcache_consistent_hash.o .libs/memcache_session.o -Wl,-soname -Wl,memcache.so -o .libs/memcache.so creating memcache.la (cd .libs && rm -f memcache.la && ln -s ../memcache.la memcache.la) /bin/sh /root/tmp/pear-build-root/memcache-2.2.6/libtool --mode=install cp ./memcache.la /root/tmp/pear-build-root/memcache-2.2.6/modules cp ./.libs/memcache.so /root/tmp/pear-build-root/memcache-2.2.6/modules/memcache.so cp ./.libs/memcache.lai /root/tmp/pear-build-root/memcache-2.2.6/modules/memcache.la PATH="$PATH:/sbin" ldconfig -n /root/tmp/pear-build-root/memcache-2.2.6/modules ---------------------------------------------------------------------- Libraries have been installed in: /root/tmp/pear-build-root/memcache-2.2.6/modules If you ever happen to want to link against installed libraries in a given directory, LIBDIR, you must either use libtool, and specify the full pathname of the library, or use the `-LLIBDIR' flag during linking and do at least one of the following: - add LIBDIR to the `LD_LIBRARY_PATH' environment variable during execution - add LIBDIR to the `LD_RUN_PATH' environment variable during linking - use the `-Wl,--rpath -Wl,LIBDIR' linker flag - have your system administrator add LIBDIR to `/etc/ld.so.conf' See any operating system documentation about shared libraries for more information, such as the ld(1) and ld.so(8) manual pages. ---------------------------------------------------------------------- Build complete. Don't forget to run 'make test'. 
running: make INSTALL_ROOT="/root/tmp/pear-build-root/install-memcache-2.2.6" install Installing shared extensions: /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib/php/extensions/no-debug-non-zts-20090626/ running: find "/root/tmp/pear-build-root/install-memcache-2.2.6" | xargs ls -dils 361232 4 drwxr-xr-x 3 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6 361263 4 drwxr-xr-x 3 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr 361264 4 drwxr-xr-x 3 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib 361265 4 drwxr-xr-x 3 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib/php 361266 4 drwxr-xr-x 3 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib/php/extensions 361267 4 drwxr-xr-x 2 root root 4096 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib/php/extensions/no-debug-non-zts-20090626 361262 236 -rwxr-xr-x 1 root root 235575 Jan 28 10:47 /root/tmp/pear-build-root/install-memcache-2.2.6/usr/lib/php/extensions/no-debug-non-zts-20090626/memcache.so Build process completed successfully Installing '/usr/lib/php/extensions/no-debug-non-zts-20090626/memcache.so' install ok: channel://pecl.php.net/memcache-2.2.6 Extension memcache enabled in php.ini The memcache.so object is not in /usr/local/lib/php/extensions/no-debug-non-zts-20090626 I tried as well to install this extension "memcached 1.0.2 (PHP extension for interfacing with memcached via libmemcached library)" but it failed: downloading memcached-1.0.2.tgz ... Starting to download memcached-1.0.2.tgz (22,724 bytes) ........done: 22,724 bytes 4 source files, building running: phpize Configuring for: PHP Api Version: 20090626 Zend Module Api No: 20090626 Zend Extension Api No: 220090626 building in /root/tmp/pear-build-root/memcached-1.0.2 running: /root/tmp/pear/memcached/configure checking for egrep... grep -E checking for a sed that does not truncate output... /bin/sed checking for cc... cc checking for C compiler default output file name... a.out checking whether the C compiler works... yes checking whether we are cross compiling... no checking for suffix of executables... checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether cc accepts -g... yes checking for cc option to accept ANSI C... none needed checking how to run the C preprocessor... cc -E checking for icc... no checking for suncc... no checking whether cc understands -c and -o together... yes checking for system library directory... lib checking if compiler supports -R... no checking if compiler supports -Wl,-rpath,... yes checking build system type... x86_64-unknown-linux-gnu checking host system type... x86_64-unknown-linux-gnu checking target system type... x86_64-unknown-linux-gnu checking for PHP prefix... /usr checking for PHP includes... -I/usr/include/php -I/usr/include/php/main -I/usr/include/php/TSRM -I/usr/include/php/Zend -I/usr/include/php/ext -I/usr/include/php/ext/date/lib checking for PHP extension directory... /usr/lib/php/extensions/no-debug-non-zts-20090626 checking for PHP installed headers prefix... /usr/include/php checking if debug is enabled... no checking if zts is enabled... no checking for re2c... re2c checking for re2c version... invalid configure: WARNING: You will need re2c 0.13.4 or later if you want to regenerate PHP parsers. checking for gawk... gawk checking whether to enable memcached support... 
yes, shared checking for libmemcached... yes, shared checking whether to enable memcached session handler support... yes checking whether to enable memcached igbinary serializer support... no checking for ZLIB... yes, shared checking for zlib location... /usr checking for session includes... /usr/include/php checking for memcached session support... enabled checking for memcached igbinary support... disabled checking for libmemcached location... configure: error: memcached support requires libmemcached. Use --with-libmemcached-dir= to specify the prefix where libmemcached headers and library are located ERROR: `/root/tmp/pear/memcached/configure' failed The memcached.so object is not in /usr/local/lib/php/extensions/no-debug-non-zts-20090626 Is there a kind soul out there that can solve this puzzle?

    Read the article

  • Zen and the Art of File and Folder Organization

    - by Mark Virtue
    Is your desk a paragon of neatness, or does it look like a paper-bomb has gone off? If you’ve been putting off getting organized because the task is too huge or daunting, or you don’t know where to start, we’ve got 40 tips to get you on the path to zen mastery of your filing system. For all those readers who would like to get their files and folders organized, or, if they’re already organized, better organized—we have compiled a complete guide to getting organized and staying organized, a comprehensive article that will hopefully cover every possible tip you could want. Signs that Your Computer is Poorly Organized If your computer is a mess, you’re probably already aware of it.  But just in case you’re not, here are some tell-tale signs: Your Desktop has over 40 icons on it “My Documents” contains over 300 files and 60 folders, including MP3s and digital photos You use the Windows’ built-in search facility whenever you need to find a file You can’t find programs in the out-of-control list of programs in your Start Menu You save all your Word documents in one folder, all your spreadsheets in a second folder, etc Any given file that you’re looking for may be in any one of four different sets of folders But before we start, here are some quick notes: We’re going to assume you know what files and folders are, and how to create, save, rename, copy and delete them The organization principles described in this article apply equally to all computer systems.  However, the screenshots here will reflect how things look on Windows (usually Windows 7).  We will also mention some useful features of Windows that can help you get organized. Everyone has their own favorite methodology of organizing and filing, and it’s all too easy to get into “My Way is Better than Your Way” arguments.  The reality is that there is no perfect way of getting things organized.  When I wrote this article, I tried to keep a generalist and objective viewpoint.  I consider myself to be unusually well organized (to the point of obsession, truth be told), and I’ve had 25 years experience in collecting and organizing files on computers.  So I’ve got a lot to say on the subject.  But the tips I have described here are only one way of doing it.  Hopefully some of these tips will work for you too, but please don’t read this as any sort of “right” way to do it. At the end of the article we’ll be asking you, the reader, for your own organization tips. Why Bother Organizing At All? For some, the answer to this question is self-evident. And yet, in this era of powerful desktop search software (the search capabilities built into the Windows Vista and Windows 7 Start Menus, and third-party programs like Google Desktop Search), the question does need to be asked, and answered. I have a friend who puts every file he ever creates, receives or downloads into his My Documents folder and doesn’t bother filing them into subfolders at all.  He relies on the search functionality built into his Windows operating system to help him find whatever he’s looking for.  And he always finds it.  He’s a Search Samurai.  For him, filing is a waste of valuable time that could be spent enjoying life! It’s tempting to follow suit.  On the face of it, why would anyone bother to take the time to organize their hard disk when such excellent search software is available?  
Well, if all you ever want to do with the files you own is to locate and open them individually (for listening, editing, etc), then there’s no reason to ever bother doing one scrap of organization.  But consider these common tasks that are not achievable with desktop search software: Find files manually.  Often it’s not convenient, speedy or even possible to utilize your desktop search software to find what you want.  It doesn’t work 100% of the time, or you may not even have it installed.  Sometimes its just plain faster to go straight to the file you want, if you know it’s in a particular sub-folder, rather than trawling through hundreds of search results. Find groups of similar files (e.g. all your “work” files, all the photos of your Europe holiday in 2008, all your music videos, all the MP3s from Dark Side of the Moon, all your letters you wrote to your wife, all your tax returns).  Clever naming of the files will only get you so far.  Sometimes it’s the date the file was created that’s important, other times it’s the file format, and other times it’s the purpose of the file.  How do you name a collection of files so that they’re easy to isolate based on any of the above criteria?  Short answer, you can’t. Move files to a new computer.  It’s time to upgrade your computer.  How do you quickly grab all the files that are important to you?  Or you decide to have two computers now – one for home and one for work.  How do you quickly isolate only the work-related files to move them to the work computer? Synchronize files to other computers.  If you have more than one computer, and you need to mirror some of your files onto the other computer (e.g. your music collection), then you need a way to quickly determine which files are to be synced and which are not.  Surely you don’t want to synchronize everything? Choose which files to back up.  If your backup regime calls for multiple backups, or requires speedy backups, then you’ll need to be able to specify which files are to be backed up, and which are not.  This is not possible if they’re all in the same folder. Finally, if you’re simply someone who takes pleasure in being organized, tidy and ordered (me! me!), then you don’t even need a reason.  Being disorganized is simply unthinkable. Tips on Getting Organized Here we present our 40 best tips on how to get organized.  Or, if you’re already organized, to get better organized. Tip #1.  Choose Your Organization System Carefully The reason that most people are not organized is that it takes time.  And the first thing that takes time is deciding upon a system of organization.  This is always a matter of personal preference, and is not something that a geek on a website can tell you.  You should always choose your own system, based on how your own brain is organized (which makes the assumption that your brain is, in fact, organized). We can’t instruct you, but we can make suggestions: You may want to start off with a system based on the users of the computer.  i.e. “My Files”, “My Wife’s Files”, My Son’s Files”, etc.  Inside “My Files”, you might then break it down into “Personal” and “Business”.  You may then realize that there are overlaps.  For example, everyone may want to share access to the music library, or the photos from the school play.  So you may create another folder called “Family”, for the “common” files. You may decide that the highest-level breakdown of your files is based on the “source” of each file.  In other words, who created the files.  
You could have “Files created by ME (business or personal)”, “Files created by people I know (family, friends, etc)”, and finally “Files created by the rest of the world (MP3 music files, downloaded or ripped movies or TV shows, software installation files, gorgeous desktop wallpaper images you’ve collected, etc).”  This system happens to be the one I use myself.  See below:  Mark is for files created by me; VC is for files created by my company (Virtual Creations); Others is for files created by my friends and family; Data is the rest of the world.  Also, Settings is where I store the configuration files and other program data files for my installed software (more on this in tip #34, below). Each folder will present its own particular set of requirements for further sub-organization.  For example, you may decide to organize your music collection into sub-folders based on the artist’s name, while your digital photos might get organized based on the date they were taken.  It can be different for every sub-folder! Another strategy would be based on “currentness”.  Files you have yet to open and look at live in one folder.  Ones that have been looked at but not yet filed live in another place.  Current, active projects live in yet another place.  All other files (your “archive”, if you like) would live in a fourth folder. (And of course, within that last folder you’d need to create a further sub-system based on one of the previous bullet points). Put some thought into this – changing it when it proves incomplete can be a big hassle!  Before you go to the trouble of implementing any system you come up with, examine a wide cross-section of the files you own and see if they will all be able to find a nice logical place to sit within your system. Tip #2.  When You Decide on Your System, Stick to It! There’s nothing more pointless than going to all the trouble of creating a system and filing all your files, and then whenever you create, receive or download a new file, you simply dump it onto your Desktop.  You need to be disciplined – forever!  Every new file you get, spend those extra few seconds to file it where it belongs!  Otherwise, in just a month or two, you’ll be worse off than before – half your files will be organized and half will be disorganized – and you won’t know which is which! Tip #3.  Choose the Root Folder of Your Structure Carefully Every data file (document, photo, music file, etc) that you create, own or is important to you, no matter where it came from, should be found within one single folder, and that one single folder should be located at the root of your C: drive (as a sub-folder of C:\).  In other words, do not base your folder structure in standard folders like “My Documents”.  If you do, then you’re leaving it up to the operating system engineers to decide what folder structure is best for you.  And every operating system has a different system!  In Windows 7 your files are found in C:\Users\YourName, whilst on Windows XP it was C:\Documents and Settings\YourName\My Documents.  In UNIX systems it’s often /home/YourName. These standard default folders tend to fill up with junk files and folders that are not at all important to you.  “My Documents” is the worst offender.  Every second piece of software you install, it seems, likes to create its own folder in the “My Documents” folder.  These folders usually don’t fit within your organizational structure, so don’t use them!  In fact, don’t even use the “My Documents” folder at all.  
Allow it to fill up with junk, and then simply ignore it.  It sounds heretical, but: Don’t ever visit your “My Documents” folder!  Remove your icons/links to “My Documents” and replace them with links to the folders you created and you care about! Create your own file system from scratch!  Probably the best place to put it would be on your D: drive – if you have one.  This way, all your files live on one drive, while all the operating system and software component files live on the C: drive – simply and elegantly separated.  The benefits of that are profound.  Not only are there obvious organizational benefits (see tip #10, below), but when it comes to migrate your data to a new computer, you can (sometimes) simply unplug your D: drive and plug it in as the D: drive of your new computer (this implies that the D: drive is actually a separate physical disk, and not a partition on the same disk as C:).  You also get a slight speed improvement (again, only if your C: and D: drives are on separate physical disks). Warning:  From tip #12, below, you will see that it’s actually a good idea to have exactly the same file system structure – including the drive it’s filed on – on all of the computers you own.  So if you decide to use the D: drive as the storage system for your own files, make sure you are able to use the D: drive on all the computers you own.  If you can’t ensure that, then you can still use a clever geeky trick to store your files on the D: drive, but still access them all via the C: drive (see tip #17, below). If you only have one hard disk (C:), then create a dedicated folder that will contain all your files – something like C:\Files.  The name of the folder is not important, but make it a single, brief word. There are several reasons for this: When creating a backup regime, it’s easy to decide what files should be backed up – they’re all in the one folder! If you ever decide to trade in your computer for a new one, you know exactly which files to migrate You will always know where to begin a search for any file If you synchronize files with other computers, it makes your synchronization routines very simple.   It also causes all your shortcuts to continue to work on the other machines (more about this in tip #24, below). Once you’ve decided where your files should go, then put all your files in there – Everything!  Completely disregard the standard, default folders that are created for you by the operating system (“My Music”, “My Pictures”, etc).  In fact, you can actually relocate many of those folders into your own structure (more about that below, in tip #6). The more completely you get all your data files (documents, photos, music, etc) and all your configuration settings into that one folder, then the easier it will be to perform all of the above tasks. Once this has been done, and all your files live in one folder, all the other folders in C:\ can be thought of as “operating system” folders, and therefore of little day-to-day interest for us. Here’s a screenshot of a nicely organized C: drive, where all user files are located within the \Files folder:   Tip #4.  Use Sub-Folders This would be our simplest and most obvious tip.  It almost goes without saying.  Any organizational system you decide upon (see tip #1) will require that you create sub-folders for your files.  Get used to creating folders on a regular basis. Tip #5.  Don’t be Shy About Depth Create as many levels of sub-folders as you need.  Don’t be scared to do so.  
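(A quick command-line aside for anyone who finds clicking through New Folder dialogs tedious: with the default command extensions enabled, a single md command at a Windows Command Prompt creates an entire chain of nested folders in one step.  The path below is only an illustration; substitute whatever fits your own structure:

md "C:\Files\Me\Photos\Holidays\Europe 2008"

md also accepts several paths in one command, so a handful of top-level folders can be created in one go.)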
Every time you notice an opportunity to group a set of related files into a sub-folder, do so.  Examples might include:  All the MP3s from one music CD, all the photos from one holiday, or all the documents from one client. It’s perfectly okay to put files into a folder called C:\Files\Me\From Others\Services\WestCo Bank\Statements\2009.  That’s only seven levels deep.  Ten levels is not uncommon.  Of course, it’s possible to take this too far.  If you notice yourself creating a sub-folder to hold only one file, then you’ve probably become a little over-zealous.  On the other hand, if you simply create a structure with only two levels (for example C:\Files\Work) then you really haven’t achieved any level of organization at all (unless you own only six files!).  Your “Work” folder will have become a dumping ground, just like your Desktop was, with most likely hundreds of files in it. Tip #6.  Move the Standard User Folders into Your Own Folder Structure Most operating systems, including Windows, create a set of standard folders for each of its users.  These folders then become the default location for files such as documents, music files, digital photos and downloaded Internet files.  In Windows 7, the full list is shown below: Some of these folders you may never use nor care about (for example, the Favorites folder, if you’re not using Internet Explorer as your browser).  Those ones you can leave where they are.  But you may be using some of the other folders to store files that are important to you.  Even if you’re not using them, Windows will still often treat them as the default storage location for many types of files.  When you go to save a standard file type, it can become annoying to be automatically prompted to save it in a folder that’s not part of your own file structure. But there’s a simple solution:  Move the folders you care about into your own folder structure!  If you do, then the next time you go to save a file of the corresponding type, Windows will prompt you to save it in the new, moved location. Moving the folders is easy.  Simply drag-and-drop them to the new location.  Here’s a screenshot of the default My Music folder being moved to my custom personal folder (Mark): Tip #7.  Name Files and Folders Intelligently This is another one that almost goes without saying, but we’ll say it anyway:  Do not allow files to be created that have meaningless names like Document1.doc, or folders called New Folder (2).  Take that extra 20 seconds and come up with a meaningful name for the file/folder – one that accurately divulges its contents without repeating the entire contents in the name. Tip #8.  Watch Out for Long Filenames Another way to tell if you have not yet created enough depth to your folder hierarchy is that your files often require really long names.  If you need to call a file Johnson Sales Figures March 2009.xls (which might happen to live in the same folder as Abercrombie Budget Report 2008.xls), then you might want to create some sub-folders so that the first file could be simply called March.xls, and living in the Clients\Johnson\Sales Figures\2009 folder. A well-placed file needs only a brief filename! Tip #9.  Use Shortcuts!  Everywhere! This is probably the single most useful and important tip we can offer.  A shortcut allows a file to be in two places at once. Why would you want that?  Well, the file and folder structure of every popular operating system on the market today is hierarchical.  
This means that all objects (files and folders) always live within exactly one parent folder.  It’s a bit like a tree.  A tree has branches (folders) and leaves (files).  Each leaf, and each branch, is supported by exactly one parent branch, all the way back to the root of the tree (which, incidentally, is exactly why C:\ is called the “root folder” of the C: drive). That hard disks are structured this way may seem obvious and even necessary, but it’s only one way of organizing data.  There are others:  Relational databases, for example, organize structured data entirely differently.  The main limitation of hierarchical filing structures is that a file can only ever be in one branch of the tree – in only one folder – at a time.  Why is this a problem?  Well, there are two main reasons why this limitation is a problem for computer users: The “correct” place for a file, according to our organizational rationale, is very often a very inconvenient place for that file to be located.  Just because it’s correctly filed doesn’t mean it’s easy to get to.  Your file may be “correctly” buried six levels deep in your sub-folder structure, but you may need regular and speedy access to this file every day.  You could always move it to a more convenient location, but that would mean that you would need to re-file back to its “correct” location it every time you’d finished working on it.  Most unsatisfactory. A file may simply “belong” in two or more different locations within your file structure.  For example, say you’re an accountant and you have just completed the 2009 tax return for John Smith.  It might make sense to you to call this file 2009 Tax Return.doc and file it under Clients\John Smith.  But it may also be important to you to have the 2009 tax returns from all your clients together in the one place.  So you might also want to call the file John Smith.doc and file it under Tax Returns\2009.  The problem is, in a purely hierarchical filing system, you can’t put it in both places.  Grrrrr! Fortunately, Windows (and most other operating systems) offers a way for you to do exactly that:  It’s called a “shortcut” (also known as an “alias” on Macs and a “symbolic link” on UNIX systems).  Shortcuts allow a file to exist in one place, and an icon that represents the file to be created and put anywhere else you please.  In fact, you can create a dozen such icons and scatter them all over your hard disk.  Double-clicking on one of these icons/shortcuts opens up the original file, just as if you had double-clicked on the original file itself. Consider the following two icons: The one on the left is the actual Word document, while the one on the right is a shortcut that represents the Word document.  Double-clicking on either icon will open the same file.  There are two main visual differences between the icons: The shortcut will have a small arrow in the lower-left-hand corner (on Windows, anyway) The shortcut is allowed to have a name that does not include the file extension (the “.docx” part, in this case) You can delete the shortcut at any time without losing any actual data.  The original is still intact.  All you lose is the ability to get to that data from wherever the shortcut was. So why are shortcuts so great?  Because they allow us to easily overcome the main limitation of hierarchical file systems, and put a file in two (or more) places at the same time.  You will always have files that don’t play nice with your organizational rationale, and can’t be filed in only one place.  
They demand to exist in two places.  Shortcuts allow this!  Furthermore, they allow you to collect your most often-opened files and folders together in one spot for convenient access.  The cool part is that the original files stay where they are, safe forever in their perfectly organized location. So your collection of most often-opened files can – and should – become a collection of shortcuts! If you’re still not convinced of the utility of shortcuts, consider the following well-known areas of a typical Windows computer: The Start Menu (and all the programs that live within it) The Quick Launch bar (or the Superbar in Windows 7) The “Favorite folders” area in the top-left corner of the Windows Explorer window (in Windows Vista or Windows 7) Your Internet Explorer Favorites or Firefox Bookmarks Each item in each of these areas is a shortcut!  Each of those areas exist for one purpose only:  For convenience – to provide you with a collection of the files and folders you access most often. It should be easy to see by now that shortcuts are designed for one single purpose:  To make accessing your files more convenient.  Each time you double-click on a shortcut, you are saved the hassle of locating the file (or folder, or program, or drive, or control panel icon) that it represents. Shortcuts allow us to invent a golden rule of file and folder organization: “Only ever have one copy of a file – never have two copies of the same file.  Use a shortcut instead” (this rule doesn’t apply to copies created for backup purposes, of course!) There are also lesser rules, like “don’t move a file into your work area – create a shortcut there instead”, and “any time you find yourself frustrated with how long it takes to locate a file, create a shortcut to it and place that shortcut in a convenient location.” So how to we create these massively useful shortcuts?  There are two main ways: “Copy” the original file or folder (click on it and type Ctrl-C, or right-click on it and select Copy):  Then right-click in an empty area of the destination folder (the place where you want the shortcut to go) and select Paste shortcut: Right-drag (drag with the right mouse button) the file from the source folder to the destination folder.  When you let go of the mouse button at the destination folder, a menu pops up: Select Create shortcuts here. Note that when shortcuts are created, they are often named something like Shortcut to Budget Detail.doc (windows XP) or Budget Detail – Shortcut.doc (Windows 7).   If you don’t like those extra words, you can easily rename the shortcuts after they’re created, or you can configure Windows to never insert the extra words in the first place (see our article on how to do this). And of course, you can create shortcuts to folders too, not just to files! Bottom line: Whenever you have a file that you’d like to access from somewhere else (whether it’s convenience you’re after, or because the file simply belongs in two places), create a shortcut to the original file in the new location. Tip #10.  Separate Application Files from Data Files Any digital organization guru will drum this rule into you.  Application files are the components of the software you’ve installed (e.g. Microsoft Word, Adobe Photoshop or Internet Explorer).  Data files are the files that you’ve created for yourself using that software (e.g. Word Documents, digital photos, emails or playlists). Software gets installed, uninstalled and upgraded all the time.  
Hopefully you always have the original installation media (or downloaded set-up file) kept somewhere safe, and can thus reinstall your software at any time.  This means that the software component files are of little importance.  Whereas the files you have created with that software is, by definition, important.  It’s a good rule to always separate unimportant files from important files. So when your software prompts you to save a file you’ve just created, take a moment and check out where it’s suggesting that you save the file.  If it’s suggesting that you save the file into the same folder as the software itself, then definitely don’t follow that suggestion.  File it in your own folder!  In fact, see if you can find the program’s configuration option that determines where files are saved by default (if it has one), and change it. Tip #11.  Organize Files Based on Purpose, Not on File Type If you have, for example a folder called Work\Clients\Johnson, and within that folder you have two sub-folders, Word Documents and Spreadsheets (in other words, you’re separating “.doc” files from “.xls” files), then chances are that you’re not optimally organized.  It makes little sense to organize your files based on the program that created them.  Instead, create your sub-folders based on the purpose of the file.  For example, it would make more sense to create sub-folders called Correspondence and Financials.  It may well be that all the files in a given sub-folder are of the same file-type, but this should be more of a coincidence and less of a design feature of your organization system. Tip #12.  Maintain the Same Folder Structure on All Your Computers In other words, whatever organizational system you create, apply it to every computer that you can.  There are several benefits to this: There’s less to remember.  No matter where you are, you always know where to look for your files If you copy or synchronize files from one computer to another, then setting up the synchronization job becomes very simple Shortcuts can be copied or moved from one computer to another with ease (assuming the original files are also copied/moved).  There’s no need to find the target of the shortcut all over again on the second computer Ditto for linked files (e.g Word documents that link to data in a separate Excel file), playlists, and any files that reference the exact file locations of other files. This applies even to the drive that your files are stored on.  If your files are stored on C: on one computer, make sure they’re stored on C: on all your computers.  Otherwise all your shortcuts, playlists and linked files will stop working! Tip #13.  Create an “Inbox” Folder Create yourself a folder where you store all files that you’re currently working on, or that you haven’t gotten around to filing yet.  You can think of this folder as your “to-do” list.  You can call it “Inbox” (making it the same metaphor as your email system), or “Work”, or “To-Do”, or “Scratch”, or whatever name makes sense to you.  It doesn’t matter what you call it – just make sure you have one! Once you have finished working on a file, you then move it from the “Inbox” to its correct location within your organizational structure. You may want to use your Desktop as this “Inbox” folder.  Rightly or wrongly, most people do.  It’s not a bad place to put such files, but be careful:  If you do decide that your Desktop represents your “to-do” list, then make sure that no other files find their way there.  
In other words, make sure that your “Inbox”, wherever it is, Desktop or otherwise, is kept free of junk – stray files that don’t belong there. So where should you put this folder, which, almost by definition, lives outside the structure of the rest of your filing system?  Well, first and foremost, it has to be somewhere handy.  This will be one of your most-visited folders, so convenience is key.  Putting it on the Desktop is a great option – especially if you don’t have any other folders on your Desktop:  the folder then becomes supremely easy to find in Windows Explorer: You would then create shortcuts to this folder in convenient spots all over your computer (“Favorite Links”, “Quick Launch”, etc). Tip #14.  Ensure You have Only One “Inbox” Folder Once you’ve created your “Inbox” folder, don’t use any other folder location as your “to-do list”.  Throw every incoming or created file into the Inbox folder as you create/receive it.  This keeps the rest of your computer pristine and free of randomly created or downloaded junk.  The last thing you want to be doing is checking multiple folders to see all your current tasks and projects.  Gather them all together into one folder. Here are some tips to help ensure you only have one Inbox: Set the default “save” location of all your programs to this folder. Set the default “download” location for your browser to this folder. If this folder is not your desktop (recommended) then also see if you can make a point of not putting “to-do” files on your desktop.  This keeps your desktop uncluttered and Zen-like: (the Inbox folder is in the bottom-right corner) Tip #15.  Be Vigilant about Clearing Your “Inbox” Folder This is one of the keys to staying organized.  If you let your “Inbox” overflow (i.e. allow there to be more than, say, 30 files or folders in there), then you’re probably going to start feeling like you’re overwhelmed:  You’re not keeping up with your to-do list.  Once your Inbox gets beyond a certain point (around 30 files, studies have shown), then you’ll simply start to avoid it.  You may continue to put files in there, but you’ll be scared to look at it, fearing the “out of control” feeling that all overworked, chaotic or just plain disorganized people regularly feel. So, here’s what you can do: Visit your Inbox/to-do folder regularly (at least five times per day). Scan the folder regularly for files that you have completed working on and are ready for filing.  File them immediately. Make it a source of pride to keep the number of files in this folder as small as possible.  If you value peace of mind, then make the emptiness of this folder one of your highest (computer) priorities If you know that a particular file has been in the folder for more than, say, six weeks, then admit that you’re not actually going to get around to processing it, and move it to its final resting place. Tip #16.  File Everything Immediately, and Use Shortcuts for Your Active Projects As soon as you create, receive or download a new file, store it away in its “correct” folder immediately.  Then, whenever you need to work on it (possibly straight away), create a shortcut to it in your “Inbox” (“to-do”) folder or your desktop.  That way, all your files are always in their “correct” locations, yet you still have immediate, convenient access to your current, active files.  When you finish working on a file, simply delete the shortcut. Ideally, your “Inbox” folder – and your Desktop – should contain no actual files or folders.  They should simply contain shortcuts. 
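(If you opt for a dedicated folder rather than the Desktop, creating it takes seconds from a Command Prompt; the location below is only an example, so adapt it to wherever your own structure lives:

md C:\Files\Inbox

You can then point your programs' default save locations and your browser's download folder at it, as tip #14 suggests.)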
Tip #17.  Use Directory Symbolic Links (or Junctions) to Maintain One Unified Folder Structure Using this tip, we can get around a potential hiccup that we can run into when creating our organizational structure – the issue of having more than one drive on our computer (C:, D:, etc).  We might have files we need to store on the D: drive for space reasons, and yet want to base our organized folder structure on the C: drive (or vice-versa). Your chosen organizational structure may dictate that all your files must be accessed from the C: drive (for example, the root folder of all your files may be something like C:\Files).  And yet you may still have a D: drive and wish to take advantage of the hundreds of spare Gigabytes that it offers.  Did you know that it’s actually possible to store your files on the D: drive and yet access them as if they were on the C: drive?  And no, we’re not talking about shortcuts here (although the concept is very similar). By using the shell command mklink, you can essentially take a folder that lives on one drive and create an alias for it on a different drive (you can do lots more than that with mklink – for a full rundown on this programs capabilities, see our dedicated article).  These aliases are called directory symbolic links (and used to be known as junctions).  You can think of them as “virtual” folders.  They function exactly like regular folders, except they’re physically located somewhere else. For example, you may decide that your entire D: drive contains your complete organizational file structure, but that you need to reference all those files as if they were on the C: drive, under C:\Files.  If that was the case you could create C:\Files as a directory symbolic link – a link to D:, as follows: mklink /d c:\files d:\ Or it may be that the only files you wish to store on the D: drive are your movie collection.  You could locate all your movie files in the root of your D: drive, and then link it to C:\Files\Media\Movies, as follows: mklink /d c:\files\media\movies d:\ (Needless to say, you must run these commands from a command prompt – click the Start button, type cmd and press Enter) Tip #18. Customize Your Folder Icons This is not strictly speaking an organizational tip, but having unique icons for each folder does allow you to more quickly visually identify which folder is which, and thus saves you time when you’re finding files.  An example is below (from my folder that contains all files downloaded from the Internet): To learn how to change your folder icons, please refer to our dedicated article on the subject. Tip #19.  Tidy Your Start Menu The Windows Start Menu is usually one of the messiest parts of any Windows computer.  Every program you install seems to adopt a completely different approach to placing icons in this menu.  Some simply put a single program icon.  Others create a folder based on the name of the software.  And others create a folder based on the name of the software manufacturer.  It’s chaos, and can make it hard to find the software you want to run. Thankfully we can avoid this chaos with useful operating system features like Quick Launch, the Superbar or pinned start menu items. Even so, it would make a lot of sense to get into the guts of the Start Menu itself and give it a good once-over.  All you really need to decide is how you’re going to organize your applications.  A structure based on the purpose of the application is an obvious candidate.  
Below is an example of one such structure: In this structure, Utilities means software whose job it is to keep the computer itself running smoothly (configuration tools, backup software, Zip programs, etc).  Applications refers to any productivity software that doesn’t fit under the headings Multimedia, Graphics, Internet, etc. In case you’re not aware, every icon in your Start Menu is a shortcut and can be manipulated like any other shortcut (copied, moved, deleted, etc). With the Windows Start Menu (all version of Windows), Microsoft has decided that there be two parallel folder structures to store your Start Menu shortcuts.  One for you (the logged-in user of the computer) and one for all users of the computer.  Having two parallel structures can often be redundant:  If you are the only user of the computer, then having two parallel structures is totally redundant.  Even if you have several users that regularly log into the computer, most of your installed software will need to be made available to all users, and should thus be moved out of the “just you” version of the Start Menu and into the “all users” area. To take control of your Start Menu, so you can start organizing it, you’ll need to know how to access the actual folders and shortcut files that make up the Start Menu (both versions of it).  To find these folders and files, click the Start button and then right-click on the All Programs text (Windows XP users should right-click on the Start button itself): The Open option refers to the “just you” version of the Start Menu, while the Open All Users option refers to the “all users” version.  Click on the one you want to organize. A Windows Explorer window then opens with your chosen version of the Start Menu selected.  From there it’s easy.  Double-click on the Programs folder and you’ll see all your folders and shortcuts.  Now you can delete/rename/move until it’s just the way you want it. Note:  When you’re reorganizing your Start Menu, you may want to have two Explorer windows open at the same time – one showing the “just you” version and one showing the “all users” version.  You can drag-and-drop between the windows. Tip #20.  Keep Your Start Menu Tidy Once you have a perfectly organized Start Menu, try to be a little vigilant about keeping it that way.  Every time you install a new piece of software, the icons that get created will almost certainly violate your organizational structure. So to keep your Start Menu pristine and organized, make sure you do the following whenever you install a new piece of software: Check whether the software was installed into the “just you” area of the Start Menu, or the “all users” area, and then move it to the correct area. Remove all the unnecessary icons (like the “Read me” icon, the “Help” icon (you can always open the help from within the software itself when it’s running), the “Uninstall” icon, the link(s)to the manufacturer’s website, etc) Rename the main icon(s) of the software to something brief that makes sense to you.  For example, you might like to rename Microsoft Office Word 2010 to simply Word Move the icon(s) into the correct folder based on your Start Menu organizational structure And don’t forget:  when you uninstall a piece of software, the software’s uninstall routine is no longer going to be able to remove the software’s icon from the Start Menu (because you moved and/or renamed it), so you’ll need to remove that icon manually. Tip #21.  
Tidy C:\ The root of your C: drive (C:\) is a common dumping ground for files and folders – both by the users of your computer and by the software that you install on your computer.  It can become a mess. There’s almost no software these days that requires itself to be installed in C:\.  99% of the time it can and should be installed into C:\Program Files.  And as for your own files, well, it’s clear that they can (and almost always should) be stored somewhere else. In an ideal world, your C:\ folder should look like this (on Windows 7): Note that there are some system files and folders in C:\ that are usually and deliberately “hidden” (such as the Windows virtual memory file pagefile.sys, the boot loader file bootmgr, and the System Volume Information folder).  Hiding these files and folders is a good idea, as they need to stay where they are and are almost never needed to be opened or even seen by you, the user.  Hiding them prevents you from accidentally messing with them, and enhances your sense of order and well-being when you look at your C: drive folder. Tip #22.  Tidy Your Desktop The Desktop is probably the most abused part of a Windows computer (from an organization point of view).  It usually serves as a dumping ground for all incoming files, as well as holding icons to oft-used applications, plus some regularly opened files and folders.  It often ends up becoming an uncontrolled mess.  See if you can avoid this.  Here’s why… Application icons (Word, Internet Explorer, etc) are often found on the Desktop, but it’s unlikely that this is the optimum place for them.  The “Quick Launch” bar (or the Superbar in Windows 7) is always visible and so represents a perfect location to put your icons.  You’ll only be able to see the icons on your Desktop when all your programs are minimized.  It might be time to get your application icons off your desktop… You may have decided that the Inbox/To-do folder on your computer (see tip #13, above) should be your Desktop.  If so, then enough said.  Simply be vigilant about clearing it and preventing it from being polluted by junk files (see tip #15, above).  On the other hand, if your Desktop is not acting as your “Inbox” folder, then there’s no reason for it to have any data files or folders on it at all, except perhaps a couple of shortcuts to often-opened files and folders (either ongoing or current projects).  Everything else should be moved to your “Inbox” folder. In an ideal world, it might look like this: Tip #23.  Move Permanent Items on Your Desktop Away from the Top-Left Corner When files/folders are dragged onto your desktop in a Windows Explorer window, or when shortcuts are created on your Desktop from Internet Explorer, those icons are always placed in the top-left corner – or as close as they can get.  If you have other files, folders or shortcuts that you keep on the Desktop permanently, then it’s a good idea to separate these permanent icons from the transient ones, so that you can quickly identify which ones the transients are.  An easy way to do this is to move all your permanent icons to the right-hand side of your Desktop.  That should keep them separated from incoming items. Tip #24.  Synchronize If you have more than one computer, you’ll almost certainly want to share files between them.  If the computers are permanently attached to the same local network, then there’s no need to store multiple copies of any one file or folder – shortcuts will suffice.  
However, if the computers are not always on the same network, then you will at some point need to copy files between them.  For files that need to permanently live on both computers, the ideal way to do this is to synchronize the files, as opposed to simply copying them. We only have room here to write a brief summary of synchronization, not a full article.  In short, there are several different types of synchronization: Where the contents of one folder are accessible anywhere, such as with Dropbox Where the contents of any number of folders are accessible anywhere, such as with Windows Live Mesh Where any files or folders from anywhere on your computer are synchronized with exactly one other computer, such as with the Windows “Briefcase”, Microsoft SyncToy, or (much more powerful, yet still free) SyncBack from 2BrightSparks.  This only works when both computers are on the same local network, at least temporarily. A great advantage of synchronization solutions is that once you’ve got it configured the way you want it, the sync process happens the same way every time.  Click a button (or schedule it to happen automatically) and all your files are automagically put where they’re supposed to be. If you maintain the same file and folder structure on both computers, then you can also sync files that depend upon the correct location of other files – shortcuts, playlists and office documents that link to other office documents – and the synchronized files will still work on the other computer! Tip #25.  Hide Files You Never Need to See If you have your files well organized, you will often be able to tell if a file is out of place just by glancing at the contents of a folder (for example, it should be pretty obvious if you look in a folder that contains all the MP3s from one music CD and see a Word document in there).  This is a good thing – it allows you to determine if there are files out of place with a quick glance.  Yet sometimes there are files in a folder that seem out of place but actually need to be there, such as the “folder art” JPEGs in music folders, and various files in the root of the C: drive.  If such files never need to be opened by you, then a good idea is to simply hide them.  Then, the next time you glance at the folder, you won’t have to remember whether that file was supposed to be there or not, because you won’t see it at all! To hide a file, simply right-click on it and choose Properties: Then simply tick the Hidden tick-box:   Tip #26.  Keep Every Setup File These days most software is downloaded from the Internet.  Whenever you download a piece of software, keep it.  You never know when you’ll need to reinstall the software. Further, keep with it an Internet shortcut that links back to the website where you originally downloaded it, in case you ever need to check for updates. See tip #33 below for a full description of the excellence of organizing your setup files. Tip #27.  Try to Minimize the Number of Folders that Contain Both Files and Sub-folders Some of the folders in your organizational structure will contain only files.  Others will contain only sub-folders.  And you will also have some folders that contain both files and sub-folders.  You will notice slight improvements in how long it takes you to locate a file if you try to avoid this third type of folder.  It’s not always possible, of course – you’ll always have some of these folders, but see if you can avoid it. 
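If you would like a quick audit of how many of these “mixed” folders you currently have, here is a minimal Python sketch that walks a folder tree and flags every folder containing both files and sub-folders. The starting path is only an example – point it at the base of your own structure.

import os

root = r"C:\Files"  # example root - change to the base of your own structure

for dirpath, dirnames, filenames in os.walk(root):
    # Ignore common hidden helper files when deciding whether a folder is "mixed"
    visible_files = [f for f in filenames if f.lower() not in ("desktop.ini", "thumbs.db")]
    if dirnames and visible_files:
        print(f"{dirpath}: {len(dirnames)} sub-folders and {len(visible_files)} files")

The next paragraph shows a simple way to deal with the folders this turns up.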
One way of doing this is to take all the leftover files that didn’t end up getting stored in a sub-folder and create a special “Miscellaneous” or “Other” folder for them. Tip #28.  Starting a Filename with an Underscore Brings it to the Top of a List Further to the previous tip, if you name that “Miscellaneous” or “Other” folder in such a way that its name begins with an underscore “_”, then it will appear at the top of the list of files/folders. The screenshot below is an example of this.  Each folder in the list contains a set of digital photos.  The folder at the top of the list, _Misc, contains random photos that didn’t deserve their own dedicated folder: Tip #29.  Clean Up those CD-ROMs and (shudder!) Floppy Disks Have you got a pile of CD-ROMs stacked on a shelf of your office?  Old photos, or files you archived off onto CD-ROM (or even worse, floppy disks!) because you didn’t have enough disk space at the time?  In the meantime have you upgraded your computer and now have 500 Gigabytes of space you don’t know what to do with?  If so, isn’t it time you tidied up that stack of disks and filed them into your gorgeous new folder structure? So what are you waiting for?  Bite the bullet, copy them all back onto your computer, file them in their appropriate folders, and then back the whole lot up onto a shiny new 1000Gig external hard drive! Useful Folders to Create This next section suggests some useful folders that you might want to create within your folder structure.  I’ve personally found them to be indispensable. The first three are all about convenience – handy folders to create and then put somewhere that you can always access instantly.  For each one, it’s not so important where the actual folder is located, but it’s very important where you put the shortcut(s) to the folder.  You might want to locate the shortcuts: On your Desktop In your “Quick Launch” area (or pinned to your Windows 7 Superbar) In your Windows Explorer “Favorite Links” area Tip #30.  Create an “Inbox” (“To-Do”) Folder This has already been mentioned in depth (see tip #13), but we wanted to reiterate its importance here.  This folder contains all the recently created, received or downloaded files that you have not yet had a chance to file away properly, and it also may contain files that you have yet to process.  In effect, it becomes a sort of “to-do list”.  It doesn’t have to be called “Inbox” – you can call it whatever you want. Tip #31.  Create a Folder where Your Current Projects are Collected Rather than going hunting for them all the time, or dumping them all on your desktop, create a special folder where you put links (or work folders) for each of the projects you’re currently working on. You can locate this folder in your “Inbox” folder, on your desktop, or anywhere at all – just so long as there’s a way of getting to it quickly, such as putting a link to it in Windows Explorer’s “Favorite Links” area: Tip #32.  Create a Folder for Files and Folders that You Regularly Open You will always have a few files that you open regularly, whether it be a spreadsheet of your current accounts, or a favorite playlist.  These are not necessarily “current projects”, rather they’re simply files that you always find yourself opening.  Typically such files would be located on your desktop (or even better, shortcuts to those files).  Why not collect all such shortcuts together and put them in their own special folder? 
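You don’t even have to create those shortcuts by hand. The following is a hedged Python sketch that builds such a “Quick links” folder and fills it with shortcuts to a few often-opened files; it assumes the pywin32 package is installed, and the target paths are purely illustrative.

import os
import win32com.client  # from the pywin32 package (assumption)

quick_links = os.path.expanduser(r"~\Quick links")
os.makedirs(quick_links, exist_ok=True)

# Illustrative targets only - substitute the files you really open all the time
targets = [r"C:\Files\Mark\Accounts.xlsx", r"C:\Files\Mark\Playlists\Favourites.m3u"]

shell = win32com.client.Dispatch("WScript.Shell")
for target in targets:
    name = os.path.splitext(os.path.basename(target))[0] + ".lnk"
    shortcut = shell.CreateShortCut(os.path.join(quick_links, name))
    shortcut.Targetpath = target
    shortcut.save()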
As with the “Current Projects” folder (above), you would want to locate that folder somewhere convenient.  Below is an example of a folder called “Quick links”, with about seven files (shortcuts) in it, that is accessible through the Windows Quick Launch bar: See tip #37 below for a full explanation of the power of the Quick Launch bar. Tip #33.  Create a “Set-ups” Folder A typical computer has dozens of applications installed on it.  For each piece of software, there are often many different pieces of information you need to keep track of, including: The original installation setup file(s).  This can be anything from a simple 100Kb setup.exe file you downloaded from a website, all the way up to a 4Gig ISO file that you copied from a DVD-ROM that you purchased. The home page of the software manufacturer (in case you need to look up something on their support pages, their forum or their online help) The page containing the download link for your actual file (in case you need to re-download it, or download an upgraded version) The serial number Your proof-of-purchase documentation Any other template files, plug-ins, themes, etc that also need to get installed For each piece of software, it’s a great idea to gather all of these files together and put them in a single folder.  The folder can be the name of the software (plus possibly a very brief description of what it’s for – in case you can’t remember what the software does based on its name).  Then you would gather all of these folders together into one place, and call it something like “Software” or “Setups”. If you have enough of these folders (I have several hundred, being a geek, collected over 20 years), then you may want to further categorize them.  My own categorization structure is based on “platform” (operating system): The last seven folders each represent one platform/operating system, while _Operating Systems contains set-up files for installing the operating systems themselves.  _Hardware contains ROMs for hardware I own, such as routers. Within the Windows folder (above), you can see the beginnings of the vast library of software I’ve compiled over the years: An example of a typical application folder looks like this: Tip #34.  Have a “Settings” Folder We all know that our documents are important.  So are our photos and music files.  We save all of these files into folders, and then locate them afterwards and double-click on them to open them.  But there are many files that are important to us that can’t be saved into folders, and then searched for and double-clicked later on.  These files certainly contain important information that we need, but are often created internally by an application, and saved wherever that application feels is appropriate. A good example of this is the “PST” file that Outlook creates for us and uses to store all our emails, contacts, appointments and so forth.  Another example would be the collection of Bookmarks that Firefox stores on your behalf. And yet another example would be the customized settings and configuration files of all our software.  Granted, most Windows programs store their configuration in the Registry, but there are still many programs that use configuration files to store their settings. Imagine if you lost all of the above files!  And yet, when people are backing up their computers, they typically only back up the files they know about – those that are stored in the “My Documents” folder, etc.  
If they had a hard disk failure or their computer was lost or stolen, their backup files would not include some of the most vital files they owned.  Also, when migrating to a new computer, it’s vital to ensure that these files make the journey. It can be a very useful idea to create yourself a folder to store all your “settings” – files that are important to you but which you never actually search for by name and double-click on to open them.  Otherwise, next time you go to set up a new computer just the way you want it, you’ll need to spend hours recreating the configuration of your previous computer! So how do we get our important files into this folder?  Well, we have a few options: Some programs (such as Outlook and its PST files) allow you to place these files wherever you want.  If you delve into the program’s options, you will find a setting somewhere that controls the location of the important settings files (or “personal storage” – PST – when it comes to Outlook) Some programs do not allow you to change such locations in any easy way, but if you get into the Registry, you can sometimes find a registry key that refers to the location of the file(s).  Simply move the file into your Settings folder and adjust the registry key to refer to the new location. Some programs stubbornly refuse to allow their settings files to be placed anywhere other than where they stipulate.  When faced with programs like these, you have three choices:  (1) You can ignore those files, (2) You can copy the files into your Settings folder (let’s face it – settings don’t change very often), or (3) you can use synchronization software, such as the Windows Briefcase, to make synchronized copies of all your files in your Settings folder.  All you then have to do is to remember to run your sync software periodically (perhaps just before you run your backup software!). There are some other things you may decide to locate inside this new “Settings” folder: Exports of registry keys (from the many applications that store their configurations in the Registry).  This is useful for backup purposes or for migrating to a new computer Notes you’ve made about all the specific customizations you have made to a particular piece of software (so that you’ll know how to do it all again on your next computer) Shortcuts to webpages that detail how to tweak certain aspects of your operating system or applications so they are just the way you like them (such as how to remove the words “Shortcut to” from the beginning of newly created shortcuts).  In other words, you’d want to create shortcuts to half the pages on the How-To Geek website! Here’s an example of a “Settings” folder: Windows Features that Help with Organization This section details some of the features of Microsoft Windows that are a boon to anyone hoping to stay optimally organized. Tip #35.  Use the “Favorite Links” Area to Access Oft-Used Folders Once you’ve created your great new filing system, work out which folders you access most regularly, or which serve as great starting points for locating the rest of the files in your folder structure, and then put links to those folders in your “Favorite Links” area of the left-hand side of the Windows Explorer window (simply called “Favorites” in Windows 7):   Some ideas for folders you might want to add there include: Your “Inbox” folder (or whatever you’ve called it) – most important! The base of your filing structure (e.g. 
C:\Files) A folder containing shortcuts to often-accessed folders on other computers around the network (shown above as Network Folders) A folder containing shortcuts to your current projects (unless that folder is in your “Inbox” folder) Getting folders into this area is very simple – just locate the folder you’re interested in and drag it there! Tip #36.  Customize the Places Bar in the File/Open and File/Save Boxes Consider the screenshot below: The highlighted icons (collectively known as the “Places Bar”) can be customized to refer to any folder location you want, allowing instant access to any part of your organizational structure. Note:  These File/Open and File/Save boxes have been superseded by new versions that use the Windows Vista/Windows 7 “Favorite Links”, but the older versions (shown above) are still used by a surprisingly large number of applications. The easiest way to customize these icons is to use the Group Policy Editor, but not everyone has access to this program.  If you do, open it up and navigate to: User Configuration > Administrative Templates > Windows Components > Windows Explorer > Common Open File Dialog If you don’t have access to the Group Policy Editor, then you’ll need to get into the Registry.  Navigate to: HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Policies\comdlg32\Placesbar It should then be easy to make the desired changes.  Log off and log on again to allow the changes to take effect. Tip #37.  Use the Quick Launch Bar as an Application and File Launcher That Quick Launch bar (to the right of the Start button) is a lot more useful than people give it credit for.  Most people simply have half a dozen icons in it, and use it to start just those programs.  But it can actually be used to instantly access just about anything in your filing system: For complete instructions on how to set this up, visit our dedicated article on this topic. Tip #38.  Put a Shortcut to Windows Explorer into Your Quick Launch Bar This is only necessary in Windows Vista and Windows XP.  The Microsoft boffins finally got wise and added it to the Windows 7 Superbar by default. Windows Explorer – the program used for managing your files and folders – is one of the most useful programs in Windows.  Anyone who considers themselves serious about being organized needs instant access to this program at any time.  A great place to create a shortcut to this program is in the Windows XP and Windows Vista “Quick Launch” bar: To get it there, locate it in your Start Menu (usually under “Accessories”) and then right-drag it down into your Quick Launch bar (and create a copy). Tip #39.  Customize the Starting Folder for Your Windows 7 Explorer Superbar Icon If you’re on Windows 7, your Superbar will include a Windows Explorer icon.  Clicking on the icon will launch Windows Explorer (of course), and will start you off in your “Libraries” folder.  Libraries may be fine as a starting point, but if you have created yourself an “Inbox” folder, then it would probably make more sense to start off in this folder every time you launch Windows Explorer. To change this default/starting folder location, first right-click the Explorer icon in the Superbar, and then right-click Properties. Then, in the Target field of the Windows Explorer Properties box that appears, type %windir%\explorer.exe followed by the path of the folder you wish to start in.  
For example: %windir%\explorer.exe C:\Files If that folder happened to be on the Desktop (and called, say, “Inbox”), then you would use the following cleverness: %windir%\explorer.exe shell:desktop\Inbox Then click OK and test it out. Tip #40.  Ummmmm…. No, that’s it.  I can’t think of another one.  That’s all of the tips I can come up with.  I only created this one because 40 is such a nice round number… Case Study – An Organized PC To finish off the article, I have included a few screenshots of my (main) computer (running Vista).  The aim here is twofold: To give you a sense of what it looks like when the above, sometimes abstract, tips are applied to a real-life computer, and To offer some ideas about folders and structure that you may want to steal to use on your own PC. Let’s start with the C: drive itself.  Very minimal.  All my files are contained within C:\Files.  I’ll confine the rest of the case study to this folder: That folder contains the following: Mark: My personal files VC: My business (Virtual Creations, Australia) Others contains files created by friends and family Data contains files from the rest of the world (can be thought of as “public” files, usually downloaded from the Net) Settings is described above in tip #34 The Data folder contains the following sub-folders: Audio:  Radio plays, audio books, podcasts, etc Development:  Programmer and developer resources, sample source code, etc (see below) Humour:  Jokes, funnies (those emails that we all receive) Movies:  Downloaded and ripped movies (all legal, of course!), their scripts, DVD covers, etc. Music:  (see below) Setups:  Installation files for software (explained in full in tip #33) System:  (see below) TV:  Downloaded TV shows Writings:  Books, instruction manuals, etc (see below) The Music folder contains the following sub-folders: Album covers:  JPEG scans Guitar tabs:  Text files of guitar sheet music Lists:  e.g. “Top 1000 songs of all time” Lyrics:  Text files MIDI:  Electronic music files MP3 (representing 99% of the Music folder):  MP3s, either ripped from CDs or downloaded, sorted by artist/album name Music Video:  Video clips Sheet Music:  usually PDFs The Data\Writings folder contains the following sub-folders: (all pretty self-explanatory) The Data\Development folder contains the following sub-folders: Again, all pretty self-explanatory (if you’re a geek) The Data\System folder contains the following sub-folders: These are usually themes, plug-ins and other downloadable program-specific resources. The Mark folder contains the following sub-folders: From Others:  Usually letters that other people (friends, family, etc) have written to me For Others:  Letters and other things I have created for other people Green Book:  None of your business Playlists:  M3U files that I have compiled of my favorite songs (plus one M3U playlist file for every album I own) Writing:  Fiction, philosophy and other musings of mine Mark Docs:  Shortcut to C:\Users\Mark Settings:  Shortcut to C:\Files\Settings\Mark The Others folder contains the following sub-folders: The VC (Virtual Creations, my business – I develop websites) folder contains the following sub-folders: And again, all of those are pretty self-explanatory. Conclusion These tips have saved my sanity and helped keep me a productive geek, but what about you? What tips and tricks do you have to keep your files organized?  Please share them with us in the comments.  
Come on, don’t be shy…


  • Is there a Telecommunications Reference Architecture?

    - by raul.goycoolea
Abstract   Reference architecture provides needed architectural information that can be provided in advance to an enterprise to enable consistent architectural best practices. Enterprise Reference Architecture helps business owners to actualize their strategies, vision, objectives, and principles. It evaluates the IT systems, based on Reference Architecture goals, principles, and standards. It helps to reduce IT costs by increasing functionality, availability, scalability, etc. Telecom Reference Architecture provides customers with the flexibility to view bundled service bills online with the provision of multiple services. It provides real-time, flexible billing and charging systems, to handle complex promotions, discounts, and settlements with multiple parties. This paper attempts to describe the Reference Architecture for the Telecom Enterprises. It lays the foundation for a Telecom Reference Architecture by articulating the requirements, drivers, and pitfalls for telecom service providers. It describes generic reference architecture for telecom enterprises and moves on to explain how to achieve Enterprise Reference Architecture by using SOA.   Introduction   A Reference Architecture provides a methodology, set of practices, template, and standards based on a set of successful solutions implemented earlier. These solutions have been generalized and structured for the depiction of both a logical and a physical architecture, based on the harvesting of a set of patterns that describe observations in a number of successful implementations. It helps as a reference for the various architectures that an enterprise can implement to solve various problems. It can be used as the starting point or the point of comparisons for various departments/business entities of a company, or for the various companies for an enterprise. It provides multiple views for multiple stakeholders.   Major artifacts of the Enterprise Reference Architecture are methodologies, standards, metadata, documents, design patterns, etc.   Purpose of Reference Architecture   In most cases, architects spend a lot of time researching, investigating, defining, and re-arguing architectural decisions. It is like reinventing the wheel as their peers in other organizations or even the same organization have already spent a lot of time and effort defining their own architectural practices. This prevents an organization from learning from its own experiences and applying that knowledge for increased effectiveness.   
Reference architecture provides missing architectural information that can be provided in advance to project team members to enable consistent architectural best practices.   Enterprise Reference Architecture helps an enterprise to achieve the following at the abstract level:   ·       Reference architecture is more of a communication channel to an enterprise ·       Helps business owners to actualize their strategies, vision, objectives, and principles. ·       Evaluates the IT systems based on Reference Architecture Principles ·       Reduces IT spending through increasing functionality, availability, scalability, etc ·       A Real-time Integration Model helps to reduce the latency of the data updates ·       Is used to define a single source of Information ·       Provides a clear view on how to manage information and security ·       Defines the policy around the data ownership, product boundaries, etc. ·       Helps with cost optimization across project and solution portfolios by eliminating unused or duplicate investments and assets ·       Has a shorter implementation time and cost   Once the reference architecture is in place, the set of architectural principles, standards, reference models, and best practices ensure that the aligned investments have the greatest possible likelihood of success in both the near term and the long term (TCO).     Common pitfalls for Telecom Service Providers   Telecom Reference Architecture serves as the first step towards maturity for a telecom service provider. During the course of our assignments/experiences with telecom players, we have come across the following observations – some of these indicate a lack of maturity of the telecom service provider:   ·       In markets that are growing and not so mature, it has been observed that telcos have a significant number of in-house or home-grown applications. In some of these markets, the growth has been so rapid that IT has been unable to cope with business demands. Telcos have shown a tendency to come up with workarounds in their IT applications so as to meet business needs. ·       Even for core functions like provisioning or mediation, some telcos have tried to manage with home-grown applications. ·       Most of the applications do not have the required scalability or maintainability to sustain growth in volumes or functionality. ·       Applications face interoperability issues with other applications in the operator's landscape. Integrating a new application or network element requires considerable effort on the part of the other applications. ·       Application boundaries are not clear, and functionality that is not in the initial scope of that application gets pushed onto it. This results in the development of multiple small applications without proper boundaries. ·       Usage of legacy OSS/BSS systems, and poor integration across multiple COTS products and internal systems. Most of the integrations are developed on an ad-hoc, point-to-point basis. ·       Redundancy of the business functions in different applications ·       Fragmented data across the different applications and no integrated view of the strategic data ·       Many performance issues due to the use of complex integrations across OSS and BSS systems   However, this is where the maturity of the telecom industry as a whole can be of help. The collaborative efforts of telcos to overcome some of these problems have resulted in bodies like the TM Forum. 
They have come up with frameworks for business processes, data, applications, and technology for telecom service providers. These could be a good starting point for telcos to clean up their enterprise landscape.   Industry Trends in Telecom Reference Architecture   Telecom reference architectures are evolving rapidly because telcos are facing business and IT challenges.   “The reality is that there probably is no killer application, no silver bullet that the telcos can latch onto to carry them into a 21st Century.... Instead, there are probably hundreds – perhaps thousands – of niche applications.... And the only way to find which of these works for you is to try out lots of them, ramp up the ones that work, and discontinue the ones that fail.” – Martin Creaner, President & CTO, TM Forum.   The following trends have been observed in telecom reference architecture:   ·       Transformation of business structures to align with customer requirements ·       Adoption of more Internet-like technical architectures. The Web 2.0 concept is increasingly being used. ·       Virtualization of the traditional operations support system (OSS) ·       Adoption of SOA to support development of IP-based services ·       Adoption of frameworks like Service Delivery Platforms (SDPs) and IP Multimedia Subsystem (IMS) to enable seamless deployment of various services over fixed and mobile networks ·       Replacement of in-house, customized, and stove-piped OSS/BSS with standards-based COTS products ·       Compliance with industry standards and frameworks like eTOM, SID, and TAM to enable seamless integration with other standards-based products   Drivers of Reference Architecture   The drivers of the Reference Architecture are Reference Architecture Goals, Principles, and Enterprise Vision and Telecom Transformation. The details are depicted in the diagram below. Figure 1. Drivers for Reference Architecture   Today’s telecom reference architectures should seamlessly integrate traditional legacy-based applications and transition to next-generation network technologies (e.g., IP multimedia subsystems). 
This has resulted in new requirements for flexible, real-time billing and OSS/BSS systems and implications on the service provider’s organizational requirements and structure.   Telecom reference architectures are today expected to:   ·       Integrate voice, messaging, email and other VAS over fixed and mobile networks, back end systems ·       Be able to provision multiple services and service bundles • Deliver converged voice, video and data services ·       Leverage the existing Network Infrastructure ·       Provide real-time, flexible billing and charging systems to handle complex promotions, discounts, and settlements with multiple parties. ·       Support charging of advanced data services such as VoIP, On-Demand, Services (e.g.  Video), IMS/SIP Services, Mobile Money, Content Services and IPTV. ·       Help in faster deployment of new services • Serve as an effective platform for collaboration between network IT and business organizations ·       Harness the potential of converging technology, networks, devices and content to develop multimedia services and solutions of ever-increasing sophistication on a single Internet Protocol (IP) ·       Ensure better service delivery and zero revenue leakage through real-time balance and credit management ·       Lower operating costs to drive profitability   Enterprise Reference Architecture   The Enterprise Reference Architecture (RA) fills the gap between the concepts and vocabulary defined by the reference model and the implementation. Reference architecture provides detailed architectural information in a common format such that solutions can be repeatedly designed and deployed in a consistent, high-quality, supportable fashion. This paper attempts to describe the Reference Architecture for the Telecom Application Usage and how to achieve the Enterprise Level Reference Architecture using SOA.   • Telecom Reference Architecture • Enterprise SOA based Reference Architecture   Telecom Reference Architecture   Tele Management Forum’s New Generation Operations Systems and Software (NGOSS) is an architectural framework for organizing, integrating, and implementing telecom systems. NGOSS is a component-based framework consisting of the following elements:   ·       The enhanced Telecom Operations Map (eTOM) is a business process framework. ·       The Shared Information Data (SID) model provides a comprehensive information framework that may be specialized for the needs of a particular organization. ·       The Telecom Application Map (TAM) is an application framework to depict the functional footprint of applications, relative to the horizontal processes within eTOM. ·       The Technology Neutral Architecture (TNA) is an integrated framework. TNA is an architecture that is sustainable through technology changes.   NGOSS Architecture Standards are:   ·       Centralized data ·       Loosely coupled distributed systems ·       Application components/re-use  ·       A technology-neutral system framework with technology specific implementations ·       Interoperability to service provider data/processes ·       Allows more re-use of business components across multiple business scenarios ·       Workflow automation   The traditional operator systems architecture consists of four layers,   ·       Business Support System (BSS) layer, with focus toward customers and business partners. Manages order, subscriber, pricing, rating, and billing information. 
·       Operations Support System (OSS) layer, built around product, service, and resource inventories. ·       Networks layer – consists of Network elements and 3rd Party Systems. ·       Integration Layer – to maximize application communication and overall solution flexibility.   Reference architecture for telecom enterprises is depicted below. Figure 2. Telecom Reference Architecture   The major building blocks of any Telecom Service Provider architecture are as follows:   1. Customer Relationship Management   CRM encompasses the end-to-end lifecycle of the customer: customer initiation/acquisition, sales, ordering, and service activation, customer care and support, proactive campaigns, cross sell/up sell, and retention/loyalty.   CRM also includes the collection of customer information and its application to personalize, customize, and integrate delivery of service to a customer, as well as to identify opportunities for increasing the value of the customer to the enterprise.   The key functionalities related to Customer Relationship Management are   ·       Manage the end-to-end lifecycle of a customer request for products. ·       Create and manage customer profiles. ·       Manage all interactions with customers – inquiries, requests, and responses. ·       Provide updates to Billing and other southbound systems on customer/account related updates such as customer/account creation, deletion, modification, request bills, final bill, duplicate bills, credit limits through Middleware. ·       Work with Order Management System, Product, and Service Management components within CRM. ·       Manage customer preferences – Involve all the touch points and channels to the customer, including contact center, retail stores, dealers, self service, and field service, as well as via any media (phone, face to face, web, mobile device, chat, email, SMS, mail, the customer's bill, etc.). ·       Support single interface for customer contact details, preferences, account details, offers, customer premise equipment, bill details, bill cycle details, and customer interactions.   CRM applications interact with customers through customer touch points like portals, point-of-sale terminals, interactive voice response systems, etc. Requests from customers are sent via fulfillment/provisioning to the billing system for order processing.   2. 
Billing and Revenue Management   Billing and Revenue Management handles the collection of appropriate usage records and production of timely and accurate bills – for providing pre-bill usage information and billing to customers; for processing their payments; and for performing payment collections. In addition, it handles customer inquiries about bills, provides billing inquiry status, and is responsible for resolving billing problems to the customer's satisfaction in a timely manner. This process grouping also supports prepayment for services.   The key functionalities provided by these applications are   ·       To ensure that enterprise revenue is billed and invoices delivered appropriately to customers. ·       To manage customers’ billing accounts, process their payments, perform payment collections, and monitor the status of the account balance. ·       To ensure the timely and effective fulfillment of all customer bill inquiries and complaints. ·       Collect the usage records from mediation and ensure appropriate rating and discounting of all usage and pricing. ·       Support revenue sharing; split charging where usage is guided to an account different from the service consumer. ·       Support prepaid and post-paid rating. ·       Send notification on approach / exceeding the usage thresholds as enforced by the subscribed offer, and / or as setup by the customer. ·       Support prepaid, post paid, and hybrid (where some services are prepaid and the rest of the services post paid) customers and conversion from post paid to prepaid, and vice versa. ·       Support different billing function requirements like charge prorating, promotion, discount, adjustment, waiver, write-off, account receivable, GL Interface, late payment fee, credit control, dunning, account or service suspension, re-activation, expiry, termination, contract violation penalty, etc. ·       Initiate direct debit to collect payment against an invoice outstanding. ·       Send notification to Middleware on different events; for example, payment receipt, pre-suspension, threshold exceed, etc.   Billing systems typically get usage data from mediation systems for rating and billing. They get provisioning requests from order management systems and inquiries from CRM systems. Convergent and real-time billing systems can directly get usage details from network elements.   3. Mediation   Mediation systems transform/translate the Raw or Native Usage Data Records into a general format that is acceptable to billing for their rating purposes.   The following lists the high-level roles and responsibilities executed by the Mediation system in the end-to-end solution.   ·       Collect Usage Data Records from different data sources – like network elements, routers, servers – via different protocol and interfaces. ·       Process Usage Data Records – Mediation will process Usage Data Records as per the source format. ·       Validate Usage Data Records from each source. ·       Segregates Usage Data Records coming from each source to multiple, based on the segregation requirement of end Application. ·       Aggregates Usage Data Records based on the aggregation rule if any from different sources. ·       Consolidates multiple Usage Data Records from each source. ·       Delivers formatted Usage Data Records to different end application like Billing, Interconnect, Fraud Management, etc. 
·       Generates audit trail for incoming Usage Data Records and keeps track of all the Usage Data Records at various stages of mediation process. ·       Checks duplicate Usage Data Records across files for a given time window.   4. Fulfillment   This area is responsible for providing customers with their requested products in a timely and correct manner. It translates the customer's business or personal need into a solution that can be delivered using the specific products in the enterprise's portfolio. This process informs the customers of the status of their purchase order, and ensures completion on time, as well as ensuring a delighted customer. These processes are responsible for accepting and issuing orders. They deal with pre-order feasibility determination, credit authorization, order issuance, order status and tracking, customer update on customer order activities, and customer notification on order completion. Order management and provisioning applications fall into this category.   The key functionalities provided by these applications are   ·       Issuing new customer orders, modifying open customer orders, or canceling open customer orders; ·       Verifying whether specific non-standard offerings sought by customers are feasible and supportable; ·       Checking the credit worthiness of customers as part of the customer order process; ·       Testing the completed offering to ensure it is working correctly; ·       Updating of the Customer Inventory Database to reflect that the specific product offering has been allocated, modified, or cancelled; ·       Assigning and tracking customer provisioning activities; ·       Managing customer provisioning jeopardy conditions; and ·       Reporting progress on customer orders and other processes to customer.   These applications typically get orders from CRM systems. They interact with network elements and billing systems for fulfillment of orders.   5. Enterprise Management   This process area includes those processes that manage enterprise-wide activities and needs, or have application within the enterprise as a whole. They encompass all business management processes that   ·       Are necessary to support the whole of the enterprise, including processes for financial management, legal management, regulatory management, process, cost, and quality management, etc.;   ·       Are responsible for setting corporate policies, strategies, and directions, and for providing guidelines and targets for the whole of the business, including strategy development and planning for areas, such as Enterprise Architecture, that are integral to the direction and development of the business;   ·       Occur throughout the enterprise, including processes for project management, performance assessments, cost assessments, etc.     (i) Enterprise Risk Management:   Enterprise Risk Management focuses on assuring that risks and threats to the enterprise value and/or reputation are identified, and appropriate controls are in place to minimize or eliminate the identified risks. The identified risks may be physical or logical/virtual. Successful risk management ensures that the enterprise can support its mission critical operations, processes, applications, and communications in the face of serious incidents such as security threats/violations and fraud attempts. 
Two key areas covered in Risk Management by telecom operators are:   ·       Revenue Assurance: Revenue assurance system will be responsible for identifying revenue loss scenarios across components/systems, and will help in rectifying the problems. The following lists the high-level roles and responsibilities executed by the Revenue Assurance system in the end-to-end solution. o   Identify all usage information dropped when networks are being upgraded. o   Interconnect bill verification. o   Identify where services are routinely provisioned but never billed. o   Identify poor sales policies that are intensifying collections problems. o   Find leakage where usage is sent to error bucket and never billed for. o   Find leakage where field service, CRM, and network build-out are not optimized.   ·       Fraud Management: Involves collecting data from different systems to identify abnormalities in traffic patterns, usage patterns, and subscription patterns to report suspicious activity that might suggest fraudulent usage of resources, resulting in revenue losses to the operator.   The key roles and responsibilities of the system component are as follows:   o   Fraud management system will capture and monitor high usage (over a certain threshold) in terms of duration, value, and number of calls for each subscriber. The threshold for each subscriber is decided by the system and fixed automatically. o   Fraud management will be able to detect the unauthorized access to services for certain subscribers. These subscribers may have been provided unauthorized services by employees. The component will raise the alert to the operator the very first time of such illegal calls or calls which are not billed. o   The solution will be to have an alarm management system that will deliver alarms to the operator/provider whenever it detects a fraud, thus minimizing fraud by catching it the first time it occurs. o   The Fraud Management system will be capable of interfacing with switches, mediation systems, and billing systems   (ii) Knowledge Management   This process focuses on knowledge management, technology research within the enterprise, and the evaluation of potential technology acquisitions.   Key responsibilities of knowledge base management are to   ·       Maintain knowledge base – Creation and updating of knowledge base on ongoing basis. ·       Search knowledge base – Search of knowledge base on keywords or category browse ·       Maintain metadata – Management of metadata on knowledge base to ensure effective management and search. ·       Run report generator. ·       Provide content – Add content to the knowledge base, e.g., user guides, operational manual, etc.   (iii) Document Management   It focuses on maintaining a repository of all electronic documents or images of paper documents relevant to the enterprise using a system.   (iv) Data Management   It manages data as a valuable resource for any enterprise. For telecom enterprises, the typical areas covered are Master Data Management, Data Warehousing, and Business Intelligence. It is also responsible for data governance, security, quality, and database management.   Key responsibilities of Data Management are   ·       Using ETL, extract the data from CRM, Billing, web content, ERP, campaign management, financial, network operations, asset management info, customer contact data, customer measures, benchmarks, process data, e.g., process inputs, outputs, and measures, into Enterprise Data Warehouse. 
·       Management of data traceability with source, data related business rules/decisions, data quality, data cleansing data reconciliation, competitors data – storage for all the enterprise data (customer profiles, products, offers, revenues, etc.) ·       Get online update through night time replication or physical backup process at regular frequency. ·       Provide the data access to business intelligence and other systems for their analysis, report generation, and use.   (v) Business Intelligence   It uses the Enterprise Data to provide the various analysis and reports that contain prospects and analytics for customer retention, acquisition of new customers due to the offers, and SLAs. It will generate right and optimized plans – bolt-ons for the customers.   The following lists the high-level roles and responsibilities executed by the Business Intelligence system at the Enterprise Level:   ·       It will do Pattern analysis and reports problem. ·       It will do Data Analysis – Statistical analysis, data profiling, affinity analysis of data, customer segment wise usage patterns on offers, products, service and revenue generation against services and customer segments. ·       It will do Performance (business, system, and forecast) analysis, churn propensity, response time, and SLAs analysis. ·       It will support for online and offline analysis, and report drill down capability. ·       It will collect, store, and report various SLA data. ·       It will provide the necessary intelligence for marketing and working on campaigns, etc., with cost benefit analysis and predictions.   It will advise on customer promotions with additional services based on loyalty and credit history of customer   ·       It will Interface with Enterprise Data Management system for data to run reports and analysis tasks. It will interface with the campaign schedules, based on historical success evidence.   (vi) Stakeholder and External Relations Management   It manages the enterprise's relationship with stakeholders and outside entities. Stakeholders include shareholders, employee organizations, etc. Outside entities include regulators, local community, and unions. Some of the processes within this grouping are Shareholder Relations, External Affairs, Labor Relations, and Public Relations.   (vii) Enterprise Resource Planning   It is used to manage internal and external resources, including tangible assets, financial resources, materials, and human resources. Its purpose is to facilitate the flow of information between all business functions inside the boundaries of the enterprise and manage the connections to outside stakeholders. ERP systems consolidate all business operations into a uniform and enterprise wide system environment.   The key roles and responsibilities for Enterprise System are given below:   ·        It will handle responsibilities such as core accounting, financial, and management reporting. ·       It will interface with CRM for capturing customer account and details. ·       It will interface with billing to capture the billing revenue and other financial data. ·       It will be responsible for executing the dunning process. Billing will send the required feed to ERP for execution of dunning. ·       It will interface with the CRM and Billing through batch interfaces. Enterprise management systems are like horizontals in the enterprise and typically interact with all major telecom systems. 
E.g., an ERP system interacts with CRM, Fulfillment, and Billing systems for different kinds of data exchanges.   6. External Interfaces/Touch Points   The typical external parties are customers, suppliers/partners, employees, shareholders, and other stakeholders. External interactions from/to a Service Provider to other parties can be achieved by a variety of mechanisms, including:   ·       Exchange of emails or faxes ·       Call Centers ·       Web Portals ·       Business-to-Business (B2B) automated transactions   These applications provide an Internet technology driven interface to external parties to undertake a variety of business functions directly for themselves. These can provide fully or partially automated service to external parties through various touch points.   Typical characteristics of these touch points are   ·       Pre-integrated self-service system, including stand-alone web framework or integration front end with a portal engine ·       Self services layer exposing atomic web services/APIs for reuse by multiple systems across the architectural environment ·       Portlets driven connectivity exposing data and services interoperability through a portal engine or web application   These touch points mostly interact with the CRM systems for requests, inquiries, and responses.   7. Middleware   The component will be primarily responsible for integrating the different systems components under a common platform. It should provide a Standards-Based Platform for building Service Oriented Architecture and Composite Applications. The following lists the high-level roles and responsibilities executed by the Middleware component in the end-to-end solution.   ·       As an integration framework, covering to and fro interfaces ·       Provide a web service framework with service registry. ·       Support SOA framework with SOA service registry. ·       Each of the interfaces from / to Middleware to other components would handle data transformation, translation, and mapping of data points. ·       Receive data from the caller / activate and/or forward the data to the recipient system in XML format. ·       Use standard XML for data exchange. ·       Provide the response back to the service/call initiator. ·       Provide a tracking until the response completion. ·       Keep a store transitional data against each call/transaction. ·       Interface through Middleware to get any information that is possible and allowed from the existing systems to enterprise systems; e.g., customer profile and customer history, etc. ·       Provide the data in a common unified format to the SOA calls across systems, and follow the Enterprise Architecture directive. ·       Provide an audit trail for all transactions being handled by the component.   8. Network Elements   The term Network Element means a facility or equipment used in the provision of a telecommunications service. Such terms also includes features, functions, and capabilities that are provided by means of such facility or equipment, including subscriber numbers, databases, signaling systems, and information sufficient for billing and collection or used in the transmission, routing, or other provision of a telecommunications service.   Typical network elements in a GSM network are Home Location Register (HLR), Intelligent Network (IN), Mobile Switching Center (MSC), SMS Center (SMSC), and network elements for other value added services like Push-to-talk (PTT), Ring Back Tone (RBT), etc.   
8. Network Elements

The term Network Element means a facility or equipment used in the provision of a telecommunications service. The term also includes features, functions, and capabilities that are provided by means of such a facility or equipment, including subscriber numbers, databases, signaling systems, and information sufficient for billing and collection or used in the transmission, routing, or other provision of a telecommunications service.

Typical network elements in a GSM network are the Home Location Register (HLR), Intelligent Network (IN), Mobile Switching Center (MSC), SMS Center (SMSC), and network elements for other value-added services like Push-to-Talk (PTT), Ring Back Tone (RBT), etc.

Network elements are invoked when subscribers use their telecom devices for any kind of usage. These elements generate usage data and pass it on to downstream systems like mediation and the billing system for rating and billing. They also integrate with provisioning systems for order/service fulfillment.

9. 3rd Party Applications

3rd party systems are applications like content providers, payment gateways, point-of-sale terminals, and databases/applications maintained by the Government.

Depending on applicability and the type of functionality provided by 3rd party applications, the integration with different telecom systems like CRM, provisioning, and billing will be carried out.

10. Service Delivery Platform

A service delivery platform (SDP) provides the architecture for the rapid deployment, provisioning, execution, management, and billing of value-added telecom services. SDPs are based on the concept of SOA and layered architecture. They support the delivery of voice, data services, and content in a network- and device-independent fashion. They allow application developers to aggregate network capabilities, services, and sources of content. SDPs typically contain layers for web services exposure, service application development, and network abstraction.

SOA Reference Architecture

The SOA concept is based on the principle of developing reusable business services and building applications by composing those services, instead of building monolithic applications in silos. It is about bridging the gap between business and IT through a set of business-aligned IT services, using a set of design principles, patterns, and techniques.

In an SOA, resources are made available to participants in a value net, enterprise, or line of business (typically spanning multiple applications within an enterprise or across multiple enterprises). It consists of a set of business-aligned IT services that collectively fulfill an organization's business processes and goals. We can choreograph these services into composite applications and invoke them through standard protocols. SOA, apart from agility and reusability, enables:

·       The business to specify processes as orchestrations of reusable services
·       Technology-agnostic business design, with technology hidden behind service interfaces
·       A contract-like interaction between business and IT, based on service SLAs
·       Accountability and governance, better aligned to business services
·       Untangling of application interconnections by allowing access only through service interfaces, reducing the daunting side effects of change
·       Reduced pressure to replace legacy applications and an extended lifetime for them, through encapsulation in services
·       A Cloud Computing paradigm, using web services technologies, that makes service outsourcing possible on an on-demand, utility-like, pay-per-usage basis

The following section presents the Reference Architecture (logical view) for the Telecom Solution. The new custom-built application needs to align with this logical architecture in the long run to achieve EA benefits.

Packaged implementation applications, such as ERP and billing applications, need to expose their functions as service providers (to be consumed by other applications) and interact with other applications as service consumers.

COTS applications need to expose services through wrappers such as adapters to utilize existing resources and, at the same time, achieve Enterprise Architecture goals and objectives.
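As a small illustration of the composition principle described above, the following Java sketch composes three reusable service contracts into one composite business capability. The service names and stub implementations are purely hypothetical; in the reference architecture these would be the exposed CRM, Billing, and Order Management services.

// Minimal sketch of the SOA composition idea: reusable, business-aligned services
// exposed behind interfaces and composed into a higher-level business capability.
public class CompositeOrderStatus {

    // Reusable service contracts (names are illustrative only).
    interface CustomerService { String customerSegment(String customerId); }
    interface BillingService  { double outstandingBalance(String customerId); }
    interface OrderService    { String latestOrderStatus(String customerId); }

    private final CustomerService customers;
    private final BillingService billing;
    private final OrderService orders;

    CompositeOrderStatus(CustomerService c, BillingService b, OrderService o) {
        this.customers = c; this.billing = b; this.orders = o;
    }

    // The composite "service": orchestrates three reusable services into one business answer.
    String accountSnapshot(String customerId) {
        return String.format("segment=%s, balance=%.2f, lastOrder=%s",
                customers.customerSegment(customerId),
                billing.outstandingBalance(customerId),
                orders.latestOrderStatus(customerId));
    }

    public static void main(String[] args) {
        CompositeOrderStatus composite = new CompositeOrderStatus(
                id -> "Gold",     // stub CRM implementation
                id -> 245.50,     // stub Billing implementation
                id -> "SHIPPED"); // stub Order Management implementation
        System.out.println(composite.accountSnapshot("CUST-1001"));
    }
}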
The following are the various layers for an Enterprise-level deployment of SOA. This diagram captures the abstract view of the Enterprise SOA layers and the important components of each layer. Layered architecture means decomposition of services such that most interactions occur between adjacent layers; however, there is no strict rule that top layers should not directly communicate with bottom layers.

The diagram below represents the important logical pieces that would result from the overall SOA transformation.

Figure 3. Enterprise SOA Reference Architecture

1. Operational System Layer: This layer consists of all packaged applications like CRM and ERP, custom-built applications, COTS-based applications like Billing, Revenue Management, and Fulfillment, and the Enterprise databases that are essential and contribute directly or indirectly to the Enterprise OSS/BSS Transformation.

ERP holds the data for Asset Lifecycle Management, Supply Chain, Advanced Procurement, Human Capital Management, etc.

CRM holds the data related to Order, Sales and Marketing, Customer Care, Partner Relationship Management, Loyalty, etc.

Content Management handles Enterprise Search and Query. The Billing application consists of the following components:

·       Collections Management, Customer Billing Management, Invoices, Real-Time Rating, Discounting, and Applying of Charges

Enterprise databases will hold both the application and service data, whether structured or unstructured.

MDM – Master data mainly consists of Customer, Order, Product, and Service data.

2. Enterprise Component Layer: This layer consists of the Application Services and Common Services that are responsible for realizing the functionality and maintaining the QoS of the exposed services. This layer uses container-based technologies such as application servers to implement the components, workload management, high availability, and load balancing.

Application Services: This service layer enables application, technology, and database abstraction so that the complex accessing logic is hidden from the other service layers. This is a basic service layer, which exposes application functionalities and data as reusable services. The three types of Application access services are:

·       Application Access Service: This service layer exposes application-level functionalities as reusable services for BSS-to-BSS and BSS-to-OSS integration. This layer is enabled using disparate technologies such as web services, integration servers, adapters, etc.

·       Data Access Service: This service layer exposes application data as reusable reference data services. This is done via direct interaction with application data, and it provides federated queries.

·       Network Access Service: This service layer exposes the provisioning layer as a reusable service for OSS-to-OSS integration. This integration service emphasizes the need for high performance, stateless process flows, and distributed design.

Common Services encompass the management of structured, semi-structured, and unstructured data and include information services, portal services, interaction services, infrastructure services, security services, etc.
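To make the Data Access Service idea more concrete, here is a minimal Java sketch of a service that returns one unified customer view while hiding the fact that the answer is federated from two underlying stores. The store split (a CRM-side view and a Billing-side view) and the attribute names are assumptions for illustration only.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Minimal Data Access Service sketch: expose reference data as a reusable service and
// hide the fact that the answer is federated from multiple underlying stores.
public class CustomerDataAccessService {

    // Each underlying store contributes a partial view of the customer (illustrative only).
    interface CustomerStore {
        Map<String, String> attributesFor(String customerId);
    }

    private final List<CustomerStore> stores = new ArrayList<>();

    public void addStore(CustomerStore store) {
        stores.add(store);
    }

    // The "federated query": consumers see one unified record, not the per-application access logic.
    public Map<String, String> unifiedCustomerView(String customerId) {
        Map<String, String> merged = new LinkedHashMap<>();
        for (CustomerStore store : stores) {
            merged.putAll(store.attributesFor(customerId));
        }
        return merged;
    }

    public static void main(String[] args) {
        CustomerDataAccessService service = new CustomerDataAccessService();
        service.addStore(id -> Map.of("name", "A. Kumar", "segment", "Gold"));       // CRM-side stub
        service.addStore(id -> Map.of("balance", "245.50", "plan", "PostpaidPlus")); // Billing-side stub
        System.out.println(service.unifiedCustomerView("CUST-1001"));
    }
}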
3. Integration Layer: This consists of service infrastructure components like the service bus, a service gateway for partner integration, the service registry, the service repository, and the BPEL processor. The service bus will carry the service invocation payloads/messages between consumers and providers. The other important functions expected from it are itinerary-based routing, distributed caching of routing information, transformations, and all qualities of service for messaging, like reliability, scalability, availability, etc. The service registry will hold all contracts (WSDL) of services, and it helps developers locate or discover services at design time or runtime.

The BPEL processor is useful in orchestrating the services to compose a complex business scenario or process. Workflow and business rules management are also required to support manual triggering of certain activities within a business process, based on the rules setup and the state machine information. The application, data, and service mediation layer typically forms the overall composite application development framework or SOA framework.
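The sketch below illustrates, in plain Java, two of the Integration Layer ideas above: publishing services into a registry so a process can discover them at runtime, and running a simple BPEL-style sequence that invokes the discovered services in order. The registry shape, service names, and string message format are hypothetical simplifications of a real registry and orchestration engine.

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

// Minimal sketch of Integration Layer ideas: a registry that lets a process locate
// services at runtime, and a simple BPEL-style sequence that orchestrates them in order.
public class IntegrationLayerSketch {

    // Stand-in for a service registry entry; real registries would hold WSDL/endpoint metadata.
    static class ServiceRegistry {
        private final Map<String, UnaryOperator<String>> services = new HashMap<>();
        void publish(String name, UnaryOperator<String> service) { services.put(name, service); }
        UnaryOperator<String> discover(String name) {
            UnaryOperator<String> s = services.get(name);
            if (s == null) throw new IllegalStateException("service not found: " + name);
            return s;
        }
    }

    // A trivial "process": invoke the named services in sequence, piping each response onward.
    static String runSequence(ServiceRegistry registry, String input, String... serviceNames) {
        String message = input;
        for (String name : serviceNames) {
            message = registry.discover(name).apply(message); // one orchestration step
        }
        return message;
    }

    public static void main(String[] args) {
        ServiceRegistry registry = new ServiceRegistry();
        registry.publish("ValidateOrder", msg -> msg + " -> validated");
        registry.publish("ProvisionService", msg -> msg + " -> provisioned");
        registry.publish("NotifyCustomer", msg -> msg + " -> customer notified");
        System.out.println(runSequence(registry, "ORDER-42",
                "ValidateOrder", "ProvisionService", "NotifyCustomer"));
    }
}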
4. Business Process Layer: This is typically the intermediate services layer and represents Shared Business Process Services. At the Enterprise Level, these services come from the Customer Management, Order Management, Billing, Finance, and Asset Management application domains.

5. Access Layer: This layer consists of portals for the Enterprise and provides a single view of Enterprise information management and dashboard services.

6. Channel Layer: This consists of the various devices and applications that form part of the extended enterprise, and the browsers through which users access the applications.

7. Client Layer: This designates the different types of users accessing the enterprise applications. The type of user would typically be an important factor in determining the level of access to applications.

8. Vertical pieces like management, monitoring, security, and development cut across all horizontal layers. Management and monitoring involve all aspects of SOA (services, SLAs, and other QoS lifecycle processes) for both applications and services, surrounding SOA governance.

9. EA Governance, Reference Architecture, Roadmap, Principles, and Best Practices: EA Governance is important in terms of providing the overall direction to SOA implementation within the enterprise. This involves board-level involvement, in addition to business and IT executives. At a high level, this involves managing the implementation of SOA projects, managing the SOA infrastructure, and controlling the entire effort through fine-tuned IT processes in accordance with COBIT (Control Objectives for Information and Related Technology). Devising tools and techniques to promote a reuse culture and the SOA way of doing things requires competency centers to be established, in addition to training the workforce to take up the new roles that are suited to the SOA journey.

Conclusions

Reference Architectures can serve as the basis for disparate architecture efforts throughout the organization, even if they use different tools and technologies. Reference architectures provide best practices and approaches in a way that is independent of how any particular vendor deals with technology and standards. Reference Architectures model the abstract architectural elements for an enterprise independently of the technologies, protocols, and products that are used to implement an SOA. Telecom enterprises today are facing significant business and technology challenges due to growing competition, a multitude of services, and convergence. Adopting architectural best practices could go a long way in meeting these challenges. The use of SOA-based architecture for communication with each of the external systems, like Billing, CRM, etc., in the OSS/BSS system has made the architecture very loosely coupled, with greater flexibility. Any change in the external systems would be absorbed at the Integration Layer without affecting the rest of the ecosystem. The use of a Business Process Management (BPM) tool makes the management and maintenance of the business processes easy, with better performance in terms of lead time, quality, and cost. Since the architecture is based on standards, it will lower the cost of deploying and managing OSS/BSS applications over their lifecycles.

    Read the article

  • SQLAuthority News – TechEd India – April 12-14, 2010 Bangalore – An Unforgettable Experience – An Op

    - by pinaldave
TechEd India was one of the largest technology events in India, led by Microsoft. This event was attended by more than 3,000 technology enthusiasts, making it one of the most well-organized events of the year. Though I have attempted to attend almost all the technology events here, I have not seen any bigger or better event in the Indian subcontinent than this one. There are 21 Technical Tracks at Tech·Ed India 2010 that span more than 745 learning opportunities. I was fortunate enough to be a part of this whole event as a speaker as well as a delegate. TechEd India Speaker Badge and A Token of Lifetime

Hotel Selection
I presented three different sessions at TechEd India and was also a part of a panel discussion. (The details of the sessions are given at the end of this blog post.) Due to extensive traveling, I occasionally stay away from my family. For this reason, I took my wife, Nupur, and my daughter, Shaivi (8 months old), to the event along with me. We stayed at the same hotel where the event was organized, so as to maximize my time bonding with my family and, at the same time, have more time for networking with the technology community. The hotel Lalit Ashok is the largest and most luxurious venue one can find in Bangalore, located in the middle of the city. The hotel was a bit pricey, but looking at all the advantages, I decided to ask for a booking there. Hotel Lalit Ashok Nupur Dave and Shaivi Dave

Arrival Day – DAY 0 – April 11, 2010
I reached the event a day earlier, and that was one wise decision, for I was able to relax a bit and go over my presentation for the next day's course. I am the kind of person who likes to get everything ready ahead of time. I was also able to enjoy a pleasant evening with several Microsoft employees and my family friends. I even checked out the location where I would be doing presentations the next day. I was fortunate enough to meet Bijoy Singhal from Microsoft, who helped me out with a few of the logistics issues that occurred the day before. I was not aware of the fact that the very next day he was going to be “The Man” of the TechEd 2010 event. Vinod Kumar from Microsoft was really very kind as he talked to me regarding my subsequent session. He gave me some suggestions which were so helpful that I was able to incorporate them into my presentation. Finally, I was able to meet Abhishek Kant from Microsoft; his valuable suggestions and unlimited passion have inspired many people like me to work with the Community. Pradipta from Microsoft was also around, being extremely busy with logistics; however, in those busy times, he did find some good spare time to have a chat with me and the other Community leaders. I also met Harish Ranganathan and Sachin Rathi, both from Microsoft. It was so interesting to listen to both of them talking about SharePoint. I just have no words to express my overwhelmed spirit because of all these passionate young guys - Pradipta, Vinod, Bijoy, Harish, Sachin and Abhishek (of course!). Map of TechEd India 2010 Event

Day 1 – April 12, 2010
From morning until night time, today was truly a very busy day for me. I had two presentations and one panel discussion for the day. Needless to say, I had a few meetings to attend as well. The day started with a keynote from S. Somasegar, where he announced the launch of Visual Studio 2010. The keynote area was really eye-catching because of the very large, bigger-than-life uniform screen. This was truly something to see.
The title music of the keynote was very interesting, and it featured Bijoy Singhal as the model. It was interesting to talk to him afterwards, when we laughed together at jokes about his modeling assignment. TechEd India Keynote Opening Featuring Bijoy TechEd India 2010 Keynote – S. Somasegar

Time: 11:15am – 11:45am
Session 1: True Lies of SQL Server – SQL Myth Buster
Following the excellent keynote, I had my very first session on the subject of SQL Server Myth Buster. At first, I was a bit nervous, as it was right after the keynote, this was my very first session, and during my presentation I saw lots of Microsoft Product Team members. Well, it really went well, and I had a really good discussion with the attendees of the session. I felt that well begun was half done, and my confidence was regained. Right after the session, I met a few of my Community friends and had meaningful discussions with them on many subjects. The abstract of the session is as follows: In this 30-minute demo session, I am going to briefly demonstrate a few SQL Server Myths and their resolutions as I back them up with some demos. This demo presentation is a must-attend for all developers and administrators who come to the event. This is going to be a very quick yet fun session. Pinal Presenting session at TechEd India 2010

Time: 1:00 PM – 2:00 PM
Lunch with Somasegar
After the session I went to see my daughter, and then I headed right away to the lunch with S. Somasegar – the keynote speaker and senior vice president of the Developer Division at Microsoft. I really thank Abhishek, who made it possible for us. Because of his efforts, all the MVPs had the opportunity to meet such a legendary person and to talk with him about Microsoft technology. Though Somasegar currently holds such a high position at Microsoft, he is very polite and a real gentleman, and how I wish that everybody in the industry were like him. Believe me, if you spread love and kindness, then that is what you will receive back. As soon as lunch time was over, I ran to the session hall, as my second presentation was about to start.

Time: 2:30pm – 3:30pm
Session 2: Master Data Services in Microsoft SQL Server 2008 R2
Business Intelligence is a subject which was widely talked about at TechEd. Everybody was interested in this subject, and I did not excuse myself from this great concept either. I consider myself fortunate, as I was presenting on the subject of Master Data Services at TechEd. When I had initially learned this subject, I had a bit of confusion about the usage of this tool. Later on, I decided that I would talk about how we developers and DBAs are not able to understand something as simple as this and, even worse, create confusion about the technology. During system designing, it is very important to have reference material or master lookup tables. Well, I talked about that subject and presented the session keeping it as the center of my talk. The session went very well, and I received lots of interesting questions. I got many compliments for talking about this subject in a real-life scenario. I really thank Rushabh Mehta (CEO, Solid Quality Mentors India) for his supportive suggestions that helped me prepare the slide deck, as well as the subject. Pinal Presenting session at TechEd India 2010 The abstract of the session is as follows: SQL Server Master Data Services will ship with SQL Server 2008 R2 and will improve Microsoft's platform appeal.
This session provides an in-depth demonstration of MDS features and highlights important usage scenarios. Master Data Services enables a consistent decision-making process by allowing you to create, manage, and propagate changes from a single master view of your business entities. Also, the MDS Master Data hub, which is a vital component, helps ensure the consistency of reporting across systems and delivers faster and more accurate results across the enterprise. We will talk about establishing the basis for a centralized approach to defining, deploying, and managing master data in the enterprise. Pinal Presenting session at TechEd India 2010 The day was still not over for me. I ran into several friends, but we were not able to keep our enthusiasm under control amid all the rumors saying that SQL Server 2008 R2 was about to be launched tomorrow in the keynote. I then ran to my third and final technical event for the day - a panel discussion with the top technologists of India.

Time: 5:00pm – 6:00pm
Panel Discussion: Harness the power of Web – SEO and Technical Blogging
As I had delivered two technical sessions by this time, I was a bit tired but no less enthusiastic when I had to talk about blogging and technology. We discussed many different topics there. I told them that the most important aspect of any blog is its content. We discussed in depth the issues with plagiarism and how to avoid it. Another topic of discussion was how we technology bloggers can create awareness in the Community about what the right kind of blogging is and what morally and technically wrong acts are. A couple of questions were raised about what type of liberty a person can have in terms of writing blogs. Well, it was generally agreed that a blog is mainly a representation of our ideas and thoughts; it should not be governed by external entities. As long as one is writing what they really want to say, not providing incorrect information, and not practicing plagiarism, a blogger should be allowed to express himself. This panel discussion was supposed to be over in an hour, but the interest of the participants was remarkable, and so it was extended for 30 minutes more. Finally, we decided to bring the discussion to a close and agreed that we will continue the topic next year. TechEd India Panel Discussion on Web, Technology and SEO Surprisingly, the day was just beginning after all of this. By this time, I had met almost all the MVPs who arrived at the event, as well as many Microsoft employees. There were lots of Community folks present, too. I decided that I would go to meet several friends from the Community and ask them to continue to communicate with me on SQLAuthority.com. I also met Abhishek Baxi and had a good talk with him regarding Win Mobile and Twitter. He also took a very quick video of me wherein I spoke in my mother tongue, Gujarati. It was funny that I had talked in Gujarati almost all day, but when I was talking in the interview I could not find the right Gujarati words to speak. I think we all think in English when we think about technology, so as to address universality. After meeting them, I headed towards the Speakers' Dinner.

Time: 8:00 PM – onwards
Speakers Dinner
The Speakers' dinner was indeed a wonderful opportunity for all the speakers to get together and relax. We talked about so many different things, from XBOX to Hindi movies, and from SQL to samosas. I just cannot express how much fun I had. After a long evening, when I returned to my room and met Shaivi, I just felt instantly relaxed.
Kids are really gifts from God. Today was a really long but exciting day. So many things happened in just one day: the Visual Studio launch, lunch with Somasegar, 2 technical sessions, 1 panel discussion, a community leaders meeting, the speakers dinner and, last but not least, playing with my child! A perfect day!

Day 2 – April 13, 2010
Today started with a bang with the excellent keynote by Kamal Hathi, who launched SQL Server 2008 R2 in India and demonstrated the power of PowerPivot to all of us. 101 million rows in Excel brought lots of applause from the audience. Kamal Hathi Presenting Keynote at TechEd India 2010 The day was a bit easier for me. I had no sessions today and no events planned. I had a few meetings planned for the second day of the event. I sat in the speakers' lounge for half a day and met many people there. I attended nearly 9 different meetings today. The subjects of the meetings were very different. Here is a list of the topics of the Community-related meetings: SQL PASS and its involvement in India and the subcontinent; how to start community blogging; forums and developing an aptitude towards technology; Ahmedabad/Gandhinagar User Groups and their developments; SharePoint and SQL; Business Meeting – a client meeting; Business Meeting – a potential performance tuning project; Business Meeting – Solid Quality Mentors (SolidQ); and family friends. Pinal Dave at TechEd India The day passed by so quickly during these meetings. In the evening, I headed to the Partners Expo with friends and checked out a few of the booths. I really wanted to talk about some of the products, but due to the freebies there was so much of a crowd that I finally decided to just take the contact details of the partners. I will now start sending them my queries and, hopefully, I will have my questions answered. Nupur and Shaivi also had one meeting to attend; it was with our family friend Vijay Raj. Vijay is also a person who loves technology, and loves it more than anybody. I see him growing and learning every day, but still remaining ‘human’. I believe that if someone acquires as much knowledge as him, that person will become either a computer or a cyborg. Yet Vijay is still a kind gentleman and is able to stay our close family friend. Shaivi was really happy to play with Uncle Vijay. Pinal Dave and Vijay Raj Renuka Prasad, a Microsoft MVP, impressed me with his passion and knowledge of SQL. Every time he gives me credit for his success, I believe that he is very humble. He has way more certifications than me and has worked many more years with SQL compared to me. He is an excellent photographer as well. Most of the photos in this blog post have been taken by him. I told him that if he ever wants to do a part-time job, he can do photography very well. Pinal Dave and Renuka Prasad I also met L Srividya from Microsoft, whom I was looking forward to meeting. She is a bundle of knowledge, and everyone would surely learn a lot from her. I was able to get a few minutes from her and, well, I felt confident. She enlightened me with SQL Server BI concepts, domain management, SQL Server security, and a few other interesting details. I also had a wonderful time talking about SharePoint with fellow Solid Quality Mentor Joy Rathnayake. He is very passionate about SharePoint, but when you talk .NET and SQL with him, he is still overwhelmingly knowledgeable. In fact, while talking to him, I figured out that the recent training he delivered was on SQL Server 2008 R2.
I told him a joke that it hurts my ego, as he is now more popular than me in SQL training and consulting. I am sure all of you agree that working with good people is a gift from God. I am fortunate enough to work with the best of the best industry experts. It was a great pleasure to hang out with my Community friends – Ahswin Kini, HimaBindu Vejella, Vasudev G, Suprotim Agrawal, Dhananjay, Vikram Pendse, Mahesh Dhola, Mahesh Mitkari, Manu Zacharia, Shobhan, Hardik Shah, Ashish Mohta, Manan, Subodh Sohani and Sanjay Shetty (of course!). (Please let me know if I met you at the event and forgot to list your name here.)

Time: 8:00 PM – onwards
Community Leaders Dinner
After lots of meetings, I headed towards the Community Leaders dinner meeting and met almost all the folks I had met in the morning. The discussion was almost the same, but the really good thing was that we were enjoying it. The food was really good. Nupur was invited to the event, but Shaivi could not come. When Nupur tried to enter the event, she was stopped, as Shaivi did not have the pass to enter the dinner. Nupur explained that Shaivi is only 8 months old, does not eat outside food, and cannot stay by herself at this age, but the doorkeeper did not agree and insisted that without the entry details Shaivi could not go in, though Nupur could. Nupur called me on the phone and asked me to help her out. By the time I was outside, the organizer of the event had reached the door and happily approved Shaivi to join the party. Once in the party, Shaivi had lots of fun meeting so many people. Shaivi Dave and Abhishek Kant Dean Guida (Infragistics President and CEO) and Pinal Dave (SQLAuthority.com)

Day 3 – April 14, 2010
Though it was the last day, I was very much excited, as I was about to present my very favorite session. Query Optimization and Performance Tuning is my domain expertise, and I make my living by consulting and training on the same. Today's session was on the same subject and, as an additional twist, another subject, Spatial Databases, was presented. I was always intrigued by Spatial Databases and have enjoyed learning about them; however, I had never thought about Spatial Indexing before it was decided that I would do this session. I really thank Solid Quality Mentor Dr. Greg Low for his assistance in helping me prepare the slide deck and also review the content. Furthermore, today was really what I call my ‘learning day’. So far I had not attended any session at TechEd, and I felt a bit down about that. Everybody spends their valuable time and money to learn something new and exciting at TechEd, and I had not attended a single session, even though it was already the last day of the event. I did have a plan for the day, and I attended two technical sessions before my session on spatial databases. I attended 2 sessions by Vinod Kumar. Vinod is a natural storyteller, and there was no doubt that his sessions would be jam-packed. People attended his sessions simply because Vinod is the speaker. He has never once disappointed an audience; he is truly a good speaker. He knows his stuff very well. I personally do not think that he can be compared to anyone in India for SQL.

Time: 12:30pm – 1:30pm
SQL Server Query Optimization, Execution and Debugging Query Performance
I really had a fun time attending this session. Vinod made this session very interactive. The entire audience really got into the presentation and started participating in the event.
Vinod presented a small problem with query tuning, one which any developer would have encountered, and solved it with the audience's help in such a fashion that a developer feels he or she has already resolved it. For one question, I was the only one who was ready to answer, and Vinod told me in a light tone that I am now allowed to answer it! The audience really found it very amusing. There was a huge crowd around Vinod after the session. Vinod – A master storyteller!

Time: 3:45pm – 4:45pm
Data Recovery / Consistency with CheckDB
This session was much heavier than the earlier one, and I must say it is the most favorite session I have EVER attended in India. At this TechEd I attended only two sessions, but in my career I have attended numerous technical sessions, not only in India but all over the world. This session took my breath away. One by one, Vinod took different databases and started to corrupt them in different ways. Each database has some unique ways of getting corrupted. Once that was done, Vinod started to show DBCC CHECKDB and demonstrated how it can solve your problems. He finally fixed all the databases with this single tool. I do have good knowledge of this subject, but let me honestly admit that I learned a lot from this session. I enjoyed and cheered during this session along with the other attendees. I had total satisfaction that, just like everyone, I took advantage of the event and learned something. I am now TECHnically EDucated. Pinal Dave and Vinod Kumar After two very interactive and informative SQL sessions from Vinod Kumar, the next turn was mine, presenting on Spatial Databases and Indexing. I once again got nervous, but Vinod told me to stay natural and do my presentation. Well, once I got a huge stage with a total of four projectors and a large crowd, I felt better.

Time: 5:00pm – 6:00pm
Session 3: Developing with SQL Server Spatial and Deep Dive into Spatial Indexing
Pinal Presenting session at TechEd India 2010 I kicked off this session with Michael J Swart's beautiful spatial image. This session was the last one for the day but, to my surprise, I had more than 200 attendees. Slowly, the rain was starting outside, and I was worried that the hall would not be full; despite this, there was not a single seat available in the first five minutes of the session. Thanks to all of you for attending my presentation. I demonstrated the map of the world (and India) and quickly explained what the Geography and Geometry data types in a Spatial Database are. This session had an interesting story about indexing and comparison, as well as how different traditional indexes are from spatial indexes. Pinal Presenting session at TechEd India 2010 Due to the heavy rain during this event, the power went off for about 22 minutes (just an accident – nobody's fault). During these minutes, there was no audio, no video and no light. I continued to address the mass of 200+ people without any audio device and without PowerPoint. I must thank the audience, because not a single person left the session. They all stayed in their places, and some moved closer to listen to me properly. I noticed that the curiosity and eagerness to learn new things was at its peak even though it was the very last session of TechEd. Everybody wanted to get the maximum knowledge out of this whole event. I was touched by the support from the audience. They listened to and participated in my session even without any kind of technology (no PPT, no mic, no AC, nothing).
During these 22 minutes, I completed my theory verbally. Pinal Presenting session at TechEd India 2010 After a while, we got the projector back online and we continued with some exciting demos. Many thanks to the Microsoft people who worked energetically in the background to get the backup power for the projector up. I had a very interesting demo wherein I overlaid Bangalore and Hyderabad on the India map and found the aerial distance between them. After finding the aerial distance, we browsed online and found that the aerial distance SQL Server estimated between these two cities matched the factual distance. There was huge applause from the crowd when they saw that SQL Server takes into account the curvature of the earth and finds precise distances based on those details. While finding the distance, I demonstrated a few examples of the indexes, where I showed how one can use those indexes to find these distances and how they can improve the performance of a similar query. I also demonstrated a few examples wherein we were able to see for which data type the index is most useful. We finished the demos with a few more internal details. Pinal Presenting session at TechEd India 2010 Despite all the issues, I was mostly satisfied with my presentation. I think it was the best session I have ever presented at any conference. There was no help from technology for a while, but I still got lots of appreciation at the end. When we ended the session, the applause from the audience was so loud that, for a moment, the rain was not audible. I was truly moved by the dedication of the technology enthusiasts. Pinal Dave After Presenting session at TechEd India 2010 The abstract of the session is as follows: Microsoft SQL Server 2008 delivers new spatial data types that enable you to consume, use, and extend location-based data through spatial-enabled applications. Attend this session to learn how to use the spatial functionality in the next version of SQL Server to build and optimize spatial queries. This session outlines how to use the new geography data type to store geodetic spatial data and perform operations on it, use the new geometry data type to store planar spatial data and perform operations on it, take advantage of new spatial indexes for high-performance queries, use the new spatial results tab to quickly and easily view spatial query results directly from within Management Studio, and extend spatial data capabilities by building or integrating location-enabled applications through support for spatial standards and specifications, and much more.

Time: 8:00 PM – onwards
Dinner by Sponsors
After the lively sessions during the day, there was another dinner party, courtesy of one of the sponsors of TechEd. All the MVPs and several Community leaders were present at the dinner. I would like to express my gratitude to Abhishek Kant for organizing this wonderful event for us. It was a blast and really relaxing in every way. We all stayed there for a long time and talked about our sweet and unforgettable memories of the event. Pinal Dave and Bijoy Singhal It was really one wonderful event. After writing this much, I must say that I still have no words to express how much I enjoyed TechEd. However, it is true that I have shared with you only 1% of the total activities I did at the event. There were so many people I met who are not mentioned here, although I wanted to write their names here, too.
Anyway, I learned so many things, and even now I am not able to get over all the fun I had at this event. Pinal Dave at TechEd India 2010

The Next Days – April 15, 2010 – till today
I am still not able to get my mind off the whole experience I had at TechEd India 2010. It was like a whole Microsoft family working together to celebrate a happy occasion. TechEd India – Truly An Unforgettable Experience! Reference: Pinal Dave (http://blog.SQLAuthority.com) Filed under: About Me, MVP, Pinal Dave, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQLAuthority Author Visit, SQLAuthority News, SQLServer, T SQL, Technology Tagged: TechEd, TechEdIn

    Read the article

  • SQLAuthority News – TechEd India – April 12-14, 2010 Bangalore – An Unforgettable Experience – An Op

    - by pinaldave
    TechEd India was one of the largest Technology events in India led by Microsoft. This event was attended by more than 3,000 technology enthusiasts, making it one of the most well-organized events of the year. Though I attempted to attend almost all the technology events here, I have not seen any bigger or better event in Indian subcontinents other than this. There are 21 Technical Tracks at Tech·Ed India 2010 that span more than 745 learning opportunities. I was fortunate enough to be a part of this whole event as a speaker and a delegate, as well. TechEd India Speaker Badge and A Token of Lifetime Hotel Selection I presented three different sessions at TechEd India and was also a part of panel discussion. (The details of the sessions are given at the end of this blog post.) Due to extensive traveling, I stay away from my family occasionally. For this reason, I took my wife – Nupur and daughter Shaivi (8 months old) to the event along with me. We stayed at the same hotel where the event was organized so as to maximize my time bonding with my family and to have more time in networking with technology community, at the same time. The hotel Lalit Ashok is the largest and most luxurious venue one can find in Bangalore, located in the middle of the city. The cost of the hotel was a bit pricey, but looking at all the advantages, I had decided to ask for a booking there. Hotel Lalit Ashok Nupur Dave and Shaivi Dave Arrival Day – DAY 0 – April 11, 2010 I reached the event a day earlier, and that was one wise decision for I was able to relax a bit and go over my presentation for the next day’s course. I am a kind of person who likes to get everything ready ahead of time. I was also able to enjoy a pleasant evening with several Microsoft employees and my family friends. I even checked out the location where I would be doing presentations the next day. I was fortunate enough to meet Bijoy Singhal from Microsoft who helped me out with a few of the logistics issues that occured the day before. I was not aware of the fact that the very next day he was going to be “The Man” of the TechEd 2010 event. Vinod Kumar from Microsoft was really very kind as he talked to me regarding my subsequent session. He gave me some suggestions which were really helpful that I was able to incorporate them during my presentation. Finally, I was able to meet Abhishek Kant from Microsoft; his valuable suggestions and unlimited passion have inspired many people like me to work with the Community. Pradipta from Microsoft was also around, being extremely busy with logistics; however, in those busy times, he did find some good spare time to have a chat with me and the other Community leaders. I also met Harish Ranganathan and Sachin Rathi, both from Microsoft. It was so interesting to listen to both of them talking about SharePoint. I just have no words to express my overwhelmed spirit because of all these passionate young guys - Pradipta,Vinod, Bijoy, Harish, Sachin and Ahishek (of course!). Map of TechEd India 2010 Event Day 1 – April 12, 2010 From morning until night time, today was truly a very busy day for me. I had two presentations and one panel discussion for the day. Needless to say, I had a few meetings to attend as well. The day started with a keynote from S. Somaseger where he announced the launch of Visual Studio 2010. The keynote area was really eye-catching because of the very large, bigger-than- life uniform screen. This was truly one to show. 
The title music of the keynote was very interesting and it featured Bijoy Singhal as the model. It was interesting to talk to him afterwards, when we laughed at jokes together about his modeling assignment. TechEd India Keynote Opening Featuring Bijoy TechEd India 2010 Keynote – S. Somasegar Time: 11:15pm – 11:45pm Session 1: True Lies of SQL Server – SQL Myth Buster Following the excellent keynote, I had my very first session on the subject of SQL Server Myth Buster. At first, I was a bit nervous as right after the keynote, for this was my very first session and during my presentation I saw lots of Microsoft Product Team members. Well, it really went well and I had a really good discussion with attendees of the session. I felt that a well begin was half-done and my confidence was regained. Right after the session, I met a few of my Community friends and had meaningful discussions with them on many subjects. The abstract of the session is as follows: In this 30-minute demo session, I am going to briefly demonstrate few SQL Server Myths and their resolutions as I back them up with some demo. This demo presentation is a must-attend for all developers and administrators who would come to the event. This is going to be a very quick yet fun session. Pinal Presenting session at TechEd India 2010 Time: 1:00 PM – 2:00 PM Lunch with Somasegar After the session I went to see my daughter, and then I headed right away to the lunch with S. Somasegar – the keynote speaker and senior vice president of the Developer Division at Microsoft. I really thank to Abhishek who made it possible for us. Because of his efforts, all the MVPs had the opportunity to meet such a legendary person and had to talk with them on Microsoft Technology. Though Somasegar is currently holding such a high position in Microsoft, he is very polite and a real gentleman, and how I wish that everybody in industry is like him. Believe me, if you spread love and kindness, then that is what you will receive back. As soon as lunch time was over, I ran to the session hall as my second presentation was about to start. Time: 2:30pm – 3:30pm Session 2: Master Data Services in Microsoft SQL Server 2008 R2 Business Intelligence is a subject which was widely talked about at TechEd. Everybody was interested in this subject, and I did not excuse myself from this great concept as well. I consider myself fortunate as I was presenting on the subject of Master Data Services at TechEd. When I had initially learned this subject, I had a bit of confusion about the usage of this tool. Later on, I decided that I would tackle about how we all developers and DBAs are not able to understand something so simple such as this, and even worst, creating confusion about the technology. During system designing, it is very important to have a reference material or master lookup tables. Well, I talked about the same subject and presented the session keeping that as my center talk. The session went very well and I received lots of interesting questions. I got many compliments for talking about this subject on the real-life scenario. I really thank Rushabh Mehta (CEO, Solid Quality Mentors India) for his supportive suggestions that helped me prepare the slide deck, as well as the subject. Pinal Presenting session at TechEd India 2010 The abstract of the session is as follows: SQL Server Master Data Services will ship with SQL Server 2008 R2 and will improve Microsoft’s platform appeal. 
This session provides an in-depth demonstration of MDS features and highlights important usage scenarios. Master Data Services enables consistent decision-making process by allowing you to create, manage and propagate changes from a single master view of your business entities. Also, MDS – Master Data-hub which is a vital component, helps ensure the consistency of reporting across systems and deliver faster and more accurate results across the enterprise. We will talk about establishing the basis for a centralized approach to defining, deploying, and managing master data in the enterprise. Pinal Presenting session at TechEd India 2010 The day was still not over for me. I had ran into several friends but we were not able keep our enthusiasm under control about all the rumors saying that SQL Server 2008 R2 was about to be launched tomorrow in the keynote. I then ran to my third and final technical event for the day- a panel discussion with the top technologies of India. Time: 5:00pm – 6:00pm Panel Discussion: Harness the power of Web – SEO and Technical Blogging As I have delivered two technical sessions by this time, I was a bit tired but  not less enthusiastic when I had to talk about Blog and Technology. We discussed many different topics there. I told them that the most important aspect for any blog is its content. We discussed in depth the issues with plagiarism and how to avoid it. Another topic of discussion was how we technology bloggers can create awareness in the Community about what the right kind of blogging is and what morally and technically wrong acts are. A couple of questions were raised about what type of liberty a person can have in terms of writing blogs. Well, it was generically agreed that a blog is mainly a representation of our ideas and thoughts; it should not be governed by external entities. As long as one is writing what they really want to say, but not providing incorrect information or not practicing plagiarism, a blogger should be allowed to express himself. This panel discussion was supposed to be over in an hour, but the interest of the participants was remarkable and so it was extended for 30 minutes more. Finally, we decided to bring to a close the discussion and agreed that we will continue the topic next year. TechEd India Panel Discussion on Web, Technology and SEO Surprisingly, the day was just beginning after doing all of these. By this time, I have almost met all the MVP who arrived at the event, as well as many Microsoft employees. There were lots of Community folks present, too. I decided that I would go to meet several friends from the Community and continue to communicate with me on SQLAuthority.com. I also met Abhishek Baxi and had a good talk with him regarding Win Mobile and Twitter. He also took a very quick video of me wherein I spoke in my mother’s tongue, Gujarati. It was funny that I talked in Gujarati almost all the day, but when I was talking in the interview I could not find the right Gujarati words to speak. I think we all think in English when we think about Technology, so as to address universality. After meeting them, I headed towards the Speakers’ Dinner. Time: 8:00 PM – onwards Speakers Dinner The Speakers’ dinner was indeed a wonderful opportunity for all the speakers to get together and relax. We talked so many different things, from XBOX to Hindi Movies, and from SQL to Samosas. I just could not express how much fun I had. After a long evening, when I returned tmy room and met Shaivi, I just felt instantly relaxed. 
Kids are really gifts from God. Today was a really long but exciting day. So many things happened in just one day: Visual Studio Lanch, lunch with Somasegar, 2 technical sessions, 1 panel discussion, community leaders meeting, speakers dinner and, last but not leas,t playing with my child! A perfect day! Day 2 – April 13, 2010 Today started with a bang with the excellent keynote by Kamal Hathi who launched SQL Server 2008 R2 in India and demonstrated the power of PowerPivot to all of us. 101 Million Rows in Excel brought lots of applause from the audience. Kamal Hathi Presenting Keynote at TechEd India 2010 The day was a bit easier one for me. I had no sessions today and no events planned. I had a few meetings planned for the second day of the event. I sat in the speaker’s lounge for half a day and met many people there. I attended nearly 9 different meetings today. The subjects of the meetings were very different. Here is a list of the topics of the Community-related meetings: SQL PASS and its involvement in India and subcontinents How to start community blogging Forums and developing aptitude towards technology Ahmedabad/Gandhinagar User Groups and their developments SharePoint and SQL Business Meeting – a client meeting Business Meeting – a potential performance tuning project Business Meeting – Solid Quality Mentors (SolidQ) And family friends Pinal Dave at TechEd India The day passed by so quickly during this meeting. In the evening, I headed to Partners Expo with friends and checked out few of the booths. I really wanted to talk about some of the products, but due to the freebies there was so much crowd that I finally decided to just take the contact details of the partner. I will now start sending them with my queries and, hopefully, I will have my questions answered. Nupur and Shaivi had also one meeting to attend; it was with our family friend Vijay Raj. Vijay is also a person who loves Technology and loves it more than anybody. I see him growing and learning every day, but still remaining as a ‘human’. I believe that if someone acquires as much knowledge as him, that person will become either a computer or cyborg. Here, Vijay is still a kind gentleman and is able to stay as our close family friend. Shaivi was really happy to play with Uncle Vijay. Pinal Dave and Vijay Raj Renuka Prasad, a Microsoft MVP, impressed me with his passion and knowledge of SQL. Every time he gives me credit for his success, I believe that he is very humble. He has way more certifications than me and has worked many more years with SQL compared to me. He is an excellent photographer as well. Most of the photos in this blog post have been taken by him. I told him if ever he wants to do a part time job, he can do the photography very well. Pinal Dave and Renuka Prasad I also met L Srividya from Microsoft, whom I was looking forward to meet. She is a bundle of knowledge that everyone would surely learn a lot from her. I was able to get a few minutes from her and well, I felt confident. She enlightened me with SQL Server BI concepts, domain management and SQL Server security and few other interesting details. I also had a wonderful time talking about SharePoint with fellow Solid Quality Mentor Joy Rathnayake. He is very passionate about SharePoint but when you talk .NET and SQL with him, he is still overwhelmingly knowledgeable. In fact, while talking to him, I figured out that the recent training he delivered was on SQL Server 2008 R2. 
I told him a joke that it hurts my ego as he is more popular now in SQL training and consulting than me. I am sure all of you agree that working with good people is a gift from God. I am fortunate enough to work with the best of the best Industry experts. It was a great pleasure to hang out with my Community friends – Ahswin Kini, HimaBindu Vejella, Vasudev G, Suprotim Agrawal, Dhananjay, Vikram Pendse, Mahesh Dhola, Mahesh Mitkari,  Manu Zacharia, Shobhan, Hardik Shah, Ashish Mohta, Manan, Subodh Sohani and Sanjay Shetty (of course!) .  (Please let me know if I have met you at the event and forgot your name to list here). Time: 8:00 PM – onwards Community Leaders Dinner After lots of meetings, I headed towards the Community Leaders dinner meeting and met almost all the folks I met in morning. The discussion was almost the same but the real good thing was that we were enjoying it. The food was really good. Nupur was invited in the event, but Shaivi could not come. When Nupur tried to enter the event, she was stopped as Shaivi did not have the pass to enter the dinner. Nupur expressed that Shaivi is only 8 months old and does not eat outside food as well and could not stay by herself at this age, but the door keeper did not agree and asked that without the entry details Shaivi could not go in, but Nupur could. Nupur called me on phone and asked me to help her out. By the time, I was outside; the organizer of the event reached to the door and happily approved Shaivi to join the party. Once in the party, Shaivi had lots of fun meeting so many people. Shaivi Dave and Abhishek Kant Dean Guida (Infragistics President and CEO) and Pinal Dave (SQLAuthority.com) Day 3 – April 14, 2010 Though, it was last day, I was very much excited today as I was about to present my very favorite session. Query Optimization and Performance Tuning is my domain expertise and I make my leaving by consulting and training the same. Today’s session was on the same subject and as an additional twist, another subject about Spatial Database was presented. I was always intrigued with Spatial Database and I have enjoyed learning about it; however, I have never thought about Spatial Indexing before it was decided that I will do this session. I really thank Solid Quality Mentor Dr. Greg Low for his assistance in helping me prepare the slide deck and also review the content. Furthermore, today was really what I call my ‘learning day’ . So far I had not attended any session in TechEd and I felt a bit down for that. Everybody spends their valuable time & money to learn something new and exciting in TechEd and I had not attended a single session at the moment thinking that it was already last day of the event. I did have a plan for the day and I attended two technical sessions before my session of spatial database. I attended 2 sessions of Vinod Kumar. Vinod is a natural storyteller and there was no doubt that his sessions would be jam-packed. People attended his sessions simply because Vinod is syhe speaker. He did not have a single time disappointed audience; he is truly a good speaker. He knows his stuff very well. I personally do not think that in India he can be compared to anyone for SQL. Time: 12:30pm-1:30pm SQL Server Query Optimization, Execution and Debugging Query Performance I really had a fun time attending this session. Vinod made this session very interactive. The entire audience really got into the presentation and started participating in the event. 
Vinod was presenting a small problem with Query Tuning, which any developer would have encountered and solved with their help in such a fashion that a developer feels he or she have already resolved it. In one question, I was the only one who was ready to answer and Vinod told me in a light tone that I am now allowed to answer it! The audience really found it very amusing. There was a huge crowd around Vinod after the session. Vinod – A master storyteller! Time: 3:45pm-4:45pm Data Recovery / consistency with CheckDB This session was much heavier than the earlier one, and I must say this is my most favorite session I EVER attended in India. In this TechEd I have only attended two sessions, but in my career, I have attended numerous technical sessions not only in India, but all over the world. This session had taken my breath away. One by one, Vinod took the different databases, and started to corrupt them in different ways. Each database has some unique ways to get corrupted. Once that was done, Vinod started to show the DBCC CEHCKDB and demonstrated how it can solve your problem. He finally fixed all the databases with this single tool. I do have a good knowledge of this subject, but let me honestly admit that I have learned a lot from this session. I enjoyed and cheered during this session along with other attendees. I had total satisfaction that, just like everyone, I took advantage of the event and learned something. I am now TECHnically EDucated. Pinal Dave and Vinod Kumar After two very interactive and informative SQL Sessions from Vinod Kumar, the next turn me presenting on Spatial Database and Indexing. I got once again nervous but Vinod told me to stay natural and do my presentation. Well, once I got a huge stage with a total of four projectors and a large crowd, I felt better. Time: 5:00pm-6:00pm Session 3: Developing with SQL Server Spatial and Deep Dive into Spatial Indexing Pinal Presenting session at TechEd India 2010 Pinal Presenting session at TechEd India 2010 I kicked off this session with Michael J Swart‘s beautiful spatial image. This session was the last one for the day but, to my surprise, I had more than 200+ attendees. Slowly, the rain was starting outside and I was worried that the hall would not be full; despite this, there was not a single seat available in the first five minutes of the session. Thanks to all of you for attending my presentation. I had demonstrated the map of world (and India) and quickly explained what  Geographic and Geometry data types in Spatial Database are. This session had interesting story of Indexing and Comparison, as well as how different traditional indexes are from spatial indexing. Pinal Presenting session at TechEd India 2010 Due to the heavy rain during this event, the power went off for about 22 minutes (just an accident – nobodies fault). During these minutes, there were no audio, no video and no light. I continued to address the mass of 200+ people without any audio device and PowerPoint. I must thank the audience because not a single person left from the session. They all stayed in their place, some moved closure to listen to me properly. I noticed that the curiosity and eagerness to learn new things was at the peak even though it was the very last session of the TechEd. Everybody wanted get the maximum knowledge out of this whole event. I was touched by the support from audience. They listened and participated in my session even without any kinds of technology (no ppt, no mike, no AC, nothing). 
During those 22 minutes, I completed the theory part verbally.
Pinal Presenting session at TechEd India 2010
After a while we got the projector back online and continued with some exciting demos. Many thanks to the Microsoft people who worked energetically in the background to get backup power to the projector. I had a very interesting demo wherein I overlaid Bangalore and Hyderabad on the map of India and found the aerial distance between them. After finding the aerial distance, we browsed online and saw that SQL Server had estimated the exact aerial distance between these two cities when compared with the factual distance. There was huge applause from the crowd at the fact that SQL Server takes the curvature of the earth into account and finds precise distances based on it (a minimal sketch of this kind of query appears at the end of this write-up). While finding the distance, I demonstrated a few examples of indexes, showing how one can use them to find these distances and how they can improve the performance of similar queries. I also demonstrated a few examples wherein we could see for which data type an index is most useful. We finished the demos with a bit more internal detail.
Pinal Presenting session at TechEd India 2010
Despite all the issues, I was mostly satisfied with my presentation. I think it was the best session I have ever presented at any conference. There was no help from technology for a while, but I still got lots of appreciation at the end. When we ended the session, the applause from the audience was so loud that, for a moment, the rain was not audible. I was truly moved by the dedication of these technology enthusiasts.
Pinal Dave After Presenting session at TechEd India 2010
The abstract of the session is as follows: Microsoft SQL Server 2008 delivers new spatial data types that enable you to consume, use, and extend location-based data through spatial-enabled applications. Attend this session to learn how to use the spatial functionality in the next version of SQL Server to build and optimize spatial queries. The session outlines how to use the new geography data type to store geodetic spatial data and perform operations on it, use the new geometry data type to store planar spatial data and perform operations on it, take advantage of new spatial indexes for high-performance queries, use the new spatial results tab to quickly and easily view spatial query results directly from within Management Studio, extend spatial data capabilities by building or integrating location-enabled applications through support for spatial standards and specifications, and much more.

Time: 8:00 PM – onwards
Dinner by Sponsors
After the lively sessions during the day, there was another dinner party, courtesy of one of the sponsors of TechEd. All the MVPs and several community leaders were present at the dinner. I would like to express my gratitude to Abhishek Kant for organizing this wonderful event for us. It was a blast and really relaxing in every way. We all stayed there for a long time and talked about our sweet and unforgettable memories of the event.
Pinal Dave and Bijoy Singhal
It really was a wonderful event. Even after writing this much, I have no words to express how much I enjoyed TechEd. It is true that I have shared with you only 1% of the total activities I did at the event. There were so many people I met who are not mentioned here, although I wanted to write their names here, too.
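Before wrapping up, a quick footnote for readers who want to try the aerial-distance idea themselves. The snippet below is only a minimal sketch of that kind of SQL Server 2008 query, not the code used in the session, and the latitude/longitude values for the two cities are approximate figures added here purely for illustration:

-- Two cities as geodetic points (SRID 4326 = WGS 84); the coordinates are approximate.
DECLARE @Bangalore geography, @Hyderabad geography;
SET @Bangalore = geography::Point(12.97, 77.59, 4326);
SET @Hyderabad = geography::Point(17.38, 78.49, 4326);

-- STDistance() on the geography type works on the round earth,
-- so the result is the aerial (great-circle) distance in meters.
SELECT @Bangalore.STDistance(@Hyderabad) / 1000.0 AS AerialDistanceInKm;

Because the geography type is geodetic, the curvature of the earth is already taken into account; with a spatial index on a geography column, the same kind of distance predicate can also be used efficiently in a WHERE clause.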
Anyway, I learned so many things, and even now I am not able to get over all the fun I had at this event.
Pinal Dave at TechEd India 2010
The Next Days – April 15, 2010 – till today
I am still not able to get my mind off the whole experience I had at TechEd India 2010. It was like a whole Microsoft family working together to celebrate a happy occasion. TechEd India – truly an unforgettable experience!
Reference: Pinal Dave (http://blog.SQLAuthority.com)
Filed under: About Me, MVP, Pinal Dave, SQL, SQL Authority, SQL Query, SQL Server, SQL Tips and Tricks, SQLAuthority Author Visit, SQLAuthority News, SQLServer, T SQL, Technology
Tagged: TechEd, TechEdIn

    Read the article

  • iTunes crashes with "Attempt to allocate 1073741824 bytes for NS/CFData failed" error

    - by kubi
    This is a pretty common occurrence. Every two days or so iTunes will crash on me with this error. FYI, 1073741824 is 2^30, which is gigabyte. The crash log is below if you're interested. Process: iTunes [40778] Path: /Applications/iTunes.app/Contents/MacOS/iTunes Identifier: com.apple.iTunes Version: 9.0.1 (9.0.1) Build Info: iTunes-9010901~2 Code Type: X86 (Native) Parent Process: launchd [638] Date/Time: 2009-10-21 11:35:55.159 -0400 OS Version: Mac OS X 10.6.1 (10B504) Report Version: 6 Interval Since Last Report: 38292 sec Per-App Interval Since Last Report: 63956 sec Per-App Crashes Since Last Report: 1 Anonymous UUID: A7149D8A-1161-4740-976B-DB99AE1B01DD Exception Type: EXC_BREAKPOINT (SIGTRAP) Exception Codes: 0x0000000000000002, 0x0000000000000000 Crashed Thread: 13 Application Specific Information: *** Terminating app due to uncaught exception 'NSMallocException', reason: 'Attempt to allocate 1073741824 bytes for NS/CFData failed' *** Call stack at first throw: ( 0 CoreFoundation 0x9924958a __raiseError + 410 1 libobjc.A.dylib 0x9440df49 objc_exception_throw + 56 2 Foundation 0x945e2fda _NSSearchForNameInPath + 0 3 CoreFoundation 0x99262ca5 __CFDataHandleOutOfMemory + 101 4 CoreFoundation 0x9919f27d __CFDataGrow + 717 5 CoreFoundation 0x9919ce0a CFDataReplaceBytes + 362 6 CoreFoundation 0x9919e63a CFDataAppendBytes + 154 7 iTunes 0x00522808 0x0 + 5384200 8 iTunes 0x00523471 0x0 + 5387377 9 iTunes 0x00441bc8 0x0 + 4463560 10 CoreFoundation 0x9923eba3 _signalEventSync + 99 11 CoreFoundation 0x9923f58e _cfstream_solo_signalEventSync + 126 12 CoreFoundation 0x9923f4d7 CFReadStreamSignalEvent + 39 13 CFNetwork 0x97374c23 _ZN14HTTPReadStream11streamEventEm + 169 14 CoreFoundation 0x9923eba3 _signalEventSync + 99 15 CoreFoundation 0x9923eb1a _cfstream_shared_signalEventSync + 458 16 CoreFoundation 0x991b58cb __CFRunLoopDoSources0 + 1563 17 CoreFoundation 0x991b385f __CFRunLoopRun + 1071 18 CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 19 CoreFoundation 0x991b87a4 CFRunLoopRun + 84 20 iTunes 0x0000ade8 0x0 + 44520 21 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 22 libSystem.B.dylib 0x96de9dbe thread_start + 34 ) Thread 0: Dispatch queue: com.apple.main-thread 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b2b61 CFRunLoopRunInMode + 97 5 com.apple.HIToolbox 0x97c3bfec RunCurrentEventLoopInMode + 392 6 com.apple.HIToolbox 0x97c3bda3 ReceiveNextEventCommon + 354 7 com.apple.HIToolbox 0x97dc3d91 ReceiveNextEvent + 83 8 com.apple.iTunes 0x00135fae 0x1000 + 1265582 9 com.apple.HIToolbox 0x97c0f129 DispatchEventToHandlers(EventTargetRec*, OpaqueEventRef*, HandlerCallRec*) + 1567 10 com.apple.HIToolbox 0x97c0e3f0 SendEventToEventTargetInternal(OpaqueEventRef*, OpaqueEventTargetRef*, HandlerCallRec*) + 411 11 com.apple.HIToolbox 0x97c0e24f SendEventToEventTargetWithOptions + 58 12 com.apple.HIToolbox 0x97c42c0c ToolboxEventDispatcherHandler(OpaqueEventHandlerCallRef*, OpaqueEventRef*, void*) + 3006 13 com.apple.HIToolbox 0x97c0f57a DispatchEventToHandlers(EventTargetRec*, OpaqueEventRef*, HandlerCallRec*) + 2672 14 com.apple.HIToolbox 0x97c0e3f0 SendEventToEventTargetInternal(OpaqueEventRef*, OpaqueEventTargetRef*, HandlerCallRec*) + 411 15 com.apple.HIToolbox 0x97c30a81 SendEventToEventTarget + 52 16 com.apple.HIToolbox 0x97db98f7 ToolboxEventDispatcher + 86 17 
com.apple.HIToolbox 0x97db9a2f RunApplicationEventLoop + 243 18 com.apple.iTunes 0x00135d84 0x1000 + 1265028 19 com.apple.iTunes 0x00135c70 0x1000 + 1264752 20 com.apple.iTunes 0x0000d2af 0x1000 + 49839 21 com.apple.iTunes 0x000049a8 0x1000 + 14760 22 com.apple.iTunes 0x00002bfb 0x1000 + 7163 23 com.apple.iTunes 0x00002b29 0x1000 + 6953 Thread 1: Dispatch queue: com.apple.libdispatch-manager 0 libSystem.B.dylib 0x96de303a kevent + 10 1 libSystem.B.dylib 0x96de3768 _dispatch_mgr_invoke + 215 2 libSystem.B.dylib 0x96de2bf9 _dispatch_queue_invoke + 183 3 libSystem.B.dylib 0x96de298a _dispatch_worker_thread2 + 234 4 libSystem.B.dylib 0x96de2401 _pthread_wqthread + 390 5 libSystem.B.dylib 0x96de2246 start_wqthread + 30 Thread 2: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 3: 0 libSystem.B.dylib 0x96ddb756 select$DARWIN_EXTSN + 10 1 com.apple.CoreFoundation 0x991f304d __CFSocketManager + 1085 2 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 3 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 4: 0 libSystem.B.dylib 0x96e84766 accept$NOCANCEL$UNIX2003 + 10 1 libSystem.B.dylib 0x96e8363e accept + 32 2 com.apple.iTunes 0x0044c792 0x1000 + 4503442 3 com.apple.iTunes 0x004a86cd 0x1000 + 4880077 4 com.apple.iTunes 0x004a879b 0x1000 + 4880283 5 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 6 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 5: 0 libSystem.B.dylib 0x96e84766 accept$NOCANCEL$UNIX2003 + 10 1 libSystem.B.dylib 0x96e8363e accept + 32 2 com.apple.iTunes 0x0044c792 0x1000 + 4503442 3 com.apple.iTunes 0x004a86cd 0x1000 + 4880077 4 com.apple.iTunes 0x004a879b 0x1000 + 4880283 5 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 6 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 6: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x00135e09 0x1000 + 1265161 6 com.apple.iTunes 0x00135cc5 0x1000 + 1264837 7 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 8 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 7: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 8: 0 libSystem.B.dylib 0x96dbc83a semaphore_timedwait_signal_trap + 10 1 libSystem.B.dylib 0x96dea3c1 _pthread_cond_wait + 1066 2 libSystem.B.dylib 0x96e19208 pthread_cond_timedwait_relative_np + 47 3 com.apple.iTunes 0x0004ca83 0x1000 + 309891 4 com.apple.iTunes 0x0004c7cb 0x1000 + 309195 5 com.apple.iTunes 0x0004c76a 0x1000 + 309098 6 com.apple.iTunes 0x0004c5bb 0x1000 + 308667 7 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 8 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 9: 0 libSystem.B.dylib 0x96dbc7da 
mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 10: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 11: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 12: 0 libSystem.B.dylib 0x96dbc822 semaphore_wait_signal_trap + 10 1 libSystem.B.dylib 0x96dea3d8 _pthread_cond_wait + 1089 2 libSystem.B.dylib 0x96e3370f pthread_cond_wait + 48 3 com.apple.iTunes 0x0000ae70 0x1000 + 40560 4 com.apple.iTunes 0x0000ad06 0x1000 + 40198 5 com.apple.iTunes 0x004418a1 0x1000 + 4458657 6 com.apple.iTunes 0x0043f960 0x1000 + 4450656 7 com.apple.iTunes 0x00525475 0x1000 + 5391477 8 com.apple.iTunes 0x00525c0d 0x1000 + 5393421 9 com.apple.iTunes 0x0004c62c 0x1000 + 308780 10 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 11 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 13 Crashed: 0 com.apple.CoreFoundation 0x99293b07 ___TERMINATING_DUE_TO_UNCAUGHT_EXCEPTION___ + 7 1 libobjc.A.dylib 0x9440df49 objc_exception_throw + 56 2 com.apple.CoreFoundation 0x991b2fbc CFRunLoopRunSpecific + 1100 3 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 4 com.apple.iTunes 0x0000ade8 0x1000 + 40424 5 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 6 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 14: 0 libSystem.B.dylib 0x96dbc7da mach_msg_trap + 10 1 libSystem.B.dylib 0x96dbcf47 mach_msg + 68 2 com.apple.CoreFoundation 0x991b3dbf __CFRunLoopRun + 2447 3 com.apple.CoreFoundation 0x991b2d34 CFRunLoopRunSpecific + 452 4 com.apple.CoreFoundation 0x991b87a4 CFRunLoopRun + 84 5 com.apple.iTunes 0x0000ade8 0x1000 + 40424 6 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 7 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 15: 0 libSystem.B.dylib 0x96dbc822 semaphore_wait_signal_trap + 10 1 libSystem.B.dylib 0x96dea3d8 _pthread_cond_wait + 1089 2 libSystem.B.dylib 0x96e3370f pthread_cond_wait + 48 3 ...ickTimeComponents.component 0x915275b7 jpegdecompress_MPLoop + 79 4 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 5 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 16: 0 libSystem.B.dylib 0x96dbc822 semaphore_wait_signal_trap + 10 1 libSystem.B.dylib 0x96dea3d8 _pthread_cond_wait + 1089 2 libSystem.B.dylib 0x96e3370f pthread_cond_wait + 48 3 com.apple.iTunes 0x00025d27 0x1000 + 150823 4 com.apple.iTunes 0x00025237 0x1000 + 148023 5 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 6 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 17: 0 libSystem.B.dylib 0x96de2092 __workq_kernreturn + 10 1 libSystem.B.dylib 
0x96de2628 _pthread_wqthread + 941 2 libSystem.B.dylib 0x96de2246 start_wqthread + 30 Thread 18: 0 libSystem.B.dylib 0x96dbc83a semaphore_timedwait_signal_trap + 10 1 libSystem.B.dylib 0x96dea3c1 _pthread_cond_wait + 1066 2 libSystem.B.dylib 0x96e19208 pthread_cond_timedwait_relative_np + 47 3 com.apple.iTunes 0x0004ca83 0x1000 + 309891 4 com.apple.iTunes 0x0004c7cb 0x1000 + 309195 5 com.apple.iTunes 0x0004c76a 0x1000 + 309098 6 com.apple.iTunes 0x0004c5bb 0x1000 + 308667 7 libSystem.B.dylib 0x96de9f39 _pthread_start + 345 8 libSystem.B.dylib 0x96de9dbe thread_start + 34 Thread 13 crashed with X86 Thread State (32-bit): eax: 0x00000000 ebx: 0x9440df25 ecx: 0xb08b2000 edx: 0x0000003b edi: 0xa0737ab0 esi: 0x19685e20 ebp: 0xb08b1de8 esp: 0xb08b1dd0 ss: 0x0000001f efl: 0x00000282 eip: 0x99293b07 cs: 0x00000017 ds: 0x0000001f es: 0x0000001f fs: 0x0000001f gs: 0x00000037 cr2: 0x00ff9000 Binary Images: 0x1000 - 0xbd9ff8 com.apple.iTunes 9.0.1 (9.0.1) <18B3F1D1-1E3E-6DD1-CB52-F346ACB01921> /Applications/iTunes.app/Contents/MacOS/iTunes 0xdf9000 - 0xe01ff7 com.apple.ipodsynchronization 3.0 (116) <B41B2240-34E9-4A5E-A210-F02D99E3C00E> /System/Library/PrivateFrameworks/iPodSync.framework/Versions/A/iPodSync 0xe09000 - 0xe0eff7 com.apple.iPod 1.6 (17) <4CCD2720-D270-C0D2-1E14-1374779C2401> /System/Library/PrivateFrameworks/iPod.framework/Versions/A/iPod 0xe14000 - 0xe9bfe3 com.apple.iTunes.iPodUpdater 9.0 (9.0) <474ED35C-EDCE-1FEB-AC8C-075B806977A8> /Applications/iTunes.app/Contents/Frameworks/iPodUpdater.framework/Versions/A/iPodUpdater 0xee7000 - 0xf27ff7 com.apple.vmutils 4.2 (106) <834EA6B0-C91B-4CF1-ED3C-229C26459578> /System/Library/PrivateFrameworks/vmutils.framework/Versions/A/vmutils 0x14be000 - 0x14beff7 libmx.A.dylib ??? (???) <01401BF8-3FC7-19CF-ACCE-0F292BFD2F25> /usr/lib/libmx.A.dylib 0x14d0000 - 0x14d0ff7 +net.sourceforge.SafariAdBlockLoader 0.4.0 RC3 (0.4.0 RC3) <8E9A6641-9CE7-5416-DC84-883DB8BAFDDA> /Library/InputManagers/Safari AdBlock/Safari AdBlock Loader.bundle/Contents/MacOS/Safari AdBlock Loader 0x15f8000 - 0x15f9ff7 com.apple.textencoding.unicode 2.3 (2.3) <78A61FD5-70EE-19EA-48D4-3481C640B70D> /System/Library/TextEncodings/Unicode Encodings.bundle/Contents/MacOS/Unicode Encodings 0x1778000 - 0x179efff libssl.0.9.7.dylib ??? (???) <8BF98B2F-0F55-40CA-C082-43C76707BD24> /usr/lib/libssl.0.9.7.dylib 0x17e6000 - 0x17eaff3 com.apple.audio.AudioIPCPlugIn 1.1.0 (1.1.0) <39CD9296-183C-5603-94A4-0A0EC327BA69> /System/Library/Extensions/AudioIPCDriver.kext/Contents/Resources/AudioIPCPlugIn.bundle/Contents/MacOS/AudioIPCPlugIn 0x17ef000 - 0x17f4ffb com.apple.audio.AppleHDAHALPlugIn 1.7.4 (1.7.4a1) <B4217DD8-4BDE-CC1C-70FF-06EA901F376D> /System/Library/Extensions/AppleHDA.kext/Contents/PlugIns/AppleHDAHALPlugIn.bundle/Contents/MacOS/AppleHDAHALPlugIn 0x12800000 - 0x138eaff7 com.apple.CoreFP 1.5.18 (1.5) <740FE25C-0539-AEFF-2108-C2C0D338CDCE> /System/Library/PrivateFrameworks/CoreFP.framework/CoreFP 0x1390f000 - 0x139c4fe7 libcrypto.0.9.7.dylib ??? (???) <4917E4F2-817F-5AC4-3FBE-54BC96360448> /usr/lib/libcrypto.0.9.7.dylib 0x13a0a000 - 0x13a50ff3 com.apple.mobiledevice 251.6 (251.6) <E998830A-CFBF-3060-4770-1089AED68444> /System/Library/PrivateFrameworks/MobileDevice.framework/MobileDevice 0x167f5000 - 0x167f7ff7 com.apple.PDFImporter 2.1 (???) 
<C78368B0-3712-067C-9467-55932890C979> /System/Library/Components/PDFImporter.component/Contents/MacOS/PDFImporter 0x16900000 - 0x16905ff7 com.apple.QuartzComposer.iTunesPlugIn 1.2 (16) <8511A037-AFDE-5D1A-67DA-1B4837432D85> /Library/iTunes/iTunes Plug-ins/Quartz Composer Visualizer.bundle/Contents/MacOS/Quartz Composer Visualizer 0x17fa8000 - 0x181cbfe7 com.apple.audio.codecs.Components 2.0 (2.0) <064E9181-38CC-C2D3-070D-4D162D2903E8> /System/Library/Components/AudioCodecs.component/Contents/MacOS/AudioCodecs 0x18764000 - 0x1877efc3 com.apple.AppleIntermediateCodec 1.2 (145) /Library/QuickTime/AppleIntermediateCodec.component/Contents/MacOS/AppleIntermediateCodec 0x18783000 - 0x18788ff7 com.apple.AppleMPEG2Codec 1.0.1 (220) <6FDFF3C8-7ECE-CB74-1374-9C0230C54F78> /Library/QuickTime/AppleMPEG2Codec.component/Contents/MacOS/AppleMPEG2Codec 0x19137000 - 0x1918cfef com.apple.AppleProResDecoder 2.0 (223) <793BA98A-2E7D-1C39-998D-805B60034DF4> /System/Library/QuickTime/AppleProResDecoder.component/Contents/MacOS/AppleProResDecoder 0x191c4000 - 0x191ddfe7 com.apple.applepixletvideo 1.2.19 (1.2d19) <4A68731C-8071-6CF5-012C-40F00CD1333A> /System/Library/QuickTime/ApplePixletVideo.component/Contents/MacOS/ApplePixletVideo 0x19400000 - 0x19479fef com.apple.AppleVAH264HW.component 2.0 (1.0) <FFC0DED4-1AA1-267E-CE43-0261727DA31D> /System/Library/QuickTime/AppleVAH264HW.component/Contents/MacOS/AppleVAH264HW 0x1953b000 - 0x19577fe3 com.apple.QuickTimeFireWireDV.component 7.6.3 (1584) <8E3D38A3-1005-305C-7B70-D400AB4AC0F3> /System/Library/QuickTime/QuickTimeFireWireDV.component/Contents/MacOS/QuickTimeFireWireDV 0x1a000000 - 0x1a312fe0 +org.perian.Perian 1.1.4 (1.1.4) <577A3B05-0FF7-FC3D-3223-88718A00D84C> /Library/QuickTime/Perian.component/Contents/MacOS/Perian 0x70000000 - 0x700cbfe7 com.apple.audio.units.Components 1.6 (1.6) <A568FC6D-1D2D-A04B-FD1A-AFF6E326E020> /System/Library/Components/CoreAudio.component/Contents/MacOS/CoreAudio 0x8fe00000 - 0x8fe4162b dyld 132.1 (???) 
<211AF0DD-42D9-79C8-BB6A-1F4BEEF4B4AB> /usr/lib/dyld 0x900cb000 - 0x900ccff7 com.apple.audio.units.AudioUnit 1.6 (1.6) <68180B96-381C-A09D-5576-606A134FD953> /System/Library/Frameworks/AudioUnit.framework/Versions/A/AudioUnit 0x900cd000 - 0x908b2fe7 com.apple.WebCore 6531 (6531.9) <F9A9848B-9EB0-B912-49F5-7E8010AF2CF1> /System/Library/Frameworks/WebKit.framework/Versions/A/Frameworks/WebCore.framework/Versions/A/WebCore 0x908b3000 - 0x908effff com.apple.CoreMediaIOServices 101.0 (715) <FD86FB28-9BA1-0993-1172-F10F61EA6344> /System/Library/PrivateFrameworks/CoreMediaIOServices.framework/Versions/A/CoreMediaIOServices 0x9095f000 - 0x90970ff7 com.apple.LangAnalysis 1.6.5 (1.6.5) <E77440D0-76EE-EB4C-3D00-9EDE417F13CF> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/LangAnalysis.framework/Versions/A/LangAnalysis 0x909ba000 - 0x90bb7feb com.apple.AddressBook.framework 5.0 (862) <BD05B213-46CF-8EFD-B801-CF741408600D> /System/Library/Frameworks/AddressBook.framework/Versions/A/AddressBook 0x90bd9000 - 0x90c03ff7 com.apple.shortcut 1.1 (1.1) <B0514FA9-7CAE-AD94-93CA-7B2A2C5F7B8A> /System/Library/PrivateFrameworks/Shortcut.framework/Versions/A/Shortcut 0x90c04000 - 0x90c23fe7 com.apple.opencl 11 (11) <372A42E7-FB10-B74D-E1A0-980E94D07021> /System/Library/Frameworks/OpenCL.framework/Versions/A/OpenCL 0x90c24000 - 0x90c24ff7 com.apple.Accelerate 1.5 (Accelerate 1.5) <F642E7A0-3720-FA19-0190-E6DBD9EF2D9B> /System/Library/Frameworks/Accelerate.framework/Versions/A/Accelerate 0x90c25000 - 0x90c25ff7 com.apple.ApplicationServices 38 (38) <8012B504-3D83-BFBB-DA65-065E061CFE03> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/ApplicationServices 0x90d7c000 - 0x90db6fe7 libFontRegistry.dylib ??? (???) <EE633CF6-8827-EF05-10A4-5F2937120227> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Resources/libFontRegistry.dylib 0x90db7000 - 0x91d45ff7 com.apple.QuickTimeComponents.component 7.6.3 (1584) /System/Library/QuickTime/QuickTimeComponents.component/Contents/MacOS/QuickTimeComponents 0x91d46000 - 0x91d7cfff libtidy.A.dylib ??? (???) <DDFAB560-3883-A6A2-7BDD-D91730982B48> /usr/lib/libtidy.A.dylib 0x91d8d000 - 0x91d92ff7 com.apple.OpenDirectory 10.6 (10.6) <92582807-E8F3-3DD9-EB42-4195CFB754A1> /System/Library/Frameworks/OpenDirectory.framework/Versions/A/OpenDirectory 0x91d93000 - 0x91d9aff7 com.apple.agl 3.0.12 (AGL-3.0.12) <6BF89127-C18C-27A9-F94A-981836A822FE> /System/Library/Frameworks/AGL.framework/Versions/A/AGL 0x91d9b000 - 0x91ddbff3 com.apple.securityinterface 4.0 (36981) <F024C5CA-0762-1599-5BAB-17F785E51075> /System/Library/Frameworks/SecurityInterface.framework/Versions/A/SecurityInterface 0x91e0d000 - 0x91e5dfe7 libGLU.dylib ??? (???) <55A69DCE-1237-341E-F239-CDFE1F5B19BB> /System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGLU.dylib 0x91e5e000 - 0x91ee0ffb SecurityFoundation ??? (???) <29C27E0E-B2B3-BF6B-B1F8-5783B8B01535> /System/Library/Frameworks/SecurityFoundation.framework/Versions/A/SecurityFoundation 0x91f28000 - 0x9235dff7 libLAPACK.dylib ??? (???) <5E2D2283-57DE-9A49-1DB0-CD027FEFA6C2> /System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/libLAPACK.dylib 0x9236b000 - 0x9237ffe7 libbsm.0.dylib ??? (???) 
<14CB053A-7C47-96DA-E415-0906BA1B78C9> /usr/lib/libbsm.0.dylib 0x923bb000 - 0x924e4fe7 com.apple.audio.toolbox.AudioToolbox 1.6 (1.6) <62BEEBE6-68FC-4A48-91CF-39DA2BD793F1> /System/Library/Frameworks/AudioToolbox.framework/Versions/A/AudioToolbox 0x924fe000 - 0x925b0ffb libFontParser.dylib ??? (???) <EB089832-660F-0B34-3AC8-CCDA937987D9> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Resources/libFontParser.dylib 0x9264d000 - 0x9264fff7 libRadiance.dylib ??? (???) <0E03CF64-0931-7B9A-F617-4387B809D6D8> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ImageIO.framework/Versions/A/Resources/libRadiance.dylib 0x92650000 - 0x92696ff7 libauto.dylib ??? (???) <FAB17F30-A28B-E33D-6E21-C7119C9C83ED> /usr/lib/libauto.dylib 0x92697000 - 0x928c2ff3 com.apple.QuartzComposer 4.0 (156.6) <D1D3A5A8-75BC-4556-85FA-8A9F487106DD> /System/Library/Frameworks/Quartz.framework/Versions/A/Frameworks/QuartzComposer.framework/Versions/A/QuartzComposer 0x928c3000 - 0x92913ff7 com.apple.framework.familycontrols 2.0 (2.0) <50617342-E578-4C1C-938A-19A37ECA91CA> /System/Library/PrivateFrameworks/FamilyControls.framework/Versions/A/FamilyControls 0x92914000 - 0x92c0dfef com.apple.QuickTime 7.6.3 (1584) <687233E1-F428-5224-08D5-5874BEA2300D> /System/Library/Frameworks/QuickTime.framework/Versions/A/QuickTime 0x92c22000 - 0x92d62ff7 com.apple.syncservices 5.0 (575) <61B36E07-6D14-97DC-122F-41EDE1F6DB03> /System/Library/Frameworks/SyncServices.framework/Versions/A/SyncServices 0x92d6e000 - 0x92d9fff3 libTrueTypeScaler.dylib ??? (???) <F326E053-7425-2F10-F883-CBD56A1E1B72> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ATS.framework/Versions/A/Resources/libTrueTypeScaler.dylib 0x92da0000 - 0x92dc0fe7 libresolv.9.dylib ??? (???) <A48921CB-3FA7-3071-AF9C-2D86FB493A3A> /usr/lib/libresolv.9.dylib 0x92dca000 - 0x92ed7ff7 com.apple.MediaToolbox 0.420.17 (420.17) <EE843140-C79F-3D8C-B89E-893CD74C3633> /System/Library/PrivateFrameworks/MediaToolbox.framework/Versions/A/MediaToolbox 0x92f4a000 - 0x92ff9fe3 com.apple.QuickTimeImporters.component 7.6.3 (1584) <34BF4FBA-BFCD-9A47-4BA9-E2B155C5C881> /System/Library/QuickTime/QuickTimeImporters.component/Contents/MacOS/QuickTimeImporters 0x92ffa000 - 0x930a9fef com.apple.ColorSync 4.6.0 (4.6.0) <66ABAE86-B0EC-D641-913D-08ACA965F9FA> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ColorSync.framework/Versions/A/ColorSync 0x930aa000 - 0x931a0ff7 libGLProgrammability.dylib ??? (???) <B8E40851-3A01-7D01-2F96-537BF7FA63B5> /System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGLProgrammability.dylib 0x931a8000 - 0x93200fe7 com.apple.datadetectorscore 2.0 (80.7) <A40AA74A-9D13-2A6C-5440-B50905923251> /System/Library/PrivateFrameworks/DataDetectorsCore.framework/Versions/A/DataDetectorsCore 0x93201000 - 0x93228ff7 com.apple.quartzfilters 1.6.0 (1.6.0) <879A3B93-87A6-88FE-305D-DF1EAED04756> /System/Library/Frameworks/Quartz.framework/Versions/A/Frameworks/QuartzFilters.framework/Versions/A/QuartzFilters 0x93229000 - 0x9327aff7 com.apple.HIServices 1.8.0 (???) 
<B8EC13DB-A81A-91BF-8C82-66E840C64C91> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/HIServices.framework/Versions/A/HIServices 0x9327b000 - 0x932b6fe7 com.apple.DebugSymbols 1.1 (70) <05013716-CFCF-801E-5535-D0643869BDCD> /System/Library/PrivateFrameworks/DebugSymbols.framework/Versions/A/DebugSymbols 0x932b7000 - 0x93304feb com.apple.DirectoryService.PasswordServerFramework 6.0 (6.0) <BF66BA5D-BBC8-78A5-DBE2-F9DE3DD1D775> /System/Library/PrivateFrameworks/PasswordServer.framework/Versions/A/PasswordServer 0x93305000 - 0x93319ffb com.apple.speech.synthesis.framework 3.10.35 (3.10.35) <57DD5458-4F24-DA7D-0927-C3321A65D743> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/SpeechSynthesis.framework/Versions/A/SpeechSynthesis 0x9331a000 - 0x9335eff3 com.apple.coreui 0.2 (112) <A810DFFD-6314-5E2B-93A4-D5626634B1EE> /System/Library/PrivateFrameworks/CoreUI.framework/Versions/A/CoreUI 0x9335f000 - 0x93418fe7 libsqlite3.dylib ??? (???) <16CEF8E8-8C9A-94CD-EF5D-05477844C005> /usr/lib/libsqlite3.dylib 0x93419000 - 0x93473ff7 com.apple.framework.IOKit 2.0 (???) <7618DDEC-2E3B-9C6E-FDC9-15169E24B4FB> /System/Library/Frameworks/IOKit.framework/Versions/A/IOKit 0x93474000 - 0x934edff3 com.apple.audio.CoreAudio 3.2.0 (3.2) <91AE891E-6015-AABE-3512-2D5EBCA0937B> /System/Library/Frameworks/CoreAudio.framework/Versions/A/CoreAudio 0x934ee000 - 0x935effe7 libxml2.2.dylib ??? (???) <C242A74D-280A-90C3-3F79-891624AA45D2> /usr/lib/libxml2.2.dylib 0x935f0000 - 0x93634fe7 com.apple.Metadata 10.6.0 (507.1) <CBD1B22B-5F10-C784-03A2-35106B97DF3F> /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/Metadata.framework/Versions/A/Metadata 0x93635000 - 0x93637ff7 com.apple.QuickTimeH264.component 7.6.3 (1584) /System/Library/QuickTime/QuickTimeH264.component/Contents/MacOS/QuickTimeH264 0x93639000 - 0x93654ff7 libPng.dylib ??? (???) <38DD4AA1-0643-85A0-F2F5-EE9269729975> /System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/ImageIO.framework/Versions/A/Resources/libPng.dylib 0x93655000 - 0x936affe7 com.apple.CorePDF 1.0 (1.0) <590244C9-15D7-7A65-13AF-6F597123746B> /System/Library/PrivateFrameworks/CorePDF.framework/Versions/A/CorePDF 0x936b0000 - 0x936e8ff7 com.apple.LDAPFramework 2.0 (120.1) <8C7F3F42-6A4D-D37A-4232-685D44E8769E> /System/Library/Frameworks/LDAP.framework/Versions/A/LDAP 0x93723000 - 0x93757ff7 libcups.2.dylib ??? (???) <9078BA07-DEE1-6597-D15D-7BE3A20CB5A0> /usr/lib/libcups.2.dylib 0x93758000 - 0x93758ff7 liblangid.dylib ??? (???) <B99607FC-5646-32C8-2C16-AFB5EA9097C2> /usr/lib/liblangid.dylib 0x93759000 - 0x9375cffb com.apple.help 1.3.1 (41) <67F1F424-3983-7A2A-EC21-867BE838E90B> /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/Help.framework/Versions/A/Help 0x9375d000 - 0x937d4feb com.apple.backup.framework 1.1 (1.0) <73C642BD-

    Read the article

  • Cisco Unified Communication integration for Microsoft Lync crashes on Remote Desktop Services 2008 R2!

    - by user66267
    Hi everybody i have deployed office communication server 2007 R2 and communicator 2007 R2 and i made integration with Cisco Unified Communication Manager 7.1 in my network, i also uses Remote Desktop Servers 2008 R2 for Thin Client Computers, now that i installed Cisco UC integration client for communicator 2007 R2 (Ver. 8.0.3) or Cisco UC integration client for Microsoft Lync that works fine on PCs but Not on Remote Desktop Servers. i have Three Remote Desktop Servers in a Farm with loadbalancing enabled. all other applications on these RDP servers works fine for 120 active users. some times when i start Cisco UC client on Remote Desktop servers i get the following error "The Port Reguired for callbacks from Cisco unified client framework could not be read, please retry" i also found the folowing log so i think that may be the cause: 2011-01-05 08:24:21,489 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.SingleInstanceManager] [SingleInstanceManager.acquireMutex(0)] - Acquiring Mutex... 2011-01-05 08:24:21,512 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.IPC.PipeServer] [PipeServer.start(0)] - Starting Pipe Server 2011-01-05 08:24:21,516 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.SingleInstanceManager] [SingleInstanceManager.acquireMutex(0)] - Mutex Acquired... 2011-01-05 08:24:25,437 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.process.ProcessUtil] [ProcessUtil.isOtherPRTProcessRunning(0)] - No other instance(s) of ProblemReportingTool.exe found 2011-01-05 08:24:25,438 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - ******************************* 2011-01-05 08:24:25,439 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - **Launching CUCSF Problem Reporting Tool v0.8.3.2** 2011-01-05 08:24:25,440 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - ******************************* 2011-01-05 08:24:25,441 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - Raw input: -reason=Launched by the user from CUCIMOC ver 8.5.105.17095 -file=C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt 2011-01-05 08:24:25,445 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - Current culture: English (United States) 2011-01-05 08:24:25,448 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.init(0)] - Loading string resources from file 2011-01-05 08:24:25,455 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.context.CLIUtil] [CLIUtil.parse(0)] - Argument -reason Launched by the user from CUCIMOC ver 8.5.105.17095 received 2011-01-05 08:24:25,456 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.context.CLIUtil] [CLIUtil.parse(0)] - Argument -file C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt received 2011-01-05 08:24:25,457 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.startup(0)] - Launching GUI... 
2011-01-05 08:24:25,536 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PROG.PleaseWaitText from resource file 2011-01-05 08:24:25,545 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.OKButtonText from resource file 2011-01-05 08:24:25,548 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.CancelButtonText from resource file 2011-01-05 08:24:25,549 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.ErrorMsgText1 from resource file 2011-01-05 08:24:25,549 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.Title from resource file 2011-01-05 08:24:25,552 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.WindowTitle from resource file 2011-01-05 08:24:25,553 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.AgreeText from resource file 2011-01-05 08:24:25,553 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyText from resource file 2011-01-05 08:24:25,554 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyTitle from resource file 2011-01-05 08:24:25,555 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyLinkText from resource file 2011-01-05 08:24:25,555 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.DescriptionTitle from resource file 2011-01-05 08:24:25,629 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager..ctor(0)] - Starting SysInfoManager... 
2011-01-05 08:24:25,634 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: systeminfo.exe 2011-01-05 08:24:25,669 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: tasklist.exe 2011-01-05 08:24:25,672 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: ipconfig.exe 2011-01-05 08:24:25,676 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: netstat.exe 2011-01-05 08:24:25,684 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: net.exe 2011-01-05 08:24:25,926 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchHardwareInfoThread(0)] - Launching worker thread: HardwareInfo 2011-01-05 08:24:25,928 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getHardWareInfo(0)] - Gathering CPU data 2011-01-05 08:24:26,149 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchCSFDirectoryInfoThread(0)] - Gathering CSF Directory Listing 2011-01-05 08:24:26,153 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [CSFDirectoryInfo.getCSFInstallPath(0)] - Retrieving CSF Install Directory 2011-01-05 08:24:26,159 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [CSFDirectoryInfo.getCSFInstallPath(0)] - CSF Install Path: C:\Program Files (x86)\Common Files\Cisco Systems\Client Services Framework 2011-01-05 08:24:26,162 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchWMIInfoThread(0)] - Launching worker thread: WMIInfo 2011-01-05 08:24:26,164 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Audio info... 
2011-01-05 08:24:26,168 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchRegistryAndEnvironmentalVarInfoThread(0)] - Launching worker thread: Registry & Environment Variables 2011-01-05 08:24:26,173 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Cisco Systems, Inc.\Client Services Framework\AdminData\ 2011-01-05 08:24:26,180 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Policies\Cisco Systems, Inc.\Client Services Framework\AdminData\ 2011-01-05 08:24:26,182 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Cisco Systems, Inc.\Unified Communications\CUCSF 2011-01-05 08:24:26,183 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment 2011-01-05 08:24:26,184 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6 2011-01-05 08:24:26,186 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6.0_17 2011-01-05 08:24:26,188 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6.0_17\MSI 2011-01-05 08:24:26,190 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.gatherRegistryAndEnvInfo(0)] - Gathering Environment Variables data 2011-01-05 08:24:26,283 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Video driver info... 2011-01-05 08:24:26,750 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.writeFile(0)] - Creating file: DirectoryInfo.txt 2011-01-05 08:24:26,759 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Monitor info... 2011-01-05 08:24:34,483 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.gatherFiles(0)] - Config Dir C:\Users\m.sadeghi\AppData\Roaming\Cisco\Unified Communications\ 2011-01-05 08:24:34,530 [WARN ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addFile(0)] - C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt not found 2011-01-05 08:24:34,561 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addSystemInfo(0)] - Waiting for worker threads... 2011-01-05 08:24:38,180 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getHardWareInfo(0)] - Gathering Resolution data 2011-01-05 08:24:55,565 [ERROR] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addSystemInfo(0)] - One or more worker threads have not returned in a timely manner. Forcing quit. 
2011-01-05 08:24:55,568 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.writeFile(0)] - Creating file: SystemInfo.txt 2011-01-05 08:24:55,577 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Checking for files to be excluded 2011-01-05 08:24:55,578 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: d11bfd8f-9745-41db-a35b-200389e65583.dat 2011-01-05 08:24:55,579 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: cacerts 2011-01-05 08:24:55,580 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.2639.20110103081119+0330.wav 2011-01-05 08:24:55,581 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.farhad.20101224165510+0330.wav 2011-01-05 08:24:55,581 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.postmaster.20101224165906+0330.wav 2011-01-05 08:24:55,582 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: VoicemailBeep.wav 2011-01-05 08:24:55,583 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: secModeNone 2011-01-05 08:24:55,586 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Preparing to create zip file... 2011-01-05 08:24:55,588 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - 60 files found 2011-01-05 08:24:55,589 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying .CSFExit.loc to temp folder. 2011-01-05 08:24:55,595 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSF.loc to temp folder. 2011-01-05 08:24:55,597 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CsfAddress.dat to temp folder. 2011-01-05 08:24:55,600 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSFLogSetting.dat to temp folder. 2011-01-05 08:24:55,634 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSFSecurityKey.dat to temp folder. 2011-01-05 08:24:55,637 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CommunicationHistory.xml to temp folder. 2011-01-05 08:24:55,641 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying MehdiSadeghi.cnf.xml to temp folder. 2011-01-05 08:24:55,751 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying jtapi.jar to temp folder. 2011-01-05 08:24:55,812 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi.index to temp folder. 2011-01-05 08:24:55,820 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi01.log to temp folder. 2011-01-05 08:24:55,887 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi02.log to temp folder. 2011-01-05 08:24:55,968 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi03.log to temp folder. 
2011-01-05 08:24:55,972 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi04.log to temp folder. 2011-01-05 08:24:56,008 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi05.log to temp folder. 2011-01-05 08:24:56,038 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi06.log to temp folder. 2011-01-05 08:24:56,079 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi07.log to temp folder. 2011-01-05 08:24:56,100 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi08.log to temp folder. 2011-01-05 08:24:56,140 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi09.log to temp folder. 2011-01-05 08:24:56,215 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi10.log to temp folder. 2011-01-05 08:24:56,296 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log to temp folder. 2011-01-05 08:24:56,319 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.1 to temp folder. 2011-01-05 08:24:56,498 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.2 to temp folder. 2011-01-05 08:24:56,708 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.3 to temp folder. 2011-01-05 08:24:56,912 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.4 to temp folder. 2011-01-05 08:24:57,105 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.5 to temp folder. 2011-01-05 08:24:57,292 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.6 to temp folder. 2011-01-05 08:24:57,505 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying tracker.log to temp folder. 2011-01-05 08:24:57,523 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VideoEngineEncryptedTrace.txt to temp folder. 2011-01-05 08:24:57,542 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VoiceEngineDebugTrace.txt to temp folder. 2011-01-05 08:24:57,545 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VoiceEngineTrace.txt to temp folder. 2011-01-05 08:24:57,548 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying operationreport.log to temp folder. 2011-01-05 08:24:57,551 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying voicemailbox.dat to temp folder. 2011-01-05 08:24:57,554 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying voicemailfolder.dat to temp folder. 2011-01-05 08:24:57,558 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying UIPrefs.xml to temp folder. 2011-01-05 08:24:57,562 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log to temp folder. 
2011-01-05 08:24:57,569 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.1 to temp folder. 2011-01-05 08:24:57,752 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.10 to temp folder. 2011-01-05 08:24:58,099 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.2 to temp folder. 2011-01-05 08:24:58,302 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.3 to temp folder. 2011-01-05 08:24:58,517 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.4 to temp folder. 2011-01-05 08:24:58,697 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.5 to temp folder. 2011-01-05 08:24:58,899 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.6 to temp folder. 2011-01-05 08:24:59,100 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.7 to temp folder. 2011-01-05 08:24:59,303 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.8 to temp folder. 2011-01-05 08:24:59,500 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.9 to temp folder. 2011-01-05 08:24:59,895 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Cisco.ClickToCall.Common.Core.dll.config to temp folder. 2011-01-05 08:24:59,915 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying ClickToCall.pref to temp folder. 2011-01-05 08:24:59,918 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoClickToCall.dll.config to temp folder. 2011-01-05 08:24:59,928 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoClickToCallContacts.dll.config to temp folder. 2011-01-05 08:24:59,948 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoPersonName.dll.config to temp folder. 2011-01-05 08:24:59,980 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying userData.properties to temp folder. 2011-01-05 08:24:59,988 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying userData.properties.backup to temp folder. 2011-01-05 08:24:59,990 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying cisco-uc-client.log4net.config to temp folder. 2011-01-05 08:24:59,994 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying cisco-uc-tab.log4net.config to temp folder. 2011-01-05 08:25:00,011 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying LocalSettings.xml to temp folder. 2011-01-05 08:25:00,025 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Description.txt to temp folder. 2011-01-05 08:25:00,028 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying LaunchInfo.txt to temp folder. 
2011-01-05 08:25:00,031 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying DirectoryInfo.txt to temp folder. 2011-01-05 08:25:00,034 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying SystemInfo.txt to temp folder. 2011-01-05 08:25:00,036 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying csf-prt.log to temp folder.

    Read the article

  • Linux Mint 10 LXDE computer to act as LTSP Server without luck

    - by Rautamiekka
    So I've tried to make our screen-broken HP laptop to also serve as LTSP Server in addition to various other tasks, without luck, which may be cuz I'm running LM10 LXDE while the instructions are for Ubuntu. Excuse my ignorance. The entire output from Terminal after installing LTSP stuff along with a Server kernel and a load of other packages: administrator@rauta-mint-turion ~ $ sudo lt ltrace ltsp-build-client ltsp-chroot ltspfs ltspfsmounter ltsp-info ltsp-localapps ltsp-update-image ltsp-update-kernels ltsp-update-sshkeys administrator@rauta-mint-turion ~ $ sudo ltsp-update-sshkeys administrator@rauta-mint-turion ~ $ sudo ltsp-update-kernels find: `/opt/ltsp/': No such file or directory administrator@rauta-mint-turion ~ $ sudo ltsp-build-client /usr/share/ltsp/plugins/ltsp-build-client/common/010-chroot-tagging: line 3: /opt/ltsp/i386/etc/ltsp_chroot: No such file or directory error: LTSP client installation ended abnormally administrator@rauta-mint-turion ~ $ sudo ltsp-update-image Cannot determine assigned port. Assigning to port 2000. mkdir: cannot create directory `/opt/ltsp/i386/etc/ltsp': No such file or directory /usr/sbin/ltsp-update-image: 274: cannot create /opt/ltsp/i386/etc/ltsp/update-kernels.conf: Directory nonexistent /usr/sbin/ltsp-update-image: 274: cannot create /opt/ltsp/i386/etc/ltsp/update-kernels.conf: Directory nonexistent Regenerating kernel... chroot: failed to run command `/usr/share/ltsp/update-kernels': No such file or directory Done. Configuring inetd... Done. Updating pxelinux default configuration...Done. Skipping invalid chroot: /opt/ltsp/i386 chroot: failed to run command `test': No such file or directory administrator@rauta-mint-turion ~ $ sudo ltsp-chroot chroot: failed to run command `/bin/bash': No such file or directory administrator@rauta-mint-turion ~ $ bash administrator@rauta-mint-turion ~ $ exit exit administrator@rauta-mint-turion ~ $ sudo ls /opt/ltsp i386 administrator@rauta-mint-turion ~ $ sudo ls /opt/ltsp/i386/ administrator@rauta-mint-turion ~ $ sudo ltsp-build-client NOTE: Root directory /opt/ltsp/i386 already exists, this will lead to problems, please remove it before trying again. Exiting. 
error: LTSP client installation ended abnormally administrator@rauta-mint-turion ~ $ sudo rm -rv /opt/ltsp/i386 removed directory: `/opt/ltsp/i386' administrator@rauta-mint-turion ~ $ sudo ltsp-build-client /usr/share/ltsp/plugins/ltsp-build-client/common/010-chroot-tagging: line 3: /opt/ltsp/i386/etc/ltsp_chroot: No such file or directory error: LTSP client installation ended abnormally administrator@rauta-mint-turion ~ $ aptitude search ltsp p fts-ltsp-ldap - LDAP LTSP module for the TFTP/Fuse supplicant p ltsp-client - LTSP client environment p ltsp-client-core - LTSP client environment (core) p ltsp-cluster-accountmanager - Account creation and management daemon for LTSP p ltsp-cluster-control - Web based thin-client configuration management p ltsp-cluster-lbagent - LTSP loadbalancer agent offers variables about the state of the ltsp server p ltsp-cluster-lbserver - LTSP loadbalancer server returns the optimal ltsp server to terminal p ltsp-cluster-nxloadbalancer - Minimal NX loadbalancer for ltsp-cluster p ltsp-cluster-pxeconfig - LTSP-Cluster symlink generator p ltsp-controlaula - Classroom management tool with ltsp clients p ltsp-docs - LTSP Documentation p ltsp-livecd - starts an LTSP live server on an Ubuntu livecd session p ltsp-manager - Ubuntu LTSP server management GUI i A ltsp-server - Basic LTSP server environment i ltsp-server-standalone - Complete LTSP server environment i A ltspfs - Fuse based remote filesystem for LTSP thin clients p ltspfsd - Fuse based remote filesystem hooks for LTSP thin clients p ltspfsd-core - Fuse based remote filesystem daemon for LTSP thin clients p python-ltsp - provides ltsp related functions administrator@rauta-mint-turion ~ $ sudo aptitude purge ltsp-server ltsp-server-standalone ltspfs The following packages will be REMOVED: debconf-utils{u} debootstrap{u} dhcp3-server{u} gstreamer0.10-pulseaudio{u} ldm-server{u} libpulse-browse0{u} ltsp-server{p} ltsp-server-standalone{p} ltspfs{p} nbd-server{u} openbsd-inetd{u} pulseaudio{u} pulseaudio-esound-compat{u} pulseaudio-module-x11{u} pulseaudio-utils{u} squashfs-tools{u} tftpd-hpa{u} 0 packages upgraded, 0 newly installed, 17 to remove and 0 not upgraded. Need to get 0B of archives. After unpacking 6,996kB will be freed. Do you want to continue? [Y/n/?] (Reading database ... 158454 files and directories currently installed.) Removing ltsp-server-standalone ... Purging configuration files for ltsp-server-standalone ... Removing ltsp-server ... Purging configuration files for ltsp-server ... dpkg: warning: while removing ltsp-server, directory '/var/lib/tftpboot/ltsp' not empty so not removed. dpkg: warning: while removing ltsp-server, directory '/var/lib/tftpboot' not empty so not removed. Processing triggers for man-db ... (Reading database ... 158195 files and directories currently installed.) Removing debconf-utils ... Removing debootstrap ... Removing dhcp3-server ... * Stopping DHCP server dhcpd3 [ OK ] Removing gstreamer0.10-pulseaudio ... Removing ldm-server ... Removing pulseaudio-module-x11 ... Removing pulseaudio-esound-compat ... Removing pulseaudio ... * PulseAudio configured for per-user sessions Removing pulseaudio-utils ... Removing libpulse-browse0 ... Processing triggers for man-db ... Processing triggers for ureadahead ... ureadahead will be reprofiled on next reboot Processing triggers for libc-bin ... ldconfig deferred processing now taking place (Reading database ... 157944 files and directories currently installed.) Removing ltspfs ... Processing triggers for man-db ... 
(Reading database ... 157932 files and directories currently installed.) Removing nbd-server ... Stopping Network Block Device server: nbd-server. Removing openbsd-inetd ... * Stopping internet superserver inetd [ OK ] Removing squashfs-tools ... Removing tftpd-hpa ... tftpd-hpa stop/waiting Processing triggers for ureadahead ... Processing triggers for man-db ... administrator@rauta-mint-turion ~ $ sudo aptitude purge ~c The following packages will be REMOVED: dhcp3-server{p} libpulse-browse0{p} nbd-server{p} openbsd-inetd{p} pulseaudio{p} tftpd-hpa{p} 0 packages upgraded, 0 newly installed, 6 to remove and 0 not upgraded. Need to get 0B of archives. After unpacking 0B will be used. Do you want to continue? [Y/n/?] (Reading database ... 157881 files and directories currently installed.) Removing dhcp3-server ... Purging configuration files for dhcp3-server ... Removing libpulse-browse0 ... Purging configuration files for libpulse-browse0 ... Removing nbd-server ... Purging configuration files for nbd-server ... Removing openbsd-inetd ... Purging configuration files for openbsd-inetd ... Removing pulseaudio ... Purging configuration files for pulseaudio ... Removing tftpd-hpa ... Purging configuration files for tftpd-hpa ... Processing triggers for ureadahead ... administrator@rauta-mint-turion ~ $ sudo aptitude install ltsp-server-standalone The following NEW packages will be installed: debconf-utils{a} debootstrap{a} ldm-server{a} ltsp-server{a} ltsp-server-standalone ltspfs{a} nbd-server{a} openbsd-inetd{a} squashfs-tools{a} The following packages are RECOMMENDED but will NOT be installed: dhcp3-server pulseaudio-esound-compat tftpd-hpa 0 packages upgraded, 9 newly installed, 0 to remove and 0 not upgraded. Need to get 0B/498kB of archives. After unpacking 2,437kB will be used. Do you want to continue? [Y/n/?] Preconfiguring packages ... Selecting previously deselected package openbsd-inetd. (Reading database ... 157868 files and directories currently installed.) Unpacking openbsd-inetd (from .../openbsd-inetd_0.20080125-4ubuntu2_i386.deb) ... Processing triggers for man-db ... Processing triggers for ureadahead ... Setting up openbsd-inetd (0.20080125-4ubuntu2) ... * Stopping internet superserver inetd [ OK ] * Starting internet superserver inetd [ OK ] Selecting previously deselected package ldm-server. (Reading database ... 157877 files and directories currently installed.) Unpacking ldm-server (from .../ldm-server_2%3a2.1.3-0ubuntu1_all.deb) ... Selecting previously deselected package debconf-utils. Unpacking debconf-utils (from .../debconf-utils_1.5.32ubuntu3_all.deb) ... Selecting previously deselected package debootstrap. Unpacking debootstrap (from .../debootstrap_1.0.23ubuntu1_all.deb) ... Selecting previously deselected package nbd-server. Unpacking nbd-server (from .../nbd-server_1%3a2.9.14-2ubuntu1_i386.deb) ... Selecting previously deselected package squashfs-tools. Unpacking squashfs-tools (from .../squashfs-tools_1%3a4.0-8_i386.deb) ... Selecting previously deselected package ltsp-server. 
GNU nano 2.2.4 File: /etc/ltsp/ltsp-update-image.conf # Configuration file for ltsp-update-image # By default, do not compress the image # as it's reported to make it unstable NO_COMP="-noF -noD -noI -no-exports" [ Switched to /etc/ltsp/ltsp-update-image.conf ] administrator@rauta-mint-turion ~ $ ls /opt/ firefox/ ltsp/ mint-flashplugin/ administrator@rauta-mint-turion ~ $ ls /opt/ltsp/i386/ administrator@rauta-mint-turion ~ $ ls /opt/ltsp/ i386 administrator@rauta-mint-turion ~ $ sudo ltsp ltsp-build-client ltsp-chroot ltspfs ltspfsmounter ltsp-info ltsp-localapps ltsp-update-image ltsp-update-kernels ltsp-update-sshkeys administrator@rauta-mint-turion ~ $ sudo ltsp-build-client NOTE: Root directory /opt/ltsp/i386 already exists, this will lead to problems, please remove it before trying again. Exiting. error: LTSP client installation ended abnormally administrator@rauta-mint-turion ~ $ ^C administrator@rauta-mint-turion ~ $ sudo aptitude purge ltsp ltspfs ltsp-server ltsp-server-standalone administrator@rauta-mint-turion ~ $ sudo aptitude purge ltsp-server The following packages will be REMOVED: ltsp-server{p} 0 packages upgraded, 0 newly installed, 1 to remove and 0 not upgraded. Need to get 0B of archives. After unpacking 1,073kB will be freed. The following packages have unmet dependencies: ltsp-server-standalone: Depends: ltsp-server but it is not going to be installed. The following actions will resolve these dependencies: Remove the following packages: 1) ltsp-server-standalone Accept this solution? [Y/n/q/?] The following packages will be REMOVED: debconf-utils{u} debootstrap{u} ldm-server{u} ltsp-server{p} ltsp-server-standalone{a} ltspfs{u} nbd-server{u} openbsd-inetd{u} squashfs-tools{u} 0 packages upgraded, 0 newly installed, 9 to remove and 0 not upgraded. Need to get 0B of archives. After unpacking 2,437kB will be freed. Do you want to continue? [Y/n/?] (Reading database ... 158244 files and directories currently installed.) Removing ltsp-server-standalone ... (Reading database ... 158240 files and directories currently installed.) Removing ltsp-server ... Purging configuration files for ltsp-server ... dpkg: warning: while removing ltsp-server, directory '/var/lib/tftpboot/ltsp' not empty so not removed. dpkg: warning: while removing ltsp-server, directory '/var/lib/tftpboot' not empty so not removed. Processing triggers for man-db ... (Reading database ... 157987 files and directories currently installed.) Removing debconf-utils ... Removing debootstrap ... Removing ldm-server ... Removing ltspfs ... Removing nbd-server ... Stopping Network Block Device server: nbd-server. Removing openbsd-inetd ... * Stopping internet superserver inetd [ OK ] Removing squashfs-tools ... Processing triggers for man-db ... Processing triggers for ureadahead ... administrator@rauta-mint-turion ~ $ sudo aptitude purge ~c The following packages will be REMOVED: ltsp-server-standalone{p} nbd-server{p} openbsd-inetd{p} 0 packages upgraded, 0 newly installed, 3 to remove and 0 not upgraded. Need to get 0B of archives. After unpacking 0B will be used. Do you want to continue? [Y/n/?] (Reading database ... 157871 files and directories currently installed.) Removing ltsp-server-standalone ... Purging configuration files for ltsp-server-standalone ... Removing nbd-server ... Purging configuration files for nbd-server ... Removing openbsd-inetd ... Purging configuration files for openbsd-inetd ... Processing triggers for ureadahead ... 
administrator@rauta-mint-turion ~ $ sudo rm -rv /var/lib/t teamspeak-server/ tftpboot/ transmission-daemon/ administrator@rauta-mint-turion ~ $ sudo rm -rv /var/lib/tftpboot removed `/var/lib/tftpboot/ltsp/i386/pxelinux.cfg/default' removed directory: `/var/lib/tftpboot/ltsp/i386/pxelinux.cfg' removed directory: `/var/lib/tftpboot/ltsp/i386' removed directory: `/var/lib/tftpboot/ltsp' removed directory: `/var/lib/tftpboot' administrator@rauta-mint-turion ~ $ sudo find / -name "ltsp" /opt/ltsp administrator@rauta-mint-turion ~ $ sudo rm -rv /opt/ltsp removed directory: `/opt/ltsp/i386' removed directory: `/opt/ltsp' administrator@rauta-mint-turion ~ $ sudo aptitude install ltsp-server-standalone The following NEW packages will be installed: debconf-utils{a} debootstrap{a} ldm-server{a} ltsp-server{a} ltsp-server-standalone ltspfs{a} nbd-server{a} openbsd-inetd{a} squashfs-tools{a} The following packages are RECOMMENDED but will NOT be installed: dhcp3-server pulseaudio-esound-compat tftpd-hpa 0 packages upgraded, 9 newly installed, 0 to remove and 0 not upgraded. Need to get 0B/498kB of archives. After unpacking 2,437kB will be used. Do you want to continue? [Y/n/?] Preconfiguring packages ... Selecting previously deselected package openbsd-inetd. (Reading database ... 157868 files and directories currently installed.) Unpacking openbsd-inetd (from .../openbsd-inetd_0.20080125-4ubuntu2_i386.deb) ... Processing triggers for man-db ... GNU nano 2.2.4 New Buffer GNU nano 2.2.4 File: /etc/ltsp/dhcpd.conf # # Default LTSP dhcpd.conf config file. # authoritative; subnet 192.168.2.0 netmask 255.255.255.0 { range 192.168.2.70 192.168.2.79; option domain-name "jarvinen"; option domain-name-servers 192.168.2.254; option broadcast-address 192.168.2.255; option routers 192.168.2.254; # next-server 192.168.0.1; # get-lease-hostnames true; option subnet-mask 255.255.255.0; option root-path "/opt/ltsp/i386"; if substring( option vendor-class-identifier, 0, 9 ) = "PXEClient" { filename "/ltsp/i386/pxelinux.0"; } else { filename "/ltsp/i386/nbi.img"; } } [ Wrote 22 lines ] administrator@rauta-mint-turion ~ $ sudo service dhcp3-server start * Starting DHCP server dhcpd3 [ OK ] administrator@rauta-mint-turion ~ $ sudo ltsp-build-client /usr/share/ltsp/plugins/ltsp-build-client/common/010-chroot-tagging: line 3: /opt/ltsp/i386/etc/ltsp_chroot: No such file or directory error: LTSP client installation ended abnormally administrator@rauta-mint-turion ~ $ sudo cat /usr/share/ltsp/plugins/ltsp-build-client/common/010-chroot-tagging case "$MODE" in after-install) echo LTSP_CHROOT=$ROOT >> $ROOT/etc/ltsp_chroot ;; esac administrator@rauta-mint-turion ~ $ cd $ROOT/etc/ltsp_chroot bash: cd: /etc/ltsp_chroot: No such file or directory administrator@rauta-mint-turion ~ $ cd $ROOT/etc/ administrator@rauta-mint-turion /etc $ ls acpi chatscripts emacs group insserv.conf.d logrotate.conf mysql php5 rc6.d smbnetfs.conf UPower adduser.conf ConsoleKit environment group- iproute2 logrotate.d nanorc phpmyadmin rc.local snmp usb_modeswitch.conf alternatives console-setup esound grub.d issue lsb-base nbd-server pm rcS.d sound usb_modeswitch.d anacrontab cron.d firefox gshadow issue.net lsb-base-logging.sh ndiswrapper pnm2ppa.conf request-key.conf ssh ushare.conf apache2 cron.daily firefox-3.5 gshadow- java lsb-release netscsid.conf polkit-1 resolvconf ssl vga apm cron.hourly firestarter gtk-2.0 java-6-sun ltrace.conf network popularity-contest.conf resolv.conf sudoers vim apparmor cron.monthly fonts gtkmathview kbd ltsp 
NetworkManager ppp rmt sudoers.d vlc apparmor.d crontab foomatic gufw kernel lxdm networks printcap rpc su-to-rootrc vsftpd.conf apport cron.weekly fstab hal kernel-img.conf magic nsswitch.conf profile rsyslog.conf sweeprc w3m apt crypttab ftpusers hdparm.conf kerneloops.conf magic.mime ntp.conf profile.d rsyslog.d sysctl.conf wgetrc at.deny cups fuse.conf host.conf kompozer mailcap obex-data-server protocols samba sysctl.d wildmidi auto-apt dbconfig-common gai.conf hostname ldap mailcap.order ODBCDataSources psiconv sane.d teamspeak-server wodim.conf avahi dbus-1 gamin hosts ld.so.cache manpath.config odbc.ini pulse screenrc terminfo wpa_supplicant bash.bashrc debconf.conf gconf hosts.allow ld.so.conf menu openal purple securetty thunderbird X11 bash_completion debian_version gdb hosts.deny ld.so.conf.d menu-methods openoffice python security timezone xdg bash_completion.d default gdm hp legal mime.types opt python2.6 sensors3.conf transmission-daemon xml bindresvport.blacklist defoma ghostscript ifplugd lftp.conf mke2fs.conf pam.conf python2.7 sensors.d ts.conf xulrunner-1.9.2 blkid.conf deluser.conf gimp inetd.conf libpaper.d modprobe.d pam.d python3.1 services ucf.conf zsh_command_not_found blkid.tab depmod.d gnome init libreoffice modules pango rc0.d sgml udev bluetooth dhcp gnome-system-tools init.d linuxmint motd papersize rc1.d shadow ufw bonobo-activation dhcp3 gnome-vfs-2.0 initramfs-tools locale.alias mplayer passwd rc2.d shadow- updatedb.conf ca-certificates dictionaries-common gnome-vfs-mime-magic inputrc localtime mtab passwd- rc3.d shells update-manager ca-certificates.conf doc-base gre.d insserv logcheck mtab.fuselock pcmcia rc4.d skel update-motd.d calendar dpkg groff insserv.conf login.defs mtools.conf perl rc5.d smb2www update-notifier administrator@rauta-mint-turion /etc $ cd ltsp/ administrator@rauta-mint-turion /etc/ltsp $ ls dhcpd.conf ltsp-update-image.conf administrator@rauta-mint-turion /etc/ltsp $ cat dhcpd.conf # # Default LTSP dhcpd.conf config file. # authoritative; subnet 192.168.2.0 netmask 255.255.255.0 { range 192.168.2.70 192.168.2.79; option domain-name "jarvinen"; option domain-name-servers 192.168.2.254; option broadcast-address 192.168.2.255; option routers 192.168.2.254; # next-server 192.168.0.1; # get-lease-hostnames true; option subnet-mask 255.255.255.0; option root-path "/opt/ltsp/i386"; if substring( option vendor-class-identifier, 0, 9 ) = "PXEClient" { filename "/ltsp/i386/pxelinux.0"; } else { filename "/ltsp/i386/nbi.img"; } } administrator@rauta-mint-turion /etc/ltsp $


  • Cisco Unified Communication integration for Microsoft Lync crashes on Remote Desktop Services 2008 R2!

    - by user66267
    Hi everybody i have deployed office communication server 2007 R2 and communicator 2007 R2 and i made integration with Cisco Unified Communication Manager 7.1 in my network, i also use Remote Desktop Servers 2008 R2 for Thin Client Computers, now that i installed Cisco UC integration client for communicator 2007 R2 (Ver. 8.0.3) or Cisco UC integration client for Microsoft Lync that works fine on PCs but Not on Remote Desktop Servers. i have Three Remote Desktop Servers in a Farm with loadbalancing enabled. all other applications on these RDP servers works fine for 120 active users. some times when i start Cisco UC client on Remote Desktop servers i get the following error: "The Port Reguired for callbacks from Cisco unified client framework could not be read, please retry" i also found the folowing log so i think that may be the cause: 2011-01-05 08:24:21,489 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.SingleInstanceManager] [SingleInstanceManager.acquireMutex(0)] - Acquiring Mutex... 2011-01-05 08:24:21,512 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.IPC.PipeServer] [PipeServer.start(0)] - Starting Pipe Server 2011-01-05 08:24:21,516 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.SingleInstanceManager] [SingleInstanceManager.acquireMutex(0)] - Mutex Acquired... 2011-01-05 08:24:25,437 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.process.ProcessUtil] [ProcessUtil.isOtherPRTProcessRunning(0)] - No other instance(s) of ProblemReportingTool.exe found 2011-01-05 08:24:25,438 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - ******************************* 2011-01-05 08:24:25,439 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - **Launching CUCSF Problem Reporting Tool v0.8.3.2** 2011-01-05 08:24:25,440 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - ******************************* 2011-01-05 08:24:25,441 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - Raw input: -reason=Launched by the user from CUCIMOC ver 8.5.105.17095 -file=C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt 2011-01-05 08:24:25,445 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.Main(0)] - Current culture: English (United States) 2011-01-05 08:24:25,448 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.init(0)] - Loading string resources from file 2011-01-05 08:24:25,455 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.context.CLIUtil] [CLIUtil.parse(0)] - Argument -reason Launched by the user from CUCIMOC ver 8.5.105.17095 received 2011-01-05 08:24:25,456 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.context.CLIUtil] [CLIUtil.parse(0)] - Argument -file C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt received 2011-01-05 08:24:25,457 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.controller.Controller] [Controller.startup(0)] - Launching GUI... 
2011-01-05 08:24:25,536 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PROG.PleaseWaitText from resource file 2011-01-05 08:24:25,545 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.OKButtonText from resource file 2011-01-05 08:24:25,548 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.CancelButtonText from resource file 2011-01-05 08:24:25,549 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.ErrorMsgText1 from resource file 2011-01-05 08:24:25,549 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.Title from resource file 2011-01-05 08:24:25,552 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.WindowTitle from resource file 2011-01-05 08:24:25,553 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.AgreeText from resource file 2011-01-05 08:24:25,553 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyText from resource file 2011-01-05 08:24:25,554 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyTitle from resource file 2011-01-05 08:24:25,555 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.PrivacyLinkText from resource file 2011-01-05 08:24:25,555 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.controller.ResourceUtil] [ResourceUtil.getResourceFileString(0)] - Retrieving Key: com.cisco.uc.csf.prt.PF.DescriptionTitle from resource file 2011-01-05 08:24:25,629 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager..ctor(0)] - Starting SysInfoManager... 
2011-01-05 08:24:25,634 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: systeminfo.exe 2011-01-05 08:24:25,669 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: tasklist.exe 2011-01-05 08:24:25,672 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: ipconfig.exe 2011-01-05 08:24:25,676 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: netstat.exe 2011-01-05 08:24:25,684 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.WindowsUtilsInfo] [WindowsUtilsInfo.startWindowsUtilsThreads(0)] - Launching worker thread: net.exe 2011-01-05 08:24:25,926 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchHardwareInfoThread(0)] - Launching worker thread: HardwareInfo 2011-01-05 08:24:25,928 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getHardWareInfo(0)] - Gathering CPU data 2011-01-05 08:24:26,149 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchCSFDirectoryInfoThread(0)] - Gathering CSF Directory Listing 2011-01-05 08:24:26,153 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [CSFDirectoryInfo.getCSFInstallPath(0)] - Retrieving CSF Install Directory 2011-01-05 08:24:26,159 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [CSFDirectoryInfo.getCSFInstallPath(0)] - CSF Install Path: C:\Program Files (x86)\Common Files\Cisco Systems\Client Services Framework 2011-01-05 08:24:26,162 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchWMIInfoThread(0)] - Launching worker thread: WMIInfo 2011-01-05 08:24:26,164 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Audio info... 
2011-01-05 08:24:26,168 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.launchRegistryAndEnvironmentalVarInfoThread(0)] - Launching worker thread: Registry & Environment Variables 2011-01-05 08:24:26,173 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Cisco Systems, Inc.\Client Services Framework\AdminData\ 2011-01-05 08:24:26,180 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Policies\Cisco Systems, Inc.\Client Services Framework\AdminData\ 2011-01-05 08:24:26,182 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\Cisco Systems, Inc.\Unified Communications\CUCSF 2011-01-05 08:24:26,183 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment 2011-01-05 08:24:26,184 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6 2011-01-05 08:24:26,186 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6.0_17 2011-01-05 08:24:26,188 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.generateRegString(0)] - Gathering Registry data under: Software\JavaSoft\Java Runtime Environment\1.6.0_17\MSI 2011-01-05 08:24:26,190 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.RegistryEnvironmentInfo] [RegistryEnvironmentInfo.gatherRegistryAndEnvInfo(0)] - Gathering Environment Variables data 2011-01-05 08:24:26,283 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Video driver info... 2011-01-05 08:24:26,750 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.writeFile(0)] - Creating file: DirectoryInfo.txt 2011-01-05 08:24:26,759 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getWMIInfo(0)] - Gathering Monitor info... 2011-01-05 08:24:34,483 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.gatherFiles(0)] - Config Dir C:\Users\m.sadeghi\AppData\Roaming\Cisco\Unified Communications\ 2011-01-05 08:24:34,530 [WARN ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addFile(0)] - C:\Users\MA899~1.SAD\AppData\Local\Temp\36\CUCIMOCInstaller.txt not found 2011-01-05 08:24:34,561 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addSystemInfo(0)] - Waiting for worker threads... 2011-01-05 08:24:38,180 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.CSFDirectoryInfo] [HardwareInfo.getHardWareInfo(0)] - Gathering Resolution data 2011-01-05 08:24:55,565 [ERROR] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.addSystemInfo(0)] - One or more worker threads have not returned in a timely manner. Forcing quit. 
2011-01-05 08:24:55,568 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.sysinfo.SysInfoManager] [SysInfoManager.writeFile(0)] - Creating file: SystemInfo.txt 2011-01-05 08:24:55,577 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Checking for files to be excluded 2011-01-05 08:24:55,578 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: d11bfd8f-9745-41db-a35b-200389e65583.dat 2011-01-05 08:24:55,579 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: cacerts 2011-01-05 08:24:55,580 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.2639.20110103081119+0330.wav 2011-01-05 08:24:55,581 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.farhad.20101224165510+0330.wav 2011-01-05 08:24:55,581 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: Voicemail.postmaster.20101224165906+0330.wav 2011-01-05 08:24:55,582 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: VoicemailBeep.wav 2011-01-05 08:24:55,583 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.FileUtil] [FileUtil.removePrivateFiles(0)] - Excluding: secModeNone 2011-01-05 08:24:55,586 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Preparing to create zip file... 2011-01-05 08:24:55,588 [INFO ] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - 60 files found 2011-01-05 08:24:55,589 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying .CSFExit.loc to temp folder. 2011-01-05 08:24:55,595 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSF.loc to temp folder. 2011-01-05 08:24:55,597 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CsfAddress.dat to temp folder. 2011-01-05 08:24:55,600 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSFLogSetting.dat to temp folder. 2011-01-05 08:24:55,634 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CSFSecurityKey.dat to temp folder. 2011-01-05 08:24:55,637 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CommunicationHistory.xml to temp folder. 2011-01-05 08:24:55,641 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying MehdiSadeghi.cnf.xml to temp folder. 2011-01-05 08:24:55,751 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying jtapi.jar to temp folder. 2011-01-05 08:24:55,812 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi.index to temp folder. 2011-01-05 08:24:55,820 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi01.log to temp folder. 2011-01-05 08:24:55,887 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi02.log to temp folder. 2011-01-05 08:24:55,968 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi03.log to temp folder. 
2011-01-05 08:24:55,972 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi04.log to temp folder. 2011-01-05 08:24:56,008 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi05.log to temp folder. 2011-01-05 08:24:56,038 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi06.log to temp folder. 2011-01-05 08:24:56,079 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi07.log to temp folder. 2011-01-05 08:24:56,100 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi08.log to temp folder. 2011-01-05 08:24:56,140 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi09.log to temp folder. 2011-01-05 08:24:56,215 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoJtapi10.log to temp folder. 2011-01-05 08:24:56,296 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log to temp folder. 2011-01-05 08:24:56,319 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.1 to temp folder. 2011-01-05 08:24:56,498 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.2 to temp folder. 2011-01-05 08:24:56,708 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.3 to temp folder. 2011-01-05 08:24:56,912 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.4 to temp folder. 2011-01-05 08:24:57,105 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.5 to temp folder. 2011-01-05 08:24:57,292 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Core.log.6 to temp folder. 2011-01-05 08:24:57,505 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying tracker.log to temp folder. 2011-01-05 08:24:57,523 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VideoEngineEncryptedTrace.txt to temp folder. 2011-01-05 08:24:57,542 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VoiceEngineDebugTrace.txt to temp folder. 2011-01-05 08:24:57,545 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying VoiceEngineTrace.txt to temp folder. 2011-01-05 08:24:57,548 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying operationreport.log to temp folder. 2011-01-05 08:24:57,551 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying voicemailbox.dat to temp folder. 2011-01-05 08:24:57,554 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying voicemailfolder.dat to temp folder. 2011-01-05 08:24:57,558 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying UIPrefs.xml to temp folder. 2011-01-05 08:24:57,562 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log to temp folder. 
2011-01-05 08:24:57,569 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.1 to temp folder. 2011-01-05 08:24:57,752 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.10 to temp folder. 2011-01-05 08:24:58,099 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.2 to temp folder. 2011-01-05 08:24:58,302 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.3 to temp folder. 2011-01-05 08:24:58,517 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.4 to temp folder. 2011-01-05 08:24:58,697 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.5 to temp folder. 2011-01-05 08:24:58,899 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.6 to temp folder. 2011-01-05 08:24:59,100 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.7 to temp folder. 2011-01-05 08:24:59,303 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.8 to temp folder. 2011-01-05 08:24:59,500 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying uc-client.log.9 to temp folder. 2011-01-05 08:24:59,895 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Cisco.ClickToCall.Common.Core.dll.config to temp folder. 2011-01-05 08:24:59,915 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying ClickToCall.pref to temp folder. 2011-01-05 08:24:59,918 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoClickToCall.dll.config to temp folder. 2011-01-05 08:24:59,928 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoClickToCallContacts.dll.config to temp folder. 2011-01-05 08:24:59,948 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying CiscoPersonName.dll.config to temp folder. 2011-01-05 08:24:59,980 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying userData.properties to temp folder. 2011-01-05 08:24:59,988 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying userData.properties.backup to temp folder. 2011-01-05 08:24:59,990 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying cisco-uc-client.log4net.config to temp folder. 2011-01-05 08:24:59,994 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying cisco-uc-tab.log4net.config to temp folder. 2011-01-05 08:25:00,011 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying LocalSettings.xml to temp folder. 2011-01-05 08:25:00,025 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying Description.txt to temp folder. 2011-01-05 08:25:00,028 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying LaunchInfo.txt to temp folder. 
2011-01-05 08:25:00,031 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying DirectoryInfo.txt to temp folder. 2011-01-05 08:25:00,034 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying SystemInfo.txt to temp folder. 2011-01-05 08:25:00,036 [DEBUG] [com.cisco.uc.ucsf.ProblemReportingTool.file.Zip] [Zip.zipMultipleFiles(0)] - Copying csf-prt.log to temp folder.


  • The Incremental Architect's Napkin - #5 - Design functions for extensibility and readability

    - by Ralf Westphal
    Originally posted on: http://geekswithblogs.net/theArchitectsNapkin/archive/2014/08/24/the-incremental-architectrsquos-napkin---5---design-functions-for.aspx The functionality of programs is entered via Entry Points. So what we´re talking about when designing software is a bunch of functions handling the requests represented by and flowing in through those Entry Points. Designing software thus consists of at least three phases: Analyzing the requirements to find the Entry Points and their signatures Designing the functionality to be executed when those Entry Points get triggered Implementing the functionality according to the design aka coding I presume, you´re familiar with phase 1 in some way. And I guess you´re proficient in implementing functionality in some programming language. But in my experience developers in general are not experienced in going through an explicit phase 2. “Designing functionality? What´s that supposed to mean?” you might already have thought. Here´s my definition: To design functionality (or functional design for short) means thinking about… well, functions. You find a solution for what´s supposed to happen when an Entry Point gets triggered in terms of functions. A conceptual solution that is, because those functions only exist in your head (or on paper) during this phase. But you may have guess that, because it´s “design” not “coding”. And here is, what functional design is not: It´s not about logic. Logic is expressions (e.g. +, -, && etc.) and control statements (e.g. if, switch, for, while etc.). Also I consider calling external APIs as logic. It´s equally basic. It´s what code needs to do in order to deliver some functionality or quality. Logic is what´s doing that needs to be done by software. Transformations are either done through expressions or API-calls. And then there is alternative control flow depending on the result of some expression. Basically it´s just jumps in Assembler, sometimes to go forward (if, switch), sometimes to go backward (for, while, do). But calling your own function is not logic. It´s not necessary to produce any outcome. Functionality is not enhanced by adding functions (subroutine calls) to your code. Nor is quality increased by adding functions. No performance gain, no higher scalability etc. through functions. Functions are not relevant to functionality. Strange, isn´t it. What they are important for is security of investment. By introducing functions into our code we can become more productive (re-use) and can increase evolvability (higher unterstandability, easier to keep code consistent). That´s no small feat, however. Evolvable code can hardly be overestimated. That´s why to me functional design is so important. It´s at the core of software development. To sum this up: Functional design is on a level of abstraction above (!) logical design or algorithmic design. Functional design is only done until you get to a point where each function is so simple you are very confident you can easily code it. Functional design an logical design (which mostly is coding, but can also be done using pseudo code or flow charts) are complementary. Software needs both. If you start coding right away you end up in a tangled mess very quickly. Then you need back out through refactoring. Functional design on the other hand is bloodless without actual code. It´s just a theory with no experiments to prove it. But how to do functional design? An example of functional design Let´s assume a program to de-duplicate strings. 
The user enters a number of strings separated by commas, e.g. a, b, a, c, d, b, e, c, a. And the program is supposed to clear this list of all doubles, e.g. a, b, c, d, e. There is only one Entry Point to this program: the user triggers the de-duplication by starting the program with the string list on the command line C:\>deduplicate "a, b, a, c, d, b, e, c, a" a, b, c, d, e …or by clicking on a GUI button. This leads to the Entry Point function to get called. It´s the program´s main function in case of the batch version or a button click event handler in the GUI version. That´s the physical Entry Point so to speak. It´s inevitable. What then happens is a three step process: Transform the input data from the user into a request. Call the request handler. Transform the output of the request handler into a tangible result for the user. Or to phrase it a bit more generally: Accept input. Transform input into output. Present output. This does not mean any of these steps requires a lot of effort. Maybe it´s just one line of code to accomplish it. Nevertheless it´s a distinct step in doing the processing behind an Entry Point. Call it an aspect or a responsibility - and you will realize it most likely deserves a function of its own to satisfy the Single Responsibility Principle (SRP). Interestingly the above list of steps is already functional design. There is no logic, but nevertheless the solution is described - albeit on a higher level of abstraction than you might have done yourself. But it´s still on a meta-level. The application to the domain at hand is easy, though: Accept string list from command line De-duplicate Present de-duplicated strings on standard output And this concrete list of processing steps can easily be transformed into code:static void Main(string[] args) { var input = Accept_string_list(args); var output = Deduplicate(input); Present_deduplicated_string_list(output); } Instead of a big problem there are three much smaller problems now. If you think each of those is trivial to implement, then go for it. You can stop the functional design at this point. But maybe, just maybe, you´re not so sure how to go about with the de-duplication for example. Then just implement what´s easy right now, e.g.private static string Accept_string_list(string[] args) { return args[0]; } private static void Present_deduplicated_string_list( string[] output) { var line = string.Join(", ", output); Console.WriteLine(line); } Accept_string_list() contains logic in the form of an API-call. Present_deduplicated_string_list() contains logic in the form of an expression and an API-call. And then repeat the functional design for the remaining processing step. What´s left is the domain logic: de-duplicating a list of strings. How should that be done? Without any logic at our disposal during functional design you´re left with just functions. So which functions could make up the de-duplication? Here´s a suggestion: De-duplicate Parse the input string into a true list of strings. Register each string in a dictionary/map/set. That way duplicates get cast away. Transform the data structure into a list of unique strings. Processing step 2 obviously was the core of the solution. That´s where real creativity was needed. That´s the core of the domain. 
But now after this refinement the implementation of each step is easy again:private static string[] Parse_string_list(string input) { return input.Split(',') .Select(s => s.Trim()) .ToArray(); } private static Dictionary<string,object> Compile_unique_strings(string[] strings) { return strings.Aggregate( new Dictionary<string, object>(), (agg, s) => { agg[s] = null; return agg; }); } private static string[] Serialize_unique_strings( Dictionary<string,object> dict) { return dict.Keys.ToArray(); } With these three additional functions Main() now looks like this:static void Main(string[] args) { var input = Accept_string_list(args); var strings = Parse_string_list(input); var dict = Compile_unique_strings(strings); var output = Serialize_unique_strings(dict); Present_deduplicated_string_list(output); } I think that´s very understandable code: just read it from top to bottom and you know how the solution to the problem works. It´s a mirror image of the initial design: Accept string list from command line Parse the input string into a true list of strings. Register each string in a dictionary/map/set. That way duplicates get cast away. Transform the data structure into a list of unique strings. Present de-duplicated strings on standard output You can even re-generate the design by just looking at the code. Code and functional design thus are always in sync - if you follow some simple rules. But about that later. And as a bonus: all the functions making up the process are small - which means easy to understand, too. So much for an initial concrete example. Now it´s time for some theory. Because there is method to this madness ;-) The above has only scratched the surface. Introducing Flow Design Functional design starts with a given function, the Entry Point. Its goal is to describe the behavior of the program when the Entry Point is triggered using a process, not an algorithm. An algorithm consists of logic, a process on the other hand consists just of steps or stages. Each processing step transforms input into output or a side effect. Also it might access resources, e.g. a printer, a database, or just memory. Processing steps thus can rely on state of some sort. This is different from Functional Programming, where functions are supposed to not be stateful and not cause side effects.[1] In its simplest form a process can be written as a bullet point list of steps, e.g. Get data from user Output result to user Transform data Parse data Map result for output Such a compilation of steps - possibly on different levels of abstraction - often is the first artifact of functional design. It can be generated by a team in an initial design brainstorming. Next comes ordering the steps. What should happen first, what next etc.? Get data from user Parse data Transform data Map result for output Output result to user That´s great for a start into functional design. It´s better than starting to code right away on a given function using TDD. Please get me right: TDD is a valuable practice. But it can be unnecessarily hard if the scope of a functionn is too large. But how do you know beforehand without investing some thinking? And how to do this thinking in a systematic fashion? My recommendation: For any given function you´re supposed to implement first do a functional design. Then, once you´re confident you know the processing steps - which are pretty small - refine and code them using TDD. You´ll see that´s much, much easier - and leads to cleaner code right away. 
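To make the example easy to try out, here is the de-duplication code from above assembled into one compilable file. This is merely a convenience sketch: the enclosing class, the usings, and the command-line example are my additions, while the function names and bodies follow the article.

```csharp
// Sketch: the article's de-duplication example assembled into a single file.
// The surrounding class and usings are assumptions; the functions follow the text.
using System;
using System.Collections.Generic;
using System.Linq;

class Deduplicator
{
    static void Main(string[] args)
    {
        var input = Accept_string_list(args);
        var strings = Parse_string_list(input);
        var dict = Compile_unique_strings(strings);
        var output = Serialize_unique_strings(dict);
        Present_deduplicated_string_list(output);
    }

    // Accept input: the raw string list as passed on the command line.
    private static string Accept_string_list(string[] args)
    {
        return args[0];
    }

    // Parse the input string into a true list of strings.
    private static string[] Parse_string_list(string input)
    {
        return input.Split(',')
                    .Select(s => s.Trim())
                    .ToArray();
    }

    // Register each string in a dictionary; that way duplicates get cast away.
    private static Dictionary<string, object> Compile_unique_strings(string[] strings)
    {
        return strings.Aggregate(
            new Dictionary<string, object>(),
            (agg, s) => { agg[s] = null; return agg; });
    }

    // Transform the data structure into a list of unique strings.
    private static string[] Serialize_unique_strings(Dictionary<string, object> dict)
    {
        return dict.Keys.ToArray();
    }

    // Present output: one comma-separated line on standard output.
    private static void Present_deduplicated_string_list(string[] output)
    {
        var line = string.Join(", ", output);
        Console.WriteLine(line);
    }
}
```

Called as deduplicate "a, b, a, c, d, b, e, c, a" it prints a, b, c, d, e. (Dictionary<string, object> does not formally guarantee key order, but for this input it preserves insertion order, which matches the output shown in the article.)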
For more information on this approach I call “Informed TDD” read my book of the same title. Thinking before coding is smart. And writing down the solution as a bunch of functions possibly is the simplest thing you can do, I´d say. It´s more according to the KISS (Keep It Simple, Stupid) principle than returning constants or other trivial stuff TDD development often is started with. So far so good. A simple ordered list of processing steps will do to start with functional design. As shown in the above example such steps can easily be translated into functions. Moving from design to coding thus is simple. However, such a list does not scale. Processing is not always that simple to be captured in a list. And then the list is just text. Again. Like code. That means the design is lacking visuality. Textual representations need more parsing by your brain than visual representations. Plus they are limited in their “dimensionality”: text just has one dimension, it´s sequential. Alternatives and parallelism are hard to encode in text. In addition the functional design using numbered lists lacks data. It´s not visible what´s the input, output, and state of the processing steps. That´s why functional design should be done using a lightweight visual notation. No tool is necessary to draw such designs. Use pen and paper; a flipchart, a whiteboard, or even a napkin is sufficient. Visualizing processes The building block of the functional design notation is a functional unit. I mostly draw it like this: Something is done, it´s clear what goes in, it´s clear what comes out, and it´s clear what the processing step requires in terms of state or hardware. Whenever input flows into a functional unit it gets processed and output is produced and/or a side effect occurs. Flowing data is the driver of something happening. That´s why I call this approach to functional design Flow Design. It´s about data flow instead of control flow. Control flow like in algorithms is of no concern to functional design. Thinking about control flow simply is too low level. Once you start with control flow you easily get bogged down by tons of details. That´s what you want to avoid during design. Design is supposed to be quick, broad brush, abstract. It should give overview. But what about all the details? As Robert C. Martin rightly said: “Programming is abot detail”. Detail is a matter of code. Once you start coding the processing steps you designed you can worry about all the detail you want. Functional design does not eliminate all the nitty gritty. It just postpones tackling them. To me that´s also an example of the SRP. Function design has the responsibility to come up with a solution to a problem posed by a single function (Entry Point). And later coding has the responsibility to implement the solution down to the last detail (i.e. statement, API-call). TDD unfortunately mixes both responsibilities. It´s just coding - and thereby trying to find detailed implementations (green phase) plus getting the design right (refactoring). To me that´s one reason why TDD has failed to deliver on its promise for many developers. Using functional units as building blocks of functional design processes can be depicted very easily. Here´s the initial process for the example problem: For each processing step draw a functional unit and label it. Choose a verb or an “action phrase” as a label, not a noun. Functional design is about activities, not state or structure. Then make the output of an upstream step the input of a downstream step. 
Finally think about the data that should flow between the functional units. Write the data above the arrows connecting the functional units in the direction of the data flow. Enclose the data description in brackets. That way you can clearly see if all flows have already been specified. Empty brackets mean “no data is flowing”, but nevertheless a signal is sent. A name like “list” or “strings” in brackets describes the data content. Use lower case labels for that purpose. A name starting with an upper case letter like “String” or “Customer” on the other hand signifies a data type. If you like, you also can combine descriptions with data types by separating them with a colon, e.g. (list:string) or (strings:string[]). But these are just suggestions from my practice with Flow Design. You can do it differently, if you like. Just be sure to be consistent. Flows wired-up in this manner I call one-dimensional (1D). Each functional unit just has one input and/or one output. A functional unit without an output is possible. It´s like a black hole sucking up input without producing any output. Instead it produces side effects. A functional unit without an input, though, does make much sense. When should it start to work? What´s the trigger? That´s why in the above process even the first processing step has an input. If you like, view such 1D-flows as pipelines. Data is flowing through them from left to right. But as you can see, it´s not always the same data. It get´s transformed along its passage: (args) becomes a (list) which is turned into (strings). The Principle of Mutual Oblivion A very characteristic trait of flows put together from function units is: no functional units knows another one. They are all completely independent of each other. Functional units don´t know where their input is coming from (or even when it´s gonna arrive). They just specify a range of values they can process. And they promise a certain behavior upon input arriving. Also they don´t know where their output is going. They just produce it in their own time independent of other functional units. That means at least conceptually all functional units work in parallel. Functional units don´t know their “deployment context”. They now nothing about the overall flow they are place in. They are just consuming input from some upstream, and producing output for some downstream. That makes functional units very easy to test. At least as long as they don´t depend on state or resources. I call this the Principle of Mutual Oblivion (PoMO). Functional units are oblivious of others as well as an overall context/purpose. They are just parts of a whole focused on a single responsibility. How the whole is built, how a larger goal is achieved, is of no concern to the single functional units. By building software in such a manner, functional design interestingly follows nature. Nature´s building blocks for organisms also follow the PoMO. The cells forming your body do not know each other. Take a nerve cell “controlling” a muscle cell for example:[2] The nerve cell does not know anything about muscle cells, let alone the specific muscel cell it is “attached to”. Likewise the muscle cell does not know anything about nerve cells, let a lone a specific nerve cell “attached to” it. Saying “the nerve cell is controlling the muscle cell” thus only makes sense when viewing both from the outside. “Control” is a concept of the whole, not of its parts. Control is created by wiring-up parts in a certain way. Both cells are mutually oblivious. 
Both just follow a contract. One produces Acetylcholine (ACh) as output, the other consumes ACh as input. Where the ACh is going, where it´s coming from neither cell cares about. Million years of evolution have led to this kind of division of labor. And million years of evolution have produced organism designs (DNA) which lead to the production of these different cell types (and many others) and also to their co-location. The result: the overall behavior of an organism. How and why this happened in nature is a mystery. For our software, though, it´s clear: functional and quality requirements needs to be fulfilled. So we as developers have to become “intelligent designers” of “software cells” which we put together to form a “software organism” which responds in satisfying ways to triggers from it´s environment. My bet is: If nature gets complex organisms working by following the PoMO, who are we to not apply this recipe for success to our much simpler “machines”? So my rule is: Wherever there is functionality to be delivered, because there is a clear Entry Point into software, design the functionality like nature would do it. Build it from mutually oblivious functional units. That´s what Flow Design is about. In that way it´s even universal, I´d say. Its notation can also be applied to biology: Never mind labeling the functional units with nouns. That´s ok in Flow Design. You´ll do that occassionally for functional units on a higher level of abstraction or when their purpose is close to hardware. Getting a cockroach to roam your bedroom takes 1,000,000 nerve cells (neurons). Getting the de-duplication program to do its job just takes 5 “software cells” (functional units). Both, though, follow the same basic principle. Translating functional units into code Moving from functional design to code is no rocket science. In fact it´s straightforward. There are two simple rules: Translate an input port to a function. Translate an output port either to a return statement in that function or to a function pointer visible to that function. The simplest translation of a functional unit is a function. That´s what you saw in the above example. Functions are mutually oblivious. That why Functional Programming likes them so much. It makes them composable. Which is the reason, nature works according to the PoMO. Let´s be clear about one thing: There is no dependency injection in nature. For all of an organism´s complexity no DI container is used. Behavior is the result of smooth cooperation between mutually oblivious building blocks. Functions will often be the adequate translation for the functional units in your designs. But not always. Take for example the case, where a processing step should not always produce an output. Maybe the purpose is to filter input. Here the functional unit consumes words and produces words. But it does not pass along every word flowing in. Some words are swallowed. Think of a spell checker. It probably should not check acronyms for correctness. There are too many of them. Or words with no more than two letters. Such words are called “stop words”. In the above picture the optionality of the output is signified by the astrisk outside the brackets. It means: Any number of (word) data items can flow from the functional unit for each input data item. It might be none or one or even more. This I call a stream of data. Such behavior cannot be translated into a function where output is generated with return. Because a function always needs to return a value. 
So the output port is translated into a function pointer or continuation which gets passed to the subroutine when called:[3]void filter_stop_words( string word, Action<string> onNoStopWord) { if (...check if not a stop word...) onNoStopWord(word); } If you want to be nitpicky you might call such a function pointer parameter an injection. And technically you´re right. Conceptually, though, it´s not an injection. Because the subroutine is not functionally dependent on the continuation. Firstly continuations are procedures, i.e. subroutines without a return type. Remember: Flow Design is about unidirectional data flow. Secondly the name of the formal parameter is chosen in a way as to not assume anything about downstream processing steps. onNoStopWord describes a situation (or event) within the functional unit only. Translating output ports into function pointers helps keeping functional units mutually oblivious in cases where output is optional or produced asynchronically. Either pass the function pointer to the function upon call. Or make it global by putting it on the encompassing class. Then it´s called an event. In C# that´s even an explicit feature.class Filter { public void filter_stop_words( string word) { if (...check if not a stop word...) onNoStopWord(word); } public event Action<string> onNoStopWord; } When to use a continuation and when to use an event dependens on how a functional unit is used in flows and how it´s packed together with others into classes. You´ll see examples further down the Flow Design road. Another example of 1D functional design Let´s see Flow Design once more in action using the visual notation. How about the famous word wrap kata? Robert C. Martin has posted a much cited solution including an extensive reasoning behind his TDD approach. So maybe you want to compare it to Flow Design. The function signature given is:string WordWrap(string text, int maxLineLength) {...} That´s not an Entry Point since we don´t see an application with an environment and users. Nevertheless it´s a function which is supposed to provide a certain functionality. The text passed in has to be reformatted. The input is a single line of arbitrary length consisting of words separated by spaces. The output should consist of one or more lines of a maximum length specified. If a word is longer than a the maximum line length it can be split in multiple parts each fitting in a line. Flow Design Let´s start by brainstorming the process to accomplish the feat of reformatting the text. What´s needed? Words need to be assembled into lines Words need to be extracted from the input text The resulting lines need to be assembled into the output text Words too long to fit in a line need to be split Does sound about right? I guess so. And it shows a kind of priority. Long words are a special case. So maybe there is a hint for an incremental design here. First let´s tackle “average words” (words not longer than a line). Here´s the Flow Design for this increment: The the first three bullet points turned into functional units with explicit data added. As the signature requires a text is transformed into another text. See the input of the first functional unit and the output of the last functional unit. In between no text flows, but words and lines. That´s good to see because thereby the domain is clearly represented in the design. The requirements are talking about words and lines and here they are. But note the asterisk! It´s not outside the brackets but inside. 
That means it's not a stream of words or lines, but lists or sequences. For each text a sequence of words is output. For each sequence of words a sequence of lines is produced. The asterisk is used to abstract from the concrete implementation, like with streams. Whether the list of words gets implemented as an array or an IEnumerable is not important during design. It's an implementation detail. Does any processing step require further refinement? I don't think so. They all look pretty "atomic" to me. And if not… I can always backtrack and refine a process step using functional design later, once I've gained more insight into a sub-problem.

Implementation

The implementation is straightforward, as you can imagine. The processing steps can all be translated into functions. Each can be tested easily and separately. Each has a focused responsibility. And the process flow becomes just a sequence of function calls (a sketch of what such a composition might look like follows below). Easy to understand. It clearly states how word wrapping works - on a high level of abstraction. And it's easy to evolve, as you'll see.

Flow Design - Increment 2

So far only texts consisting of "average words" are wrapped correctly. Words not fitting in a line will result in lines that are too long. Wrapping long words is a feature of the requested functionality; whether it's there or not makes a difference to the user. To quickly get feedback I decided to first implement a solution without this feature. But now it's time to add it to deliver the full scope. Fortunately, Flow Design automatically leads to code following the Open Closed Principle (OCP). It's easy to extend it - instead of changing well tested code. How's that possible? Flow Design allows for extension of functionality by inserting functional units into the flow. That way existing functional units need not be changed. The data flow arrow between functional units is a natural extension point. No need to resort to the Strategy Pattern. No need to think ahead about where extensions might need to be made in the future. I just "phase in" the remaining processing step: since neither Extract words nor Reformat knows of its environment, neither needs to be touched due to the "detour". The new processing step accepts the output of the existing upstream step and produces data compatible with the existing downstream step.

Implementation - Increment 2

A trivial implementation, just to check the assumption that this works, does not do anything to split long words; the input is simply passed on. Note how clean WordWrap() stays. The solution is easy to understand. A developer looking at this code sometime in the future, when a new feature needs to be built in, quickly sees how long words are dealt with. Compare this to Robert C. Martin's solution:[4] How does this solution handle long words? Long words are not even part of the domain language present in the code. At least I need considerable time to understand the approach. Admittedly the Flow Design solution with the full implementation of long word splitting is longer than Robert C. Martin's - at least it seems so, because his solution does not cover all the "word wrap situations" the Flow Design solution handles. Some lines would need to be added to be on par, I guess. But even then… is a difference in LOC that important as long as it's in the same ball park? I value understandability and openness for extension higher than saving on the last line of code. Simplicity is not just less code, it's also clarity in design. But don't take my word for it. Try Flow Design on larger problems and compare for yourself.
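To make the composition concrete, here is a minimal sketch of how such a flow might read in C#. The helper names and their implementations are my own illustration of the translation rules described above - they are not the article's original code, and the original increments are only approximated (assumes using System; and using System.Collections.Generic; are in scope):

// Hypothetical sketch: the flow as a plain sequence of function calls.
string WordWrap(string text, int maxLineLength) {
    var words = split_into_words(text);
    var fitting = split_long_words(words, maxLineLength);          // increment 2: the phased-in step
    var lines = assemble_words_into_lines(fitting, maxLineLength);
    return assemble_lines_into_text(lines);
}

IEnumerable<string> split_into_words(string text) {
    return text.Split(new[] { ' ' }, StringSplitOptions.RemoveEmptyEntries);
}

// Splits words longer than a line into chunks that fit; shorter words pass through unchanged.
IEnumerable<string> split_long_words(IEnumerable<string> words, int maxLineLength) {
    foreach (var word in words) {
        var rest = word;
        while (rest.Length > maxLineLength) {
            yield return rest.Substring(0, maxLineLength);
            rest = rest.Substring(maxLineLength);
        }
        yield return rest;
    }
}

// Greedily packs words into lines no longer than maxLineLength.
IEnumerable<string> assemble_words_into_lines(IEnumerable<string> words, int maxLineLength) {
    var line = "";
    foreach (var word in words) {
        if (line.Length == 0)
            line = word;
        else if (line.Length + 1 + word.Length <= maxLineLength)
            line += " " + word;
        else {
            yield return line;
            line = word;
        }
    }
    if (line.Length > 0)
        yield return line;
}

string assemble_lines_into_text(IEnumerable<string> lines) {
    return string.Join("\n", lines);
}

Each step is mutually oblivious - none of them knows which step runs before or after it - which is what makes slotting split_long_words into the flow in increment 2 an extension rather than a modification.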
So what's the easier, more straightforward way to clean code? And keep in mind: you ain't seen it all yet ;-) There's more to Flow Design than described in this chapter.

In closing

I hope I was able to give you an impression of functional design that makes you hungry for more. To me it's an inevitable step in software development. Jumping from requirements to code does not scale, and it leads to dirty code all too quickly. Some thought should be invested first. Where there is a clear Entry Point visible, its functionality should be designed using data flows, because with data flows abstraction is possible. For more background on why that's necessary, read my blog article here. For now let me point out to you - if you haven't already noticed - that Flow Design is a general purpose declarative language. It's "programming by intention" (Shalloway et al.). Just write down how you think the solution should work on a high level of abstraction. This breaks a large problem down into smaller problems. And by following the PoMO the solutions to those smaller problems are independent of each other. So they are easy to test. Or you could even think about getting them implemented in parallel by different team members. Flow Design not only increases evolvability, it also helps you become more productive. All team members can participate in functional design. This goes beyond collective code ownership. We're talking collective design/architecture ownership, because with Flow Design there is a common visual language to talk about functional design - which is the foundation for all other design activities.

PS: If you like what you read, consider getting my ebook "The Incremental Architect's Napkin". It's where I compile all the articles in this series for easier reading.

[1] I like the strictness of Functional Programming - but I also find it quite hard to live by. And it certainly is not what millions of programmers are used to. Also, it seems to me the real world is full of state and side effects. So why give them such a bad image? That's why functional design takes a more pragmatic approach. State and side effects are ok for processing steps - but be sure to follow the SRP. Don't put too much of them into a single processing step.

[2] Image taken from www.physioweb.org

[3] My code samples are written in C#. C# sports typed function pointers called delegates. Action<T> is such a function pointer type, matching functions with the signature void someName(T t). Other languages provide similar ways to work with functions as first class citizens - even Java now, in version 8. I trust you find a way to map this detail of my translation to your favorite programming language. I know it works for Java, C++, Ruby, JavaScript, Python, Go. And if you're using a Functional Programming language it's of course a no brainer.

[4] Taken from his blog post "The Craftsman 62, The Dark Path".

    Read the article

  • LVM / Device Mapper maps wrong device

    - by DaDaDom
    Hi, I run a LVM setup on a raid1 created by mdadm. md2 is based on sda6 (major:minor 8:6) and sdb6 (8:22). md2 is partition 9:2. The VG on top of md2 has 4 LVs, var, home, usr, tmp. First the problem: While booting it seems as if the device mapper takes the wrong partition for the mapping! Immediately after boot the information is like ~# dmsetup table systemlvm-home: 0 4194304 linear 8:22 384 systemlvm-home: 4194304 16777216 linear 8:22 69206400 systemlvm-home: 20971520 8388608 linear 8:22 119538048 systemlvm-home: 29360128 6291456 linear 8:22 243270016 systemlvm-tmp: 0 2097152 linear 8:22 41943424 systemlvm-usr: 0 10485760 linear 8:22 20971904 systemlvm-var: 0 10485760 linear 8:22 10486144 systemlvm-var: 10485760 6291456 linear 8:22 4194688 systemlvm-var: 16777216 4194304 linear 8:22 44040576 systemlvm-var: 20971520 10485760 linear 8:22 31457664 systemlvm-var: 31457280 20971520 linear 8:22 48234880 systemlvm-var: 52428800 33554432 linear 8:22 85983616 systemlvm-var: 85983232 115343360 linear 8:22 127926656 ~# cat /proc/mdstat Personalities : [raid1] md2 : active (auto-read-only) raid1 sda6[0] 151798080 blocks [2/1] [U_] md0 : active raid1 sda1[0] sdb1[1] 96256 blocks [2/2] [UU] md1 : active raid1 sda2[0] sdb2[1] 2931776 blocks [2/2] [UU] I have to manually "lvchange -an" all LVs, add /dev/sdb6 back to the raid and reactivate the LVs, then all is fine. But it prevents me from automounting the partitions and obviously leads to a bunch of other problems. If everything works fine, the information is like ~$ cat /proc/mdstat Personalities : [raid1] md2 : active raid1 sdb6[1] sda6[0] 151798080 blocks [2/2] [UU] ... ~# dmsetup table systemlvm-home: 0 4194304 linear 9:2 384 systemlvm-home: 4194304 16777216 linear 9:2 69206400 systemlvm-home: 20971520 8388608 linear 9:2 119538048 systemlvm-home: 29360128 6291456 linear 9:2 243270016 systemlvm-tmp: 0 2097152 linear 9:2 41943424 systemlvm-usr: 0 10485760 linear 9:2 20971904 systemlvm-var: 0 10485760 linear 9:2 10486144 systemlvm-var: 10485760 6291456 linear 9:2 4194688 systemlvm-var: 16777216 4194304 linear 9:2 44040576 systemlvm-var: 20971520 10485760 linear 9:2 31457664 systemlvm-var: 31457280 20971520 linear 9:2 48234880 systemlvm-var: 52428800 33554432 linear 9:2 85983616 systemlvm-var: 85983232 115343360 linear 9:2 127926656 I think that LVM for some reason just "takes" /dev/sdb6 which is then missing in the raid. I tried almost all options in the lvm.conf but none seems to work. Below is some more information, like config files. Does anyone have any idea about what is going on here and how to prevent that? If you need any additional information, please let me know Thanks in advance! 
Dominik The information (off a "repaired" system): ~# cat /etc/debian_version 5.0.4 ~# uname -a Linux kermit 2.6.26-2-686 #1 SMP Wed Feb 10 08:59:21 UTC 2010 i686 GNU/Linux ~# lvm version LVM version: 2.02.39 (2008-06-27) Library version: 1.02.27 (2008-06-25) Driver version: 4.13.0 ~# cat /etc/mdadm/mdadm.conf DEVICE partitions ARRAY /dev/md1 level=raid1 num-devices=2 metadata=00.90 UUID=11e9dc6c:1da99f3f:b3088ca6:c6fe60e9 ARRAY /dev/md0 level=raid1 num-devices=2 metadata=00.90 UUID=92ed1e4b:897361d3:070682b3:3baa4fa1 ARRAY /dev/md2 level=raid1 num-devices=2 metadata=00.90 UUID=601d4642:39dc80d7:96e8bbac:649924ba ~# mount /dev/md1 on / type ext3 (rw,errors=remount-ro) tmpfs on /lib/init/rw type tmpfs (rw,nosuid,mode=0755) proc on /proc type proc (rw,noexec,nosuid,nodev) sysfs on /sys type sysfs (rw,noexec,nosuid,nodev) procbususb on /proc/bus/usb type usbfs (rw) udev on /dev type tmpfs (rw,mode=0755) tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev) devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=620) /dev/md0 on /boot type ext3 (rw) /dev/mapper/systemlvm-usr on /usr type reiserfs (rw) /dev/mapper/systemlvm-tmp on /tmp type reiserfs (rw) /dev/mapper/systemlvm-home on /home type reiserfs (rw) /dev/mapper/systemlvm-var on /var type reiserfs (rw) ~# grep -v ^$ /etc/lvm/lvm.conf | grep -v "#" devices { dir = "/dev" scan = [ "/dev" ] preferred_names = [ ] filter = [ "a|/dev/md.*|", "r/.*/" ] cache_dir = "/etc/lvm/cache" cache_file_prefix = "" write_cache_state = 1 sysfs_scan = 1 md_component_detection = 1 ignore_suspended_devices = 0 } log { verbose = 0 syslog = 1 overwrite = 0 level = 0 indent = 1 command_names = 0 prefix = " " } backup { backup = 1 backup_dir = "/etc/lvm/backup" archive = 1 archive_dir = "/etc/lvm/archive" retain_min = 10 retain_days = 30 } shell { history_size = 100 } global { umask = 077 test = 0 units = "h" activation = 1 proc = "/proc" locking_type = 1 fallback_to_clustered_locking = 1 fallback_to_local_locking = 1 locking_dir = "/lib/init/rw" } activation { missing_stripe_filler = "/dev/ioerror" reserved_stack = 256 reserved_memory = 8192 process_priority = -18 mirror_region_size = 512 readahead = "auto" mirror_log_fault_policy = "allocate" mirror_device_fault_policy = "remove" } :~# vgscan -vvv Processing: vgscan -vvv O_DIRECT will be used Setting global/locking_type to 1 File-based locking selected. 
Setting global/locking_dir to /lib/init/rw Locking /lib/init/rw/P_global WB Wiping cache of LVM-capable devices /dev/block/1:0: Added to device cache /dev/block/1:1: Added to device cache /dev/block/1:10: Added to device cache /dev/block/1:11: Added to device cache /dev/block/1:12: Added to device cache /dev/block/1:13: Added to device cache /dev/block/1:14: Added to device cache /dev/block/1:15: Added to device cache /dev/block/1:2: Added to device cache /dev/block/1:3: Added to device cache /dev/block/1:4: Added to device cache /dev/block/1:5: Added to device cache /dev/block/1:6: Added to device cache /dev/block/1:7: Added to device cache /dev/block/1:8: Added to device cache /dev/block/1:9: Added to device cache /dev/block/253:0: Added to device cache /dev/block/253:1: Added to device cache /dev/block/253:2: Added to device cache /dev/block/253:3: Added to device cache /dev/block/8:0: Added to device cache /dev/block/8:1: Added to device cache /dev/block/8:16: Added to device cache /dev/block/8:17: Added to device cache /dev/block/8:18: Added to device cache /dev/block/8:19: Added to device cache /dev/block/8:2: Added to device cache /dev/block/8:21: Added to device cache /dev/block/8:22: Added to device cache /dev/block/8:3: Added to device cache /dev/block/8:5: Added to device cache /dev/block/8:6: Added to device cache /dev/block/9:0: Already in device cache /dev/block/9:1: Already in device cache /dev/block/9:2: Already in device cache /dev/bsg/0:0:0:0: Not a block device /dev/bsg/1:0:0:0: Not a block device /dev/bus/usb/001/001: Not a block device [... many more "not a block device"] /dev/core: Not a block device /dev/cpu_dma_latency: Not a block device /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895: Aliased to /dev/block/8:16 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895-part1: Aliased to /dev/block/8:17 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895-part2: Aliased to /dev/block/8:18 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895-part3: Aliased to /dev/block/8:19 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895-part5: Aliased to /dev/block/8:21 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L507895-part6: Aliased to /dev/block/8:22 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800: Aliased to /dev/block/8:0 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800-part1: Aliased to /dev/block/8:1 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800-part2: Aliased to /dev/block/8:2 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800-part3: Aliased to /dev/block/8:3 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800-part5: Aliased to /dev/block/8:5 in device cache /dev/disk/by-id/ata-SAMSUNG_HD160JJ_S08HJ10L526800-part6: Aliased to /dev/block/8:6 in device cache /dev/disk/by-id/dm-name-systemlvm-home: Aliased to /dev/block/253:2 in device cache /dev/disk/by-id/dm-name-systemlvm-tmp: Aliased to /dev/block/253:3 in device cache /dev/disk/by-id/dm-name-systemlvm-usr: Aliased to /dev/block/253:1 in device cache /dev/disk/by-id/dm-name-systemlvm-var: Aliased to /dev/block/253:0 in device cache /dev/disk/by-id/dm-uuid-LVM-rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvr25N7CRZpUMzR18NfS6zeSeAVnVT98LuU: Aliased to /dev/block/253:0 in device cache /dev/disk/by-id/dm-uuid-LVM-rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvr3TpFXtLjYGEwn79IdXsSCZPl8AxmqbmQ: Aliased to /dev/block/253:1 in device cache 
/dev/disk/by-id/dm-uuid-LVM-rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvrc5MJ4KolevMjt85PPBrQuRTkXbx6NvTi: Aliased to /dev/block/253:3 in device cache /dev/disk/by-id/dm-uuid-LVM-rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvrYXrfdg5OSYDVkNeiQeQksgCI849Z2hx8: Aliased to /dev/block/253:2 in device cache /dev/disk/by-id/md-uuid-11e9dc6c:1da99f3f:b3088ca6:c6fe60e9: Already in device cache /dev/disk/by-id/md-uuid-601d4642:39dc80d7:96e8bbac:649924ba: Already in device cache /dev/disk/by-id/md-uuid-92ed1e4b:897361d3:070682b3:3baa4fa1: Already in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895: Aliased to /dev/block/8:16 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895-part1: Aliased to /dev/block/8:17 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895-part2: Aliased to /dev/block/8:18 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895-part3: Aliased to /dev/block/8:19 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895-part5: Aliased to /dev/block/8:21 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L507895-part6: Aliased to /dev/block/8:22 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800: Aliased to /dev/block/8:0 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800-part1: Aliased to /dev/block/8:1 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800-part2: Aliased to /dev/block/8:2 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800-part3: Aliased to /dev/block/8:3 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800-part5: Aliased to /dev/block/8:5 in device cache /dev/disk/by-id/scsi-SATA_SAMSUNG_HD160JJS08HJ10L526800-part6: Aliased to /dev/block/8:6 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0: Aliased to /dev/block/8:0 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0-part1: Aliased to /dev/block/8:1 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0-part2: Aliased to /dev/block/8:2 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0-part3: Aliased to /dev/block/8:3 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0-part5: Aliased to /dev/block/8:5 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-0:0:0:0-part6: Aliased to /dev/block/8:6 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0: Aliased to /dev/block/8:16 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0-part1: Aliased to /dev/block/8:17 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0-part2: Aliased to /dev/block/8:18 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0-part3: Aliased to /dev/block/8:19 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0-part5: Aliased to /dev/block/8:21 in device cache /dev/disk/by-path/pci-0000:00:0f.0-scsi-1:0:0:0-part6: Aliased to /dev/block/8:22 in device cache /dev/disk/by-uuid/13c1262b-e06f-40ce-b088-ce410640a6dc: Aliased to /dev/block/253:3 in device cache /dev/disk/by-uuid/379f57b0-2e03-414c-808a-f76160617336: Aliased to /dev/block/253:2 in device cache /dev/disk/by-uuid/4fb2d6d3-bd51-48d3-95ee-8e404faf243d: Already in device cache /dev/disk/by-uuid/5c6728ec-82c1-49c0-93c5-f6dbd5c0d659: Aliased to /dev/block/8:5 in device cache /dev/disk/by-uuid/a13cdfcd-2191-4185-a727-ffefaf7a382e: Aliased to /dev/block/253:1 in device cache /dev/disk/by-uuid/e0d5893d-ff88-412f-b753-9e3e9af3242d: Aliased to /dev/block/8:21 in device cache 
/dev/disk/by-uuid/e79c9da6-8533-4e55-93ec-208876671edc: Aliased to /dev/block/253:0 in device cache /dev/disk/by-uuid/f3f176f5-12f7-4af8-952a-c6ac43a6e332: Already in device cache /dev/dm-0: Aliased to /dev/block/253:0 in device cache (preferred name) /dev/dm-1: Aliased to /dev/block/253:1 in device cache (preferred name) /dev/dm-2: Aliased to /dev/block/253:2 in device cache (preferred name) /dev/dm-3: Aliased to /dev/block/253:3 in device cache (preferred name) /dev/fd: Symbolic link to directory /dev/full: Not a block device /dev/hpet: Not a block device /dev/initctl: Not a block device /dev/input/by-path/platform-i8042-serio-0-event-kbd: Not a block device /dev/input/event0: Not a block device /dev/input/mice: Not a block device /dev/kmem: Not a block device /dev/kmsg: Not a block device /dev/log: Not a block device /dev/loop/0: Added to device cache /dev/MAKEDEV: Not a block device /dev/mapper/control: Not a block device /dev/mapper/systemlvm-home: Aliased to /dev/dm-2 in device cache /dev/mapper/systemlvm-tmp: Aliased to /dev/dm-3 in device cache /dev/mapper/systemlvm-usr: Aliased to /dev/dm-1 in device cache /dev/mapper/systemlvm-var: Aliased to /dev/dm-0 in device cache /dev/md0: Already in device cache /dev/md1: Already in device cache /dev/md2: Already in device cache /dev/mem: Not a block device /dev/net/tun: Not a block device /dev/network_latency: Not a block device /dev/network_throughput: Not a block device /dev/null: Not a block device /dev/port: Not a block device /dev/ppp: Not a block device /dev/psaux: Not a block device /dev/ptmx: Not a block device /dev/pts/0: Not a block device /dev/ram0: Aliased to /dev/block/1:0 in device cache (preferred name) /dev/ram1: Aliased to /dev/block/1:1 in device cache (preferred name) /dev/ram10: Aliased to /dev/block/1:10 in device cache (preferred name) /dev/ram11: Aliased to /dev/block/1:11 in device cache (preferred name) /dev/ram12: Aliased to /dev/block/1:12 in device cache (preferred name) /dev/ram13: Aliased to /dev/block/1:13 in device cache (preferred name) /dev/ram14: Aliased to /dev/block/1:14 in device cache (preferred name) /dev/ram15: Aliased to /dev/block/1:15 in device cache (preferred name) /dev/ram2: Aliased to /dev/block/1:2 in device cache (preferred name) /dev/ram3: Aliased to /dev/block/1:3 in device cache (preferred name) /dev/ram4: Aliased to /dev/block/1:4 in device cache (preferred name) /dev/ram5: Aliased to /dev/block/1:5 in device cache (preferred name) /dev/ram6: Aliased to /dev/block/1:6 in device cache (preferred name) /dev/ram7: Aliased to /dev/block/1:7 in device cache (preferred name) /dev/ram8: Aliased to /dev/block/1:8 in device cache (preferred name) /dev/ram9: Aliased to /dev/block/1:9 in device cache (preferred name) /dev/random: Not a block device /dev/root: Already in device cache /dev/rtc: Not a block device /dev/rtc0: Not a block device /dev/sda: Aliased to /dev/block/8:0 in device cache (preferred name) /dev/sda1: Aliased to /dev/block/8:1 in device cache (preferred name) /dev/sda2: Aliased to /dev/block/8:2 in device cache (preferred name) /dev/sda3: Aliased to /dev/block/8:3 in device cache (preferred name) /dev/sda5: Aliased to /dev/block/8:5 in device cache (preferred name) /dev/sda6: Aliased to /dev/block/8:6 in device cache (preferred name) /dev/sdb: Aliased to /dev/block/8:16 in device cache (preferred name) /dev/sdb1: Aliased to /dev/block/8:17 in device cache (preferred name) /dev/sdb2: Aliased to /dev/block/8:18 in device cache (preferred name) /dev/sdb3: Aliased to /dev/block/8:19 
in device cache (preferred name) /dev/sdb5: Aliased to /dev/block/8:21 in device cache (preferred name) /dev/sdb6: Aliased to /dev/block/8:22 in device cache (preferred name) /dev/shm/network/ifstate: Not a block device /dev/snapshot: Not a block device /dev/sndstat: stat failed: Datei oder Verzeichnis nicht gefunden /dev/stderr: Not a block device /dev/stdin: Not a block device /dev/stdout: Not a block device /dev/systemlvm/home: Aliased to /dev/dm-2 in device cache /dev/systemlvm/tmp: Aliased to /dev/dm-3 in device cache /dev/systemlvm/usr: Aliased to /dev/dm-1 in device cache /dev/systemlvm/var: Aliased to /dev/dm-0 in device cache /dev/tty: Not a block device /dev/tty0: Not a block device [... many more "not a block device"] /dev/vcsa6: Not a block device /dev/xconsole: Not a block device /dev/zero: Not a block device Wiping internal VG cache lvmcache: initialised VG #orphans_lvm1 lvmcache: initialised VG #orphans_pool lvmcache: initialised VG #orphans_lvm2 Reading all physical volumes. This may take a while... Finding all volume groups /dev/ram0: Skipping (regex) /dev/loop/0: Skipping (sysfs) /dev/sda: Skipping (regex) Opened /dev/md0 RO /dev/md0: size is 192512 sectors Closed /dev/md0 /dev/md0: size is 192512 sectors Opened /dev/md0 RW O_DIRECT /dev/md0: block size is 1024 bytes Closed /dev/md0 Using /dev/md0 Opened /dev/md0 RW O_DIRECT /dev/md0: block size is 1024 bytes /dev/md0: No label detected Closed /dev/md0 /dev/dm-0: Skipping (regex) /dev/ram1: Skipping (regex) /dev/sda1: Skipping (regex) Opened /dev/md1 RO /dev/md1: size is 5863552 sectors Closed /dev/md1 /dev/md1: size is 5863552 sectors Opened /dev/md1 RW O_DIRECT /dev/md1: block size is 4096 bytes Closed /dev/md1 Using /dev/md1 Opened /dev/md1 RW O_DIRECT /dev/md1: block size is 4096 bytes /dev/md1: No label detected Closed /dev/md1 /dev/dm-1: Skipping (regex) /dev/ram2: Skipping (regex) /dev/sda2: Skipping (regex) Opened /dev/md2 RO /dev/md2: size is 303596160 sectors Closed /dev/md2 /dev/md2: size is 303596160 sectors Opened /dev/md2 RW O_DIRECT /dev/md2: block size is 4096 bytes Closed /dev/md2 Using /dev/md2 Opened /dev/md2 RW O_DIRECT /dev/md2: block size is 4096 bytes /dev/md2: lvm2 label detected lvmcache: /dev/md2: now in VG #orphans_lvm2 (#orphans_lvm2) /dev/md2: Found metadata at 39936 size 2632 (in area at 2048 size 194560) for systemlvm (rL8Oq2-dA7o-eRYe-u1or-JA7U-fnb1-kjOyvr) lvmcache: /dev/md2: now in VG systemlvm with 1 mdas lvmcache: /dev/md2: setting systemlvm VGID to rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvr lvmcache: /dev/md2: VG systemlvm: Set creation host to rescue. 
Closed /dev/md2 /dev/dm-2: Skipping (regex) /dev/ram3: Skipping (regex) /dev/sda3: Skipping (regex) /dev/dm-3: Skipping (regex) /dev/ram4: Skipping (regex) /dev/ram5: Skipping (regex) /dev/sda5: Skipping (regex) /dev/ram6: Skipping (regex) /dev/sda6: Skipping (regex) /dev/ram7: Skipping (regex) /dev/ram8: Skipping (regex) /dev/ram9: Skipping (regex) /dev/ram10: Skipping (regex) /dev/ram11: Skipping (regex) /dev/ram12: Skipping (regex) /dev/ram13: Skipping (regex) /dev/ram14: Skipping (regex) /dev/ram15: Skipping (regex) /dev/sdb: Skipping (regex) /dev/sdb1: Skipping (regex) /dev/sdb2: Skipping (regex) /dev/sdb3: Skipping (regex) /dev/sdb5: Skipping (regex) /dev/sdb6: Skipping (regex) Locking /lib/init/rw/V_systemlvm RB Finding volume group "systemlvm" Opened /dev/md2 RW O_DIRECT /dev/md2: block size is 4096 bytes /dev/md2: lvm2 label detected lvmcache: /dev/md2: now in VG #orphans_lvm2 (#orphans_lvm2) with 1 mdas /dev/md2: Found metadata at 39936 size 2632 (in area at 2048 size 194560) for systemlvm (rL8Oq2-dA7o-eRYe-u1or-JA7U-fnb1-kjOyvr) lvmcache: /dev/md2: now in VG systemlvm with 1 mdas lvmcache: /dev/md2: setting systemlvm VGID to rL8Oq2dA7oeRYeu1orJA7Ufnb1kjOyvr lvmcache: /dev/md2: VG systemlvm: Set creation host to rescue. Using cached label for /dev/md2 Read systemlvm metadata (19) from /dev/md2 at 39936 size 2632 /dev/md2 0: 0 16: home(0:0) /dev/md2 1: 16 24: var(40:0) /dev/md2 2: 40 40: var(0:0) /dev/md2 3: 80 40: usr(0:0) /dev/md2 4: 120 40: var(80:0) /dev/md2 5: 160 8: tmp(0:0) /dev/md2 6: 168 16: var(64:0) /dev/md2 7: 184 80: var(120:0) /dev/md2 8: 264 64: home(16:0) /dev/md2 9: 328 128: var(200:0) /dev/md2 10: 456 32: home(80:0) /dev/md2 11: 488 440: var(328:0) /dev/md2 12: 928 24: home(112:0) /dev/md2 13: 952 206: NULL(0:0) Found volume group "systemlvm" using metadata type lvm2 Read volume group systemlvm from /etc/lvm/backup/systemlvm Unlocking /lib/init/rw/V_systemlvm Closed /dev/md2 Unlocking /lib/init/rw/P_global ~# vgdisplay --- Volume group --- VG Name systemlvm System ID Format lvm2 Metadata Areas 1 Metadata Sequence No 19 VG Access read/write VG Status resizable MAX LV 0 Cur LV 4 Open LV 4 Max PV 0 Cur PV 1 Act PV 1 VG Size 144,75 GB PE Size 128,00 MB Total PE 1158 Alloc PE / Size 952 / 119,00 GB Free PE / Size 206 / 25,75 GB VG UUID rL8Oq2-dA7o-eRYe-u1or-JA7U-fnb1-kjOyvr ~# pvdisplay --- Physical volume --- PV Name /dev/md2 VG Name systemlvm PV Size 144,77 GB / not usable 16,31 MB Allocatable yes PE Size (KByte) 131072 Total PE 1158 Free PE 206 Allocated PE 952 PV UUID ZSAzP5-iBvr-L7jy-wB8T-AiWz-0g3m-HLK66Y :~# lvdisplay --- Logical volume --- LV Name /dev/systemlvm/home VG Name systemlvm LV UUID YXrfdg-5OSY-DVkN-eiQe-Qksg-CI84-9Z2hx8 LV Write Access read/write LV Status available # open 2 LV Size 17,00 GB Current LE 136 Segments 4 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 253:2 --- Logical volume --- LV Name /dev/systemlvm/var VG Name systemlvm LV UUID 25N7CR-ZpUM-zR18-NfS6-zeSe-AVnV-T98LuU LV Write Access read/write LV Status available # open 2 LV Size 96,00 GB Current LE 768 Segments 7 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 253:0 --- Logical volume --- LV Name /dev/systemlvm/usr VG Name systemlvm LV UUID 3TpFXt-LjYG-Ewn7-9IdX-sSCZ-Pl8A-xmqbmQ LV Write Access read/write LV Status available # open 2 LV Size 5,00 GB Current LE 40 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 253:1 --- Logical volume --- LV Name /dev/systemlvm/tmp VG Name 
systemlvm LV UUID c5MJ4K-olev-Mjt8-5PPB-rQuR-TkXb-x6NvTi LV Write Access read/write LV Status available # open 2 LV Size 1,00 GB Current LE 8 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 253:3

    Read the article

  • SQL Server 2012 - AlwaysOn

    - by Claus Jandausch
I was not just irritated, I was downright shocked - and for a brief moment speechless (which is rarely the case). Someone had just asked me "when Oracle would offer something comparable to AlwaysOn - and whether it would at all?" Had I ended up in the wrong movie? I couldn't help voicing my displeasure and explaining that the question normally runs the other way around. Admittedly, there may be debatable points when comparing Oracle and SQL Server - points where Oracle does not necessarily come out ahead - but clustering for high availability (HA), disaster recovery (DR) and scalability is certainly not one of them. Afterwards I filed this experience away as a one-off that would never happen again - until shortly thereafter I was taught otherwise and heard exactly the same question once more, this time even in an Exadata environment with an Oracle stretch cluster. Once is never, but twice is once too often... True to this old motto, it was clear to me that this could not be left standing. I have no idea how the Microsoft marketing department managed to make people suspect an innovative technology behind the AlwaysOn branding - but it has apparently done its job well. Good marketing aside, the question remains what is really behind it and how the whole thing compares to Oracle - if it compares at all. Which brings us back to the original question. So much for the background of this blog post - the rest of it is my answer.

"Windows was the God ..."

To understand the real difference between Oracle and Microsoft, you first have to know the most significant Microsoft dogma. It can be put quite simply: "Everything has to be based on Windows." The heading of this paragraph is not a phrase I coined but a quote. It comes from a longer article by Kurt Eichenwald in Vanity Fair from August 2012. It is titled Microsoft's Lost Decade and is recommended to anyone who wants to better understand the "Microsoft machinery" under Steve Ballmer and some of its curiosities. "YOU TALKING TO ME?" Microsoft C.E.O. Steve Ballmer during his keynote at the 2012 International Consumer Electronics Show in Las Vegas on January 9. Some things in this article may appear exaggerated - but they are not. Much of it I already knew from my own experience and can only confirm; other parts only now really became clear to me. The following passages in particular led to the aha moment: "Windows was the god—everything had to work with Windows," said Stone... "Every little thing you want to write has to build off of Windows (or other existing products)," one software engineer said. "It can be very confusing, …" I have always pointed out that in a SQL Server failover cluster the Microsoft database contributes essentially nothing noteworthy; it relies entirely on the Windows operating system. That is also why the Windows Server Enterprise Edition has to be installed if a failover cluster is to be set up for SQL Server: the cluster services are delivered there - not with SQL Server.
SQL Server is merely another server product for which Windows can be used in failure scenarios - like Microsoft Exchange, for example, or Microsoft SharePoint, or any other server product hosted on Windows. Oracle can be used with it, too; the keyword here is Oracle Fail Safe. But why would you do that when at the same time a superior technology such as Oracle Real Application Clusters (RAC) is available, which does not require a Windows Enterprise Edition because Oracle ships its own clusterware, and which moreover provides shorter failover times, since this cluster technology is integrated into the database and does not rely on "third parties"? So if a SQL Server failover cluster buys you no technical advantages, but on top of that saddles you with hidden license costs through licensing the Windows Server Enterprise Edition, why has Microsoft not likewise worked, in all the years since SQL Server 2000, on a new and innovative solution that can keep up with Oracle RAC? Microsoft has enough developers, doesn't it? It can't be the money, either. Just read the two quotes above once more and you will understand the reason. There is really no other way left to explain why AlwaysOn consists of two different technologies, both of which are in turn based on Windows Server Failover Clustering (WSFC). Clear disadvantages result from this - but more on that later. To understand AlwaysOn, it helps to briefly recall which HA/DR (High Availability/Disaster Recovery) solutions Microsoft has provided for SQL Server so far.

Replication: based on logical replication and a publisher/subscriber architecture - transactional replication, merge replication, snapshot replication. Microsoft's replication is comparable to Oracle GoldenGate; Oracle GoldenGate, however, is the more comprehensive technology and offers high performance.

Log shipping: Microsoft's log shipping is a simple technology, comparable to Oracle managed recovery in Oracle version 7. Log shipping has the following characteristics: transaction log backups are shipped from the primary to the secondary (or secondaries); they are applied (e.g. restored) on each secondary individually; an optional third server instance (monitor server) provides monitoring and alerting; the log restore can be interrupted for a read-only mode on the secondary; automatic failover is not supported.

Database mirroring: Microsoft's database mirroring became available with SQL Server 2005; it looked like Oracle Data Guard in Oracle9i but was functionally not as comprehensive. An HA/DR pair is a 1:1 relationship protecting the production database (principal DB); all insert, update and delete operations are replayed on the standby database (mirrored DB). Modes: synchronous (high-safety mode) and asynchronous (high-performance mode). Automatic failover: supported in high-safety (synchronous) mode, provided a witness server is present.

On the question of continuity

The question is where these technologies now stand in the context of SQL Server 2012. Introduced with fanfare at the time, database mirroring was the declared tool of choice.
I am not a product manager at Microsoft and can only state my opinion here, but judging by the SQL AlwaysOn team blog, things do not look good for database mirroring - at least not in the long run: "Does AlwaysOn Availability Group replace Database Mirroring going forward?" "The short answer is we recommend that you migrate from the mirroring configuration or even mirroring and log shipping configuration to using Availability Group. Database Mirroring will still be available in the Denali release but will be phased out over subsequent releases. Log Shipping will continue to be available in future releases." Which finally brings us to the actual topic. What is a so-called availability group, and what exactly is behind the promising-sounding AlwaysOn label?

SQL Server 2012 - AlwaysOn

Two HA features hide behind the "AlwaysOn" branding: on the one hand AlwaysOn Failover Clustering, aka SQL Server Failover Cluster Instances (FCI), on the other hand the AlwaysOn Availability Groups.

Failover Cluster Instances (FCI): roughly correspond to Oracle's stretch cluster concept; build on Windows Server Failover Clustering (WSFC); provide HA at the instance level.

AlwaysOn Availability Groups: similar to the idea of consistency groups, as found in storage-level replication software such as EMC SRDF; depend on Windows Server Failover Clustering (WSFC); provide HA at the database level.

Note: do not confuse a SQL Server database with an Oracle database - nor an Oracle instance with a SQL Server instance. The same terms have a different meaning here, which is not infrequently a reason why Oracle and Microsoft DBAs quickly talk past each other. Think of a SQL Server database more as an Oracle schema; that comes closer. Something like the SQL Server Northwind database is comparable to the Oracle Scott schema. If you want to know the exact differences, you will find a detailed description in my book "Oracle10g Release 2 für Windows und .NET", available from Lehmanns, Amazon, etc.

Windows Server Failover Clustering (WSFC)

As you can see, both AlwaysOn technologies are in turn based on Windows Server Failover Clustering (WSFC) in order to guarantee high availability at the instance level on the one hand and at the database level on the other. Hence a short description of WSFC. WSFC is an infrastructure feature shipped with the Windows operating system to provide HA for server applications such as Microsoft Exchange, SharePoint, SQL Server, etc. Like any other cluster, a WSFC cluster consists of a group of independent servers that work together to increase the availability of an application or service. If a cluster node or service fails, the service hosted on that node until then can be transferred automatically or manually to another node available in the cluster - commonly known as a failover. Under SQL Server 2012, both the AlwaysOn Availability Groups and the AlwaysOn Failover Cluster Instances use WSFC as the platform technology to register components as WSFC cluster resources. Related resources are combined into a resource group, which can be made dependent on other WSFC cluster resources.
The WSFC cluster service can then detect the need to restart the SQL Server instance or trigger an automatic failover to another server node in the WSFC cluster.

Failover Cluster Instances (FCI)

A SQL Server Failover Cluster Instance (FCI) is a single SQL Server instance that runs in a failover cluster consisting of several Windows Server Failover Clustering (WSFC) nodes and thus provides HA (high availability) at the instance level. Using a multi-subnet FCI, remote DR (disaster recovery) can also be supported. Another option for remote DR is to run a database hosted under FCI in an availability group - more on this later.

FCI and the WSFC basis: FCI, which is used for local high availability of instances, resembles the outdated architecture of a cold cluster (active-passive). Under SQL Server 2008 this technology was called SQL Server 2008 Failover Clustering; it used the Windows Server Failover Cluster. In SQL Server 2012 Microsoft has bundled this basic technology under the AlwaysOn label, but it is still the classic active-passive configuration. The sequence of events in a failover is as follows: provided no hardware or system error occurs, all dirty pages in the buffer cache are written to disk; all corresponding SQL Server services in the resource group are stopped on the active node; ownership of the resource group is transferred to another node of the FCI; the new owner of the resource group starts its SQL Server services; connection requests from client applications are automatically redirected to the new active node under the same virtual network name (VNN). Depending on the time of the last checkpoint, the number of dirty pages in the buffer cache that still have to be written to disk can lead to unpredictably long failover times. To throttle this number, SQL Server 2012 has a new capability called indirect checkpoints. Indirect checkpoints resemble the Fast-Start MTTR Target feature of the Oracle database, which was already available with Oracle9i.

SQL Server Multi-Subnet Clustering

A SQL Server multi-subnet failover cluster corresponds conceptually to an Oracle RAC stretch cluster - but only at first glance. In contrast to RAC, in a local SQL Server failover cluster only one node at a time is active for a database. For data replication between geographically distant sites, Microsoft relies on third-party solutions for storage mirroring. The improvement this scenario gains from a SQL Server 2012 implementation is simply that a VLAN (Virtual Local Area Network) configuration is no longer required, as was previously the case. The following diagram shows how this is handled with SQL Server 2012: in site A and site B, HA is ensured by a local active-passive cluster in each case. Particular attention must be paid here to configuration and tuning, otherwise completely unacceptable failover times result. The reason is that the downtime experienced on the client side no longer depends only on the pure failover time, but additionally on the duration of DNS replication between the DNS servers. (Remember that we are talking about multi-subnet clustering here.)
In addition, one has to consider how quickly the clients query the updated DNS information. Special configuration becomes necessary for the node heartbeat, HostRecordTTL (host record time-to-live) and the intersite replication frequency for Active Directory Sites and Services. Default TTL for Windows Server 2008 R2: 20 minutes; recommended setting: 1 minute. DNS update replication frequency in a Windows environment: 180 minutes; recommended setting: 15 minutes (the minimum value). Looking at these values, one has to conclude that even an optimal configuration cannot meet the rigid SLAs (service level agreements) for HA and DR of today's business-critical applications, because it implies a client-side failover experience of 16 minutes in total. Here is an excerpt from the SQL Server 2012 online documentation: "Cons: If a cross-subnet failover occurs, the client recovery time could be 15 minutes or longer, depending on your HostRecordTTL setting and the setting of your cross-site DNS/AD replication schedule."

We have now reached a point in our considerations that explains why I used the "Windows was the God ..." quote earlier. The unconditional dependency on Windows increasingly becomes a problem, because it raises the complexity of a Microsoft-based solution instead of reducing it. And complexity is the last thing CIOs want these days. In fairness to SQL Server 2012 and AlwaysOn it must be said that such long failover times are not a "must" but a "can". Yet even a "can" can have unforeseeable and costly consequences at the wrong moment. The unpredictability is in turn caused by the many components involved in the implementation and their dependencies - for example, three cluster solutions (two from Microsoft, one third party). However you twist and turn it, there is no getting around this fact - quite independently of the length of a downtime or of failover times. In contrast to AlwaysOn and the version of a stretch cluster presented here, a corresponding Oracle implementation avoids this kind of complexity caused by multiple dependencies. The difference is made by database-integrated mechanisms such as Fast Application Notification (FAN) and Fast Connection Failover (FCF). For Oracle MAA (Maximum Availability Architecture) configurations, inter-site failover times in the range of seconds are not unusual. If you follow the link to the Oracle MAA, you will also find a number of customer case studies. This, too, is an important distinguishing feature compared to AlwaysOn, because the Oracle technology has already proven itself many times over in highly critical environments.

Availability Groups

The so-called availability groups are - besides FCI - the other building block of AlwaysOn.

Note: before we look at them more closely, recall once more that a SQL Server database does not have the same meaning as an Oracle database, but rather corresponds to an Oracle schema. Something like the SQL Server Northwind database is comparable to the Oracle Scott schema.

An availability group is composed of a set of several user databases that are treated together as a group in the event of a failover.
An availability group supports one set of primary databases (the primary replica) and one to four sets of corresponding secondary databases (secondary replicas). Not every SQL Server database can be assigned to an AlwaysOn availability group, however. The SQL Server specialist Michael Otey lists the following requirements in his SQL Server Pro article: availability groups must be created with user databases - system databases cannot be used; the databases must be in read-write mode - read-only databases are not supported; the databases in an availability group must be multiuser databases; they must not use the AUTO_CLOSE feature; they must use the full recovery model and a full backup must exist; a given database can belong to only a single availability group and must not be configured for database mirroring; Microsoft also recommends that the directory path of a database be identical on the primary and the secondary server. As you can see, availability groups are not suitable for covering HA and DR completely. The distinction between the instance level (FCI) and the database level (availability groups) is of great importance. I was recently told that with availability groups one could do without shared storage and thereby save costs. So far, so good... You can of course do an installation purely with availability groups and without FCI - but then you should be aware of everything that is left unprotected as a result, and what this in turn means for disaster recovery (DR) and SLAs (service level agreements). In short, in practice there is probably no way around the combination of both AlwaysOn products and the complexity that comes with it.

Availability Groups and WSFC

AlwaysOn depends on Windows Server Failover Clustering (WSFC) to monitor and manage the current roles of the availability replicas in an availability group and to decide how a failover event affects the availability replicas. The following diagram shows the relationship between availability groups and WSFC. The availability mode is a property of each availability replica, so synchronous and asynchronous can be mixed. Availability mode: asynchronous-commit mode - the primary replica completes transactions without waiting for the secondary; synchronous-commit mode - the primary replica waits for the commit of the secondary replica. Failover types: automatic, manual, forced (with possible data loss). Synchronous-commit mode: planned, manual failover without data loss; automatic failover without data loss. Asynchronous-commit mode: only forced, manual failover with possible data loss. SQL Server has no separate switchover concept as in Oracle Data Guard; for SQL Server, all role transitions are called failover. In fact, SQL Server does not support a switchover for asynchronous connections - there is only the forced failover with possible data loss. A capability similar to switchover under Oracle Data Guard simply does not exist.

SQL Server FCI with Availability Groups

In addition to the availability groups, a second failover level can be set up by implementing SQL Server FCI (on shared storage) with WSFC.
An availability replica can then be hosted on a standalone instance or on an FCI instance. To be clear: the availability groups themselves do not require shared storage. This combination can be used for local HA at the instance level and DR at the database level through availability groups. The following diagram shows this scenario. Attention: this is not a counterpart to Oracle RAC plus Data Guard, even if the picture may give that impression - all secondary nodes in the FCI are purely passive. There is also another, serious restriction: SQL Server Failover Cluster Instances (FCI) do not support automatic AlwaysOn failover for availability groups. Every availability replica hosted under FCI can only be configured for manual failover.

Readable secondary replicas

One or more availability replicas in an availability group can be configured for read access while running as a secondary replica. This resembles Oracle Active Data Guard, but there are restrictions. All queries against the secondary database are automatically mapped to the snapshot isolation level, which is a versioning of the rows. Microsoft attempted thereby to emulate Oracle's MVRC (Multi Version Read Consistency). In fact, SQL Server snapshot isolation is better compared to Oracle Flashback. The implementation of the snapshot isolation level is a feature bolted on after the fact, not an inherent part of the database kernel as in Oracle's case. (I will write another blog post on this shortly, when I look at the new SQL Server 2012 core licensing.) In practice, the mapping to the snapshot isolation level leads to serious restrictions that one should be aware of in advance of production operation: Should an active transaction exist on the primary database at the moment a readable secondary replica is added to the availability group, the row versions on the corresponding secondary database will not be fully available right away. An active transaction on the primary replica must first be completed (commit or rollback) and that transaction record must be processed on the secondary replica. Until then, the isolation level mapping on the secondary database is incomplete and queries are temporarily blocked. Microsoft says: "This is needed to guarantee that row versions are available on the secondary replica before executing the query under snapshot isolation as all isolation levels are implicitly mapped to snapshot isolation." (SQL Storage Engine Blog: AlwaysOn: I just enabled Readable Secondary but my query is blocked?) Fundamentally this means that an active readable replica cannot be added to the availability group without temporarily quiescing the primary replica. Since read operations are mapped to the snapshot isolation transaction level, the cleanup of ghost records on the primary replica can be blocked by transactions on one or more secondary replicas - for example by a long-running query on the secondary replica. This cleanup is also blocked when the connection to the secondary replica drops or the data exchange is interrupted.
Log truncation is also prevented in this state. If this state persists for a longer time, Microsoft recommends removing the secondary replica from the availability group - which represents a serious downtime problem. The read-only workload on the secondary replicas can block incoming DDL changes. Although the read operations hold no shared locks thanks to the row versioning, they do take Sch-S locks (schema stability locks), and DDL changes applied through redo operations can be blocked by them. If DDL is blocked due to a concurrent read workload and the threshold for 'recovery interval' (a SQL Server configuration option) is exceeded, SQL Server generates the event sqlserver.lock_redo_blocked, upon which Microsoft recommends killing the blocking readers. No consideration whatsoever is given to the availability of the application. None of these restrictions exist with Oracle Active Data Guard.

Backups on secondary replicas

On the secondary replicas, backups (BACKUP DATABASE via Transact-SQL) can only be created as copy-only backups of a full database, files and filegroups. Creating incremental backups is not supported, which is a serious shortfall compared to the backup support for physical standbys under Oracle Data Guard. Note: a possible workaround via snapshots remains a workaround. Another limitation of this feature compared to Oracle Data Guard is that the backup of a secondary replica cannot be performed if it cannot communicate with the primary replica. In addition, the secondary replica must be synchronized, or in the process of synchronizing, in order to create the backup on the secondary replica.

Comparing Microsoft AlwaysOn with the Oracle MAA

I come back to the question mentioned at the beginning and put to me several times - "when - and whether at all - Oracle would offer something comparable to AlwaysOn?" - and to my associated (brief) irritation. If you have read this blog post up to this point, you now know my answer. Not every point applies to everyone, and that was never the deeper purpose of my answer anyway. If, for example, no multi-subnet setup is involved, all criticisms in that regard are initially moot - which does not mean they could not become relevant again as early as tomorrow (never say "never"). Other pitfalls, in turn, are not necessarily stepped into in a test environment but only in live operation - all the more so if one is not aware of potential problems and runs no dedicated tests. And whoever wants to position AlwaysOn successfully will have no interest at all in drawing attention to possible weaknesses and the proverbial devil in the details. That is not an accusation - it is only human. It is also understandable to concentrate primarily on "what works" and "what runs well" rather than on "what can lead to problems" or "what does not work". Who wants to be the killjoy? Speaking for myself, I can only say that I would rather know about all possible restrictions in advance, instead of having to experience them painfully first-hand after a short period of blissful ignorance.
I am convinced you feel the same way. Below, therefore, is a summary of all the points I consider absolutely worth mentioning in comparison with the Oracle MAA (Maximum Availability Architecture), should an evaluation of Microsoft AlwaysOn be under consideration.

1. AlwaysOn is a complex technology. The SQL Server AlwaysOn stack is composed of three different technologies: Windows Server Failover Clustering (WSFC), SQL Server Failover Cluster Instances (FCI) and SQL Server Availability Groups. Such a solution cannot be called seamless, as the many restrictions described by Microsoft also attest. While earlier SQL Server versions were evolving towards their own HA/DR technologies (such as database mirroring), Microsoft now recommends migration. But why this change of course? It does not lead to a consistent and robust offering of HA/DR technology for business-critical environments. Does the answer lie in my thesis that "Windows was the God ..." still holds, and that no one wants to see the drawbacks of the all-too-tight coupling with Windows? Decide for yourself...

2. Failover Cluster Instances - no RAC counterpart. The SQL Server and Windows Server clustering technology is still based on the outdated active-passive model and leads to a waste of system resources. Looking at just two nodes, the full added value of an active-active cluster (like Real Application Clusters), as developed by Oracle ten years ago, is not immediately apparent. But once you know the benefits of scaling by simply adding further cluster nodes, which then all work together as a single logical system, you understand what lies behind the motto "pay as you grow". An active-active cluster is also about high availability - and a failover happens faster than in an active-passive model - but that is not all it is about. It should be pointed out here that the Oracle 11g Standard Edition already includes the use of Oracle RAC up to four sockets at no extra cost. If you want to run it on Windows, you do not need a Windows Server Enterprise Edition, since Oracle 11g ships its own clusterware. You get high availability and scalability and can use the cheaper Windows Server Standard Edition for it.

3. SQL Server multi-subnet clustering - dependency on third-party storage mirroring. The SQL Server multi-subnet clustering architecture supports building a stretch cluster, but it is based on the active-passive model. The really problematic part, however, is that to protect the database you rely on third-party storage mirroring technology, with no integration between Windows Server Failover Clustering (WSFC) and the underlying mirroring technology. If a failover then takes place at the instance level in the cluster, there is no coordination with a possible failover at the level of the storage array.

4. Availability Groups - four, or only two after all? A primary replica allows up to four secondary replicas within an availability group, but only two in synchronous-commit mode.
5. Availability Groups - no asynchronous switchover
Availability group technology is also positioned as a suitable vehicle for administrative tasks such as upgrades or maintenance. One must, however, be aware of a serious deficit: in asynchronous availability mode, the only possible role transition is a forced failover with data loss (a sketch of both role-transition commands follows at the end of this comparison). To avoid losing data during planned maintenance, you have to configure the synchronous availability mode, which in turn has serious consequences for WAN deployments. Following this thought to its conclusion, availability group technology cannot be used effectively for planned maintenance in such an environment.

6. Automatic failover - not always possible
Both SQL Server FCIs and availability groups support automatic failover. If you combine the two, however, the result is no automatic failover at all: their interplay in a failover situation leads to race conditions, which is why this configuration no longer allows automatic failover to a replica in an availability group. Here, too, the deeper problem of AlwaysOn shows itself - a composite of different technologies with a dependency on Windows.

7. Problematic RTO (Recovery Time Objective)
Microsoft positions the SQL Server multi-subnet clustering architecture as a workable HA/DR architecture. Considering the problems around DNS replication and possible client-side waits of up to 16 minutes, however, strict RTO requirements (Recovery Time Objectives) cannot be met. Unlike Oracle, SQL Server has no database-integrated technologies such as Oracle Fast Application Notification (FAN) or Oracle Fast Connection Failover (FCF).

8. Problematic RPO (Recovery Point Objective)
SQL Server allows a forced failover, but offers no way to automatically transfer the last bits of data from the old to the new primary replica if the availability mode was asynchronous. Oracle Data Guard, by contrast, provides this through its flush redo feature, ensuring zero data loss and the best possible RPO even in forced failover situations.

9. Readable secondary replicas with limitations
Because readable secondary replicas run under the snapshot isolation transaction level, they carry limitations that affect the primary database. Ghost record cleanup on the primary database is affected by long-running queries on the readable secondary. The readable secondary database cannot be added to the availability group while there are active transactions on the primary database. In addition, DDL changes on the primary database can be blocked by queries on the secondary.
And incremental backups are not supported here either.

None of these restrictions exists under Oracle Data Guard.
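As referenced in point 5 above, the two role-transition commands look roughly as follows (a sketch only; the group name [AG_Sales] is again invented). The planned manual failover without data loss is accepted only on a synchronized, synchronous-commit secondary; on an asynchronous secondary only the forced form remains, and it may lose data.

-- Planned manual failover: run on the target secondary, requires SYNCHRONOUS_COMMIT and a SYNCHRONIZED replica
ALTER AVAILABILITY GROUP [AG_Sales] FAILOVER;

-- Forced failover: the only option for an asynchronous-commit secondary, with possible data loss
ALTER AVAILABILITY GROUP [AG_Sales] FORCE_FAILOVER_ALLOW_DATA_LOSS;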

    Read the article

  • Windows CE Chat Transcript (March 30, 2010)

    - by Bruce Eitman
    For those of you who missed the chat today, here is the raw transcript.   By raw, I mean that I copied and pasted the discussion without any edits. This is divided into two parts, the top part is the answers from the Microsoft Experts and the bottom part is the questions from the audience. Answers from Microsoft:   Karel Danihelka [MS] (Expert)[2010-3-30 12:2]: Hi everyone, my name is Karel Danihelka and I am developer in partner response team. Sing Wee [MS] (Expert)[2010-3-30 12:2]: Hi, I'm Sing Wee, part of the CoreOS/BSP Test Team. GLanger_MS (Expert)[2010-3-30 12:2]: Hi, I'm Glen Langer, program manager on the Core Team. Karel Danihelka [MS] (Expert)[2010-3-30 12:3]: Q: I need to implement hardware timers on my windows CE 6.0 device to trigger events at microsecond intervals. Where should i start? A: Until you are using CPU with GHz frequency your only chance is use interrupt handler and implement all funcionality there. But it will be really tricky and may reduce system performance. If period will be near to millisecond timeframe you can use normal thread wait for event pattern. Karel Danihelka [MS] (Expert)[2010-3-30 12:5]: Q: I want to partition my NAND Flash device. One partition to use for hive ragistry and the other for the apps and data. The only way to do it is programmatically or setting some registry values ? A: It need to be set in registry - generally you need mark this partition as boot partition. Karel Danihelka [MS] (Expert)[2010-3-30 12:7]: Q: My CPU is Intel celeron M processor 1Ghz. A: In this case you can try use normal approach - in interrupt handler return SYSINTR and start thread in device driver which will spin thread waiting on event attached to this SYSINTR. Karel Danihelka [MS] (Expert)[2010-3-30 12:7]: Q: If i need to implement it using interrupt handlers, What are all the files that I should look at? A: Good quesiton - I would recommend documentation and there was BSP development book to download for free. mikehall_ms (Moderator)[2010-3-30 12:8]: Q: Hi guys, what's the formal way to report bugs back to the core team / product team? The mechanism of calling the support phone number every time is really onerous and time-consuming. Is there another mechanism? A: Using product support is the formal way to report bugs/issues - Product support can then create an issue that can be tracked. Karel Danihelka [MS] (Expert)[2010-3-30 12:9]: Q: But the operation for creating the partitions ? A: This is tricky - if you will make it autopartition & autoformat it will be created by filesystem. But generally it depends on your boot loader. mikehall_ms (Moderator)[2010-3-30 12:10]: Q: Is Windows Phone 7 related to Windows CE? If so, can you tell me what version of Windows CE is the basis? Is it in fact the new version of Windows Mobile? A: At MIX 2010 Charlie Kindel presented a session that described some of the core technologies that make up Windows Phone 7 Series, including the underlying operating system (Windows CE) and the new ISV programming model based on Silverlight and .NET - check out the Mix Online Videos to get more information. davbo-msft (Moderator)[2010-3-30 12:10]: Q: Is Windows Phone 7 related to Windows CE? If so, can you tell me what version of Windows CE is the basis? Is it in fact the new version of Windows Mobile? A: This forum is to discussed released products in the industry. Windows Mobile & Windows CE are based on the same Windows CE Kernel/system. 
Windows CE is focused on deliverying the OS for embedded customers in the market where Windows Mobile is focused on deliverying compelling Windows Phone platform. davbo-msft (Moderator)[2010-3-30 12:11]: Q: Is Windows Phone 7 related to Windows CE? If so, can you tell me what version of Windows CE is the basis? Is it in fact the new version of Windows Mobile? A: http://en.wikipedia.org/wiki/Windows_CE Wikipedia gives a good breakdown of the version history. Travis Hobrla [MS] (Expert)[2010-3-30 12:13]: Q: I created a OS design with KITL and kernel debugger enabled. But I am unable to connect to the target for debugging. I am getting the following error when i try to connect with the device. PB Debugger Cannot initialize the Kernel Debugger. PB Debugger Debugger could not initialize connection. PB Debugger The Kernel Debugger is waiting to connect with target. PB Debugger The Kernel Debugger has been disconnected successfully. A: One possibility is that a rogue cesvchost.exe has co-opted the debugger. I am assuming this is CE 6.0? Can you try exiting visual studio and manually killing the cesvchost.exe process from the Task Manager? davbo-msft (Moderator)[2010-3-30 12:14]: Q: Hi guys, what's the formal way to report bugs back to the core team / product team? The mechanism of calling the support phone number every time is really onerous and time-consuming. Is there another mechanism? A: For info on contacting Microsoft support refer to the support page on the Embedded website: http://msdn.microsoft.com/en-us/windowsembedded/dd897633.aspx Sing Wee [MS] (Expert)[2010-3-30 12:16]: Q: Do u mean ISR/IST implementation? How can i register an interrupt? What kind of interrupt should i register? A: A good introduction to interrupts in WinCE 6.0 can be found here (aside from the documentation on MSDN): http://download.microsoft.com/download/9/c/f/9cffaa58-4000-48d6-a4b2-5fed9e4e6410/Chapter%206%20-%20Developing%20Device%20Drivers.pdf mikehall_ms (Moderator)[2010-3-30 12:16]: Q: What will be different in Windows Compact 7 from CE 6.0? A: Unfortunately we cannot discuss unreleased products on this chat - keep an eye on the Windows Embedded web site and blogs to keep up to date with product announcements. Travis Hobrla [MS] (Expert)[2010-3-30 12:16]: Q: I am using CE 6.0. There is no cesvchost process running in my system. A: What operating system are you using? Karel Danihelka [MS] (Expert)[2010-3-30 12:16]: Q: So...I have to modify file system code to create 2 partition at system startup ?!! I haven't understood.... A: You don't need to modify code, there are registry settings to achive this (look to documentation). But you may need to create partition table in boot loader. Unfortunatelly there isn't simple way how to do it. davbo-msft (Moderator)[2010-3-30 12:18]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? A: Windows Embedded CE 6.0 R3 includes Sliverlight for Windows Embedded. Refer to New Features overview on the embedded web site. http://www.microsoft.com/windowsembedded/en-us/products/windowsce/default.mspx. Silverlight - The power of Silverlight brought to Windows Embedded CE to create rich applications and user interfaces is new part of Windows CE Embedded. mikehall_ms (Moderator)[2010-3-30 12:20]: Q: The link for developing device drivers is not working. can u please check that? 
A: http://msdn.microsoft.com/en-us/library/ms923714.aspx davbo-msft (Moderator)[2010-3-30 12:20]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? A: Sorry misunderstood the question I thought you were asking if embedded CE could handle Silverlight. Please repost so that the question goes back into the active queue because once answered no way to put the status back to open. Travis Hobrla [MS] (Expert)[2010-3-30 12:21]: Q: sorry! Windows XP SP3 A: Can you try exiting VS2005 and confirming cesvchost.exe is not running, then renaming C:\Documents and Settings\USERNAME\Local Settings\Application Data\Microsoft\CoreCon\1.0 to 1.0_backup, then restarting VS2005? Sing Wee [MS] (Expert)[2010-3-30 12:24]: Q: Can I have the book's name please? A: I believe the downloadable version is related to the last link I sent. If you go to the following website, I believe you can download the whole thing: http://msdn.microsoft.com/en-us/windowsembedded/ce/cc294468.aspx davbo-msft (Moderator)[2010-3-30 12:24]: Q: If one has an image on a Silverlight page, it seems to be cached. How would one refresh that cache after changing the underlying image? A: change the URI of the image or use a writeable bitmap if they want to manually toggle the pixels Sing Wee [MS] (Expert)[2010-3-30 12:25]: A: Whoops, hit [ENTER] too early. On the right side, you'll see there an "Exam Preparation Kit" link that can be downloaded in several different languages. Sing Wee [MS] (Expert)[2010-3-30 12:25]: Q: Can I have the book's name please? A: Whoops, hit [ENTER] too early. On the right side, you'll see there an "Exam Preparation Kit" link that can be downloaded in several different languages. Karel Danihelka [MS] (Expert)[2010-3-30 12:26]: Q: I have a NAND Flash on my target device. On this flash I have the hive registry and an application.I have observed that when the NAND flash is fully, the system startup time is longer....is there a degradation of NAND use that influences the startup time ? Why ? A: Yes - on boot flash abstraction library (old one) read metadata from all sectors to rebuild physical - logical mapping table. davbo-msft (Moderator)[2010-3-30 12:27]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? A: Need addition info on this question. Can you provide more details on what you are trying to do in Silverlight? davbo-msft (Moderator)[2010-3-30 12:27]: Q: If one has an image on a Silverlight page, it seems to be cached. How would one refresh that cache after changing the underlying image? A: Additonal Info: if you want to manually touch the pixels use WriteableBitmap if you want to use the underlying HWND then use IXRVisualHost::GetHWND() davbo-msft (Moderator)[2010-3-30 12:28]: Q: Writable bitmap, is there an example of the syntax? A: if you want to manually touch the pixels use WriteableBitmap if you want to use the underlying HWND then use IXRVisualHost::GetHWND() davbo-msft (Moderator)[2010-3-30 12:29]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? A: Can I get more information about this question about what you are trying to accomplish in Silverlight? davbo-msft (Moderator)[2010-3-30 12:31]: Q: IXRVisualHost::GetHWND() exactly what I needed Thanks, A: Your welcome Sing Wee [MS] (Expert)[2010-3-30 12:31]: Q: ok. thanks for the book's link A: No problem. Travis Hobrla [MS] (Expert)[2010-3-30 12:32]: Q: Typically for SoC devices you name your hardware specific libraries in the form "SOCDIRNAME_LIBNAME". 
In our platform "OMAP35XX_TPS659XX_TI_V1" if you do this we cause the catalog parser to die... For example if we have a library "Musbfn_OMAP35XX_TPS659XX_TI_V1.dll" entering this in the catalogs pbcxml file in a <module> section causes the XML parser to fail with : Error 3 The 'urn:Microsoft.PlatformBuilder/Catalog:Module' element is        invalid - The value '012345678901234567890123456789.dll' is invalid         according to its datatype         'urn:Microsoft.PlatformBuilder/Catalog:CatalogFileName' - The actual         length is greater than the MaxLength value. A: There are a couple workarounds I can think of. I believe the Module element is only used when doing SYSGEN parsing to make sure dependent SYSGENs are present when the item is selected, so I believe it is optional to the catalog. The other obvious workaround is to shorten the soc name. I realize neither of these solutions is ideal. This is not something we anticipated when we tested CE6.0, sorry. Travis Hobrla [MS] (Expert)[2010-3-30 12:33]: Q: I am getting this error only when I select the KdStub as the debugger in Target device connectivity. A: Right, but KdStub is the debugger that you should use. Have you tried the steps I suggested? Travis Hobrla [MS] (Expert)[2010-3-30 12:36]: Q: If I select Active KTIL, My OS doesn't boots. It says "loading NK.EXE at 0x<xxxxx> location" after that nothing comes in the debug log. A: Can you look at the serial debug output and see what is happening there? Often it can give you a clue to the KITL driver malfunctioning. Travis Hobrla [MS] (Expert)[2010-3-30 12:38]: Q: I have tried that and I am getting the same error. A: I am assuming you have a device created in Target -> Connectivity Options in Platform Builder. What are the Kernel download / Kernel transport for your device? Travis Hobrla [MS] (Expert)[2010-3-30 12:40]: Q: KITL: *** Device Name CEPC56059 *** WARN: KITL will run in polling mode VBridge:: built on [Jul 10 2009] time [10:20:14] VBridgeInit()...TX = [16384] bytes -- Rx = [16384] bytes Tx buffer [0xA1B84860] to [0xA1B88860]. Rx buffer [0xA1B88880] to [0xA1B8C880]. VBridge:: NK add MAC: [0-60-65-2-DA-FB] Connecting to Desktop KITL: Connected host IP: 1 Port: 1086 .. this is the output of the serial debug A: This looks reasonable and does not give clues as to why boot would halt at that point. If you capture a network trace or turn on KITL debug zones via dpCurSettings in kitl.dll, do you see KITL active after this? Travis Hobrla [MS] (Expert)[2010-3-30 12:41]: Q: Both is happening via Ethernet. A: Only thing I have left to suggest is a Platform Builder installation Repair, then. Karel Danihelka [MS] (Expert)[2010-3-30 12:42]: Q: Hi, I saw that the ATADISK is quite generic and des not have any optimizations. Do you have any advice to consider while tryin to improve the performance of it? A: If I remember correctly sample code has support for some specific hardware controllers (little obsolete now). This should be good start point (if you will not decide take existing driver as sample and write you own). Travis Hobrla [MS] (Expert)[2010-3-30 12:44]: Q: I didn't do that. I have to try. A: I think that's the next valid step. You need to figure out whether KITL is hanging or the device - use instrumented serial debug messages and network trace to determine this. 
Sing Wee [MS] (Expert)[2010-3-30 12:46]: Q: KITL: *** Device Name CEPC56059 *** WARN: KITL will run in polling mode VBridge:: built on [Jul 10 2009] time [10:20:14] VBridgeInit()...TX = [16384] bytes -- Rx = [16384] bytes Tx buffer [0xA1B84860] to [0xA1B88860]. Rx buffer [0xA1B88880] to [0xA1B8C880]. VBridge:: NK add MAC: [0-60-65-2-DA-FB] Connecting to Desktop KITL: Connected host IP: 1 Port: 1086 .. this is the output of the serial debug A: Neo, have you by any chance tried looking into your firewall to see if it might be blocking traffic on any particular ports? Wireshark/netmon might be able to help you here if that's the issue. davbo-msft (Moderator)[2010-3-30 12:48]: Q: I lost spell check, how can i get it back A: Hello - can you give additional details about your question? Is this related to a Windows CE Embedded application? masatos_MSFT (Expert)[2010-3-30 12:51]: Q: When attempting to run the CETK cellcore tests the documentation states the pre-requisites include "stinger.ini", "ltk.ini" but windows CE doesn't provide these or document what they fully need to contain. Implicitly you also need "datatrans.xml" which isn't supplied. If you get around this error and steal these from Windows Mobile instead, when you try and run the CETK tests you get a data abort in radiometricsdll.dll. How should we invoke the cellcore parts of CETK? A: Hi Pev, what version of Windows CE and CETK are you using? I do not have the expertise to answer this question, but can find somebody who can. Travis Hobrla [MS] (Expert)[2010-3-30 12:52]: Q: I don't see a kitlcore.dll in my OS. is my debug image fails to load because of that? A: kitl.dll should be all that's needed, kitlcore.lib is linked into that. Travis Hobrla [MS] (Expert)[2010-3-30 12:55]: Q: I've got a platform (not developed by myself) where I2C bus support has to be provided through the OAL as the kernel needs to talk to devices such as the power management IC and gas gauge so a 'proper' I2C driver hanging off device manager isn't possible. This happens to be a polled driver, so obviously it hits the system hard when either under lots of traffic or an error condition occurs and the driver constantly polls. I originally thought that there was no straightforward way to make such code interrupt driven in the kernel (as it's a cludge) but I realised that that's exactly what ETHDBG drivers do. Is there any reason why I shouldn't have a go at implementing a similar mechanism for our kernel resident I2C driver? If not, are there any obvious pitfalls - I've not seen any other BSP's do this in the past... A: You can make a 'proper' driver that calls down into the OAL to do the actual I2C transactions. Alternatively you can build an interrupt-based version in the OAL where you handle everything in the ISR. There is nothing wrong with that so long as the rest of your drivers and app threads can handle longer times with interrupts off while you are servicing I2C interrupts. Sing Wee [MS] (Expert)[2010-3-30 12:55]: Q: I am having trouble with my mouse, I have the microsoft wireless mobile mouse 3000, when I push the scroll button I am suppose to have autoscroll instead it shows other web pages,Can you help me out tell me what to do!!! A: Sorry, this current chat is about Windows Embedded Compact. Hope you're able to find an answer to your question elsewhere. davbo-msft (Moderator)[2010-3-30 12:56]: Q: Is the Silverlight Animation "Spline" a BezierSpline? 
A: Spline - http://msdn.microsoft.com/en-us/library/ee501495.aspx<BR< a>>   masatos_MSFT (Expert)[2010-3-30 12:57]: Thanks for the info Pev. I will follow up with the CETK experts here and get back to you. davbo-msft (Moderator)[2010-3-30 12:59]: Q: Spline- bad link A: http://msdn.microsoft.com/en-us/library/ee501495.aspx davbo-msft (Moderator)[2010-3-30 13:0]: Q: Sorry, got to tirm the ">" A: No Worries http://msdn.microsoft.com/en-us/library/ee501495.aspx davbo-msft (Moderator)[2010-3-30 13:0]: Hello everyone, we are just about out of time. Thank you for joining us for our Windows Embedded CE 6.0 chat today! <http://www.Microsoft.com/Embedded>; A special thank you to the product group members for coming out. The transcript of today’s chat will be posted online as soon as possible, to <http://msdn.microsoft.com/en-us/chats>;. We’ll see you again for another chat next month. Please check <http://msdn.microsoft.com/en-us/chats>; for the list of upcoming chats. If you still have unanswered questions, let me suggest that you post them on one of our newsgroups on <http://msdn.microsoft.com/en-us/windowsembedded/ce/default.aspx> -Windows Embedded CE 6.0 R3 Now Available! <http://msdn.microsoft.com/windowsembedded/ce/dd630616.aspx>; davbo-msft (Moderator)[2010-3-30 13:1]: Q: hi everybody. I would like to know if there is something know about a bug in RTC API (VOIP), especially when using SIP. According the to the analysis with application verifyier there is a heap link in rtcdllmedia.dll. All of the unreleased chunks seem to have a size of 6560 bytes. A: I will follow up with the Networking Team for a response. davbo-msft (Moderator)[2010-3-30 13:1]: Q: Hi, we've problems with debugging of applications (= breakpoints in Platform Builder will be ignored) over KITL on Windows CE 5.0, if the PDB files are large (over 60MB). Are there any limitations to size of the PDB files? A: I will follow up with the tools team for a response and post with the transcript. Sing Wee [MS] (Expert)[2010-3-30 13:1]: Q: I am unable to use the target control in my development environment. any ideas? A: Make sure you have SYSGEN_SHELL=1 set in your build environment. davbo-msft (Moderator)[2010-3-30 13:3]: Q: what are the main differences between Object Store and RAM disk ? They are both in RAM...are there performance differences ? access differences ? A: I will follow up with the Core Team and get a response posted with the transcript to MSDN  The Questions   [2010-3-30 12:57]: Thanks for the info Pev. I will follow up with the CETK experts here and get back to you. [2010-3-30 12:59]:   [2010-3-30 13:0]:   [2010-3-30 13:0]: Hello everyone, we are just about out of time. Thank you for joining us for our Windows Embedded CE 6.0 chat today! <http://www.Microsoft.com/Embedded>; A special thank you to the product group members for coming out. The transcript of today’s chat will be posted online as soon as possible, to <http://msdn.microsoft.com/en-us/chats>;. We’ll see you again for another chat next month. Please check <http://msdn.microsoft.com/en-us/chats>; for the list of upcoming chats. If you still have unanswered questions, let me suggest that you post them on one of our newsgroups on <http://msdn.microsoft.com/en-us/windowsembedded/ce/default.aspx> -Windows Embedded CE 6.0 R3 Now Available! 
<http://msdn.microsoft.com/windowsembedded/ce/dd630616.aspx>; [2010-3-30 13:1]: [2010-3-30 13:1]: [2010-3-30 13:1]: [2010-3-30 13:3]: neo (Guest)[2010-3-30 11:37]: Hi all KellyG (Guest)[2010-3-30 11:37]: Hi KellyG (Guest)[2010-3-30 11:37]: I have a question unrelated to windows Ce embedded, can you please help me?? neo (Guest)[2010-3-30 11:38]: I need to implement hardware timers on my windows CE 6.0 device to trigger events at microsecond intervals! c neo (Guest)[2010-3-30 11:38]: yes. post it. May be i cud give a try KellyG (Guest)[2010-3-30 11:38]: My Product key listed on my tower is not the product key I need for microsoft office, but that is the only product key listed. neo (Guest)[2010-3-30 11:39]: I hope this is a chat for windows embedded. please post ur queries in office forums KellyG (Guest)[2010-3-30 11:39]: it is but i could not find a forum for office neo (Guest)[2010-3-30 11:40]: I think moderators will help u out. @ davbo-msft: can u help this guy? neo (Guest)[2010-3-30 11:41]: Q: I need to implement hardware timers on my windows CE 6.0 device to trigger events at microsecond intervals. Where should i start? davbo-msft (Moderator)[2010-3-30 11:50]: Our chat today covers the topic of Windows Embedded CE! 1. This chat will last for one hour. During this hour, our Experts will respond to as many questions as they can. Please understand that there may be some questions we cannot respond to due to lack of information or because the information is not yet public. 2. We encourage you to submit questions for our Experts. To do so, type your questions in the send box, select the “ask the Experts” box and click SEND. Questions sent directly to the Guest Chat room will not be answered by the Experts, but we encourage other community members to assist. 3. We ask that you stay on topic for the duration of the chat. This helps the Guests and Experts follow the conversation more easily. We invite you to ask off topic questions after this chat is over, but not during. 4. Please abide by the Chat Code of Conduct. Chat code of conduct: <http://msdn.microsoft.com/chats/chatroom.aspx?ctl=hlp#Conduct>; Pev (Guest)[2010-3-30 11:54]: Evening! davbo-msft (Moderator)[2010-3-30 11:54]: Hello everyone this is Dave Boyce - I worked in the Multimedia area for Windows CE. neo (Guest)[2010-3-30 11:55]: hello dave neo (Guest)[2010-3-30 11:55]: The chat code of conduct link is not working! Pev (Guest)[2010-3-30 11:56]: Best be polite just in case then ;-) neo (Guest)[2010-3-30 11:56]: davbo-msft (Moderator)[2010-3-30 11:57]: I'll check out the issue w/ the link paolopat (Guest)[2010-3-30 12:0]: Hello davbo-msft (Moderator)[2010-3-30 12:0]: We are pleased to welcome our Experts for today’s chat. I will have them introduce themselves now. Chat will begin in a couple of minutes. <http://www.Microsoft.com/Embedded>; paolopat (Guest)[2010-3-30 12:3]: Hello Experts ! neo (Guest)[2010-3-30 12:3]: Welcome all! paolopat (Guest)[2010-3-30 12:3]: Q: I want to partition my NAND Flash device. One partition to use for hive ragistry and the other for the apps and data. The only way to do it is programmatically or setting some registry values ? neo (Guest)[2010-3-30 12:3]: Q: I need to implement hardware timers on my windows CE 6.0 device to trigger events at microsecond intervals. Where should i start? neo (Guest)[2010-3-30 12:5]: Q: My CPU is Intel celeron M processor 1Ghz. 
Pev (Guest)[2010-3-30 12:5]: neo: if your silicon has multiple general purpose timers, pick one that's not in use for the system timer / profiler and set it up to trigger irqs for your purpose. You can't guarantee hard realtime type responses though... GarySwalling (Guest)[2010-3-30 12:5]: Q: Is Windows Phone 7 related to Windows CE? If so, can you tell me what version of Windows CE is the basis? Is it in fact the new version of Windows Mobile? Pev (Guest)[2010-3-30 12:6]: Q: Hi guys, what's the formal way to report bugs back to the core team / product team? The mechanism of calling the support phone number every time is really onerous and time-consuming. Is there another mechanism? paolopat (Guest)[2010-3-30 12:6]: Q: But the operation for creating the partitions ? neo (Guest)[2010-3-30 12:6]: Q: If i need to implement it using interrupt handlers, What are all the files that I should look at? GPM (Guest)[2010-3-30 12:6]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? Jhony (Guest)[2010-3-30 12:7]: Q: I created a OS design with KITL and kernel debugger enabled. But I am unable to connect to the target for debugging. I am getting the following error when i try to connect with the device. PB Debugger Cannot initialize the Kernel Debugger. PB Debugger Debugger could not initialize connection. PB Debugger The Kernel Debugger is waiting to connect with target. PB Debugger The Kernel Debugger has been disconnected successfully. Charles (Guest)[2010-3-30 12:7]: What will be different in Windows Compact 7 from CE 6.0? neo (Guest)[2010-3-30 12:8]: Can I have the book's name please? kiefs_dev (Guest)[2010-3-30 12:8]: Q: hi everybody. I would like to know if there is something know about a bug in RTC API (VOIP), especially when using SIP. According the to the analysis with application verifyier there is a heap link in rtcdllmedia.dll. All of the unreleased chunks seem to have a size of 6560 bytes. paolopat (Guest)[2010-3-30 12:10]: Q: So...I have to modify file system code to create 2 partition at system startup ?!! I haven't understood.... neo (Guest)[2010-3-30 12:10]: Q: Do u mean ISR/IST implementation? How can i register an interrupt? What kind of interrupt should i register? neo (Guest)[2010-3-30 12:11]: Q: Can I have the book's name please? Charles (Guest)[2010-3-30 12:11]: Q: What will be different in Windows Compact 7 from CE 6.0? PaulT (Guest)[2010-3-30 12:11]: neo: I'd say that you really need the docs for YOUR BSP, not generic documents for BSPs in general. Each BSP may be architected differently. If you're using the CEPC BSP, then the documentation that comes with Platform Builder is a reasonable place to look. GPM (Guest)[2010-3-30 12:11]: Q: If one has an image on a Silverlight page, it seems to be cached. How would one refresh that cache after changing the underlying image? Elektrobit (Guest)[2010-3-30 12:12]: Q: Hi, we've problems with debugging of applications (= breakpoints in Platform Builder will be ignored) over KITL on Windows CE 5.0, if the PDB files are large (over 60MB). Are there any limitations to size of the PDB files? Jhony (Guest)[2010-3-30 12:15]: Q: I am using CE 6.0. There is no cesvchost process running in my system. alexquisi (Guest)[2010-3-30 12:15]: Q: Hi, I saw that the ATADISK is quite generic and des not have any optimizations. Do you have any advice to consider while tryin to improve the performance of it? Jhony (Guest)[2010-3-30 12:17]: Windows XP service pack 1 Jhony (Guest)[2010-3-30 12:17]: Q: sorry! 
Windows XP SP3 neo (Guest)[2010-3-30 12:19]: Q: The link for developing device drivers is not working. can u please check that? paolopat (Guest)[2010-3-30 12:20]: Q: I have a NAND Flash on my target device. On this flash I have the hive registry and an application.I have observed that when the NAND flash is fully, the system startup time is longer....is there a degradation of NAND use that influences the startup time ? Why ? Pev (Guest)[2010-3-30 12:20]: Q: When attempting to run the CETK cellcore tests the documentation states the pre-requisites include "stinger.ini", "ltk.ini" but windows CE doesn't provide these or document what they fully need to contain. Implicitly you also need "datatrans.xml" which isn't supplied. If you get around this error and steal these from Windows Mobile instead, when you try and run the CETK tests you get a data abort in radiometricsdll.dll. How should we invoke the cellcore parts of CETK? Pev (Guest)[2010-3-30 12:21]: Hi all, Pev (Guest)[2010-3-30 12:21]: oops Pev (Guest)[2010-3-30 12:21]: :-D Pev (Guest)[2010-3-30 12:24]: Typically for SoC devices you name your hardware specific libraries in the form "SOCDIRNAME_LIBNAME". In our platform "OMAP35XX_TPS659XX_TI_V1" if you do this we cause the catalog parser to die... For example if we have a library "Musbfn_OMAP35XX_TPS659XX_TI_V1.dll" entering this in the catalogs pbcxml file in a <module> section causes the XML parser to fail with : Pev (Guest)[2010-3-30 12:25]: Q: Error 3 The 'urn:Microsoft.PlatformBuilder/Catalog:Module' element is        invalid - The value '012345678901234567890123456789.dll' is invalid         according to its datatype         'urn:Microsoft.PlatformBuilder/Catalog:CatalogFileName' - The actual         length is greater than the MaxLength value. GPM (Guest)[2010-3-30 12:25]: Q: Writable bitmap, is there an example of the syntax? Pev (Guest)[2010-3-30 12:25]: Q: Typically for SoC devices you name your hardware specific libraries in the form "SOCDIRNAME_LIBNAME". In our platform "OMAP35XX_TPS659XX_TI_V1" if you do this we cause the catalog parser to die... For example if we have a library "Musbfn_OMAP35XX_TPS659XX_TI_V1.dll" entering this in the catalogs pbcxml file in a <module> section causes the XML parser to fail with : Error 3 The 'urn:Microsoft.PlatformBuilder/Catalog:Module' element is        invalid - The value '012345678901234567890123456789.dll' is invalid         according to its datatype         'urn:Microsoft.PlatformBuilder/Catalog:CatalogFileName' - The actual         length is greater than the MaxLength value. Pev (Guest)[2010-3-30 12:25]: sorry, messed up submission there! GPM (Guest)[2010-3-30 12:26]: Q: I would like to get a handle to a Silverlight screen section, is that possiable? GPM (Guest)[2010-3-30 12:28]: Q: IXRVisualHost::GetHWND() exactly what I needed Thanks, PaulT (Guest)[2010-3-30 12:29]: GPM: You don't have to keep submitting the questions. The chat experts have an application that they're using to follow the chat and all Ask the Experts questions are logged. Jhony (Guest)[2010-3-30 12:29]: Q: I am getting this error only when I select the KdStub as the debugger in Target device connectivity. neo (Guest)[2010-3-30 12:30]: Q: ok. thanks for the book's link Pev (Guest)[2010-3-30 12:31]: Hm, did those two I submitted get picked up by anyone? neo (Guest)[2010-3-30 12:33]: Q: If I select Active KTIL, My OS doesn't boots. It says "loading NK.EXE at 0x<xxxxx> location" after that nothing comes in the debug log. 
PaulT (Guest)[2010-3-30 12:34]: Pev: PaulT (Guest)[2010-3-30 12:35]: Pev: I'm sure they did. The guys who are actually on the chat may not be experts in that part of things. That's usually the explanation when you don't get an answer in 10 minutes or so. Pev (Guest)[2010-3-30 12:36]: Ah, fair enough Susie (Guest)[2010-3-30 12:36]: My Outlook Express incoming mail is corrput. No ONE has been able to fix the problem, Dell or Norton. I have dial up I'm in a rural area Jhony (Guest)[2010-3-30 12:37]: Q: I have tried that and I am getting the same error. Pev (Guest)[2010-3-30 12:37]: Susie : Use Thunderbird instead :-D PaulT (Guest)[2010-3-30 12:37]: Susie: Sorry, but this chat is not about Windows, but Embedded (like what runs on a phone). Your best chance is to find a local expert or talk to your ISP. paolopat (Guest)[2010-3-30 12:37]: Q: what are the main differences between Object Store and RAM disk ? They are both in RAM...are there performance differences ? access differences ? Susie (Guest)[2010-3-30 12:38]: My computer knowlege is very limited, what is Thunderbird? Pev (Guest)[2010-3-30 12:38]: A different email client :-D neo (Guest)[2010-3-30 12:39]: Q: KITL: *** Device Name CEPC56059 *** WARN: KITL will run in polling mode VBridge:: built on [Jul 10 2009] time [10:20:14] VBridgeInit()...TX = [16384] bytes -- Rx = [16384] bytes Tx buffer [0xA1B84860] to [0xA1B88860]. Rx buffer [0xA1B88880] to [0xA1B8C880]. VBridge:: NK add MAC: [0-60-65-2-DA-FB] Connecting to Desktop KITL: Connected host IP: 1 Port: 1086 .. this is the output of the serial debug Susie (Guest)[2010-3-30 12:39]: Do I need to uninstall Outlook Express youngboyzie (Guest)[2010-3-30 12:39]: I need to start battery calibration for my new battery for my dell inspiron 1525 laptop and should be able to reach the BIOS screen by hitting f2 but this isnt working... help? Pev (Guest)[2010-3-30 12:39]: Nah, you can run it instead - you'll still need help from your ISP to configure it I expect Jhony (Guest)[2010-3-30 12:39]: Q: Both is happening via Ethernet. PaulT (Guest)[2010-3-30 12:40]: youngboyzie: You're off-topic. This is not a general chat for Windows and certainly not for Dell. You'll have to ask Dell how to get to setup; it's their machine. bill (Guest)[2010-3-30 12:41]: I lost spell check, how can i get et back neo (Guest)[2010-3-30 12:42]: Q: I didn't do that. I have to try. Jhony (Guest)[2010-3-30 12:43]: Q: Ok. I will do it then. bill (Guest)[2010-3-30 12:43]: Q: I lost spell check, how can i get it back PaulT (Guest)[2010-3-30 12:44]: bill: This isn't a general Windows chat. There are some Web forums that you might try. GarySwalling (Guest)[2010-3-30 12:45]: Q: Thanks, I found the Phone 7 presentation at http://live.visitmix.com/MIX10/Sessions/CL13 GPM (Guest)[2010-3-30 12:45]: Q: Is the Silverlight Animation "Spline" a BezierSpline? neo (Guest)[2010-3-30 12:46]: Q: ok. I'll do it. thanks Pev (Guest)[2010-3-30 12:46]: Whowever was asking about KITL connection : I've had this loads in the past. I think I started debugging last time by using wireshark to see what was happening on the network then setting up the OAL_ETHER and OAL_FUNC and OAL_VERBOSE as well as OAL_KITL flags to see what was actually happening in the driver.... Pev (Guest)[2010-3-30 12:47]: I'd generally make sure that you're testing though a 10baseT hub (instead of anything faster) and forcing Active KITL in polled mode too... neo (Guest)[2010-3-30 12:48]: Q: I disabled the firewall in my PC. 
Pev (Guest)[2010-3-30 12:50]: Q: I've got a platform (not developed by myself) where I2C bus support has to be provided through the OAL as the kernel needs to talk to devices such as the power management IC and gas gauge so a 'proper' I2C driver hanging off device manager isn't possible. This happens to be a polled driver, so obviously it hits the system hard when either under lots of traffic or an error condition occurs and the driver constantly polls. I originally thought that there was no straightforward way to make such code interrupt driven in the kernel (as it's a cludge) but I realised that that's exactly what ETHDBG drivers do. Is there any reason why I shouldn't have a go at implementing a similar mechanism for our kernel resident I2C driver? If not, are there any obvious pitfalls - I've not seen any other BSP's do this in the past... neo (Guest)[2010-3-30 12:51]: Q: I don't see a kitlcore.dll in my OS. is my debug image fails to load because of that? Pev (Guest)[2010-3-30 12:52]: Q: Hi masatos, I'm using Windows Embedded CE 6.0 with R3 and patched to feb 2010's QFE's (with it's associated CETK version) this is a machine with only CE 6.0 on (no conflicts with earlier CE or WM...) neo (Guest)[2010-3-30 12:54]: ok. Got it neo (Guest)[2010-3-30 12:54]: Q: ok. Got it Roundman (Guest)[2010-3-30 12:55]: Q: I am having trouble with my mouse, I have the microsoft wireless mobile mouse 3000, when I push the scroll button I am suppose to have autoscroll instead it shows other web pages,Can you help me out tell me what to do!!! Pev (Guest)[2010-3-30 12:55]: Hey neo, debugging kitl issues is really frustrating but dont lose heart :-) neo (Guest)[2010-3-30 12:55]: @ pev : u fixed the problem of KITL after that? neo (Guest)[2010-3-30 12:56]: I am getting the same error again and again. I even cleaned my environment and tried in a fresh PC. But didn't succeed yet Pev (Guest)[2010-3-30 12:56]: Well, eventually - my experiences probably won't help you as different platforms have different reasons for doing that neo (Guest)[2010-3-30 12:56]: I think so PaulT (Guest)[2010-3-30 12:58]: neo: Have you searched the old messages in microsoft.public.windowsce.platbuilder? It seems to me that there was a packet size situation where it was possible to have problems with KITL connections based on a setting on the PC. Google Groups, groups.google.com, Advanced Groups Search will allow you to search a single newsgroup or a set of newsgroups easily. GPM (Guest)[2010-3-30 12:58]: Q: Spline- bad link PaulT (Guest)[2010-3-30 12:58]: GPM: without the > at the end does it work? It seems to for me... neo (Guest)[2010-3-30 12:58]: But some times if i try to connect to the device again. The Image information is seen in the serial debug. what does that mean?Download BIN file information: ----------------------------------------------------- [0]: Base Address=0x220000 Length=0x18DAADC Received a broadcast message !CheckUDP: Not UDP (proto = 0x00000001) after this i am getting the old errors. PB debugger cannot initialize ... GPM (Guest)[2010-3-30 12:59]: Q: Sorry, got to tirm the ">" neo (Guest)[2010-3-30 12:59]: ok paul. I ll look into that. davbo-msft (Moderator)[2010-3-30 13:0]: Hello everyone, we are just about out of time. Thank you for joining us for our Windows Embedded CE 6.0 chat today! <http://www.Microsoft.com/Embedded>; A special thank you to the product group members for coming out. The transcript of today’s chat will be posted online as soon as possible, to <http://msdn.microsoft.com/en-us/chats>;. 
We’ll see you again for another chat next month. Please check <http://msdn.microsoft.com/en-us/chats>; for the list of upcoming chats. If you still have unanswered questions, let me suggest that you post them on one of our newsgroups on <http://msdn.microsoft.com/en-us/windowsembedded/ce/default.aspx> -Windows Embedded CE 6.0 R3 Now Available! <http://msdn.microsoft.com/windowsembedded/ce/dd630616.aspx>; neo (Guest)[2010-3-30 13:1]: Q: I am unable to use the target control in my development environment. any ideas? Pev (Guest)[2010-3-30 13:1]: Sure, if KITL isn't connected target control won't work as it runs over kitl... neo (Guest)[2010-3-30 13:1]: ok .thanks pev neo (Guest)[2010-3-30 13:2]: yes. sysgen_shell is set to 1 neo (Guest)[2010-3-30 13:2]: Q: yes. sysgen_shell is set to 1 Marcelovk (Guest)[2010-3-30 13:2]: Q: Is there any way to extract the default command lines of the tests in CETK? I want to have it running unconnected from the desktop.   Copyright © 2010 – Bruce Eitman All Rights Reserved

    Read the article

  • WordPress not resizing images with Nginx + php-fpm and other issues

    - by Julian Fernandes
    Recently I set up an Ubuntu 12.04 VPS (512 MB RAM, 1 GHz CPU) with Nginx + php-fpm + Varnish + APC + Percona's MySQL server + CloudFlare Pro for our Ubuntu LoCo Team's WordPress blog. The blog gets about 3~4k daily hits and uses about 180 MB of RAM and 8~20% CPU. Everything seems to be working insanely fast... page load is really good and about 16x faster than any of our competitors... but there is one problem. When we upload an image, WordPress doesn't resize it, so all we can do is insert the full image into the post. If the image is, let's say, 30 KB, it resizes fine... but if the image is 100 KB+, it won't... In the nginx error logs I see this: upstream timed out (110: Connection timed out) while reading response header from upstream, client: 150.162.216.64, server: www.ubuntubrsc.com, request: "POST /wp-admin/async-upload.php HTTP/1.1", upstream: "fastcgi://unix:/var/run/php5-fpm.sock:", host: "www.ubuntubrsc.com", referrer: "http://www.ubuntubrsc.com/wp-admin/media-upload.php?post_id=2668&" It seems to be related to the issue, but I'm not sure. Once that timeout happens, I also start getting it when trying to view a post: upstream timed out (110: Connection timed out) while reading response header from upstream, client: 150.162.216.64, server: www.ubuntubrsc.com, request: "GET /tutoriais-gimp-6-adicionando-aplicando-novos-pinceis.html HTTP/1.1", upstream: "fastcgi://unix:/var/run/php5-fpm.sock:", host: "www.ubuntubrsc.com", referrer: "http://www.ubuntubrsc.com/" Only a restart of php5-fpm fixes it. I tried increasing some timeouts, but it did not work, so I guess it's some kind of limit I haven't figured out yet. Could someone help me with it, please? /etc/nginx/nginx.conf: user www-data; worker_processes 1; pid /var/run/nginx.pid; events { worker_connections 1024; use epoll; multi_accept on; } http { ## # Basic Settings ## sendfile on; tcp_nopush on; tcp_nodelay off; keepalive_timeout 15; keepalive_requests 2000; types_hash_max_size 2048; server_tokens off; server_name_in_redirect off; open_file_cache max=1000 inactive=300s; open_file_cache_valid 360s; open_file_cache_min_uses 2; open_file_cache_errors off; server_names_hash_bucket_size 64; # server_name_in_redirect off; client_body_buffer_size 128K; client_header_buffer_size 1k; client_max_body_size 2m; large_client_header_buffers 4 8k; client_body_timeout 10m; client_header_timeout 10m; send_timeout 10m; include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ## error_log /var/log/nginx/error.log; access_log off; ## # CloudFlare's IPs (uncomment when site goes live) ## set_real_ip_from 204.93.240.0/24; set_real_ip_from 204.93.177.0/24; set_real_ip_from 199.27.128.0/21; set_real_ip_from 173.245.48.0/20; set_real_ip_from 103.22.200.0/22; set_real_ip_from 141.101.64.0/18; set_real_ip_from 108.162.192.0/18; set_real_ip_from 190.93.240.0/20; real_ip_header CF-Connecting-IP; set_real_ip_from 127.0.0.1/32; ## # Gzip Settings ## gzip on; gzip_disable "msie6"; gzip_vary on; gzip_proxied any; gzip_comp_level 9; gzip_min_length 1000; gzip_proxied expired no-cache no-store private auth; gzip_buffers 32 8k; # gzip_http_version 1.1; gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; ## # nginx-naxsi config ## # Uncomment it if you installed nginx-naxsi ## #include /etc/nginx/naxsi_core.rules; ## # nginx-passenger config ## # Uncomment it if you installed nginx-passenger ## #passenger_root /usr; #passenger_ruby /usr/bin/ruby; ## # 
Virtual Host Configs ## include /etc/nginx/conf.d/*.conf; include /etc/nginx/sites-enabled/*; } /etc/nginx/fastcgi_params: fastcgi_param QUERY_STRING $query_string; fastcgi_param REQUEST_METHOD $request_method; fastcgi_param CONTENT_TYPE $content_type; fastcgi_param CONTENT_LENGTH $content_length; fastcgi_param SCRIPT_FILENAME $request_filename; fastcgi_param SCRIPT_NAME $fastcgi_script_name; fastcgi_param REQUEST_URI $request_uri; fastcgi_param DOCUMENT_URI $document_uri; fastcgi_param DOCUMENT_ROOT $document_root; fastcgi_param SERVER_PROTOCOL $server_protocol; fastcgi_param GATEWAY_INTERFACE CGI/1.1; fastcgi_param SERVER_SOFTWARE nginx/$nginx_version; fastcgi_param REMOTE_ADDR $remote_addr; fastcgi_param REMOTE_PORT $remote_port; fastcgi_param SERVER_ADDR $server_addr; fastcgi_param SERVER_PORT $server_port; fastcgi_param SERVER_NAME $server_name; fastcgi_param HTTPS $https; fastcgi_send_timeout 180; fastcgi_read_timeout 180; fastcgi_buffer_size 128k; fastcgi_buffers 256 4k; # PHP only, required if PHP was built with --enable-force-cgi-redirect fastcgi_param REDIRECT_STATUS 200; /etc/nginx/sites-avaiable/default: ## # DEFAULT HANDLER # ubuntubrsc.com ## server { listen 8080; # Make site available from main domain server_name www.ubuntubrsc.com; # Root directory root /var/www; index index.php index.html index.htm; include /var/www/nginx.conf; access_log off; location / { try_files $uri $uri/ /index.php?q=$uri&$args; } location = /favicon.ico { log_not_found off; access_log off; } location = /robots.txt { allow all; log_not_found off; access_log off; } location ~ /\. { deny all; access_log off; log_not_found off; } location ~* ^/wp-content/uploads/.*.php$ { deny all; access_log off; log_not_found off; } rewrite /wp-admin$ $scheme://$host$uri/ permanent; error_page 404 = @wordpress; log_not_found off; location @wordpress { include /etc/nginx/fastcgi_params; fastcgi_pass unix:/var/run/php5-fpm.sock; fastcgi_param SCRIPT_NAME /index.php; fastcgi_param SCRIPT_FILENAME $document_root/index.php; } location ~ \.php$ { try_files $uri =404; include /etc/nginx/fastcgi_params; fastcgi_index index.php; fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; if (-f $request_filename) { fastcgi_pass unix:/var/run/php5-fpm.sock; } } } server { listen 8080; server_name ubuntubrsc.* www.ubuntubrsc.net www.ubuntubrsc.org www.ubuntubrsc.com.br www.ubuntubrsc.info www.ubuntubrsc.in; return 301 $scheme://www.ubuntubrsc.com$request_uri; } /var/www/nginx.conf: # BEGIN W3TC Minify cache location ~ /wp-content/w3tc/min.*\.js$ { types {} default_type application/x-javascript; expires modified 31536000s; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; add_header Vary "Accept-Encoding"; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; } location ~ /wp-content/w3tc/min.*\.css$ { types {} default_type text/css; expires modified 31536000s; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; add_header Vary "Accept-Encoding"; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; } location ~ /wp-content/w3tc/min.*js\.gzip$ { gzip off; types {} default_type application/x-javascript; expires modified 31536000s; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; add_header Vary "Accept-Encoding"; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; add_header Content-Encoding gzip; } location ~ 
/wp-content/w3tc/min.*css\.gzip$ { gzip off; types {} default_type text/css; expires modified 31536000s; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; add_header Vary "Accept-Encoding"; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; add_header Content-Encoding gzip; } # END W3TC Minify cache # BEGIN W3TC Browser Cache gzip on; gzip_types text/css application/x-javascript text/x-component text/richtext image/svg+xml text/plain text/xsd text/xsl text/xml image/x-icon; location ~ \.(css|js|htc)$ { expires 31536000s; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; } location ~ \.(html|htm|rtf|rtx|svg|svgz|txt|xsd|xsl|xml)$ { expires 3600s; add_header Pragma "public"; add_header Cache-Control "max-age=3600, public, must-revalidate, proxy-revalidate"; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; try_files $uri $uri/ $uri.html /index.php?$args; } location ~ \.(asf|asx|wax|wmv|wmx|avi|bmp|class|divx|doc|docx|eot|exe|gif|gz|gzip|ico|jpg|jpeg|jpe|mdb|mid|midi|mov|qt|mp3|m4a|mp4|m4v|mpeg|mpg|mpe|mpp|otf|odb|odc|odf|odg|odp|ods|odt|ogg|pdf|png|pot|pps|ppt|pptx|ra|ram|svg|svgz|swf|tar|tif|tiff|ttf|ttc|wav|wma|wri|xla|xls|xlsx|xlt|xlw|zip)$ { expires 31536000s; add_header Pragma "public"; add_header Cache-Control "max-age=31536000, public, must-revalidate, proxy-revalidate"; add_header X-Powered-By "W3 Total Cache/0.9.2.5b"; } # END W3TC Browser Cache # BEGIN W3TC Minify core rewrite ^/wp-content/w3tc/min/w3tc_rewrite_test$ /wp-content/w3tc/min/index.php?w3tc_rewrite_test=1 last; set $w3tc_enc ""; if ($http_accept_encoding ~ gzip) { set $w3tc_enc .gzip; } if (-f $request_filename$w3tc_enc) { rewrite (.*) $1$w3tc_enc break; } rewrite ^/wp-content/w3tc/min/(.+\.(css|js))$ /wp-content/w3tc/min/index.php?file=$1 last; # END W3TC Minify core # BEGIN W3TC Skip 404 error handling by WordPress for static files if (-f $request_filename) { break; } if (-d $request_filename) { break; } if ($request_uri ~ "(robots\.txt|sitemap(_index)?\.xml(\.gz)?|[a-z0-9_\-]+-sitemap([0-9]+)?\.xml(\.gz)?)") { break; } if ($request_uri ~* \.(css|js|htc|htm|rtf|rtx|svg|svgz|txt|xsd|xsl|xml|asf|asx|wax|wmv|wmx|avi|bmp|class|divx|doc|docx|eot|exe|gif|gz|gzip|ico|jpg|jpeg|jpe|mdb|mid|midi|mov|qt|mp3|m4a|mp4|m4v|mpeg|mpg|mpe|mpp|otf|odb|odc|odf|odg|odp|ods|odt|ogg|pdf|png|pot|pps|ppt|pptx|ra|ram|svg|svgz|swf|tar|tif|tiff|ttf|ttc|wav|wma|wri|xla|xls|xlsx|xlt|xlw|zip)$) { return 404; } # END W3TC Skip 404 error handling by WordPress for static files # BEGIN Better WP Security location ~ /\.ht { deny all; } location ~ wp-config.php { deny all; } location ~ readme.html { deny all; } location ~ readme.txt { deny all; } location ~ /install.php { deny all; } set $susquery 0; set $rule_2 0; set $rule_3 0; rewrite ^wp-includes/(.*).php /not_found last; rewrite ^/wp-admin/includes(.*)$ /not_found last; if ($request_method ~* "^(TRACE|DELETE|TRACK)"){ return 403; } set $rule_0 0; if ($request_method ~ "POST"){ set $rule_0 1; } if ($uri ~ "^(.*)wp-comments-post.php*"){ set $rule_0 2$rule_0; } if ($http_user_agent ~ "^$"){ set $rule_0 4$rule_0; } if ($rule_0 = "421"){ return 403; } if ($args ~* "\.\./") { set $susquery 1; } if ($args ~* "boot.ini") { set $susquery 1; } if ($args ~* "tag=") { set $susquery 1; } if ($args ~* "ftp:") { set $susquery 1; } if ($args ~* "http:") { set $susquery 1; } if ($args ~* "https:") { set $susquery 1; } if ($args ~* 
"(<|%3C).*script.*(>|%3E)") { set $susquery 1; } if ($args ~* "mosConfig_[a-zA-Z_]{1,21}(=|%3D)") { set $susquery 1; } if ($args ~* "base64_encode") { set $susquery 1; } if ($args ~* "(%24&x)") { set $susquery 1; } if ($args ~* "(\[|\]|\(|\)|<|>|ê|\"|;|\?|\*|=$)"){ set $susquery 1; } if ($args ~* "(&#x22;|&#x27;|&#x3C;|&#x3E;|&#x5C;|&#x7B;|&#x7C;|%24&x)"){ set $susquery 1; } if ($args ~* "(%0|%A|%B|%C|%D|%E|%F|127.0)") { set $susquery 1; } if ($args ~* "(globals|encode|localhost|loopback)") { set $susquery 1; } if ($args ~* "(request|select|insert|concat|union|declare)") { set $susquery 1; } if ($http_cookie !~* "wordpress_logged_in_" ) { set $susquery "${susquery}2"; set $rule_2 1; set $rule_3 1; } if ($susquery = 12) { return 403; } # END Better WP Security /etc/php5/fpm/php-fpm.conf: pid = /var/run/php5-fpm.pid error_log = /var/log/php5-fpm.log emergency_restart_threshold = 3 emergency_restart_interval = 1m process_control_timeout = 10s events.mechanism = epoll /etc/php5/fpm/php.ini (only options i changed): open_basedir ="/var/www/" disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,dl,system,shell_exec,fsockopen,parse_ini_file,passthru,popen,proc_open,proc_close,shell_exec,show_source,symlink,proc_close,proc_get_status,proc_nice,proc_open,proc_terminate,shell_exec ,highlight_file,escapeshellcmd,define_syslog_variables,posix_uname,posix_getpwuid,apache_child_terminate,posix_kill,posix_mkfifo,posix_setpgid,posix_setsid,posix_setuid,escapeshellarg,posix_uname,ftp_exec,ftp_connect,ftp_login,ftp_get,ftp_put,ftp_nb_fput,ftp_raw,ftp_rawlist,ini_alter,ini_restore,inject_code,syslog,openlog,define_syslog_variables,apache_setenv,mysql_pconnect,eval,phpAds_XmlRpc,phpA ds_remoteInfo,phpAds_xmlrpcEncode,phpAds_xmlrpcDecode,xmlrpc_entity_decode,fp,fput,virtual,show_source,pclose,readfile,wget expose_php = off max_execution_time = 30 max_input_time = 60 memory_limit = 128M display_errors = Off post_max_size = 2M allow_url_fopen = off default_socket_timeout = 60 APC settings: [APC] apc.enabled = 1 apc.shm_segments = 1 apc.shm_size = 64M apc.optimization = 0 apc.num_files_hint = 4096 apc.ttl = 60 apc.user_ttl = 7200 apc.gc_ttl = 0 apc.cache_by_default = 1 apc.filters = "" apc.mmap_file_mask = "/tmp/apc.XXXXXX" apc.slam_defense = 0 apc.file_update_protection = 2 apc.enable_cli = 0 apc.max_file_size = 10M apc.stat = 1 apc.write_lock = 1 apc.report_autofilter = 0 apc.include_once_override = 0 apc.localcache = 0 apc.localcache.size = 512 apc.coredump_unmap = 0 apc.stat_ctime = 0 /etc/php5/fpm/pool.d/www.conf user = www-data group = www-data listen = /var/run/php5-fpm.sock listen.owner = www-data listen.group = www-data listen.mode = 0666 pm = ondemand pm.max_children = 5 pm.process_idle_timeout = 3s; pm.max_requests = 50 I also started to get 404 errors in front page if i use W3 Total Cache's Page Cache (Disk Enhanced). It worked fine untill somedays ago, and then, out of nowhere, it started to happen. Tonight i will disable my mobile plugin and activate only W3 Total Cache to see if it's a conflict with them... And to finish all this, i have been getting this error: PHP Warning: apc_store(): Unable to allocate memory for pool. 
in /var/www/wp-content/plugins/w3-total-cache/lib/W3/Cache/Apc.php on line 41 I already modified my APC settings, but with no success. So... could anyone help me with these issues, please? Oh, and if it helps, I installed PHP like this: sudo apt-get install php5-fpm php5-suhosin php-apc php5-gd php5-imagick php5-curl and Nginx from the official PPA. Sorry for my bad English, and thanks for your time! (:
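On the apc_store() warning specifically: that message normally means APC's shared-memory segment is full, or too fragmented, at the moment a new entry is written, which fits a 64M segment shared by the opcode cache and W3 Total Cache's object/database caches. A minimal sketch of the sort of adjustment that is usually tried first, assuming the [APC] block quoted above (the 128M figure is only an illustrative starting point, not a tuned value):

    [APC]
    apc.enabled = 1
    ; Larger shared-memory segment so the opcode cache plus the W3TC user cache fit;
    ; watch APC's fragmentation/usage stats and adjust from here.
    apc.shm_size = 128M
    ; Keep a TTL on user entries so stale slots can be reclaimed
    ; instead of the new allocation failing outright.
    apc.user_ttl = 7200

PHP-FPM has to be restarted afterwards (for example, service php5-fpm restart) before the new segment size takes effect.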

    Read the article

  • ls -l freezes terminal locally and remotely

    - by Jakobud
    I've been reading other SF threads regarding ls not returning results or freezing and stalling terminal sessions and it appears they usually the fault of network problems. My problem however, occurs both over remote SSH sessions but also if I am physically at the server itself... I just installed CentOS 5.4 on one of our servers. I'm setting up some rdiff-backup scripts and when I downloaded librsync and untared it, thats when I started seeing some weird behavior with ls -l. wget http://sourceforge.net/projects/librsync/files/librsync/0.9.7/librsync-0.9.7.tar.gz/download /tmp cd /tmp tar -xzf librsync-0.9.7.tar.gz Simple enough. To view the files in this directory I did this: ls results: librsync-0.9.7 librsync-0.9.7.tar.gz Now, if I ls -l, my terminal freezes. I have to re-ssh in to keep going. After reading SF threads, I thought it was network related. So I was extremely surprised to go sit down at the server itself and see the exact same thing happen... So its obviously not a network issues. Even if I ls /tmp/librsync-0.9.7, my terminal freezes just the same... Next I did an strace and got this (warning: wall of text coming....): strace ls -l /tmp execve("/bin/ls", ["ls", "-l", "/tmp"], [/* 21 vars */]) = 0 brk(0) = 0x1c521000 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cc0000 uname({sys="Linux", node="massive.answeron.com", ...}) = 0 access("/etc/ld.so.preload", R_OK) = -1 ENOENT (No such file or directory) open("/etc/ld.so.cache", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0644, st_size=71746, ...}) = 0 mmap(NULL, 71746, PROT_READ, MAP_PRIVATE, 3, 0) = 0x2b8582cc1000 close(3) = 0 open("/lib64/librt.so.1", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0 \"\200\2730\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=53448, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cd3000 mmap(0x30bb800000, 2132936, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bb800000 mprotect(0x30bb807000, 2097152, PROT_NONE) = 0 mmap(0x30bba07000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x7000) = 0x30bba07000 close(3) = 0 open("/lib64/libacl.so.1", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\0\31@\2740\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=28008, ...}) = 0 mmap(0x30bc400000, 2120992, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bc400000 mprotect(0x30bc406000, 2093056, PROT_NONE) = 0 mmap(0x30bc605000, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x5000) = 0x30bc605000 close(3) = 0 open("/lib64/libselinux.so.1", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0`E\300\2730\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=95464, ...}) = 0 mmap(0x30bbc00000, 2192784, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bbc00000 mprotect(0x30bbc15000, 2097152, PROT_NONE) = 0 mmap(0x30bbe15000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x15000) = 0x30bbe15000 mmap(0x30bbe17000, 1424, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x30bbe17000 close(3) = 0 open("/lib64/libc.so.6", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\220\332\201\2720\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=1717800, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cd4000 mmap(0x30ba800000, 3498328, PROT_READ|PROT_EXEC, 
MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30ba800000 mprotect(0x30ba94d000, 2097152, PROT_NONE) = 0 mmap(0x30bab4d000, 20480, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x14d000) = 0x30bab4d000 mmap(0x30bab52000, 16728, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x30bab52000 close(3) = 0 open("/lib64/libpthread.so.0", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\220W\0\2730\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=145824, ...}) = 0 mmap(0x30bb000000, 2204528, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bb000000 mprotect(0x30bb016000, 2093056, PROT_NONE) = 0 mmap(0x30bb215000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x15000) = 0x30bb215000 mmap(0x30bb217000, 13168, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x30bb217000 close(3) = 0 open("/lib64/libattr.so.1", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\320\17\300\2750\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=17888, ...}) = 0 mmap(0x30bdc00000, 2110728, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bdc00000 mprotect(0x30bdc04000, 2093056, PROT_NONE) = 0 mmap(0x30bde03000, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x3000) = 0x30bde03000 close(3) = 0 open("/lib64/libdl.so.2", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\20\16\300\2720\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=23360, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cd5000 mmap(0x30bac00000, 2109696, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bac00000 mprotect(0x30bac02000, 2097152, PROT_NONE) = 0 mmap(0x30bae02000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x2000) = 0x30bae02000 close(3) = 0 open("/lib64/libsepol.so.1", O_RDONLY) = 3 read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\0=\0\2740\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=247496, ...}) = 0 mmap(0x30bc000000, 2383136, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x30bc000000 mprotect(0x30bc03b000, 2097152, PROT_NONE) = 0 mmap(0x30bc23b000, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x3b000) = 0x30bc23b000 mmap(0x30bc23c000, 40224, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x30bc23c000 close(3) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cd6000 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cd7000 arch_prctl(ARCH_SET_FS, 0x2b8582cd6c50) = 0 mprotect(0x30bba07000, 4096, PROT_READ) = 0 mprotect(0x30bab4d000, 16384, PROT_READ) = 0 mprotect(0x30bb215000, 4096, PROT_READ) = 0 mprotect(0x30ba61b000, 4096, PROT_READ) = 0 mprotect(0x30bae02000, 4096, PROT_READ) = 0 munmap(0x2b8582cc1000, 71746) = 0 set_tid_address(0x2b8582cd6ce0) = 24102 set_robust_list(0x2b8582cd6cf0, 0x18) = 0 futex(0x7fff72d02d6c, FUTEX_WAKE_PRIVATE, 1) = 0 rt_sigaction(SIGRTMIN, {0x30bb005370, [], SA_RESTORER|SA_SIGINFO, 0x30bb00e7c0}, NULL, 8) = 0 rt_sigaction(SIGRT_1, {0x30bb0052b0, [], SA_RESTORER|SA_RESTART|SA_SIGINFO, 0x30bb00e7c0}, NULL, 8) = 0 rt_sigprocmask(SIG_UNBLOCK, [RTMIN RT_1], NULL, 8) = 0 getrlimit(RLIMIT_STACK, {rlim_cur=10240*1024, rlim_max=RLIM_INFINITY}) = 0 access("/etc/selinux/", F_OK) = 0 brk(0) = 0x1c521000 brk(0x1c542000) = 0x1c542000 open("/etc/selinux/config", O_RDONLY) = 3 fstat(3, 
{st_mode=S_IFREG|0644, st_size=448, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cc1000 read(3, "# This file controls the state o"..., 4096) = 448 read(3, "", 4096) = 0 close(3) = 0 munmap(0x2b8582cc1000, 4096) = 0 open("/proc/mounts", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0444, st_size=0, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b8582cc1000 read(3, "rootfs / rootfs rw 0 0\n/dev/root"..., 4096) = 577 close(3) = 0 munmap(0x2b8582cc1000, 4096) = 0 open("/selinux/mls", O_RDONLY) = 3 read(3, "1", 19) = 1 close(3) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 3 connect(3, {sa_family=AF_FILE, path="/var/run/setrans/.setrans-unix"...}, 110) = 0 sendmsg(3, {msg_name(0)=NULL, msg_iov(5)=[{"\1\0\0\0", 4}, {"\1\0\0\0", 4}, {"\1\0\0\0", 4}, {"\0", 1}, {"\0", 1}], msg_controllen=0, msg_flags=0}, MSG_NOSIGNAL) = 14 readv(3, [{"\1\0\0\0", 4}, {"\1\0\0\0", 4}, {"\0\0\0\0", 4}], 3) = 12 readv(3, [{"\0", 1}], 1) = 1 close(3) = 0 open("/usr/lib/locale/locale-archive", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0644, st_size=56413824, ...}) = 0 mmap(NULL, 56413824, PROT_READ, MAP_PRIVATE, 3, 0) = 0x2b8582cd8000 close(3) = 0 ioctl(1, SNDCTL_TMR_TIMEBASE or TCGETS, {B38400 opost isig icanon echo ...}) = 0 ioctl(1, TIOCGWINSZ, {ws_row=65, ws_col=137, ws_xpixel=0, ws_ypixel=0}) = 0 open("/usr/share/locale/locale.alias", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0644, st_size=2528, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(3, "# Locale name alias data base.\n#"..., 4096) = 2528 read(3, "", 4096) = 0 close(3) = 0 munmap(0x2b85862a5000, 4096) = 0 open("/usr/share/locale/en_US.UTF-8/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/share/locale/en_US.utf8/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/share/locale/en_US/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/share/locale/en.UTF-8/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/share/locale/en.utf8/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/share/locale/en/LC_TIME/coreutils.mo", O_RDONLY) = -1 ENOENT (No such file or directory) lstat("/tmp", {st_mode=S_IFDIR|S_ISVTX|0777, st_size=4096, ...}) = 0 getxattr("/tmp", "system.posix_acl_access", 0x0, 0) = -1 ENODATA (No data available) getxattr("/tmp", "system.posix_acl_default", 0x0, 0) = -1 ENODATA (No data available) socket(PF_FILE, SOCK_STREAM, 0) = 3 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(3) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 3 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(3) = 0 open("/etc/nsswitch.conf", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0644, st_size=1711, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(3, "#\n# /etc/nsswitch.conf\n#\n# An ex"..., 4096) = 1711 read(3, "", 4096) = 0 close(3) = 0 munmap(0x2b85862a5000, 4096) = 0 open("/etc/ld.so.cache", O_RDONLY) = 3 fstat(3, {st_mode=S_IFREG|0644, st_size=71746, ...}) = 0 mmap(NULL, 71746, PROT_READ, MAP_PRIVATE, 3, 0) = 0x2b85862a5000 close(3) = 0 open("/lib64/libnss_files.so.2", O_RDONLY) = 3 read(3, 
"\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\340\37\0\0\0\0\0\0"..., 832) = 832 fstat(3, {st_mode=S_IFREG|0755, st_size=53880, ...}) = 0 mmap(NULL, 2139432, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x2b85862b7000 mprotect(0x2b85862c1000, 2093056, PROT_NONE) = 0 mmap(0x2b85864c0000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x9000) = 0x2b85864c0000 close(3) = 0 mprotect(0x2b85864c0000, 4096, PROT_READ) = 0 munmap(0x2b85862a5000, 71746) = 0 open("/etc/passwd", O_RDONLY) = 3 fcntl(3, F_GETFD) = 0 fcntl(3, F_SETFD, FD_CLOEXEC) = 0 fstat(3, {st_mode=S_IFREG|0644, st_size=1823, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(3, "root:x:0:0:root:/root:/bin/bash\n"..., 4096) = 1823 close(3) = 0 munmap(0x2b85862a5000, 4096) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 3 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(3) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 3 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(3) = 0 open("/etc/group", O_RDONLY) = 3 fcntl(3, F_GETFD) = 0 fcntl(3, F_SETFD, FD_CLOEXEC) = 0 fstat(3, {st_mode=S_IFREG|0644, st_size=743, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(3, "root:x:0:root\nbin:x:1:root,bin,d"..., 4096) = 743 close(3) = 0 munmap(0x2b85862a5000, 4096) = 0 open("/tmp", O_RDONLY|O_NONBLOCK|O_DIRECTORY) = 3 fcntl(3, F_SETFD, FD_CLOEXEC) = 0 getdents(3, /* 8 entries */, 32768) = 264 lstat("/tmp/librsync-0.9.7.tar.gz", {st_mode=S_IFREG|0644, st_size=453802, ...}) = 0 getxattr("/tmp/librsync-0.9.7.tar.gz", "system.posix_acl_access", 0x0, 0) = -1 ENODATA (No data available) getxattr("/tmp/librsync-0.9.7.tar.gz", "system.posix_acl_default", 0x0, 0) = -1 ENODATA (No data available) lstat("/tmp/librsync-0.9.7", {st_mode=S_IFDIR|0777, st_size=4096, ...}) = 0 getxattr("/tmp/librsync-0.9.7", "system.posix_acl_access", 0x0, 0) = -1 ENODATA (No data available) getxattr("/tmp/librsync-0.9.7", "system.posix_acl_default", 0x0, 0) = -1 ENODATA (No data available) open("/etc/passwd", O_RDONLY) = 4 fcntl(4, F_GETFD) = 0 fcntl(4, F_SETFD, FD_CLOEXEC) = 0 fstat(4, {st_mode=S_IFREG|0644, st_size=1823, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "root:x:0:0:root:/root:/bin/bash\n"..., 4096) = 1823 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 open("/etc/ld.so.cache", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=71746, ...}) = 0 mmap(NULL, 71746, PROT_READ, MAP_PRIVATE, 4, 0) = 0x2b85862a5000 close(4) = 0 open("/lib64/libnss_ldap.so.2", O_RDONLY) = 4 read(4, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\300r\4\0\0\0\0\0"..., 832) = 832 fstat(4, {st_mode=S_IFREG|0755, st_size=3169960, ...}) = 0 mmap(NULL, 5329912, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 4, 0) = 0x2b85864c2000 mprotect(0x2b858679e000, 2093056, PROT_NONE) = 0 mmap(0x2b858699d000, 176128, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 4, 0x2db000) = 0x2b858699d000 mmap(0x2b85869c8000, 62456, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x2b85869c8000 close(4) = 0 open("/lib64/libcom_err.so.2", O_RDONLY) = 4 read(4, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\320\n\300\2770\0\0\0"..., 832) = 832 fstat(4, 
{st_mode=S_IFREG|0755, st_size=10000, ...}) = 0 mmap(0x30bfc00000, 2103048, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 4, 0) = 0x30bfc00000 mprotect(0x30bfc02000, 2093056, PROT_NONE) = 0 mmap(0x30bfe01000, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 4, 0x1000) = 0x30bfe01000 close(4) = 0 open("/lib64/libkeyutils.so.1", O_RDONLY) = 4 read(4, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0`\n@\2760\0\0\0"..., 832) = 832 fstat(4, {st_mode=S_IFREG|0755, st_size=9472, ...}) = 0 mmap(0x30be400000, 2102416, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 4, 0) = 0x30be400000 mprotect(0x30be402000, 2093056, PROT_NONE) = 0 mmap(0x30be601000, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 4, 0x1000) = 0x30be601000 close(4) = 0 open("/lib64/libresolv.so.2", O_RDONLY) = 4 read(4, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\2402\0\2760\0\0\0"..., 832) = 832 fstat(4, {st_mode=S_IFREG|0755, st_size=92736, ...}) = 0 mmap(0x30be000000, 2181864, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 4, 0) = 0x30be000000 mprotect(0x30be011000, 2097152, PROT_NONE) = 0 mmap(0x30be211000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 4, 0x11000) = 0x30be211000 mmap(0x30be213000, 6888, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x30be213000 close(4) = 0 mprotect(0x30be211000, 4096, PROT_READ) = 0 munmap(0x2b85862a5000, 71746) = 0 rt_sigaction(SIGPIPE, {0x1, [], SA_RESTORER, 0x30ba8302d0}, {SIG_DFL, [], 0}, 8) = 0 geteuid() = 0 futex(0x2b85869c7708, FUTEX_WAKE_PRIVATE, 2147483647) = 0 open("/etc/ldap.conf", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=9119, ...}) = 0 fstat(4, {st_mode=S_IFREG|0644, st_size=9119, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "# @(#)$Id: ldap.conf,v 1.38 2006"..., 4096) = 4096 read(4, "Use the OpenLDAP password change"..., 4096) = 4096 read(4, " OpenLDAP 2.0 and earlier is \"no"..., 4096) = 927 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 uname({sys="Linux", node="massive.answeron.com", ...}) = 0 open("/etc/resolv.conf", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=107, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "; generated by /sbin/dhclient-sc"..., 4096) = 107 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 4 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(4, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(4) = 0 socket(PF_FILE, SOCK_STREAM, 0) = 4 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(4, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory) close(4) = 0 open("/etc/host.conf", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=17, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "order hosts,bind\n", 4096) = 17 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 futex(0x30bab54d44, FUTEX_WAKE_PRIVATE, 2147483647) = 0 open("/etc/hosts", O_RDONLY) = 4 fcntl(4, F_GETFD) = 0 fcntl(4, F_SETFD, FD_CLOEXEC) = 0 fstat(4, {st_mode=S_IFREG|0644, st_size=187, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "# Do not remove the following li"..., 4096) = 187 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 
open("/etc/ld.so.cache", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=71746, ...}) = 0 mmap(NULL, 71746, PROT_READ, MAP_PRIVATE, 4, 0) = 0x2b85862a5000 close(4) = 0 open("/lib64/libnss_dns.so.2", O_RDONLY) = 4 read(4, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\340\17\0\0\0\0\0\0"..., 832) = 832 fstat(4, {st_mode=S_IFREG|0755, st_size=23736, ...}) = 0 mmap(NULL, 2113792, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 4, 0) = 0x2b85869d8000 mprotect(0x2b85869dc000, 2093056, PROT_NONE) = 0 mmap(0x2b8586bdb000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 4, 0x3000) = 0x2b8586bdb000 close(4) = 0 mprotect(0x2b8586bdb000, 4096, PROT_READ) = 0 munmap(0x2b85862a5000, 71746) = 0 socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 4 connect(4, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, 28) = 0 fcntl(4, F_GETFL) = 0x2 (flags O_RDWR) fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 gettimeofday({1276265920, 823870}, NULL) = 0 poll([{fd=4, events=POLLOUT}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}]) sendto(4, "C\v\1\0\0\1\0\0\0\0\0\0\7massive\10answeron\3co"..., 38, MSG_NOSIGNAL, NULL, 0) = 38 poll([{fd=4, events=POLLIN}], 1, 5000) = 1 ([{fd=4, revents=POLLIN}]) ioctl(4, FIONREAD, [122]) = 0 recvfrom(4, "C\v\205\200\0\1\0\1\0\2\0\2\7massive\10answeron\3co"..., 1024, 0, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, [16]) = 122 close(4) = 0 open("/etc/openldap/ldap.conf", O_RDONLY) = 4 fstat(4, {st_mode=S_IFREG|0644, st_size=335, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "#\n# LDAP Defaults\n#\n\n# See ldap."..., 4096) = 335 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 getuid() = 0 geteuid() = 0 getgid() = 0 getegid() = 0 open("/root/ldaprc", O_RDONLY) = -1 ENOENT (No such file or directory) open("/root/.ldaprc", O_RDONLY) = -1 ENOENT (No such file or directory) stat("/etc/ldap.conf", {st_mode=S_IFREG|0644, st_size=9119, ...}) = 0 geteuid() = 0 brk(0x1c566000) = 0x1c566000 open("/etc/hosts", O_RDONLY) = 4 fcntl(4, F_GETFD) = 0 fcntl(4, F_SETFD, FD_CLOEXEC) = 0 fstat(4, {st_mode=S_IFREG|0644, st_size=187, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "# Do not remove the following li"..., 4096) = 187 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 open("/etc/hosts", O_RDONLY) = 4 fcntl(4, F_GETFD) = 0 fcntl(4, F_SETFD, FD_CLOEXEC) = 0 fstat(4, {st_mode=S_IFREG|0644, st_size=187, ...}) = 0 mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b85862a5000 read(4, "# Do not remove the following li"..., 4096) = 187 read(4, "", 4096) = 0 close(4) = 0 munmap(0x2b85862a5000, 4096) = 0 socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 4 connect(4, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, 28) = 0 fcntl(4, F_GETFL) = 0x2 (flags O_RDWR) fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 gettimeofday({1276265920, 855948}, NULL) = 0 poll([{fd=4, events=POLLOUT}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}]) sendto(4, "\32 \1\0\0\1\0\0\0\0\0\0\4ldap\10answeron\3com\0\0"..., 35, MSG_NOSIGNAL, NULL, 0) = 35 poll([{fd=4, events=POLLIN}], 1, 5000) = 1 ([{fd=4, revents=POLLIN}]) ioctl(4, FIONREAD, [104]) = 0 recvfrom(4, "\32 \205\200\0\1\0\1\0\1\0\0\4ldap\10answeron\3com\0\0"..., 1024, 0, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, [16]) = 104 close(4) = 0 socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 4 connect(4, 
{sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, 28) = 0 fcntl(4, F_GETFL) = 0x2 (flags O_RDWR) fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 gettimeofday({1276265920, 858536}, NULL) = 0 poll([{fd=4, events=POLLOUT}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}]) sendto(4, "I\375\1\0\0\1\0\0\0\0\0\0\4ldap\10answeron\3com\0\0"..., 35, MSG_NOSIGNAL, NULL, 0) = 35 poll([{fd=4, events=POLLIN}], 1, 5000) = 1 ([{fd=4, revents=POLLIN}]) ioctl(4, FIONREAD, [139]) = 0 recvfrom(4, "I\375\205\200\0\1\0\2\0\2\0\2\4ldap\10answeron\3com\0\0"..., 1024, 0, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("192.168.10.20")}, [16]) = 139 close(4) = 0 socket(PF_INET, SOCK_STREAM, IPPROTO_IP) = 4 fcntl(4, F_SETFD, FD_CLOEXEC) = 0 setsockopt(4, SOL_SOCKET, SO_KEEPALIVE, [1], 4) = 0 setsockopt(4, SOL_TCP, TCP_NODELAY, [1], 4) = 0 fcntl(4, F_GETFL) = 0x2 (flags O_RDWR) fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 connect(4, {sa_family=AF_INET, sin_port=htons(389), sin_addr=inet_addr("10.20.0.30")}, 16) = -1 EINPROGRESS (Operation now in progress) poll([{fd=4, events=POLLOUT|POLLERR|POLLHUP}], 1, 120000 And thats where it stops, right there after that last 120000.... Using strace, I can obviously CTRL+C to keep going. But like I said, normally the terminal completely freezes. Anyone have any clues?
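For what it's worth, the tail of that strace is suggestive: ls loads /lib64/libnss_ldap.so.2, reads /etc/ldap.conf, and then blocks in poll() waiting on a TCP connection to port 389 (LDAP) at 10.20.0.30. ls -l has to translate each file's numeric owner and group into names, and if /etc/nsswitch.conf routes passwd/group lookups through LDAP, an unreachable LDAP server stalls the listing while plain ls (no lookups) stays fast. A quick check and a possible workaround, sketched under that assumption (the paths and the ldap entry come from the trace; the timeout settings are illustrative and depend on the nss_ldap build):

    # Numeric IDs skip the name lookup entirely; if this returns instantly,
    # NSS/LDAP is the bottleneck rather than the filesystem.
    ls -ln /tmp

    # See whether passwd/group lookups go through LDAP:
    grep -E '^(passwd|group):' /etc/nsswitch.conf
    #   e.g.  passwd: files ldap
    #         group:  files ldap

    # If the LDAP server really is unreachable, fix connectivity, run nscd to
    # cache lookups, or shorten the bind timeout in /etc/ldap.conf:
    #   bind_timelimit 2
    #   nss_reconnect_tries 1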

    Read the article

  • 256 Windows Azure Worker Roles, Windows Kinect and a 90's Text-Based Ray-Tracer

    - by Alan Smith
For a couple of years I have been demoing a simple render farm hosted in Windows Azure using worker roles and the Azure Storage service. At the start of the presentation I deploy an Azure application that uses 16 worker roles to render a 1,500 frame 3D ray-traced animation. At the end of the presentation, when the animation was complete, I would play the animation and delete the Azure deployment. The standing joke with the audience was that it was a “$2 demo”, as the compute charge for running the 16 instances for an hour was $1.92; factor in the bandwidth charges and it’s a couple of dollars. The point of the demo is that it highlights one of the great benefits of cloud computing: you pay for what you use, and if you need massive compute power for a short period of time, Windows Azure can work out very cost effective. The “$2 demo” was great for presenting at user groups and conferences in that it could be deployed to Azure, used to render an animation, and then removed in a one-hour session. I have always had the idea of doing something a bit more impressive with the demo, and scaling it from a “$2 demo” to a “$30 demo”. The challenge was to create a visually appealing animation in high-definition format and keep the demo time down to one hour. This article will take a run through how I achieved this.

Ray Tracing
Ray tracing, a technique for generating high quality photorealistic images, gained popularity in the 90’s with companies like Pixar creating feature-length computer animations, and also the emergence of shareware text-based ray tracers that could run on a home PC. In order to render a ray-traced image, the ray of light that would pass from the viewpoint must be tracked until it intersects with an object. At the intersection, the color, reflectiveness, transparency, and refractive index of the object are used to calculate if the ray will be reflected or refracted. Each pixel may require thousands of calculations to determine what color it will be in the rendered image.

Pin-Board Toys
Having very little artistic talent and a basic understanding of maths, I decided to focus on an animation that could be modeled fairly easily and would look visually impressive. I’ve always liked the pin-board desktop toys that became popular in the 80’s, and when I was working as a 3D animator back in the 90’s I always had the idea of creating a 3D ray-traced animation of a pin-board, but never found the energy to do it. Even if I had had a go at it, the render time to produce an animation that would look respectable on a 486 would have been measured in months.

PolyRay
Back in 1995 I landed my first real job, after spending three years being a beach-ski-climbing-paragliding-bum, and was employed to create 3D ray-traced animations for a CD-ROM that school kids would use to learn physics. I had got into the strange and wonderful world of text-based ray tracing, and was using a shareware ray-tracer called PolyRay. PolyRay takes a text file describing a scene as input and, after a few hours of processing on a 486, produces a high quality ray-traced image. The following is an example of a basic PolyRay scene file.
background Midnight_Blue   static define matte surface { ambient 0.1 diffuse 0.7 } define matte_white texture { matte { color white } } define matte_black texture { matte { color dark_slate_gray } } define position_cylindrical 3 define lookup_sawtooth 1 define light_wood <0.6, 0.24, 0.1> define median_wood <0.3, 0.12, 0.03> define dark_wood <0.05, 0.01, 0.005>     define wooden texture { noise surface { ambient 0.2  diffuse 0.7  specular white, 0.5 microfacet Reitz 10 position_fn position_cylindrical position_scale 1  lookup_fn lookup_sawtooth octaves 1 turbulence 1 color_map( [0.0, 0.2, light_wood, light_wood] [0.2, 0.3, light_wood, median_wood] [0.3, 0.4, median_wood, light_wood] [0.4, 0.7, light_wood, light_wood] [0.7, 0.8, light_wood, median_wood] [0.8, 0.9, median_wood, light_wood] [0.9, 1.0, light_wood, dark_wood]) } } define glass texture { surface { ambient 0 diffuse 0 specular 0.2 reflection white, 0.1 transmission white, 1, 1.5 }} define shiny surface { ambient 0.1 diffuse 0.6 specular white, 0.6 microfacet Phong 7  } define steely_blue texture { shiny { color black } } define chrome texture { surface { color white ambient 0.0 diffuse 0.2 specular 0.4 microfacet Phong 10 reflection 0.8 } }   viewpoint {     from <4.000, -1.000, 1.000> at <0.000, 0.000, 0.000> up <0, 1, 0> angle 60     resolution 640, 480 aspect 1.6 image_format 0 }       light <-10, 30, 20> light <-10, 30, -20>   object { disc <0, -2, 0>, <0, 1, 0>, 30 wooden }   object { sphere <0.000, 0.000, 0.000>, 1.00 chrome } object { cylinder <0.000, 0.000, 0.000>, <0.000, 0.000, -4.000>, 0.50 chrome }   After setting up the background and defining colors and textures, the viewpoint is specified. The “camera” is located at a point in 3D space, and it looks towards another point. The angle, image resolution, and aspect ratio are specified. Two lights are present in the image at defined coordinates. The three objects in the image are a wooden disc to represent a table top, and a sphere and cylinder that intersect to form a pin that will be used for the pin board toy in the final animation. When the image is rendered, the following image is produced. The pins are modeled with a chrome surface, so they reflect the environment around them. Note that the scale of the pin shaft is not correct, this will be fixed later. Modeling the Pin Board The frame of the pin-board is made up of three boxes, and six cylinders, the front box is modeled using a clear, slightly reflective solid, with the same refractive index of glass. The other shapes are modeled as metal. object { box <-5.5, -1.5, 1>, <5.5, 5.5, 1.2> glass } object { box <-5.5, -1.5, -0.04>, <5.5, 5.5, -0.09> steely_blue } object { box <-5.5, -1.5, -0.52>, <5.5, 5.5, -0.59> steely_blue } object { cylinder <-5.2, -1.2, 1.4>, <-5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, -1.2, 1.4>, <5.2, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <-5.2, 5.2, 1.4>, <-5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <5.2, 5.2, 1.4>, <5.2, 5.2, -0.74>, 0.2 steely_blue } object { cylinder <0, -1.2, 1.4>, <0, -1.2, -0.74>, 0.2 steely_blue } object { cylinder <0, 5.2, 1.4>, <0, 5.2, -0.74>, 0.2 steely_blue }   In order to create the matrix of pins that make up the pin board I used a basic console application with a few nested loops to create two intersecting matrixes of pins, which models the layout used in the pin boards. The resulting image is shown below. The pin board contains 11,481 pins, with the scene file containing 23,709 lines of code. 
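The nested-loop console application mentioned above is not listed in the article, so here is a rough sketch of the idea: a small C# program that emits PolyRay object lines for a grid of pins using the sphere/cylinder syntax shown in the scene file earlier. The grid size, spacing, radii and output path are made-up values for illustration, not the ones used for the real 11,481-pin board.

    using System;
    using System.IO;

    class PinBoardGenerator
    {
        static void Main()
        {
            // Illustrative values only; the real board uses two offset grids totalling 11,481 pins.
            const int rows = 20;
            const int cols = 20;
            const double spacing = 0.5;

            using (StreamWriter writer = new StreamWriter("pins.pi"))
            {
                for (int row = 0; row < rows; row++)
                {
                    for (int col = 0; col < cols; col++)
                    {
                        double x = (col - cols / 2.0) * spacing;
                        double y = (row - rows / 2.0) * spacing;
                        double z = 0.0; // later set per pin from a pixel in the depth image

                        // One pin = a chrome sphere for the head plus a cylinder for the shaft,
                        // using the same object syntax as the scene file above.
                        writer.WriteLine("object {{ sphere <{0:F3}, {1:F3}, {2:F3}>, 0.20 chrome }}", x, y, z);
                        writer.WriteLine("object {{ cylinder <{0:F3}, {1:F3}, {2:F3}>, <{0:F3}, {1:F3}, {3:F3}>, 0.05 chrome }}", x, y, z, z - 1.0);
                    }
                }
            }
        }
    }

The generated lines can simply be concatenated with the frame and viewpoint definitions to build each scene file.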
For the complete animation 2,000 scene files will be created, which is over 47 million lines of code. Each pin in the pin-board will slide out a specific distance when an object is pressed into the back of the board. This is easily modeled by setting the Z coordinate of the pin to a specific value. In order to set all of the pins in the pin-board to the correct position, a bitmap image can be used. The position of each pin can be set based on the color of the pixel at the appropriate position in the image. When the Windows Azure logo is used to set the Z coordinate of the pins, the following image is generated. The challenge now was to make a cool animation. The Azure logo is fine, but it is static. Using a normal video to animate the pins would not work; the colors in the video would not be the same as the depth of the objects from the camera. In order to simulate the pin board accurately, a series of frames from a depth camera could be used.

Windows Kinect
The Kinect controllers for the X-Box 360 and Windows feature a depth camera. The Kinect SDK for Windows provides a programming interface for Kinect, providing easy access for .NET developers to the Kinect sensors. The Kinect Explorer provided with the Kinect SDK is a great starting point for exploring Kinect from a developer’s perspective. Both the X-Box 360 Kinect and the Windows Kinect will work with the Kinect SDK; the Windows Kinect is required for commercial applications, but the X-Box Kinect can be used for hobby projects. The Windows Kinect has the advantage of providing a mode to allow depth capture with objects closer to the camera, which makes for a more accurate depth image for setting the pin positions.

Creating a Depth Field Animation
The depth field animation used to set the positions of the pins in the pin board was created using a modified version of the Kinect Explorer sample application. In order to simulate the pin board accurately, a small section of the depth range from the depth sensor will be used. Any part of the object in front of the depth range will result in a white pixel; anything behind the depth range will be black. Within the depth range the pixels in the image will be set to RGB values from 0,0,0 to 255,255,255. A screenshot of the modified Kinect Explorer application is shown below. The Kinect Explorer sample application was modified to include slider controls that are used to set the depth range that forms the image from the depth stream. This allows the fine-tuning of the depth image that is required for simulating the position of the pins in the pin board. The Kinect Explorer was also modified to record a series of images from the depth camera and save them as a sequence of JPEG files that will be used to animate the pins in the animation; the Start and Stop buttons are used to start and stop the image recording. An example of one of the depth images is shown below. Once a series of 2,000 depth images has been captured, the task of creating the animation can begin.

Rendering a Test Frame
In order to test the creation of frames and get an approximation of the time required to render each frame, a test frame was rendered on-premise using PolyRay. The output of the rendering process is shown below. The test frame contained 23,629 primitive shapes, most of which are the spheres and cylinders that are used for the 11,800 or so pins in the pin board.
The 1280x720 image contains 921,600 pixels, but as anti-aliasing was used the number of rays that were calculated was 4,235,777, with 3,478,754,073 object boundaries checked. The test frame of the pin board with the depth field image applied is shown below. The tracing time for the test frame was 4 minutes 27 seconds, which means rendering the 2,000 frames in the animation would take over 148 hours, or a little over 6 days. Although this is much faster than an old 486, waiting almost a week to see the results of an animation would make it challenging for animators to create, view, and refine their animations. It would be much better if the animation could be rendered in less than one hour.

Windows Azure Worker Roles
The cost of creating an on-premise render farm to render animations increases in proportion to the number of servers. The table below shows the cost of servers for creating a render farm, assuming a cost of $500 per server.

Number of Servers    Cost
1                    $500
16                   $8,000
256                  $128,000

As well as the cost of the servers, there would be additional costs for networking, racks, etc. Hosting an environment of 256 servers on-premise would require a server room with cooling, and some pretty hefty power cabling. The Windows Azure compute services provide worker roles, which are ideal for performing processor-intensive compute tasks. With the scalability available in Windows Azure, a job that takes 256 hours to complete could be performed using different numbers of worker roles. The time and cost of using 1, 16 or 256 worker roles is shown below.

Number of Worker Roles    Render Time    Cost
1                         256 hours      $30.72
16                        16 hours       $30.72
256                       1 hour         $30.72

Using worker roles in Windows Azure provides the same cost for the 256-hour job, irrespective of the number of worker roles used (256 compute hours at the $0.12 per instance-hour rate implied by the “$2 demo” figures comes to $30.72 however the hours are split across instances). Provided the compute task can be broken down into many small units, and the worker role compute power can be used effectively, it makes sense to scale the application so that the task is completed quickly, making the results available in a timely fashion. The task of rendering 2,000 frames in an animation is one that can easily be broken down into 2,000 individual pieces, which can be performed by a number of worker roles.

Creating a Render Farm in Windows Azure
The architecture of the render farm is shown in the following diagram. The render farm is a hybrid application with the following components:

· On-Premise
  o Windows Kinect – used in combination with the Kinect Explorer to create a stream of depth images.
  o Animation Creator – this application uses the depth images from the Kinect sensor to create scene description files for PolyRay. These files are then uploaded to the jobs blob container, and job messages added to the jobs queue.
  o Process Monitor – this application queries the role instance lifecycle table and displays statistics about the render farm environment and render process.
  o Image Downloader – this application polls the image queue and downloads the rendered animation files once they are complete.
· Windows Azure
  o Azure Storage – queues and blobs are used for the scene description files and completed frames. A table is used to store the statistics about the rendering environment.

The architecture of each worker role is shown below. The worker role is configured to use local storage, which provides file storage on the worker role instance that can be used by the applications to render the image and transform the format of the image.
The service definition for the worker role with the local storage configuration highlighted is shown below. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="CloudRay" >   <WorkerRole name="CloudRayWorkerRole" vmsize="Small">     <Imports>     </Imports>     <ConfigurationSettings>       <Setting name="DataConnectionString" />     </ConfigurationSettings>     <LocalResources>       <LocalStorage name="RayFolder" cleanOnRoleRecycle="true" />     </LocalResources>   </WorkerRole> </ServiceDefinition>     The two executable programs, PolyRay.exe and DTA.exe are included in the Azure project, with Copy Always set as the property. PolyRay will take the scene description file and render it to a Truevision TGA file. As the TGA format has not seen much use since the mid 90’s it is converted to a JPG image using Dave's Targa Animator, another shareware application from the 90’s. Each worker roll will use the following process to render the animation frames. 1.       The worker process polls the job queue, if a job is available the scene description file is downloaded from blob storage to local storage. 2.       PolyRay.exe is started in a process with the appropriate command line arguments to render the image as a TGA file. 3.       DTA.exe is started in a process with the appropriate command line arguments convert the TGA file to a JPG file. 4.       The JPG file is uploaded from local storage to the images blob container. 5.       A message is placed on the images queue to indicate a new image is available for download. 6.       The job message is deleted from the job queue. 7.       The role instance lifecycle table is updated with statistics on the number of frames rendered by the worker role instance, and the CPU time used. The code for this is shown below. public override void Run() {     // Set environment variables     string polyRayPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), PolyRayLocation);     string dtaPath = Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), DTALocation);       LocalResource rayStorage = RoleEnvironment.GetLocalResource("RayFolder");     string localStorageRootPath = rayStorage.RootPath;       JobQueue jobQueue = new JobQueue("renderjobs");     JobQueue downloadQueue = new JobQueue("renderimagedownloadjobs");     CloudRayBlob sceneBlob = new CloudRayBlob("scenes");     CloudRayBlob imageBlob = new CloudRayBlob("images");     RoleLifecycleDataSource roleLifecycleDataSource = new RoleLifecycleDataSource();       Frames = 0;       while (true)     {         // Get the render job from the queue         CloudQueueMessage jobMsg = jobQueue.Get();           if (jobMsg != null)         {             // Get the file details             string sceneFile = jobMsg.AsString;             string tgaFile = sceneFile.Replace(".pi", ".tga");             string jpgFile = sceneFile.Replace(".pi", ".jpg");               string sceneFilePath = Path.Combine(localStorageRootPath, sceneFile);             string tgaFilePath = Path.Combine(localStorageRootPath, tgaFile);             string jpgFilePath = Path.Combine(localStorageRootPath, jpgFile);               // Copy the scene file to local storage             sceneBlob.DownloadFile(sceneFilePath);               // Run the ray tracer.             
string polyrayArguments =                 string.Format("\"{0}\" -o \"{1}\" -a 2", sceneFilePath, tgaFilePath);             Process polyRayProcess = new Process();             polyRayProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), polyRayPath);             polyRayProcess.StartInfo.Arguments = polyrayArguments;             polyRayProcess.Start();             polyRayProcess.WaitForExit();               // Convert the image             string dtaArguments =                 string.Format(" {0} /FJ /P{1}", tgaFilePath, Path.GetDirectoryName (jpgFilePath));             Process dtaProcess = new Process();             dtaProcess.StartInfo.FileName =                 Path.Combine(Environment.GetEnvironmentVariable("RoleRoot"), dtaPath);             dtaProcess.StartInfo.Arguments = dtaArguments;             dtaProcess.Start();             dtaProcess.WaitForExit();               // Upload the image to blob storage             imageBlob.UploadFile(jpgFilePath);               // Add a download job.             downloadQueue.Add(jpgFile);               // Delete the render job message             jobQueue.Delete(jobMsg);               Frames++;         }         else         {             Thread.Sleep(1000);         }           // Log the worker role activity.         roleLifecycleDataSource.Alive             ("CloudRayWorker", RoleLifecycleDataSource.RoleLifecycleId, Frames);     } }     Monitoring Worker Role Instance Lifecycle In order to get more accurate statistics about the lifecycle of the worker role instances used to render the animation data was tracked in an Azure storage table. The following class was used to track the worker role lifecycles in Azure storage.   public class RoleLifecycle : TableServiceEntity {     public string ServerName { get; set; }     public string Status { get; set; }     public DateTime StartTime { get; set; }     public DateTime EndTime { get; set; }     public long SecondsRunning { get; set; }     public DateTime LastActiveTime { get; set; }     public int Frames { get; set; }     public string Comment { get; set; }       public RoleLifecycle()     {     }       public RoleLifecycle(string roleName)     {         PartitionKey = roleName;         RowKey = Utils.GetAscendingRowKey();         Status = "Started";         StartTime = DateTime.UtcNow;         LastActiveTime = StartTime;         EndTime = StartTime;         SecondsRunning = 0;         Frames = 0;     } }     A new instance of this class is created and added to the storage table when the role starts. It is then updated each time the worker renders a frame to record the total number of frames rendered and the total processing time. These statistics are used be the monitoring application to determine the effectiveness of use of resources in the render farm. Rendering the Animation The Azure solution was deployed to Windows Azure with the service configuration set to 16 worker role instances. This allows for the application to be tested in the cloud environment, and the performance of the application determined. When I demo the application at conferences and user groups I often start with 16 instances, and then scale up the application to the full 256 instances. The configuration to run 16 instances is shown below. 
<?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">   <Role name="CloudRayWorkerRole">     <Instances count="16" />     <ConfigurationSettings>       <Setting name="DataConnectionString"         value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />     </ConfigurationSettings>   </Role> </ServiceConfiguration>     About six minutes after deploying the application the first worker roles become active and start to render the first frames of the animation. The CloudRay Monitor application displays an icon for each worker role instance, with a number indicating the number of frames that the worker role has rendered. The statistics on the left show the number of active worker roles and statistics about the render process. The render time is the time since the first worker role became active; the CPU time is the total amount of processing time used by all worker role instances to render the frames.   Five minutes after the first worker role became active the last of the 16 worker roles activated. By this time the first seven worker roles had each rendered one frame of the animation.   With 16 worker roles u and running it can be seen that one hour and 45 minutes CPU time has been used to render 32 frames with a render time of just under 10 minutes.     At this rate it would take over 10 hours to render the 2,000 frames of the full animation. In order to complete the animation in under an hour more processing power will be required. Scaling the render farm from 16 instances to 256 instances is easy using the new management portal. The slider is set to 256 instances, and the configuration saved. We do not need to re-deploy the application, and the 16 instances that are up and running will not be affected. Alternatively, the configuration file for the Azure service could be modified to specify 256 instances.   <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="CloudRay" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*">   <Role name="CloudRayWorkerRole">     <Instances count="256" />     <ConfigurationSettings>       <Setting name="DataConnectionString"         value="DefaultEndpointsProtocol=https;AccountName=cloudraydata;AccountKey=..." />     </ConfigurationSettings>   </Role> </ServiceConfiguration>     Six minutes after the new configuration has been applied 75 new worker roles have activated and are processing their first frames.   Five minutes later the full configuration of 256 worker roles is up and running. We can see that the average rate of frame rendering has increased from 3 to 12 frames per minute, and that over 17 hours of CPU time has been utilized in 23 minutes. In this test the time to provision 140 worker roles was about 11 minutes, which works out at about one every five seconds.   We are now half way through the rendering, with 1,000 frames complete. This has utilized just under three days of CPU time in a little over 35 minutes.   The animation is now complete, with 2,000 frames rendered in a little over 52 minutes. The CPU time used by the 256 worker roles is 6 days, 7 hours and 22 minutes with an average frame rate of 38 frames per minute. The rendering of the last 1,000 frames took 16 minutes 27 seconds, which works out at a rendering rate of 60 frames per minute. 
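As a sanity check on those throughput figures, the per-frame render time and frame count quoted earlier are enough to estimate the totals. The small calculation below simply reworks the article's own numbers (4 minutes 27 seconds per frame, 2,000 frames, 1/16/256 instances); it is not part of the CloudRay code, and it ignores provisioning time and queue overhead.

    using System;

    class RenderFarmEstimate
    {
        static void Main()
        {
            // Figures quoted in the article.
            TimeSpan perFrame = TimeSpan.FromSeconds(4 * 60 + 27); // 4 min 27 s per frame
            const int frames = 2000;

            TimeSpan totalCpu = TimeSpan.FromSeconds(perFrame.TotalSeconds * frames);
            Console.WriteLine("Total CPU time: {0:F1} hours", totalCpu.TotalHours); // ~148.3 hours

            foreach (int roles in new[] { 1, 16, 256 })
            {
                // Ideal wall-clock time if the queue keeps every role busy.
                double hours = totalCpu.TotalHours / roles;
                Console.WriteLine("{0,3} roles -> ~{1:F1} hours", roles, hours);
            }
        }
    }

With 256 roles the ideal figure is roughly 35 minutes of rendering, which is consistent with the observed 52 minutes once the staggered start-up of the instances is taken into account.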
The frame counts in the server instances indicate that the use of a queue to distribute the workload has been very effective in spreading the load across the 256 worker role instances. The 16 instances that were deployed first have rendered between 11 and 13 frames each, whilst the 240 instances that were added when the application was scaled have rendered between 6 and 9 frames each.

Completed Animation
I’ve uploaded the completed animation to YouTube; a low-resolution preview is shown below.
Pin Board Animation Created using Windows Kinect and 256 Windows Azure Worker Roles
The animation can be viewed in 1280x720 resolution at the following link: http://www.youtube.com/watch?v=n5jy6bvSxWc

Effective Use of Resources
According to the CloudRay monitor statistics the animation took 6 days, 7 hours and 22 minutes of CPU time to render; this works out at 152 hours of compute time, rounded up to the nearest hour. As worker role instances are billed for the full hour, it may have been possible to render the animation using fewer than 256 worker roles. When deciding the optimal usage of resources, the time required to provision and start the worker roles must also be considered. In the demo I started with 16 worker roles, and then scaled the application to 256 worker roles. It would have been more optimal to start the application with maybe 200 worker roles, and utilize the full hour that I was being billed for. This would, however, have prevented showing the ease of scalability of the application. The new management portal displays the CPU usage across the worker roles in the deployment. The average CPU usage across all instances is 93.27%, with over 99% used when all the instances are up and running. This shows that the worker role resources are being used very effectively.

Grid Computing Scenarios
Although I am using this scenario for a hobby project, there are many scenarios where a large amount of compute power is required for a short period of time. Windows Azure provides a great platform for developing these types of grid computing applications, and can work out very cost effective.
· Windows Azure can provide massive compute power, on demand, in a matter of minutes.
· The use of queues to manage the load balancing of jobs between role instances is a simple and effective solution.
· Using a cloud-computing platform like Windows Azure allows proof-of-concept scenarios to be tested and evaluated on a very low budget.
· No charges for inbound data transfer make the uploading of large data sets to Windows Azure Storage services cost effective. (Transaction charges still apply.)

Tips for using Windows Azure for Grid Computing Scenarios
I found the implementation of a render farm using Windows Azure a fairly simple scenario to implement. I was impressed by the ease of scalability that Azure provides, and by the short time that the application took to scale from 16 to 256 worker role instances. In this case it was around 13 minutes; in other tests it took between 10 and 20 minutes. The following tips may be useful when implementing a grid computing project in Windows Azure.
· Using an Azure Storage queue to load-balance the units of work across multiple worker roles is simple and very effective. The design I have used in this scenario could easily scale to many thousands of worker role instances.
· Windows Azure accounts are typically limited to 20 cores. If you need to use more than this, a call to support and a credit card check will be required.
· Be aware of how the billing model works. You will be charged for worker role instances for the full clock hour in which the instance is deployed. Schedule the workload to start just after the clock hour has started.
· Monitor the utilization of the resources you are provisioning, and ensure that you are not paying for worker roles that are idle.
· If you are deploying third-party applications to worker roles, you may well run into licensing issues. Purchasing software licenses on a per-processor basis when using hundreds of processors for a short time period would not be cost effective.
· Third-party software may also require installation onto the worker roles, which can be accomplished using start-up tasks. Bear in mind that adding a start-up task and a possible reboot will add to the time required for the worker role instance to start and activate. An alternative may be to use a prepared VM and use VM roles.
· Consider using the Windows Azure Autoscaling Application Block (WASABi) to autoscale the worker roles in your application. When using a large number of worker roles, the utilization must be carefully monitored; if the scaling algorithms are not optimal it could get very expensive!
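To make the queue-based load-balancing tip concrete, here is a stripped-down sketch of the polling loop pattern the article describes, written against the classic Windows Azure storage client of that era (Microsoft.WindowsAzure.StorageClient). The connection string, queue name and the RenderFrame call are placeholders for whatever work the role performs; the CloudRay wrapper classes shown earlier encapsulate the same calls.

    using System;
    using System.Threading;
    using Microsoft.WindowsAzure;
    using Microsoft.WindowsAzure.StorageClient;

    class QueueWorker
    {
        static void Main()
        {
            // Connection string and queue name are illustrative placeholders.
            CloudStorageAccount account = CloudStorageAccount.Parse(
                "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...");
            CloudQueue jobQueue = account.CreateCloudQueueClient().GetQueueReference("renderjobs");
            jobQueue.CreateIfNotExist();

            while (true)
            {
                // Each message names one unit of work (here, one scene file to render).
                CloudQueueMessage message = jobQueue.GetMessage();
                if (message == null)
                {
                    Thread.Sleep(1000); // queue empty, back off briefly
                    continue;
                }

                RenderFrame(message.AsString);   // placeholder for the real work
                jobQueue.DeleteMessage(message); // only delete once the work has succeeded
            }
        }

        static void RenderFrame(string sceneFile)
        {
            Console.WriteLine("Rendering {0}", sceneFile);
        }
    }

Because a message only disappears from the queue when it is explicitly deleted, a role instance that dies mid-frame simply lets the message become visible again for another instance to pick up, which is what makes this pattern scale so cleanly.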

    Read the article

  • Disable .htaccess from apache allowoverride none, still reads .htaccess files

    - by John Magnolia
    I have moved all of our .htaccess config into <Directory> blocks and set AllowOverride None in the default and default-ssl. Although after restarting apache it is still reading the .htaccess files. How can I completely turn off reading these files? Update of all files with "AllowOverride" /etc/apache2/mods-available/userdir.conf <IfModule mod_userdir.c> UserDir public_html UserDir disabled root <Directory /home/*/public_html> AllowOverride FileInfo AuthConfig Limit Indexes Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec <Limit GET POST OPTIONS> Order allow,deny Allow from all </Limit> <LimitExcept GET POST OPTIONS> Order deny,allow Deny from all </LimitExcept> </Directory> </IfModule> /etc/apache2/mods-available/alias.conf <IfModule alias_module> # # Aliases: Add here as many aliases as you need (with no limit). The format is # Alias fakename realname # # Note that if you include a trailing / on fakename then the server will # require it to be present in the URL. So "/icons" isn't aliased in this # example, only "/icons/". If the fakename is slash-terminated, then the # realname must also be slash terminated, and if the fakename omits the # trailing slash, the realname must also omit it. # # We include the /icons/ alias for FancyIndexed directory listings. If # you do not use FancyIndexing, you may comment this out. # Alias /icons/ "/usr/share/apache2/icons/" <Directory "/usr/share/apache2/icons"> Options Indexes MultiViews AllowOverride None Order allow,deny Allow from all </Directory> </IfModule> /etc/apache2/httpd.conf # # Directives to allow use of AWStats as a CGI # Alias /awstatsclasses "/usr/share/doc/awstats/examples/wwwroot/classes/" Alias /awstatscss "/usr/share/doc/awstats/examples/wwwroot/css/" Alias /awstatsicons "/usr/share/doc/awstats/examples/wwwroot/icon/" ScriptAlias /awstats/ "/usr/share/doc/awstats/examples/wwwroot/cgi-bin/" # # This is to permit URL access to scripts/files in AWStats directory. # <Directory "/usr/share/doc/awstats/examples/wwwroot"> Options None AllowOverride None Order allow,deny Allow from all </Directory> Alias /awstats-icon/ /usr/share/awstats/icon/ <Directory /usr/share/awstats/icon> Options None AllowOverride None Order allow,deny Allow from all </Directory> /etc/apache2/sites-available/default-ssl <IfModule mod_ssl.c> <VirtualHost _default_:443> ServerAdmin webmaster@localhost DocumentRoot /var/www <Directory /> Options FollowSymLinks AllowOverride None </Directory> <Directory /var/www/> Options Indexes FollowSymLinks MultiViews AllowOverride None </Directory> ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ <Directory "/usr/lib/cgi-bin"> AllowOverride None Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch Order allow,deny Allow from all </Directory> ErrorLog ${APACHE_LOG_DIR}/error.log # Possible values include: debug, info, notice, warn, error, crit, # alert, emerg. LogLevel warn CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined # SSL Engine Switch: # Enable/Disable SSL for this virtual host. SSLEngine on # A self-signed (snakeoil) certificate can be created by installing # the ssl-cert package. See # /usr/share/doc/apache2.2-common/README.Debian.gz for more info. # If both key and certificate are stored in the same file, only the # SSLCertificateFile directive is needed. 
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key # Server Certificate Chain: # Point SSLCertificateChainFile at a file containing the # concatenation of PEM encoded CA certificates which form the # certificate chain for the server certificate. Alternatively # the referenced file can be the same as SSLCertificateFile # when the CA certificates are directly appended to the server # certificate for convinience. #SSLCertificateChainFile /etc/apache2/ssl.crt/server-ca.crt # Certificate Authority (CA): # Set the CA certificate verification path where to find CA # certificates for client authentication or alternatively one # huge file containing all of them (file must be PEM encoded) # Note: Inside SSLCACertificatePath you need hash symlinks # to point to the certificate files. Use the provided # Makefile to update the hash symlinks after changes. #SSLCACertificatePath /etc/ssl/certs/ #SSLCACertificateFile /etc/apache2/ssl.crt/ca-bundle.crt # Certificate Revocation Lists (CRL): # Set the CA revocation path where to find CA CRLs for client # authentication or alternatively one huge file containing all # of them (file must be PEM encoded) # Note: Inside SSLCARevocationPath you need hash symlinks # to point to the certificate files. Use the provided # Makefile to update the hash symlinks after changes. #SSLCARevocationPath /etc/apache2/ssl.crl/ #SSLCARevocationFile /etc/apache2/ssl.crl/ca-bundle.crl # Client Authentication (Type): # Client certificate verification type and depth. Types are # none, optional, require and optional_no_ca. Depth is a # number which specifies how deeply to verify the certificate # issuer chain before deciding the certificate is not valid. #SSLVerifyClient require #SSLVerifyDepth 10 # Access Control: # With SSLRequire you can do per-directory access control based # on arbitrary complex boolean expressions containing server # variable checks and other lookup directives. The syntax is a # mixture between C and Perl. See the mod_ssl documentation # for more details. #<Location /> #SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \ # and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \ # and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \ # and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \ # and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \ # or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/ #</Location> # SSL Engine Options: # Set various options for the SSL engine. # o FakeBasicAuth: # Translate the client X.509 into a Basic Authorisation. This means that # the standard Auth/DBMAuth methods can be used for access control. The # user name is the `one line' version of the client's X.509 certificate. # Note that no password is obtained from the user. Every entry in the user # file needs this password: `xxj31ZMTZzkVA'. # o ExportCertData: # This exports two additional environment variables: SSL_CLIENT_CERT and # SSL_SERVER_CERT. These contain the PEM-encoded certificates of the # server (always existing) and the client (only existing when client # authentication is used). This can be used to import the certificates # into CGI scripts. # o StdEnvVars: # This exports the standard SSL/TLS related `SSL_*' environment variables. # Per default this exportation is switched off for performance reasons, # because the extraction step is an expensive operation and is usually # useless for serving static content. So one usually enables the # exportation for CGI and SSI requests only. 
# o StrictRequire: # This denies access when "SSLRequireSSL" or "SSLRequire" applied even # under a "Satisfy any" situation, i.e. when it applies access is denied # and no other module can change it. # o OptRenegotiate: # This enables optimized SSL connection renegotiation handling when SSL # directives are used in per-directory context. #SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire <FilesMatch "\.(cgi|shtml|phtml|php)$"> SSLOptions +StdEnvVars </FilesMatch> <Directory /usr/lib/cgi-bin> SSLOptions +StdEnvVars </Directory> # SSL Protocol Adjustments: # The safe and default but still SSL/TLS standard compliant shutdown # approach is that mod_ssl sends the close notify alert but doesn't wait for # the close notify alert from client. When you need a different shutdown # approach you can use one of the following variables: # o ssl-unclean-shutdown: # This forces an unclean shutdown when the connection is closed, i.e. no # SSL close notify alert is send or allowed to received. This violates # the SSL/TLS standard but is needed for some brain-dead browsers. Use # this when you receive I/O errors because of the standard approach where # mod_ssl sends the close notify alert. # o ssl-accurate-shutdown: # This forces an accurate shutdown when the connection is closed, i.e. a # SSL close notify alert is send and mod_ssl waits for the close notify # alert of the client. This is 100% SSL/TLS standard compliant, but in # practice often causes hanging connections with brain-dead browsers. Use # this only for browsers where you know that their SSL implementation # works correctly. # Notice: Most problems of broken clients are also related to the HTTP # keep-alive facility, so you usually additionally want to disable # keep-alive for those clients, too. Use variable "nokeepalive" for this. # Similarly, one has to force some clients to use HTTP/1.0 to workaround # their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and # "force-response-1.0" for this. BrowserMatch "MSIE [2-6]" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 # MSIE 7 and newer should be able to use keepalive BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown </VirtualHost> </IfModule> /etc/apache2/sites-available/default <VirtualHost *:80> ServerAdmin webmaster@localhost DocumentRoot /var/www <Directory /> Options FollowSymLinks AllowOverride None </Directory> <Directory /var/www/> Options -Indexes FollowSymLinks MultiViews AllowOverride None Order allow,deny allow from all </Directory> ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ <Directory "/usr/lib/cgi-bin"> AllowOverride None Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch Order allow,deny Allow from all </Directory> Alias /delboy /usr/share/phpmyadmin <Directory /usr/share/phpmyadmin> # Restrict phpmyadmin access Order Deny,Allow Allow from all </Directory> ErrorLog ${APACHE_LOG_DIR}/error.log # Possible values include: debug, info, notice, warn, error, crit, # alert, emerg. LogLevel warn CustomLog ${APACHE_LOG_DIR}/access.log combined Alias /doc/ "/usr/share/doc/" <Directory "/usr/share/doc/"> Options Indexes MultiViews FollowSymLinks AllowOverride None Order deny,allow Deny from all Allow from 127.0.0.0/255.0.0.0 ::1/128 </Directory> </VirtualHost> /etc/apache2/conf.d/security # # Disable access to the entire file system except for the directories that # are explicitly allowed later. # # This currently breaks the configurations that come with some web application # Debian packages. 
# #<Directory /> # AllowOverride None # Order Deny,Allow # Deny from all #</Directory> # Changing the following options will not really affect the security of the # server, but might make attacks slightly more difficult in some cases. # # ServerTokens # This directive configures what you return as the Server HTTP response # Header. The default is 'Full' which sends information about the OS-Type # and compiled in modules. # Set to one of: Full | OS | Minimal | Minor | Major | Prod # where Full conveys the most information, and Prod the least. # #ServerTokens Minimal ServerTokens OS #ServerTokens Full # # Optionally add a line containing the server version and virtual host # name to server-generated pages (internal error documents, FTP directory # listings, mod_status and mod_info output etc., but not CGI generated # documents or custom error documents). # Set to "EMail" to also include a mailto: link to the ServerAdmin. # Set to one of: On | Off | EMail # #ServerSignature Off ServerSignature On # # Allow TRACE method # # Set to "extended" to also reflect the request body (only for testing and # diagnostic purposes). # # Set to one of: On | Off | extended # TraceEnable Off #TraceEnable On /etc/apache2/apache2.conf # # Based upon the NCSA server configuration files originally by Rob McCool. # # This is the main Apache server configuration file. It contains the # configuration directives that give the server its instructions. # See http://httpd.apache.org/docs/2.2/ for detailed information about # the directives. # # Do NOT simply read the instructions in here without understanding # what they do. They're here only as hints or reminders. If you are unsure # consult the online docs. You have been warned. # # The configuration directives are grouped into three basic sections: # 1. Directives that control the operation of the Apache server process as a # whole (the 'global environment'). # 2. Directives that define the parameters of the 'main' or 'default' server, # which responds to requests that aren't handled by a virtual host. # These directives also provide default values for the settings # of all virtual hosts. # 3. Settings for virtual hosts, which allow Web requests to be sent to # different IP addresses or hostnames and have them handled by the # same Apache server process. # # Configuration and logfile names: If the filenames you specify for many # of the server's control files begin with "/" (or "drive:/" for Win32), the # server will use that explicit path. If the filenames do *not* begin # with "/", the value of ServerRoot is prepended -- so "foo.log" # with ServerRoot set to "/etc/apache2" will be interpreted by the # server as "/etc/apache2/foo.log". # ### Section 1: Global Environment # # The directives in this section affect the overall operation of Apache, # such as the number of concurrent requests it can handle or where it # can find its configuration files. # # # ServerRoot: The top of the directory tree under which the server's # configuration, error, and log files are kept. # # NOTE! If you intend to place this on an NFS (or otherwise network) # mounted filesystem then please read the LockFile documentation (available # at <URL:http://httpd.apache.org/docs/2.2/mod/mpm_common.html#lockfile>); # you will save yourself a lot of trouble. # # Do NOT add a slash at the end of the directory path. # #ServerRoot "/etc/apache2" # # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. 
# LockFile ${APACHE_LOCK_DIR}/accept.lock # # PidFile: The file in which the server should record its process # identification number when it starts. # This needs to be set in /etc/apache2/envvars # PidFile ${APACHE_PID_FILE} # # Timeout: The number of seconds before receives and sends time out. # Timeout 300 # # KeepAlive: Whether or not to allow persistent connections (more than # one request per connection). Set to "Off" to deactivate. # KeepAlive On # # MaxKeepAliveRequests: The maximum number of requests to allow # during a persistent connection. Set to 0 to allow an unlimited amount. # We recommend you leave this number high, for maximum performance. # MaxKeepAliveRequests 100 # # KeepAliveTimeout: Number of seconds to wait for the next request from the # same client on the same connection. # KeepAliveTimeout 4 ## ## Server-Pool Size Regulation (MPM specific) ## # prefork MPM # StartServers: number of server processes to start # MinSpareServers: minimum number of server processes which are kept spare # MaxSpareServers: maximum number of server processes which are kept spare # MaxClients: maximum number of server processes allowed to start # MaxRequestsPerChild: maximum number of requests a server process serves <IfModule mpm_prefork_module> StartServers 5 MinSpareServers 5 MaxSpareServers 10 MaxClients 150 MaxRequestsPerChild 500 </IfModule> # worker MPM # StartServers: initial number of server processes to start # MaxClients: maximum number of simultaneous client connections # MinSpareThreads: minimum number of worker threads which are kept spare # MaxSpareThreads: maximum number of worker threads which are kept spare # ThreadLimit: ThreadsPerChild can be changed to this maximum value during a # graceful restart. ThreadLimit can only be changed by stopping # and starting Apache. # ThreadsPerChild: constant number of worker threads in each server process # MaxRequestsPerChild: maximum number of requests a server process serves <IfModule mpm_worker_module> StartServers 2 MinSpareThreads 25 MaxSpareThreads 75 ThreadLimit 64 ThreadsPerChild 25 MaxClients 150 MaxRequestsPerChild 0 </IfModule> # event MPM # StartServers: initial number of server processes to start # MaxClients: maximum number of simultaneous client connections # MinSpareThreads: minimum number of worker threads which are kept spare # MaxSpareThreads: maximum number of worker threads which are kept spare # ThreadsPerChild: constant number of worker threads in each server process # MaxRequestsPerChild: maximum number of requests a server process serves <IfModule mpm_event_module> StartServers 2 MaxClients 150 MinSpareThreads 25 MaxSpareThreads 75 ThreadLimit 64 ThreadsPerChild 25 MaxRequestsPerChild 0 </IfModule> # These need to be set in /etc/apache2/envvars User ${APACHE_RUN_USER} Group ${APACHE_RUN_GROUP} # # AccessFileName: The name of the file to look for in each directory # for additional configuration directives. See also the AllowOverride # directive. # AccessFileName .htaccess # # The following lines prevent .htaccess and .htpasswd files from being # viewed by Web clients. # <Files ~ "^\.ht"> Order allow,deny Deny from all Satisfy all </Files> # # DefaultType is the default MIME type the server will use for a document # if it cannot otherwise determine one, such as from filename extensions. # If your server contains mostly text or HTML documents, "text/plain" is # a good value. 
If most of your content is binary, such as applications # or images, you may want to use "application/octet-stream" instead to # keep browsers from trying to display binary files as though they are # text. # DefaultType text/plain # # HostnameLookups: Log the names of clients or just their IP addresses # e.g., www.apache.org (on) or 204.62.129.132 (off). # The default is off because it'd be overall better for the net if people # had to knowingly turn this feature on, since enabling it means that # each client request will result in AT LEAST one lookup request to the # nameserver. # HostnameLookups Off # ErrorLog: The location of the error log file. # If you do not specify an ErrorLog directive within a <VirtualHost> # container, error messages relating to that virtual host will be # logged here. If you *do* define an error logfile for a <VirtualHost> # container, that host's errors will be logged there and not here. # ErrorLog ${APACHE_LOG_DIR}/error.log # # LogLevel: Control the number of messages logged to the error_log. # Possible values include: debug, info, notice, warn, error, crit, # alert, emerg. # LogLevel warn # Include module configuration: Include mods-enabled/*.load Include mods-enabled/*.conf # Include all the user configurations: Include httpd.conf # Include ports listing Include ports.conf # # The following directives define some format nicknames for use with # a CustomLog directive (see below). # If you are behind a reverse proxy, you might want to change %h into %{X-Forwarded-For}i # LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined LogFormat "%h %l %u %t \"%r\" %>s %O" common LogFormat "%{Referer}i -> %U" referer LogFormat "%{User-agent}i" agent # Include of directories ignores editors' and dpkg's backup files, # see README.Debian for details. # Include generic snippets of statements Include conf.d/ # Include the virtual host configurations: Include sites-enabled/
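    For reference, a minimal sketch of the usual way to switch .htaccess processing off globally on a Debian/Ubuntu-style Apache 2.2 layout like the one above. The file paths, the grep check and the restart command are assumptions based on the configs shown, not a verified answer for this particular server:
    # e.g. in /etc/apache2/apache2.conf (or a snippet included from conf.d/)
    <Directory />
        AllowOverride None
    </Directory>
    # AllowOverride is evaluated per matching <Directory> block, so any more specific
    # block that still grants overrides re-enables .htaccess for that subtree - for
    # example the userdir.conf block above still has
    # "AllowOverride FileInfo AuthConfig Limit Indexes". Each such block needs
    # AllowOverride None as well.
    #
    # Find remaining overrides, check syntax and restart:
    #   grep -Rn "AllowOverride" /etc/apache2/ | grep -v None
    #   apache2ctl configtest && /etc/init.d/apache2 restart
    Apache only reads .htaccess for directories whose effective AllowOverride is something other than None, so once every applicable block says None the files are ignored and do not need to be deleted.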

    Read the article

  • Part 2 – Load Testing In The Cloud

    - by Tarun Arora
    Welcome to Part 2. In Part 1 we discussed the advantages of creating a Test Rig in the cloud, the Azure edge and the Test Rig topology we want to get to. In Part 2, let's start by understanding the components of Azure we'll be making use of, followed by manually putting them together to create the test rig. So… let's get down and dirty and start setting up the Test Rig.
    What Components of Azure will I be using for building the Test Rig in the Cloud?
    To run the Test Agents we'll make use of Windows Azure Compute, and to enable communication between the Test Controller and the Test Agents we'll make use of Windows Azure Connect.
    Azure Connect
    The Test Controller is on premise and the Test Agents are in the cloud (how will they talk?). To enable communication between the two, we'll make use of Windows Azure Connect. With Windows Azure Connect, you can use a simple user interface to configure IPsec protected connections between computers or virtual machines (VMs) in your organization's network, and roles running in Windows Azure. With this you can now join Windows Azure role instances to your domain, so that you can use your existing methods for domain authentication, name resolution, or other domain-wide maintenance actions. For more details refer to an overview of Windows Azure Connect. A very useful video explains everything you wanted to know about Windows Azure Connect.
    Azure Compute
    Windows Azure Compute provides developers a platform to host and manage applications in Microsoft's data centres across the globe. A Windows Azure application is built from one or more components called 'roles'. Roles come in three different types: Web role, Worker role, and Virtual Machine (VM) role; we'll be using the Worker role to set up the Test Agents. A very nice blog post discusses the difference between the 3 role types. Developers are free to use the .NET framework or other software that runs on Windows with the Worker role or Web role. Developers can also create applications using languages such as PHP and Java. More on Windows Azure Compute. Each Windows Azure compute instance represents a virtual server:
    Virtual Machine Size   CPU Cores   Memory     Cost Per Hour
    Extra Small            Shared      768 MB     $0.04
    Small                  1           1.75 GB    $0.12
    Medium                 2           3.50 GB    $0.24
    Large                  4           7.00 GB    $0.48
    Extra Large            8           14.00 GB   $0.96
    You might want to review the Windows Azure Pricing FAQ.
    Let's Get Started building the Test Rig…
    Configuration   Machine Role                         Comments
    VM – 1          Domain Controller for Playpit.com    On Premise
    VM – 2          TFS, Test Controller                 On Premise
    VM – 3          Test Agent                           Cloud
    In this blog post I will assume that you have the domain, Team Foundation Server and Test Controller installed and set up already. If not, please refer to the TFS 2010 Installation Guide and this walkthrough on MSDN to set up your Test Controller. You can also download a preconfigured TFS 2010 VM from Brian Keller's blog; Brian also has some great hands-on labs on TFS 2010 that you may want to explore.
    I. Let's start building VM – 3: The Test Agent
    Download the Windows Azure SDK and Tools. Open Visual Studio and create a new Windows Azure Project using the Cloud template. Choose the Worker Role for the reasons explained in the earlier post. The WorkerRole.cs implements the Run() and OnStart() methods; no code changes are required. You should be able to compile the project and run it in the compute emulator (the compute emulator should have been installed as part of the Windows Azure Toolkit) on your local machine.
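    For reference, here is a minimal sketch of roughly what the template-generated WorkerRole.cs contains; the names (WorkerRole1, WorkerRole) come from the default Visual Studio template and the exact contents may differ by SDK version, so treat this as an illustration of the Run()/OnStart() pattern rather than required code:
    using System.Diagnostics;
    using System.Net;
    using System.Threading;
    using Microsoft.WindowsAzure.ServiceRuntime;

    namespace WorkerRole1
    {
        public class WorkerRole : RoleEntryPoint
        {
            public override void Run()
            {
                // The template role simply loops and writes a trace message every 10 seconds.
                Trace.WriteLine("WorkerRole1 entry point called", "Information");
                while (true)
                {
                    Thread.Sleep(10000);
                    Trace.WriteLine("Working", "Information");
                }
            }

            public override bool OnStart()
            {
                // Default limit for concurrent outbound connections set by the template.
                ServicePointManager.DefaultConnectionLimit = 12;
                return base.OnStart();
            }
        }
    }
    If the project compiles and the role runs in the compute emulator as described above, you are ready to move on to the configuration changes below.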
We will only be making changes to WindowsAzureProject, open ServiceDefinition.csdef. Ensure that the vmsize is small (remember the cost chart above). Import the “Connect” module. I am importing the Connect module because I need to join the Worker role VM to the Playpit domain. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect"/> </Imports> </WorkerRole> </ServiceDefinition> Go to the ServiceConfiguration.Cloud.cscfg and note that settings with key ‘Microsoft.WindowsAzure.Plugins.Connect.%%%%’ have been added to the configuration file. This is because you decided to import the connect module. See the config below. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration>             Let’s go step by step and understand all the highlighted parameters and where you can find the values for them.       osFamily – By default this is set to 1 (Windows Server 2008 SP2). Change this to 2 if you want the Windows Server 2008 R2 operating system. The Advantage of using osFamily = “2” is that you get Powershell 2.0 rather than Powershell 1.0. In Powershell 2.0 you could simply use “powershell -ExecutionPolicy Unrestricted ./myscript.ps1” and it will work while in Powershell 1.0 you will have to change the registry key by including the following in your command file “reg add HKLM\Software\Microsoft\PowerShell\1\ShellIds\Microsoft.PowerShell /v ExecutionPolicy /d Unrestricted /f” before you can execute any power shell. The other reason you might want to move to os2 is if you wanted IIS 7.5.       Activation Token – To enable communication between the on premise machine and the Windows Azure Worker role VM both need to have the same token. Log on to Windows Azure Management Portal, click on Connect, click on Get Activation Token, this should give you the activation token, copy the activation token to the clipboard and paste it in the configuration file. 
Note – Later in the blog I’ll be showing you how to install connect on the on premise machine.                       EnableDomainJoin – Set the value to true, ofcourse we want to join the on windows azure worker role VM to the domain.       DomainFQDN, DomainControllerFQDN, DomainAccountName, DomainPassword, DomainOU, Administrators – This information is specific to your domain. I have extracted this information from the ‘service manager’ and ‘Active Directory Users and Computers’. Also, i created a new Domain-OU namely ‘CloudInstances’ so all my cloud instances joined to my domain show up here, this is optional. You can encrypt the DomainPassword – refer to the instructions here. Or hold fire, I’ll be covering that when i come to certificates and encryption in the coming section.       Now once you have filled all this information up, the configuration file should look something like below, <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> </ConfigurationSettings> </Role> </ServiceConfiguration> Next we will be enabling the Remote Desktop module in to the ServiceDefinition.csdef, we could make changes manually or allow a beautiful wizard to help us make changes. I prefer the second option. So right click on the Windows Azure project and choose Publish       Now once you get the publish wizard, if you haven’t already you would be asked to import your Windows Azure subscription, this is simply the Msdn subscription activation key xml. Once you have done click Next to go to the Settings page and check ‘Enable Remote Desktop for all roles’.       As soon as you do that you get another pop up asking you the details for the user that you would be logging in with (make sure you enter a reasonable expiry date, you do not want the user account to expire today). Notice the more information tag at the bottom, click that to get access to the certificate section. See screen shot below.       
From the drop down select the option to create a new certificate        In the pop up window enter the friendly name for your certificate. In my case I entered ‘WAC – Test Rig’ and click ok. This will create a new certificate for you. Click on the view button to see the certificate details. Do you see the Thumbprint, this is the value that will go in the config file (very important). Now click on the Copy to File button to copy the certificate, we will need to import the certificate to the windows Azure Management portal later. So, make sure you save it a safe location.                                Click Finish and enter details of the user you would like to create with permissions for remote desktop access, once you have entered the details on the ‘Remote desktop configuration’ screen click on Ok. From the Publish Windows Azure Wizard screen press Cancel. Cancel because we don’t want to publish the role just yet and Yes because we want to save all the changes in the config file.       Now if you go to the ServiceDefinition.csdef file you will see that the RemoteAccess and RemoteForwarder roles have been imported for you. <?xml version="1.0" encoding="utf-8"?> <ServiceDefinition name="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceDefinition"> <WorkerRole name="WorkerRole1" vmsize="Small"> <Imports> <Import moduleName="Diagnostics" /> <Import moduleName="Connect" /> <Import moduleName="RemoteAccess" /> <Import moduleName="RemoteForwarder" /> </Imports> </WorkerRole> </ServiceDefinition> Now go to the ServiceConfiguration.Cloud.cscfg file and you see a whole bunch for setting “Microsoft.WindowsAzure.Plugins.RemoteAccess.%%%” values added for you. <?xml version="1.0" encoding="utf-8"?> <ServiceConfiguration serviceName="WindowsAzureProject2" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="2" osVersion="*"> <Role name="WorkerRole1"> <Instances count="1" /> <ConfigurationSettings> <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.ActivationToken" value="45f55fea-f194-4fbc-b36e-25604faac784" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Refresh" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.WaitForConnectivity" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Upgrade" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.EnableDomainJoin" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainFQDN" value="play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainControllerFQDN" value="WIN-KUDQMQFGQOL.play.pit.com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainAccountName" value="playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainPassword" value="************************" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainOU" value="OU=CloudInstances, DC=Play, DC=Pit, DC=com" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.Administrators" value="Playpit\Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.Connect.DomainSiteName" value="" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" value="true" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" value="Administrator" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" 
value="MIIBnQYJKoZIhvcNAQcDoIIBjjCCAYoCAQAxggFOMIIBSgIBADAyMB4xHDAaBgNVBAMME1dpbmRvd 3MgQXp1cmUgVG9vbHMCEGa+B46voeO5T305N7TSG9QwDQYJKoZIhvcNAQEBBQAEggEABg4ol5Xol66Ip6QKLbAPWdmD4ae ADZ7aKj6fg4D+ATr0DXBllZHG5Umwf+84Sj2nsPeCyrg3ZDQuxrfhSbdnJwuChKV6ukXdGjX0hlowJu/4dfH4jTJC7sBWS AKaEFU7CxvqYEAL1Hf9VPL5fW6HZVmq1z+qmm4ecGKSTOJ20Fptb463wcXgR8CWGa+1w9xqJ7UmmfGeGeCHQ4QGW0IDSBU6ccg vzF2ug8/FY60K1vrWaCYOhKkxD3YBs8U9X/kOB0yQm2Git0d5tFlIPCBT2AC57bgsAYncXfHvPesI0qs7VZyghk8LVa9g5IqaM Cp6cQ7rmY/dLsKBMkDcdBHuCTAzBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECDRVifSXbA43gBApNrp40L1VTVZ1iGag+3O1" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" value="2012-11-27T23:59:59.0000000+00:00" /> <Setting name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" value="true" /> </ConfigurationSettings> <Certificates> <Certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" thumbprint="AA23016CF0BDFC344400B5B82706B608B92E4217" thumbprintAlgorithm="sha1" /> </Certificates> </Role> </ServiceConfiguration>          Okay let’s look at them one at a time,       Enabled - Yes, we would like to enable Remote Access.       AccountUserName – This is the user name you entered while you were on the publish windows azure role screen, as detailed above.       AccountEncrytedPassword – Try and decode that, the certificate is used to encrypt the password you specified for the user account. Remember earlier i said, either use the instructions or wait and i’ll be showing you encryption, now the user account i am using for rdp has the same password as my domain password, so i can simply copy the value of the AccountEncryptedPassword to the DomainPassword as well.       AccountExpiration – This is the expiration as you specified in the wizard earlier, make sure your account does not expire today.       Remote Forwarder – Check out the documentation, below is how I understand it, -- One role in an application that implements a remote desktop connection must import the RemoteForwarder module. The two modules work together to enable the remote desktop connections to role instances. -- If you have multiple roles defined in the service model, it does not matter which role you add the RemoteForwarder module to, but you must add it to only one of the role definitions.       Certificate – Remember the certificate thumbprint from the wizard, the on premise machine and windows azure role machine that need to speak to each other must have the same thumbprint. More on that when we install Windows Azure connect Endpoints on the on premise machine. As i said earlier, in this blog post, I’ll be showing you the manual process so i won’t be scripting any star up tasks to install the test agent or register the test agent with the TFS Server. I’ll be showing you all this cool stuff in the next blog post, that’s because it’s important to understand the manual side of it, it becomes easier for you to troubleshoot in case something fails. Having said that, the changes we have made are sufficient to spin up the Windows Azure Worker Role aka Test Agent VM, have it connected with the play.pit.com domain and have remote access enabled on it. Before we deploy the Test Agent VM we need to set up Windows Azure Connect on the TFS Server. II. Windows Azure Connect: Setting up Connect on VM – 2 i.e. TFS & Test Controller Glad you made it so far, now to enable communication between the on premise TFS/Test Controller and Azure-ed Test Agent we need to enable communication. 
    We have set up the Azure Connect module in the Test Agent configuration; now the Connect endpoints need to be enabled on the on premise machines. Let's have a look at how we can do this. Log on to VM – 2 running the TFS Server and Test Controller. Log on to the Windows Azure Management Portal and click on Virtual Network. If you already have a subscription you should see the screen shot below; if not, you would be asked to complete the subscription first. Click on Install Local Endpoints from the top left of the panel and you get a url with a token id appended to it. Remember the token I showed you earlier? In theory the token you get here should match the token you added to the Test Agent config file. Copy the url to the clipboard and paste it in IE (important: the installation at present only works out of IE and you need to have cookies enabled in order to complete the installation). As stated in the pop up, you can NOT download and run the software later; you need to run it as is, since it contains a token. Once the installation completes you should see the Windows Azure Connect icon in the system tray. Right click the Azure Connect icon, choose Diagnostics and refer to this link for diagnostic detail terminology.
    NOTE – Unfortunately I could not see the Windows Azure Connect icon in the system tray; a bit of binging with Google revealed that the Azure Connect icon is only shown when the 'Windows Azure Connect Endpoint' service is started. So go to services.msc and make sure that the service is started; if not, start it. Unfortunately again, the service did not start for me on a manual start, and I realised that one of the dependent services was disabled. You can look at the service dependencies, start them, and then start Windows Azure Connect. Bottom line: you need to start the Windows Azure Connect service before you can proceed. Please refer here on MSDN for more on troubleshooting Windows Azure Connect. (Follow the next step as well.)
    Now go back to the Windows Azure Management Portal and from Groups and Roles create a new group, let's call it 'Test Rig'. Make sure you add VM – 2 (the TFS Server VM where you just installed the endpoint). Now if you go back to the Azure Connect icon in the system tray and click 'Refresh Policy' you will notice that the disconnected status of the icon should change to ready for connection.
    III. Importing the Certificate into the Windows Azure Management Portal
    But before that you need to import the certificate you created in Step I into the Windows Azure Management Portal. Log on to the Windows Azure Management Portal and click on 'Hosted Services, Storage Accounts & CDN', then 'Management Certificates', followed by Add Certificates as shown in the screen shot below. Browse to the location where you saved the certificate earlier (refer to Step I in case you forgot). Now you should be able to see the imported certificate here; make sure the thumbprint of the certificate matches the one you inserted in the config files.
    IV. Publish Windows Azure Worker Role aka Test Agent
    Having completed I, II and III, you are ready to publish the Test Agent VM – 3 to the cloud. Go to Visual Studio, right click the Windows Azure project and select Publish. Verify the information in the wizard; from the advanced settings tab, you can also enable capture of IntelliTrace or profiling information. Click Next and click Publish!
    From the View menu bar select the Windows Azure Activity Log window. Now you should be able to see the deployment progress in real time. In the Windows Azure Management Portal, you should also be able to see the progress of the creation of a new Worker Role. Once the deployment is complete you should be able to RDP (go to the run prompt, type mstsc and enter the machine name in the pop up) into the Test Agent Worker Role VM from the Playpit network using the domain admin user account. In case you are unable to log in to the Test Agent using the domain admin user account, it means the process of joining the Test Agent to the domain has failed! But the good news is, because you imported the Connect module, you can connect to the Test Agent machine using the Windows Azure Management Portal and troubleshoot the reason for failure: you will be able to log in with the user name and password you specified in the config file for the keys RemoteAccess.AccountUsername and RemoteAccess.AccountEncryptedPassword (just enter the password unencrypted), then fix it or manually join the machine to the domain. Once you have managed to join the Test Agent VM to the domain, move to the next step.
    So, log in to the Test Agent Worker Role VM with the Playpit Domain Administrator and verify that you can log in, the machine is connected to the domain and the Connect service is successfully running. If yes, give yourself a pat on the back, you are 80% mission accomplished!
    Go to the Windows Azure Management Portal, click on Virtual Network, click on Groups and Roles, click on Test Rig and click Edit Group to edit the Test Rig group you created earlier. In the Connect to section, click on Add to select the worker role you have just deployed. Also, check 'Allow connections between endpoints in the group'; with this you enable communication between the test controller and the test agents, and between the test agents themselves. Click Save.
    Now you are ready to deploy the Test Agent software on the Worker Role Test Agent VM and configure it to work with the Test Controller.
    V. Configuring VM – 3: Installing Test Agent and Associating Test Agent to Controller
    Log in to the Worker Role Test Agent VM that you have just successfully deployed; make sure you log in with the domain administrator account. Download the All Agents software from MSDN, 'en_visual_studio_agents_2010_x86_x64_dvd_509679.iso', extract the iso and navigate to where you have extracted it. In my case, I have extracted the iso to "C:\Resources\Temp\VsAgentSetup". Open the Test Agent folder and double click setup.exe. Once you have installed the Test Agent you should reach the configuration window. If you face any issues installing the TFS Test Agent on the VM, refer to the walkthrough on MSDN.
    Once you have successfully installed the Test Agent software you will need to configure the test agent. Right click the test agent configuration tool and run it as a different user, i.e. an Administrator. This is really to run the configuration wizard with elevated privileges (you might have UAC block some things otherwise). In the run options you can select 'service'; you do not need to run the agent interactively unless you are running coded UI tests. I have specified the domain administrator to connect to the TFS Test Controller. In real life I would never do that; I would create a separate test user service account for this purpose.
    But for the blog post we are using the most powerful user so that any policies or restrictions don't block you. Click the Apply Settings button and you should be all green! If not, the summary usually gives helpful error messages that you can resolve before proceeding. In my experience, you may run into either a permission issue or a firewall blocking communication.
    And now the moment of truth! Go to VM – 2, open up Visual Studio and from the Test menu select Manage Test Controller. Mission accomplished! You should be able to see the Test Agent that you have just configured here.
    VI. Creating and Running Load Tests on your brand new Azure-ed Test Rig
    I have various blog posts on Performance Testing with Visual Studio Ultimate; you can follow the links and videos below.
    Blog Posts:
    - Part 1 – Performance Testing using Visual Studio 2010 Ultimate
    - Part 2 – Performance Testing using Visual Studio 2010 Ultimate
    - Part 3 – Performance Testing using Visual Studio 2010 Ultimate
    Videos:
    - Test Tools Configuration & Settings in Visual Studio
    - Why & How to Record Web Performance Tests in Visual Studio Ultimate
    - Goal Driven Load Testing using Visual Studio Ultimate
    Now that you have created your load tests, there is one last change you need to make before you can run the tests on your Azure Test Rig: create a new test settings file, change the test execution method to 'Remote Execution' and select the test controller you have configured the Worker Role Test Agent against, in our case VM – 2. So, go on, fire off a test run and see the results of the test being executed on the Azure-ed Test Rig.
    Review and What's next?
    A quick recap of the benefits of running the Test Rig in the cloud and what I will be covering in the next blog post. AND I would love to hear your feedback!
    Advantages:
    - Utilizing the power of Azure compute to run a heavy virtual user load.
    - Benefiting from the Azure flexibility: destroy Test Agents when not in use; it takes less than 25 minutes to spin up a new Test Agent.
    - Most importantly, test network latency (network latency and speed of connection are two different things – usually network latency is very hard to test). By placing the Test Agents in Microsoft data centres around the globe, one can actually test the lag in transferring the bytes, not because of a slow connection but because the page has been requested from the other side of the globe.
    Next Steps:
    The process of spinning up the Test Agents in Windows Azure is not 100% automated. I am working on the worker process and PowerShell scripts to automate the role deployment, the unattended install of the test agent software and the registration of the test agent with the test controller. In the next blog post I will show you how to make the complete process unattended and automated.
    Remember to subscribe to http://feeds.feedburner.com/TarunArora. Hope you enjoyed this post, I would love to hear your feedback! If you have any recommendations on things that I should consider, or any questions or feedback, feel free to leave a comment. See you in Part III.

    Read the article

  • Messenger Thinks My IP is Invalid

    - by Umut Benzer
    Hello. I am using Windows Live Messenger 2009 on Windows 7. I am using a 3G modem (ZTE Proprietary USB Modem) and I connect to the Internet using software my ISP provided me. In the last three days, my Messenger started to disconnect. Here is what I observed and tried to do:
    1- I can browse the web, can do FTP transfers etc. and obviously have a valid IP.
    2- I can sign in to Messenger (appear offline), but when I change my status to anything other than appear offline, Messenger says my connection to the service has been lost. (However, it exists.)
    3- When I run the MSN Connection Troubleshooter, it says my IP is invalid. When I click repair, it says repaired, and just after that I run the troubleshooter again and it says my IP is invalid again. (However, it is valid and I am browsing the net.)
    4- If I connect to the Internet through ethernet or wireless there is no problem at all.
    5- I re-installed Messenger (deleting all settings manually through the registry and folders) and re-installed all drivers and software related to the USB 3G modem. It doesn't work. And then I took a full backup, formatted the entire computer and installed a fresh Windows 7; after 5 minutes, the same problem occurred.
    What do you recommend? What can I do?
    Addition: As seen on the screenshot, it says the server IPv4 address is 0.0.0.0. It seems like a problem, I don't know if it is. If it is, how can I solve it? Here is what I get from ipconfig:
    PPP adapter TTNET internet: Connection-specific DNS Suffix . : IPv4 Address. . . . . . . . . . . : 217.174.39.122 Subnet Mask . . . . . . . . . . . : 255.255.255.255 Default Gateway . . . . . . . . . : 0.0.0.0
    Wireless LAN adapter Wireless Network Connection 2: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Wireless LAN adapter Wireless Network Connection: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . : ege.edu.tr
    Ethernet adapter Local Area Connection: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 16: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 13: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter 6TO4 Adapter: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 9: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 11: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 12: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 14: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 17: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 25: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 20: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 18: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 19: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 22: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 21: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 15: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 23: Connection-specific DNS Suffix . : IPv6 Address. . . . . . . . . . . : 2001:0:4137:9e74:2448:3909:2a2c:eb7b Link-local IPv6 Address . . . . . : fe80::2448:3909:2a2c:eb7b%30 Default Gateway . . . . . . . . . :
    Tunnel adapter Local Area Connection* 24: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter isatap.{CFFCFEDB-6B53-42E0-B091-548B9ADE9C9D}: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 26: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 27: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 29: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 31: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 28: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter Local Area Connection* 32: Connection-specific DNS Suffix . : IPv6 Address. . . . . . . . . . . : 2002:d9ae:277a::d9ae:277a Default Gateway . . . . . . . . . : 2002:c058:6301::c058:6301
    Tunnel adapter Local Area Connection* 30: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter isatap.{157CF713-B3AC-4701-87A9-14C23CA60AAB}: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter isatap.ege.edu.tr: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
    Tunnel adapter isatap.{0D3CD01B-0993-4B37-89B8-12557ECF484D}: Media State . . . . . . . . . . . : Media disconnected Connection-specific DNS Suffix . :
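    As an aside, and not part of the original question: a common first troubleshooting step for this class of symptom (general connectivity works on a PPP/3G link while Messenger's troubleshooter reports an invalid IP) is to reset the Winsock catalog and the TCP/IP stack, since dialler software sometimes installs layered service providers that confuse Messenger. These are standard Windows 7 commands, offered only as a hedged suggestion; run them from an elevated command prompt and reboot afterwards:
    netsh winsock reset
    netsh int ip reset
    netsh interface ipv6 reset
    The last command is optional; the many 6TO4/ISATAP tunnel adapters in the output above are created by IPv6 transition technologies and resetting them is harmless if they are not in use.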

    Read the article

< Previous Page | 2163 2164 2165 2166 2167 2168 2169  | Next Page >