Search Results

Search found 42428 results on 1698 pages for 'database query'.


  • Recovering PL/SQL source overwritten by CREATE OR REPLACE (10gR2)

    - by Liu Maclean
    A reader on T.Askmaclean.com asked: a procedure in a 10gR2 database was accidentally overwritten with CREATE OR REPLACE, and there is no usable backup of its definition; can Oracle bring back the previous version of the procedure? Maclean describes two ways to recover overwritten PL/SQL source in 10gR2.

    Method 1: use Flashback Query. There is no need to flash back the whole database: the before image of the SOURCE$ rows that the CREATE OR REPLACE statement rewrites is kept in undo data, so as long as that undo has not been overwritten the old source can simply be queried back:

    SQL> select * from V$version;
    BANNER
    ----------------------------------------------------------------
    Oracle Database 10g Enterprise Edition Release 10.2.0.5.0 - 64bi
    PL/SQL Release 10.2.0.5.0 - Production
    CORE    10.2.0.5.0      Production
    TNS for Linux: Version 10.2.0.5.0 - Production
    NLSRTL Version 10.2.0.5.0 - Production

    SQL> select * from global_name;
    GLOBAL_NAME
    --------------------------------------------------------------------------------
    www.oracledatabase12g.com

    SQL> create or replace procedure maclean_proc as
      2  begin
      3  execute immediate 'select 1 from dual';
      4  end;
      5  /
    Procedure created.

    SQL> select * from dba_source where name='MACLEAN_PROC';
    OWNER      NAME                           TYPE               LINE TEXT
    ---------- ------------------------------ ------------ ---------- --------------------------------------------------
    SYS        MACLEAN_PROC                   PROCEDURE             1 procedure maclean_proc as
    SYS        MACLEAN_PROC                   PROCEDURE             2 begin
    SYS        MACLEAN_PROC                   PROCEDURE             3 execute immediate 'select 1 from dual';
    SYS        MACLEAN_PROC                   PROCEDURE             4 end;

    SQL> select current_scn from v$database;
    CURRENT_SCN
    -----------
        2660057

    create or replace procedure maclean_proc as
    begin
    -- I am new procedure
    execute immediate 'select 2 from dual';
    end;
    /
    Procedure created.

    SQL> select current_scn from v$database;
    CURRENT_SCN
    -----------
        2660113

    SQL> select * from dba_source where name='MACLEAN_PROC';
    OWNER      NAME                           TYPE               LINE TEXT
    ---------- ------------------------------ ------------ ---------- --------------------------------------------------
    SYS        MACLEAN_PROC                   PROCEDURE             1 procedure maclean_proc as
    SYS        MACLEAN_PROC                   PROCEDURE             2 begin
    SYS        MACLEAN_PROC                   PROCEDURE             3 -- I am new procedure
    SYS        MACLEAN_PROC                   PROCEDURE             4 execute immediate 'select 2 from dual';
    SYS        MACLEAN_PROC                   PROCEDURE             5 end;

    SQL> create table old_source as select * from dba_source as of scn 2660057 where name='MACLEAN_PROC';
    Table created.

    SQL> select * from old_source where name='MACLEAN_PROC';
    OWNER      NAME                           TYPE               LINE TEXT
    ---------- ------------------------------ ------------ ---------- --------------------------------------------------
    SYS        MACLEAN_PROC                   PROCEDURE             1 procedure maclean_proc as
    SYS        MACLEAN_PROC                   PROCEDURE             2 begin
    SYS        MACLEAN_PROC                   PROCEDURE             3 execute immediate 'select 1 from dual';
    SYS        MACLEAN_PROC                   PROCEDURE             4 end;

    The example above flashes back to a specific SCN; in practice an AS OF TIMESTAMP flashback query is usually more convenient. The precondition is that the undo covering the old PL/SQL source has not yet been overwritten; once it has, this method can no longer bring back the replaced or dropped PL/SQL object.

    Method 2: use LogMiner to mine the DELETE statements that a replace/drop of a PL/SQL object issues against the data dictionary, and recover the old source from the UNDO SQL that LogMiner generates. This requires the relevant redo to be available as archived logs (archivelog mode) and at least minimal supplemental logging to be enabled; otherwise LogMiner reports Unsupported SQLREDO for these changes. A CREATE OR REPLACE of a procedure is not carried out by a single SQL statement against the dictionary; tracing the statement with event 10046 shows the recursive SQL it runs, including the DELETE from and re-INSERT into SOURCE$ that rewrite the stored source:

    SQL> ALTER DATABASE ADD SUPPLEMENTAL LOG DATA;
    Database altered.

    SQL> create or replace procedure maclean_proc as
      2  begin
      3  execute immediate 'select 1 from dual';
      4  end;
      5  /
    Procedure created.

    SQL> oradebug setmypid;
    Statement processed.
    SQL> oradebug event 10046 trace name context forever,level 12;
    Statement processed.
    SQL> create or replace procedure maclean_proc as
      2  begin
      3  execute immediate 'select 2 from dual';
      4  end;
      5  /
    Procedure created.
    SQL> oradebug tracefile_name
    /s01/admin/G10R25/udump/g10r25_ora_4305.trc

    [oracle@vrh8 ~]$ egrep "update|insert|delete|merge" /s01/admin/G10R25/udump/g10r25_ora_4305.trc
    delete from procedureinfo$ where obj#=:1
    delete from argument$ where obj#=:1
    delete from procedurec$ where obj#=:1
    delete from procedureplsql$ where obj#=:1
    delete from procedurejava$ where obj#=:1
    delete from vtable$ where obj#=:1
    insert into procedureinfo$(obj#,procedure#,overload#,procedurename,properties,itypeobj#) values (:1,:2,:3,:4,:5,:6)
    insert into argument$( obj#,procedure$,procedure#,overload#,position#,sequence#,level#,argument,type#,default#,in_out,length,precision#,scale,radix,charsetid,charsetform,properties,type_owner,type_name,type_subname,type_linkname,pls_type) values (:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23)
    insert into procedureplsql$(obj#,procedure#,entrypoint#) values (:1,:2,:3)
    update procedure$ set audit$=:2,options=:3 where obj#=:1
    delete from source$ where obj#=:1
    insert into source$(obj#,line,source) values (:1,:2,:3)
    delete from idl_ub1$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_char$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_ub2$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_sb4$ where obj#=:1 and part=:2 and version<>:3
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    update idl_sb4$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    update idl_ub1$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    update idl_char$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    update idl_ub2$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    delete from idl_ub1$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_char$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_ub2$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_sb4$ where obj#=:1 and part=:2 and version<>:3
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    delete from idl_ub1$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from idl_char$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from idl_ub2$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from idl_sb4$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from idl_ub1$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_char$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_ub2$ where obj#=:1 and part=:2 and version<>:3
    delete from idl_sb4$ where obj#=:1 and part=:2 and version<>:3
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    update idl_sb4$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    update idl_ub1$ set piece#=:1 ,length=:2 , piece=:3 where obj#=:4 and part=:5 and piece#=:6 and version=:7
    delete from idl_char$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from idl_ub2$ where obj#=:1 and part=:2 and (piece#<:3 or piece#>:4) and version=:5
    delete from error$ where obj#=:1
    delete from settings$ where obj# = :1
    insert into settings$(obj#, param, value) values (:1, :2, :3)
    delete from warning_settings$ where obj# = :1
    insert into warning_settings$(obj#, warning_num, global_mod, property) values (:1, :2, :3, :4)
    delete from dependency$ where d_obj#=:1
    delete from access$ where d_obj#=:1
    insert into dependency$(d_obj#,d_timestamp,order#,p_obj#,p_timestamp, property, d_attrs)values (:1,:2,:3,:4,:5,:6, :7)
    insert into access$(d_obj#,order#,columns,types) values (:1,:2,:3,:4)
    update obj$ set obj#=:6,type#=:7,ctime=:8,mtime=:9,stime=:10,status=:11,dataobj#=:13,flags=:14,oid$=:15,spare1=:16, spare2=:17 where owner#=:1 and name=:2 and namespace=:3 and(remoteowner=:4 or remoteowner is null and :4 is null)and(linkname=:5 or linkname is null and :5 is null)and(subname=:12 or subname is null and :12 is null)

    A DROP PROCEDURE likewise removes the PL/SQL source from SOURCE$:

    SQL> oradebug setmypid;
    Statement processed.
    SQL> oradebug event 10046 trace name context forever,level 12;
    Statement processed.
    SQL> drop procedure maclean_proc;
    Procedure dropped.
    SQL> oradebug tracefile_name
    /s01/admin/G10R25/udump/g10r25_ora_4331.trc

    delete from context$ where obj#=:1
    delete from dir$ where obj#=:1
    delete from type_misc$ where obj#=:1
    delete from library$ where obj#=:1
    delete from procedure$ where obj#=:1
    delete from javaobj$ where obj#=:1
    delete from operator$ where obj#=:1
    delete from opbinding$ where obj#=:1
    delete from opancillary$ where obj#=:1
    delete from oparg$ where obj# = :1
    delete from com$ where obj#=:1
    delete from source$ where obj#=:1
    delete from idl_ub1$ where obj#=:1 and part=:2
    delete from idl_char$ where obj#=:1 and part=:2
    delete from idl_ub2$ where obj#=:1 and part=:2
    delete from idl_sb4$ where obj#=:1 and part=:2
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    delete from idl_ub1$ where obj#=:1 and part=:2
    delete from idl_char$ where obj#=:1 and part=:2
    delete from idl_ub2$ where obj#=:1 and part=:2
    delete from idl_sb4$ where obj#=:1 and part=:2
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    delete from idl_ub1$ where obj#=:1 and part=:2
    delete from idl_char$ where obj#=:1 and part=:2
    delete from idl_ub2$ where obj#=:1 and part=:2
    delete from idl_sb4$ where obj#=:1 and part=:2
    delete from ncomp_dll$ where obj#=:1 returning dllname into :2
    delete from error$ where obj#=:1
    delete from settings$ where obj# = :1
    delete from procedureinfo$ where obj#=:1
    delete from argument$ where obj#=:1
    delete from procedurec$ where obj#=:1
    delete from procedureplsql$ where obj#=:1
    delete from procedurejava$ where obj#=:1
    delete from vtable$ where obj#=:1
    delete from dependency$ where d_obj#=:1
    delete from access$ where d_obj#=:1
    delete from objauth$ where obj#=:1
    update obj$ set obj#=:6,type#=:7,ctime=:8,mtime=:9,stime=:10,status=:11,dataobj#=:13,flags=:14,oid$=:15,spare1=:16, spare2=:17 where owner#=:1 and name=:2 and namespace=:3 and(remoteowner=:4 or remoteowner is null and :4 is null)and(linkname=:5 or linkname is null and :5 is null)and(subname=:12 or subname is null and :12 is null)

    Switch the log file so that the redo covering these SOURCE$ changes is archived:

    SQL> alter system switch logfile;
    System altered.

    SQL> select sequence#,name from v$archived_log where sequence#=(select max(sequence#) from v$archived_log);
     SEQUENCE#
    ----------
    NAME
    --------------------------------------------------------------------------------
           242
    /s01/flash_recovery_area/G10R25/archivelog/2012_05_21/o1_mf_1_242_7vnm13k6_.arc

    SQL> exec dbms_logmnr.add_logfile ('/s01/flash_recovery_area/G10R25/archivelog/2012_05_21/o1_mf_1_242_7vnm13k6_.arc',options => dbms_logmnr.new);
    PL/SQL procedure successfully completed.

    SQL> exec dbms_logmnr.start_logmnr(options => dbms_logmnr.dict_from_online_catalog);
    PL/SQL procedure successfully completed.

    SQL> select sql_redo,sql_undo from v$logmnr_contents where seg_name = 'SOURCE$' and operation='DELETE';
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '1' and "SOURCE" = 'procedure maclean_proc as ' and ROWID = 'AAAABIAABAAALpyAAN';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','1','procedure maclean_proc as ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '2' and "SOURCE" = 'begin ' and ROWID = 'AAAABIAABAAALpyAAO';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','2','begin ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '3' and "SOURCE" = 'execute immediate ''select 1 from dual''; ' and ROWID = 'AAAABIAABAAALpyAAP';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','3','execute immediate ''select 1 from dual''; ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '4' and "SOURCE" = 'end;' and ROWID = 'AAAABIAABAAALpyAAQ';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','4','end;');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '1' and "SOURCE" = 'procedure maclean_proc as ' and ROWID = 'AAAABIAABAAALpyAAJ';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','1','procedure maclean_proc as ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '2' and "SOURCE" = 'begin ' and ROWID = 'AAAABIAABAAALpyAAK';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','2','begin ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '3' and "SOURCE" = 'execute immediate ''select 2 from dual''; ' and ROWID = 'AAAABIAABAAALpyAAL';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','3','execute immediate ''select 2 from dual''; ');
    delete from "SYS"."SOURCE$" where "OBJ#" = '56059' and "LINE" = '4' and "SOURCE" = 'end;' and ROWID = 'AAAABIAABAAALpyAAM';
    insert into "SYS"."SOURCE$"("OBJ#","LINE","SOURCE") values ('56059','4','end;');

    As the output shows, the UNDO SQL that LogMiner generates for these SOURCE$ DELETEs is a set of INSERT statements whose SOURCE column holds the old text, so the PL/SQL source that existed before the replace/drop DDL can be reconstructed from them.
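    Once the old rows are available (here in OLD_SOURCE from Method 1), the procedure text can be spooled back out and re-run. A minimal SQL*Plus sketch along the lines of the demo above; the spool file name is arbitrary, and note that DBA_SOURCE/SOURCE$ store the text without the leading CREATE OR REPLACE keywords:

      set heading off feedback off pagesize 0 linesize 200 trimspool on
      spool restore_maclean_proc.sql
      select 'create or replace' from dual;
      select text from old_source where name = 'MACLEAN_PROC' order by line;
      select '/' from dual;
      spool off
      -- review restore_maclean_proc.sql, then run it to re-create the old version:
      -- @restore_maclean_proc.sql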

    Read the article

  • Oracle OpenWorld Tokyo 2012 pre-event seminars: registration now open!

    - by M.Morozumi
    Ahead of Oracle OpenWorld Tokyo 2012 and JavaOne Tokyo 2012, two pre-event training seminars are on offer. The first, aimed at Exadata engineers, DBAs and administrators, covers the Oracle Exadata Database Machine and the Exadata Storage Server X2-2 and how to operate a Database Machine in practice; it runs April 2-3, 2012. The second, "Oracle BIEE 10g to 11g Report/Dashboard migration", walks users of Oracle BI Suite EE 10g through bringing their existing reports and dashboards up to 11g; it runs April 3-4, 2012.

    Read the article

  • Test yourself: ORACLE MASTER Bronze practice question, vol. 4

    - by M.Morozumi
    Here is another practice question for the ORACLE MASTER Bronze Oracle Database 11g exam.
    -------------------------------
    Question: A table in an Oracle Database 11g database has been placed in READ ONLY mode. Which one of the following operations can be executed against it?
    a. DML statements against the table
    b. SELECT ... FOR UPDATE against the table
    c. DROP of the table
    d. ALTER TABLE ... READ WRITE against the table
    The answer is given in the follow-up post.

    Read the article

  • Test yourself: ORACLE MASTER Bronze practice question, vol. 4 (Answer)

    - by M.Morozumi
    Here is the answer to the ORACLE MASTER Bronze Oracle Database 11g practice question from vol. 4.
    -------------------------------
    Question: A table in an Oracle Database 11g database has been placed in READ ONLY mode. Which one of the following operations can be executed against it?
    a. DML statements against the table
    b. SELECT ... FOR UPDATE against the table
    c. DROP of the table
    d. ALTER TABLE ... READ WRITE against the table
    -------------------------------
    Answer: c. DROP of the table
    Explanation: a DROP statement can be executed against a table even while it is in READ ONLY mode; DML and SELECT ... FOR UPDATE are rejected.
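    The behaviour is easy to verify in a scratch schema. A minimal sketch (the table name is made up here): DML and row locking fail against the READ ONLY table, DROP goes through, and ALTER TABLE ... READ WRITE would switch it back to a writable state.

      create table t_ro (id number);
      alter table t_ro read only;

      insert into t_ro values (1);     -- rejected: DML is not allowed on a READ ONLY table (ORA-12081)
      select * from t_ro for update;   -- rejected: the rows cannot be locked for update either

      drop table t_ro;                 -- succeeds: DROP only touches the data dictionary,
                                       -- so it works even while the table is READ ONLY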

    Read the article

  • Setting up Rails to work with sqlserver

    - by FortunateDuke
    Ok I followed the steps for setting up ruby and rails on my Vista machine and I am having a problem connecting to the database. Contents of database.yml development: adapter: sqlserver database: APPS_SETUP Host: WindowsVT06\SQLEXPRESS Username: se Password: paswd Run rake db:migrate from myapp directory ---------- rake aborted! no such file to load -- deprecated ADO I have dbi 0.4.0 installed and have created the ADO folder in C:\Ruby\lib\ruby\site_ruby\1.8\DBD\ADO I got the ado.rb from the dbi 0.2.2 What else should I be looking at to fix the issue connecting to the database? Please don't tell me to use MySql or Sqlite or Postgres. *UPDATE* I have installed the activerecord-sqlserver-adapter gem from --source=http://gems.rubyonrails.org Still not working. I have verified that I can connect to the database by logging into SQL Management Studio with the credentials. rake db:migrate --trace PS C:\Inetpub\wwwroot\myapp> rake db:migrate --trace (in C:/Inetpub/wwwroot/myapp) ** Invoke db:migrate (first_time) ** Invoke environment (first_time) ** Execute environment ** Execute db:migrate rake aborted! no such file to load -- deprecated C:/Ruby/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:27:in `gem_original_require' C:/Ruby/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:27:in `require' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:510:in `require' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:355:in `new_constants_in' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:510:in `require' C:/Ruby/lib/ruby/site_ruby/1.8/dbi.rb:48 C:/Ruby/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:27:in `gem_original_require' C:/Ruby/lib/ruby/site_ruby/1.8/rubygems/custom_require.rb:27:in `require' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:510:in `require' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:355:in `new_constants_in' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/dependencies.rb:510:in `require' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/core_ext/kernel/requires.rb:7:in `require_library_ or_gem' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/core_ext/kernel/reporting.rb:11:in `silence_warnin gs' C:/Ruby/lib/ruby/gems/1.8/gems/activesupport-2.1.1/lib/active_support/core_ext/kernel/requires.rb:5:in `require_library_ or_gem' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-sqlserver-adapter-1.0.0.9250/lib/active_record/connection_adapters/sqlserver _adapter.rb:29:in `sqlserver_connection' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/connection_adapters/abstract/connection_specificatio n.rb:292:in `send' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/connection_adapters/abstract/connection_specificatio n.rb:292:in `connection=' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/connection_adapters/abstract/connection_specificatio n.rb:260:in `retrieve_connection' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/connection_adapters/abstract/connection_specificatio n.rb:78:in `connection' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/migration.rb:408:in `initialize' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/migration.rb:373:in `new' 
C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/migration.rb:373:in `up' C:/Ruby/lib/ruby/gems/1.8/gems/activerecord-2.1.1/lib/active_record/migration.rb:356:in `migrate' C:/Ruby/lib/ruby/gems/1.8/gems/rails-2.1.1/lib/tasks/databases.rake:99 C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:621:in `call' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:621:in `execute' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:616:in `each' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:616:in `execute' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:582:in `invoke_with_call_chain' C:/Ruby/lib/ruby/1.8/monitor.rb:242:in `synchronize' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:575:in `invoke_with_call_chain' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:568:in `invoke' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2031:in `invoke_task' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2009:in `top_level' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2009:in `each' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2009:in `top_level' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2048:in `standard_exception_handling' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2003:in `top_level' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:1982:in `run' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:2048:in `standard_exception_handling' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/lib/rake.rb:1979:in `run' C:/Ruby/lib/ruby/gems/1.8/gems/rake-0.8.2/bin/rake:31 C:/Ruby/bin/rake:19:in `load' C:/Ruby/bin/rake:19 PS C:\Inetpub\wwwroot\myapp>

    Read the article

  • Entity Framework many-to-many using VB.Net Lambda

    - by bgs264
    Hello, I'm a newbie to StackOverflow so please be kind ;) I'm using Entity Framework in Visual Studio 2010 Beta 2 (.NET framework 4.0 Beta 2). I have created an entity framework .edmx model from my database and I have a handful of many-to-many relationships. A trivial example of my database schema is Roles (ID, Name, Active) Members (ID, DateOfBirth, DateCreated) RoleMembership(RoleID, MemberID) I am now writing the custom role provider (Inheriting System.Configuration.Provider.RoleProvider) and have come to write the implementation of IsUserInRole(username, roleName). The LINQ-to-Entity queries which I wrote, when SQL-Profiled, all produced CROSS JOIN statements when what I want is for them to INNER JOIN. Dim query = From m In dc.Members From r In dc.Roles Where m.ID = 100 And r.Name = "Member" Select m My problem is almost exactly described here: http://stackoverflow.com/questions/553918/entity-framework-and-many-to-many-queries-unusable I'm sure that the solution presented there works well, but whilst I studied Java at uni and I can mostly understand C# I cannot understand this Lambda syntax provided and I need to get a similar example in VB. I've looked around the web for the best part of half a day but I'm not closer to my answer. So please can somebody advise how, in VB, I can construct a LINQ statement which would do this equivalent in SQL: SELECT rm.RoleID FROM RoleMembership rm INNER JOIN Roles r ON r.ID = rm.RoleID INNER JOIN Members m ON m.ID = rm.MemberID WHERE r.Name = 'Member' AND m.ID = 101 I would use this query to see if Member 101 is in Role 3. (I appreciate I probably don't need the join to the Members table in SQL but I imagine in LINQ I'd need to bring in the Member object?) UPDATE: I'm a bit closer by using multiple methods: Protected Sub Page_Load(ByVal sender As Object, ByVal e As System.EventArgs) Handles Me.Load Dim count As Integer Using dc As New CBLModel.CBLEntities Dim persons = dc.Members.Where(AddressOf myTest) count = persons.Count End Using System.Diagnostics.Debugger.Break() End Sub Function myTest(ByVal m As Member) As Boolean Return m.ID = "100" AndAlso m.Roles.Select(AddressOf myRoleTest).Count > 0 End Function Function myRoleTest(ByVal r As Role) As Boolean Return r.Name = "Member" End Function SQL Profiler shows this: SQL:BatchStarting SELECT [Extent1].[ID] AS [ID], ... (all columns from Members snipped for brevity) ... FROM [dbo].[Members] AS [Extent1] RPC:Completed exec sp_executesql N'SELECT [Extent2].[ID] AS [ID], [Extent2].[Name] AS [Name], [Extent2].[Active] AS [Active] FROM [dbo].[RoleMembership] AS [Extent1] INNER JOIN [dbo].[Roles] AS [Extent2] ON [Extent1].[RoleID] = [Extent2].[ID] WHERE [Extent1].[MemberID] = @EntityKeyValue1',N'@EntityKeyValue1 int',@EntityKeyValue1=100 SQL:BatchCompleted SELECT [Extent1].[ID] AS [ID], ... (all columns from Members snipped for brevity) ... FROM [dbo].[Members] AS [Extent1] I'm not certain why it is using sp_execsql for the inner join statement and why it's still running a select to select ALL members though. Thanks. 
UPDATE 2 I've written it by turning the above "multiple methods" into lambda expressions then all into one query, like this: Dim allIDs As String = String.Empty Using dc As New CBLModel.CBLEntities For Each retM In dc.Members.Where(Function(m As Member) m.ID = 100 AndAlso m.Roles.Select(Function(r As Role) r.Name = "Doctor").Count > 0) allIDs &= retM.ID.ToString & ";" Next End Using But it doesn't seem to work: "Doctor" is not a role that exists, I just put it in there for testing purposes, yet "allIDs" still gets set to "100;" The SQL in SQL Profiler this time looks like this: SELECT [Project1].* FROM ( SELECT [Extent1].*, (SELECT COUNT(1) AS [A1] FROM [dbo].[RoleMembership] AS [Extent2] WHERE [Extent1].[ID] = [Extent2].[MemberID]) AS [C1] FROM [dbo].[Members] AS [Extent1] ) AS [Project1] WHERE (100 = [Project1].[ID]) AND ([Project1].[C1] > 0) For brevity I turned the list of all the columns from the Members table into * As you can see it's just ignoring the "Role" query... :/

    Read the article

  • Fetching data (responsebody) with a HttpClient in an AsyncTask and returning the data outside the As

    - by Peter Warbo
    Basically I'm wondering how I'm able to do what I've written in the topic. I've looked through many tutorials on AsyncTask but I can't get it to work. I have a little form (EditText) that will take what the user inputs there and make it to a url query for the application to lookup and then display the results. What I think would seem to work is something like this: In my main activity i have a string called responseBody. Then the user clicks on the search button it will go to my search function and from there call the GrabUrl method with the url which will start the asyncdata and when that process is finished the onPostExecute method will use the function activity.this.setResponseBody(content). This is what my code looks like simpliefied with the most important parts (I think). public class activity extends Activity { private String responseBody; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); initControls(); } public void initControls() { fieldSearch = (EditText) findViewById(R.id.EditText01); buttonSearch = (Button)findViewById(R.id.Button01); buttonSearch.setOnClickListener(new Button.OnClickListener() { public void onClick (View v){ search(); }}); } public void grabURL(String url) { new GrabURL().execute(url); } private class GrabURL extends AsyncTask<String, Void, String> { private final HttpClient client = new DefaultHttpClient(); private String content; private boolean error = false; private ProgressDialog dialog = new ProgressDialog(activity.this); protected void onPreExecute() { dialog.setMessage("Getting your data... Please wait..."); dialog.show(); } protected String doInBackground(String... urls) { try { HttpGet httpget = new HttpGet(urls[0]); ResponseHandler<String> responseHandler = new BasicResponseHandler(); content = client.execute(httpget, responseHandler); } catch (ClientProtocolException e) { error = true; cancel(true); } catch (IOException e) { error = true; cancel(true); } return content; } protected void onPostExecute(String content) { dialog.dismiss(); if (error) { Toast toast = Toast.makeText(activity.this, getString(R.string.offline), Toast.LENGTH_LONG); toast.setGravity(Gravity.TOP, 0, 75); toast.show(); } else { activity.this.setResponseBody(content); } } } public void search() { String query = fieldSearch.getText().toString(); String url = "http://example.com/example.php?query=" + query; //this is just an example url, I have a "real" url in my application but for privacy reasons I've replaced it grabURL(url); // the method that will start the asynctask processData(responseBody); // process the responseBody and display stuff on the ui-thread with the data that I would like to get from the asyntask but doesn't obviously }

    Read the article

  • Dynamic gridview columns event problem

    - by ropstah
    Hi, i have a GridView (selectable) in which I want to generate a dynamic GridView in a new row BELOW the selected row. I can add the row and gridview dynamically in the Gridview1 PreRender event. I need to use this event because: _OnDataBound is not called on every postback (same for _OnRowDataBound) _OnInit is not possible because the 'Inner table' for the Gridview is added after Init _OnLoad is not possible because the 'selected' row is not selected yet. I can add the columns to the dynamic GridView based on my ITemplate class. But now the button events won't fire.... Any suggestions? The dynamic adding of the gridview: Private Sub GridView1_PreRender(ByVal sender As Object, ByVal e As System.EventArgs) Handles GridView1.PreRender Dim g As GridView = sender g.DataBind() If g.SelectedRow IsNot Nothing AndAlso g.Controls.Count &gt; 0 Then Dim t As Table = g.Controls(0) Dim r As New GridViewRow(-1, -1, DataControlRowType.DataRow, DataControlRowState.Normal) Dim c As New TableCell Dim visibleColumnCount As Integer = 0 For Each d As DataControlField In g.Columns If d.Visible Then visibleColumnCount += 1 End If Next c.ColumnSpan = visibleColumnCount Dim ph As New PlaceHolder ph.Controls.Add(CreateStockGrid(g.SelectedDataKey.Value)) c.Controls.Add(ph) r.Cells.Add(c) t.Rows.AddAt(g.SelectedRow.RowIndex + 2, r) End If End Sub Private Function CreateStockGrid(ByVal PnmAutoKey As String) As GridView Dim col As Interfaces.esColumnMetadata Dim coll As New BLL.ViewStmCollection Dim entity As New BLL.ViewStm Dim query As BLL.ViewStmQuery = coll.Query Me._gridStock.AutoGenerateColumns = False Dim buttonf As New TemplateField() buttonf.ItemTemplate = New QuantityTemplateField(ListItemType.Item, "", "Button") buttonf.HeaderTemplate = New QuantityTemplateField(ListItemType.Header, "", "Button") buttonf.EditItemTemplate = New QuantityTemplateField(ListItemType.EditItem, "", "Button") Me._gridStock.Columns.Add(buttonf) For Each col In coll.es.Meta.Columns Dim headerf As New QuantityTemplateField(ListItemType.Header, col.PropertyName, col.Type.Name) Dim itemf As New QuantityTemplateField(ListItemType.Item, col.PropertyName, col.Type.Name) Dim editf As New QuantityTemplateField(ListItemType.EditItem, col.PropertyName, col.Type.Name) Dim f As New TemplateField() f.HeaderTemplate = headerf f.ItemTemplate = itemf f.EditItemTemplate = editf Me._gridStock.Columns.Add(f) Next query.Where(query.PnmAutoKey.Equal(PnmAutoKey)) coll.LoadAll() Me._gridStock.ID = "gvChild" Me._gridStock.DataSource = coll AddHandler Me._gridStock.RowCommand, AddressOf Me.gv_RowCommand Me._gridStock.DataBind() Return Me._gridStock End Function The ITemplate class: Public Class QuantityTemplateField : Implements ITemplate Private _itemType As ListItemType Private _fieldName As String Private _infoType As String Public Sub New(ByVal ItemType As ListItemType, ByVal FieldName As String, ByVal InfoType As String) Me._itemType = ItemType Me._fieldName = FieldName Me._infoType = InfoType End Sub Public Sub InstantiateIn(ByVal container As System.Web.UI.Control) Implements System.Web.UI.ITemplate.InstantiateIn Select Case Me._itemType Case ListItemType.Header Dim l As New Literal l.Text = "&lt;b&gt;" & Me._fieldName & "</b>" container.Controls.Add(l) Case ListItemType.Item Select Case Me._infoType Case "Button" Dim ib As New Button() Dim eb As New Button() ib.ID = "InsertButton" eb.ID = "EditButton" ib.Text = "Insert" eb.Text = "Edit" ib.CommandName = "Edit" eb.CommandName = "Edit" AddHandler ib.Click, AddressOf Me.InsertButton_OnClick 
AddHandler eb.Click, AddressOf Me.EditButton_OnClick container.Controls.Add(ib) container.Controls.Add(eb) Case Else Dim l As New Label l.ID = Me._fieldName l.Text = "" AddHandler l.DataBinding, AddressOf Me.OnDataBinding container.Controls.Add(l) End Select Case ListItemType.EditItem Select Case Me._infoType Case "Button" Dim b As New Button b.ID = "UpdateButton" b.Text = "Update" b.CommandName = "Update" b.OnClientClick = "return confirm('Sure?')" container.Controls.Add(b) Case Else Dim t As New TextBox t.ID = Me._fieldName AddHandler t.DataBinding, AddressOf Me.OnDataBinding container.Controls.Add(t) End Select End Select End Sub Private Sub InsertButton_OnClick(ByVal sender As Object, ByVal e As EventArgs) Console.WriteLine("insert click") End Sub Private Sub EditButton_OnClick(ByVal sender As Object, ByVal e As EventArgs) Console.WriteLine("edit click") End Sub Private Sub OnDataBinding(ByVal sender As Object, ByVal e As EventArgs) Dim boundValue As Object = Nothing Dim ctrl As Control = sender Dim dataItemContainer As IDataItemContainer = ctrl.NamingContainer boundValue = DataBinder.Eval(dataItemContainer.DataItem, Me._fieldName) Select Case Me._itemType Case ListItemType.Item Dim fieldLiteral As Label = sender fieldLiteral.Text = boundValue.ToString() Case ListItemType.EditItem Dim fieldTextbox As TextBox = sender fieldTextbox.Text = boundValue.ToString() End Select End Sub End Class

    Read the article

  • Push Trunk or Push Branch to Production

    - by coffeeaddict
    I'd like to get an idea of some processes on build process with Tortoise SVN. Primarily I'm wondering do you push: The Mainline Trunk A branch after QA has grabbed it into a working copy locally and tested the branch and then some build pushes that branch The problem I have is I work at a craphole (hey, it is what it is and I'm venting on stackoverflow, you better believe it..good way to relieve stress due to complete utter chaos) and we have no formal process for pushing anything. In fact even worse my boss directly codes against production. When I have changes, he pushes the mainline trunk. The problem becomes when I make database changes on our Dev database for lets say Branch A. Well...that breaks Branch B and C. I have like 4 projects going on at once! Why? Well, I will not get into that (chaos). So consequently I rename a table field, or add a field or whatever in SQL Server and walla, now my other branches have stale code pointing to previous field names. So what happens? I have to merge certain changes to this branch, to that branch, etc. It feels like a war zone. Finally, what happens is I try to only merge the minimum. Lets say I made DB changes for Branch A's code but now I had to jump back on Branch B's project. Well I need to merge "some" of A's changes over for those database changes so that B's code is not going to bomb out and is able to work with the new table changes. Finally boss pushes the mainline trunk to production. Now I get an email "you forgot to remove the hyperlink for this". That hyperlink was actually a feature I added in Branch A. But what he's talking about here is he just pushed the mainline trunk to production which now has my merged changes from Branch B and any database scripts for Branch A because remember I had some DB changes and if he pushes code, it's got to reflect those changes thus some partial database changes must also be pushed even if it's not related to this project. Well...I missed the hyperlink, so kill me. Maybe that's why we need a build process boss? (sorry, it's been a nightmare working here which is why this thread is getting so detailed). Anyway, obviously this is a nightmare. And he dictates almost everything. The only reason we have source control is because I've worked on hard core teams and that's the first thing you setup. Well there was none here. Problem is I can't dictate the structure..he does but he's never really used source control!! My God. So we have no QA. This is an e-commerce website. That's another huge issue. So consequently I'm expected to be perfect. That means mainline trunk needs to be perfect for whatever we're pushing, whatever branch feature. Is this luda? wtf do I do? I could go off on him after tying so many times tactfully to explain that we need a freakin build process (not just copy local mainline trunk to production!) but I've tried to push before and got yelled at. So I gave up on that. So it will help me tremendously to know how others are pushing their source from Tortoise to production. I was not the person pushing when I was on previous teams so really I'm not too versed in build processes. We are a fairly good size e-commerce site and get a couple millions hits a month.

    Read the article

  • SQL Select syntax error

    - by Odette
    hi guys thanks for your help yesterday. I am now trying to incorporate the query from yesterday into an existing query so I can show the highest itemcode's reporting group in the existing query..but I have a syntax error somewhere at my Select statement. ERROR: Keyword SELECT not expected. I have tried putting brackets at every possible place but still no go..can you please help? (ps-this whole query has been giving me nightmares!) WITH CALC1 AS (SELECT OTQUOT, OTIT01 AS ITEMS, ROUND(OQCQ01 * OVRC01,2) AS COST FROM @[email protected] WHERE OTIT01 < '' UNION ALL SELECT OTQUOT, OTIT02 AS ITEMS, ROUND(OQCQ02 * OVRC02,2) AS COST FROM @[email protected] WHERE OTIT02 < '' UNION ALL SELECT OTQUOT, OTIT03 AS ITEMS, ROUND(OQCQ03 * OVRC03,2) AS COST FROM @[email protected] WHERE OTIT03 < '' UNION ALL SELECT OTQUOT, OTIT04 AS ITEMS, ROUND(OQCQ04 * OVRC04,2) AS COST FROM @[email protected] WHERE OTIT04 < '' UNION ALL SELECT OTQUOT, OTIT05 AS ITEMS, ROUND(OQCQ05 * OVRC05,2) AS COST FROM @[email protected] WHERE OTIT05 < '' UNION ALL SELECT OTQUOT, OTIT06 AS ITEMS, ROUND(OQCQ06 * OVRC06,2) AS COST FROM @[email protected] WHERE OTIT06 < '' UNION ALL SELECT OTQUOT, OTIT07 AS ITEMS, ROUND(OQCQ07 * OVRC07,2) AS COST FROM @[email protected] WHERE OTIT07 < '' UNION ALL SELECT OTQUOT, OTIT08 AS ITEMS, ROUND(OQCQ08 * OVRC08,2) AS COST FROM @[email protected] WHERE OTIT08 < '' UNION ALL SELECT OTQUOT, OTIT09 AS ITEMS, ROUND(OQCQ09 * OVRC09,2) AS COST FROM @[email protected] WHERE OTIT09 < '' UNION ALL SELECT OTQUOT, OTIT10 AS ITEMS, ROUND(OQCQ10 * OVRC10,2) AS COST FROM @[email protected] WHERE OTIT10 < '' ) (SELECT OTQUOT, DESC FROM ( SELECT OTQUOT, ITEMS, B.IXRPGP AS GROUP, C.OTRDSC AS DESC, COST, ROW_NUMBER() OVER (ORDER BY COST DESC) AS RN FROM CALC1 AS A INNER JOIN @[email protected] AS B ON (A.ITEMS = B.IKITMC) INNER JOIN DATAGRP.GDSGRP AS C ON (B.IXRPGP = C.OKRPGP) ) T WHERE T.RN = 1) SELECT A.OKPBRN, A.OCAREA, A.OTCCDE, A.OTCNAM, A.OTSMAN, A.OKPBRN||A.OAPNUM AS OTQUOT, A.OTONUM, A.OTCAD1, A.OTCAD2, A.OTCAD3, A.OTPCDE, A.OTDEL1, A.OTDEL2, A.OTDEL3, CHAR(DATE(CASE WHEN SUBSTR(A.ODOQDT,5,4) = '0000' THEN '0001' ELSE SUBSTR(A.ODOQDT,5,4) END ||'-'|| CASE WHEN SUBSTR(A.ODOQDT,4,2) = '00' THEN '01' ELSE SUBSTR(A.ODOQDT,3,2) END ||'-'|| CASE WHEN SUBSTR(A.ODOQDT,1,2) = '00' THEN '01' ELSE SUBSTR(A.ODOQDT,1,2) END), ISO) AS ODOQDT_CCYYMMDD, CHAR(DATE(CASE WHEN SUBSTR(A.ODDELD,7,2) = '' THEN '0001' ELSE '20'||SUBSTR(A.ODDELD,7,2) END ||'-'|| CASE WHEN SUBSTR(A.ODDELD,4,2) = '' THEN '01' ELSE SUBSTR(A.ODDELD,4,2) END ||'-'|| CASE WHEN SUBSTR(A.ODDELD,1,2) = '' THEN '01' ELSE SUBSTR(A.ODDELD,1,2) END), ISO) AS ODDELD_CCYYMMDD, B.DESC, A.OVQTVL FROM @[email protected] AS A INNER JOIN CALC1 AS B ON (A.OKPBRN||A.OAPNUM = B.OTQUOT) WHERE A.OKPBRN = '@OKPBRN@' AND A.OTCCDE NOT LIKE '*DEP%' AND CHAR(DATE(CASE WHEN SUBSTR(A.ODOQDT,5,4) = '0000' THEN '0001' ELSE SUBSTR (A.ODOQDT,5,4) END ||'-'|| CASE WHEN SUBSTR(A.ODOQDT,4,2) = '00' THEN '01' ELSE SUBSTR(A.ODOQDT,3,2) END ||'-'|| CASE WHEN SUBSTR(A.ODOQDT,1,2) = '00' THEN '01' ELSE SUBSTR(A.ODOQDT,1,2) END), ISO) = CHAR(CURDATE() - 3 MONTH, ISO) AND A.OCQF01 = '0' AND A.OCQF02 = '0' AND A.OCQF04 = '0' AND A.OCQF05 = '0' AND A.OCQF06 = '0' AND A.OCQF07 = '0' AND A.OCQF08 = '0' AND A.OCQF09 = '0' AND A.OCQF10 = '1' AND A.OTCGRP LIKE 'S/%' ORDER BY A.OTSMAN ASC, A.OVQTVL DESC, CHAR(DATE(CASE WHEN SUBSTR(A.ODDELD,7,2) = '' THEN '0001' ELSE '20'||SUBSTR(A.ODDELD,7,2) END ||'-'|| CASE WHEN SUBSTR(A.ODDELD,4,2) = '' THEN '01' ELSE SUBSTR(A.ODDELD,4,2) END ||'-'|| CASE WHEN 
SUBSTR(A.ODDELD,1,2) = '' THEN '01' ELSE SUBSTR(A.ODDELD,1,2) END),ISO) ASC
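    The immediate cause of "Keyword SELECT not expected" is that two separate statements follow the WITH clause: the parenthesised SELECT that picks the top item's reporting group, and then the main SELECT. A WITH list can feed only one statement, so the intermediate result has to become another CTE. Also note that GROUP and DESC are reserved words and are risky column aliases, and the `< ''` comparisons were presumably `<> ''` before the markup ate the character. A minimal sketch of the intended shape, with illustrative table names standing in for the @FILE@ placeholders and PARTITION BY OTQUOT added on the assumption that you want the top item per quotation:

      WITH CALC1 AS (
          SELECT OTQUOT, OTIT01 AS ITEMS, ROUND(OQCQ01 * OVRC01, 2) AS COST
          FROM   QUOTELINES
          WHERE  OTIT01 <> ''
          -- ... UNION ALL the remaining nine item/cost column pairs here, as in the original ...
      ),
      TOPGROUP AS (
          SELECT OTQUOT, GRPDESC
          FROM (
              SELECT A.OTQUOT, C.OTRDSC AS GRPDESC,
                     ROW_NUMBER() OVER (PARTITION BY A.OTQUOT ORDER BY A.COST DESC) AS RN
              FROM   CALC1 A
              JOIN   ITEMMASTER     B ON A.ITEMS  = B.IKITMC
              JOIN   DATAGRP.GDSGRP C ON B.IXRPGP = C.OKRPGP
          ) T
          WHERE  T.RN = 1
      )
      SELECT H.OKPBRN, H.OTCCDE, H.OTCNAM, H.OKPBRN || H.OAPNUM AS OTQUOT,
             G.GRPDESC, H.OVQTVL
      FROM   QUOTEHEADER H
      JOIN   TOPGROUP    G ON H.OKPBRN || H.OAPNUM = G.OTQUOT
      WHERE  H.OKPBRN = '@OKPBRN@'       -- plus the remaining date and flag filters from the original
      ORDER  BY H.OTSMAN ASC, H.OVQTVL DESC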

    Read the article

  • Search like google

    - by Rajanikant
    I have a task to make a search module in which i have database users and tablename userProfile and i want to search profile when i entered text in text box for ex. if i entered "I am looking for MBA in delhi" or 'mba information in delhi' it will displayed all user registered expertise as mba and city in delhi . this will be like job portal or any social networking portal my database is -- phpMyAdmin SQL Dump -- version 2.8.1 -- http://www.phpmyadmin.net -- Host: localhost -- Generation Time: May 01, 2010 at 10:58 AM -- Server version: 5.0.21 -- PHP Version: 5.1.4 -- Database: users -- -- Table structure for table userProfile CREATE TABLE userprofile ( id int(11) NOT NULL auto_increment, name varchar(50) collate latin1_general_ci NOT NULL, expertise varchar(50) collate latin1_general_ci NOT NULL, city varchar(50) collate latin1_general_ci NOT NULL, state varchar(50) collate latin1_general_ci NOT NULL, discription varchar(500) collate latin1_general_ci NOT NULL, PRIMARY KEY (id) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci AUTO_INCREMENT=3 ; -- -- Dumping data for table userProfile INSERT INTO userProfile VALUES (1, 'a', 'MBA HR', 'Delhi', 'Delhi', 'Fortune is top management college in Delhi, Best B-schools in India providing business studies and management training. FIIB is Delhi based most ranked ...'); INSERT INTO userProfile VALUES (2, 'b', 'MBA marketing', 'Delhi', 'Delhi', 'Fortune is top management college in Delhi, Best B-schools in India providing business studies and management training. FIIB is Delhi based most ranked ...'); and search.php page <?php include("config.php"); include("class.search.php"); $br=new search(); if($_POST['searchbutton']) { $str=$_POST['textfield']; $brstr=$br->breakkey($str); } ?> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Untitled Document</title> </head> <body> <table width="100%" border="0"> <form name="frmsearch" method="post"> <tr> <td width="367">&nbsp;</td> <td width="300"><label> <input name="textfield" type="text" id="textfield" size="50" /> </label></td> <td width="294"><label> <input type="submit" name="searchbutton" id="button" value="Search" /> </label></td> </tr></form> <tr> <td>&nbsp;</td> <td>&nbsp;</td> <td>&nbsp;</td> </tr> <tr> <td>&nbsp;</td> <td>&nbsp;</td> <td>&nbsp;</td> </tr> </table> </body> </html> and config.php is <?php error_reporting(E_ALL); $host="localhost"; $username="root"; $password=""; $dbname="users"; $con=mysql_connect($host,$username,$password) or die("could not connect database"); $db=mysql_select_db($dbname,$con) or die("could not select database"); ?> and class.search.php is <?php class search { function breakkey($key) { global $db; $words=explode(' ',$key); return $words; } function searchitem($perm) { global $db; foreach($perm as $k=>$v) { $sql="select * from users" } } } ?>
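    For free-text input such as "I am looking for MBA in delhi", building the WHERE clause by hand for every keyword gets unwieldy; since the table is MyISAM, a MySQL FULLTEXT index is one way to let the database do the keyword matching and ranking. A minimal sketch, not the code above; note that with the default ft_min_word_len of 4 a short token like "MBA" is ignored, so either lower that setting or fall back to LIKE '%mba%' for short words:

      ALTER TABLE userprofile
        ADD FULLTEXT INDEX ft_profile (expertise, city, state, discription);

      -- Natural-language mode scores rows by the search words they contain;
      -- filler words such as "I", "am", "for", "in" are ignored automatically.
      SELECT id, name, expertise, city,
             MATCH (expertise, city, state, discription)
             AGAINST ('I am looking for MBA in delhi') AS relevance
      FROM   userprofile
      WHERE  MATCH (expertise, city, state, discription)
             AGAINST ('I am looking for MBA in delhi')
      ORDER  BY relevance DESC;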

    Read the article

  • Update MySQL table using data from a text file through Java

    - by Karthi Karthi
    I have a text file with four lines, each line contains comma separated values like below file My file is: Raj,[email protected],123455 kumar,[email protected],23453 shilpa,[email protected],765468 suraj,[email protected],876567 and I have a MySQL table which contains four fields firstname lastname email phno ---------- ---------- --------- -------- Raj babu [email protected] 2343245 kumar selva [email protected] 23453 shilpa murali [email protected] 765468 suraj abd [email protected] 876567 Now I want to update my table using the data in the above text file through Java. I have tried using bufferedReader to read from the file and used split method using comma as delimiter and stored it in array. But it is not working. Any help appreciated. This is what I have tried so far void readingFile() { try { File f1 = new File("TestFile.txt"); FileReader fr = new FileReader(f1); BufferedReader br = new BufferedReader(fr); String strln = null; strln = br.readLine(); while((strln=br.readLine())!=null) { // System.out.println(strln); arr = strln.split(","); strfirstname = arr[0]; strlastname = arr[1]; stremail = arr[2]; strphno = arr[3]; System.out.println(strfirstname + " " + strlastname + " " + stremail +" "+ strphno); } // for(String i : arr) // { // } br.close(); fr.close(); } catch(IOException e) { System.out.println("Cannot read from File." + e); } try { st = conn.createStatement(); String query = "update sampledb set email = stremail,phno =strphno where firstname = strfirstname "; st.executeUpdate(query); st.close(); System.out.println("sampledb Table successfully updated."); } catch(Exception e3) { System.out.println("Unable to Update sampledb table. " + e3); } } and the output i got is: Ganesh Pandiyan [email protected] 9591982389 Dass Jeyan [email protected] 9689523645 Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: 1 Gowtham Selvan [email protected] 9894189423 at TemporaryPackages.FileReadAndUpdateTable.readingFile(FileReadAndUpdateTable.java:35) at TemporaryPackages.FileReadAndUpdateTable.main(FileReadAndUpdateTable.java:72) Java Result: 1 @varadaraj: This is the code of yours.... String stremail,strphno,strfirstname,strlastname; // String[] arr; Connection conn; Statement st; void readingFile() { try { BufferedReader bReader= new BufferedReader(new FileReader("TestFile.txt")); String fileValues; while ((fileValues = bReader.readLine()) != null) { String[] values=fileValues .split(","); strfirstname = values[0]; // strlastname = values[1]; stremail = values[1]; strphno = values[2]; System.out.println(strfirstname + " " + strlastname + " " + stremail +" "+ strphno); } bReader.close(); } catch (IOException e) { System.out.println("File Read Error"); } // for(String i : arr) // { // } try { st = conn.createStatement(); String query = "update sampledb set email = stremail,phno =strphno where firstname = strfirstname "; st.executeUpdate(query); st.close(); System.out.println("sampledb Table successfully updated."); } catch(Exception e3) { System.out.println("Unable to Update sampledb table. " + e3); } }
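    Besides the ArrayIndexOutOfBoundsException (the first line is read and discarded before the loop, and a blank line splits into fewer than three fields), the UPDATE never uses the Java variables: the names stremail, strphno and strfirstname are embedded in the SQL text instead of their values. One fix is a parameterised PreparedStatement executed per line; a pure-SQL alternative sketch is to bulk-load the file into a staging table and update with a join (the staging table name is made up here, and the file must be readable by the MySQL client):

      CREATE TABLE staging_contacts (
          firstname VARCHAR(50),
          email     VARCHAR(100),
          phno      VARCHAR(20)
      );

      LOAD DATA LOCAL INFILE 'TestFile.txt'
      INTO TABLE staging_contacts
      FIELDS TERMINATED BY ','
      LINES TERMINATED BY '\n'          -- use '\r\n' if the file was written on Windows
      (firstname, email, phno);

      UPDATE sampledb s
      JOIN   staging_contacts c ON c.firstname = s.firstname
      SET    s.email = c.email,
             s.phno  = c.phno;

      DROP TABLE staging_contacts;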

    Read the article

  • Solving Combinatory Problems with LINQ /.NET4

    - by slf
    I saw this article pop-up in my MSDN RSS feed, and after reading through it, and the sourced article here I began to wonder about the solution. The rules are simple: Find a number consisting of 9 digits in which each of the digits from 1 to 9 appears only once. This number must also satisfy these divisibility requirements: The number should be divisible by 9. If the rightmost digit is removed, the remaining number should be divisible by 8. If the rightmost digit of the new number is removed, the remaining number should be divisible by 7. And so on, until there's only one digit (which will necessarily be divisible by 1). This is his proposed monster LINQ query: // C# and LINQ solution to the numeric problem presented in: // http://software.intel.com/en-us/blogs/2009/12/07/intel-parallel-studio-great-for-serial-code-too-episode-1/ int[] oneToNine = new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; // the query var query = from i1 in oneToNine from i2 in oneToNine where i2 != i1 && (i1 * 10 + i2) % 2 == 0 from i3 in oneToNine where i3 != i2 && i3 != i1 && (i1 * 100 + i2 * 10 + i3) % 3 == 0 from i4 in oneToNine where i4 != i3 && i4 != i2 && i4 != i1 && (i1 * 1000 + i2 * 100 + i3 * 10 + i4) % 4 == 0 from i5 in oneToNine where i5 != i4 && i5 != i3 && i5 != i2 && i5 != i1 && (i1 * 10000 + i2 * 1000 + i3 * 100 + i4 * 10 + i5) % 5 == 0 from i6 in oneToNine where i6 != i5 && i6 != i4 && i6 != i3 && i6 != i2 && i6 != i1 && (i1 * 100000 + i2 * 10000 + i3 * 1000 + i4 * 100 + i5 * 10 + i6) % 6 == 0 from i7 in oneToNine where i7 != i6 && i7 != i5 && i7 != i4 && i7 != i3 && i7 != i2 && i7 != i1 && (i1 * 1000000 + i2 * 100000 + i3 * 10000 + i4 * 1000 + i5 * 100 + i6 * 10 + i7) % 7 == 0 from i8 in oneToNine where i8 != i7 && i8 != i6 && i8 != i5 && i8 != i4 && i8 != i3 && i8 != i2 && i8 != i1 && (i1 * 10000000 + i2 * 1000000 + i3 * 100000 + i4 * 10000 + i5 * 1000 + i6 * 100 + i7 * 10 + i8) % 8 == 0 from i9 in oneToNine where i9 != i8 && i9 != i7 && i9 != i6 && i9 != i5 && i9 != i4 && i9 != i3 && i9 != i2 && i9 != i1 let number = i1 * 100000000 + i2 * 10000000 + i3 * 1000000 + i4 * 100000 + i5 * 10000 + i6 * 1000 + i7 * 100 + i8 * 10 + i9 * 1 where number % 9 == 0 select number; // run it! foreach (int n in query) Console.WriteLine(n); Octavio states "Note that no attempt at all has been made to optimize the code", what I'd like to know is what if we DID attempt to optimize this code. Is this really the best this code can get? I'd like to know how we can do this best with .NET4, in particular doing as much in parallel as we possibly can. I'm not necessarily looking for an answer in pure LINQ, assume .NET4 in any form (managed c++, c#, etc all acceptable).

    Read the article

  • Changing html <-> ajax <-> php/mysql to threaded approach

    - by Saif Bechan
    I have an application that needs to be updated real-time. There are various counters and other information that have to come from the database and the system needs to be up to date for the user. My approach now is just a normal ajax request every second to get the new values from the database. There is a JavaScript which loops every second getting the values trough ajax. This works fine but I think its very inefficient. The problem There is an ajax script that loops every second requesting data from php # On the server it has to load the PHP interpeter The PHP file has to get the data and format it correctly # PHP has to make a connection with the mysql database Work with the database(reads,never writes) Format the data so it can be send Send the data back to the browser # Close the database connection, and close the php interpeter Last the browser has to read these values and update the various html parts Now with this approach it has to load the interpreter and make a db connection every second. I was thinking of a way to make this more efficient, and maybe use a threaded approach to this. Threaded aprouch Do a post to the PHP when you enter the page and keep the connection alive In PHP only load the interpreter once, and make a connection to the DB ones Every second send an ajax response to the javascript listener The javascript listener than just changes values as the response from php arrives. I think this approach will be a great optimization to the server load and overall performance. But I can spot some weak point in the system and i need some help with these. Problems with the approach PHP execution time limit I don't think PHP is designed for such a setup. I know there is a time limit on php script execution. I don't know if an everlasting loop in PHP will cause any serious cpu/memory problems. Sending ajax request without breaking I don't know if it is possible to have just one ajax post action and have open and accepting data. user exists the page What will happen when the user exists the page and the PHP script is still going. Will it go on forever. security issues so far i can't think of any security issues. Almost every setup you use have some security issues. Maybe there are some with this solution I do not know of. Open to other solution I really want to change the setup as it is now and move to a threaded approach or better. If someone has a better approach to tackle this I definitely want to hear that. Maybe the usage of some other scripts is better suited for having an ongoing runtime. I only know php and java so any suggestions are welcome and I am willing to dig trough. I know there are things like perl, python etcetera that are used for this type of threaded but i don't know which one is best suited. When using other script If the best way is to go with other type of script like perl,python etcetera I do have some critera. The script has to be accessible via ajax post If it accepts some kind of json encode/decode it would be nice The script has to be able to access the session file This is essential because I need to know if the user is logged in The script has to be able to easily talk to MySQL All comments are welcome, and I hope this question is helpful to other also. Cheers!

    Read the article

  • jquery-autocomplete does not work with my django app.

    - by HWM-Rocker
    Hi everybody, I have a problem with the jquery-autocomplete pluging and my django script. I want an easy to use autocomplete plugin. And for what I see this (http://code.google.com/p/jquery-autocomplete/) one seems very usefull and easy. For the django part I use this (http://code.google.com/p/django-ajax-selects/) I modified it a little, because the out put looked a little bit weired to me. It had 2 '\n' for each new line, and there was no Content-Length Header in the response. First I thought this could be the problem, because all the online examples I found had them. But that was not the problem. I have a very small test.html with the following body: <body> <form action="" method="post"> <p><label for="id_tag_list">Tag list:</label> <input id="id_tag_list" name="tag_list" maxlength="200" type="text" /> </p> <input type="submit" value="Submit" /> </form> </body> And this is the JQuery call to add autocomplete to the input. function formatItem_tag_list(bla,row) { return row[2] } function formatResult_tag_list(bla,row) { return row[1] } $(document).ready(function(){ $("input[id='id_tag_list']").autocomplete({ url:'http://gladis.org/ajax/tag', formatItem: formatItem_tag_list, formatResult: formatResult_tag_list, dataType:'text' }); }); When I'm typing something inside the Textfield Firefox (firebug) and Chromium-browser indicates that ther is an ajax call but with no response. If I just copy the line into my browser, I can see the the response. (this issue is solved, it was a safety feature from ajax not to get data from another domain) For example when I am typing Bi in the textfield, the url "http://gladis.org/ajax/tag?q=Bi&max... is generated. When you enter this in your browser you get this response: 4|Bier|Bier 43|Kolumbien|Kolumbien 33|Namibia|Namibia Now my ajax call get the correct response, but there is still no list showing up with all the possible entries. I tried also to format the output, but this doesn't work either. I set brakepoints to the function and realized that they won't be called at all. Here is a link to my minimum HTML file http://gladis.org/media/input.html Has anybody an idea what i did wrong. I also uploaded all the files as a small zip at http://gladis.org/media/example.zip. Thank you for your help! [Edit] here is the urls conf: (r'^ajax/(?P<channel>[a-z]+)$', 'ajax_select.views.ajax_lookup'), and the ajax lookup channel configuration AJAX_LOOKUP_CHANNELS = { # the simplest case, pass a DICT with the model and field to search against : 'tag' : dict(model='htags.Tag', search_field='text'), } and the view: def ajax_lookup(request,channel): """ this view supplies results for both foreign keys and many to many fields """ # it should come in as GET unless global $.ajaxSetup({type:"POST"}) has been set # in which case we'll support POST if request.method == "GET": # we could also insist on an ajax request if 'q' not in request.GET: return HttpResponse('') query = request.GET['q'] else: if 'q' not in request.POST: return HttpResponse('') # suspicious query = request.POST['q'] lookup_channel = get_lookup(channel) if query: instances = lookup_channel.get_query(query,request) else: instances = [] results = [] for item in instances: results.append(u"%s|%s|%s" % (item.pk,lookup_channel.format_item(item),lookup_channel.format_result(item))) ret_string = "\n".join(results) resp = HttpResponse(ret_string,mimetype="text/html") resp['Content-Length'] = len(ret_string) return resp

    Read the article

  • Android AlertDialog wait for result in calling activity

    - by insanesam
    I am trying to use an AlertDialog in my app to select the quantity of an item. The problem is that the activity that calls the AlertDialog doesn't wait for it to update the item before it adds it to the SQLite Database and change intents. At the moment, the QuantitySelector (AlertDialog) appears, then disappears straight away and changes the MealActivity class (which is just a ListView that reads from the database) through the intent change with an update to the database with quantity 0. I need the Activity to wait for the AlertDialog to close before it updates the database. What would be the correct way of implementing this? Here is some code for you: QuantitySelector (which runs the alertdialog): public class QuantitySelector{ protected static final int RESULT_OK = 0; private Context _context; private DatabaseHandler db; private HashMap<String, Double> measures; private Item item; private View v; private EditText quan; private NumberPicker pick; private int value; private Quantity quantity; /** * Function calls the quantity selector AlertDialog * @param _c: The application context * @param item: The item to be added to consumption * @return The quantity that is consumed */ public void select(Context _c, Item item, Quantity quantity){ this._context = _c; this.item = item; this.quantity = quantity; db = new DatabaseHandler(_context); //Get the measures to display createData(); //Set up the custom view LayoutInflater inflater = LayoutInflater.from(_context); v = inflater.inflate(R.layout.quantity_selector, null); //Set up the input fields quan = (EditText) v.findViewById(R.id.quantityNumber); pick = (NumberPicker) v.findViewById(R.id.numberPicker1); //Set up the custom measures into pick pick.setMaxValue(measures.size()-1); pick.setDisplayedValues(measures.keySet().toArray(new String[0])); //Start the alert dialog runDialog(); } public void createData(){ measures = new HashMap<String, Double>(); //Get the measurements from the database if(item!=null){ measures.putAll(db.getMeasures(item)); } //Add grams as the default measurement if(!measures.keySet().contains("grams")){ //Add grams as a standard measure measures.put("grams", 1.0); } } public void runDialog(){ AlertDialog dialog = new AlertDialog.Builder(_context).setTitle("Select Quantity") .setView(v) .setPositiveButton("OK", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int whichButton) { //Change the consumption to the new quantity if(!quan.getText().toString().matches("")){ value = Integer.parseInt(quan.getText().toString()); //Check if conversion from other units is needed String s[] = pick.getDisplayedValues(); String a = s[pick.getValue()]; //Convert the chosen measure back to grams if(!a.equals("grams")){ for(String m : measures.keySet()){ if(m==a){ value = (int) (value * measures.get(m)); } } } } quantity.setQuantity(value); dialog.dismiss(); } }) .setNegativeButton("Cancel", null).create(); dialog.show(); } } The method from favouritesAdapter (which calls the alertdialog): add.setOnClickListener(new OnClickListener(){ public void onClick(View arg0) { QuantitySelector q = new QuantitySelector(); Quantity quan = new Quantity(); q.select(_context, db.getItem(p.getID()), quan); db.addConsumption(p.getID(), p.getFavouriteShortName(), quan.getQuantity(), "FAVOURITE"); Intent intent = new Intent(_context,MealActivity.class); _context.startActivity(intent); } }); All help is appreciated :)

    Read the article

  • XML Parsing Error: junk after document element

    - by Jake
I am using the following script to generate a RSS feed for my site:

    <?php
    class RSS
    {
        public function RSS()
        {
            $root = $_SERVER['DOCUMENT_ROOT'];
            require_once ("../connect.php");
        }

        public function GetFeed()
        {
            return $this->getDetails() . $this->getItems();
        }

        private function dbConnect()
        {
            DEFINE ('LINK', mysql_connect (DB_HOST, DB_USER, DB_PASSWORD));
        }

        private function getDetails()
        {
            $detailsTable = "rss_feed_details";
            $this->dbConnect($detailsTable);
            $query = "SELECT * FROM ". $detailsTable ." WHERE feed_category = ''";
            $result = mysql_db_query (DB_NAME, $query, LINK);
            while($row = mysql_fetch_array($result))
            {
                $details = '<?xml version="1.0" encoding="ISO-8859-1" ?>
                    <rss version="2.0">
                    <channel>
                        <title>'. $row['title'] .'</title>
                        <link>'. $row['link'] .'</link>
                        <description>'. $row['description'] .'</description>
                        <language>'. $row['language'] .'</language>
                        <image>
                            <title>'. $row['image_title'] .'</title>
                            <url>'. $row['image_url'] .'</url>
                            <link>'. $row['image_link'] .'</link>
                            <width>'. $row['image_width'] .'</width>
                            <height>'. $row['image_height'] .'</height>
                        </image>';
            }
            return $details;
        }

        private function getItems()
        {
            $itemsTable = "rss_posts";
            $this->dbConnect($itemsTable);
            $query = "SELECT * FROM ". $itemsTable ." ORDER BY id DESC";
            $result = mysql_db_query (DB_NAME, $query, LINK);
            $items = '';
            while($row = mysql_fetch_array($result))
            {
                $items .= '<item>
                    <title>'. $row["title"] .'</title>
                    <link>'. $row["link"] .'</link>
                    <description><![CDATA['.$row["readable_date"]."<br /><br />".$row["description"]."<br /><br />".']]></description>
                    </item>';
            }
            $items .= '</channel>
                </rss>';
            return $items;
        }
    }
    ?>

The baffling thing is, the script works perfectly fine on my localhost but gives the following error on my remote server:

    XML Parsing Error: junk after document element
    Location: http://mysite.com/rss/main/
    Line Number 2, Column 1: <b>Parse error</b>: syntax error, unexpected T_STRING in <b>/home/studentw/public_html/rss/global-reach/rssClass.php</b> on line <b>1</b><br />
    ^

Can someone please tell me what's wrong?

    Read the article

  • How to eliminate NULL fields in TSQL

    - by salvationishere
I am developing a TSQL query in SSMS 2008 R2. I am trying to develop this query to identify one record / client. Because some of these values are NULL, I am currently doing LEFT JOINs on most of the tables. But the problem with the LEFT JOINs is that now I get more than one record for some clients. But if I change this to INNER JOINs then some clients are excluded entirely because they have NULL values for these columns. How do I limit the query result to just one record / client regardless of NULL values? And if there are non-NULL values then I want it to choose the record with non-NULL values.

Here is some of my current output:

    group_profile_id profile_name license_number is_accepting is_accepting_placement managing_office region vendor_name vendor_id applicant_type Office Address status_description Cert Date2 race ethnicity_desc religion
    9CD932F1-6BE1-4F80-AB81-0CE32C565BCF Atreides Foster Home 1 Atreides1 1 Yes Manchester, NH Gulf Atlantic Atreides1 00000007 Treatment Foster Home 4042 Arrakis Avenue, Springfield, VT 05156 Open/Re-opened 2011-06-01 00:00:00.000 NULL NULL NULL
    DCE354D5-A7CC-409F-B5A3-89BF664B7718 Averitte, Leon and Sandra 00000044 1 Yes Birmingham, AL Gulf Atlantic AL Averitte, Leon and Sandra 00000044 Treatment Foster Home 3816 5th Avenue, Bessemer, AL 35020, (205)482-4307 Open/Re-opened 2011-08-05 00:00:00.000 NULL NULL NULL
    DCE354D5-A7CC-409F-B5A3-89BF664B7718 Averitte, Leon and Sandra 00000044 1 Yes Birmingham, AL Gulf Atlantic AL Averitte, Leon and Sandra 00000044 Treatment Foster Home 3816 5th Avenue, Bessemer, AL 35020, (205)482-4307 Open/Re-opened 2011-08-05 00:00:00.000 Caucasian/White Non Hispanic NULL
    AD02A43C-6F38-4F35-8C9E-E12422690BFB Bass, Matthew and Sarah 00000076 1 Yes Jackson, MS Central Gulf Coast MS Bass, Matthew and Sarah 00000076 Treatment Foster Home 506 Eagelwood Drive, Florence, MS 39073, (601)665-7169 Open/Re-opened 2011-04-01 00:00:00.000 NULL NULL NULL
    AD02A43C-6F38-4F35-8C9E-E12422690BFB Bass, Matthew and Sarah 00000076 1 Yes Jackson, MS Central Gulf Coast MS Bass, Matthew and Sarah 00000076 Treatment Foster Home 506 Eagelwood Drive, Florence, MS 39073, (601)665-7169 Open/Re-opened 2011-04-01 00:00:00.000 Caucasian/White NULL Baptist

You can see that both the Averitte and Bass profile names have one record with NULL race, ethnicity and religion. How do I eliminate these rows (rows 2 and 4)?
Here is my query currently:

    select distinct
        gp.group_profile_id,
        gp.profile_name,
        gp.license_number,
        gp.is_accepting,
        case when gp.is_accepting = 1 then 'Yes'
             when gp.is_accepting = 0 then 'No '
        end as is_accepting_placement,
        mo.profile_name as managing_office,
        regions.[region_description] as region,
        pv.vendor_name,
        pv.id as vendor_id,
        at.description as applicant_type,
        dbo.GetGroupAddress(gp.group_profile_id, null, 0) as [Office Address],
        gsv.status_description,
        ri.[description] as race,
        ethnicity.description as ethnicity_desc,
        religion.description as religion
    from group_profile gp With (NoLock)
    --Office Information
    inner join group_profile_type gpt With (NoLock)
        on gp.group_profile_type_id = gpt.group_profile_type_id
        and gpt.type_code = 'FOSTERHOME'
        and gp.agency_id = @agency_id
        and gp.is_deleted = 0
    inner join group_profile mo With (NoLock)
        on gp.managing_office_id = mo.group_profile_id
    left outer join payor_vendor pv With (NoLock)
        on gp.payor_vendor_id = pv.payor_vendor_id
    left outer join applicant_type at With (NoLock)
        on gp.applicant_type_id = at.applicant_type_id
        and at.is_foster_home = 1
    inner join group_status_view gsv With (NoLock)
        on gp.group_profile_id = gsv.group_profile_id
        and gsv.status_value = 'OPEN'
        and gsv.effective_date = (Select max(b.effective_date)
                                  from group_status_view b With (NoLock)
                                  where gp.group_profile_id = b.group_profile_id)
    left outer join regions With (NoLock)
        on isnull(mo.regions_id, gp.regions_id) = regions.regions_id
    left join enrollment en
        on en.group_profile_id = gp.group_profile_id
    join event_log el
        on el.event_log_id = en.event_log_id
    left join people client
        on client.people_id = el.people_id
    left join race With (NoLock)
        on el.people_id = race.people_id
    left join group_profile_race gpr with (nolock)
        on gpr.race_info_id = race.race_info_id
    left join race_info ri with (nolock)
        on ri.race_info_id = gpr.race_info_id
    left join ethnicity With(NoLock)
        On client.ethnicity = ethnicity.ethnicity_id
    left join religion
        on client.religion = religion.religion_id
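One pattern commonly used for this kind of "best row per client" requirement is to keep the LEFT JOINs but rank each client's rows with ROW_NUMBER so that rows with non-NULL demographics sort first, then keep only the top-ranked row. This is not part of the question above; it is a sketch that reuses the table and column names from the query, trimmed to the demographic joins for brevity:

    select ranked.*
    from (
        select q.*,
               row_number() over (
                   partition by q.group_profile_id
                   order by case when q.race           is null then 1 else 0 end,
                            case when q.ethnicity_desc is null then 1 else 0 end,
                            case when q.religion       is null then 1 else 0 end
               ) as rn
        from (
            -- trimmed to the demographic joins for brevity; in practice the full
            -- SELECT from the question goes here unchanged
            select distinct
                gp.group_profile_id,
                gp.profile_name,
                ri.[description]      as race,
                ethnicity.description as ethnicity_desc,
                religion.description  as religion
            from group_profile gp with (nolock)
            left join enrollment en                        on en.group_profile_id = gp.group_profile_id
            join event_log el                              on el.event_log_id = en.event_log_id
            left join people client                        on client.people_id = el.people_id
            left join race with (nolock)                   on el.people_id = race.people_id
            left join group_profile_race gpr with (nolock) on gpr.race_info_id = race.race_info_id
            left join race_info ri with (nolock)           on ri.race_info_id = gpr.race_info_id
            left join ethnicity with (nolock)              on client.ethnicity = ethnicity.ethnicity_id
            left join religion                             on client.religion = religion.religion_id
        ) as q
    ) as ranked
    where ranked.rn = 1;

Keeping the LEFT JOINs still returns clients whose demographics are entirely NULL, while the rn = 1 filter drops the redundant NULL row whenever a populated one exists for the same client.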

    Read the article

  • How to structure a Kohana MVC application with dynamically added fields and provide validation and f

    - by Matt H
I've got a bit of a problem. I have a Kohana application that has dynamically added fields. The fields that are added are called DISA numbers. In the model I look these up and the result is returned as an array. I encode the array into a JSON string and use jQuery to populate them. The View knows the length of the array and so creates as many DISA elements as required before display. See the code below for a summary of how that works.

What I'm finding is that this is starting to get difficult to manage. The code is becoming messy. Error handling of this type of dynamic content is ending up being spread all over the place. Not only that, it doesn't work how I want. What you see here is just a small snippet of code.

For error handling I am using the validation library. I started by using add_rules on all the fields that come back in the post. As they are always phone numbers I set a required rule (when it's there) and a digit rule on the validation->as_array() keys. That works. The difficulty is actually giving it back to the view: the dynamically added javascript field submits back to the form, I save the contents into a session, and the view has to load up the fields from the database plus those from the previous post and signal the fields that have problems. It's all quite messy and I'm getting this code spread through both the view, the controller and the model.

So my question is: have you done this before in Kohana and how have you handled it? There must be an easier way, right?

Code snippet.

-- edit.php --

    public function phone($id){
        ...
        $this->template->content->disa_numbers = $phones->fetch_disa_numbers($this->account, $id);
        ...
    }

-- phones.php --

    public function fetch_disa_numbers($account, $id)
    {
        $query = $this->db->query("SELECT id, cid_in FROM disa WHERE owner_ext=?", array($id));
        if (!$query){
            return '';
        }
        return $query;
    }

-- edit_phones.php ---

    <script type="text/javascript">
    var disaId = 1;
    function delDisaNumber(element){
        /* Put 'X_' on the front of the element name to mark this for deletion */
        $(element).prev().attr('name', 'X_'+$(element).prev().attr('name'));
        $(element).parent().hide();
    }
    function addDisaNumber(){
        /* input name is prepended with 'N_' which means new */
        $("#disa_numbers").append("<li><input name='N_disa"+disaId+"' id='disa'"+
            "type='text'/><a class='hide' onClick='delDisaNumber(this)'></a></li>");
        disaId++;
    }
    </script>
    ...
    <?php
    echo form::open("edit/saveDisaNumbers/".$phone, array("class"=>"section", "id"=>"disa_form"));
    echo form::open_fieldset(array("class"=>"balanced-grid"));
    ?>
    <ul class="fields" id="disa_numbers">
    <?php
    $disaId = 1;
    foreach ( $disa_numbers as $disa_number ){
        echo '<li>';
        echo form::input('disa'.$disaId, $disa_number->cid_in);
        echo'<a class="hide" onclick="delDisaNumber(this)"></a>';
        echo "</li>";
        $disaId++;
    }
    ?>
    </ul>
    <button type="button"onclick="addDisaNumber()"><a class="add"></a>Add number</button>
    <?php
    echo form::submit('submit', 'Save');
    echo form::close();
    ?>

    Read the article

  • AJAX Password Change without Refresh

    - by Richard
I'm trying to implement password change functionality into my website. I've got all the password changing script, validation, etc. done. But now I need to prevent the page from going to the script page or refreshing. When the user clicks the submit button, I want nothing to change except a message displaying successfully changed or error.

So here's my html:

    <form id="change_Pass" action="" method="post">
        Current Password<input type="password" id="change_password" name="change_password"><br>
        New Password<input type="password" id="new_password" name="new_password"><br>
        Verify Password<input type="password" id="verify_password" name="verify_password"><br>
        <input type="submit" value="Submit" id="change_pass_submit">
    </form>

And my jquery:

    $('#change_pass_submit').click(function(){
        var $this = $(this);
        $.ajax({
            data: $this.serialize(), // get the form data
            type: "POST", // GET or POST
            url: "/Private/change_password.php", // the file to call
            success: function() { // on success..
                //$('#success_div).html(response); // update the DIV
                alert("good");
            },
            error: function() { // on error..
                //$('#error_div).html(e); // update the DIV
                alert("bad");
            }
        });
        return false; //so it doesn't refresh when submitting the page
    });

And my php:

    <?php
    session_start();
    require_once '../classes/Bcrypt.php';
    ini_set('display_errors', 'On');
    error_reporting(E_ALL | E_STRICT);
    $usr = $_SESSION["username"];
    $old_pwd = $_POST["change_password"];
    $new_pwd = $_POST["new_password"];
    $new_pwd = Bcrypt::hash($new_pwd);
    try {
        $link = new PDO('mysql:host=*;dbname=*;charset=UTF-8','*','*');
        $query = "SELECT * FROM Conference WHERE Username = :un";
        $stmt = $link->prepare($query);
        $stmt->bindParam(':un', $usr);
        $stmt->execute();
        $row = $stmt->fetchAll();
        $hash = $row[0]["Password"];
        $is_correct = Bcrypt::check($old_pwd, $hash);
        if($is_correct) {
            $query = "UPDATE Conference SET `Password`=:new_pwd WHERE Username = :usr";
            $stmt = $link->prepare($query);
            $stmt->bindParam(':new_pwd', $new_pwd);
            $stmt->bindParam(':usr', $usr);
            $stmt->execute();
            return true;
        } else
            return false;
    } catch(PDOException $e) {
        print "Error!: " . $e->getMessage() . "<br/>";
        die();
    }

But for some reason, when I hit the submit button, the page STILL goes to change_password.php. I have no idea why, I've looked at so many tutorials and my code matches theirs, but for some reason mine won't stay on the same page. Where did I go wrong?

    Read the article

  • SQL Server 2008: Using Multiple dts Ranges to Build a Set of Dates

    - by raoulcousins
I'm trying to build a query for a medical database that counts the number of patients that were on at least one medication from a class of medications (the medications listed below in the FAST_MEDS CTE) and had either:

1) a diagnosis of myopathy (the list of diagnoses in the FAST_DX CTE), or
2) a CPK lab value above 1000 (the lab value in the FAST_LABS CTE),

and this diagnosis or lab happened AFTER the patient was on a statin.

The query I've included below does that under the assumption that once a patient is on a statin, they're on a statin forever. The first CTE collects the ids of patients that were on a statin along with the first date of their diagnosis, the second those with a diagnosis, and the third those with a high lab value. After this I count those that match the above criteria.

What I would like to do is drop the assumption that once a patient is on a statin, they're on it for life. The table edw_dm.patient_medications has columns called start_dts and end_dts. This table has one row for each prescription written, with start_dts and end_dts denoting the start and end date of the prescription. End_dts could be null, which I'll take to assume that the patient is currently on this medication (it could be a missing record, but I can't do anything about this). If a patient is on two different statins, the start and end dates can overlap, and there may be multiple records of the same medication for a patient, as in a record showing 3-11-2000 to 4-5-2003 and another for the same patient showing 5-6-2007 to 7-8-2009.

I would like to use these two columns to build a query where I'm only counting the patients that had a lab value or diagnosis done during a time when they were already on a statin, or in the first n (say 3) months after they stopped taking a statin. I'm really not sure how to go about rewriting the first CTE to get this information and how to do the comparison after the CTEs are built. I know this is a vague question, but I'm really stumped. Any ideas? As always, thank you in advance.
Here's the current query:

    WITH FAST_MEDS AS
    (
        select distinct statins.mrd_pt_id,
               min(year(statins.order_dts)) as statin_yr
        from edw_dm.patient_medications as statins
        inner join mrd.medications as mrd
            on statins.mrd_med_id = mrd.mrd_med_id
        WHERE mrd.generic_nm in
        (
            'Lovastatin (9664708500)', 'lovastatin-niacin', 'Lovastatin/Niacin', 'Lovastatin',
            'Simvastatin (9678583966)', 'ezetimibe-simvastatin', 'niacin-simvastatin',
            'ezetimibe/Simvastatin', 'Niacin/Simvastatin', 'Simvastatin',
            'Aspirin Buffered-Pravastatin', 'aspirin-pravastatin', 'Aspirin/Pravastatin', 'Pravastatin',
            'amlodipine-atorvastatin', 'Amlodipine/atorvastatin', 'atorvastatin',
            'fluvastatin', 'rosuvastatin'
        )
        and YEAR(statins.order_dts) IS NOT NULL
        and statins.mrd_pt_id IS NOT NULL
        group by statins.mrd_pt_id
    )
    select * into #meds from FAST_MEDS;

    --return patients who had a diagnosis in the list and the year that
    --diagnosis was given
    with FAST_DX AS
    (
        SELECT pd.mrd_pt_id,
               YEAR(pd.init_noted_dts) as init_yr
        FROM edw_dm.patient_diagnoses as pd
        inner join mrd.diagnoses as mrd
            on pd.mrd_dx_id = mrd.mrd_dx_id
            and mrd.icd9_cd in ('728.89','729.1','710.4','728.3','729.0','728.81','781.0','791.3')
    )
    select * into #dx from FAST_DX;

    --return patients who had a high cpk value along with the year the cpk
    --value was taken
    with FAST_LABS AS
    (
        SELECT pl.mrd_pt_id,
               YEAR(pl.order_dts) as lab_yr
        FROM edw_dm.patient_labs as pl
        inner join mrd.labs as mrd
            on pl.mrd_lab_id = mrd.mrd_lab_id
            and mrd.lab_nm = 'CK (CPK)'
        WHERE pl.lab_val between 1000 AND 999998
    )
    select * into #labs from FAST_LABS;

    -- count the number of patients who had a lab value or a medication
    -- value taken sometime AFTER their initial statin diagnosis
    select count(distinct p.mrd_pt_id) as ct
    from mrd.patient_demographics as p
    join #meds as m
        on p.mrd_pt_id = m.mrd_pt_id
    AND
    (
        EXISTS
        (
            SELECT 'A'
            FROM #labs l
            WHERE p.mrd_pt_id = l.mrd_pt_id
            and l.lab_yr >= m.statin_yr
        )
        OR EXISTS
        (
            SELECT 'A'
            FROM #dx d
            WHERE p.mrd_pt_id = d.mrd_pt_id
            AND d.init_yr >= m.statin_yr
        )
    )
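This is not part of the question, but one way to express the "on a statin at the time, or within the first 3 months after stopping" condition is to compare the lab (or diagnosis) date directly against the prescription date ranges instead of comparing years. The sketch below reuses the table and column names from the CTEs above and assumes that a NULL end_dts means the prescription is still active; the 3-month grace window is the "n = 3" from the question:

    -- hedged sketch: patients whose high CPK was drawn while covered by a statin
    -- prescription, or within 3 months of that prescription ending
    select count(distinct pl.mrd_pt_id) as ct
    from edw_dm.patient_labs as pl
    inner join mrd.labs as lab
        on pl.mrd_lab_id = lab.mrd_lab_id
       and lab.lab_nm = 'CK (CPK)'
    where pl.lab_val between 1000 and 999998
      and exists
      (
          select 1
          from edw_dm.patient_medications as pm
          inner join mrd.medications as med
              on pm.mrd_med_id = med.mrd_med_id
          where pm.mrd_pt_id = pl.mrd_pt_id
            and med.generic_nm in ('Simvastatin', 'atorvastatin' /* ...rest of the statin list from FAST_MEDS... */)
            and pl.order_dts >= pm.start_dts
            and pl.order_dts <= dateadd(month, 3, isnull(pm.end_dts, pl.order_dts))
      );

The diagnosis branch can reuse the same EXISTS test with pd.init_noted_dts in place of pl.order_dts, and the two branches can then be OR'd together the way the year-based version above does.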

    Read the article

  • What are good CLI tools for JSON?

    - by jasonmp85
General Problem

Though I may be diagnosing the root cause of an event, determining how many users it affected, or distilling timing logs in order to assess the performance and throughput impact of a recent code change, my tools stay the same: grep, awk, sed, tr, uniq, sort, zcat, tail, head, join, and split. To glue them all together, Unix gives us pipes, and for fancier filtering we have xargs. If these fail me, there's always perl -e.

These tools are perfect for processing CSV files, tab-delimited files, log files with a predictable line format, or files with comma-separated key-value pairs. In other words, files where each line has next to no context.

XML Analogues

I recently needed to trawl through Gigabytes of XML to build a histogram of usage by user. This was easy enough with the tools I had, but for more complicated queries the normal approaches break down. Say I have files with items like this:

    <foo user="me">
        <baz key="zoidberg" value="squid" />
        <baz key="leela" value="cyclops" />
        <baz key="fry" value="rube" />
    </foo>

And let's say I want to produce a mapping from user to average number of <baz>s per <foo>. Processing line-by-line is no longer an option: I need to know which user's <foo> I'm currently inspecting so I know whose average to update. Any sort of Unix one liner that accomplishes this task is likely to be inscrutable.

Fortunately in XML-land, we have wonderful technologies like XPath, XQuery, and XSLT to help us. Previously, I had gotten accustomed to using the wonderful XML::XPath Perl module to accomplish queries like the one above, but after finding a TextMate Plugin that could run an XPath expression against my current window, I stopped writing one-off Perl scripts to query XML. And I just found out about XMLStarlet which is installing as I type this and which I look forward to using in the future.

JSON Solutions?

So this leads me to my question: are there any tools like this for JSON? It's only a matter of time before some investigation task requires me to do similar queries on JSON files, and without tools like XPath and XSLT, such a task will be a lot harder. If I had a bunch of JSON that looked like this:

    {
        "firstName": "Bender",
        "lastName": "Robot",
        "age": 200,
        "address": {
            "streetAddress": "123",
            "city": "New York",
            "state": "NY",
            "postalCode": "1729"
        },
        "phoneNumber": [
            { "type": "home", "number": "666 555-1234" },
            { "type": "fax", "number": "666 555-4567" }
        ]
    }

And wanted to find the average number of phone numbers each person had, I could do something like this with XPath:

    fn:avg(/fn:count(phoneNumber))

Questions

Are there any command-line tools that can "query" JSON files in this way?
If you have to process a bunch of JSON files on a Unix command line, what tools do you use?
Heck, is there even work being done to make a query language like this for JSON?
If you do use tools like this in your day-to-day work, what do you like/dislike about them? Are there any gotchas?

I'm noticing more and more data serialization is being done using JSON, so processing tools like this will be crucial when analyzing large data dumps in the future. Language libraries for JSON are very strong and it's easy enough to write scripts to do this sort of processing, but to really let people play around with the data shell tools are needed.

Related Questions

Grep and Sed Equivalent for XML Command Line Processing
Is there a query language for JSON?
JSONPath or other XPath like utility for JSON/Javascript; or Jquery JSON

    Read the article

  • Is there a way to delay compilation of a stored procedure's execution plan?

    - by Ian Henry
(At first glance this may look like a duplicate of http://stackoverflow.com/questions/421275 or http://stackoverflow.com/questions/414336, but my actual question is a bit different.)

Alright, this one's had me stumped for a few hours. My example here is ridiculously abstracted, so I doubt it will be possible to recreate locally, but it provides context for my question (also, I'm running SQL Server 2005). I have a stored procedure with basically two steps: constructing a temp table and populating it with very few rows, and then querying a very large table joining against that temp table. It has multiple parameters, but the most relevant is a datetime "@MinDate." Essentially:

    create table #smallTable (ID int)

    insert into #smallTable
    select (a very small number of rows from some other table)

    select *
    from aGiantTable
    inner join #smallTable on #smallTable.ID = aGiantTable.ID
    inner join anotherTable on anotherTable.GiantID = aGiantTable.ID
    where aGiantTable.SomeDateField > @MinDate

If I just execute this as a normal query, by declaring @MinDate as a local variable and running that, it produces an optimal execution plan that executes very quickly (first joins on #smallTable and then only considers a very small subset of rows from aGiantTable while doing other operations). It seems to realize that #smallTable is tiny, so it would be efficient to start with it. This is good. However, if I make that a stored procedure with @MinDate as a parameter, it produces a completely inefficient execution plan. (I am recompiling it each time, so it's not a bad cached plan... at least, I sure hope it's not.)

But here's where it gets weird. If I change the proc to the following:

    declare @LocalMinDate datetime
    set @LocalMinDate = @MinDate --where @MinDate is still a parameter

    create table #smallTable (ID int)

    insert into #smallTable
    select (a very small number of rows from some other table)

    select *
    from aGiantTable
    inner join #smallTable on #smallTable.ID = aGiantTable.ID
    inner join anotherTable on anotherTable.GiantID = aGiantTable.ID
    where aGiantTable.SomeDateField > @LocalMinDate

Then it gives me the efficient plan!

So my theory is this: when executing as a plain query (not as a stored procedure), it waits to construct the execution plan for the expensive query until the last minute, so the query optimizer knows that #smallTable is small and uses that information to give the efficient plan. But when executing as a stored procedure, it creates the entire execution plan at once, thus it can't use this bit of information to optimize the plan. But why does using the locally declared variables change this? Why does that delay the creation of the execution plan? Is that actually what's happening? If so, is there a way to force delayed compilation (if that indeed is what's going on here) even when not using local variables in this way?

More generally, does anyone have sources on when the execution plan is created for each step of a stored procedure? Googling hasn't provided any helpful information, but I don't think I'm looking for the right thing. Or is my theory just completely unfounded?

Edit: Since posting, I've learned of parameter sniffing, and I assume this is what's causing the execution plan to compile prematurely (unless stored procedures indeed compile all at once), so my question remains -- can you force the delay? Or disable the sniffing entirely?
The question is academic, since I can force a more efficient plan by replacing the select * from aGiantTable with

    select *
    from (
        select *
        from aGiantTable
        where ID in (select ID from #smallTable)
    ) as aGiantTable

Or just sucking it up and masking the parameters, but still, this inconsistency has me pretty curious.
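For what it's worth, the two workarounds most often suggested for this kind of parameter sniffing are a statement-level recompile and an explicit OPTIMIZE FOR hint. Neither appears in the question above; the table and parameter names are reused from it purely for illustration, and the date literal in the second hint is a made-up representative value. A minimal sketch:

    -- Option 1: recompile just this statement so the optimizer sees the actual @MinDate
    select *
    from aGiantTable
    inner join #smallTable on #smallTable.ID = aGiantTable.ID
    inner join anotherTable on anotherTable.GiantID = aGiantTable.ID
    where aGiantTable.SomeDateField > @MinDate
    option (recompile);

    -- Option 2: pin the plan to a representative value instead of the sniffed one
    select *
    from aGiantTable
    inner join #smallTable on #smallTable.ID = aGiantTable.ID
    inner join anotherTable on anotherTable.GiantID = aGiantTable.ID
    where aGiantTable.SomeDateField > @MinDate
    option (optimize for (@MinDate = '2005-01-01'));

Both hints exist in SQL Server 2005, and WITH RECOMPILE on the whole procedure is the blunter alternative. The local-variable trick in the question has a similar effect for a different reason: the optimizer cannot sniff a local variable, so it falls back to average-density estimates instead of the compile-time parameter value.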

    Read the article

  • Where should I create my DbCommand instances?

    - by Domenic
I seemingly have two choices:

1. Make my class implement IDisposable. Create my DbCommand instances as private readonly fields, and in the constructor, add the parameters that they use. Whenever I want to write to the database, bind to these parameters (reusing the same command instances), set the Connection and Transaction properties, then call ExecuteNonQuery. In the Dispose method, call Dispose on each of these fields.

2. Each time I want to write to the database, write using(var cmd = new DbCommand("...", connection, transaction)) around the usage of the command, and add parameters and bind to them every time as well, before calling ExecuteNonQuery. I assume I don't need a new command for each query, just a new command for each time I open the database (right?).

Both of these seem somewhat inelegant and possibly incorrect. For #1, it is annoying for my users that this class is now IDisposable just because I have used a few DbCommands (which should be an implementation detail that they don't care about). I am also somewhat suspicious that keeping a DbCommand instance around might inadvertently lock the database or something. For #2, it feels like I'm doing a lot of work (in terms of .NET objects) each time I want to write to the database, especially with the parameter-adding. It seems like I create the same object every time, which just feels like bad practice.

For reference, here is my current code, using #1:

    using System;
    using System.Net;
    using System.Data.SQLite;

    public class Class1 : IDisposable
    {
        private readonly SQLiteCommand updateCookie = new SQLiteCommand(
            "UPDATE moz_cookies SET value = @value, expiry = @expiry, isSecure = @isSecure, isHttpOnly = @isHttpOnly WHERE name = @name AND host = @host AND path = @path");

        public Class1()
        {
            this.updateCookie.Parameters.AddRange(new[]
            {
                new SQLiteParameter("@name"),
                new SQLiteParameter("@value"),
                new SQLiteParameter("@host"),
                new SQLiteParameter("@path"),
                new SQLiteParameter("@expiry"),
                new SQLiteParameter("@isSecure"),
                new SQLiteParameter("@isHttpOnly")
            });
        }

        private static void BindDbCommandToMozillaCookie(DbCommand command, Cookie cookie)
        {
            long expiresSeconds = (long)cookie.Expires.TotalSeconds;

            command.Parameters["@name"].Value = cookie.Name;
            command.Parameters["@value"].Value = cookie.Value;
            command.Parameters["@host"].Value = cookie.Domain;
            command.Parameters["@path"].Value = cookie.Path;
            command.Parameters["@expiry"].Value = expiresSeconds;
            command.Parameters["@isSecure"].Value = cookie.Secure;
            command.Parameters["@isHttpOnly"].Value = cookie.HttpOnly;
        }

        public void WriteCurrentCookiesToMozillaBasedBrowserSqlite(string databaseFilename)
        {
            using (SQLiteConnection connection = new SQLiteConnection("Data Source=" + databaseFilename))
            {
                connection.Open();
                using (SQLiteTransaction transaction = connection.BeginTransaction())
                {
                    this.updateCookie.Connection = connection;
                    this.updateCookie.Transaction = transaction;

                    foreach (Cookie cookie in SomeOtherClass.GetCookieArray())
                    {
                        Class1.BindDbCommandToMozillaCookie(this.updateCookie, cookie);
                        this.updateCookie.ExecuteNonQuery();
                    }

                    transaction.Commit();
                }
            }
        }

        #region IDisposable implementation
        protected virtual void Dispose(bool disposing)
        {
            if (!this.disposed && disposing)
            {
                this.updateCookie.Dispose();
            }
            this.disposed = true;
        }

        public void Dispose()
        {
            this.Dispose(true);
            GC.SuppressFinalize(this);
        }

        ~Class1()
        {
            this.Dispose(false);
        }

        private bool disposed;
        #endregion
    }

    Read the article

< Previous Page | 763 764 765 766 767 768 769 770 771 772 773 774  | Next Page >