TryCatchFinally.net - Some SQL, some .NET, and whatever else

8 Sep 2016

Query Business Objects repository for list of reports

We're migrating away from Business Objects Enterprise (XI R2, not the newer 3.x version) and into SQL Reporting Services (it's about time!), and I needed an inventory of everything that was still in BOE. The repository is obfuscated so that it's not human-readable, but thanks to some sleuthing and an 8-year-old post from a Business Objects forum, I was able to put together a script I wanted to share.

The hard part (decoding the obfuscated text from the database into human-readable format) came from a post by "marklee" on a Business Objects board (his post is about 1/4 of the way down):

http://www.forumtopics.com/busobj/viewtopic.php?t=124272

His query was also written for Oracle, so I converted it to one that would run against SQL Server, added some additional content types based on what we had in our database, and then attached a few additional rollups (like showing the root folder, building out the full path to each report, and returning the number of instances of each report that had been generated).

You can download the full script if you'd like, or you can view the contents below. Enjoy!

select 
 objectid, 
 parentid, 
 ownerid, 
 convert(datetime, REPLACE(left(lastmodifytime, 10),' ','-') + ' ' +
			replace(substring(lastmodifytime, 12, 8), ' ', ':') +
			'.' + substring(lastmodifytime, 21, 3)) as lastmodifytime, 
 upper(left(objectname,len(objectname)-2)) ObjectName, 
 typeid, 
 type, 
 convert(bit, 0) as IsInstance,
 si_cuid 
 into #results
 from 
 ( 
 SELECT 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 replace( 
 cast(objname as varchar(2000)) 
 ,'S','v') 
 ,'M','s') 
 ,'A','m') 
 ,')','a') 
 ,'+','b') 
 ,'C','n') 
 ,'-','c') 
 ,'/','d') 
 ,'O','t') 
 ,'E','o') 
 ,'1','e') 
 ,'3','f') 
 ,'G','p') 
 ,'5','g') 
 ,'7','h') 
 ,'W','x') 
 ,'U','w') 
 ,'Q','u') 
 ,'I','q') 
 ,'9','i') 
 ,':','i') 
 ,';','j') 
 ,'K','r') 
 ,'=','k') 
 ,'?','l') 
 ,'[','y') 
 ,']','z') 
 ,'!@',' ') 
 ,'B~S','&') 
 ,'!BO','.') 
 ,'B|C"','(') 
 ,'!B|D',')') 
 ,'M|Z','-') 
 ,'M}L',',') 
 ,'M|N','_') 
 ,'M}Z',':') 
 ,'!B{B','''') 
 ,'`|<','0') 
 ,'`|>','1') 
 ,'`|@','2') 
 ,'`|B','3') 
 ,'`|D','4') 
 ,'`|F','5') 
 ,'`|H','6') 
 ,'`|J','7') 
 ,'`|L','8') 
 ,'`|N','9') 
 ,'{','') 
 ,'!','') 
 ,'"','') 
 ,'@','') 
 ObjectName, 
 convert(varchar(100), case 
	 when TypeID = 1 then 'Folder' 

	 when TypeID = 2 then 'Crystal Report'
	 when TypeID = 260 then 'Web Intelligence Document'
	 when TypeID = 290 then 'Object Package' 
	 when TypeID = 265 then 'Report Instance (TXT)'
	 when TypeID = 262 then 'Webi Report' 
	 when TypeID = 314 then 'Deski Report' 

	 when TypeID = 283 then 'PDF' 
	 when TypeID = 267 then 'Text' 
	 when TypeID = 323 then 'Excel' 
	 when TypeID = 319 then 'Excel (XLS)'
	 when TypeID = 259 then 'Word (DOC)'
	 when TypeID = 279 then 'Report Instance (PDF)'

	 when TypeID = 266 then 'Universe' 
	 when TypeID = 278 then 'Publication' 
	 when TypeID = 299 then 'Connection' 
	 when TypeID = 8  then 'Shortcut' 
	 when TypeID = 20 then 'Groups' 
	 when TypeID = 13 then 'Server' 
	 when TypeID = 16 then 'BO Server' 
	 when TypeID = 21 then 'Event' 
	 when TypeID = 24 then 'License Key' 

	 when TypeID = 19 then 'User type 19' 
	 when TypeID = 18 then 'User type 18' 
	 when TypeID = 47 then 'User type 47' 
	 when TypeID = 48 then 'User type 48' 
 
	 else 'Other' 
 end) Type, 
 * 
 FROM [dbo].[CMS_InfoObjects5] 
 where typeid <> 284 -- appears to be some kind of temporary file
 ) BORepository 

create clustered index #idx_c_results on #results (objectid)
create index #idx_results_parentid on #results (parentid)
create index #idx_results_ownerid on #results (ownerid)

-- Mark the ones that are instances of another object (children of non-folder)
update r
   set r.IsInstance = 1
 from #results r
join #results r2
  on r.ParentID = r2.ObjectID
where r2.TypeID <> 1
  and r.TypeID <> 1




-- Define all the levels of the objects and add them to a lookup table
DECLARE @ObjectLevel INT = 0

select 23 as objectid, 0 as parentid, 0 as ownerid, GETDATE() as lastmodifytime,
		convert(varchar(100), '$') as objectname, convert(int, null) as ChildInstances, convert(datetime, null) as LastInstanceTime,
		1 as typeid, convert(varchar(100), 'Root Folder') as type, @ObjectLevel as objectlevel 
into #objects

while @@rowcount > 0
begin
	;with p as (select parentid, count(*) as instances, max(lastmodifytime) as LastInstanceTime from #results group by parentid)
	update #objects
	   set ChildInstances = p.instances,
		   LastInstanceTime = p.LastInstanceTime
	  from #objects o
	  join p
	    on o.objectid = p.ParentID
	 where o.ChildInstances is null
	   and o.typeid <> 1
	
	SET @ObjectLevel = @ObjectLevel + 1

	insert into #objects
	select r.objectid, r.parentid, r.ownerid, r.lastmodifytime, r.objectname, null, null, r.typeid, r.type, @ObjectLevel as objectlevel
	from #results r
	join #objects o
      on r.parentid = o.objectid
	 and o.typeid = 1
	left
	join #objects o2
	  on r.objectid = o2.objectid
   where o2.objectid is null

end


-- Set up the object paths
select objectid, parentid, lastmodifytime, convert(varchar(4000), '') as ObjectPath,
		objectname, childinstances, lastinstancetime, typeid, type
  into #objectswithpath
  from #objects
 where typeid <> 1

while @@rowcount > 0
BEGIN
	update #objectswithpath
	   set parentid = o.parentid,
			objectpath = o.objectname + '\' + objectpath
	  from #objectswithpath owp
	  join #objects o
	    on owp.parentid = o.objectid
END

select objectpath, objectname, lastmodifytime, ChildInstances, LastInstanceTime, type
 from #objectswithpath
order by ObjectPath, objectname


--select * from #results
--select typeid, type, count(*) from #objects group by typeid, type order by count(*) DESC

/*
select type, case when isnull(ChildInstances,0) = 0 then 0 else 1 end, count(*), max(LastInstanceTime) --objectpath, objectname, lastmodifytime, ChildInstances, LastInstanceTime, type
 from #objectswithpath
 where ObjectPath not like '$\REPORTS OFF LINE\%'
group by type, case when isnull(ChildInstances,0) = 0 then 0 else 1 end
order by type, case when isnull(ChildInstances,0) = 0 then 0 else 1 end
*/

--select * from #objectswithpath where type = 'Word (DOC)'


drop table #objectswithpath
drop table #objects
drop table #results

23 Jan 2015

View SQL Server table updates per second

When trying to gauge the level of database activity, you can use SQL Profiler to view the type and volume of transactions in motion and the overall level of database I/O, but you can't use it to tell directly which database tables are being updated.

However, there's a handy dynamic management view called sys.dm_db_index_usage_stats that tells you the number of rows that have been updated in each database index since the instance was last restarted (or since the table/index was created, if that happened more recently):

SELECT *
FROM sys.dm_db_index_usage_stats

The view also has some additional information on index usage, including the number of scans, seeks, and lookups performed on each index - super helpful if you're looking for unused indexes or for the heaviest-hit objects. Index IDs 0 and 1 (zero is the table's heap, 1 is its clustered index) reflect activity on the underlying table data itself.
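
If you're hunting for unused indexes, for example, here's a quick sketch against the same DMV - treat the results as candidates only, since the stats reset on every restart:

-- Sketch: nonclustered indexes that have been written to but never read
-- since the last restart (review candidates, not automatic removals)
SELECT object_name(us.object_id) as TableName,
       si.name as IndexName,
       us.user_updates as WritesSinceRestart
  FROM sys.dm_db_index_usage_stats us
  JOIN sys.indexes si
    ON us.object_id = si.object_id
   AND us.index_id = si.index_id
 WHERE us.database_id = db_id()
   AND si.index_id > 1 -- nonclustered indexes only
   AND us.user_seeks + us.user_scans + us.user_lookups = 0
 ORDER BY us.user_updates DESC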

I needed to see the row updates per second for every table in the database, so rather than run that select over and over (and compare the results), I wrote a quick script to do the comparison repeatedly for me:

SET NOCOUNT ON

-- Remove the working table if it already exists
-- so it doesn't get in the way
IF OBJECT_ID('tempdb..#TableActivity_After') IS NOT NULL
DROP TABLE #TableActivity_After


-- Collect our working data
SELECT object_name(us.object_id) as TableName,
		user_updates as UpdatedRows,
		last_user_update as LastUpdateTime
INTO #TableActivity_After
from sys.dm_db_index_usage_stats us
join sys.indexes si
	on us.object_id = si.object_id
	and us.index_id = si.index_id
where database_id = db_id()
and user_seeks + user_scans + user_lookups + user_updates > 0
and si.index_id in (0,1)
order by object_name(us.object_id)


-- Figure out if we're running it the first time or again
-- Put the data into the correct tables 
IF OBJECT_ID('tempdb..#TableActivity_Before') IS NULL
BEGIN
	-- First time it's being run - stage the existing data
	PRINT 'Initial table usage collected - execute again for changes'

END
ELSE
BEGIN
	-- Running script a subsequent time
	-- Compare this set of data to our last set

	-- See how long it's been since we ran this script last
	-- Or at least since last change in any table in the database
   DECLARE @SecondsSince DECIMAL(10,2)
	SELECT @SecondsSince = CONVERT(FLOAT, DATEDIFF(ms, MAX(LastUpdateTime ), GETDATE()))/1000
	  FROM #TableActivity_BEFORE

	SELECT @SecondsSince as 'Seconds since last execution'

	-- Do actual table comparison and give results
	SELECT a.TableName,
		   a.updatedrows - isnull(b.UpdatedRows,0) as RowsUpdated,
		  CONVERT(INT, (a.updatedrows - isnull(b.UpdatedRows,0)) / @SecondsSince) as RowsPerSecond
	 FROM #TableActivity_After a
	 LEFT
	 JOIN #TableActivity_Before b
	   ON b.TableName = a.TableName
    WHERE a.updatedrows - isnull(b.UpdatedRows,0) > 0
	ORDER BY RowsUpdated DESC

	
END

-- Swap the tables so the AFTER table becomes the new BEFORE
-- Then clean up AFTER table since we'll get a new one next time
IF OBJECT_ID('tempdb..#TableActivity_Before') IS NOT NULL
DROP TABLE #TableActivity_Before

SELECT *
  INTO #TableActivity_Before
  FROM #TableActivity_After

DROP TABLE #TableActivity_After

Running that script the first time grabs a snapshot of table activity. Running it again tells you what has changed since the previous run, and each subsequent run continues to do so (updating the "before" image each time, so you're always seeing only the most recent database activity).

If you want to see activity on all database indexes, you can update the query at the top to include the index name and remove the "WHERE si.index_id in (0,1)" filter, and you'll see all the index details.
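
For example, here's a sketch of that modified collection step - the comparison join at the bottom of the script would then need to match on both table name and index name:

-- Sketch: collect per-index activity instead of per-table
SELECT object_name(us.object_id) as TableName,
       si.name as IndexName,
       user_updates as UpdatedRows,
       last_user_update as LastUpdateTime
  INTO #IndexActivity_After
  from sys.dm_db_index_usage_stats us
  join sys.indexes si
    on us.object_id = si.object_id
   and us.index_id = si.index_id
 where database_id = db_id()
   and user_seeks + user_scans + user_lookups + user_updates > 0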

I hope this is helpful - if you have any feedback or would like to see something added, please feel free to leave a comment below!

Download the full script here

14 Jan 2015

Querying Active Directory from SQL Server

SQL Server provides some pretty flexible integration with Active Directory through the ADSI Linked Server provider, something that's present by default when you install SQL Server. If you've never used it before, it allows you to connect to a domain controller and query AD the same way you'd query any other linked server. For example, it gives you the option to:

  • Identify logins on SQL Servers or databases supporting financial applications that have no matching AD account (whether they're integrated logins, SQL logins, or rows in a "User" table that were set up to match an AD login)
  • Kick off alerts to provision the user in various systems based on their AD group membership
  • Automatically trigger an action when a new account appears in active directory (for example, we auto-provision security badges and send an email alert to our head of security to assign the appropriate rights)

While much of this could be done from PowerShell as well, we use the SQL Server Agent to manage many of our scheduled jobs (because it's so handy to have the agent remotely accessible), and sometimes we just need AD data in a query. To support a number of processes we have in place, we run a synchronization job several times throughout the day that pulls about two dozen fields for all users and synchronizes them into a table whenever anything has changed.

Setting up the linked server itself is pretty straightforward (courtesy of http://community.spiceworks.com/how_to/show/27494-create-a-sql-linked-server-to-adsi) - there are three steps, with a code sketch following the list:

  1. Create the linked server itself
  2. Set the security context (if you want to query AD as something other than the SQL Server Service account - by default, all domain users can do this and it's only required if the domain is remote or if, for some reason, your SQL Service account's AD rights have been restricted, like if you're running as "LOCAL SERVICE")
  3. Enable OPENQUERY (Ad Hoc Distributed Queries)
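
Here's a minimal sketch of those three steps - the linked server name is arbitrary, and the login step is only needed in the situations described above:

-- 1. Create the linked server using the ADSI provider
EXEC master.dbo.sp_addlinkedserver
     @server = N'ADLinkedServerName',
     @srvproduct = N'Active Directory Service Interfaces',
     @provider = N'ADSDSOObject',
     @datasrc = N'adsdatasource'

-- 2. (Only if needed) Query AD as a specific account
--    instead of the SQL Server service account
EXEC master.dbo.sp_addlinkedsrvlogin
     @rmtsrvname = N'ADLinkedServerName',
     @useself = N'False',
     @locallogin = NULL,
     @rmtuser = N'CORP\SomeServiceAccount', -- hypothetical account
     @rmtpassword = N'********'

-- 3. Enable ad hoc distributed queries
EXEC sp_configure 'show advanced options', 1
RECONFIGURE
EXEC sp_configure 'Ad Hoc Distributed Queries', 1
RECONFIGURE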

You'll notice that setting up the linked server doesn't actually specify where Active Directory is located or what domain/forest you'll be querying - that's done in the query itself. In each query, you'll need to specify the FQDN (fully-qualified domain name) of the domain (or OU) you're querying. For example, we'd get all users from a domain by issuing the following query (here, "ADLinkedServerName" is the linked server we just created, and our domain is "corp.mycompany.local"):

SELECT EmployeeNumber, Name AS FullName, givenName as FirstName, sn as LastName,
L AS Location, samAccountName as ADAccount
FROM OPENQUERY(ADLinkedServerName,'SELECT Name, L, givenName, sn,
EmployeeNumber, EmployeeID,samAccountName,createtimestamp
FROM ''LDAP://OU=Users,DC=corp,DC=mycompany,DC=local''
WHERE objectClass =''user''') ad

This query will search that OU ("Users", in this case) and everything below it, so changing the FROM clause to "LDAP://DC=corp,DC=mycompany,DC=local" would fetch every "user" object in the entire directory, regardless of what folder it appears in - if your directory keeps users in another OU (like "Associates", for example), adjust the query accordingly.

For column names, you can pull any AD properties at all that you’re looking for – even custom ones that aren't part of a standard AD configuration. To get an easy list of AD properties to choose from, I like using ADSIEDIT (part of Microsoft’s Remote Server Administration Tools - download RSAT for Windows 7 or RSAT for Windows 8.1) – just drill down all the way down to an object, like a user, right click on them and select “Properties”, and you can see a list of all the properties on that account. If you’ve got Domain Admin rights, this tool can be used to modify these values too, but for querying, you only need to be a domain user or somebody who has rights to browse AD. Make a note of the names of particular properties that you're interested in - also note that AD queries are case-sensitive, so you'll need to note the casing of these properties as well.

One potential gotcha I've run into is that the maximum number of results AD will return in a single query can be set as part of domain policy - by default it's 1000 records at once, configured via the "PageSize" property on your domain controllers (see https://support.microsoft.com/kb/315071/en-us). There's also a "MaxResultSetSize" property, 256KB by default, but I've never hit it - unless you're pulling every single property back, you'll hit the PageSize row limit long before the byte limit, though remember that both are there. If you do hit the result count limit, AD returns the rows up to the limit, but then execution stops with a rather cryptic error:

Msg 7330, Level 16, State 2, Line 2
Cannot fetch a row from OLE DB provider "ADsDSOObject" for linked server "YOURDOMAIN".

If your domain is larger than the PageSize limit, you'll need to cut your query into multiple return sets of data so you don't exceed the limit on any single query. Since our domain contains about 2400 users, we were able to do it in two queries, broken up like this:

SELECT samAccountName
  FROM OPENQUERY(ADLinkedServerName,'SELECT samAccountName
                                       FROM ''LDAP://OU=Users,DC=corp,DC=mycompany,DC=local''
                                      WHERE objectClass =''user''
                                        AND givenName<''L''') as c
UNION ALL
SELECT samAccountName
  FROM OPENQUERY(ADLinkedServerName,'SELECT samAccountName
                                       FROM ''LDAP://OU=Users,DC=corp,DC=mycompany,DC=local''
                                      WHERE objectClass =''user''
                                        AND givenName>=''L''') as c

By dividing the names at "L", this cut our directory roughly in half - if yours is larger, you could query each OU separately, loop through the letters of the alphabet, or whatever makes sense in your setting. You could even do something dynamic: pull as many records as you can, grab the value from the last record returned, use it as the starting point for the next batch, and repeat until you run out of records. Linked servers don't allow you to assemble your query dynamically at run-time - the OPENQUERY text has to be hard-coded in the query - but there are some ways around that (like building your OPENQUERY as a string and then executing it via sp_executesql, for example).
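
Here's a rough sketch of that workaround - the boundary value is a placeholder, and the quote-doubling is the fiddly part:

-- Build the inner AD query with the run-time boundary value, then wrap it
-- in OPENQUERY (doubling the quotes) and execute the whole thing dynamically
DECLARE @Boundary NVARCHAR(50) = N'L', -- placeholder split point
        @LDAP NVARCHAR(1000),
        @SQL NVARCHAR(MAX)

SET @LDAP = N'SELECT samAccountName
                FROM ''LDAP://OU=Users,DC=corp,DC=mycompany,DC=local''
               WHERE objectClass =''user''
                 AND givenName<''' + @Boundary + ''''

SET @SQL = N'SELECT samAccountName FROM OPENQUERY(ADLinkedServerName, '''
         + REPLACE(@LDAP, '''', '''''') + N''') as c'

EXEC sp_executesql @SQL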

Now that you have your AD records stored in a temp table, you can identify new or changed records and merge them into an existing SQL table using INSERT/UPDATE/DELETE or a MERGE statement, or possibly trigger notifications or some other business process.
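
For example, here's a minimal sketch of the merge step, assuming a staging temp table #ADUsers and a target table dbo.ADUsers keyed on the account name (both names hypothetical):

-- Synchronize the staged AD records into a permanent table
MERGE dbo.ADUsers AS target
USING #ADUsers AS source
   ON target.ADAccount = source.ADAccount
 WHEN MATCHED AND (target.FullName <> source.FullName
                OR target.Location <> source.Location) THEN
      UPDATE SET FullName = source.FullName,
                 Location = source.Location
 WHEN NOT MATCHED BY TARGET THEN
      INSERT (ADAccount, FullName, Location)
      VALUES (source.ADAccount, source.FullName, source.Location)
 WHEN NOT MATCHED BY SOURCE THEN
      DELETE;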

I hope this is helpful - if you'd like some more detail, please leave a comment and I'm happy to elaborate where it's necessary!

2 Jan 2015

Removing expired/unused SSRS subscriptions

SQL Reporting Services doesn't do a very good job of keeping the SQL Agent job list clean by removing expired or otherwise unusable subscriptions. To deal with this, we created a script that pulls some details about these old subscriptions, including the report responsible, the last run time and status, and the user who originally scheduled it. If you notice your SQL Agent job list getting excessively long, you can use this query to identify the culprit reports and owners, and then either notify them or remove the old subscriptions manually yourself (run this on the server hosting your SSRS databases):

  select c.Name as ReportName,
         s.EventType,
         s.Description as SubscriptionDescription,
         s.LastStatus as LastSubscriptionStatus,
         s.LastRunTime SubscriptionLastRunTime,
         case
            when recurrencetype = 1 then 'One Time'
            when recurrencetype = 2 then 'Hourly'
            when recurrencetype = 4 then 'Daily'
            when recurrencetype = 5 then 'Monthly' 
            when recurrencetype = 6 then 'Month Week' 
            else 'Other'
         end as RecurrenceType,
         s.DeliveryExtension,
         u.UserName as SubscriptionSetUpBy,
         s.ModifiedDate as SubscriptionLastModifiedDate
    from [ReportServer].[dbo].[Subscriptions] s
    join [ReportServer].[dbo].[Catalog] c
      on c.ItemID = s.Report_OID
    join [ReportServer].[dbo].[Users] u
      on u.UserID = s.OwnerID
    join [ReportServer].[dbo].[reportschedule] rs
      on c.itemid = rs.reportid 
     and s.subscriptionid = rs.subscriptionid
    join [ReportServer].[dbo].[schedule] sch
      on rs.scheduleid = sch.scheduleid
   where s.EventType <> 'RefreshCache'
     and s.LastRunTime < dateadd(m, -3, getdate())
order by c.name

There are a number of similar scripts out there that pull some of this information together, but none collected all the details we were looking for in one place. From here, you can deal with the subscriptions as you see fit.
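
If you want to trace a subscription back to its SQL Agent job, the jobs SSRS creates are named with the subscription's ScheduleID GUID, so a sketch like this can tie the two together:

-- Tie each subscription to its SQL Agent job via the ScheduleID GUID
select c.Name as ReportName,
       s.Description as SubscriptionDescription,
       j.name as AgentJobName,
       j.enabled as JobEnabled
  from [ReportServer].[dbo].[Subscriptions] s
  join [ReportServer].[dbo].[Catalog] c
    on c.ItemID = s.Report_OID
  join [ReportServer].[dbo].[reportschedule] rs
    on s.SubscriptionID = rs.SubscriptionID
  join msdb.dbo.sysjobs j
    on j.name = convert(sysname, rs.ScheduleID)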

Note that you can remove the old subscriptions by brute force if you'd prefer, and SSRS will clean up the orphaned SQL jobs, but I prefer to review the list and notify users, since we've never had much volume to deal with. If you'd rather just delete them straight away, you can do so here:

DELETE ReportServer.dbo.Subscriptions
WHERE InactiveFlags != 0
	OR LastRunTime < dateadd(m, -3, getdate())
22 Jul 2014

Exporting from SQL Server to CSV with column names

SQL Server can easily export to a CSV file, but it exports just the data, without the column names included. To export the column names as well, you actually need to perform two exports - one with the column names and one with the data - and then combine the two files into a single file. The script below handles all of that.

You can use any query you want - native SQL, a linked server, a stored procedure, or anything else - and the results will export the same way once they're in the temp table. Since the script builds the list of column names dynamically as well, you only need to swap in the query being executed and set the export location - no other configuration is necessary.

-- Declare the variables
DECLARE @CMD VARCHAR(4000),
        @DelCMD VARCHAR(4000),
        @HEADERCMD VARCHAR(4000),
        @Combine VARCHAR(4000),
        @Path VARCHAR(4000),
        @COLUMNS VARCHAR(4000)

-- Set values as appropriate
    SET @COLUMNS = ''
    SET @Path = '\\servername\share\outputpath'

-- Set up the external commands and queries we'll use through xp_cmdshell
-- Note that they won't execute until we populate the temp tables they refer to
    SET @CMD = 'bcp "select * from ##OutputTable" queryout "' + @Path + '\Temp_RawData.csv" -S ' + @@SERVERNAME + ' -T -t , -c'
    SET @HEADERCMD = 'bcp "SELECT * from ##cols" queryout "' + @Path + '\Temp_Headers.csv" -S ' + @@SERVERNAME + ' -T -t , -c'
    SET @Combine = 'copy "' + @Path + '\Temp_Headers.csv" + "' + @Path + '\Temp_RawData.csv" "' + @Path + '\MyCombinedFile.csv"'
    SET @DelCMD = 'del "' + @Path + '\Temp_*.csv"'

-- Create and populate our temp table with the query results
SELECT *
  INTO ##OutputTable
  FROM YourSourceTable

-- Generate a list of columns	
 SELECT @COLUMNS = @COLUMNS + c.name + ','
   from tempdb..syscolumns c
   join tempdb..sysobjects t
     on c.id = t.id
  where t.name like '##OutputTable%'
  order by colid
  
  -- Trim the trailing comma so the header row matches the data columns
  SET @COLUMNS = LEFT(@COLUMNS, LEN(@COLUMNS) - 1)

  SELECT @COLUMNS as Cols INTO ##Cols
		
-- Run the two export queries - first for the header, then for the data
exec xp_cmdshell @HEADERCMD
exec xp_cmdshell @CMD

-- Combine the two files into a single file
exec xp_cmdshell @Combine

-- Clean up the two temp files we created
exec xp_cmdshell @DelCMD

-- Clean up our temp tables
drop table ##cols
drop table ##OutputTable

If you have any suggestions or run into any issues, please let me know!

6 Apr 2014

Working with bit masks in T-SQL

Decoding bitwise masks can be a bit confusing, so I wanted to share a couple of T-SQL functions I've created to make them easier to deal with. If you'd like to read more about bitmasks and applying them in T-SQL, you can read about it at SQL Fool: T-SQL Bitwise Operators.

The first will return the value in a given bit position of an integer - it accepts two parameters (the lookup value and the bit position) and returns a bit for the value in that position. Note that it starts with position zero, so make sure you're counting correctly:

CREATE FUNCTION dbo.BitwisePosition(
	@value BIGINT,
	@PositionNumber INT
) RETURNS BIT
WITH SCHEMABINDING
AS
BEGIN

	DECLARE @Result BIT
	DECLARE @Mask BIGINT
		SET @Mask = POWER(CONVERT(BIGINT, 2), @PositionNumber) -- BIGINT base so positions above 30 don't overflow INT

	SET @Result = (CASE @value & @Mask WHEN @Mask then 1 else 0 end)

	RETURN @Result

END
GO

The second function returns a bit (true/false) based on whether a provided bitmask applies to a reference value:

CREATE FUNCTION dbo.BitwiseMask(
	@value BIGINT,
	@Mask BIGINT
) RETURNS BIT
WITH SCHEMABINDING
AS
BEGIN

	DECLARE @Result BIT

	SET @Result = (CASE @value & @Mask WHEN @Mask then 1 else 0 end)

	RETURN @Result

END
GO

Don't forget to grant them permissions:

GRANT EXECUTE ON dbo.BitwiseMask TO PUBLIC
GRANT EXECUTE ON dbo.BitwisePosition TO PUBLIC

To use these functions, you'd call them as in these examples:

-- Value:   1110001000
-- Position 9876543210
-- Checking positions 7, 4, and 0 should return 1, 0, 0
 select dbo.bitwiseposition(904, 7),
		dbo.bitwiseposition(904, 4),
		dbo.bitwiseposition(904, 0)

-- Value:   1110001000 = 904
-- Bitmask: 0010001000 = 136
-- Will return true since mask "fits inside" value
select dbo.bitwisemask(904, 136)

-- Value:   1110001000 = 904
-- Bitmask: 0010001001 = 137
-- false since mask has a bit "outside" value
select dbo.bitwisemask(904, 137)

I hope you find them helpful!

9 Apr 2013

Identifying rows in a SQL view (or table) with a calculation error

Computed columns can be a great tool if you want to add a value to a table that depends on other columns in the table, and you don't want to maintain it on its own every time the source columns change. In a view, these can provide a single, consistent calculation to the end users. The catch is that if the calculation fails for even one row, the entire SELECT fails, and SQL Server doesn't tell you which row caused the problem. To demonstrate, let's set up a view with a calculation error in it:

-- Set up a view with two calculated columns and an error in it
-- The first calculation will always work
-- The second gives a divide by zero error on every 10th row
CREATE VIEW SourceObject AS
SELECT object_id AS KeyColumn1,
       column_id as KeyColumn2,
       object_id - column_id as CalcColumn1,
       (object_id / (column_id % 10)) as CalcColumn2
  FROM msdb.sys.columns

Now that it's set up, we can try selecting all the rows from the view we just created - we'll see about 100 rows of output, and then the query will stop with a "Divide by zero" error:

SELECT * FROM SourceObject

The calculation in this view is pretty straightforward, and you can see which rows cause the problem (those where column_id is divisible by 10), but what if it were more complicated? The trouble is that SQL Server doesn't display the row that had the problem - it stops on the row before it - so finding the row with the error is a bit more difficult. And what if there were multiple columns involved in the calculation, or different combinations of values that could produce an error? Tracking down the offending rows gets painful - you'd have to find every possible condition that could cause an error, and then query for each of them.

This script will find all the rows in a view with a calculation error, all at once. It uses a cursor to pull the rows from the view one at a time, test the calculation, and write any errors it finds to a table, so you can see every row that's causing a problem. Using a CURSOR generally isn't ideal, but in this case it's the only way to react to a bad calculation on a row-by-row basis and deal with it.

The script uses two key values from your view - KeyColumn1 and KeyColumn2 - and you can rename them to whatever you want, or use a single value if that makes more sense in your scenario. It also supports two computed columns - CalcColumn1 and CalcColumn2 - though again, it could be changed to check a single column.

 -- Set up variables
DECLARE @KeyColumn1 INT,
		@KeyColumn2 INT,
		@CalcColumn1 INT,
		@CalcColumn2 INT
		
DECLARE @CurrentRow BIGINT
	SET @CurrentRow = 1

-- Set up a place to hold key values for rows that work  
  SELECT TOP 0 KeyColumn1, KeyColumn2
    INTO #WorkingRows
    FROM SourceObject

-- Set up a place to hold errors for rows that don't work    
CREATE TABLE #ErrorRows (
	RowNumber BIGINT,
	KeyColumn1 INT,
	KeyColumn2 INT,
	[ERROR_NUMBER] INT,
	[ERROR_MESSAGE] nvarchar(4000)
)    

-- Begin loop to look through rows in the view 
  DECLARE cur INSENSITIVE CURSOR FOR
  SELECT KeyColumn1, KeyColumn2
  FROM SourceObject
  ORDER BY KeyColumn1, KeyColumn2
  
  OPEN cur
  
  FETCH NEXT FROM cur
  INTO @KeyColumn1, @KeyColumn2
	
  WHILE @@FETCH_STATUS = 0
  BEGIN
  
	BEGIN TRY

		-- Try to select the calculated columns	
		-- If there's an error, it will jump to the CATCH block
		SELECT @CalcColumn1 = CalcColumn1,
				@CalcColumn2 = CalcColumn2
		  FROM SourceObject
		 WHERE KeyColumn1 = @KeyColumn1
		   AND KeyColumn2 = @KeyColumn2
		   
		-- This lookup succeeded
		INSERT INTO #WorkingRows
		SELECT @KeyColumn1, @KeyColumn2
		
	END TRY
	BEGIN CATCH
		
		-- The lookup failed - save details
		INSERT INTO #ErrorRows
		SELECT @CurrentRow,
				@KeyColumn1,
				@KeyColumn2,
				ERROR_NUMBER(),
				ERROR_MESSAGE()			
		
	END CATCH
	
	SET @CurrentRow = @CurrentRow + 1
	    
	FETCH NEXT FROM cur
	INTO @KeyColumn1, @KeyColumn2
	    
  END
  
  -- Show the key columns of rows with errors
  SELECT * FROM #ErrorRows
  
  -- Show the key columns of working rows
  SELECT * FROM #WorkingRows
  
  -- Clean things up
  close cur
  deallocate cur
  drop table #ErrorRows
  drop table #workingrows

At the end, you'll have two tables with results in them - #ErrorRows, which contains the key values for rows with errors in them, as well as details about the error message, and #WorkingRows, which contains the key values for all of the working rows from the view.

Note: I could just as easily have set up a table with a computed column in it that causes the same problem. You'd be unable to select the entire table without an error, and hunting down the row with the error is just as painful. The script to find the error is the same, but here's an example of a table that has a computed column with this problem:

-- Set up table with a list of numbers in it
SELECT object_id AS KeyColumn1,
		RANK() OVER (PARTITION BY 0
					 ORDER BY NEWID()) as KeyColumn2
  INTO SourceObject
  FROM msdb.sys.columns
  
-- Add two calculations to the table
-- The first will always work
-- The second will give a "Divide by zero" every 100 rows
  ALTER TABLE SourceObject ADD CalcColumn1 as (KeyColumn1 - KeyColumn2)
  ALTER TABLE SourceObject ADD CalcColumn2 as (KeyColumn1 / (KeyColumn2 % 100))

-- Note that you can't add a persisted computed column to a table
-- if there's a calculation error in any of the rows, so this
-- command will fail with a "Divide by zero" error
  ALTER TABLE SourceObject ADD CalcColumn3 as (KeyColumn1 / (KeyColumn2 % 100)) PERSISTED
5 Apr 2013

Identifying SQL Server 2005 Enterprise Edition features in use

I recently stumbled across a great system view, sys.dm_db_persisted_sku_features, which identifies any Enterprise features in use in the current database - features that would prevent you from moving the database to a Standard Edition instance. Unfortunately, it first appeared in SQL 2008, and I wanted to run this check on a SQL 2005 system.
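
On SQL 2008 and later, that check is a single query:

SELECT feature_name
  FROM sys.dm_db_persisted_sku_features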

There are a number of server-level features in SQL 2005 that require Enterprise Edition, but only two database-level features - partitioning and the VarDecimal storage format. Both are easy to check for, so I put together this quick query to do it:

select * from
   (-- VarDecimal Storage Format
    select case
             when max(objectproperty(object_id, N'TableHasVarDecimalStorageFormat')) = 0
               then ''
             else 'VarDecimal Storage'
           end as feature_name
      from sys.objects
    UNION ALL
    -- Partitioning
    select case
             when max(partition_number) > 1
               then 'Partitioning'
             else ''
           end
      from sys.partitions
) t
where feature_name <> ''

On a SQL 2005 server, this query will serve the same purpose that sys.dm_db_persisted_sku_features does on SQL 2008/2012 servers.

2 Mar 2013

Refreshing changed .NET SQL CLR assemblies after patching/updates

After applying some Windows updates to one of my servers, I started getting the following error when I ran a custom .NET SQL CLR stored proc:

Server: Msg 6522, Level 16, State 2, Line 1
A .NET Framework error occurred during execution of user defined routine or aggregate 'somemethodyourecalling':

System.IO.FileLoadException: Could not load file or assembly 'System.Drawing, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a' or one of its dependencies. Assembly in host store has a different signature than assembly in GAC. (Exception from HRESULT: 0x80131050)

I'd imported some additional assemblies into SQL Server for use in SQL CLR stored procedures, and the Windows updates had included a .NET service pack that changed some of those assemblies. The version I'd linked into SQL Server no longer existed on disk, so SQL couldn't load it - that made sense, and it's documented pretty clearly in this MS Support article: http://support.microsoft.com/kb/949080

However, I had dozens of servers with SQL CLR components enabled and hundreds of different assemblies loaded across them - and not always the same ones on each server - so a standard update script wouldn't refresh all the changed assemblies (the MS Support link lists the standard assemblies that cause this error, but custom assemblies, or assemblies not specifically cleared for SQL CLR, aren't on that list). To deal with this, I wrote a script that fetches the assembly list for a database and attempts to refresh each one from its location on disk. If an assembly hasn't changed, the update attempt fails with a specific error message about the MVID, and nothing changes for that assembly.

Also, I've commented out the line that restricts the refresh to just framework assemblies (System.* and Microsoft.*) - you can uncomment that line if you'd like to keep it from attempting to reload your custom assemblies as well.

DECLARE @AssemblyName VARCHAR(255),
	    @AssemblyLocation VARCHAR(255),
	    @AlterAssemblyCommand NVARCHAR(1024),
	    @DotNetFolder VARCHAR(100)

   SET @DotNetFolder = 'C:\Windows\Microsoft.NET\Framework\v2.0.50727'

CREATE TABLE #Results (
		AssemblyName VARCHAR(255),
		AssemblyLocation VARCHAR(255),
		AlterAssemblyCommand NVARCHAR(1024),
		Results VARCHAR(1024)
)

select sa.name as AssemblyName,
		saf.name as Assemblylocation,
		case when charindex('\', saf.name) = 0
			then 'ALTER ASSEMBLY [' + sa.name + '] FROM ''' + @DotNetFolder + '\'
			else 'ALTER ASSEMBLY [' + sa.name + '] FROM '''
		end + saf.name + (case right(saf.name, 4) when '.dll' then '' else '.dll' end) + ''''
		as AlterAssemblyCommand
INTO #Refresh
from sys.assemblies sa
join sys.assembly_files saf
  on sa.assembly_id = saf.assembly_id
where sa.name <> ('Microsoft.SqlServer.Types')
  --and (sa.name like 'System.%' or sa.name like 'microsoft.%')

DECLARE Commands CURSOR FAST_FORWARD FOR
SELECT AssemblyName,
	   AssemblyLocation,
	   AlterAssemblyCommand
  FROM #Refresh

OPEN Commands

FETCH NEXT FROM Commands
INTO @AssemblyName,
	   @AssemblyLocation,
	   @AlterAssemblyCommand

WHILE @@FETCH_STATUS = 0
BEGIN

	BEGIN TRY
		exec sp_executesql @AlterAssemblyCommand

		insert into #Results
		SELECT @AssemblyName,
				@AssemblyLocation,
				@AlterAssemblyCommand,
				'Assembly refreshed successfully'

	END TRY
	BEGIN CATCH

		insert into #Results
		SELECT @AssemblyName,
				@AssemblyLocation,
				@AlterAssemblyCommand,
				CASE ERROR_NUMBER()
					WHEN 6285 THEN 'No update necessary (MVID match)'
					WHEN 6501 THEN 'Physical assembly not found at specified location (SQL Error 6501)'
					ELSE ERROR_MESSAGE() + ' (SQL Error ' + convert(varchar(10), ERROR_NUMBER()) + ')'
				END

	END CATCH

	FETCH NEXT FROM Commands
	INTO @AssemblyName,
		   @AssemblyLocation,
		   @AlterAssemblyCommand

END

CLOSE Commands
DEALLOCATE Commands

SELECT * FROM #Results

drop table #refresh
drop table #Results

While troubleshooting the error, I came across this as well - I don't have a SQL 2012 server handy to check with, but it looks like this problem might be resolved with a reboot in SQL 2012:

http://msdn.microsoft.com/en-us/library/hh479773.aspx

I'm not sure if that's the case, but it would make things easier. It would also be nice if there were some kind of "ALTER ASSEMBLY [SomeAssembly] REFRESH" command to address this problem - maybe in a future version.

Additionally, this error can occur when the signing of an assembly has changed but not its signature. In that case, just reloading it from disk won't work: SQL Server decides whether an assembly has changed by comparing the MVID, and the MVID doesn't change unless there's been a code change of some kind (see the Connect bug here). In those cases, you'll actually need to drop and recreate the assembly, including any supporting objects that rely on it.
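
That drop-and-recreate looks something like this - the assembly name and path are hypothetical, and any procedures or functions that reference the assembly have to be dropped first and recreated afterward:

-- Drop dependent objects first, then the assembly,
-- then reload it from the updated file on disk
DROP ASSEMBLY [MyCustomAssembly]

CREATE ASSEMBLY [MyCustomAssembly]
FROM 'C:\Assemblies\MyCustomAssembly.dll'
WITH PERMISSION_SET = SAFE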

10 Jan 2013

Calculating working hours between two dates

As a follow-up to an earlier post (Return a list of all dates between a start and end date), I needed to find the number of working hours between two timestamps - in this case, to see how long a support ticket had been open before it was initially assigned, since the user didn't want non-work hours to count against them.

To do this, I used the previous script to generate a list of dates and hours, and then marked each row as work time or not (based on day of week and hour of day, evaluated together). The result was a table that effectively lets me SUM to find the value I'm looking for. With that table in place, I could join to it for the rows between the two datetimes in question and SUM up the rows marked as work time:

SELECT tt.TicketNumber,
       tt.TicketCreateTime,
       tt.TicketAssignTime,
       SUM(  CONVERT(INT, wh.IsWorktime)) as WorkHoursBeforeAssigned,
       COUNT(CONVERT(INT, wh.IsWorktime)) as TotalHoursBeforeAssigned
  FROM TroubleTickets tt
  JOIN #WorkingHours wh
    ON wh.EvaluateTime BETWEEN tt.TicketCreateTime
                           AND tt.TicketAssignTime
GROUP BY tt.TicketNumber,
         tt.TicketCreateTime,
         tt.TicketAssignTime

In this case, tickets that were created and picked up after hours, with no working time elapsing in between, show as zero hours old (as they should, since we're only interested in working time) - I've also included a COUNT here to show total hours alongside work hours.

Also, this script only accounts for raw day-of-week and hour-of-day working time - it ignores holidays and other special circumstances. I have a script that tracks holidays (American ones, at least), and I'll put that up shortly - if you want to take holidays into account, you could incorporate it into the evaluation, as sketched below.
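
For example, a sketch of that exclusion, assuming a hypothetical #Holidays table with one row per holiday date - run it after the table below is populated:

-- Zero out hours that fall on a holiday
UPDATE wh
   SET IsWorktime = 0
  FROM #WorkingHours wh
  JOIN #Holidays h
    ON h.HolidayDate = CONVERT(DATE, wh.EvaluateTime)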

Here's the script that builds the working time table (you can also download it here):

-- Set things up before we get started
--------------------------------------
DECLARE @WorkTimeStart		TINYINT,
		@WorkTimeEnd		TINYINT,
		@WorkDayOfWeekStart	TINYINT,
		@WorkDayOfWeekEnd	TINYINT

DECLARE @StartDate			DATETIME,
		@EndDate			DATETIME

CREATE TABLE #WorkingHours (
		EvaluateTime	DATETIME,
		IsWorktime		BIT DEFAULT(0)
)

--------------------------------------

	SET @WorkTimeStart = 7  --7AM
	SET @WorkTimeEnd   = 16 --4PM hour (4-5PM count as working)
	SET @WorkDayOfWeekStart = 2 --Monday
	SET @WorkDayOfWeekEnd   = 6 --Friday

	SET @StartDate	= '2000-01-01 00:00:00'
	SET @EndDate	= '2020-12-31 23:59:59'

--------------------------------------


-- Built the list of timestamps we're working with
;WITH numberlist(number)
   AS (SELECT RANK() over(order by c1.object_id,
                                   c1.column_id,
                                   c2.object_id,
                                   c2.column_id)
		 from sys.columns c1
        cross 
         join sys.columns c2)
INSERT INTO #WorkingHours (EvaluateTime)
SELECT DATEADD(hh, number-1, @StartDate)
  FROM numberlist
 WHERE DATEADD(hh, number-1, @StartDate) <= @EndDate


-- Set the times to worktime if they match criteria
UPDATE #WorkingHours
   SET IsWorktime = CASE WHEN (DATEPART(dw, EvaluateTime)
								BETWEEN @WorkDayOfWeekStart
								AND @WorkDayOfWeekEnd)
							  AND
							  (DATEPART(hh, EvaluateTime)
							   BETWEEN @WorkTimeStart
							   AND @WorkTimeEnd) THEN 1
						 ELSE 0
					END


 -- Return the results
 SELECT * FROM #WorkingHours
 ORDER BY EvaluateTime

 DROP TABLE #WorkingHours