I have seen many people having trouble moving indexes to a different filegroup. There is no built-in feature to move all non-clustered indexes to another filegroup for better performance and to handle disk space thresholds.
You can have this procedure executed on production to move indexes to another filegroup.
-- =============================================================
-- uspMoveTableToFileGroup
-- Generates CREATE INDEX ... WITH DROP_EXISTING statements that
-- rebuild every index reported by sp_helpindex onto new filegroups:
-- the clustered index (and with it the table data) is scripted onto
-- @tblFileGroupName; non-clustered indexes onto @indFileGroupName.
-- NOTE: the procedure only SELECTs the generated script - it does
-- not execute it - so the output can be reviewed before running.
-- =============================================================
ALTER PROC uspMoveTableToFileGroup
    @TblName VARCHAR(200),          -- table (dbo schema) whose indexes are scripted
    @tblFileGroupName VARCHAR(200), -- target filegroup for the clustered index
    @indFileGroupName VARCHAR(200)  -- target filegroup for non-clustered indexes
AS
SET NOCOUNT ON

-- Shape of the sp_helpindex result set; columns are sized generously
-- so long index descriptions and key lists are not truncated.
-- (Original used VARCHAR(200) everywhere, which can truncate wide
-- composite key lists and silently corrupt the generated script.)
CREATE TABLE #indx
(
    index_name        SYSNAME,
    index_description VARCHAR(500),
    index_keys        VARCHAR(2000)
)

INSERT #indx (index_name, index_description, index_keys)
EXEC sp_helpindex @TblName

SELECT 'CREATE ' +
    -- preserve uniqueness of the original index
    CASE WHEN CHARINDEX('UNIQUE', index_description, 0) > 0 THEN 'UNIQUE ' ELSE '' END +
    CASE WHEN SUBSTRING(index_description, 1, 3) = 'non' THEN 'NONCLUSTERED ' ELSE 'CLUSTERED ' END +
    ' INDEX ' +
    -- BUG FIX: bracket the index name so names containing spaces or
    -- reserved words still produce a runnable statement
    '[' + index_name + ']' + CHAR(13) + ' ON ' + 'dbo.[' + @TblName + ']' +
    -- sp_helpindex marks descending key columns with "(-)"
    '(' + REPLACE(index_keys, '(-)', ' DESC') + ')' + ' WITH DROP_EXISTING ON '
    + '[' + CASE WHEN SUBSTRING(index_description, 1, 3) = 'non'
                 THEN @indFileGroupName
                 ELSE @tblFileGroupName END + ']'
FROM #indx

/* Usage example: generate a call for every user table
select 'exec uspMoveTableToFileGroup ' +
'''' + name + '''' + ', ' +
'''' + 'Primary' + '''' + ', ' +
'''' + 'INDEX_FG' + ''''
from sysobjects
where xtype = 'U'
*/
Thanks
Ajay
Sr. SQL DBA
More....Google....Search
Thursday, December 25, 2008
Wednesday, December 24, 2008
Setting Up Mirroring and Log Shipping Together.
Setting Up Mirroring and Log Shipping Together.
Prerequisite (Sql Server 2005)
•Server A ( Principal)
•Server B (Mirror)
•Server C ( Secondary Standby) for Logshipping
•Share Folder on Network, Accessible by all three Servers (A, B & C).
1.Restore backups of the principal/primary database with NORECOVERY onto another server instance to be later used as database mirroring mirror database for the principal/primary database.
2.Set up database mirroring.
E.g. Configuring Mirroring on Principal Server.
Server A: Mir (Principal , Synchronized)
Server B: Mir (Mirror , Synchronized/Restoring)
3.Restore backups of the principal/primary database to other Destination server instances to be used as log shipping.
E.g Restore Backup of Mir database from Server A to Server C with NoRecovery.
4.Create a Network share Folder to take Tlog backup for both Server A (Principal Databases) and Server B(Mirrored Database)
E.g. \\192.168.1.15\Backup -- This Share folder will be used\accessible by Server A, Server B and Server C.
5.Create a Network Share Folder for Copying Tlog from source to destination.
E.g. \\192.168.1.15\Dest_Backup -- Destination Share Folder from where restore of Log will be done to Server C.
6.Set up log shipping on the principal database from Server A to Server C according to Schedule backup, restore and latency required.( Use Sql 2005 Management Studio) check Restore Tab option for Standby\ No recovery.
Server C: Mir (Standby/Read only)
7.Disable Following job
1. Disable Logshipping Backup job on server A.
2. Disable Logshipping Copy Job on server C.
3. Disable Logshipping Restore Job on server C.
8.Set up log shipping on the mirror as the inactive primary database. Follow below steps.
a.Failover Database Mirroring to Server B.
b.Use same shared folder for Backup and Destination e.g. as used above
\\192.168.1.15\Backup
\\192.168.1.15\Dest_Backup
c.Set up log shipping similar to done in step 6 from Server B to Server C.
9.Failover Mirroring to Server A.
10.Enable all job disable in Step 7.
1.Enable Logshipping Backup job on server A.
2.Enable Logshipping Copy Job on server C.
3.Enable Logshipping Restore Job on server C.
Check that the log shipping jobs are running successfully as scheduled.
Prerequisite (Sql Server 2005)
•Server A ( Principal)
•Server B (Mirror)
•Server C ( Secondary Standby) for Logshipping
•Share Folder on Network, Accessible by all three Servers (A, B & C).
1.Restore backups of the principal/primary database with NORECOVERY onto another server instance to be later used as database mirroring mirror database for the principal/primary database.
2.Set up database mirroring.
E.g. Configuring Mirroring on Principal Server.
Server A: Mir (Principal , Synchronized)
Server B: Mir (Mirror , Synchronized/Restoring)
3.Restore backups of the principal/primary database to other Destination server instances to be used as log shipping.
E.g Restore Backup of Mir database from Server A to Server C with NoRecovery.
4.Create a Network share Folder to take Tlog backup for both Server A (Principal Databases) and Server B(Mirrored Database)
E.g. \\192.168.1.15\Backup -- This Share folder will be used\accessible by Server A, Server B and Server C.
5.Create a Network Share Folder for Copying Tlog from source to destination.
E.g. \\192.168.1.15\Dest_Backup -- Destination Share Folder from where restore of Log will be done to Server C.
6.Set up log shipping on the principal database from Server A to Server C according to Schedule backup, restore and latency required.( Use Sql 2005 Management Studio) check Restore Tab option for Standby\ No recovery.
Server C: Mir (Standby/Read only)
7.Disable Following job
1. Disable Logshipping Backup job on server A.
2. Disable Logshipping Copy Job on server C.
3. Disable Logshipping Restore Job on server C.
8.Set up log shipping on the mirror as the inactive primary database. Follow below steps.
a.Failover Database Mirroring to Server B.
b.Use same shared folder for Backup and Destination e.g. as used above
\\192.168.1.15\Backup
\\192.168.1.15\Dest_Backup
c.Set up log shipping similar to done in step 6 from Server B to Server C.
9.Failover Mirroring to Server A.
10.Enable all job disable in Step 7.
1.Enable Logshipping Backup job on server A.
2.Enable Logshipping Copy Job on server C.
3.Enable Logshipping Restore Job on server C.
Check that the log shipping jobs are running successfully as scheduled.
Wednesday, December 10, 2008
Replication involved in SQl 2000 and SQl 2005
Replication involved in SQl 2000 and SQl 2005
Publication: database in SQL 2000
Subscriber: database in SQL 2005
Case: Need to drop article from existing publication.
Ex: To drop article “Employees” from publication ‘ReplTest’ which is in sql server 2000 publication and database 'ReplTest'
Connect Publication server and you might follow below steps as in sql 2000 removing article from GUI doesn’t give you option to tick off the article and remove it from publication.
Use 'ReplTest'
GO
EXEC sp_droparticle 'ReplTest', 'Employees'
Server: Msg 14046, Level 16, State 1, Procedure sp_droparticle, Line 161
Could not drop article. A subscription exists on it.
Above error say’s if you want to drop article from publication then you need to first drop the subscription and then remove article from publication.
And dropping subscription and re- adding the same would require you to run fresh snapshot ,This might be huge pain to customer if snapshot size is very large say 150 GB Appox(6 hours). And then again initial synchronization may take (6 hours).
Total 12 hours maintenance.
Here what I found something interesting and would like to share.
If you want to remove article from publication involved with sql 2005 subscription.
Open SQL 2005 SSMS and connect publication SQL 2000 Server and follow below steps
·Right-click the publication → Properties → Articles and remove the article. (Here it allows us to remove an article from the same publication, which is not allowed from Enterprise Manager or SQL 2000 Query Analyzer.)
You might get below screen
The screen above show subscription needs to be reinitialize but when you run the snapshot it would not generate new snapshot which is good , something foolish error from Microsoft saying subscription be reinitialized from new snapshot.
Thus if you don’t want the whole snapshot to be run as per above scenario, always drop article from SQl 2005 SSMS the same way shown above.
Also if you need to add article just add the same way as you remove from publication and run the snapshot it would run for only one/ number. of added articles.
Please revert and let me know if this article helps you or you have any query.
Thanks,
Ajay Varma,MCTS
Publication: database in SQL 2000
Subscriber: database in SQL 2005
Case: Need to drop article from existing publication.
Ex: To drop article “Employees” from publication ‘ReplTest’ which is in sql server 2000 publication and database 'ReplTest'
Connect Publication server and you might follow below steps as in sql 2000 removing article from GUI doesn’t give you option to tick off the article and remove it from publication.
Use 'ReplTest'
GO
EXEC sp_droparticle 'ReplTest', 'Employees'
Server: Msg 14046, Level 16, State 1, Procedure sp_droparticle, Line 161
Could not drop article. A subscription exists on it.
Above error say’s if you want to drop article from publication then you need to first drop the subscription and then remove article from publication.
And dropping subscription and re- adding the same would require you to run fresh snapshot ,This might be huge pain to customer if snapshot size is very large say 150 GB Appox(6 hours). And then again initial synchronization may take (6 hours).
Total 12 hours maintenance.
Here what I found something interesting and would like to share.
If you want to remove article from publication involved with sql 2005 subscription.
Open SQL 2005 SSMS and connect publication SQL 2000 Server and follow below steps
·Right-click the publication → Properties → Articles and remove the article. (Here it allows us to remove an article from the same publication, which is not allowed from Enterprise Manager or SQL 2000 Query Analyzer.)
You might get below screen
The screen above show subscription needs to be reinitialize but when you run the snapshot it would not generate new snapshot which is good , something foolish error from Microsoft saying subscription be reinitialized from new snapshot.
Thus if you don’t want the whole snapshot to be run as per above scenario, always drop article from SQl 2005 SSMS the same way shown above.
Also if you need to add article just add the same way as you remove from publication and run the snapshot it would run for only one/ number. of added articles.
Please revert and let me know if this article helps you or you have any query.
Thanks,
Ajay Varma,MCTS
How Snapshot Isolation and Row Versioning Work
How Snapshot Isolation and Row Versioning Work
When the SNAPSHOT isolation level is enabled, each time a row is updated, the SQL Server Database Engine stores a copy of the original row in tempdb, and adds a transaction sequence number to the row. The following is the sequence of events that occurs:
A new transaction is initiated, and it is assigned a transaction sequence number.
The Database Engine reads a row within the transaction and retrieves the row version from tempdb whose sequence number is closest to, and lower than, the transaction sequence number.
The Database Engine checks to see if the transaction sequence number is not in the list of transaction sequence numbers of the uncommitted transactions active when the snapshot transaction started.
The transaction reads the version of the row from tempdb that was current as of the start of the transaction. It will not see new rows inserted after the transaction was started because those sequence number values will be higher than the value of the transaction sequence number.
The current transaction will see rows that were deleted after the transaction began, because there will be a row version in tempdb with a lower sequence number value.
The net effect of snapshot isolation is that the transaction sees all of the data as it existed at the start of the transaction, without honoring or placing any locks on the underlying tables. This can result in performance improvements in situations where there is contention.
A snapshot transaction always uses optimistic concurrency control, withholding any locks that would prevent other transactions from updating rows. If a snapshot transaction attempts to commit an update to a row that was changed after the transaction began, the transaction is rolled back, and an error is raised
Script to change Isolation Level.
-- Enable SNAPSHOT isolation (row versioning in tempdb) for the
-- database, as described in the article above.
-- BUG FIX: the original set READ_COMMITTED_SNAPSHOT, which is a
-- different option (it makes READ COMMITTED use row versions, i.e.
-- RCSI). To permit SET TRANSACTION ISOLATION LEVEL SNAPSHOT you
-- must enable ALLOW_SNAPSHOT_ISOLATION.
ALTER DATABASE DB_NAME
SET ALLOW_SNAPSHOT_ISOLATION ON;
When the SNAPSHOT isolation level is enabled, each time a row is updated, the SQL Server Database Engine stores a copy of the original row in tempdb, and adds a transaction sequence number to the row. The following is the sequence of events that occurs:
A new transaction is initiated, and it is assigned a transaction sequence number.
The Database Engine reads a row within the transaction and retrieves the row version from tempdb whose sequence number is closest to, and lower than, the transaction sequence number.
The Database Engine checks to see if the transaction sequence number is not in the list of transaction sequence numbers of the uncommitted transactions active when the snapshot transaction started.
The transaction reads the version of the row from tempdb that was current as of the start of the transaction. It will not see new rows inserted after the transaction was started because those sequence number values will be higher than the value of the transaction sequence number.
The current transaction will see rows that were deleted after the transaction began, because there will be a row version in tempdb with a lower sequence number value.
The net effect of snapshot isolation is that the transaction sees all of the data as it existed at the start of the transaction, without honoring or placing any locks on the underlying tables. This can result in performance improvements in situations where there is contention.
A snapshot transaction always uses optimistic concurrency control, withholding any locks that would prevent other transactions from updating rows. If a snapshot transaction attempts to commit an update to a row that was changed after the transaction began, the transaction is rolled back, and an error is raised
Script to change Isolation Level.
-- Enable SNAPSHOT isolation (row versioning in tempdb) for the
-- database, as described in the article above.
-- BUG FIX: the original set READ_COMMITTED_SNAPSHOT, which is a
-- different option (it makes READ COMMITTED use row versions, i.e.
-- RCSI). To permit SET TRANSACTION ISOLATION LEVEL SNAPSHOT you
-- must enable ALLOW_SNAPSHOT_ISOLATION.
ALTER DATABASE DB_NAME
SET ALLOW_SNAPSHOT_ISOLATION ON;
Monday, December 8, 2008
CXPACKET wait types
Are you experiencing CXPACKET wait types?
When there is a performance issue and users face slowness while running their applications, check the wait types occurring in SQL Server. If the CXPACKET wait type accounts for a significant share of total waits (more than about 5%), there may be a SQL Server parallelism problem and reducing MAXDOP to 1 is worth considering — but see the caveats below first.
If your OLTP queries follow best practices and are well indexed, they probably will never generate a parallel plan. This is because they are fast and access a small amount of rows.
If they are missing indexes or SQL overestimates cardinality, SQL might decide to do scans, sorts, hashes, spools etc. These iterators, among others, can go parallel to reduce execution time at the cost of system resources. These iterators are not bad and they do have their place. It just isn’t on OLTP type of queries most of the time.
So dropping the MAXDOP on an OLTP system to 1 probably won’t hurt much because most of the time there is an IO bottleneck.
If the CXPACKET wait types are a symptoms of poor indexing and row count estimation, it won’t help either. Whole books have been written on indexing, query tuning and there is a nice whitepaper on stats best practices to avoid estimation problems so I am not going to go into that.
Some recommendations on values for CXPACKET waits and when
parallelism is desirable depending if this is an OLTP system on this whitepaper
SQL Server 2005 Waits and Queues
http://www.microsoft.com/technet/pro...ts_queues.mspx
I have found some queries to identify whether the CXPACKET is a symptom or the cause.
-- Snapshot current CPU / logical-read counters for all user requests,
-- wait one second, then re-sample and report the per-session deltas
-- together with the live statement text, query plan and wait info.
select r.cpu_time, r.logical_reads, r.session_id into #temp
from sys.dm_exec_sessions as s
inner join sys.dm_exec_requests as r
    on s.session_id = r.session_id --and s.last_request_start_time=r.start_time
where is_user_process = 1

waitfor delay '00:00:01'

select substring(h.text,
        (r.statement_start_offset/2)+1 ,
        ((case r.statement_end_offset when -1 then datalength(h.text) else r.statement_end_offset end - r.statement_start_offset)/2) + 1) as text,
    r.cpu_time - t.cpu_time as CPUDiff,
    r.logical_reads - t.logical_reads as ReadDiff,
    p.query_plan,
    r.wait_type,
    r.wait_time,
    r.last_wait_type,
    r.wait_resource,
    r.command,
    r.database_id,
    r.blocking_session_id,
    r.granted_query_memory,
    r.session_id, r.reads, r.writes,
    r.row_count, s.[host_name],
    s.program_name,
    s.login_name
from sys.dm_exec_sessions as s
inner join sys.dm_exec_requests as r
    on s.session_id = r.session_id and s.last_request_start_time = r.start_time
full outer join #temp as t on t.session_id = s.session_id
cross apply sys.dm_exec_sql_text(r.sql_handle) h
-- BUG FIX: the original read "...dm_exec_query_plan(r.plan_handle) porder by 3 desc";
-- the alias "p" (referenced above as p.query_plan) and ORDER BY had been run
-- together, so the script did not parse. Also order by the column alias
-- instead of its ordinal position.
cross apply sys.dm_exec_query_plan(r.plan_handle) p
order by ReadDiff desc

drop table #temp
You notice that you have several rows with CXPACKET wait types. With this query, you click the xml show plan link and:
1. search the xml for missing indexes.
2. Save as a .sqlplan and reopen in SSMS
3. Compare estimated and actual rows in the iterators on the right side of the plan. Poor estimates may bubble to the left as well.
4. If there are no missing indexes, estimates and actual are fairly close, reducing MAXDOP may help if it is not a huge report or query.
5. If there are missing index or bad estimations, fix it!
One thing to note, if you do turn down MAXDOP server-wide, turn in on at the query level(enterprise edition) on your index operation because they are optimized for it.
Alter index all on tblBlah rebuild with (maxdop=32);
When there is a performance issue and users face slowness while running their applications, check the wait types occurring in SQL Server. If the CXPACKET wait type accounts for a significant share of total waits (more than about 5%), there may be a SQL Server parallelism problem and reducing MAXDOP to 1 is worth considering — but see the caveats below first.
If your OLTP queries follow best practices and are well indexed, they probably will never generate a parallel plan. This is because they are fast and access a small amount of rows.
If they are missing indexes or SQL overestimates cardinality, SQL might decide to do scans, sorts, hashes, spools etc. These iterators, among others, can go parallel to reduce execution time at the cost of system resources. These iterators are not bad and they do have their place. It just isn’t on OLTP type of queries most of the time.
So dropping the MAXDOP on an OLTP system to 1 probably won’t hurt much because most of the time there is an IO bottleneck.
If the CXPACKET wait types are a symptoms of poor indexing and row count estimation, it won’t help either. Whole books have been written on indexing, query tuning and there is a nice whitepaper on stats best practices to avoid estimation problems so I am not going to go into that.
Some recommendations on values for CXPACKET waits and when
parallelism is desirable depending if this is an OLTP system on this whitepaper
SQL Server 2005 Waits and Queues
http://www.microsoft.com/technet/pro...ts_queues.mspx
I have found some queries to identify whether the CXPACKET is a symptom or the cause.
-- Snapshot current CPU / logical-read counters for all user requests,
-- wait one second, then re-sample and report the per-session deltas
-- together with the live statement text, query plan and wait info.
select r.cpu_time, r.logical_reads, r.session_id into #temp
from sys.dm_exec_sessions as s
inner join sys.dm_exec_requests as r
    on s.session_id = r.session_id --and s.last_request_start_time=r.start_time
where is_user_process = 1

waitfor delay '00:00:01'

select substring(h.text,
        (r.statement_start_offset/2)+1 ,
        ((case r.statement_end_offset when -1 then datalength(h.text) else r.statement_end_offset end - r.statement_start_offset)/2) + 1) as text,
    r.cpu_time - t.cpu_time as CPUDiff,
    r.logical_reads - t.logical_reads as ReadDiff,
    p.query_plan,
    r.wait_type,
    r.wait_time,
    r.last_wait_type,
    r.wait_resource,
    r.command,
    r.database_id,
    r.blocking_session_id,
    r.granted_query_memory,
    r.session_id, r.reads, r.writes,
    r.row_count, s.[host_name],
    s.program_name,
    s.login_name
from sys.dm_exec_sessions as s
inner join sys.dm_exec_requests as r
    on s.session_id = r.session_id and s.last_request_start_time = r.start_time
full outer join #temp as t on t.session_id = s.session_id
cross apply sys.dm_exec_sql_text(r.sql_handle) h
-- BUG FIX: the original read "...dm_exec_query_plan(r.plan_handle) porder by 3 desc";
-- the alias "p" (referenced above as p.query_plan) and ORDER BY had been run
-- together, so the script did not parse. Also order by the column alias
-- instead of its ordinal position.
cross apply sys.dm_exec_query_plan(r.plan_handle) p
order by ReadDiff desc

drop table #temp
You notice that you have several rows with CXPACKET wait types. With this query, you click the xml show plan link and:
1. search the xml for missing indexes.
2. Save as a .sqlplan and reopen in SSMS
3. Compare estimated and actual rows in the iterators on the right side of the plan. Poor estimates may bubble to the left as well.
4. If there are no missing indexes, estimates and actual are fairly close, reducing MAXDOP may help if it is not a huge report or query.
5. If there are missing index or bad estimations, fix it!
One thing to note, if you do turn down MAXDOP server-wide, turn in on at the query level(enterprise edition) on your index operation because they are optimized for it.
Alter index all on tblBlah rebuild with (maxdop=32);
Wednesday, December 3, 2008
SQL Server 2005 REBUILD\REORGANIZE Index using DMV's
-- Example calls (the proc prints the commands it would run):
--INDEXREBUILD 'ADVENTUREWORKS',50,90,5000,'REBUILD','OFF'
--INDEXREBUILD 'ADVENTUREWORKS',30,60,5000,'REORGANIZE','OFF'

-- =============================================================
-- INDEXREBUILD
-- Uses sys.dm_db_index_physical_stats to find indexes in @DBNAME
-- whose average fragmentation is between @MIN_FRAG and @MAX_FRAG
-- percent and whose size is at least @TABSIZE pages, then builds an
-- ALTER INDEX REBUILD or REORGANIZE statement for each.
-- NOTE: the EXEC of each generated command is commented out, so the
-- procedure only prints the statements for review before running.
-- =============================================================
ALTER PROC INDEXREBUILD
@DBNAME VARCHAR(100), -- Name of database
@MIN_FRAG INT, -- Minimum fragmentation value (percent)
@MAX_FRAG INT, -- Maximum fragmentation value (percent)
@TABSIZE BIGINT, -- Minimum index size in pages
@INDEXTYPE VARCHAR(10), -- REBUILD or REORGANIZE
@REBUILDOPT VARCHAR(10) = 'OFF' -- ONLINE rebuild option (available only for Enterprise editions)
AS
SET NOCOUNT ON
BEGIN
-- (Unused declarations from the original - @INDEXID, @PARTITIONNUM,
-- @PARTITIONS, @FRAG, @SQLSTRING, @PARMDEFINITION, @DBID,
-- @PARTITIONCOUNT, @OBJECTID - have been removed.)
DECLARE @COMMAND NVARCHAR(4000);
DECLARE @SCHEMANAME NVARCHAR(130);
DECLARE @OBJECTNAME NVARCHAR(130);
DECLARE @INDEXNAME NVARCHAR(130);
-- Qualifying indexes collected from the fragmentation DMV.
CREATE TABLE #INDEXSTATS
(
OBJNAME VARCHAR(130),
IDXNAME VARCHAR(130),
SCHNAME VARCHAR(130),
AVG_FRAGMENTATION_IN_PERCENT FLOAT,
TBLSIZE BIGINT
)
-- Dynamic SQL so the catalog views of @DBNAME (not the current
-- database) are joined against the DMV output.
-- BUG FIX: DB_ID() was CAST to VARCHAR(3), which truncates database
-- ids of four or more digits and silently scans the wrong database;
-- VARCHAR(10) holds any possible id.
SET @COMMAND = 'SELECT O.NAME,
I.NAME,
S.NAME,
AVG_FRAGMENTATION_IN_PERCENT,
SUM(TOTAL_PAGES)
FROM SYS.DM_DB_INDEX_PHYSICAL_STATS ('+ CAST(DB_ID(@DBNAME) AS VARCHAR(10)) +', NULL, NULL, NULL, NULL) V
JOIN ['+ @DBNAME +'].SYS.OBJECTS AS O ON V.OBJECT_ID = O.OBJECT_ID
JOIN ['+ @DBNAME +'].SYS.SCHEMAS AS S ON S.SCHEMA_ID = O.SCHEMA_ID
JOIN ['+ @DBNAME +'].SYS.INDEXES AS I ON I.OBJECT_ID = O.OBJECT_ID
AND V.INDEX_ID = I.INDEX_ID
JOIN ['+ @DBNAME +'].SYS.PARTITIONS AS P ON P.OBJECT_ID = O.OBJECT_ID
JOIN ['+ @DBNAME +'].SYS.ALLOCATION_UNITS A ON P.PARTITION_ID = A.CONTAINER_ID
WHERE AVG_FRAGMENTATION_IN_PERCENT >= '+ CAST(@MIN_FRAG AS VARCHAR(8)) + '
AND AVG_FRAGMENTATION_IN_PERCENT <= '+ CAST(@MAX_FRAG AS VARCHAR(8)) + '
AND I.INDEX_ID > 0
GROUP BY O.NAME,
I.NAME,
S.NAME,
AVG_FRAGMENTATION_IN_PERCENT
HAVING SUM(TOTAL_PAGES) >= ' + CAST(@TABSIZE AS VARCHAR(50)) + ''
INSERT INTO #INDEXSTATS
(
OBJNAME,
IDXNAME,
SCHNAME,
AVG_FRAGMENTATION_IN_PERCENT,
TBLSIZE
)
EXEC(@COMMAND)
SET @COMMAND = ''
-- Emit one ALTER INDEX statement per qualifying index.
DECLARE TAB CURSOR FOR
SELECT OBJNAME,
IDXNAME,
SCHNAME
FROM #INDEXSTATS;
OPEN TAB;
FETCH NEXT
FROM TAB
INTO @OBJECTNAME, @INDEXNAME, @SCHEMANAME;
WHILE @@FETCH_STATUS = 0
BEGIN;
IF @INDEXTYPE = 'REBUILD'
BEGIN
IF @REBUILDOPT = 'ONLINE'
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REBUILD WITH (FILLFACTOR = 90,ONLINE = ON)';
ELSE
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REBUILD WITH (FILLFACTOR = 90,ONLINE = OFF)';
END
ELSE
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REORGANIZE';
SELECT 'EXECUTING: ' + @COMMAND;
print @COMMAND
-- Uncomment to actually run the maintenance commands:
--EXEC (@COMMAND);
FETCH NEXT
FROM TAB
INTO @OBJECTNAME, @INDEXNAME, @SCHEMANAME;
END;
CLOSE TAB;
DEALLOCATE TAB;
DROP TABLE #INDEXSTATS
END
------
To run this SP against All Database use below cursor.
--Cursor for runing the procedure on all database.
-- Run INDEXREBUILD against every user database (dbid > 4 skips
-- master, tempdb, model and msdb).
Declare @DbName sysname
-- BUG FIX: @cmd was varchar(100), which silently truncates the
-- command for long database names (up to 128 chars) plus the
-- ~50 characters of fixed arguments; 400 is comfortably enough.
Declare @cmd varchar(400)
-- LOCAL FAST_FORWARD: same behavior, read-only forward-only cursor
-- scoped to this batch (no global cursor-name clash).
Declare Reindex_cur cursor local fast_forward for
select name from sysdatabases where dbid > 4
open Reindex_cur
fetch next from Reindex_cur into @DbName
while (@@fetch_status = 0)
begin
Set @cmd = 'INDEXREBUILD ''' + @DbName + ''',50,90,5000,''REBUILD'',''ONLINE'''
EXEC (@cmd)
fetch next from Reindex_cur into @DbName
end
close Reindex_cur
deallocate Reindex_cur
-- Example call (the proc prints the commands it would run):
--INDEXREBUILD 'ADVENTUREWORKS',30,60,5000,'REORGANIZE','OFF'

-- =============================================================
-- INDEXREBUILD
-- Uses sys.dm_db_index_physical_stats to find indexes in @DBNAME
-- whose average fragmentation is between @MIN_FRAG and @MAX_FRAG
-- percent and whose size is at least @TABSIZE pages, then builds an
-- ALTER INDEX REBUILD or REORGANIZE statement for each.
-- NOTE: the EXEC of each generated command is commented out, so the
-- procedure only prints the statements for review before running.
-- =============================================================
ALTER PROC INDEXREBUILD
@DBNAME VARCHAR(100), -- Name of database
@MIN_FRAG INT, -- Minimum fragmentation value (percent)
@MAX_FRAG INT, -- Maximum fragmentation value (percent)
@TABSIZE BIGINT, -- Minimum index size in pages
@INDEXTYPE VARCHAR(10), -- REBUILD or REORGANIZE
@REBUILDOPT VARCHAR(10) = 'OFF' -- ONLINE rebuild option (available only for Enterprise editions)
AS
SET NOCOUNT ON
BEGIN
-- (Unused declarations from the original - @INDEXID, @PARTITIONNUM,
-- @PARTITIONS, @FRAG, @SQLSTRING, @PARMDEFINITION, @DBID,
-- @PARTITIONCOUNT, @OBJECTID - have been removed.)
DECLARE @COMMAND NVARCHAR(4000);
DECLARE @SCHEMANAME NVARCHAR(130);
DECLARE @OBJECTNAME NVARCHAR(130);
DECLARE @INDEXNAME NVARCHAR(130);
-- Qualifying indexes collected from the fragmentation DMV.
CREATE TABLE #INDEXSTATS
(
OBJNAME VARCHAR(130),
IDXNAME VARCHAR(130),
SCHNAME VARCHAR(130),
AVG_FRAGMENTATION_IN_PERCENT FLOAT,
TBLSIZE BIGINT
)
-- Dynamic SQL so the catalog views of @DBNAME (not the current
-- database) are joined against the DMV output.
-- BUG FIX: DB_ID() was CAST to VARCHAR(3), which truncates database
-- ids of four or more digits and silently scans the wrong database;
-- VARCHAR(10) holds any possible id.
SET @COMMAND = 'SELECT O.NAME,
I.NAME,
S.NAME,
AVG_FRAGMENTATION_IN_PERCENT,
SUM(TOTAL_PAGES)
FROM SYS.DM_DB_INDEX_PHYSICAL_STATS ('+ CAST(DB_ID(@DBNAME) AS VARCHAR(10)) +', NULL, NULL, NULL, NULL) V
JOIN ['+ @DBNAME +'].SYS.OBJECTS AS O ON V.OBJECT_ID = O.OBJECT_ID
JOIN ['+ @DBNAME +'].SYS.SCHEMAS AS S ON S.SCHEMA_ID = O.SCHEMA_ID
JOIN ['+ @DBNAME +'].SYS.INDEXES AS I ON I.OBJECT_ID = O.OBJECT_ID
AND V.INDEX_ID = I.INDEX_ID
JOIN ['+ @DBNAME +'].SYS.PARTITIONS AS P ON P.OBJECT_ID = O.OBJECT_ID
JOIN ['+ @DBNAME +'].SYS.ALLOCATION_UNITS A ON P.PARTITION_ID = A.CONTAINER_ID
WHERE AVG_FRAGMENTATION_IN_PERCENT >= '+ CAST(@MIN_FRAG AS VARCHAR(8)) + '
AND AVG_FRAGMENTATION_IN_PERCENT <= '+ CAST(@MAX_FRAG AS VARCHAR(8)) + '
AND I.INDEX_ID > 0
GROUP BY O.NAME,
I.NAME,
S.NAME,
AVG_FRAGMENTATION_IN_PERCENT
HAVING SUM(TOTAL_PAGES) >= ' + CAST(@TABSIZE AS VARCHAR(50)) + ''
INSERT INTO #INDEXSTATS
(
OBJNAME,
IDXNAME,
SCHNAME,
AVG_FRAGMENTATION_IN_PERCENT,
TBLSIZE
)
EXEC(@COMMAND)
SET @COMMAND = ''
-- Emit one ALTER INDEX statement per qualifying index.
DECLARE TAB CURSOR FOR
SELECT OBJNAME,
IDXNAME,
SCHNAME
FROM #INDEXSTATS;
OPEN TAB;
FETCH NEXT
FROM TAB
INTO @OBJECTNAME, @INDEXNAME, @SCHEMANAME;
WHILE @@FETCH_STATUS = 0
BEGIN;
IF @INDEXTYPE = 'REBUILD'
BEGIN
IF @REBUILDOPT = 'ONLINE'
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REBUILD WITH (FILLFACTOR = 90,ONLINE = ON)';
ELSE
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REBUILD WITH (FILLFACTOR = 90,ONLINE = OFF)';
END
ELSE
SET @COMMAND = N'ALTER INDEX [' + @INDEXNAME + N'] ON [' + @DBNAME + N'].[' + @SCHEMANAME + N'].[' + @OBJECTNAME + N'] REORGANIZE';
SELECT 'EXECUTING: ' + @COMMAND;
print @COMMAND
-- Uncomment to actually run the maintenance commands:
--EXEC (@COMMAND);
FETCH NEXT
FROM TAB
INTO @OBJECTNAME, @INDEXNAME, @SCHEMANAME;
END;
CLOSE TAB;
DEALLOCATE TAB;
DROP TABLE #INDEXSTATS
END
------
To run this SP against All Database use below cursor.
--Cursor for runing the procedure on all database.
-- Run INDEXREBUILD against every user database (dbid > 4 skips
-- master, tempdb, model and msdb).
Declare @DbName sysname
-- BUG FIX: @cmd was varchar(100), which silently truncates the
-- command for long database names (up to 128 chars) plus the
-- ~50 characters of fixed arguments; 400 is comfortably enough.
Declare @cmd varchar(400)
-- LOCAL FAST_FORWARD: same behavior, read-only forward-only cursor
-- scoped to this batch (no global cursor-name clash).
Declare Reindex_cur cursor local fast_forward for
select name from sysdatabases where dbid > 4
open Reindex_cur
fetch next from Reindex_cur into @DbName
while (@@fetch_status = 0)
begin
Set @cmd = 'INDEXREBUILD ''' + @DbName + ''',50,90,5000,''REBUILD'',''ONLINE'''
EXEC (@cmd)
fetch next from Reindex_cur into @DbName
end
close Reindex_cur
deallocate Reindex_cur
Subscribe to:
Posts (Atom)