Dec 9, 2009

Export to Excel, a new way: C#, SQL stored procedure


Create a Windows Forms project and add a class named ExcelExportUtility:

using System;
using System.Collections.Generic;
using System.Text;
using System.Data;
using System.IO;
using System.Xml;

namespace ExcelExport
{
/// <summary>
/// Summary description for ExcelExportUtility.
/// </summary>

public class ExcelExportUtility
{
private int sheetNameColumnOrdinal = -1;
private string defaultFilename = "ExcelExportRequest";
private string mFilePath = "";
public ExcelExportUtility(string fileName, string filePath)
{
mFilePath = filePath;
defaultFilename = fileName;
}

/// <summary>
/// Main method for exporting to Excel.
/// </summary>
/// <param name="data">DataSet whose tables each become a sheet in the workbook.</param>
public void Export(DataSet data)
{
string outputFile;
outputFile = mFilePath + defaultFilename + ".xls";

XmlWriterSettings settings = new XmlWriterSettings();
settings.Indent = true;

using (XmlWriter xw = XmlWriter.Create(outputFile,settings))
{
//Required namespaces used for SpreadsheetML standard.
xw.WriteStartDocument();
xw.WriteProcessingInstruction("mso-application", "progid=\"Excel.Sheet\"");
xw.WriteStartElement("Workbook", "urn:schemas-microsoft-com:office:spreadsheet");
xw.WriteAttributeString("xmlns", "o", null, "urn:schemas-microsoft-com:office:office");
xw.WriteAttributeString("xmlns", "x", null, "urn:schemas-microsoft-com:office:excel");
xw.WriteAttributeString("xmlns", "ss", null, "urn:schemas-microsoft-com:office:spreadsheet");
xw.WriteAttributeString("xmlns", "html", null, "http://www.w3.org/TR/REC-html40");

WriteHeaderInfo(xw);

//Iterate the tables in the dataset.
//Each table will become a tab or sheet in the workbook.
foreach (DataTable dt in data.Tables)
{
//Default the tab name to either the table name
//or pull tab name from first column of each resultset.
string sheetName;
if (sheetNameColumnOrdinal >= 0 && dt.Rows.Count > 0)
sheetName = dt.Rows[0][sheetNameColumnOrdinal].ToString();
else
sheetName = dt.TableName;

//Start of a tab
xw.WriteStartElement("Worksheet");
xw.WriteAttributeString("ss", "Name", null, sheetName);

xw.WriteStartElement("Table");
xw.WriteAttributeString("ss", "DefaultColumnWidth", null, "100");

//Write out header data
xw.WriteStartElement("Row");
//Format column headings
foreach (DataColumn dc in dt.Columns)
{
if (dc.Ordinal != sheetNameColumnOrdinal)
{
xw.WriteStartElement("Cell");
xw.WriteAttributeString("ss", "StyleID", null, "Header");
xw.WriteStartElement("Data");
xw.WriteAttributeString("ss", "Type", null, "String");
xw.WriteString(dc.ColumnName);
xw.WriteEndElement(); //End Data
xw.WriteEndElement(); //End Cell
}
}
xw.WriteEndElement(); //End Row

//Write out row data
foreach (DataRow dr in dt.Rows)
{
xw.WriteStartElement("Row");
foreach (DataColumn dc in dt.Columns)
{
if (dc.Ordinal != sheetNameColumnOrdinal)
{
string dataType;
string style;
string output;

//Set appropriate styling of each cell based on datatype
//This depends on how sql server ends up reporting the datatype.
switch (dc.DataType.ToString())
{
case "System.DateTime":
dataType = "DateTime";
style = "Date";

try
{
output = DateTime.Parse(dr[dc].ToString()).ToString("yyyy-MM-dd");
}
catch (FormatException) //date is null or empty in the dataset
{
output = "";
}

break;
case "System.Decimal":
case "System.Double":
case "System.Int16":
case "System.Int32":
case "System.Int64":
case "System.Byte":
dataType = "Number";
style = "Data";
output = dr[dc].ToString().Trim();
break;
default:
dataType = "String";
style = "Data";
output = dr[dc].ToString().Trim();
break;
}

//if no data then write empty cell node
xw.WriteStartElement("Cell");
xw.WriteAttributeString("ss", "StyleID", null, style);
if (output != "")
{
xw.WriteStartElement("Data");
xw.WriteAttributeString("ss", "Type", null, dataType);
xw.WriteString(output);
xw.WriteEndElement(); //End Data

}
xw.WriteEndElement(); //End Cell
}
}
xw.WriteEndElement(); //End Row
}

xw.Flush();
xw.WriteEndElement(); //End Table
xw.WriteEndElement(); //End Worksheet
}

xw.WriteEndElement(); //End Workbook
xw.Flush();
}

}

/// <summary>
/// Used to set up column headers and data type styling.
/// </summary>
/// <param name="xw">The XmlWriter the style definitions are written to.</param>
public void WriteHeaderInfo(XmlWriter xw)
{
xw.WriteStartElement("Styles");

//Default styling
xw.WriteStartElement("Style");
xw.WriteAttributeString("ss", "ID", null, "Default");
xw.WriteAttributeString("ss", "Name", null, "Normal");
xw.WriteStartElement("Alignment");
xw.WriteAttributeString("ss", "Vertical", null, "Bottom");
xw.WriteEndElement(); //End Alignment
xw.WriteElementString("Borders","");
xw.WriteElementString("Font", "");
xw.WriteElementString("Interior","");
xw.WriteElementString("NumberFormat","");
xw.WriteElementString("Protection", "");
xw.WriteEndElement(); //End Style

//Header styling
xw.WriteStartElement("Style");
xw.WriteAttributeString("ss", "ID", null, "Header");
xw.WriteStartElement("Borders");
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Bottom");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "2");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Left");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "2");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Right");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "2");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Top");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "2");
xw.WriteEndElement(); //End Border
xw.WriteEndElement(); //End Borders
xw.WriteStartElement("Font");
xw.WriteAttributeString("ss", "Bold",null,"1");
xw.WriteEndElement(); //End Font
xw.WriteStartElement("Interior");
xw.WriteAttributeString("ss", "Color", null, "#C0C0C0");
xw.WriteAttributeString("ss", "Pattern", null, "Solid");
xw.WriteEndElement(); //End Interior
xw.WriteEndElement(); //End Style

//Data styling
xw.WriteStartElement("Style");
xw.WriteAttributeString("ss", "ID", null, "Data");
xw.WriteStartElement("Alignment");
xw.WriteAttributeString("ss", "Vertical",null,"Bottom");
xw.WriteAttributeString("ss", "WrapText", null, "0");
xw.WriteEndElement(); //End Alignment
xw.WriteStartElement("Borders");
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Bottom");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Left");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Right");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Top");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteEndElement(); //End Borders
xw.WriteEndElement(); //End Style

//Date styling
xw.WriteStartElement("Style");
xw.WriteAttributeString("ss", "ID", null, "Date");
xw.WriteStartElement("Alignment");
xw.WriteAttributeString("ss", "Vertical", null, "Bottom");
xw.WriteAttributeString("ss", "WrapText", null, "1");
xw.WriteEndElement(); //End Alignment
xw.WriteStartElement("NumberFormat");
xw.WriteAttributeString("ss", "Format", null, "Short Date");
xw.WriteEndElement(); //End NumberFormat
xw.WriteStartElement("Borders");
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Bottom");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Left");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Right");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteStartElement("Border");
xw.WriteAttributeString("ss", "Position", null, "Top");
xw.WriteAttributeString("ss", "LineStyle", null, "Continuous");
xw.WriteAttributeString("ss", "Weight", null, "1");
xw.WriteAttributeString("ss", "Color", null, "#000000");
xw.WriteEndElement(); //End Border
xw.WriteEndElement(); //End Borders
xw.WriteEndElement(); //End Style

xw.WriteEndElement(); //End Styles
xw.Flush();
}

/// <summary>
/// Provides the column ordinal number to use for the sheet name.
/// If not set, the default table names are used.
/// </summary>

public int SheetNameColumnOrdinal
{
get
{
return sheetNameColumnOrdinal;
}
set
{
sheetNameColumnOrdinal = value;
}
}
/// <summary>
/// Set default filename. Do not specify extension.
/// </summary>

public string DefaultFilename
{
get
{
return defaultFilename;
}
set
{
defaultFilename = value;
}
}

public string FilePath
{
get
{
return mFilePath;
}
set
{
mFilePath = value;
}
}
}
}


Now open the default Form1 and rename it "frmExcelExp". Add two text boxes, one for the file name and the other for the folder path, plus an export button and a browse button. Drop a FolderBrowserDialog control on the form; the browse button copies its SelectedPath into the folder-path text box.

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using System.Data.SqlClient;
using System.Data.SqlTypes;
using ExcelExport;
using System.Configuration;
namespace WindowsFormsApplication1
{
public partial class frmExcelExp : Form
{
public frmExcelExp()
{
InitializeComponent();
}

private void button1_Click(object sender, EventArgs e)
{
try
{
if (txtfilename.Text.Trim() == "")
{
MessageBox.Show("Please enter a file name.");
return;
}
if (txtfilepath.Text.Trim() == "")
{
MessageBox.Show("Please select a folder path.");
return;
}
string constring = Properties.Settings.Default.connstr;
DataSet exportData = new DataSet();
SqlConnection conn = new SqlConnection(constring);

SqlCommand cmd = new SqlCommand();
cmd.CommandType = CommandType.StoredProcedure;
cmd.Connection = conn;
cmd.CommandText = "MyExcelDemo";
cmd.Parameters.Add("@DateIn", SqlDbType.VarChar);
cmd.Parameters.Add("@DateOut", SqlDbType.VarChar);
cmd.Parameters["@DateIn"].Value = "01/01/2006";
cmd.Parameters["@DateOut"].Value = "01/01/2009";


SqlDataAdapter da = new SqlDataAdapter(cmd);
da.Fill(exportData);

ExcelExportUtility exportUtil = new ExcelExportUtility(txtfilename.Text.Trim(), txtfilepath.Text.Trim());
//This allows for flexible naming of the tabs in the workbook
exportUtil.SheetNameColumnOrdinal = 0;
exportUtil.Export(exportData);
conn.Close();
}
catch (Exception oe)
{
MessageBox.Show(oe.Message);
}

}

private void button2_Click(object sender, EventArgs e)
{

folderBrowserDialog1.ShowDialog();
txtfilepath.Text = folderBrowserDialog1.SelectedPath;

}
}
}

Now create the SQL stored procedure:

CREATE PROCEDURE MyExcelDemo(@DateIn varchar(10),@DateOut varchar(10))
AS
BEGIN

SET DATEFORMAT dmy
--HERE 'SheetTest' IS THE EXCEL TAB NAME
SELECT 'SheetTest',* FROM yourtable WHERE convert(datetime,[DateTime],103) BETWEEN convert(datetime,@DateIn,103) AND convert(datetime,@DateOut,103)
END

Now build the project and run it.
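
When you run it, Excel opens the generated .xls file as SpreadsheetML. For reference, the XML the utility emits looks roughly like this (a trimmed sketch; the column name and values are hypothetical, and most namespaces and style definitions are omitted):

<?xml version="1.0" encoding="utf-8"?>
<?mso-application progid="Excel.Sheet"?>
<Workbook xmlns="urn:schemas-microsoft-com:office:spreadsheet"
 xmlns:ss="urn:schemas-microsoft-com:office:spreadsheet">
 <Styles>
  <Style ss:ID="Header">...</Style>
 </Styles>
 <Worksheet ss:Name="SheetTest">
  <Table ss:DefaultColumnWidth="100">
   <Row>
    <Cell ss:StyleID="Header"><Data ss:Type="String">FirstName</Data></Cell>
   </Row>
   <Row>
    <Cell ss:StyleID="Data"><Data ss:Type="String">Ford</Data></Cell>
   </Row>
  </Table>
 </Worksheet>
</Workbook>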

Dec 7, 2009

Exporting to Excel Using a CLR Stored Procedure: SQL, C#

Introduction

Like most of my jobs, my current one requires a lot of simple reports to Excel. Nothing fancy, no serious formatting requirements beyond looking good, which in most cases means good column headings and correctly aligned data. DTS and SSIS have served me well most of the time, but they can be rather cumbersome for simply exporting the results of a stored procedure.

The solution discussed here has been tested on both 32-bit SQL 2005 and 64-bit SQL 2008; however, the scripts are for SQL 2008, so if there are any differences I apologize.
Solution

During a discussion about Excel with one of the C# developers I work with, Nick Hanson, he mentioned that he had found an easy way to write results from C# to Excel. We then looked into what was required to turn this code into a CLR stored procedure for SQL. This turned out to be really easy, and he quickly had a test solution for me. The code is available for download. The solution is a CLR stored procedure that takes another stored procedure as a parameter and writes that procedure's output to an Excel file. The only modification needed compared to a regular stored procedure is that the value in the first column becomes the name of the worksheet.

The attached DLL will do the work, but the source code is also included to compile it yourself. Visual Studio 2008 was used for this project.

Copy the DLL to whatever directory you want your CLR DLL's to be stored in, in my case that is C:\CLR

The following steps will set this up in a database. First enable CLR on the server:

sp_configure 'clr enabled', 1
RECONFIGURE

The next step is to set the database to TRUSTWORTHY. Be sure you understand the ramifications of doing this; changing this setting should not be taken lightly.

ALTER DATABASE ProdReports2008 SET TRUSTWORTHY ON

Then we will create the assembly:

CREATE ASSEMBLY ExportToExcel
FROM 'C:\CLR\ExcelExport.dll'
WITH PERMISSION_SET = EXTERNAL_ACCESS

Associate a stored procedure name with that assembly:

CREATE PROCEDURE [dbo].[prc_ExportToExcel]
@proc [nvarchar](100),
@path [nvarchar](200),
@filename [nvarchar](100),
@params xml
AS
EXTERNAL NAME [ExportToExcel].[StoredProcedures].[ExportToExcel]

At this point you have a stored procedure that can be called, that will export to Excel. The prototype call for this procedure is:

Declare @params xml
Set @params = ''
exec prc_ExportToExcel 'procname', 'Drive:\Directory\', 'Filename', @params

Note that the filename is given without .XLS at the end; .XLS is appended automatically.

The parameters here are the ones you would normally pass to the stored procedure handed to prc_ExportToExcel; each one has to be defined in the XML variable @params. If the procedure takes no parameters, an empty string is enough:

Set @params ='' -- this will work for a proc with no parameters

For example, take a procedure normally called like this:

exec prc_AndersDemo @FirstName = 'Anders'

When called to export to Excel, it would look like this:

Declare @params xml
Set @params = ''
exec prc_ExportToExcel 'prc_AndersDemo', 'Drive:\Directory\', 'Filename', @params

To be able to change the tab name for the worksheets in Excel, the first column returned by the stored procedure must hold the name you want on the tab. Currently there is no way to NOT do this, so if you do not supply one, whatever is in the first column of the first record becomes the tab name.

I wrote a quick stored procedure to demo this:

create procedure AndersExcelDemo
as
begin
select 'sysobjects', * from sys.objects
select 'syscolumns', * from sys.columns
end

Here is the call to create an Excel file for my directory structure. Note that the directory the file is saved to is local as seen from the SQL Server the call executes on, but network shares will also work as long as the account SQL Server runs under has the appropriate permissions. The code below creates a file called AndersDemo.XLS in the C:\Anders directory on the SQL Server it is executed on.

declare @params XML
set @params=''
exec prodreports2008.dbo.prc_ExportToExcel 'AndersTest.dbo.AndersExcelDemo', 'C:\Anders\', 'AndersDemo', @params

The resulting Excel file looks like this:

[Image: data exported to Excel]

Note how it creates two worksheets, one corresponding to each result set in the stored procedure. I have not tested the limit on how many can be created, but one I have in production is running fine with 15.

I hope this can be of use to some of you. Comments and suggestions for improvements are welcome.

By Anders Pedersen, 2009/12/02

How to call a stored procedure with parameters: C#

// Assumes this class derives from the DBConnection class shown in the next post.
public bool InsertACustomer(string custName, string address1, string tp, string fax)
{
SqlParameter[] prams = new SqlParameter[5];

prams[0] = new SqlParameter("@name", custName);
prams[1] = new SqlParameter("@address1", address1);
prams[2] = new SqlParameter("@tp", tp);
prams[3] = new SqlParameter("@fax", fax);
prams[4] = new SqlParameter("@id", SqlDbType.Int);
prams[4].Direction = ParameterDirection.Output;

bool isTrue = base.CallSP("AddCustomer", prams);

if (isTrue)
{
string id = prams[4].Value.ToString(); // identity value returned via the @id output parameter

return true;
}
else
{
return false;
}


}

How to use a DataReader with a stored procedure: C#

using System;
using System.Data.SqlClient;
using System.Data;

namespace DataAccess
{
public class DBConnection
{


string sqlConnectionString = System.Configuration.ConfigurationSettings.AppSettings["ConnectionString"];

SqlDataReader rdr = null;
SqlConnection con = null;
SqlCommand cmd = null;

public DataTable ReadData(string qry)
{
try
{

DataTable dt = new DataTable();
con = new SqlConnection(sqlConnectionString);
cmd = new SqlCommand();
cmd.Connection = con;
cmd.CommandText = qry;
cmd.CommandType = CommandType.Text;

con.Open();
rdr = cmd.ExecuteReader(CommandBehavior.CloseConnection);
dt.Load(rdr);

return dt;
}
catch (Exception ex)
{
return new DataTable();
}
finally
{
if(rdr != null)
rdr.Close();
//con.Close();
}
}

public bool CallSP(string spName, SqlParameter[] param)
{
con = new SqlConnection(sqlConnectionString);
bool toReturn;

try
{
cmd = new SqlCommand(spName, con);
cmd.CommandType = CommandType.StoredProcedure;
cmd.Parameters.AddRange(param);

con.Open();
cmd.ExecuteNonQuery();
toReturn = true;

}
catch (Exception ex)
{
toReturn = false;
}
finally
{
con.Close();
}

return toReturn;

}

public DataTable CallSP(string spName, SqlParameter[] param, bool isReturn)
{
DataTable dt = new DataTable();
con = new SqlConnection(sqlConnectionString);

try
{
cmd = new SqlCommand(spName, con);
cmd.CommandType = CommandType.StoredProcedure;
cmd.Parameters.AddRange(param);

con.Open();
rdr = cmd.ExecuteReader();
dt.Load(rdr);
return dt;
}
catch (Exception ex)
{
return new DataTable();
}
finally
{
con.Close();
}
}


public DataTable CallSP(string spName, bool isReturn)
{
DataTable dt = new DataTable();
con = new SqlConnection(sqlConnectionString);

try
{
cmd = new SqlCommand(spName, con);
cmd.CommandType = CommandType.StoredProcedure;


con.Open();
rdr = cmd.ExecuteReader();
dt.Load(rdr);
return dt;
}
catch (Exception ex)
{
return new DataTable();
}
finally
{
con.Close();
}
}


}
}

Output parameter in SQL: SQL Server 2005

CREATE PROCEDURE [dbo].[AddCustomer]
-- Add the parameters for the stored procedure here
@name nvarchar(50),
@address1 nvarchar(50),
@tp nvarchar(12),
@fax nvarchar(12),
@id int output

AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;

-- Insert statements for procedure here
INSERT INTO Customer
([Name], Address1, TP, FAX)
VALUES (@name, @address1, @tp, @fax)


SET @id = (SELECT SCOPE_IDENTITY())


END
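
A quick T-SQL way to exercise this procedure and read back the output parameter (the sample values here are made up):

DECLARE @newId int
EXEC [dbo].[AddCustomer]
    @name = 'Acme Ltd',
    @address1 = '1 Main Street',
    @tp = '011-222-3344',
    @fax = '011-222-3345',
    @id = @newId OUTPUT
SELECT @newId AS NewCustomerId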

Nov 27, 2009

Parallel Data Loading using SQL Server 2005 partition techniques (BCP)

http://samsudeenb.blogspot.com/search/label/BCP

How to make a MemberTree from Employee: AdventureWorks

Here are two ways to get the member-level tree structure. The first walks up the manager chain in a loop:
create FUNCTION dbo.MemberTree(@Emp int)
RETURNS @Tree TABLE (EmployeeID int ,Title nvarchar(50),ManagerID int)
AS
BEGIN
DECLARE @Man int
DECLARE @EmpMan int
INSERT @Tree
SELECT EmployeeID,Title,ManagerID
FROM HumanResources.Employee
WHERE EmployeeID=@Emp

SELECT @Man=ManagerID
FROM HumanResources.Employee
WHERE EmployeeID=@Emp
WHILE @@ROWCOUNT >0
BEGIN

set @EmpMan=@Man
INSERT @Tree
SELECT EmployeeID,Title,ManagerID
FROM HumanResources.Employee
WHERE EmployeeID=@EmpMan

SELECT @Man=ManagerID
FROM HumanResources.Employee
WHERE EmployeeID=@EmpMan

END

RETURN
END

After creating the above function, run this:
SELECT * FROM [dbo].[MemberTree] (1)

The second way to get the membership tree structure uses a recursive common table expression (CTE):

create FUNCTION dbo.MemberTreeRecur(@Emp int)
RETURNS @Tree TABLE (EmployeeID int ,Title nvarchar(50),ManagerID int)
AS
BEGIN
WITH MemberTreeRecur(EmployeeID ,Title ,ManagerID)
AS (
SELECT EmployeeID,Title,ManagerID
FROM HumanResources.Employee
WHERE EmployeeID=@Emp
UNION ALL
SELECT Node.EmployeeID,Node.Title,Node.ManagerID
FROM HumanResources.Employee Node
JOIN MemberTreeRecur ft
ON Node.EmployeeID=ft.ManagerID
)

insert @Tree

SELECT EmployeeID,Title,ManagerID
FROM MemberTreeRecur

return
END

Now run this:
SELECT * FROM [dbo].[MemberTreeRecur] (1)

Oct 19, 2009

How to do a bulk upload to SQL Server

Create a table like this:

CREATE TABLE CSVTest
(yearn INT,
FirstName VARCHAR(40),
LastName VARCHAR(40),
description varchar(max),amount numeric(12,2))
GO
Now prepare a text file named test.txt and put in the following data:
1997,Ford,E350,ac,3000.00
1999,Chevy,Venture,kk,4900.00
1996,Jeep,Grand,Cherokee, 2000
1985, moon, roof, loaded,4799.00

Now run this query:
BULK
INSERT CSVTest
FROM 'd:\test.txt'
WITH
(
FIELDTERMINATOR = ',',
ROWTERMINATOR = '\n'
)

GO



Oct 16, 2009

How to do SQL Server Encryption and Symmetric Key Encryption

/* Create Database */
USE master
GO
CREATE DATABASE EncryptTest
ON PRIMARY ( NAME = N'EncryptTest', FILENAME = N'C:\EncryptTest.mdf')
LOG ON ( NAME = N'EncryptTest_log', FILENAME = N'C:\EncryptTest_log.ldf')
GO

/* Create table and insert data in the table */
USE EncryptTest
GO
CREATE TABLE TestTable (FirstCol INT, SecondCol VARCHAR(50))
GO
INSERT INTO TestTable (FirstCol, SecondCol)
SELECT 1,'First'
UNION ALL
SELECT 2,'Second'
UNION ALL
SELECT 3,'Third'
UNION ALL
SELECT 4,'Fourth'
UNION ALL
SELECT 5,'Fifth'
GO
/* Check the content of the TestTable */
USE EncryptTest
GO
SELECT *
FROM TestTable
GO

/* Create Database Master Key */
USE EncryptTest
GO
CREATE MASTER KEY ENCRYPTION
BY PASSWORD = 'SQLAuthority'
GO

/* Create Encryption Certificate */
USE EncryptTest
GO
CREATE CERTIFICATE EncryptTestCert
WITH SUBJECT = 'SQLAuthority'
GO

/* Create Symmetric Key */
USE EncryptTest
GO
CREATE SYMMETRIC KEY TestTableKey
WITH ALGORITHM = TRIPLE_DES ENCRYPTION
BY CERTIFICATE EncryptTestCert
GO

/* Encrypt Data using Key and Certificate
Add Columns which will hold the encrypted data in binary */
USE EncryptTest
GO
ALTER TABLE TestTable
ADD EncryptSecondCol VARBINARY(256)
GO

/* Update binary column with encrypted data created by certificate and key */
USE EncryptTest
GO
OPEN SYMMETRIC KEY TestTableKey DECRYPTION
BY CERTIFICATE EncryptTestCert
UPDATE TestTable
SET EncryptSecondCol = ENCRYPTBYKEY(KEY_GUID('TestTableKey'),SecondCol)
GO

/* Drop the original plain-text column to protect the data */
USE EncryptTest
GO
ALTER TABLE TestTable
DROP COLUMN SecondCol
GO

/* Check the content of the TestTable */
USE EncryptTest
GO
SELECT *
FROM TestTable
GO

/* Decrypt the data of the SecondCol */
USE EncryptTest
GO
OPEN SYMMETRIC KEY TestTableKey DECRYPTION
BY CERTIFICATE EncryptTestCert
SELECT CONVERT(VARCHAR(50),DECRYPTBYKEY(EncryptSecondCol)) AS DecryptSecondCol
FROM TestTable
GO

/* Clean up database */
USE EncryptTest
GO
CLOSE SYMMETRIC KEY TestTableKey
GO
DROP SYMMETRIC KEY TestTableKey
GO
DROP CERTIFICATE EncryptTestCert
GO
DROP MASTER KEY
GO
USE [master]
GO
DROP DATABASE [EncryptTest]
GO

Sep 3, 2009

MySQL :: Why Move to MySQL from Microsoft SQL Server?

Why Move to MySQL from Microsoft SQL Server?

Before we get started, let me say that I always liked being a SQL Server DBA. My database experience started with DB2, then Teradata, followed by Oracle, and then SQL Server (and then a little bit of Sybase after that, followed by MySQL). Coming from the other databases, I found SQL Server the easiest and quickest to learn at the time, but of course, a lot of that had to do with the fact that Microsoft was really the only database vendor around then (I started with version 4.2 of SQL Server) that shipped any decent GUI management tools with their server. Take the tools away, and you basically had Sybase on Windows with the ISQL command line tool, which was not pretty by any means.

I started managing a lot more database servers with SQL Server 6.0, 6.5, and then SQL Server 2000. I was doing a lot of Oracle at the time too, but still felt that SQL Server had an edge on Oracle in terms of ease of use. Oracle’s done a lot to improve their tools and overall manageability over time and today they are way ahead of where they were in the version 8/9i days with their 10/11g tools, but Microsoft has kept moving forward too. SQL Server 2005 came along with a little more help in the area of management, but it wasn’t as much as in prior versions. The same can be said (in my opinion) of SQL Server 2008 in terms of additional management help. But don’t misunderstand me, in my DBA days, SQL Server was still the easiest to use over all its competitors and I never thought I’d find another database server that could equal it in terms of having the total package of ease-of-use, reliability, and good performance.

But then I found MySQL.

Of course, you can say I’m biased because I oversee product management for MySQL inside of Sun, so guilty as charged there. But I do think MySQL has SQL Server beat in many respects, even when it comes to running on the Windows platform. Moreover, a lot of other folks seem to agree. When it comes to migrating away from another database to MySQL – or choosing MySQL over another database for new applications – the #1 database platform people move from is Microsoft, three years running according to our year-end surveys.

So why is this the case? Why should you, if you are using SQL Server today, consider moving your current applications to MySQL or use MySQL instead of SQL Server for new applications? In an article of this size, I can’t possibly cover every factor or consideration in detail, but I can try and hit the high points which hopefully will be enough. Let’s begin…

MySQL on Windows? Absolutely!

Almost two years ago, I wrote an article entitled “MySQL on Windows? Absolutely!”, where I talked about how prevalent MySQL is on Windows and why Microsoft is a great platform for our database. To date, that article has been the one I have received the most comments/emails on, and every single one was positive – all those who wrote me confirmed the reliability and high-performance characteristics of the MySQL/Windows combination.

Well, two years later, the same is still true. In terms of downloads, Windows far exceeds any other platform for the MySQL Server. For example, here are some download statistics from April 2008 to April 2009 (which includes all server versions from 5.0 on up):

[Chart: MySQL Server downloads by platform, April 2008 to April 2009]

Now some argue and say that the Windows platform sees such a huge increase over our other supported platforms because developers/DBAs download and develop on Windows and then move to Linux or another platform for production. And this is certainly a valid point. However, in our most recent global survey, we asked the question what production platform people use for their MySQL database, and for MySQL’s enterprise paying customers, 54% said they used MySQL/Windows for development purposes, but 32% said they deploy production MySQL databases on the Microsoft platform – the 2nd most popular platform (RHEL was #1). Perhaps more surprising, the MySQL Community stated they use Windows for development 65% of the time and deploy MySQL production databases on Windows in 44% of their roll-outs, making it the number one platform for both development and production. You can see a general online quick poll we did on this subject here: http://dev.mysql.com/tech-resources/quickpolls/primary-os.html.

The point is, if you’re sold on the Microsoft platform for your data center or various stand-alone systems, then you’ve got plenty of company in using MySQL on Windows. And, of course, unlike SQL Server, if you want to move to Linux, Solaris, or other operating systems with MySQL, you certainly can.

Installing and Configuring MySQL vs. SQL Server

Although installing software isn’t a major factor when looking at databases, it can still come into play if you have many servers that you commission and upgrade. The last SQL Server 2008 Enterprise download I utilized was 1.6GB in size, with another download required for a new .NET framework install which, ironically, was the same size as the full MySQL 5.1 GA install for Windows – 150MB. I can install MySQL on my WIN boxes and be at a MySQL command utility prompt in under 5 minutes, but a SQL Server install takes much longer (with your mileage varying depending on the server you use). Just the .NET framework install took more than 5 times the installation time of the MySQL Server on one of my test machines.

But as I said above, installation isn’t a big consideration with databases. And some may think that the small size of the MySQL WIN download indicates that the database server isn’t very feature rich and that’s why it’s so tiny. We’ll more fully dispel that myth later in this article, but suffice it to say that such an assertion is false. Good things do come in small packages, as they say. One of my favorite examples to illustrate this point is one of our partner storage engines – InfoBright. Their MySQL-based data warehouse engine is only a 17MB download, installs in about 3 minutes, and can manage up to 30TB of data on a single server with incredibly fast response times.

Windows Services

Another install and configuration consideration is having multiple instances of a database server on one machine. This is another area where I find MySQL easier to use/manage than SQL Server. For me, it’s a cakewalk to have multiple instances of either the same version of MySQL or different versions of MySQL on the same box. All that’s basically required from a ‘tweaking’ standpoint is that you (a) install different versions of MySQL in different directories and, (b) use a different connection port (MySQL’s normal default port is 3306) for each MySQL install if and only if you want to run multiple instances of the database server at the same time.

I also like the fact that I can have a full-featured MySQL Server install on my WIN XP machines without having to use Windows Server, as is sometimes required for various versions of SQL Server Enterprise.

Lastly, when it comes to configuration, most SQL Server DBAs set their config parms via the SQL Server Management Studio whereas MySQL DBAs manually edit the my.cnf file. Note that, like SQL Server, most of the config parms for MySQL are dynamic and can be set/immediately changed via a SET GLOBAL statement at a MySQL command line prompt.
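
For example, a minimal sketch at the mysql command line, using max_connections as one such dynamic variable (any dynamic server variable works the same way):

SET GLOBAL max_connections = 500;
SHOW GLOBAL VARIABLES LIKE 'max_connections';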

Comparing Core SQL Server and MySQL Features

To me, the release of SQL Server 2008 was more impressive from a new feature introduction standpoint than SQL 2005, although 2005 did have some very welcome new enhancements such as Microsoft’s first cut of table and index partitioning. But SQL 2008 brought out some solid features I find very compelling such as parallel support for partitioned objects, transparent data encryption, resource governing, automated performance data collection, change data capture (CDC), and more.

With MySQL, we never engage in database feature wars because that’s not our goal – adding features and becoming as large as Microsoft or Oracle in that regard is not what we’re about. Our three priorities are reliability, performance, and ease of use, with features only being added when we believe they will strengthen those main concerns. Now that said, there certainly are times when various esoteric database features are necessary for an application, but those times are often the exception rather than the rule, with even Forrester Research stating that 80% of the current database installations only make use of around 30% of the vendor’s feature set. I can echo that conclusion from my personal experience in using SQL Server – rare was it that I actually used some of the more boutique features of the database server; most times it was the core RDBMS feature set that powered my applications.

Now you may think I’m setting you up for a letdown with the previous paragraph – that MySQL isn’t going to deliver when it comes to the features you’ll need to move from SQL Server, but you’d be mostly wrong. When I first began transitioning some work from SQL Server to MySQL a number of years ago, I was impressed with the feature set I found then in MySQL, and it has only improved since that time. If you’re unfamiliar with the MySQL feature set, I can’t possibly list everything included in MySQL in an article of this size, but here’s just a sample of how MySQL stacks up to SQL Server (latest versions of both) from a feature standpoint:

Database Feature | MySQL | SQL Server
Open Source | X |
Available on two-dozen platforms (32 and 64 bit) including Windows (RedHat, SuSE, Fedora, Solaris, HP-UX, AIX, SCO, FreeBSD, Mac OS, Windows) | X |
Pluggable Storage Engine Architecture (MyISAM, InnoDB, Merge, Memory, Archive, Cluster) | X |
High-Availability Clustered Database | X | X
ANSI SQL, SubQueries, Joins, Cursors, Prepared Statements | X | X
Stored Procedures, Triggers, SQL and User-Defined Functions | X | X
Updateable Views | X | X
ACID Transactions with Commit, Rollback | X | X
Distributed Transactions | X | X
Row-level Locking | X | X
Snapshot/Consistent Repeatable Reads (readers don’t block writers and vice-versa) | X | X
Server-enforced Referential Integrity | X | X
Strong Data type support (Numeric, VARCHAR, BLOB, etc.) | X | X
High-Precision Numeric Data types | X | X
Indexing (clustered, b-tree, hash, full-text) | X | X
Dynamic Memory Caches | X | X
Unique Query Cache (stores query and result set) | X |
Cost-Based Optimizer | X | X
Unicode, UTF-8 | X | X
XML, XPath | X | X
Geospatial support | X | X
Replication | X | X
Table and index Partitioning | X | X
VLDB (terabytes) capable | X | X
High-speed data load utility | X | X
Online Backup with Point-in-Time Recovery | X | X
Automatic Restart/Crash Recovery | X | X
Automatic Storage Management (auto-expansion, rollback management) | X | X
Compressed and Archive Tables | X | X
Information Schema/Data Dictionary | X | X
Security (GRANT/REVOKE, SSL, fine-grained object privileges) | X | X
Built-in data encryption and decryption | X | X
Built-in Task Scheduler | X | X
Drivers (ODBC, JDBC, .NET, PHP, etc.) | X | X
GUI management and development tools | X | X

Again, I don’t want to mislead you into thinking MySQL has more features than SQL Server, because the truth is it doesn’t. But the features and capabilities MySQL does possess are usually more than enough to handle systems that need a strong OLTP or analytical database. Also understand that, although SQL Server and MySQL have complementary features in many areas, there are areas where the depth of what SQL Server offers is greater than MySQL’s. For example, both MySQL and SQL Server have GIS features, but SQL Server’s are more robust than MySQL’s. Ditto when it comes to their job scheduler vs. ours. But conversely, there are cases where MySQL rises above SQL Server, such as partitioning: SQL Server does have parallel support for partitioning, but MySQL provides more options for the partitioning itself, offering range, hash, key, list, and composite partitioning whereas SQL Server only offers range. And continuing with the example of partitioning, I find MySQL’s partitioning much easier to use, as it’s defined right with the table via DDL at creation time vs. creating partitioning objects in SQL Server (partition schemes and functions) that are then applied to tables.
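
To make that last point concrete, here is a hedged sketch of range partitioning declared inline with a MySQL table (the table and column names are hypothetical):

CREATE TABLE orders (
  order_id INT NOT NULL,
  order_date DATE NOT NULL
)
PARTITION BY RANGE (YEAR(order_date)) (
  PARTITION p2007 VALUES LESS THAN (2008),
  PARTITION p2008 VALUES LESS THAN (2009),
  PARTITION pmax  VALUES LESS THAN MAXVALUE
);

The partition clauses live in the CREATE TABLE itself; no separate scheme or function objects are needed.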

Of course, there are features in SQL Server MySQL has no complement for. Security is one particular area where MySQL trails Microsoft – unlike SQL Server, MySQL has no concept of roles or external authentication, and its data auditing abilities are quite weak compared to Microsoft. I could list many more features that SQL Server has over MySQL (e.g. better query optimizations and methods, transparent data encryption, etc.), but as I said earlier, MySQL isn’t about going all out with Microsoft or anyone else in the features arena.

Storage Engines

One advantage you’ll have with MySQL over SQL Server is in the area of MySQL storage engines. I can’t tell you how many customers have told me they use MySQL over other databases like SQL Server because of its pluggable storage engine architecture. When I first began looking at storage engines, I was a bit skeptical, but not anymore. As a DBA, what I like is the flexibility storage engines offer. In one database, I can have tables that are transactional or not, main memory or not, compress data or not, and on. And each storage engine supplies performance and usage advantages over using just traditional tables like you find in SQL Server.

In addition, I like that not only do we at MySQL/Sun develop storage engines, but other vendors do as well, which helps us innovate faster. We have third-party vendors that have created column-oriented data warehouse engines, OLAP engines, transactional engines, and more. Using and switching between storage engines is very easy as well. In the end, what storage engines give you over SQL Server is more choice, opportunities for higher performance, and better adaptability for your applications.

Other Brief Server Feature Comparisons

Although I won’t go through all the various major feature differences between MySQL and SQL Server, there are a couple that I do want to bring to your attention. The first has to do with replication. I find MySQL’s replication easier to use and run than Microsoft’s, although you won’t find any GUI tools to help manage the setup and operations of MySQL replication (yet). MySQL uses a master/slave paradigm whereas SQL Server uses a publish-and-subscribe model. MySQL also has the option of using statement-based replication (which sends the actual SQL statements from a master to a slave for execution) or row-based replication (which sends the actual changed data values to one or more slave servers). One main thing you won’t find in MySQL that you will in SQL Server is conflict detection for peer replication topologies, but many MySQL shops that heavily rely on replication don’t find this omission much of a problem.

When it comes to high availability, many MySQL shops utilize replication to accomplish various fail-over scenarios. Although not exactly like SQL Server log shipping, it fills the bill pretty well. For other active/passive scenarios, MySQL users sometimes utilize DRBD which performs synchronous replication at the block level. And for scenarios needing the highest possible availability, MySQL Cluster can be used, although it does differ from SQL Server clustering quite a bit. MySQL Cluster is a shared-nothing architecture that employs a synchronous replication and heartbeat mechanism between multiple data nodes to provide high availability. Data is redundantly populated among the various nodes, and should a node go down, traffic is rerouted to surviving nodes until the down node is fixed and rejoins the cluster (at which point it rebuilds itself). MySQL Cluster also has a geographic replication option so you can replicate data from one cluster to another cluster that is physically located at a different site than the primary setup.

Going from the high to the low end, what about embedded database capabilities? SQL Server offers some nice options here with its Compact and Express editions, although each has limits in terms of how many CPU’s you can utilize and how large your database can be. MySQL doesn’t have such restrictions so you don’t have to worry there, and MySQL also offers an embedded library which can shrink the MySQL footprint down to around 3-4MB in size if you need to put a database on a device or something similar.

Lastly, if you’re using Microsoft’s integration services for ETL, BI, or similar functions, you’ll have to go outside MySQL for complementary capabilities. Fortunately, you have some nice options available with the Pentaho and Talend BI offerings, much of which you can use at no cost, but each do have pay-for options that you can move to if your applications need their pay-for only additions.

Development Capabilities

In version 5.0 of MySQL, stored procedures, triggers, views, and cursors became a part of the database server. If you’re used to SQL Server T-SQL, you won’t have much of a learning curve at all in switching to MySQL’s programming objects, but you unfortunately won’t find as rich a feature set in terms of development functions and capabilities. MySQL’s stored code objects are close to ANSI standards, but again, they don’t have the breadth and depth of T-SQL. One thing I particularly miss from SQL Server is a SQL debugger: MySQL has none, so you can’t easily debug complex stored procedures or functions.
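
For comparison, a minimal MySQL stored procedure looks something like this (the Customer table is hypothetical); note the DELIMITER change needed at the mysql command line:

DELIMITER //
CREATE PROCEDURE GetCustomer(IN custId INT)
BEGIN
  SELECT * FROM Customer WHERE id = custId;
END //
DELIMITER ;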

In terms of connectors, MySQL delivers C, C++, .NET, ODBC, JDBC, Perl, Python, Ruby, and PHP drivers, so you shouldn’t have a problem on that front.

I won’t go into much more detail here, primarily because MySQL has a solid developer and community following for the very reason that it does supply what developers need in most cases to create applications that deliver all that you can typically get done with SQL Server.

What about Performance and Scalability?

At MySQL, we’ve stopped getting many questions about performance and scalability primarily because there are so many sites using MySQL for major applications that produce heavy workloads (e.g. Google, Yahoo, Facebook, etc.). Plus, we’ve published a number of recent benchmarks that show MySQL leading the pack in various application scenarios (see our benchmark page at http://www.mysql.com/why-mysql/benchmarks/). And it’s not just the core MySQL Server showing such high performance – various storage engine partners have something to crow about in this area as well. For example, the Kickfire/MySQL data warehouse storage engine currently holds the world record in the TPC-H benchmarks at the 100 and 300GB levels (see http://www.kickfire.com/images/press_releases/tpc-h%20numbers%20300gb%20final%20may%208.pdf for more info).

When it comes to scalability, you can either scale up on a single machine or out across multiple servers. For many years, because of expensive hardware prices, the MySQL community and customer base used a scale-out approach and utilized either dual or quad boxes coupled with MySQL replication to create a dynamic and highly scalable architecture. But now that hardware prices have fallen and getting a machine with many CPU’s/cores doesn’t break the bank, some are now beginning to use scale up for their database systems. MySQL is keeping in step with these moves and released a new version of the MySQL Server (5.4, now in beta) with scalability patches for the InnoDB storage engine that allows it to address many more CPU’s/cores than it has in the past – up to 16-way generic x86 servers and 64-way Sun CMT servers.

The bottom line when it comes to comparing SQL Server and MySQL performance is that you should not encounter many (if any) roadblocks when it comes down to having fast and scalable MySQL-driven applications over what you’re used to with SQL Server.

Management, Monitoring, and Tuning

Most people won’t argue that, for years, Microsoft dominated the area of ease-of-use for managing database servers. The SQL Server Enterprise Manager (now SQL Server Management Studio) had really no peers among the major database vendors and the coupling of the SQL Server Agent job scheduling/notification capabilities with wizard-driven aids like database maintenance plans made it very easy to administer many SQL Servers (at least I always thought so).

With the MySQL Server, there are several GUI management tools provided by MySQL you can download and use and, in fact, they are the second most downloaded software package from our web site. You have a strong modeling tool in MySQL Workbench that helps you visually design databases; a SQL IDE for creating queries, stored procedures and more; a server administrator tool; and a migration tool for moving schema and data to MySQL. Further, there are a number of both free and pay-for products on the market that can help with MySQL management such as those provided by Quest Software and others.

Although I’ve always believed SQL Server was easy to manage, I would, however, argue that SQL Server wasn’t always that easy to monitor or tune, especially from a global standpoint. As a DBA, I either built my own cross-server monitoring solutions or turned to third parties for help. And while at Embarcadero Technologies, I helped design and build a number of SQL Server monitoring and tuning aids that did pretty well in the market.

Microsoft has introduced a few new/improved tools since my SQL Server DBA days such as a better SQL Profiler, fairly helpful monitoring reports, better performance diagnostic objects and collection ability, and a database engine tuning advisor. If you’re accustomed to using these things you may wonder if there are any complements on the MySQL side.

Starting at the most basic level, there are a number of command line monitoring options that you can run to get a handle on general server operations (memory, IO, etc). These are accomplished through a variety of MySQL SHOW commands and through using the small monitor that’s built into the MySQL Administrator GUI tool.
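
For reference, a few of those SHOW commands (all standard MySQL):

SHOW GLOBAL STATUS;           -- server-wide counters (connections, traffic, I/O)
SHOW PROCESSLIST;             -- threads/queries currently running
SHOW ENGINE INNODB STATUS;    -- detailed InnoDB internals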

Moving up quite a bit from command line monitoring, if you subscribe to MySQL Enterprise, you’ll be able to use the MySQL Enterprise Monitor, which I think bests some of what Microsoft offers in a couple of ways. We’ve architected MySQL’s Enterprise Monitor to be global in nature, so from one web-based console, you can see and monitor all your servers and understand what’s up, what’s down, and which servers require your attention from a tuning standpoint.

[Screenshot: MySQL Enterprise Monitor]

What I like about the MySQL Enterprise Monitor from a DBA standpoint is the global aspect I mentioned above: the heat chart (on the right) shows you which servers need help, and the alerts in the middle give you more detail on best-practice deviations that have occurred across your servers and need correction. The Monitor is powered by a number of best-practice advisors that can watch one or all of your servers, and you can add your own custom advisors if you’d like. The advisors also come with expert tuning advice on how to correct any problems they find on your server.

Many SQL Server DBAs including myself make heavy use of the SQL Server Profiler to catch and analyze SQL traffic. On the MySQL side, you have a couple of different options available. For interactive SQL analysis, there is a SQL profiler you can use at the MySQL command line prompt that gives a lot of diagnostic information (far above a normal EXPLAIN) on queries you execute. You can read an article I wrote about our interactive profiler on the MySQL dev zone at: http://dev.mysql.com/tech-resources/articles/using-new-query-profiler.html. For a more robust SQL capture tool, you can use the Query Analyzer that’s part of the MySQL Enterprise Monitor. Again, what I like with the Query Analyzer is its global characteristic: you can capture and roll up the worst queries across all your MySQL Servers (if you wish) vs. just a single server at a time like you have with SQL Server. The latest version (now in beta) also allows you to visually identify performance/usage spikes on your servers, highlight the spike in the graph, and immediately see all the SQL code that was running at the time.
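
As a sketch, the interactive profiler mentioned above works along these lines at the command prompt (available in MySQL 5.0.37 and later; the query is just an example):

SET profiling = 1;
SELECT COUNT(*) FROM customer;   -- the statement you want to analyze
SHOW PROFILES;                   -- list recently profiled statements
SHOW PROFILE FOR QUERY 1;        -- step-by-step timings for statement 1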

[Screenshot: Query Analyzer data activity view]

So when it comes to management, monitoring, and tuning, you’ve got plenty on the MySQL side to help you ensure your servers are running as best they can.

And Let’s not Forget Cost…

Even though we’ve been primarily talking about technical aspects of why moving to MySQL from SQL Server makes sense, let’s not forget you can save quite a bit of money in most cases as well. Check out our Total-Cost-of-Ownership calculator at http://www.mysql.com/tcosavings/ and customize it to see how much you can save with MySQL over Microsoft.

[Screenshot: TCO calculator]

How to Transition from SQL Server to MySQL

So if you liked what you’ve read so far, you’ll naturally have the question “So how can I move to MySQL from SQL Server?” First, realize you can certainly institute a co-existence strategy with SQL Server and MySQL – in fact, many do. Use MySQL where it makes sense, and if you need features or capabilities that are absent in MySQL you can go with SQL Server.

Migration

But if you want to migrate existing SQL Server databases to MySQL, you can do so in a couple of different ways. First, you can download and use the MySQL Migration Toolkit, which is a graphical wizard-driven tool that will migrate your databases from SQL Server to MySQL. I’ve used it a number of times and the product works like a champ, although there are some limitations you should keep in mind. First, it will only do tables, indexes, and data (and views where possible). So if you want to migrate your T-SQL procedures, triggers, and functions, you’ll need to use one of our partner tools that I’ll describe in a minute. Second, realize it is a client-side only tool, so if you’re going to be dragging half a terabyte or so into a MySQL Server, it’s probably not your best bet. A better plan may be to use the Migration Toolkit to move the schemas and then unload the data via SQL Server’s BCP tool and load it into MySQL with our load utility.

Alternatively you can use an ETL tool like Pentaho or Talend to move your databases to MySQL, although it’s a little more involved. These tools have the advantage of being able to do transformations of data during the migration, which the migration toolkit doesn’t have.

If you need more industrial-strength firepower to migrate your SQL Server databases to MySQL, you can look into the Ispirer SQLWays tool, which is sometimes used by the MySQL Professional Services folks in the field. SQLWays will preserve any large investment you’ve made in T-SQL stored procedures, functions, triggers, and more by translating them to MySQL syntax. It will also do all the schema and data migrations as well, plus it will even go so far as to analyze and replace code in Visual Basic, C, Java, and other like programs so that any calls are made to MySQL instead of SQL Server.

Something else to keep in mind when moving from SQL Server to MySQL is there are some objects you won’t have a complement for. Things like synonyms, global-partitioned indexes, any OLAP objects, DDL and statement-based triggers, and a few other things can’t be migrated so you’ll have to find some workaround or other similar object in MySQL for those types of SQL Server features.

Conclusion

As I said in the beginning, I thoroughly enjoyed my stint as a SQL Server DBA and if you’re looking for any harsh words aimed at Microsoft’s database, you’ll have to look elsewhere. But that said, I will also follow it up and say I find MySQL a very worthy alternative to SQL Server, especially in the departmental applications area and Small-Medium Business (SMB) market segment where SQL Server is routinely found. Of course, MySQL excels in the Web, embedded, SaaS, Telco, and Enterprise 2.0 areas as well and has the premier customer rolodex to prove it.

So why should you consider a move from SQL Server to MySQL? Before we answer that question, visit http://www.microsoft.com/sqlserver/2008/en/us/compare-mysql.aspx and see why Microsoft thinks you should switch from us to them. I think they have some valid points there (e.g. advanced security features, which we’ve already covered above, built-in BI tools, etc.), but I think a number of their other arguments are incorrect.

Now, here are a few reasons we’ve discussed in this article as to why you should consider moving from SQL Server to MySQL:

  • MySQL runs great on the Microsoft OS platform, is extremely popular as evidenced by many developing and running production MySQL databases on Windows, but MySQL can be ported to other operating systems if desired, whereas SQL Server cannot.
  • Regarding installation and configuration, MySQL installs faster, has a smaller footprint while still being able to manage fairly large databases, and has fewer configuration knobs that need turning than SQL Server.
  • There are no size restrictions (CPU, RAM, database size, etc.) in any MySQL Server offering unlike Microsoft’s constraints that are placed on their standard, workgroup, compact, and express editions.
  • MySQL storage engines provide more flexibility and offer more performance and custom application options over SQL Server’s standard RDBMS table type. Plus, the growing storage engine ecosystem gives MySQL great opportunity to quickly develop and innovate.
  • MySQL’s feature set can handle the vast majority of RDBMS use cases (e.g. OLTP, warehousing, etc.) and has simpler implementation models than SQL Server in some areas (e.g. partitioning, replication).
  • In the area of high availability, MySQL has a number of proven solutions including replication, SANs, DRBD, and MySQL Cluster, which equal or best SQL Server depending on the scenario.
  • Although MySQL lacks some of SQL Server’s optimizer sophistication and parallel features, MySQL’s performance has been proven to deliver under heavy OLTP and web-styled workloads and can scale both up and out very well.
  • MySQL’s monitoring and query analysis methodology is global in nature and is better suited to more easily monitor and tune many servers at one time over SQL Server’s server-at-a-time performance analysis paradigm.
  • The ubiquity of MySQL, the Open Source nature of the product, and its great Community provide many benefits including a great developer and DBA network of everyone working together to help ensure a high-quality product and each other’s success.
  • MySQL’s cost is typically much lower than Microsoft’s.

Developing a SQL Server/MySQL co-existence strategy and/or moving from SQL Server to MySQL is a pretty painless procedure and provides many technical and financial benefits in the process. If you want to learn more about transitioning from Microsoft to MySQL, visit our migration central web page at http://www.mysql.com/why-mysql/migration/ where you’ll find case studies, migration white papers, and more on making the move to MySQL.

As always, thanks for your support of MySQL and Sun!




Aug 17, 2009

Adding Identity column as Primary key to Existing Table

--Create a table like this:
Create TABLE DataTable(id int NOT NULL, name varchar(20), CONSTRAINT pk_id PRIMARY KEY (id))
--now insert some values for this
Insert into DataTable Values(4, 'test 1')
Insert into DataTable Values(5, 'test 2')
Insert into DataTable Values(6, 'test 3')

--Drop the existing key
ALTER TABLE DataTable DROP CONSTRAINT pk_id

--adding pid as an identity column
ALTER TABLE DataTable add pid int identity(1,1)

--adding the new column as the primary key
ALTER TABLE DataTable ADD CONSTRAINT pk_DataTable PRIMARY KEY (pid)
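
To verify the result, a quick check against the rows inserted above:

--pid should now number the rows 1, 2, 3
SELECT pid, id, name FROM DataTable ORDER BY pid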

OTN Discussion Forums : DateADD in oracle - How to add the date ...

From the linked thread: "select sysdate, sysdate - interval '2' DAY from dual;"
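
The same interval arithmetic works for other units too, for example:

select sysdate + interval '3' month from dual;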

Aug 10, 2009

Taking a look at CROSS APPLY

Applying a Sub-Query, Joining a Derived Table ...

I think the easiest way to think of CROSS APPLY is that it is like doing a CROSS JOIN with a correlated sub-query instead of a derived table. Let's see if I can explain that .... A derived table is 'self-contained', in that all tables and columns in the parent SELECT are not accessible (though variables and parameters can be referenced). For example, consider:

select A.*, b.X
from A
cross join (select B.X from B where B.Val=A.Val) b

That is not legal because A.Val is out of scope within the derived table; this is because the derived table is evaluated independently of the other tables in the SELECT. To limit the rows in table B so that B.Val = A.Val, we must do that outside of the derived table via a join or in the criteria:

select A.*, b.X
from A
cross join (select * from B) b
where A.Val = b.Val

(Of course, the above is equivalent to doing an INNER JOIN to the derived table, or just joining to the table B.)

Also, keep in mind that the scope-of-derived-tables rule isn't just for CROSS JOINS, it's for all JOINS -- CROSS, INNER, OUTER and even UNION; they all use 'self-contained' derived tables.

This is in contrast to a correlated sub-query, where the parent SELECT is in scope for the sub-query; the sub-query is evaluated for each row in the query, so the other tables and columns in the SELECT are all available:

select A.*, (select B.X from B where B.Val=A.Val) as X
from A

(Note: I am ignoring for now the fact that returning multiple rows in a sub-query will return an error.)

This is an easy way to think of the difference between CROSS JOIN and CROSS APPLY. CROSS JOIN, as we saw, joins to a derived table; CROSS APPLY, despite looking like a JOIN, actually applies a correlated sub-query. This brings both the advantages of a correlated sub-query and its performance implications.

So, we can simply rewrite our first example using CROSS APPLY like this:

select A.*, b.X
from A
cross apply (select B.X from B where B.Val=A.Val) b

Since we are performing an APPLY and not a JOIN, A.Val is in scope and it works just fine.
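
If you want to see this for yourself, here is a self-contained harness using throwaway temp tables (the toy data is mine, not the article's):

-- two tiny tables to play with
create table #A (Val int)
create table #B (Val int, X varchar(10))
insert into #A values (1)
insert into #A values (2)
insert into #B values (1, 'one')
insert into #B values (1, 'uno')
insert into #B values (2, 'two')

-- the correlated sub-query is legal under APPLY
select A.*, b.X
from #A A
cross apply (select B.X from #B B where B.Val = A.Val) b
-- returns (1, 'one'), (1, 'uno'), (2, 'two')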

Table Valued User Defined Functions

Note that the same rules apply when using Table-Valued User-Defined Functions:

select A.*, B.X
from A
cross join dbo.UDF(A.Val) B

is not legal; once again, A.Val is not in scope for the user-defined function. The best we could do before SQL 2005 was to use a correlated sub-query:

select A.*, (select X from dbo.UDF(A.Val)) X
from A

However, that is not logically equivalent; the UDF cannot return more than 1 row or it will result in an error, and wouldn't make logical sense anyway if it did.

Starting with SQL 2005, we can now use CROSS APPLY and it will work fine:

select A.*, b.X
from A
cross apply dbo.UDF(A.Val) b

So, that is one way to think of the difference between a JOIN and an APPLY; a JOIN combines two separate result sets, but APPLY is more of a loop that evaluates one result set over and over for each row in another. This means that, in general, APPLY will be less efficient than a JOIN, just as, in general, correlated sub-queries are less efficient than derived tables. (The optimizer, however, is generally quite good at optimizing correlated sub-queries when possible.)

So, why use CROSS APPLY instead of a correlated sub-query? What's the advantage? Actually, quite a lot -- it is much more powerful!

CROSS APPLY can return multiple rows

Unlike correlated sub-queries, CROSS APPLY works with multiple rows. This allows us to do things like 'joining' a table to a function that parses a CSV column in that table into multiple rows:

select A.ID, b.Val
from A
cross apply dbo.ParseCSV(A.CSV) b

When the ParseCSV() function returns multiple rows, it simply acts as if we have joined Table A to the function's return table, duplicating the rows in Table A for each row in the joined table. This is not possible with a correlated sub-query, and will result in an error. This is a quick and easy way to parse a table of data into multiple rows in an efficient 'set-based' manner when the algorithm requires a complex User-Defined Function. (The CSV is not a great example, but other parsing routines are not as easily accomplished via a JOIN to a numbers table.)
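
The article never shows ParseCSV() itself, so here is one minimal way it could look: a multi-statement table-valued function that walks the string with CHARINDEX (my sketch; a numbers-table splitter would be the more set-based choice):

CREATE FUNCTION dbo.ParseCSV (@csv varchar(8000))
RETURNS @out TABLE (Val varchar(8000))
AS
BEGIN
    DECLARE @pos int
    WHILE LEN(@csv) > 0
    BEGIN
        SET @pos = CHARINDEX(',', @csv)
        IF @pos = 0
        BEGIN
            -- last (or only) element
            INSERT @out VALUES (LTRIM(RTRIM(@csv)))
            SET @csv = ''
        END
        ELSE
        BEGIN
            -- element up to the next comma, then chop it off and continue
            INSERT @out VALUES (LTRIM(RTRIM(LEFT(@csv, @pos - 1))))
            SET @csv = SUBSTRING(@csv, @pos + 1, LEN(@csv))
        END
    END
    RETURN
END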

CROSS APPLY can return multiple columns

Again, in a correlated sub-query, we can only return a single value. If we write a SQL statement that returns a running sum, we can use a correlated sub-query like this:

select o.*,
(select sum(Amount) from [Order] p
where p.OrderDate <= o.OrderDate) as RunningSum
from [Order] o

However, what if we'd like to return an additional running sum of Orders based on some other criteria (e.g., for orders with the same 'OrderCode')? We'd need another correlated sub-query, greatly reducing the efficiency of our SELECT:

select o.*,
(select sum(Amount) from [Order] p
where p.OrderDate <= o.OrderDate) as RunningSum,
(select sum(Amount) from [Order] p
where p.OrderCode = o.OrderCode and p.OrderDate <= o.OrderDate) as SameCode
from [Order] o

However, we can easily re-write that using a single CROSS APPLY:

select o.*, rs.RunningSum, rs.SameCode
from [Order] o
cross apply
(
select
sum(Amount) as RunningSum,
sum(case when p.OrderCode = o.OrderCode then Amount else 0 end) as SameCode
from [Order] p
where p.OrderDate <= o.OrderDate
) rs

So, we get the benefit of returning multiple columns like a derived table, and we also get the ability to reference outer values in our SELECT to use in criteria and CASE expressions. Very, very powerful.

CROSS APPLY also allows us to get columns from the 'previous' row in a table quite easily:

select o.*, prev.*
from [Order] o
cross apply
(
select top 1 *
from [Order] p where p.OrderDate < o.OrderDate
order by OrderDate DESC
) prev

We can express the running total and counts as CROSS JOINS or INNER JOINS by reworking a correlated sub-query, but using TOP and referencing the outer SELECT is not easily done in that manner.

Note that the above CROSS APPLY will not return any orders without a previous order; we would use OUTER APPLY to ensure that all orders are returned even if no previous orders exist:

select o.*, prev.*
from [Order] o
outer apply
(
select top 1 *
from [Order] p where p.OrderDate < o.OrderDate
order by OrderDate DESC
) prev

We can also CROSS APPLY to a Table-Valued User Defined Function that returns exactly one row, but with multiple columns, to return separate pieces of data from a single function call. For example, we can parse an email address into separate username and domain columns. A few months back I wrote a SQL Team article that discusses that concept.
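
For completeness, here is one way such an email-splitting function might look: an in-line table-valued function returning one row with two columns (my sketch, not the SQL Team article's code; the Users table and Email column are assumptions):

CREATE FUNCTION dbo.ParseEmail (@email varchar(320))
RETURNS TABLE
AS
RETURN
(
    -- NULLIF guards the no-'@' case: Username comes back NULL instead of raising an error
    SELECT LEFT(@email, NULLIF(CHARINDEX('@', @email), 0) - 1) AS Username,
           SUBSTRING(@email, CHARINDEX('@', @email) + 1, LEN(@email)) AS Domain
)
GO

-- usage:
SELECT u.Email, e.Username, e.Domain
FROM Users u
CROSS APPLY dbo.ParseEmail(u.Email) e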

Summary

CROSS and OUTER APPLY are very powerful and can be very useful, but we must be careful to use them only when necessary; I am still testing the possibilities, but in general an APPLY will tend not to be as efficient as a JOIN. In addition, APPLY is mostly demonstrated by applying table-valued user-defined functions, but it can be used with in-line SELECT statements as well.

Original: Taking a look at CROSS APPLY

A Story of the Deleted Transaction Log - SQLServerCentral

A Story of the Deleted Transaction Log

I was lying in bed one Saturday night and my phone rang. It was one of our team leads on the other end. The conversation began something like this: 'Hey, this client ran out of disk space so we deleted the transaction log for the database as it was over 100GB'. Well, I wasn't quite asleep but I certainly wasn't awake, and my initial thought was that this guy was having a little fun with me. Before my mind could fully envision the animated balled-up pieces of paper dematerializing as they were thrown towards the recycle bin, I heard the funniest thing of the night come out of the lead's mouth: 'Do you think that is related to the application not functioning?'

After rubbing my eyes and placing the jack back under the bed that I often use to get my jaw back into place, I asked if the LDF file was in the Recycle Bin. I am sure you know that the answer to that question was 'No', mainly because the file is too big for most bins and secondly because nothing is that easy. Obviously the next question was 'When was the last backup?' Wouldn't you know there wasn't one, go figure!

To fill you in on a little detail, the company I work for develops software. Our support model includes support of the applications, and we often get involved with database issues, but we do not own or maintain databases. The client had called the support line and stated they were out of space and asked if we could help them. This particular client does not have a DBA on staff, so our technicians were trying to help them. Certainly the wrong route was taken but at this point it was too late to look back. I am the sole DBA on the support side of the company, and since this was now a DBA issue that was caused by our technicians, I had to come up with a solution. The solution I came up with was the best for this particular issue based upon all the factors, specifically no backups.

After getting over the ridiculousness of the issue I began researching how to resolve the problem. I ran into a few hints in BOL and then other hints on the Internet. After all the research, I determined the best option would be to use DBCC CHECKDB with the REPAIR_ALLOW_DATA_LOSS option. BOL stated this option could repair a transaction log. Well, what could be more broken about a transaction log than not having one at all?

Once I determined I would go down this path, the first problem was that the db would not come online, not even in emergency mode. So I stopped the SQL Server service and renamed the MDF file. Let's say the database name was DbLive; I renamed the MDF from DbLive.mdf to DbBad.mdf. I then started the SQL Server service and created a new DbLive db with a corresponding DbLive_log.ldf file. Then I stopped the SQL service again, deleted the new DbLive.mdf file, renamed DbBad.mdf back to DbLive.mdf, and started the SQL service once more.

You can imagine that these steps didn't fix the problem. The db did not come online, but at least it now had a transaction log, albeit one from a different database. As far as SQL Server was concerned the log was there; it was just in a jacked-up state (yes, that is a technical term).

The next thing I did was run ALTER DATABASE DbLive SET EMERGENCY. This put the db in emergency mode and allowed SQL Server to work with it at an emergency level. Next I ran sp_dboption 'DbLive', 'single user', 'true' to put the db in single user mode, preparing it for the DBCC run. Finally I ran DBCC CHECKDB ('DbLive', REPAIR_ALLOW_DATA_LOSS). After about an hour the db was ONLINE!

I took the db out of single user mode by running sp_dboption 'DbLive', 'single user', 'false'. I then ran a few queries against the database and all was fine. I am not totally sure what data loss there was, if any. Since the MDF was intact, and the SQL service was taken offline before the LDF file was deleted, all data should have been committed to the MDF, so I believe there was no data loss. This happened several months ago and there have not been any reported db or application anomalies, so I must assume all data was present.

Oh and yes, I slapped a maintenance plan on that system and sent the backups to an obscure folder. If they run out of space again it will probably be due to having too many backups! What I meant to say was that I explained to the client how important backups are.


Script:

/*
1. Stop the SQL service; rename DbLive.mdf to DbBad.mdf.
2. Start the SQL service; create a fake DbLive db (with log file etc.).
3. Stop the SQL service.
4. Delete the new DbLive.mdf.
5. Rename DbBad.mdf back to DbLive.mdf.
6. Start the SQL service.
7. Run the following script:
*/
ALTER DATABASE DbLive SET EMERGENCY
EXEC sp_dboption 'DbLive', 'single user', 'true'
DBCC CHECKDB ('DbLive', REPAIR_ALLOW_DATA_LOSS)
EXEC sp_dboption 'DbLive', 'single user', 'false'

Original link:
A Story of the Deleted Transaction Log - SQLServerCentral
http://www.sqlservercentral.com/articles/Disaster+Recovery/63311/

Aug 4, 2009

Using CROSS APPLY in SQL Server 2005 - SQLTeam.com

Using CROSS APPLY in SQL Server 2005


My interest in writing this article was sparked by an MSDN article titled SQL Server 2005: The CLR Enters the Relational Stage. The article shows how to write a function that returns the top three countries per category. That's always been something that was difficult to do in SQL, so I was curious about the approach. The article started out well but I was very unhappy by the end. It's just soooo much easier to do this in SQL Server 2005 using the new CROSS APPLY clause in Transact-SQL. So I'm going to write a query to return the top 3 orders for each customer and I'm going to do it in about 10 lines of SQL. (UPDATE: An alert reader found an even better approach!)

First please take a second to read the MSDN article. Pay special attention to how much C# code is required for the solution and how flexible it is.

Note: I also want to mention here that I started out trying to replicate their solution. I tried to install the MSI but that failed with an invalid-characters message. I tried to recreate the CLR code, but since he mostly posted snippets and had three versions, I finally gave up trying to get that to work. One thing I really wanted to do was compare performance on identical data; unfortunately I wasn't able to do that. My example will use the AdventureWorks database.

Let's start at the TOP

Along the way to making this solution work we'll discuss a few new features of SQL Server 2005. The first is that the TOP clause can now take a variable:

DECLARE @Rows INT
SET @Rows = 10

SELECT TOP ( @Rows ) *
FROM Sales.SalesOrderHeader

This will return the top 10 rows from SalesOrderHeader. You can also replace @Rows with anything that evaluates to a number. The following query looks odd but runs just fine:

SELECT TOP (
SELECT COUNT(*)
FROM Sales.Customer
) *
FROM Sales.SalesOrderHeader

There are 19,185 rows in the Customer table and this query returns the top 19,185 rows from SalesOrderHeader. You can also use the TOP clause for INSERT, UPDATE and DELETE statements. If you wanted to DELETE in batches of 500 you can now do that using the TOP clause.
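
The batched DELETE works by deleting TOP (n) rows repeatedly until nothing qualifies. A rough sketch (the table name and date cutoff here are made up for illustration):

WHILE 1 = 1
BEGIN
    -- delete at most 500 qualifying rows per iteration to keep the log manageable
    DELETE TOP (500)
    FROM dbo.BigAuditLog
    WHERE LoggedAt < '20050101'

    IF @@ROWCOUNT = 0 BREAK  -- nothing deleted this pass; we're done
END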

My Function

Next we need a function to return the TOP X rows from SalesOrderHeader based on the total sales amount. That function looks like this:

CREATE FUNCTION dbo.fn_GetTopOrders(@custid AS int, @n AS INT)
RETURNS TABLE
AS
RETURN
SELECT TOP(@n) *
FROM Sales.SalesOrderHeader
WHERE CustomerID = @custid
ORDER BY TotalDue DESC
GO

Notice that it accepts the number of orders to return as a parameter. Also notice that I'm using SELECT * all over the place. I really encourage you to explicitly list out the columns when you write this for real. If you call this with a CustomerID and a number of rows it will return that many rows ordered by the total amount of the order in descending order. Also notice that there is an ORDER BY clause in this function. We'll talk about that in a minute.

Apply the APPLY Clause

The real magic happens when you use SQL Server 2005's new APPLY clause. The APPLY clause lets you join a table to a table-valued function. That lets you write a query like this:

SELECT  C.CustomerID,
O.SalesOrderID,
O.TotalDue
FROM
AdventureWorks.Sales.Customer AS C
CROSS APPLY
AdventureWorks.dbo.fn_GetTopOrders(C.CustomerID, 3) AS O
ORDER BY
CustomerID ASC, TotalDue DESC

which results in this...

CustomerID  SalesOrderID TotalDue
----------- ------------ ---------------------
1           45283        37643.1378
1           46042        34722.9906
1           44501        26128.8674
2           46976        10184.0774
2           47997        5469.5941
2           57044        4537.8484
3           53616        92196.9738
3           47439        78578.9054
3           48378        56574.3871
4           47658        132199.8023
. . .

The APPLY clause acts like a JOIN without the ON clause and comes in two flavors: CROSS and OUTER. The OUTER APPLY clause returns all the rows on the left side (Customers) whether the table-valued function returns any rows for them or not; the columns the table-valued function would return are null when it returns no rows. CROSS APPLY only returns rows from the left side (Customers) when the table-valued function returns rows.

Notice that I'm just passing in the CustomerID to the function. It returns the TOP 3 rows based on the amount of the order. Since I'm using CROSS APPLY a customer without orders won't appear in the list. I can also pass in a number other than 3 to easily return a different number of orders per customer. Even better I can pass in a different number of orders for each customer. So I could list the top 5 orders for one type of customer but the top 10 for another type of customer. How cool is that?!?

And it gets even better. Remember the function has an ORDER BY in it. It will always return the top orders based on the amount. However you can change the ORDER BY clause in the query that calls the function to display those rows in whatever order you want. You could easily display the top 3 orders in ascending order instead.
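
For example, to show each customer's top 3 orders smallest-first, leave the function alone and flip only the outer ORDER BY:

SELECT  C.CustomerID,
O.SalesOrderID,
O.TotalDue
FROM
AdventureWorks.Sales.Customer AS C
CROSS APPLY
AdventureWorks.dbo.fn_GetTopOrders(C.CustomerID, 3) AS O
ORDER BY
CustomerID ASC, TotalDue ASC -- still the top 3 by amount, just displayed ascending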

Performance and Conclusion

Unfortunately I wasn't able to compare the performance of my solution to MSDN's. The query plan from my solution doesn't look that bad. (And did I mention that it showed me a full query plan for the query with the UDF rather than just a UDF step? Sweet!) If anyone gets their solution running send me the code and I'll update the article.

I also think their solution returns the result as a comma separated value. Now that would be a great use for the CLR. You could easily wrap that around the results of this function for a fast, efficient procedure. And it happens to be the next article I'm working on. :) If you want to see a great article on using the CLR inside SQL Server I'd start with Using CLR Integration in SQL Server 2005. The code is from an earlier beta but it gives you a great idea of when to use CLR code.

In summary I think that 5-10 lines of SQL is much simpler than 50+ lines of C# (or VB) for this type of solution. The article provides a lot of good information on the CLR inside SQL Server. I just don't think this is the best place to use it. One of my biggest fears is that people will use the CLR incorrectly and cause performance problems. This is the first real case I've seen of this. Is this really how Microsoft wants us to use the CLR?

UPDATE

I was curious when I posted this how long it would take to find an even better solution. It didn't take long. Arnold Fribble posted it in the forums. The syntax needed a little tweaking but it appears you can do this in a single query. And how did I forget the PARTITION BY clause of the ROW_NUMBER function? Doh! Here's the cleaned up query:

SELECT
    CustomerID,
    SalesOrderID,
    TotalDue
FROM (
    SELECT
        O.CustomerID,
        O.SalesOrderID,
        O.TotalDue,
        RowNumber = ROW_NUMBER() OVER (
            PARTITION BY O.CustomerID
            ORDER BY O.TotalDue DESC)
    FROM
        Sales.SalesOrderHeader AS O
) AS d
WHERE
    d.RowNumber <= 3

You have to use a derived table (or a Common Table Expression) since the row number function can't be used in the WHERE clause of the query where it's defined. Now I'll have to write an article on the new ranking functions. :)


Using CROSS APPLY in SQL Server 2005 -
Publish Post
SQLTeam.com

Jul 31, 2009

String Format for DateTime [C#]


This example shows how to format DateTime values using the String.Format method. All of this formatting can also be done with the DateTime.ToString method.
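
For instance, these two calls produce the same string (a quick equivalence check using one of the custom formats below):

[C#]
DateTime dt = new DateTime(2008, 3, 9, 16, 5, 7, 123);
String.Format("{0:yyyy-MM-dd HH:mm}", dt); // "2008-03-09 16:05"
dt.ToString("yyyy-MM-dd HH:mm");           // "2008-03-09 16:05"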

Custom DateTime Formatting

The following custom format specifiers are available: y (year), M (month), d (day), h (hour, 12-hour clock), H (hour, 24-hour clock), m (minute), s (second), f (second fraction), F (second fraction, trailing zeroes trimmed), t (A.M. or P.M.) and z (time zone).

The following examples demonstrate how the format specifiers are rewritten in the output.

[C#]
// create date time 2008-03-09 16:05:07.123
DateTime dt = new DateTime(2008, 3, 9, 16, 5, 7, 123);

String.Format("{0:y yy yyy yyyy}", dt); // "8 08 008 2008" year
String.Format("{0:M MM MMM MMMM}", dt); // "3 03 Mar March" month
String.Format("{0:d dd ddd dddd}", dt); // "9 09 Sun Sunday" day
String.Format("{0:h hh H HH}", dt); // "4 04 16 16" hour 12/24
String.Format("{0:m mm}", dt); // "5 05" minute
String.Format("{0:s ss}", dt); // "7 07" second
String.Format("{0:f ff fff ffff}", dt); // "1 12 123 1230" sec.fraction
String.Format("{0:F FF FFF FFFF}", dt); // "1 12 123 123" without zeroes
String.Format("{0:t tt}", dt); // "P PM" A.M. or P.M.
String.Format("{0:z zz zzz}", dt); // "-6 -06 -06:00" time zone

You can also use the date separator / (slash) and the time separator : (colon). These characters will be rewritten to the characters defined in the current DateTimeFormatInfo.DateSeparator and DateTimeFormatInfo.TimeSeparator.

[C#]
// the date separator in German culture is "." (so "/" changes to ".");
// passing the culture explicitly makes the difference visible
// (CultureInfo lives in System.Globalization)
String.Format(CultureInfo.GetCultureInfo("en-US"), "{0:d/M/yyyy HH:mm:ss}", dt); // "9/3/2008 16:05:07" - English (en-US)
String.Format(CultureInfo.GetCultureInfo("de-DE"), "{0:d/M/yyyy HH:mm:ss}", dt); // "9.3.2008 16:05:07" - German (de-DE)

Here are some examples of custom date and time formatting:

[C#]
// month/day numbers without/with leading zeroes
String.Format("{0:M/d/yyyy}", dt); // "3/9/2008"
String.Format("{0:MM/dd/yyyy}", dt); // "03/09/2008"

// day/month names
String.Format("{0:ddd, MMM d, yyyy}", dt); // "Sun, Mar 9, 2008"
String.Format("{0:dddd, MMMM d, yyyy}", dt); // "Sunday, March 9, 2008"

// two/four digit year
String.Format("{0:MM/dd/yy}", dt); // "03/09/08"
String.Format("{0:MM/dd/yyyy}", dt); // "03/09/2008"

Standard DateTime Formatting

In DateTimeFormatInfo there are standard patterns defined for the current culture. For example, the ShortTimePattern property is a string containing the value h:mm tt for the en-US culture and HH:mm for the de-DE culture.

The following table shows the patterns defined in DateTimeFormatInfo and their values for the en-US culture. The first column contains the format specifiers for the String.Format method.

Specifier  DateTimeFormatInfo property       Pattern value (for en-US culture)
t          ShortTimePattern                  h:mm tt
d          ShortDatePattern                  M/d/yyyy
T          LongTimePattern                   h:mm:ss tt
D          LongDatePattern                   dddd, MMMM dd, yyyy
f          (combination of D and t)          dddd, MMMM dd, yyyy h:mm tt
F          FullDateTimePattern               dddd, MMMM dd, yyyy h:mm:ss tt
g          (combination of d and t)          M/d/yyyy h:mm tt
G          (combination of d and T)          M/d/yyyy h:mm:ss tt
m, M       MonthDayPattern                   MMMM dd
y, Y       YearMonthPattern                  MMMM, yyyy
r, R       RFC1123Pattern                    ddd, dd MMM yyyy HH':'mm':'ss 'GMT' (*)
s          SortableDateTimePattern           yyyy'-'MM'-'dd'T'HH':'mm':'ss (*)
u          UniversalSortableDateTimePattern  yyyy'-'MM'-'dd HH':'mm':'ss'Z' (*)
(*) = culture independent

The following examples show the usage of standard format specifiers in the String.Format method and the resulting output.

[C#]
String.Format("{0:t}", dt);  // "4:05 PM"                         ShortTime
String.Format("{0:d}", dt); // "3/9/2008" ShortDate
String.Format("{0:T}", dt); // "4:05:07 PM" LongTime
String.Format("{0:D}", dt); // "Sunday, March 09, 2008" LongDate
String.Format("{0:f}", dt); // "Sunday, March 09, 2008 4:05 PM" LongDate+ShortTime
String.Format("{0:F}", dt); // "Sunday, March 09, 2008 4:05:07 PM" FullDateTime
String.Format("{0:g}", dt); // "3/9/2008 4:05 PM" ShortDate+ShortTime
String.Format("{0:G}", dt); // "3/9/2008 4:05:07 PM" ShortDate+LongTime
String.Format("{0:m}", dt); // "March 09" MonthDay
String.Format("{0:y}", dt); // "March, 2008" YearMonth
String.Format("{0:r}", dt); // "Sun, 09 Mar 2008 16:05:07 GMT" RFC1123
String.Format("{0:s}", dt); // "2008-03-09T16:05:07" SortableDateTime
String.Format("{0:u}", dt); // "2008-03-09 16:05:07Z" UniversalSortableDateTime




String Format for DateTime [C#]