From: "Jorge Manuel B. S. Vicetto" <jmbsvicetto@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/mysql-extras:master commit in: /
Date: Tue, 10 May 2011 18:05:24 +0000 (UTC)
Message-ID: <950710a56e02503b2e08400853d4365f4a297dea.jmbsvicetto@gentoo>

commit:     950710a56e02503b2e08400853d4365f4a297dea
Author:     Jorge Manuel B. S. Vicetto (jmbsvicetto) <jmbsvicetto <AT> gentoo <DOT> org>
AuthorDate: Tue May 10 18:00:36 2011 +0000
Commit:     Jorge Manuel B. S. Vicetto <jmbsvicetto <AT> gentoo <DOT> org>
CommitDate: Tue May 10 18:00:36 2011 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/mysql-extras.git;a=commit;h=950710a5

Updated the 07110_all_mysql_gcc-4.2 patch for mysql-5.1.57.
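
Background (illustrative, not part of the patch): the 07110 series renames the bare min()/max()
macros from my_global.h to MYSQL_MIN()/MYSQL_MAX() and updates their callers. The usual reason
for such a rename, consistent with the "gcc-4.2" name of the patch although the commit does not
spell it out, is that function-like min()/max() macros expand inside C++ translation units that
also use std::min/std::max, which newer g++ releases reject. A minimal sketch of that collision
(file and function names below are made up for illustration):

    /* clash.cc -- illustrative only, not from the MySQL tree */
    #include <algorithm>

    /* What my_global.h used to define when min was not already defined: */
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int smaller(int x, int y)
    {
      /* The preprocessor rewrites std::min(x, y) into
         std::((x) < (y) ? (x) : (y)), a syntax error, so any C++ file
         that sees both the macro and <algorithm> fails to compile. */
      return std::min(x, y);
    }

Renaming the macros to MYSQL_MIN()/MYSQL_MAX(), as the hunks below do, avoids the expansion
without disturbing the standard headers.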

---
 00000_index.txt                      |    8 +-
 07110_all_mysql_gcc-4.2_5.1.57.patch |70130 ++++++++++++++++++++++++++++++++++
 2 files changed, 70137 insertions(+), 1 deletions(-)

diff --git a/00000_index.txt b/00000_index.txt
index cc6549f..a65a9c9 100644
--- a/00000_index.txt
+++ b/00000_index.txt
@@ -523,7 +523,13 @@
 @@ FIXME: Testing patch - applies cleanly
 
 @patch 07110_all_mysql_gcc-4.2_5.1.56.patch
-@ver 5.01.56.00 to 5.01.99.99
+@ver 5.01.56.00 to 5.01.56.99
+@pn mysql
+@@ Replace max() and min() macro with MYSQL_MIN() and MYSQL_MAX()
+@@ FIXME: Testing patch - applies cleanly
+
+@patch 07110_all_mysql_gcc-4.2_5.1.57.patch
+@ver 5.01.57.00 to 5.01.99.99
 @pn mysql
 @@ Replace max() and min() macro with MYSQL_MIN() and MYSQL_MAX()
 @@ FIXME: Testing patch - applies cleanly

diff --git a/07110_all_mysql_gcc-4.2_5.1.57.patch b/07110_all_mysql_gcc-4.2_5.1.57.patch
new file mode 100644
index 0000000..7265c94
--- /dev/null
+++ b/07110_all_mysql_gcc-4.2_5.1.57.patch
@@ -0,0 +1,70130 @@
+diff -urN mysql-old/client/mysqlbinlog.cc mysql/client/mysqlbinlog.cc
+--- mysql-old/client/mysqlbinlog.cc	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/mysqlbinlog.cc	2011-05-10 17:56:01.266682376 +0000
+@@ -1953,7 +1953,7 @@
+       my_off_t length,tmp;
+       for (length= start_position_mot ; length > 0 ; length-=tmp)
+       {
+-	tmp=min(length,sizeof(buff));
++	tmp=MYSQL_MIN(length,sizeof(buff));
+ 	if (my_b_read(file, buff, (uint) tmp))
+         {
+           error("Failed reading from file.");
+diff -urN mysql-old/client/mysql.cc mysql/client/mysql.cc
+--- mysql-old/client/mysql.cc	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/mysql.cc	2011-05-10 17:56:01.270015709 +0000
+@@ -3336,9 +3336,9 @@
+   {
+     uint length= column_names ? field->name_length : 0;
+     if (quick)
+-      length=max(length,field->length);
++      length=MYSQL_MAX(length,field->length);
+     else
+-      length=max(length,field->max_length);
++      length=MYSQL_MAX(length,field->max_length);
+     if (length < 4 && !IS_NOT_NULL(field->flags))
+       length=4;					// Room for "NULL"
+     field->max_length=length;
+@@ -3358,7 +3358,7 @@
+                                                   field->name,
+                                                   field->name + name_length);
+       uint display_length= field->max_length + name_length - numcells;
+-      tee_fprintf(PAGER, " %-*s |",(int) min(display_length,
++      tee_fprintf(PAGER, " %-*s |",(int) MYSQL_MIN(display_length,
+                                             MAX_COLUMN_LENGTH),
+                   field->name);
+       num_flag[off]= IS_NUM(field->type);
+diff -urN mysql-old/client/mysqldump.c mysql/client/mysqldump.c
+--- mysql-old/client/mysqldump.c	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/mysqldump.c	2011-05-10 17:56:01.273349042 +0000
+@@ -830,7 +830,7 @@
+                                     &err_ptr, &err_len);
+       if (err_len)
+       {
+-        strmake(buff, err_ptr, min(sizeof(buff) - 1, err_len));
++        strmake(buff, err_ptr, MYSQL_MIN(sizeof(buff) - 1, err_len));
+         fprintf(stderr, "Invalid mode to --compatible: %s\n", buff);
+         exit(1);
+       }
+@@ -4523,7 +4523,7 @@
+ 
+       for (; pos != end && *pos != ','; pos++) ;
+       var_len= (uint) (pos - start);
+-      strmake(buff, start, min(sizeof(buff) - 1, var_len));
++      strmake(buff, start, MYSQL_MIN(sizeof(buff) - 1, var_len));
+       find= find_type(buff, lib, var_len);
+       if (!find)
+       {
+diff -urN mysql-old/client/mysqltest.cc mysql/client/mysqltest.cc
+--- mysql-old/client/mysqltest.cc	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/mysqltest.cc	2011-05-10 17:56:01.293349043 +0000
+@@ -5652,9 +5652,9 @@
+       }
+       else if ((c == '{' &&
+                 (!my_strnncoll_simple(charset_info, (const uchar*) "while", 5,
+-                                      (uchar*) buf, min(5, p - buf), 0) ||
++                                      (uchar*) buf, MYSQL_MIN(5, p - buf), 0) ||
+                  !my_strnncoll_simple(charset_info, (const uchar*) "if", 2,
+-                                      (uchar*) buf, min(2, p - buf), 0))))
++                                      (uchar*) buf, MYSQL_MIN(2, p - buf), 0))))
+       {
+         /* Only if and while commands can be terminated by { */
+         *p++= c;
+diff -urN mysql-old/client/mysql_upgrade.c mysql/client/mysql_upgrade.c
+--- mysql-old/client/mysql_upgrade.c	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/mysql_upgrade.c	2011-05-10 17:56:01.296682376 +0000
+@@ -528,7 +528,7 @@
+   if ((value_end= strchr(value_start, '\n')) == NULL)
+     return 1; /* Unexpected result */
+ 
+-  strncpy(value, value_start, min(FN_REFLEN, value_end-value_start));
++  strncpy(value, value_start, MYSQL_MIN(FN_REFLEN, value_end-value_start));
+   return 0;
+ }
+ 
+diff -urN mysql-old/client/sql_string.cc mysql/client/sql_string.cc
+--- mysql-old/client/sql_string.cc	2011-05-10 17:45:45.693349043 +0000
++++ mysql/client/sql_string.cc	2011-05-10 17:56:01.296682376 +0000
+@@ -660,7 +660,7 @@
+ {
+   if (Alloced_length < str_length + space_needed)
+   {
+-    if (realloc(Alloced_length + max(space_needed, grow_by) - 1))
++    if (realloc(Alloced_length + MYSQL_MAX(space_needed, grow_by) - 1))
+       return TRUE;
+   }
+   return FALSE;
+@@ -746,7 +746,7 @@
+ 
+ int stringcmp(const String *s,const String *t)
+ {
+-  uint32 s_len=s->length(),t_len=t->length(),len=min(s_len,t_len);
++  uint32 s_len=s->length(),t_len=t->length(),len=MYSQL_MIN(s_len,t_len);
+   int cmp= memcmp(s->ptr(), t->ptr(), len);
+   return (cmp) ? cmp : (int) (s_len - t_len);
+ }
+@@ -763,7 +763,7 @@
+   }
+   if (to->realloc(from_length))
+     return from;				// Actually an error
+-  if ((to->str_length=min(from->str_length,from_length)))
++  if ((to->str_length=MYSQL_MIN(from->str_length,from_length)))
+     memcpy(to->Ptr,from->Ptr,to->str_length);
+   to->str_charset=from->str_charset;
+   return to;
+diff -urN mysql-old/dbug/dbug.c mysql/dbug/dbug.c
+--- mysql-old/dbug/dbug.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/dbug/dbug.c	2011-05-10 17:56:01.296682376 +0000
+@@ -1205,7 +1205,7 @@
+     if (TRACING)
+     {
+       Indent(cs, cs->level + 1);
+-      pos= min(max(cs->level-cs->stack->sub_level,0)*INDENT,80);
++      pos= MYSQL_MIN(MYSQL_MAX(cs->level-cs->stack->sub_level,0)*INDENT,80);
+     }
+     else
+     {
+@@ -1690,7 +1690,7 @@
+ {
+   REGISTER int count;
+ 
+-  indent= max(indent-1-cs->stack->sub_level,0)*INDENT;
++  indent= MYSQL_MAX(indent-1-cs->stack->sub_level,0)*INDENT;
+   for (count= 0; count < indent ; count++)
+   {
+     if ((count % INDENT) == 0)
+diff -urN mysql-old/extra/yassl/src/ssl.cpp mysql/extra/yassl/src/ssl.cpp
+--- mysql-old/extra/yassl/src/ssl.cpp	2011-05-10 17:45:45.696682376 +0000
++++ mysql/extra/yassl/src/ssl.cpp	2011-05-10 17:56:01.300015709 +0000
+@@ -38,6 +38,7 @@
+ #include "file.hpp"             // for TaoCrypt Source
+ #include "coding.hpp"           // HexDecoder
+ #include "helpers.hpp"          // for placement new hack
++#include "my_global.h"
+ #include <stdio.h>
+ 
+ #ifdef _WIN32
+@@ -113,7 +114,7 @@
+                 // use file's salt for key derivation, but not real iv
+                 TaoCrypt::Source source(info.iv, info.ivSz);
+                 TaoCrypt::HexDecoder dec(source);
+-                memcpy(info.iv, source.get_buffer(), min((uint)sizeof(info.iv),
++                memcpy(info.iv, source.get_buffer(), MYSQL_MIN((uint)sizeof(info.iv),
+                                                          source.size()));
+                 EVP_BytesToKey(info.name, "MD5", info.iv, (byte*)password,
+                                passwordSz, 1, key, iv);
+diff -urN mysql-old/extra/yassl/taocrypt/include/pwdbased.hpp mysql/extra/yassl/taocrypt/include/pwdbased.hpp
+--- mysql-old/extra/yassl/taocrypt/include/pwdbased.hpp	2011-05-10 17:45:45.696682376 +0000
++++ mysql/extra/yassl/taocrypt/include/pwdbased.hpp	2011-05-10 17:56:01.300015709 +0000
+@@ -67,7 +67,7 @@
+ 		}
+ 		hmac.Final(buffer.get_buffer());
+ 
+-		word32 segmentLen = min(dLen, buffer.size());
++		word32 segmentLen = MYSQL_MIN(dLen, buffer.size());
+ 		memcpy(derived, buffer.get_buffer(), segmentLen);
+ 
+ 		for (j = 1; j < iterations; j++) {
+diff -urN mysql-old/extra/yassl/taocrypt/src/dh.cpp mysql/extra/yassl/taocrypt/src/dh.cpp
+--- mysql-old/extra/yassl/taocrypt/src/dh.cpp	2011-05-10 17:45:45.696682376 +0000
++++ mysql/extra/yassl/taocrypt/src/dh.cpp	2011-05-10 17:56:01.300015709 +0000
+@@ -23,6 +23,7 @@
+ #include "runtime.hpp"
+ #include "dh.hpp"
+ #include "asn.hpp"
++#include "my_global.h"
+ #include <math.h>
+ 
+ namespace TaoCrypt {
+@@ -54,7 +55,7 @@
+ // Generate private value
+ void DH::GeneratePrivate(RandomNumberGenerator& rng, byte* priv)
+ {
+-    Integer x(rng, Integer::One(), min(p_ - 1,
++    Integer x(rng, Integer::One(), MYSQL_MIN(p_ - 1,
+         Integer::Power2(2*DiscreteLogWorkFactor(p_.BitCount())) ) );
+     x.Encode(priv, p_.ByteCount());
+ }
+diff -urN mysql-old/include/my_global.h mysql/include/my_global.h
+--- mysql-old/include/my_global.h	2011-05-10 17:45:45.726682376 +0000
++++ mysql/include/my_global.h	2011-05-10 17:56:01.300015709 +0000
+@@ -584,10 +584,8 @@
+ #endif
+ 
+ /* Define some useful general macros */
+-#if !defined(max)
+-#define max(a, b)	((a) > (b) ? (a) : (b))
+-#define min(a, b)	((a) < (b) ? (a) : (b))
+-#endif
++#define MYSQL_MAX(a, b)	((a) > (b) ? (a) : (b))
++#define MYSQL_MIN(a, b)	((a) < (b) ? (a) : (b))
+ 
+ #if !defined(HAVE_UINT)
+ #undef HAVE_UINT
+@@ -1535,6 +1533,8 @@
+ /* Define some useful general macros (should be done after all headers). */
+ #if !defined(max)
+ #define max(a, b)	((a) > (b) ? (a) : (b))
++#endif  
++#if !defined(min)
+ #define min(a, b)	((a) < (b) ? (a) : (b))
+ #endif  
+ /*
+diff -urN mysql-old/libmysql/libmysql.c mysql/libmysql/libmysql.c
+--- mysql-old/libmysql/libmysql.c	2011-05-10 17:45:45.693349043 +0000
++++ mysql/libmysql/libmysql.c	2011-05-10 17:56:01.303349042 +0000
+@@ -1572,7 +1572,7 @@
+   my_net_set_read_timeout(net, CLIENT_NET_READ_TIMEOUT);
+   my_net_set_write_timeout(net, CLIENT_NET_WRITE_TIMEOUT);
+   net->retry_count=  1;
+-  net->max_packet_size= max(net_buffer_length, max_allowed_packet);
++  net->max_packet_size= MYSQL_MAX(net_buffer_length, max_allowed_packet);
+ }
+ 
+ /*
+@@ -3622,7 +3622,7 @@
+       copy_length= end - start;
+       /* We've got some data beyond offset: copy up to buffer_length bytes */
+       if (param->buffer_length)
+-        memcpy(buffer, start, min(copy_length, param->buffer_length));
++        memcpy(buffer, start, MYSQL_MIN(copy_length, param->buffer_length));
+     }
+     else
+       copy_length= 0;
+@@ -3855,9 +3855,9 @@
+         precisions. This will ensure that on the same machine you get the
+         same value as a string independent of the protocol you use.
+       */
+-      sprintf(buff, "%-*.*g", (int) min(sizeof(buff)-1,
++      sprintf(buff, "%-*.*g", (int) MYSQL_MIN(sizeof(buff)-1,
+                                         param->buffer_length),
+-              min(DBL_DIG, width), value);
++              MYSQL_MIN(DBL_DIG,width), value);
+       end= strcend(buff, ' ');
+       *end= 0;
+     }
+@@ -4175,7 +4175,7 @@
+                              uchar **row)
+ {
+   ulong length= net_field_length(row);
+-  ulong copy_length= min(length, param->buffer_length);
++  ulong copy_length= MYSQL_MIN(length, param->buffer_length);
+   memcpy(param->buffer, (char *)*row, copy_length);
+   *param->length= length;
+   *param->error= copy_length < length;
+@@ -4187,7 +4187,7 @@
+                              uchar **row)
+ {
+   ulong length= net_field_length(row);
+-  ulong copy_length= min(length, param->buffer_length);
++  ulong copy_length= MYSQL_MIN(length, param->buffer_length);
+   memcpy(param->buffer, (char *)*row, copy_length);
+   /* Add an end null if there is room in the buffer */
+   if (copy_length != param->buffer_length)
+diff -urN mysql-old/libmysqld/lib_sql.cc mysql/libmysqld/lib_sql.cc
+--- mysql-old/libmysqld/lib_sql.cc	2011-05-10 17:45:45.620015710 +0000
++++ mysql/libmysqld/lib_sql.cc	2011-05-10 17:56:01.303349042 +0000
+@@ -824,7 +824,7 @@
+     is cleared between substatements, and mysqltest gets confused
+   */
+   thd->cur_data->embedded_info->warning_count=
+-    (thd->spcont ? 0 : min(total_warn_count, 65535));
++    (thd->spcont ? 0 : MYSQL_MIN(total_warn_count, 65535));
+   return FALSE;
+ }
+ 
+diff -urN mysql-old/mysys/array.c mysql/mysys/array.c
+--- mysql-old/mysys/array.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/array.c	2011-05-10 17:56:01.306682376 +0000
+@@ -47,7 +47,7 @@
+   DBUG_ENTER("init_dynamic_array");
+   if (!alloc_increment)
+   {
+-    alloc_increment=max((8192-MALLOC_OVERHEAD)/element_size,16);
++    alloc_increment=MYSQL_MAX((8192-MALLOC_OVERHEAD)/element_size,16);
+     if (init_alloc > 8 && alloc_increment > init_alloc * 2)
+       alloc_increment=init_alloc*2;
+   }
+@@ -341,7 +341,7 @@
+ 
+ void freeze_size(DYNAMIC_ARRAY *array)
+ {
+-  uint elements=max(array->elements,1);
++  uint elements=MYSQL_MAX(array->elements,1);
+ 
+   /*
+     Do nothing if we are using a static buffer
+diff -urN mysql-old/mysys/default.c mysql/mysys/default.c
+--- mysql-old/mysys/default.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/default.c	2011-05-10 17:56:01.306682376 +0000
+@@ -793,7 +793,7 @@
+       for ( ; my_isspace(&my_charset_latin1,end[-1]) ; end--) ;
+       end[0]=0;
+ 
+-      strmake(curr_gr, ptr, min((size_t) (end-ptr)+1, sizeof(curr_gr)-1));
++      strmake(curr_gr, ptr, MYSQL_MIN((size_t) (end-ptr)+1, sizeof(curr_gr)-1));
+ 
+       /* signal that a new group is found */
+       opt_handler(handler_ctx, curr_gr, NULL);
+diff -urN mysql-old/mysys/mf_format.c mysql/mysys/mf_format.c
+--- mysql-old/mysys/mf_format.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/mf_format.c	2011-05-10 17:56:01.306682376 +0000
+@@ -83,7 +83,7 @@
+     tmp_length= strlength(startpos);
+     DBUG_PRINT("error",("dev: '%s'  ext: '%s'  length: %u",dev,ext,
+                         (uint) length));
+-    (void) strmake(to,startpos,min(tmp_length,FN_REFLEN-1));
++    (void) strmake(to,startpos,MYSQL_MIN(tmp_length,FN_REFLEN-1));
+   }
+   else
+   {
+diff -urN mysql-old/mysys/mf_iocache.c mysql/mysys/mf_iocache.c
+--- mysql-old/mysys/mf_iocache.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/mf_iocache.c	2011-05-10 17:56:01.306682376 +0000
+@@ -1097,7 +1097,7 @@
+   */
+   while (write_length)
+   {
+-    size_t copy_length= min(write_length, write_cache->buffer_length);
++    size_t copy_length= MYSQL_MIN(write_length, write_cache->buffer_length);
+     int  __attribute__((unused)) rc;
+ 
+     rc= lock_io_cache(write_cache, write_cache->pos_in_file);
+@@ -1256,7 +1256,7 @@
+       TODO: figure out if the assert below is needed or correct.
+     */
+     DBUG_ASSERT(pos_in_file == info->end_of_file);
+-    copy_len=min(Count, len_in_buff);
++    copy_len=MYSQL_MIN(Count, len_in_buff);
+     memcpy(Buffer, info->append_read_pos, copy_len);
+     info->append_read_pos += copy_len;
+     Count -= copy_len;
+@@ -1365,7 +1365,7 @@
+     }
+ #endif
+ 	/* Copy found bytes to buffer */
+-    length=min(Count,read_length);
++    length=MYSQL_MIN(Count,read_length);
+     memcpy(Buffer,info->read_pos,(size_t) length);
+     Buffer+=length;
+     Count-=length;
+@@ -1399,7 +1399,7 @@
+       if ((read_length=my_read(info->file,info->request_pos,
+ 			       read_length, info->myflags)) == (size_t) -1)
+         return info->error= -1;
+-      use_length=min(Count,read_length);
++      use_length=MYSQL_MIN(Count,read_length);
+       memcpy(Buffer,info->request_pos,(size_t) use_length);
+       info->read_pos=info->request_pos+Count;
+       info->read_end=info->request_pos+read_length;
+diff -urN mysql-old/mysys/my_alloc.c mysql/mysys/my_alloc.c
+--- mysql-old/mysys/my_alloc.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/my_alloc.c	2011-05-10 17:56:01.310015710 +0000
+@@ -212,7 +212,7 @@
+   {						/* Time to alloc new block */
+     block_size= mem_root->block_size * (mem_root->block_num >> 2);
+     get_size= length+ALIGN_SIZE(sizeof(USED_MEM));
+-    get_size= max(get_size, block_size);
++    get_size= MYSQL_MAX(get_size, block_size);
+ 
+     if (!(next = (USED_MEM*) my_malloc(get_size,MYF(MY_WME))))
+     {
+diff -urN mysql-old/mysys/my_bitmap.c mysql/mysys/my_bitmap.c
+--- mysql-old/mysys/my_bitmap.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/my_bitmap.c	2011-05-10 17:56:01.310015710 +0000
+@@ -423,7 +423,7 @@
+ 
+   DBUG_ASSERT(map->bitmap && map2->bitmap);
+ 
+-  end= to+min(len,len2);
++  end= to+MYSQL_MIN(len,len2);
+   for (; to < end; to++, from++)
+     *to &= *from;
+ 
+diff -urN mysql-old/mysys/my_compress.c mysql/mysys/my_compress.c
+--- mysql-old/mysys/my_compress.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/my_compress.c	2011-05-10 17:56:01.310015710 +0000
+@@ -244,7 +244,7 @@
+ 
+    if (ver != 1)
+      DBUG_RETURN(1);
+-   if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
++   if (!(data= my_malloc(MYSQL_MAX(orglen, complen), MYF(MY_WME))))
+      DBUG_RETURN(2);
+    memcpy(data, pack_data + BLOB_HEADER, complen);
+ 
+diff -urN mysql-old/mysys/my_conio.c mysql/mysys/my_conio.c
+--- mysql-old/mysys/my_conio.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/my_conio.c	2011-05-10 17:56:01.310015710 +0000
+@@ -165,13 +165,13 @@
+     though it is known it should not be more than 64K               
+     so we cut 64K and try first size of screen buffer               
+     if it is still to large we cut half of it and try again         
+-    later we may want to cycle from min(clen, 65535) to allowed size
++    later we may want to cycle from MYSQL_MIN(clen, 65535) to allowed size
+     with small decrement to determine exact allowed buffer           
+   */
+-  clen= min(clen, 65535);
++  clen= MYSQL_MIN(clen, 65535);
+   do
+   {
+-    clen= min(clen, (size_t) csbi.dwSize.X*csbi.dwSize.Y);
++    clen= MYSQL_MIN(clen, (size_t) csbi.dwSize.X*csbi.dwSize.Y);
+     if (!ReadConsole((HANDLE)my_coninpfh, (LPVOID)buffer, (DWORD) clen - 1, &plen_res,
+                      NULL))
+     {
+diff -urN mysql-old/mysys/my_file.c mysql/mysys/my_file.c
+--- mysql-old/mysys/my_file.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/mysys/my_file.c	2011-05-10 17:56:01.310015710 +0000
+@@ -75,7 +75,7 @@
+ static uint set_max_open_files(uint max_file_limit)
+ {
+   /* We don't know the limit. Return best guess */
+-  return min(max_file_limit, OS_FILE_LIMIT);
++  return MYSQL_MIN(max_file_limit, OS_FILE_LIMIT);
+ }
+ #endif
+ 
+@@ -97,7 +97,7 @@
+   DBUG_ENTER("my_set_max_open_files");
+   DBUG_PRINT("enter",("files: %u  my_file_limit: %u", files, my_file_limit));
+ 
+-  files= set_max_open_files(min(files, OS_FILE_LIMIT));
++  files= set_max_open_files(MYSQL_MIN(files, OS_FILE_LIMIT));
+   if (files <= MY_NFILE)
+     DBUG_RETURN(files);
+ 
+@@ -107,9 +107,9 @@
+ 
+   /* Copy any initialized files */
+   memcpy((char*) tmp, (char*) my_file_info,
+-         sizeof(*tmp) * min(my_file_limit, files));
++         sizeof(*tmp) * MYSQL_MIN(my_file_limit, files));
+   bzero((char*) (tmp + my_file_limit),
+-        max((int) (files- my_file_limit), 0)*sizeof(*tmp));
++        MYSQL_MAX((int) (files- my_file_limit), 0)*sizeof(*tmp));
+   my_free_open_file_info();			/* Free if already allocated */
+   my_file_info= tmp;
+   my_file_limit= files;
+diff -urN mysql-old/mysys/my_getopt.c mysql/mysys/my_getopt.c
+--- mysql-old/mysys/my_getopt.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/my_getopt.c	2011-05-10 17:56:01.310015710 +0000
+@@ -983,7 +983,7 @@
+   }
+   if (optp->max_value && num > (double) optp->max_value)
+     num= (double) optp->max_value;
+-  return max(num, (double) optp->min_value);
++  return MYSQL_MAX(num, (double) optp->min_value);
+ }
+ 
+ /*
+diff -urN mysql-old/mysys/my_static.h mysql/mysys/my_static.h
+--- mysql-old/mysys/my_static.h	2011-05-10 17:45:45.703349042 +0000
++++ mysql/mysys/my_static.h	2011-05-10 17:56:01.310015710 +0000
+@@ -22,7 +22,7 @@
+ #include <signal.h>
+ 
+ #define MAX_SIGNALS	10		/* Max signals under a dont-allow */
+-#define MIN_KEYBLOCK	(min(IO_SIZE,1024))
++#define MIN_KEYBLOCK	(MYSQL_MIN(IO_SIZE,1024))
+ #define MAX_KEYBLOCK	8192		/* Max keyblocklength == 8*IO_SIZE */
+ #define MAX_BLOCK_TYPES MAX_KEYBLOCK/MIN_KEYBLOCK
+ 
+diff -urN mysql-old/mysys/safemalloc.c mysql/mysys/safemalloc.c
+--- mysql-old/mysys/safemalloc.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/safemalloc.c	2011-05-10 17:56:01.313349044 +0000
+@@ -248,7 +248,7 @@
+ 
+   if ((data= _mymalloc(size,filename,lineno,MyFlags))) /* Allocate new area */
+   {
+-    size=min(size, irem->datasize);		/* Move as much as possibly */
++    size=MYSQL_MIN(size, irem->datasize);		/* Move as much as possibly */
+     memcpy((uchar*) data, ptr, (size_t) size);	/* Copy old data */
+     _myfree(ptr, filename, lineno, 0);		/* Free not needed area */
+   }
+diff -urN mysql-old/mysys/stacktrace.c mysql/mysys/stacktrace.c
+--- mysql-old/mysys/stacktrace.c	2011-05-10 17:45:45.700015709 +0000
++++ mysql/mysys/stacktrace.c	2011-05-10 17:56:01.313349044 +0000
+@@ -324,7 +324,7 @@
+ 
+   if (!stack_bottom || (uchar*) stack_bottom > (uchar*) &fp)
+   {
+-    ulong tmp= min(0x10000,thread_stack);
++    ulong tmp= MYSQL_MIN(0x10000,thread_stack);
+     /* Assume that the stack starts at the previous even 65K */
+     stack_bottom= (uchar*) (((ulong) &fp + tmp) &
+ 			  ~(ulong) 0xFFFF);
+diff -urN mysql-old/server-tools/instance-manager/buffer.cc mysql/server-tools/instance-manager/buffer.cc
+--- mysql-old/server-tools/instance-manager/buffer.cc	2011-05-10 17:45:45.436682376 +0000
++++ mysql/server-tools/instance-manager/buffer.cc	2011-05-10 17:56:01.313349044 +0000
+@@ -83,8 +83,8 @@
+   if (position + len_arg >= buffer_size)
+   {
+     buffer= (uchar*) my_realloc(buffer,
+-                                min(MAX_BUFFER_SIZE,
+-                                    max((uint) (buffer_size*1.5),
++                                MYSQL_MIN(MAX_BUFFER_SIZE,
++                                    MYSQL_MAX((uint) (buffer_size*1.5),
+                                         position + len_arg)), MYF(0));
+     if (!(buffer))
+       goto err;
+diff -urN mysql-old/server-tools/instance-manager/listener.cc mysql/server-tools/instance-manager/listener.cc
+--- mysql-old/server-tools/instance-manager/listener.cc	2011-05-10 17:45:45.436682376 +0000
++++ mysql/server-tools/instance-manager/listener.cc	2011-05-10 17:56:01.313349044 +0000
+@@ -103,7 +103,7 @@
+ 
+   /* II. Listen sockets and spawn childs */
+   for (i= 0; i < num_sockets; i++)
+-    n= max(n, sockets[i]);
++    n= MYSQL_MAX(n, sockets[i]);
+   n++;
+ 
+   timeval tv;
+diff -urN mysql-old/sql/debug_sync.cc mysql/sql/debug_sync.cc
+--- mysql-old/sql/debug_sync.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/debug_sync.cc	2011-05-10 17:56:01.313349044 +0000
+@@ -1036,7 +1036,7 @@
+   DBUG_ASSERT(action);
+   DBUG_ASSERT(ds_control);
+ 
+-  action->activation_count= max(action->hit_limit, action->execute);
++  action->activation_count= MYSQL_MAX(action->hit_limit, action->execute);
+   if (!action->activation_count)
+   {
+     debug_sync_remove_action(ds_control, action);
+diff -urN mysql-old/sql/field.cc mysql/sql/field.cc
+--- mysql-old/sql/field.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/field.cc	2011-05-10 17:56:01.316682377 +0000
+@@ -54,7 +54,7 @@
+ #define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128
+ #define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128
+ #define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
+-((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
++((ulong) ((LL(1) << MYSQL_MIN(arg, 4) * 8) - LL(1)))
+ 
+ #define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
+ #define ASSERT_COLUMN_MARKED_FOR_WRITE DBUG_ASSERT(!table || (!table->write_set || bitmap_is_set(table->write_set, field_index)))
+@@ -2072,7 +2072,7 @@
+     tmp_uint=tmp_dec+(uint)(int_digits_end-int_digits_from);
+   else if (expo_sign_char == '-') 
+   {
+-    tmp_uint=min(exponent,(uint)(int_digits_end-int_digits_from));
++    tmp_uint=MYSQL_MIN(exponent,(uint)(int_digits_end-int_digits_from));
+     frac_digits_added_zeros=exponent-tmp_uint;
+     int_digits_end -= tmp_uint;
+     frac_digits_head_end=int_digits_end+tmp_uint;
+@@ -2080,7 +2080,7 @@
+   }
+   else // (expo_sign_char=='+') 
+   {
+-    tmp_uint=min(exponent,(uint)(frac_digits_end-frac_digits_from));
++    tmp_uint=MYSQL_MIN(exponent,(uint)(frac_digits_end-frac_digits_from));
+     int_digits_added_zeros=exponent-tmp_uint;
+     int_digits_tail_from=frac_digits_from;
+     frac_digits_from=frac_digits_from+tmp_uint;
+@@ -2505,7 +2505,7 @@
+   {
+     signed int overflow;
+ 
+-    dec= min(dec, DECIMAL_MAX_SCALE);
++    dec= MYSQL_MIN(dec, DECIMAL_MAX_SCALE);
+ 
+     /*
+       If the value still overflows the field with the corrected dec,
+@@ -2521,7 +2521,7 @@
+     overflow= required_length - len;
+ 
+     if (overflow > 0)
+-      dec= max(0, dec - overflow);            // too long, discard fract
++      dec= MYSQL_MAX(0, dec - overflow);            // too long, discard fract
+     else
+       /* Corrected value fits. */
+       len= required_length;
+@@ -3091,7 +3091,7 @@
+   ASSERT_COLUMN_MARKED_FOR_READ;
+   CHARSET_INFO *cs= &my_charset_bin;
+   uint length;
+-  uint mlength=max(field_length+1,5*cs->mbmaxlen);
++  uint mlength=MYSQL_MAX(field_length+1,5*cs->mbmaxlen);
+   val_buffer->alloc(mlength);
+   char *to=(char*) val_buffer->ptr();
+ 
+@@ -3303,7 +3303,7 @@
+   ASSERT_COLUMN_MARKED_FOR_READ;
+   CHARSET_INFO *cs= &my_charset_bin;
+   uint length;
+-  uint mlength=max(field_length+1,7*cs->mbmaxlen);
++  uint mlength=MYSQL_MAX(field_length+1,7*cs->mbmaxlen);
+   val_buffer->alloc(mlength);
+   char *to=(char*) val_buffer->ptr();
+   short j;
+@@ -3520,7 +3520,7 @@
+   ASSERT_COLUMN_MARKED_FOR_READ;
+   CHARSET_INFO *cs= &my_charset_bin;
+   uint length;
+-  uint mlength=max(field_length+1,10*cs->mbmaxlen);
++  uint mlength=MYSQL_MAX(field_length+1,10*cs->mbmaxlen);
+   val_buffer->alloc(mlength);
+   char *to=(char*) val_buffer->ptr();
+   long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
+@@ -3739,7 +3739,7 @@
+   ASSERT_COLUMN_MARKED_FOR_READ;
+   CHARSET_INFO *cs= &my_charset_bin;
+   uint length;
+-  uint mlength=max(field_length+1,12*cs->mbmaxlen);
++  uint mlength=MYSQL_MAX(field_length+1,12*cs->mbmaxlen);
+   val_buffer->alloc(mlength);
+   char *to=(char*) val_buffer->ptr();
+   int32 j;
+@@ -3980,7 +3980,7 @@
+ {
+   CHARSET_INFO *cs= &my_charset_bin;
+   uint length;
+-  uint mlength=max(field_length+1,22*cs->mbmaxlen);
++  uint mlength=MYSQL_MAX(field_length+1,22*cs->mbmaxlen);
+   val_buffer->alloc(mlength);
+   char *to=(char*) val_buffer->ptr();
+   longlong j;
+@@ -4203,7 +4203,7 @@
+ #endif
+     memcpy_fixed((uchar*) &nr,ptr,sizeof(nr));
+ 
+-  uint to_length=max(field_length,70);
++  uint to_length=MYSQL_MAX(field_length,70);
+   val_buffer->alloc(to_length);
+   char *to=(char*) val_buffer->ptr();
+ 
+@@ -6440,13 +6440,13 @@
+     calculate the maximum number of significant digits if the 'f'-format
+     would be used (+1 for decimal point if the number has a fractional part).
+   */
+-  digits= max(1, (int) max_length - fractional);
++  digits= MYSQL_MAX(1, (int) max_length - fractional);
+   /*
+     If the exponent is negative, decrease digits by the number of leading zeros
+     after the decimal point that do not count as significant digits.
+   */
+   if (exp < 0)
+-    digits= max(1, (int) digits + exp);
++    digits= MYSQL_MAX(1, (int) digits + exp);
+   /*
+     'e'-format is used only if the exponent is less than -4 or greater than or
+     equal to the precision. In this case we need to adjust the number of
+@@ -6454,7 +6454,7 @@
+     We also have to reserve one additional character if abs(exp) >= 100.
+   */
+   if (exp >= (int) digits || exp < -4)
+-    digits= max(1, (int) (max_length - 5 - (exp >= 100 || exp <= -100)));
++    digits= MYSQL_MAX(1, (int) (max_length - 5 - (exp >= 100 || exp <= -100)));
+ 
+   /* Limit precision to DBL_DIG to avoid garbage past significant digits */
+   set_if_smaller(digits, DBL_DIG);
+@@ -6712,7 +6712,7 @@
+                           uint max_length,
+                           bool low_byte_first __attribute__((unused)))
+ {
+-  uint length=      min(field_length,max_length);
++  uint length=      MYSQL_MIN(field_length,max_length);
+   uint local_char_length= max_length/field_charset->mbmaxlen;
+   if (length > local_char_length)
+     local_char_length= my_charpos(field_charset, from, from+length,
+@@ -7706,7 +7706,7 @@
+     from= tmpstr.ptr();
+   }
+ 
+-  new_length= min(max_data_length(), field_charset->mbmaxlen * length);
++  new_length= MYSQL_MIN(max_data_length(), field_charset->mbmaxlen * length);
+   if (value.alloc(new_length))
+     goto oom_error;
+ 
+@@ -7866,7 +7866,7 @@
+   b_length=get_length(b_ptr);
+   if (b_length > max_length)
+     b_length=max_length;
+-  diff=memcmp(a,b,min(a_length,b_length));
++  diff=memcmp(a,b,MYSQL_MIN(a_length,b_length));
+   return diff ? diff : (int) (a_length - b_length);
+ }
+ 
+@@ -8062,7 +8062,7 @@
+     length given is smaller than the actual length of the blob, we
+     just store the initial bytes of the blob.
+   */
+-  store_length(to, packlength, min(length, max_length), low_byte_first);
++  store_length(to, packlength, MYSQL_MIN(length, max_length), low_byte_first);
+ 
+   /*
+     Store the actual blob data, which will occupy 'length' bytes.
+@@ -9109,7 +9109,7 @@
+ {
+   ASSERT_COLUMN_MARKED_FOR_READ;
+   char buff[sizeof(longlong)];
+-  uint length= min(pack_length(), sizeof(longlong));
++  uint length= MYSQL_MIN(pack_length(), sizeof(longlong));
+   ulonglong bits= val_int();
+   mi_int8store(buff,bits);
+ 
+@@ -9195,7 +9195,7 @@
+     *buff++= bits;
+     length--;
+   }
+-  uint data_length = min(length, bytes_in_rec);
++  uint data_length = MYSQL_MIN(length, bytes_in_rec);
+   memcpy(buff, ptr, data_length);
+   return data_length + 1;
+ }
+@@ -9323,7 +9323,7 @@
+     uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len);
+     *to++= bits;
+   }
+-  length= min(bytes_in_rec, max_length - (bit_len > 0));
++  length= MYSQL_MIN(bytes_in_rec, max_length - (bit_len > 0));
+   memcpy(to, from, length);
+   return to + length;
+ }
+@@ -9780,7 +9780,7 @@
+       DBUG_ASSERT(MAX_DATETIME_COMPRESSED_WIDTH < UINT_MAX);
+       if (length != UINT_MAX)  /* avoid overflow; is safe because of min() */
+         length= ((length+1)/2)*2;
+-      length= min(length, MAX_DATETIME_COMPRESSED_WIDTH);
++      length= MYSQL_MIN(length, MAX_DATETIME_COMPRESSED_WIDTH);
+     }
+     flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
+     /*
+diff -urN mysql-old/sql/filesort.cc mysql/sql/filesort.cc
+--- mysql-old/sql/filesort.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/filesort.cc	2011-05-10 17:56:01.320015710 +0000
+@@ -193,7 +193,7 @@
+ #ifdef CAN_TRUST_RANGE
+   if (select && select->quick && select->quick->records > 0L)
+   {
+-    records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
++    records=MYSQL_MIN((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
+ 		table->file->stats.records)+EXTRA_RECORDS;
+     selected_records_file=0;
+   }
+@@ -215,12 +215,12 @@
+     goto err;
+ 
+   memavl= thd->variables.sortbuff_size;
+-  min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
++  min_sort_memory= MYSQL_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+   while (memavl >= min_sort_memory)
+   {
+     ulong old_memavl;
+     ulong keys= memavl/(param.rec_length+sizeof(char*));
+-    param.keys=(uint) min(records+1, keys);
++    param.keys=(uint) MYSQL_MIN(records+1, keys);
+     if ((table_sort.sort_keys=
+ 	 (uchar **) make_char_array((char **) table_sort.sort_keys,
+                                     param.keys, param.rec_length, MYF(0))))
+@@ -1117,7 +1117,7 @@
+   register uint count;
+   uint length;
+ 
+-  if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
++  if ((count=(uint) MYSQL_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
+   {
+     if (my_pread(fromfile->file,(uchar*) buffpek->base,
+ 		 (length= rec_length*count),buffpek->file_pos,MYF_RW))
+@@ -1380,7 +1380,7 @@
+          != -1 && error != 0);
+ 
+ end:
+-  lastbuff->count= min(org_max_rows-max_rows, param->max_rows);
++  lastbuff->count= MYSQL_MIN(org_max_rows-max_rows, param->max_rows);
+   lastbuff->file_pos= to_start_filepos;
+ err:
+   delete_queue(&queue);
+diff -urN mysql-old/sql/ha_ndbcluster.cc mysql/sql/ha_ndbcluster.cc
+--- mysql-old/sql/ha_ndbcluster.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/ha_ndbcluster.cc	2011-05-10 17:56:01.323349043 +0000
+@@ -800,7 +800,7 @@
+ 
+       DBUG_PRINT("value", ("set blob ptr: 0x%lx  len: %u",
+                            (long) blob_ptr, blob_len));
+-      DBUG_DUMP("value", blob_ptr, min(blob_len, 26));
++      DBUG_DUMP("value", blob_ptr, MYSQL_MIN(blob_len, 26));
+ 
+       if (set_blob_value)
+         *set_blob_value= TRUE;
+diff -urN mysql-old/sql/handler.h mysql/sql/handler.h
+--- mysql-old/sql/handler.h	2011-05-10 17:45:45.640015709 +0000
++++ mysql/sql/handler.h	2011-05-10 17:56:01.330015709 +0000
+@@ -1605,15 +1605,15 @@
+   { return (HA_ERR_WRONG_COMMAND); }
+ 
+   uint max_record_length() const
+-  { return min(HA_MAX_REC_LENGTH, max_supported_record_length()); }
++  { return MYSQL_MIN(HA_MAX_REC_LENGTH, max_supported_record_length()); }
+   uint max_keys() const
+-  { return min(MAX_KEY, max_supported_keys()); }
++  { return MYSQL_MIN(MAX_KEY, max_supported_keys()); }
+   uint max_key_parts() const
+-  { return min(MAX_REF_PARTS, max_supported_key_parts()); }
++  { return MYSQL_MIN(MAX_REF_PARTS, max_supported_key_parts()); }
+   uint max_key_length() const
+-  { return min(MAX_KEY_LENGTH, max_supported_key_length()); }
++  { return MYSQL_MIN(MAX_KEY_LENGTH, max_supported_key_length()); }
+   uint max_key_part_length() const
+-  { return min(MAX_KEY_LENGTH, max_supported_key_part_length()); }
++  { return MYSQL_MIN(MAX_KEY_LENGTH, max_supported_key_part_length()); }
+ 
+   virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+   virtual uint max_supported_keys() const { return 0; }
+diff -urN mysql-old/sql/ha_partition.cc mysql/sql/ha_partition.cc
+--- mysql-old/sql/ha_partition.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/ha_partition.cc	2011-05-10 17:56:01.330015709 +0000
+@@ -5968,7 +5968,7 @@
+ {
+   *first= bitmap_get_first_set(&(m_part_info->used_partitions));
+   *num_used_parts= bitmap_bits_set(&(m_part_info->used_partitions));
+-  *check_min_num= min(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
++  *check_min_num= MYSQL_MIN(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
+ }
+ 
+ 
+diff -urN mysql-old/sql/item_buff.cc mysql/sql/item_buff.cc
+--- mysql-old/sql/item_buff.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/item_buff.cc	2011-05-10 17:56:01.333349042 +0000
+@@ -59,7 +59,7 @@
+ 
+ Cached_item_str::Cached_item_str(THD *thd, Item *arg)
+   :item(arg),
+-   value_max_length(min(arg->max_length, thd->variables.max_sort_length)),
++   value_max_length(MYSQL_MIN(arg->max_length, thd->variables.max_sort_length)),
+    value(value_max_length)
+ {}
+ 
+@@ -69,7 +69,7 @@
+   bool tmp;
+ 
+   if ((res=item->val_str(&tmp_value)))
+-    res->length(min(res->length(), value_max_length));
++    res->length(MYSQL_MIN(res->length(), value_max_length));
+   if (null_value != item->null_value)
+   {
+     if ((null_value= item->null_value))
+diff -urN mysql-old/sql/item.cc mysql/sql/item.cc
+--- mysql-old/sql/item.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/item.cc	2011-05-10 17:56:01.336682376 +0000
+@@ -74,7 +74,7 @@
+ Hybrid_type_traits_decimal::fix_length_and_dec(Item *item, Item *arg) const
+ {
+   item->decimals= arg->decimals;
+-  item->max_length= min(arg->max_length + DECIMAL_LONGLONG_DIGITS,
++  item->max_length= MYSQL_MIN(arg->max_length + DECIMAL_LONGLONG_DIGITS,
+                         DECIMAL_MAX_STR_LENGTH);
+ }
+ 
+@@ -442,9 +442,9 @@
+   {
+     uint prec= 
+       my_decimal_length_to_precision(max_length, decimals, unsigned_flag);
+-    return min(prec, DECIMAL_MAX_PRECISION);
++    return MYSQL_MIN(prec, DECIMAL_MAX_PRECISION);
+   }
+-  return min(max_length, DECIMAL_MAX_PRECISION);
++  return MYSQL_MIN(max_length, DECIMAL_MAX_PRECISION);
+ }
+ 
+ 
+@@ -750,7 +750,7 @@
+ 				   &res_length);
+   }
+   else
+-    name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME)));
++    name= sql_strmake(str, (name_length= MYSQL_MIN(length,MAX_ALIAS_NAME)));
+ }
+ 
+ 
+@@ -5414,7 +5414,7 @@
+   // following assert is redundant, because fixed=1 assigned in constructor
+   DBUG_ASSERT(fixed == 1);
+   char *end=(char*) str_value.ptr()+str_value.length(),
+-       *ptr=end-min(str_value.length(),sizeof(longlong));
++       *ptr=end-MYSQL_MIN(str_value.length(),sizeof(longlong));
+ 
+   ulonglong value=0;
+   for (; ptr != end ; ptr++)
+@@ -5469,7 +5469,7 @@
+ void Item_hex_string::print(String *str, enum_query_type query_type)
+ {
+   char *end= (char*) str_value.ptr() + str_value.length(),
+-       *ptr= end - min(str_value.length(), sizeof(longlong));
++       *ptr= end - MYSQL_MIN(str_value.length(), sizeof(longlong));
+   str->append("0x");
+   for (; ptr != end ; ptr++)
+   {
+@@ -7559,14 +7559,14 @@
+     /* fix variable decimals which always is NOT_FIXED_DEC */
+     if (Field::result_merge_type(fld_type) == INT_RESULT)
+       item_decimals= 0;
+-    decimals= max(decimals, item_decimals);
++    decimals= MYSQL_MAX(decimals, item_decimals);
+   }
+   if (Field::result_merge_type(fld_type) == DECIMAL_RESULT)
+   {
+-    decimals= min(max(decimals, item->decimals), DECIMAL_MAX_SCALE);
++    decimals= MYSQL_MIN(MYSQL_MAX(decimals, item->decimals), DECIMAL_MAX_SCALE);
+     int item_int_part= item->decimal_int_part();
+-    int item_prec = max(prev_decimal_int_part, item_int_part) + decimals;
+-    int precision= min(item_prec, DECIMAL_MAX_PRECISION);
++    int item_prec = MYSQL_MAX(prev_decimal_int_part, item_int_part) + decimals;
++    int precision= MYSQL_MIN(item_prec, DECIMAL_MAX_PRECISION);
+     unsigned_flag&= item->unsigned_flag;
+     max_length= my_decimal_precision_to_length_no_truncation(precision,
+                                                              decimals,
+@@ -7597,7 +7597,7 @@
+      */
+     if (collation.collation != &my_charset_bin)
+     {
+-      max_length= max(old_max_chars * collation.collation->mbmaxlen,
++      max_length= MYSQL_MAX(old_max_chars * collation.collation->mbmaxlen,
+                       display_length(item) /
+                       item->collation.collation->mbmaxlen *
+                       collation.collation->mbmaxlen);
+@@ -7619,7 +7619,7 @@
+       {
+         int delta1= max_length_orig - decimals_orig;
+         int delta2= item->max_length - item->decimals;
+-        max_length= max(delta1, delta2) + decimals;
++        max_length= MYSQL_MAX(delta1, delta2) + decimals;
+         if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2)
+         {
+           max_length= MAX_FLOAT_STR_LENGTH;
+@@ -7637,7 +7637,7 @@
+     break;
+   }
+   default:
+-    max_length= max(max_length, display_length(item));
++    max_length= MYSQL_MAX(max_length, display_length(item));
+   };
+   maybe_null|= item->maybe_null;
+   get_full_info(item);
+diff -urN mysql-old/sql/item_cmpfunc.cc mysql/sql/item_cmpfunc.cc
+--- mysql-old/sql/item_cmpfunc.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/item_cmpfunc.cc	2011-05-10 17:56:01.340015710 +0000
+@@ -628,7 +628,7 @@
+   {
+     if ((*a)->decimals < NOT_FIXED_DEC && (*b)->decimals < NOT_FIXED_DEC)
+     {
+-      precision= 5 / log_10[max((*a)->decimals, (*b)->decimals) + 1];
++      precision= 5 / log_10[MYSQL_MAX((*a)->decimals, (*b)->decimals) + 1];
+       if (func == &Arg_comparator::compare_real)
+         func= &Arg_comparator::compare_real_fixed;
+       else if (func == &Arg_comparator::compare_e_real)
+@@ -1315,7 +1315,7 @@
+         owner->null_value= 0;
+       uint res1_length= res1->length();
+       uint res2_length= res2->length();
+-      int cmp= memcmp(res1->ptr(), res2->ptr(), min(res1_length,res2_length));
++      int cmp= memcmp(res1->ptr(), res2->ptr(), MYSQL_MIN(res1_length,res2_length));
+       return cmp ? cmp : (int) (res1_length - res2_length);
+     }
+   }
+@@ -2447,7 +2447,7 @@
+ {
+   agg_result_type(&hybrid_type, args, 2);
+   maybe_null=args[1]->maybe_null;
+-  decimals= max(args[0]->decimals, args[1]->decimals);
++  decimals= MYSQL_MAX(args[0]->decimals, args[1]->decimals);
+   unsigned_flag= args[0]->unsigned_flag && args[1]->unsigned_flag;
+ 
+   if (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT) 
+@@ -2458,10 +2458,10 @@
+     int len1= args[1]->max_length - args[1]->decimals
+       - (args[1]->unsigned_flag ? 0 : 1);
+ 
+-    max_length= max(len0, len1) + decimals + (unsigned_flag ? 0 : 1);
++    max_length= MYSQL_MAX(len0, len1) + decimals + (unsigned_flag ? 0 : 1);
+   }
+   else
+-    max_length= max(args[0]->max_length, args[1]->max_length);
++    max_length= MYSQL_MAX(args[0]->max_length, args[1]->max_length);
+ 
+   switch (hybrid_type) {
+   case STRING_RESULT:
+@@ -2485,9 +2485,9 @@
+ {
+   int arg0_int_part= args[0]->decimal_int_part();
+   int arg1_int_part= args[1]->decimal_int_part();
+-  int max_int_part= max(arg0_int_part, arg1_int_part);
++  int max_int_part= MYSQL_MAX(arg0_int_part, arg1_int_part);
+   int precision= max_int_part + decimals;
+-  return min(precision, DECIMAL_MAX_PRECISION);
++  return MYSQL_MIN(precision, DECIMAL_MAX_PRECISION);
+ }
+ 
+ 
+@@ -2615,7 +2615,7 @@
+ Item_func_if::fix_length_and_dec()
+ {
+   maybe_null=args[1]->maybe_null || args[2]->maybe_null;
+-  decimals= max(args[1]->decimals, args[2]->decimals);
++  decimals= MYSQL_MAX(args[1]->decimals, args[2]->decimals);
+   unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag;
+ 
+   enum Item_result arg1_type=args[1]->result_type();
+@@ -2659,10 +2659,10 @@
+     int len2= args[2]->max_length - args[2]->decimals
+       - (args[2]->unsigned_flag ? 0 : 1);
+ 
+-    max_length=max(len1, len2) + decimals + (unsigned_flag ? 0 : 1);
++    max_length=MYSQL_MAX(len1, len2) + decimals + (unsigned_flag ? 0 : 1);
+   }
+   else
+-    max_length= max(args[1]->max_length, args[2]->max_length);
++    max_length= MYSQL_MAX(args[1]->max_length, args[2]->max_length);
+ }
+ 
+ 
+@@ -2670,8 +2670,8 @@
+ {
+   int arg1_prec= args[1]->decimal_int_part();
+   int arg2_prec= args[2]->decimal_int_part();
+-  int precision=max(arg1_prec,arg2_prec) + decimals;
+-  return min(precision, DECIMAL_MAX_PRECISION);
++  int precision=MYSQL_MAX(arg1_prec,arg2_prec) + decimals;
++  return MYSQL_MIN(precision, DECIMAL_MAX_PRECISION);
+ }
+ 
+ 
+@@ -3081,7 +3081,7 @@
+ 
+   if (else_expr_num != -1) 
+     set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part());
+-  return min(max_int_part + decimals, DECIMAL_MAX_PRECISION);
++  return MYSQL_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+ }
+ 
+ 
+@@ -4981,7 +4981,7 @@
+       else
+       {
+ 	if (i < g)
+-	  g = i; // g = min(i, g)
++	  g = i; // g = MYSQL_MIN(i, g)
+ 	f = i;
+ 	while (g >= 0 && pattern[g] == pattern[g + plm1 - f])
+ 	  g--;
+@@ -5000,7 +5000,7 @@
+       else
+       {
+ 	if (i < g)
+-	  g = i; // g = min(i, g)
++	  g = i; // g = MYSQL_MIN(i, g)
+ 	f = i;
+ 	while (g >= 0 &&
+ 	       likeconv(cs, pattern[g]) == likeconv(cs, pattern[g + plm1 - f]))
+@@ -5121,14 +5121,14 @@
+       register const int v = plm1 - i;
+       turboShift = u - v;
+       bcShift    = bmBc[(uint) (uchar) text[i + j]] - plm1 + i;
+-      shift      = max(turboShift, bcShift);
+-      shift      = max(shift, bmGs[i]);
++      shift      = MYSQL_MAX(turboShift, bcShift);
++      shift      = MYSQL_MAX(shift, bmGs[i]);
+       if (shift == bmGs[i])
+-	u = min(pattern_len - shift, v);
++	u = MYSQL_MIN(pattern_len - shift, v);
+       else
+       {
+ 	if (turboShift < bcShift)
+-	  shift = max(shift, u + 1);
++	  shift = MYSQL_MAX(shift, u + 1);
+ 	u = 0;
+       }
+       j+= shift;
+@@ -5152,14 +5152,14 @@
+       register const int v = plm1 - i;
+       turboShift = u - v;
+       bcShift    = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i;
+-      shift      = max(turboShift, bcShift);
+-      shift      = max(shift, bmGs[i]);
++      shift      = MYSQL_MAX(turboShift, bcShift);
++      shift      = MYSQL_MAX(shift, bmGs[i]);
+       if (shift == bmGs[i])
+-	u = min(pattern_len - shift, v);
++	u = MYSQL_MIN(pattern_len - shift, v);
+       else
+       {
+ 	if (turboShift < bcShift)
+-	  shift = max(shift, u + 1);
++	  shift = MYSQL_MAX(shift, u + 1);
+ 	u = 0;
+       }
+       j+= shift;
+diff -urN mysql-old/sql/item_func.cc mysql/sql/item_func.cc
+--- mysql-old/sql/item_func.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/item_func.cc	2011-05-10 17:56:01.346682377 +0000
+@@ -550,7 +550,7 @@
+     set_if_bigger(max_int_part, args[i]->decimal_int_part());
+     set_if_smaller(unsigned_flag, args[i]->unsigned_flag);
+   }
+-  int precision= min(max_int_part + decimals, DECIMAL_MAX_PRECISION);
++  int precision= MYSQL_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+   max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
+                                                            unsigned_flag);
+ }
+@@ -1144,10 +1144,10 @@
+ */
+ void Item_func_additive_op::result_precision()
+ {
+-  decimals= max(args[0]->decimals, args[1]->decimals);
++  decimals= MYSQL_MAX(args[0]->decimals, args[1]->decimals);
+   int arg1_int= args[0]->decimal_precision() - args[0]->decimals;
+   int arg2_int= args[1]->decimal_precision() - args[1]->decimals;
+-  int precision= max(arg1_int, arg2_int) + 1 + decimals;
++  int precision= MYSQL_MAX(arg1_int, arg2_int) + 1 + decimals;
+ 
+   /* Integer operations keep unsigned_flag if one of arguments is unsigned */
+   if (result_type() == INT_RESULT)
+@@ -1257,9 +1257,9 @@
+     unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
+   else
+     unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
+-  decimals= min(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
++  decimals= MYSQL_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
+   uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision();
+-  uint precision= min(est_prec, DECIMAL_MAX_PRECISION);
++  uint precision= MYSQL_MIN(est_prec, DECIMAL_MAX_PRECISION);
+   max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
+                                                            unsigned_flag);
+ }
+@@ -1307,7 +1307,7 @@
+ 
+ void Item_func_div::result_precision()
+ {
+-  uint precision=min(args[0]->decimal_precision() + 
++  uint precision=MYSQL_MIN(args[0]->decimal_precision() +
+                      args[1]->decimals + prec_increment,
+                      DECIMAL_MAX_PRECISION);
+ 
+@@ -1316,7 +1316,7 @@
+     unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
+   else
+     unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
+-  decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
++  decimals= MYSQL_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+   max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
+                                                            unsigned_flag);
+ }
+@@ -1330,7 +1330,7 @@
+   switch(hybrid_type) {
+   case REAL_RESULT:
+   {
+-    decimals=max(args[0]->decimals,args[1]->decimals)+prec_increment;
++    decimals=MYSQL_MAX(args[0]->decimals,args[1]->decimals)+prec_increment;
+     set_if_smaller(decimals, NOT_FIXED_DEC);
+     uint tmp=float_length(decimals);
+     if (decimals == NOT_FIXED_DEC)
+@@ -1461,8 +1461,8 @@
+ 
+ void Item_func_mod::result_precision()
+ {
+-  decimals= max(args[0]->decimals, args[1]->decimals);
+-  max_length= max(args[0]->max_length, args[1]->max_length);
++  decimals= MYSQL_MAX(args[0]->decimals, args[1]->decimals);
++  max_length= MYSQL_MAX(args[0]->max_length, args[1]->max_length);
+ }
+ 
+ 
+@@ -1981,7 +1981,7 @@
+ 
+   if (args[0]->decimals == NOT_FIXED_DEC)
+   {
+-    decimals= min(decimals_to_set, NOT_FIXED_DEC);
++    decimals= MYSQL_MIN(decimals_to_set, NOT_FIXED_DEC);
+     max_length= float_length(decimals);
+     hybrid_type= REAL_RESULT;
+     return;
+@@ -1991,7 +1991,7 @@
+   case REAL_RESULT:
+   case STRING_RESULT:
+     hybrid_type= REAL_RESULT;
+-    decimals= min(decimals_to_set, NOT_FIXED_DEC);
++    decimals= MYSQL_MIN(decimals_to_set, NOT_FIXED_DEC);
+     max_length= float_length(decimals);
+     break;
+   case INT_RESULT:
+@@ -2008,13 +2008,13 @@
+   case DECIMAL_RESULT:
+   {
+     hybrid_type= DECIMAL_RESULT;
+-    decimals_to_set= min(DECIMAL_MAX_SCALE, decimals_to_set);
++    decimals_to_set= MYSQL_MIN(DECIMAL_MAX_SCALE, decimals_to_set);
+     int decimals_delta= args[0]->decimals - decimals_to_set;
+     int precision= args[0]->decimal_precision();
+     int length_increase= ((decimals_delta <= 0) || truncate) ? 0:1;
+ 
+     precision-= decimals_delta - length_increase;
+-    decimals= min(decimals_to_set, DECIMAL_MAX_SCALE);
++    decimals= MYSQL_MIN(decimals_to_set, DECIMAL_MAX_SCALE);
+     max_length= my_decimal_precision_to_length_no_truncation(precision,
+                                                              decimals,
+                                                              unsigned_flag);
+@@ -2115,7 +2115,7 @@
+   my_decimal val, *value= args[0]->val_decimal(&val);
+   longlong dec= args[1]->val_int();
+   if (dec >= 0 || args[1]->unsigned_flag)
+-    dec= min((ulonglong) dec, decimals);
++    dec= MYSQL_MIN((ulonglong) dec, decimals);
+   else if (dec < INT_MIN)
+     dec= INT_MIN;
+     
+@@ -2990,7 +2990,7 @@
+       free_udf(u_d);
+       DBUG_RETURN(TRUE);
+     }
+-    func->max_length=min(initid.max_length,MAX_BLOB_WIDTH);
++    func->max_length=MYSQL_MIN(initid.max_length,MAX_BLOB_WIDTH);
+     func->maybe_null=initid.maybe_null;
+     const_item_cache=initid.const_item;
+     /* 
+@@ -2999,7 +2999,7 @@
+     */  
+     if (!const_item_cache && !used_tables_cache)
+       used_tables_cache= RAND_TABLE_BIT;
+-    func->decimals=min(initid.decimals,NOT_FIXED_DEC);
++    func->decimals=MYSQL_MIN(initid.decimals,NOT_FIXED_DEC);
+   }
+   initialized=1;
+   if (error)
+diff -urN mysql-old/sql/item_func.cc.orig mysql/sql/item_func.cc.orig
+--- mysql-old/sql/item_func.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/item_func.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,6160 @@
++/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++
++/**
++  @file
++
++  @brief
++  This file defines all numerical functions
++*/
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation				// gcc: Class implementation
++#endif
++
++#include "mysql_priv.h"
++#include "slave.h"				// for wait_for_master_pos
++#include "rpl_mi.h"
++#include <m_ctype.h>
++#include <hash.h>
++#include <time.h>
++#include <ft_global.h>
++#include <my_bit.h>
++
++#include "sp_head.h"
++#include "sp_rcontext.h"
++#include "sp.h"
++
++#ifdef NO_EMBEDDED_ACCESS_CHECKS
++#define sp_restore_security_context(A,B) while (0) {}
++#endif
++
++bool check_reserved_words(LEX_STRING *name)
++{
++  if (!my_strcasecmp(system_charset_info, name->str, "GLOBAL") ||
++      !my_strcasecmp(system_charset_info, name->str, "LOCAL") ||
++      !my_strcasecmp(system_charset_info, name->str, "SESSION"))
++    return TRUE;
++  return FALSE;
++}
++
++
++/**
++  @return
++    TRUE if item is a constant
++*/
++
++bool
++eval_const_cond(COND *cond)
++{
++  return ((Item_func*) cond)->val_int() ? TRUE : FALSE;
++}
++
++
++void Item_func::set_arguments(List<Item> &list)
++{
++  allowed_arg_cols= 1;
++  arg_count=list.elements;
++  args= tmp_arg;                                // If 2 arguments
++  if (arg_count <= 2 || (args=(Item**) sql_alloc(sizeof(Item*)*arg_count)))
++  {
++    List_iterator_fast<Item> li(list);
++    Item *item;
++    Item **save_args= args;
++
++    while ((item=li++))
++    {
++      *(save_args++)= item;
++      with_sum_func|=item->with_sum_func;
++    }
++  }
++  list.empty();					// Fields are used
++}
++
++Item_func::Item_func(List<Item> &list)
++  :allowed_arg_cols(1)
++{
++  set_arguments(list);
++}
++
++Item_func::Item_func(THD *thd, Item_func *item)
++  :Item_result_field(thd, item),
++   allowed_arg_cols(item->allowed_arg_cols),
++   arg_count(item->arg_count),
++   used_tables_cache(item->used_tables_cache),
++   not_null_tables_cache(item->not_null_tables_cache),
++   const_item_cache(item->const_item_cache)
++{
++  if (arg_count)
++  {
++    if (arg_count <=2)
++      args= tmp_arg;
++    else
++    {
++      if (!(args=(Item**) thd->alloc(sizeof(Item*)*arg_count)))
++	return;
++    }
++    memcpy((char*) args, (char*) item->args, sizeof(Item*)*arg_count);
++  }
++}
++
++
++/*
++  Resolve references to table column for a function and its argument
++
++  SYNOPSIS:
++  fix_fields()
++  thd		Thread object
++  ref		Pointer to where this object is used.  This reference
++		is used if we want to replace this object with another
++		one (for example in the summary functions).
++
++  DESCRIPTION
++    Call fix_fields() for all arguments to the function.  The main intention
++    is to allow all Item_field() objects to setup pointers to the table fields.
++
++    Sets as a side effect the following class variables:
++      maybe_null	Set if any argument may return NULL
++      with_sum_func	Set if any of the arguments contains a sum function
++      used_tables_cache Set to union of the tables used by arguments
++
++      str_value.charset If this is a string function, set this to the
++			character set for the first argument.
++			If any argument is binary, this is set to binary
++
++   If for any item any of the defaults are wrong, then this can
++   be fixed in the fix_length_and_dec() function that is called
++   after this one or by writing a specialized fix_fields() for the
++   item.
++
++  RETURN VALUES
++  FALSE	ok
++  TRUE	Got error.  Stored with my_error().
++*/
++
++bool
++Item_func::fix_fields(THD *thd, Item **ref)
++{
++  DBUG_ASSERT(fixed == 0);
++  Item **arg,**arg_end;
++#ifndef EMBEDDED_LIBRARY			// Avoid compiler warning
++  uchar buff[STACK_BUFF_ALLOC];			// Max argument in function
++#endif
++
++  used_tables_cache= not_null_tables_cache= 0;
++  const_item_cache=1;
++
++  /*
++    Use stack limit of STACK_MIN_SIZE * 2 since
++    on some platforms a recursive call to fix_fields
++    requires more than STACK_MIN_SIZE bytes (e.g. for
++    MIPS, it takes about 22kB to make one recursive
++    call to Item_func::fix_fields())
++  */
++  if (check_stack_overrun(thd, STACK_MIN_SIZE * 2, buff))
++    return TRUE;				// Fatal error if flag is set!
++  if (arg_count)
++  {						// Print purify happy
++    for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++)
++    {
++      Item *item;
++      /*
++	We can't yet set item to *arg as fix_fields may change *arg
++	We shouldn't call fix_fields() twice, so check 'fixed' field first
++      */
++      if ((!(*arg)->fixed && (*arg)->fix_fields(thd, arg)))
++	return TRUE;				/* purecov: inspected */
++      item= *arg;
++
++      if (allowed_arg_cols)
++      {
++        if (item->check_cols(allowed_arg_cols))
++          return 1;
++      }
++      else
++      {
++        /*  we have to fetch allowed_arg_cols from first argument */
++        DBUG_ASSERT(arg == args); // it is first argument
++        allowed_arg_cols= item->cols();
++        DBUG_ASSERT(allowed_arg_cols); // Can't be 0 any more
++      }
++
++      if (item->maybe_null)
++	maybe_null=1;
++
++      with_sum_func= with_sum_func || item->with_sum_func;
++      used_tables_cache|=     item->used_tables();
++      not_null_tables_cache|= item->not_null_tables();
++      const_item_cache&=      item->const_item();
++      with_subselect|=        item->with_subselect;
++    }
++  }
++  fix_length_and_dec();
++  if (thd->is_error()) // An error inside fix_length_and_dec occurred
++    return TRUE;
++  fixed= 1;
++  return FALSE;
++}
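++
++/*
++  Illustrative note: the usual pattern for fixing a single item, as applied
++  to each argument above, is
++
++    if (!item->fixed && item->fix_fields(thd, &item))
++      return TRUE;                          // error already reported
++
++  For a hypothetical call f(t1.a, 3), the loop above leaves used_tables_cache
++  with t1's table bit set, const_item_cache == 0, and maybe_null set when
++  t1.a is nullable.
++*/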
++
++
++bool Item_func::walk(Item_processor processor, bool walk_subquery,
++                     uchar *argument)
++{
++  if (arg_count)
++  {
++    Item **arg,**arg_end;
++    for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
++    {
++      if ((*arg)->walk(processor, walk_subquery, argument))
++	return 1;
++    }
++  }
++  return (this->*processor)(argument);
++}
++
++void Item_func::traverse_cond(Cond_traverser traverser,
++                              void *argument, traverse_order order)
++{
++  if (arg_count)
++  {
++    Item **arg,**arg_end;
++
++    switch (order) {
++    case(PREFIX):
++      (*traverser)(this, argument);
++      for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
++      {
++	(*arg)->traverse_cond(traverser, argument, order);
++      }
++      break;
++    case (POSTFIX):
++      for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
++      {
++	(*arg)->traverse_cond(traverser, argument, order);
++      }
++      (*traverser)(this, argument);
++    }
++  }
++  else
++    (*traverser)(this, argument);
++}
++
++
++/**
++  Transform an Item_func object with a transformer callback function.
++
++    The function recursively applies the transform method to each
++    argument of the Item_func node.
++    If the call of the method for an argument item returns a new item,
++    the old item is replaced with the new one.
++    After this the transformer is applied to the root node
++    of the Item_func object. 
++  @param transformer   the transformer callback function to be applied to
++                       the nodes of the tree of the object
++  @param argument      parameter to be passed to the transformer
++
++  @return
++    Item returned as the result of transformation of the root node
++*/
++
++Item *Item_func::transform(Item_transformer transformer, uchar *argument)
++{
++  DBUG_ASSERT(!current_thd->is_stmt_prepare());
++
++  if (arg_count)
++  {
++    Item **arg,**arg_end;
++    for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
++    {
++      Item *new_item= (*arg)->transform(transformer, argument);
++      if (!new_item)
++	return 0;
++
++      /*
++        THD::change_item_tree() should be called only if the tree was
++        really transformed, i.e. when a new item has been created.
++        Otherwise we'll be allocating a lot of unnecessary memory for
++        change records at each execution.
++      */
++      if (*arg != new_item)
++        current_thd->change_item_tree(arg, new_item);
++    }
++  }
++  return (this->*transformer)(argument);
++}
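++
++/*
++  Usage sketch (illustrative; Item::my_rewrite below is a hypothetical
++  transformer method, not an existing one):
++
++    Item *new_root= root->transform(&Item::my_rewrite, (uchar*) ctx);
++    if (!new_root)
++      ;                                     // error, already reported
++
++  The argument subtrees are rewritten first, then the root node itself.
++*/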
++
++
++/**
++  Compile Item_func object with a processor and a transformer
++  callback functions.
++
++    First the function applies the analyzer to the root node of
++    the Item_func object. Then, if the analyzer succeeds (returns TRUE),
++    the function recursively applies the compile method to each argument
++    of the Item_func node.
++    If the call of the method for an argument item returns a new item,
++    the old item is replaced with the new one.
++    After this the transformer is applied to the root node
++    of the Item_func object. 
++
++  @param analyzer      the analyzer callback function to be applied to the
++                       nodes of the tree of the object
++  @param[in,out] arg_p parameter to be passed to the analyzer
++  @param transformer   the transformer callback function to be applied to the
++                       nodes of the tree of the object
++  @param arg_t         parameter to be passed to the transformer
++
++  @return
++    Item returned as the result of transformation of the root node
++*/
++
++Item *Item_func::compile(Item_analyzer analyzer, uchar **arg_p,
++                         Item_transformer transformer, uchar *arg_t)
++{
++  if (!(this->*analyzer)(arg_p))
++    return 0;
++  if (arg_count)
++  {
++    Item **arg,**arg_end;
++    for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++)
++    {
++      /* 
++        The same parameter value of arg_p must be passed
++        to analyze any argument of the condition formula.
++      */   
++      uchar *arg_v= *arg_p;
++      Item *new_item= (*arg)->compile(analyzer, &arg_v, transformer, arg_t);
++      if (new_item && *arg != new_item)
++        current_thd->change_item_tree(arg, new_item);
++    }
++  }
++  return (this->*transformer)(arg_t);
++}
++
++/**
++  See comments in Item_cmp_func::split_sum_func()
++*/
++
++void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array,
++                               List<Item> &fields)
++{
++  Item **arg, **arg_end;
++  for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++)
++    (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg, TRUE);
++}
++
++
++void Item_func::update_used_tables()
++{
++  used_tables_cache=0;
++  const_item_cache=1;
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    args[i]->update_used_tables();
++    used_tables_cache|=args[i]->used_tables();
++    const_item_cache&=args[i]->const_item();
++  }
++}
++
++
++table_map Item_func::used_tables() const
++{
++  return used_tables_cache;
++}
++
++
++table_map Item_func::not_null_tables() const
++{
++  return not_null_tables_cache;
++}
++
++
++void Item_func::print(String *str, enum_query_type query_type)
++{
++  str->append(func_name());
++  str->append('(');
++  print_args(str, 0, query_type);
++  str->append(')');
++}
++
++
++void Item_func::print_args(String *str, uint from, enum_query_type query_type)
++{
++  for (uint i=from ; i < arg_count ; i++)
++  {
++    if (i != from)
++      str->append(',');
++    args[i]->print(str, query_type);
++  }
++}
++
++
++void Item_func::print_op(String *str, enum_query_type query_type)
++{
++  str->append('(');
++  for (uint i=0 ; i < arg_count-1 ; i++)
++  {
++    args[i]->print(str, query_type);
++    str->append(' ');
++    str->append(func_name());
++    str->append(' ');
++  }
++  args[arg_count-1]->print(str, query_type);
++  str->append(')');
++}
++
++
++bool Item_func::eq(const Item *item, bool binary_cmp) const
++{
++  /* Assume we don't have rtti */
++  if (this == item)
++    return 1;
++  if (item->type() != FUNC_ITEM)
++    return 0;
++  Item_func *item_func=(Item_func*) item;
++  Item_func::Functype func_type;
++  if ((func_type= functype()) != item_func->functype() ||
++      arg_count != item_func->arg_count ||
++      (func_type != Item_func::FUNC_SP &&
++       func_name() != item_func->func_name()) ||
++      (func_type == Item_func::FUNC_SP &&
++       my_strcasecmp(system_charset_info, func_name(), item_func->func_name())))
++    return 0;
++  for (uint i=0; i < arg_count ; i++)
++    if (!args[i]->eq(item_func->args[i], binary_cmp))
++      return 0;
++  return 1;
++}
++
++
++Field *Item_func::tmp_table_field(TABLE *table)
++{
++  Field *field= NULL;
++
++  switch (result_type()) {
++  case INT_RESULT:
++    if (max_length > MY_INT32_NUM_DECIMAL_DIGITS)
++      field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
++    else
++      field= new Field_long(max_length, maybe_null, name, unsigned_flag);
++    break;
++  case REAL_RESULT:
++    field= new Field_double(max_length, maybe_null, name, decimals);
++    break;
++  case STRING_RESULT:
++    return make_string_field(table);
++    break;
++  case DECIMAL_RESULT:
++    field= Field_new_decimal::create_from_item(this);
++    break;
++  case ROW_RESULT:
++  default:
++    // This case should never be chosen
++    DBUG_ASSERT(0);
++    field= 0;
++    break;
++  }
++  if (field)
++    field->init(table);
++  return field;
++}
++
++
++bool Item_func::is_expensive_processor(uchar *arg)
++{
++  return is_expensive();
++}
++
++
++my_decimal *Item_func::val_decimal(my_decimal *decimal_value)
++{
++  DBUG_ASSERT(fixed);
++  longlong nr= val_int();
++  if (null_value)
++    return 0; /* purecov: inspected */
++  int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value);
++  return decimal_value;
++}
++
++
++String *Item_real_func::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  double nr= val_real();
++  if (null_value)
++    return 0; /* purecov: inspected */
++  str->set_real(nr,decimals, &my_charset_bin);
++  return str;
++}
++
++
++my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value)
++{
++  DBUG_ASSERT(fixed);
++  double nr= val_real();
++  if (null_value)
++    return 0; /* purecov: inspected */
++  double2my_decimal(E_DEC_FATAL_ERROR, nr, decimal_value);
++  return decimal_value;
++}
++
++
++void Item_func::fix_num_length_and_dec()
++{
++  uint fl_length= 0;
++  decimals=0;
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    set_if_bigger(decimals,args[i]->decimals);
++    set_if_bigger(fl_length, args[i]->max_length);
++  }
++  max_length=float_length(decimals);
++  if (fl_length > max_length)
++  {
++    decimals= NOT_FIXED_DEC;
++    max_length= float_length(NOT_FIXED_DEC);
++  }
++}
++
++
++void Item_func_numhybrid::fix_num_length_and_dec()
++{}
++
++
++/**
++  Set max_length/decimals of function if function is fixed point and
++  result length/precision depends on argument ones.
++*/
++
++void Item_func::count_decimal_length()
++{
++  int max_int_part= 0;
++  decimals= 0;
++  unsigned_flag= 1;
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    set_if_bigger(decimals, args[i]->decimals);
++    set_if_bigger(max_int_part, args[i]->decimal_int_part());
++    set_if_smaller(unsigned_flag, args[i]->unsigned_flag);
++  }
++  int precision= MYSQL_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
++  max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
++                                                           unsigned_flag);
++}
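++
++/*
++  Worked example (illustrative): for arguments of types DECIMAL(6,2) and
++  DECIMAL(8,3), decimals= MYSQL_MAX(2,3)= 3 and max_int_part= MYSQL_MAX(4,5)= 5,
++  so precision= MYSQL_MIN(5+3, DECIMAL_MAX_PRECISION)= 8 and the result is
++  DECIMAL(8,3); unsigned_flag stays set only if both arguments are unsigned.
++*/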
++
++
++/**
++  Set max_length to the maximum of the arguments' max_length values.
++*/
++
++void Item_func::count_only_length()
++{
++  max_length= 0;
++  unsigned_flag= 0;
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    set_if_bigger(max_length, args[i]->max_length);
++    set_if_bigger(unsigned_flag, args[i]->unsigned_flag);
++  }
++}
++
++
++/**
++  Set max_length/decimals of function if function is floating point and
++  result length/precision depends on argument ones.
++*/
++
++void Item_func::count_real_length()
++{
++  uint32 length= 0;
++  decimals= 0;
++  max_length= 0;
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    if (decimals != NOT_FIXED_DEC)
++    {
++      set_if_bigger(decimals, args[i]->decimals);
++      set_if_bigger(length, (args[i]->max_length - args[i]->decimals));
++    }
++    set_if_bigger(max_length, args[i]->max_length);
++  }
++  if (decimals != NOT_FIXED_DEC)
++  {
++    max_length= length;
++    length+= decimals;
++    if (length < max_length)  // If previous operation gave overflow
++      max_length= UINT_MAX32;
++    else
++      max_length= length;
++  }
++}
++
++
++
++void Item_func::signal_divide_by_null()
++{
++  THD *thd= current_thd;
++  if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO)
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DIVISION_BY_ZERO,
++                 ER(ER_DIVISION_BY_ZERO));
++  null_value= 1;
++}
++
++
++Item *Item_func::get_tmp_table_item(THD *thd)
++{
++  if (!with_sum_func && !const_item())
++    return new Item_field(result_field);
++  return copy_or_same(thd);
++}
++
++double Item_int_func::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++
++  return unsigned_flag ? (double) ((ulonglong) val_int()) : (double) val_int();
++}
++
++
++String *Item_int_func::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong nr=val_int();
++  if (null_value)
++    return 0;
++  str->set_int(nr, unsigned_flag, &my_charset_bin);
++  return str;
++}
++
++
++void Item_func_connection_id::fix_length_and_dec()
++{
++  Item_int_func::fix_length_and_dec();
++  max_length= 10;
++}
++
++
++bool Item_func_connection_id::fix_fields(THD *thd, Item **ref)
++{
++  if (Item_int_func::fix_fields(thd, ref))
++    return TRUE;
++  thd->thread_specific_used= TRUE;
++  value= thd->variables.pseudo_thread_id;
++  return FALSE;
++}
++
++
++/**
++  Check arguments here to determine result's type for a numeric
++  function of two arguments.
++*/
++
++void Item_num_op::find_num_type(void)
++{
++  DBUG_ENTER("Item_num_op::find_num_type");
++  DBUG_PRINT("info", ("name %s", func_name()));
++  DBUG_ASSERT(arg_count == 2);
++  Item_result r0= args[0]->result_type();
++  Item_result r1= args[1]->result_type();
++
++  if (r0 == REAL_RESULT || r1 == REAL_RESULT ||
++      r0 == STRING_RESULT || r1 ==STRING_RESULT)
++  {
++    count_real_length();
++    max_length= float_length(decimals);
++    hybrid_type= REAL_RESULT;
++  }
++  else if (r0 == DECIMAL_RESULT || r1 == DECIMAL_RESULT)
++  {
++    hybrid_type= DECIMAL_RESULT;
++    result_precision();
++  }
++  else
++  {
++    DBUG_ASSERT(r0 == INT_RESULT && r1 == INT_RESULT);
++    decimals= 0;
++    hybrid_type=INT_RESULT;
++    result_precision();
++  }
++  DBUG_PRINT("info", ("Type: %s",
++             (hybrid_type == REAL_RESULT ? "REAL_RESULT" :
++              hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" :
++              hybrid_type == INT_RESULT ? "INT_RESULT" :
++              "--ILLEGAL!!!--")));
++  DBUG_VOID_RETURN;
++}
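++
++/*
++  Illustrative examples of the resolution above: INT + INT stays INT_RESULT,
++  INT + DECIMAL(5,2) becomes DECIMAL_RESULT, and any REAL or string argument
++  (e.g. 1 + '1') forces REAL_RESULT, so SELECT 1 + '1' returns a double.
++*/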
++
++
++/**
++  Set result type for a numeric function of one argument
++  (can be also used by a numeric function of many arguments, if the result
++  type depends only on the first argument)
++*/
++
++void Item_func_num1::find_num_type()
++{
++  DBUG_ENTER("Item_func_num1::find_num_type");
++  DBUG_PRINT("info", ("name %s", func_name()));
++  switch (hybrid_type= args[0]->result_type()) {
++  case INT_RESULT:
++    unsigned_flag= args[0]->unsigned_flag;
++    break;
++  case STRING_RESULT:
++  case REAL_RESULT:
++    hybrid_type= REAL_RESULT;
++    max_length= float_length(decimals);
++    break;
++  case DECIMAL_RESULT:
++    break;
++  default:
++    DBUG_ASSERT(0);
++  }
++  DBUG_PRINT("info", ("Type: %s",
++                      (hybrid_type == REAL_RESULT ? "REAL_RESULT" :
++                       hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" :
++                       hybrid_type == INT_RESULT ? "INT_RESULT" :
++                       "--ILLEGAL!!!--")));
++  DBUG_VOID_RETURN;
++}
++
++
++void Item_func_num1::fix_num_length_and_dec()
++{
++  decimals= args[0]->decimals;
++  max_length= args[0]->max_length;
++}
++
++
++void Item_func_numhybrid::fix_length_and_dec()
++{
++  fix_num_length_and_dec();
++  find_num_type();
++}
++
++
++String *Item_func_numhybrid::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  switch (hybrid_type) {
++  case DECIMAL_RESULT:
++  {
++    my_decimal decimal_value, *val;
++    if (!(val= decimal_op(&decimal_value)))
++      return 0;                                 // null is set
++    my_decimal_round(E_DEC_FATAL_ERROR, val, decimals, FALSE, val);
++    my_decimal2string(E_DEC_FATAL_ERROR, val, 0, 0, 0, str);
++    break;
++  }
++  case INT_RESULT:
++  {
++    longlong nr= int_op();
++    if (null_value)
++      return 0; /* purecov: inspected */
++    str->set_int(nr, unsigned_flag, &my_charset_bin);
++    break;
++  }
++  case REAL_RESULT:
++  {
++    double nr= real_op();
++    if (null_value)
++      return 0; /* purecov: inspected */
++    str->set_real(nr,decimals,&my_charset_bin);
++    break;
++  }
++  case STRING_RESULT:
++    return str_op(&str_value);
++  default:
++    DBUG_ASSERT(0);
++  }
++  return str;
++}
++
++
++double Item_func_numhybrid::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  switch (hybrid_type) {
++  case DECIMAL_RESULT:
++  {
++    my_decimal decimal_value, *val;
++    double result;
++    if (!(val= decimal_op(&decimal_value)))
++      return 0.0;                               // null is set
++    my_decimal2double(E_DEC_FATAL_ERROR, val, &result);
++    return result;
++  }
++  case INT_RESULT:
++  {
++    longlong result= int_op();
++    return unsigned_flag ? (double) ((ulonglong) result) : (double) result;
++  }
++  case REAL_RESULT:
++    return real_op();
++  case STRING_RESULT:
++  {
++    char *end_not_used;
++    int err_not_used;
++    String *res= str_op(&str_value);
++    return (res ? my_strntod(res->charset(), (char*) res->ptr(), res->length(),
++			     &end_not_used, &err_not_used) : 0.0);
++  }
++  default:
++    DBUG_ASSERT(0);
++  }
++  return 0.0;
++}
++
++
++longlong Item_func_numhybrid::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  switch (hybrid_type) {
++  case DECIMAL_RESULT:
++  {
++    my_decimal decimal_value, *val;
++    if (!(val= decimal_op(&decimal_value)))
++      return 0;                                 // null is set
++    longlong result;
++    my_decimal2int(E_DEC_FATAL_ERROR, val, unsigned_flag, &result);
++    return result;
++  }
++  case INT_RESULT:
++    return int_op();
++  case REAL_RESULT:
++    return (longlong) rint(real_op());
++  case STRING_RESULT:
++  {
++    int err_not_used;
++    String *res;
++    if (!(res= str_op(&str_value)))
++      return 0;
++
++    char *end= (char*) res->ptr() + res->length();
++    CHARSET_INFO *cs= res->charset();
++    return (*(cs->cset->strtoll10))(cs, res->ptr(), &end, &err_not_used);
++  }
++  default:
++    DBUG_ASSERT(0);
++  }
++  return 0;
++}
++
++
++my_decimal *Item_func_numhybrid::val_decimal(my_decimal *decimal_value)
++{
++  my_decimal *val= decimal_value;
++  DBUG_ASSERT(fixed == 1);
++  switch (hybrid_type) {
++  case DECIMAL_RESULT:
++    val= decimal_op(decimal_value);
++    break;
++  case INT_RESULT:
++  {
++    longlong result= int_op();
++    int2my_decimal(E_DEC_FATAL_ERROR, result, unsigned_flag, decimal_value);
++    break;
++  }
++  case REAL_RESULT:
++  {
++    double result= (double)real_op();
++    double2my_decimal(E_DEC_FATAL_ERROR, result, decimal_value);
++    break;
++  }
++  case STRING_RESULT:
++  {
++    String *res;
++    if (!(res= str_op(&str_value)))
++      return NULL;
++
++    str2my_decimal(E_DEC_FATAL_ERROR, (char*) res->ptr(),
++                   res->length(), res->charset(), decimal_value);
++    break;
++  }  
++  case ROW_RESULT:
++  default:
++    DBUG_ASSERT(0);
++  }
++  return val;
++}
++
++
++void Item_func_signed::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("cast("));
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" as signed)"));
++
++}
++
++
++longlong Item_func_signed::val_int_from_str(int *error)
++{
++  char buff[MAX_FIELD_WIDTH], *end, *start;
++  uint32 length;
++  String tmp(buff,sizeof(buff), &my_charset_bin), *res;
++  longlong value;
++
++  /*
++    For a string result, we must first get the string and then convert it
++    to a longlong
++  */
++
++  if (!(res= args[0]->val_str(&tmp)))
++  {
++    null_value= 1;
++    *error= 0;
++    return 0;
++  }
++  null_value= 0;
++  start= (char *)res->ptr();
++  length= res->length();
++
++  end= start + length;
++  value= my_strtoll10(start, &end, error);
++  if (*error > 0 || end != start+ length)
++  {
++    char err_buff[128];
++    String err_tmp(err_buff,(uint32) sizeof(err_buff), system_charset_info);
++    err_tmp.copy(start, length, system_charset_info);
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                        ER_TRUNCATED_WRONG_VALUE,
++                        ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
++                        err_tmp.c_ptr());
++  }
++  return value;
++}
++
++
++longlong Item_func_signed::val_int()
++{
++  longlong value;
++  int error;
++
++  if (args[0]->cast_to_int_type() != STRING_RESULT ||
++      args[0]->result_as_longlong())
++  {
++    value= args[0]->val_int();
++    null_value= args[0]->null_value; 
++    return value;
++  }
++
++  value= val_int_from_str(&error);
++  if (value < 0 && error == 0)
++  {
++    push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
++                 "Cast to signed converted positive out-of-range integer to "
++                 "its negative complement");
++  }
++  return value;
++}
++
++
++void Item_func_unsigned::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("cast("));
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" as unsigned)"));
++
++}
++
++
++longlong Item_func_unsigned::val_int()
++{
++  longlong value;
++  int error;
++
++  if (args[0]->cast_to_int_type() == DECIMAL_RESULT)
++  {
++    my_decimal tmp, *dec= args[0]->val_decimal(&tmp);
++    if (!(null_value= args[0]->null_value))
++      my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &value);
++    else
++      value= 0;
++    return value;
++  }
++  else if (args[0]->cast_to_int_type() != STRING_RESULT ||
++           args[0]->result_as_longlong())
++  {
++    value= args[0]->val_int();
++    null_value= args[0]->null_value; 
++    return value;
++  }
++
++  value= val_int_from_str(&error);
++  if (error < 0)
++    push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
++                 "Cast to unsigned converted negative integer to its "
++                 "positive complement");
++  return value;
++}
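++
++/*
++  Illustrative examples of the two cast paths above: CAST('123abc' AS SIGNED)
++  takes the string path, returns 123 and pushes an ER_TRUNCATED_WRONG_VALUE
++  warning, while CAST(-1 AS UNSIGNED) takes the integer path and yields
++  18446744073709551615 (the same 64-bit pattern reinterpreted as unsigned).
++*/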
++
++
++String *Item_decimal_typecast::val_str(String *str)
++{
++  my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
++  if (null_value)
++    return NULL;
++  my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str);
++  return str;
++}
++
++
++double Item_decimal_typecast::val_real()
++{
++  my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
++  double res;
++  if (null_value)
++    return 0.0;
++  my_decimal2double(E_DEC_FATAL_ERROR, tmp, &res);
++  return res;
++}
++
++
++longlong Item_decimal_typecast::val_int()
++{
++  my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
++  longlong res;
++  if (null_value)
++    return 0;
++  my_decimal2int(E_DEC_FATAL_ERROR, tmp, unsigned_flag, &res);
++  return res;
++}
++
++
++my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec)
++{
++  my_decimal tmp_buf, *tmp= args[0]->val_decimal(&tmp_buf);
++  bool sign;
++  uint precision;
++
++  if ((null_value= args[0]->null_value))
++    return NULL;
++  my_decimal_round(E_DEC_FATAL_ERROR, tmp, decimals, FALSE, dec);
++  sign= dec->sign();
++  if (unsigned_flag)
++  {
++    if (sign)
++    {
++      my_decimal_set_zero(dec);
++      goto err;
++    }
++  }
++  precision= my_decimal_length_to_precision(max_length,
++                                            decimals, unsigned_flag);
++  if (precision - decimals < (uint) my_decimal_intg(dec))
++  {
++    max_my_decimal(dec, precision, decimals);
++    dec->sign(sign);
++    goto err;
++  }
++  return dec;
++
++err:
++  push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                      ER_WARN_DATA_OUT_OF_RANGE,
++                      ER(ER_WARN_DATA_OUT_OF_RANGE),
++                      name, 1);
++  return dec;
++}
++
++
++void Item_decimal_typecast::print(String *str, enum_query_type query_type)
++{
++  char len_buf[20*3 + 1];
++  char *end;
++
++  uint precision= my_decimal_length_to_precision(max_length, decimals,
++                                                 unsigned_flag);
++  str->append(STRING_WITH_LEN("cast("));
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" as decimal("));
++
++  end=int10_to_str(precision, len_buf,10);
++  str->append(len_buf, (uint32) (end - len_buf));
++
++  str->append(',');
++
++  end=int10_to_str(decimals, len_buf,10);
++  str->append(len_buf, (uint32) (end - len_buf));
++
++  str->append(')');
++  str->append(')');
++}
++
++
++double Item_func_plus::real_op()
++{
++  double value= args[0]->val_real() + args[1]->val_real();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0.0;
++  return fix_result(value);
++}
++
++
++longlong Item_func_plus::int_op()
++{
++  longlong value=args[0]->val_int()+args[1]->val_int();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0;
++  return value;
++}
++
++
++/**
++  Calculate plus of two decimals.
++
++  @param decimal_value	Buffer that can be used to store result
++
++  @retval
++    0  Value was NULL;  In this case null_value is set
++  @retval
++    \# Value of operation as a decimal
++*/
++
++my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal value1, *val1;
++  my_decimal value2, *val2;
++  val1= args[0]->val_decimal(&value1);
++  if ((null_value= args[0]->null_value))
++    return 0;
++  val2= args[1]->val_decimal(&value2);
++  if (!(null_value= (args[1]->null_value ||
++                     (my_decimal_add(E_DEC_FATAL_ERROR, decimal_value, val1,
++                                     val2) > 3))))
++    return decimal_value;
++  return 0;
++}
++
++/**
++  Set precision of results for additive operations (+ and -)
++*/
++void Item_func_additive_op::result_precision()
++{
++  decimals= MYSQL_MAX(args[0]->decimals, args[1]->decimals);
++  int arg1_int= args[0]->decimal_precision() - args[0]->decimals;
++  int arg2_int= args[1]->decimal_precision() - args[1]->decimals;
++  int precision= MYSQL_MAX(arg1_int, arg2_int) + 1 + decimals;
++
++  /* Integer operations keep unsigned_flag if one of arguments is unsigned */
++  if (result_type() == INT_RESULT)
++    unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
++  else
++    unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
++  max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
++                                                           unsigned_flag);
++}
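++
++/*
++  Worked example (illustrative): DECIMAL(5,2) + DECIMAL(7,4) gives
++  decimals= MYSQL_MAX(2,4)= 4 and integer parts 3 and 3, so
++  precision= MYSQL_MAX(3,3) + 1 + 4= 8 and the result type is DECIMAL(8,4).
++*/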
++
++
++/**
++  The following function is here to allow the user to force
++  subtraction of UNSIGNED BIGINT to return negative values.
++*/
++
++void Item_func_minus::fix_length_and_dec()
++{
++  Item_num_op::fix_length_and_dec();
++  if (unsigned_flag &&
++      (current_thd->variables.sql_mode & MODE_NO_UNSIGNED_SUBTRACTION))
++    unsigned_flag=0;
++}
++
++
++double Item_func_minus::real_op()
++{
++  double value= args[0]->val_real() - args[1]->val_real();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0.0;
++  return fix_result(value);
++}
++
++
++longlong Item_func_minus::int_op()
++{
++  longlong value=args[0]->val_int() - args[1]->val_int();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0;
++  return value;
++}
++
++
++/**
++  See Item_func_plus::decimal_op for comments.
++*/
++
++my_decimal *Item_func_minus::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal value1, *val1;
++  my_decimal value2, *val2;
++
++  val1= args[0]->val_decimal(&value1);
++  if ((null_value= args[0]->null_value))
++    return 0;
++  val2= args[1]->val_decimal(&value2);
++  if (!(null_value= (args[1]->null_value ||
++                     (my_decimal_sub(E_DEC_FATAL_ERROR, decimal_value, val1,
++                                     val2) > 3))))
++    return decimal_value;
++  return 0;
++}
++
++
++double Item_func_mul::real_op()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real() * args[1]->val_real();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0.0;
++  return fix_result(value);
++}
++
++
++longlong Item_func_mul::int_op()
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong value=args[0]->val_int()*args[1]->val_int();
++  if ((null_value=args[0]->null_value || args[1]->null_value))
++    return 0;
++  return value;
++}
++
++
++/** See Item_func_plus::decimal_op for comments. */
++
++my_decimal *Item_func_mul::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal value1, *val1;
++  my_decimal value2, *val2;
++  val1= args[0]->val_decimal(&value1);
++  if ((null_value= args[0]->null_value))
++    return 0;
++  val2= args[1]->val_decimal(&value2);
++  if (!(null_value= (args[1]->null_value ||
++                     (my_decimal_mul(E_DEC_FATAL_ERROR, decimal_value, val1,
++                                    val2) > 3))))
++    return decimal_value;
++  return 0;
++}
++
++
++void Item_func_mul::result_precision()
++{
++  /* Integer operations keep unsigned_flag if one of arguments is unsigned */
++  if (result_type() == INT_RESULT)
++    unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
++  else
++    unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
++  decimals= MYSQL_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
++  uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision();
++  uint precision= MYSQL_MIN(est_prec, DECIMAL_MAX_PRECISION);
++  max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
++                                                           unsigned_flag);
++}
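++
++/*
++  Worked example (illustrative): DECIMAL(5,2) * DECIMAL(7,4) gives
++  decimals= MYSQL_MIN(2+4, DECIMAL_MAX_SCALE)= 6 and
++  precision= MYSQL_MIN(5+7, DECIMAL_MAX_PRECISION)= 12, i.e. DECIMAL(12,6).
++*/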
++
++
++double Item_func_div::real_op()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  double val2= args[1]->val_real();
++  if ((null_value= args[0]->null_value || args[1]->null_value))
++    return 0.0;
++  if (val2 == 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  return fix_result(value/val2);
++}
++
++
++my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal value1, *val1;
++  my_decimal value2, *val2;
++  int err;
++
++  val1= args[0]->val_decimal(&value1);
++  if ((null_value= args[0]->null_value))
++    return 0;
++  val2= args[1]->val_decimal(&value2);
++  if ((null_value= args[1]->null_value))
++    return 0;
++  if ((err= my_decimal_div(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, decimal_value,
++                           val1, val2, prec_increment)) > 3)
++  {
++    if (err == E_DEC_DIV_ZERO)
++      signal_divide_by_null();
++    null_value= 1;
++    return 0;
++  }
++  return decimal_value;
++}
++
++
++void Item_func_div::result_precision()
++{
++  uint precision=MYSQL_MIN(args[0]->decimal_precision() +
++                           args[1]->decimals + prec_increment,
++                           DECIMAL_MAX_PRECISION);
++
++  /* Integer operations keep unsigned_flag if one of arguments is unsigned */
++  if (result_type() == INT_RESULT)
++    unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
++  else
++    unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
++  decimals= MYSQL_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
++  max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
++                                                           unsigned_flag);
++}
++
++
++void Item_func_div::fix_length_and_dec()
++{
++  DBUG_ENTER("Item_func_div::fix_length_and_dec");
++  prec_increment= current_thd->variables.div_precincrement;
++  Item_num_op::fix_length_and_dec();
++  switch(hybrid_type) {
++  case REAL_RESULT:
++  {
++    decimals=MYSQL_MAX(args[0]->decimals,args[1]->decimals)+prec_increment;
++    set_if_smaller(decimals, NOT_FIXED_DEC);
++    uint tmp=float_length(decimals);
++    if (decimals == NOT_FIXED_DEC)
++      max_length= tmp;
++    else
++    {
++      max_length=args[0]->max_length - args[0]->decimals + decimals;
++      set_if_smaller(max_length,tmp);
++    }
++    break;
++  }
++  case INT_RESULT:
++    hybrid_type= DECIMAL_RESULT;
++    DBUG_PRINT("info", ("Type changed: DECIMAL_RESULT"));
++    result_precision();
++    break;
++  case DECIMAL_RESULT:
++    result_precision();
++    break;
++  default:
++    DBUG_ASSERT(0);
++  }
++  maybe_null= 1; // division by zero
++  DBUG_VOID_RETURN;
++}
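++
++/*
++  Worked example (illustrative, assuming the default div_precincrement of 4):
++  an INT / INT division is promoted to DECIMAL_RESULT, so SELECT 1/3 returns
++  0.3333 (four fractional digits), and DECIMAL(5,2) / DECIMAL(7,4) gets
++  decimals= MYSQL_MIN(2+4, DECIMAL_MAX_SCALE)= 6 and
++  precision= MYSQL_MIN(5+4+4, DECIMAL_MAX_PRECISION)= 13, i.e. DECIMAL(13,6).
++  A zero divisor goes through signal_divide_by_null() and yields NULL.
++*/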
++
++
++/* Integer division */
++longlong Item_func_int_div::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong value=args[0]->val_int();
++  longlong val2=args[1]->val_int();
++  if ((null_value= (args[0]->null_value || args[1]->null_value)))
++    return 0;
++  if (val2 == 0)
++  {
++    signal_divide_by_null();
++    return 0;
++  }
++
++  if (unsigned_flag)
++    return  ((ulonglong) value / (ulonglong) val2);
++  else if (value == LONGLONG_MIN && val2 == -1)
++    return LONGLONG_MIN;
++  else
++    return value / val2;
++}
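++
++/*
++  Illustrative examples: 7 DIV 2 = 3 and -7 DIV 2 = -3 (truncation towards
++  zero), x DIV 0 yields NULL via signal_divide_by_null(), and the
++  LONGLONG_MIN DIV -1 case is clamped above to avoid signed overflow.
++*/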
++
++
++void Item_func_int_div::fix_length_and_dec()
++{
++  Item_result argtype= args[0]->result_type();
++  /* use precision only for the data types for which it is applicable and valid */
++  max_length=args[0]->max_length -
++    (argtype == DECIMAL_RESULT || argtype == INT_RESULT ?
++     args[0]->decimals : 0);
++  maybe_null=1;
++  unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag;
++}
++
++
++longlong Item_func_mod::int_op()
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong value=  args[0]->val_int();
++  longlong val2= args[1]->val_int();
++  longlong result;
++
++  if ((null_value= args[0]->null_value || args[1]->null_value))
++    return 0; /* purecov: inspected */
++  if (val2 == 0)
++  {
++    signal_divide_by_null();
++    return 0;
++  }
++
++  if (args[0]->unsigned_flag)
++    result= args[1]->unsigned_flag ? 
++      ((ulonglong) value) % ((ulonglong) val2) : ((ulonglong) value) % val2;
++  else result= args[1]->unsigned_flag ?
++         value % ((ulonglong) val2) :
++         (val2 == -1) ? 0 : value % val2;
++
++  return result;
++}
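++
++/*
++  Illustrative examples: MOD(-7, 3) = -1 and MOD(7, -3) = 1 (the sign of the
++  result follows the dividend), MOD(x, 0) yields NULL via
++  signal_divide_by_null(), and MOD(x, -1) is special-cased to 0 for signed
++  arguments to avoid overflow when x = LONGLONG_MIN.
++*/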
++
++double Item_func_mod::real_op()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  double val2=  args[1]->val_real();
++  if ((null_value= args[0]->null_value || args[1]->null_value))
++    return 0.0; /* purecov: inspected */
++  if (val2 == 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  return fmod(value,val2);
++}
++
++
++my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal value1, *val1;
++  my_decimal value2, *val2;
++
++  val1= args[0]->val_decimal(&value1);
++  if ((null_value= args[0]->null_value))
++    return 0;
++  val2= args[1]->val_decimal(&value2);
++  if ((null_value= args[1]->null_value))
++    return 0;
++  switch (my_decimal_mod(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, decimal_value,
++                         val1, val2)) {
++  case E_DEC_TRUNCATED:
++  case E_DEC_OK:
++    return decimal_value;
++  case E_DEC_DIV_ZERO:
++    signal_divide_by_null();
++  default:
++    null_value= 1;
++    return 0;
++  }
++}
++
++
++void Item_func_mod::result_precision()
++{
++  decimals= MYSQL_MAX(args[0]->decimals, args[1]->decimals);
++  max_length= MYSQL_MAX(args[0]->max_length, args[1]->max_length);
++}
++
++
++void Item_func_mod::fix_length_and_dec()
++{
++  Item_num_op::fix_length_and_dec();
++  maybe_null= 1;
++  unsigned_flag= args[0]->unsigned_flag;
++}
++
++
++double Item_func_neg::real_op()
++{
++  double value= args[0]->val_real();
++  null_value= args[0]->null_value;
++  return -value;
++}
++
++
++longlong Item_func_neg::int_op()
++{
++  longlong value= args[0]->val_int();
++  null_value= args[0]->null_value;
++  return -value;
++}
++
++
++my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal val, *value= args[0]->val_decimal(&val);
++  if (!(null_value= args[0]->null_value))
++  {
++    my_decimal2decimal(value, decimal_value);
++    my_decimal_neg(decimal_value);
++    return decimal_value;
++  }
++  return 0;
++}
++
++
++void Item_func_neg::fix_num_length_and_dec()
++{
++  decimals= args[0]->decimals;
++  /* Add 1 because a sign may appear */
++  max_length= args[0]->max_length + 1;
++}
++
++
++void Item_func_neg::fix_length_and_dec()
++{
++  DBUG_ENTER("Item_func_neg::fix_length_and_dec");
++  Item_func_num1::fix_length_and_dec();
++
++  /*
++    If this is in integer context keep the context as integer if possible
++    (This is how multiplication and other integer functions work)
++    Use val() to get value as arg_type doesn't mean that item is
++    Item_int or Item_real due to existence of Item_param.
++  */
++  if (hybrid_type == INT_RESULT && args[0]->const_item())
++  {
++    longlong val= args[0]->val_int();
++    if ((ulonglong) val >= (ulonglong) LONGLONG_MIN &&
++        ((ulonglong) val != (ulonglong) LONGLONG_MIN ||
++          args[0]->type() != INT_ITEM))        
++    {
++      /*
++        Ensure that result is converted to DECIMAL, as longlong can't hold
++        the negated number
++      */
++      hybrid_type= DECIMAL_RESULT;
++      DBUG_PRINT("info", ("Type changed: DECIMAL_RESULT"));
++    }
++  }
++  unsigned_flag= 0;
++  DBUG_VOID_RETURN;
++}
++
++
++double Item_func_abs::real_op()
++{
++  double value= args[0]->val_real();
++  null_value= args[0]->null_value;
++  return fabs(value);
++}
++
++
++longlong Item_func_abs::int_op()
++{
++  longlong value= args[0]->val_int();
++  if ((null_value= args[0]->null_value))
++    return 0;
++  return (value >= 0) || unsigned_flag ? value : -value;
++}
++
++
++my_decimal *Item_func_abs::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal val, *value= args[0]->val_decimal(&val);
++  if (!(null_value= args[0]->null_value))
++  {
++    my_decimal2decimal(value, decimal_value);
++    if (decimal_value->sign())
++      my_decimal_neg(decimal_value);
++    return decimal_value;
++  }
++  return 0;
++}
++
++
++void Item_func_abs::fix_length_and_dec()
++{
++  Item_func_num1::fix_length_and_dec();
++  unsigned_flag= args[0]->unsigned_flag;
++}
++
++
++/** Gateway to natural LOG function. */
++double Item_func_ln::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value= args[0]->null_value))
++    return 0.0;
++  if (value <= 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  return log(value);
++}
++
++/** 
++  Extended but slower LOG function.
++
++  We have to check that all values are > zero and that the first one is not
++  one, as in those cases the result is not a number.
++*/ 
++double Item_func_log::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value= args[0]->null_value))
++    return 0.0;
++  if (value <= 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  if (arg_count == 2)
++  {
++    double value2= args[1]->val_real();
++    if ((null_value= args[1]->null_value))
++      return 0.0;
++    if (value2 <= 0.0 || value == 1.0)
++    {
++      signal_divide_by_null();
++      return 0.0;
++    }
++    return log(value2) / log(value);
++  }
++  return log(value);
++}
++
++double Item_func_log2::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++
++  if ((null_value=args[0]->null_value))
++    return 0.0;
++  if (value <= 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  return log(value) / M_LN2;
++}
++
++double Item_func_log10::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value= args[0]->null_value))
++    return 0.0;
++  if (value <= 0.0)
++  {
++    signal_divide_by_null();
++    return 0.0;
++  }
++  return log10(value);
++}
++
++double Item_func_exp::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0.0; /* purecov: inspected */
++  return fix_result(exp(value));
++}
++
++double Item_func_sqrt::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=(args[0]->null_value || value < 0)))
++    return 0.0; /* purecov: inspected */
++  return sqrt(value);
++}
++
++double Item_func_pow::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  double val2= args[1]->val_real();
++  if ((null_value=(args[0]->null_value || args[1]->null_value)))
++    return 0.0; /* purecov: inspected */
++  return fix_result(pow(value,val2));
++}
++
++// Trigonometric functions
++
++double Item_func_acos::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  // the volatile is for BUG #2338, to calm the optimizer down (works around a gcc bug)
++  volatile double value= args[0]->val_real();
++  if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0))))
++    return 0.0;
++  return acos(value);
++}
++
++double Item_func_asin::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  // the volatile is for BUG #2338, to calm the optimizer down (works around a gcc bug)
++  volatile double value= args[0]->val_real();
++  if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0))))
++    return 0.0;
++  return asin(value);
++}
++
++double Item_func_atan::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0.0;
++  if (arg_count == 2)
++  {
++    double val2= args[1]->val_real();
++    if ((null_value=args[1]->null_value))
++      return 0.0;
++    return fix_result(atan2(value,val2));
++  }
++  return atan(value);
++}
++
++double Item_func_cos::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0.0;
++  return cos(value);
++}
++
++double Item_func_sin::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0.0;
++  return sin(value);
++}
++
++double Item_func_tan::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0.0;
++  return fix_result(tan(value));
++}
++
++
++// Shift-functions, same as << and >> in C/C++
++
++
++longlong Item_func_shift_left::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  uint shift;
++  ulonglong res= ((ulonglong) args[0]->val_int() <<
++		  (shift=(uint) args[1]->val_int()));
++  if (args[0]->null_value || args[1]->null_value)
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  return (shift < sizeof(longlong)*8 ? (longlong) res : LL(0));
++}
++
++longlong Item_func_shift_right::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  uint shift;
++  ulonglong res= (ulonglong) args[0]->val_int() >>
++    (shift=(uint) args[1]->val_int());
++  if (args[0]->null_value || args[1]->null_value)
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  return (shift < sizeof(longlong)*8 ? (longlong) res : LL(0));
++}
++
++
++longlong Item_func_bit_neg::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  ulonglong res= (ulonglong) args[0]->val_int();
++  if ((null_value=args[0]->null_value))
++    return 0;
++  return ~res;
++}
++
++
++// Conversion functions
++
++void Item_func_integer::fix_length_and_dec()
++{
++  max_length=args[0]->max_length - args[0]->decimals+1;
++  uint tmp=float_length(decimals);
++  set_if_smaller(max_length,tmp);
++  decimals=0;
++}
++
++void Item_func_int_val::fix_num_length_and_dec()
++{
++  ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - 
++    (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2;
++  max_length= tmp_max_length > (ulonglong) max_field_size ?
++    max_field_size : (uint32) tmp_max_length;
++  uint tmp= float_length(decimals);
++  set_if_smaller(max_length,tmp);
++  decimals= 0;
++}
++
++
++void Item_func_int_val::find_num_type()
++{
++  DBUG_ENTER("Item_func_int_val::find_num_type");
++  DBUG_PRINT("info", ("name %s", func_name()));
++  switch(hybrid_type= args[0]->result_type())
++  {
++  case STRING_RESULT:
++  case REAL_RESULT:
++    hybrid_type= REAL_RESULT;
++    max_length= float_length(decimals);
++    break;
++  case INT_RESULT:
++  case DECIMAL_RESULT:
++    /*
++      -2 because the highest digit position cannot hold an arbitrary digit
++      for a longlong, and one position is reserved for the value growing
++      during the operation
++    */
++    if ((args[0]->max_length - args[0]->decimals) >=
++        (DECIMAL_LONGLONG_DIGITS - 2))
++    {
++      hybrid_type= DECIMAL_RESULT;
++    }
++    else
++    {
++      unsigned_flag= args[0]->unsigned_flag;
++      hybrid_type= INT_RESULT;
++    }
++    break;
++  default:
++    DBUG_ASSERT(0);
++  }
++  DBUG_PRINT("info", ("Type: %s",
++                      (hybrid_type == REAL_RESULT ? "REAL_RESULT" :
++                       hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" :
++                       hybrid_type == INT_RESULT ? "INT_RESULT" :
++                       "--ILLEGAL!!!--")));
++
++  DBUG_VOID_RETURN;
++}
++
++
++longlong Item_func_ceiling::int_op()
++{
++  longlong result;
++  switch (args[0]->result_type()) {
++  case INT_RESULT:
++    result= args[0]->val_int();
++    null_value= args[0]->null_value;
++    break;
++  case DECIMAL_RESULT:
++  {
++    my_decimal dec_buf, *dec;
++    if ((dec= Item_func_ceiling::decimal_op(&dec_buf)))
++      my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
++    else
++      result= 0;
++    break;
++  }
++  default:
++    result= (longlong)Item_func_ceiling::real_op();
++  };
++  return result;
++}
++
++
++double Item_func_ceiling::real_op()
++{
++  /*
++    the volatile is for BUG #3051, to calm the optimizer down (works around
++    a gcc bug)
++  */
++  volatile double value= args[0]->val_real();
++  null_value= args[0]->null_value;
++  return ceil(value);
++}
++
++
++my_decimal *Item_func_ceiling::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal val, *value= args[0]->val_decimal(&val);
++  if (!(null_value= (args[0]->null_value ||
++                     my_decimal_ceiling(E_DEC_FATAL_ERROR, value,
++                                        decimal_value) > 1)))
++    return decimal_value;
++  return 0;
++}
++
++
++longlong Item_func_floor::int_op()
++{
++  longlong result;
++  switch (args[0]->result_type()) {
++  case INT_RESULT:
++    result= args[0]->val_int();
++    null_value= args[0]->null_value;
++    break;
++  case DECIMAL_RESULT:
++  {
++    my_decimal dec_buf, *dec;
++    if ((dec= Item_func_floor::decimal_op(&dec_buf)))
++      my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
++    else
++      result= 0;
++    break;
++  }
++  default:
++    result= (longlong)Item_func_floor::real_op();
++  };
++  return result;
++}
++
++
++double Item_func_floor::real_op()
++{
++  /*
++    the volatile is for BUG #3051, to calm the optimizer down (works around
++    a gcc bug)
++  */
++  volatile double value= args[0]->val_real();
++  null_value= args[0]->null_value;
++  return floor(value);
++}
++
++
++my_decimal *Item_func_floor::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal val, *value= args[0]->val_decimal(&val);
++  if (!(null_value= (args[0]->null_value ||
++                     my_decimal_floor(E_DEC_FATAL_ERROR, value,
++                                      decimal_value) > 1)))
++    return decimal_value;
++  return 0;
++}
++
++
++void Item_func_round::fix_length_and_dec()
++{
++  int      decimals_to_set;
++  longlong val1;
++  bool     val1_unsigned;
++  
++  unsigned_flag= args[0]->unsigned_flag;
++  if (!args[1]->const_item())
++  {
++    decimals= args[0]->decimals;
++    max_length= float_length(decimals);
++    if (args[0]->result_type() == DECIMAL_RESULT)
++    {
++      max_length++;
++      hybrid_type= DECIMAL_RESULT;
++    }
++    else
++      hybrid_type= REAL_RESULT;
++    return;
++  }
++
++  val1= args[1]->val_int();
++  val1_unsigned= args[1]->unsigned_flag;
++  if (val1 < 0)
++    decimals_to_set= val1_unsigned ? INT_MAX : 0;
++  else
++    decimals_to_set= (val1 > INT_MAX) ? INT_MAX : (int) val1;
++
++  if (args[0]->decimals == NOT_FIXED_DEC)
++  {
++    decimals= MYSQL_MIN(decimals_to_set, NOT_FIXED_DEC);
++    max_length= float_length(decimals);
++    hybrid_type= REAL_RESULT;
++    return;
++  }
++  
++  switch (args[0]->result_type()) {
++  case REAL_RESULT:
++  case STRING_RESULT:
++    hybrid_type= REAL_RESULT;
++    decimals= MYSQL_MIN(decimals_to_set, NOT_FIXED_DEC);
++    max_length= float_length(decimals);
++    break;
++  case INT_RESULT:
++    if ((!decimals_to_set && truncate) || (args[0]->decimal_precision() < DECIMAL_LONGLONG_DIGITS))
++    {
++      int length_can_increase= test(!truncate && (val1 < 0) && !val1_unsigned);
++      max_length= args[0]->max_length + length_can_increase;
++      /* Here we can keep INT_RESULT */
++      hybrid_type= INT_RESULT;
++      decimals= 0;
++      break;
++    }
++    /* fall through */
++  case DECIMAL_RESULT:
++  {
++    hybrid_type= DECIMAL_RESULT;
++    decimals_to_set= MYSQL_MIN(DECIMAL_MAX_SCALE, decimals_to_set);
++    int decimals_delta= args[0]->decimals - decimals_to_set;
++    int precision= args[0]->decimal_precision();
++    int length_increase= ((decimals_delta <= 0) || truncate) ? 0:1;
++
++    precision-= decimals_delta - length_increase;
++    decimals= MYSQL_MIN(decimals_to_set, DECIMAL_MAX_SCALE);
++    max_length= my_decimal_precision_to_length_no_truncation(precision,
++                                                             decimals,
++                                                             unsigned_flag);
++    break;
++  }
++  default:
++    DBUG_ASSERT(0); /* This result type isn't handled */
++  }
++}
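++
++/*
++  Worked example (illustrative): for a DECIMAL(10,5) column d, ROUND(d, 2)
++  gets decimals_to_set= 2, decimals_delta= 3 and length_increase= 1, so
++  precision= 10 - (3 - 1)= 8 and the result type is DECIMAL(8,2), while
++  TRUNCATE(d, 2) has length_increase= 0 and becomes DECIMAL(7,2).
++*/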
++
++double my_double_round(double value, longlong dec, bool dec_unsigned,
++                       bool truncate)
++{
++  double tmp;
++  bool dec_negative= (dec < 0) && !dec_unsigned;
++  ulonglong abs_dec= dec_negative ? -dec : dec;
++  /*
++    tmp2 is here to avoid returning the value with 80-bit precision.
++    This ensures that the test round(0.1,1) = round(0.1,1) is true.
++  */
++  volatile double tmp2;
++
++  tmp=(abs_dec < array_elements(log_10) ?
++       log_10[abs_dec] : pow(10.0,(double) abs_dec));
++
++  if (dec_negative && my_isinf(tmp))
++    tmp2= 0;
++  else if (!dec_negative && my_isinf(value * tmp))
++    tmp2= value;
++  else if (truncate)
++  {
++    if (value >= 0)
++      tmp2= dec < 0 ? floor(value/tmp)*tmp : floor(value*tmp)/tmp;
++    else
++      tmp2= dec < 0 ? ceil(value/tmp)*tmp : ceil(value*tmp)/tmp;
++  }
++  else
++    tmp2=dec < 0 ? rint(value/tmp)*tmp : rint(value*tmp)/tmp;
++  return tmp2;
++}
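++
++/*
++  Illustrative values: my_double_round(2.567, 2, FALSE, FALSE) returns 2.57
++  (rint(256.7)/100), with truncate=TRUE it returns 2.56 (floor(256.7)/100),
++  and with dec= -1 it returns 0 (rint(2.567/10)*10).
++*/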
++
++
++double Item_func_round::real_op()
++{
++  double value= args[0]->val_real();
++
++  if (!(null_value= args[0]->null_value || args[1]->null_value))
++    return my_double_round(value, args[1]->val_int(), args[1]->unsigned_flag,
++                           truncate);
++
++  return 0.0;
++}
++
++/*
++  Rounds a given value to a power of 10 specified as the 'to' argument,
++  avoiding overflows when the value is close to the ulonglong range boundary.
++*/
++
++static inline ulonglong my_unsigned_round(ulonglong value, ulonglong to)
++{
++  ulonglong tmp= value / to * to;
++  return (value - tmp < (to >> 1)) ? tmp : tmp + to;
++}
++
++
++longlong Item_func_round::int_op()
++{
++  longlong value= args[0]->val_int();
++  longlong dec= args[1]->val_int();
++  decimals= 0;
++  ulonglong abs_dec;
++  if ((null_value= args[0]->null_value || args[1]->null_value))
++    return 0;
++  if ((dec >= 0) || args[1]->unsigned_flag)
++    return value; // integers have no digits after the decimal point
++
++  abs_dec= -dec;
++  longlong tmp;
++  
++  if(abs_dec >= array_elements(log_10_int))
++    return 0;
++  
++  tmp= log_10_int[abs_dec];
++  
++  if (truncate)
++    value= (unsigned_flag) ?
++      ((ulonglong) value / tmp) * tmp : (value / tmp) * tmp;
++  else
++    value= (unsigned_flag || value >= 0) ?
++      my_unsigned_round((ulonglong) value, tmp) :
++      -(longlong) my_unsigned_round((ulonglong) -value, tmp);
++  return value;
++}
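++
++/*
++  Illustrative values for the integer path: ROUND(153, -1) = 150 and
++  ROUND(155, -1) = 160 (my_unsigned_round rounds half away from zero),
++  ROUND(-155, -1) = -160, and TRUNCATE(159, -1) = 150.
++*/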
++
++
++my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value)
++{
++  my_decimal val, *value= args[0]->val_decimal(&val);
++  longlong dec= args[1]->val_int();
++  if (dec >= 0 || args[1]->unsigned_flag)
++    dec= MYSQL_MIN((ulonglong) dec, decimals);
++  else if (dec < INT_MIN)
++    dec= INT_MIN;
++    
++  if (!(null_value= (args[0]->null_value || args[1]->null_value ||
++                     my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec,
++                                      truncate, decimal_value) > 1))) 
++  {
++    decimal_value->frac= decimals;
++    return decimal_value;
++  }
++  return 0;
++}
++
++
++void Item_func_rand::seed_random(Item *arg)
++{
++  /*
++    TODO: do not reinitialize 'rand' for every execution of a PS/SP if
++    args[0] is a constant.
++  */
++  uint32 tmp= (uint32) arg->val_int();
++  randominit(rand, (uint32) (tmp*0x10001L+55555555L),
++             (uint32) (tmp*0x10000001L));
++}
++
++
++bool Item_func_rand::fix_fields(THD *thd,Item **ref)
++{
++  if (Item_real_func::fix_fields(thd, ref))
++    return TRUE;
++  used_tables_cache|= RAND_TABLE_BIT;
++  if (arg_count)
++  {					// Only use argument once in query
++    /*
++      Allocate rand structure once: we must use thd->stmt_arena
++      to create rand in proper mem_root if it's a prepared statement or
++      stored procedure.
++
++      No need to send a Rand log event if seed was given eg: RAND(seed),
++      as it will be replicated in the query as such.
++    */
++    if (!rand && !(rand= (struct rand_struct*)
++                   thd->stmt_arena->alloc(sizeof(*rand))))
++      return TRUE;
++  }
++  else
++  {
++    /*
++      Save the seed only the first time RAND() is used in the query
++      Once events are forwarded rather than recreated,
++      the following can be skipped if inside the slave thread
++    */
++    if (!thd->rand_used)
++    {
++      thd->rand_used= 1;
++      thd->rand_saved_seed1= thd->rand.seed1;
++      thd->rand_saved_seed2= thd->rand.seed2;
++    }
++    rand= &thd->rand;
++  }
++  return FALSE;
++}
++
++void Item_func_rand::update_used_tables()
++{
++  Item_real_func::update_used_tables();
++  used_tables_cache|= RAND_TABLE_BIT;
++}
++
++
++double Item_func_rand::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  if (arg_count)
++  {
++    if (!args[0]->const_item())
++      seed_random(args[0]);
++    else if (first_eval)
++    {
++      /*
++        Constantness of args[0] may be set during JOIN::optimize(), if arg[0]
++        is a field item of a "constant" table. Thus, we have to evaluate
++        seed_random() for the constant arg here rather than in fix_fields().
++      */
++      first_eval= FALSE;
++      seed_random(args[0]);
++    }
++  }
++  return my_rnd(rand);
++}
++
++longlong Item_func_sign::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  null_value=args[0]->null_value;
++  return value < 0.0 ? -1 : (value > 0 ? 1 : 0);
++}
++
++
++double Item_func_units::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value= args[0]->val_real();
++  if ((null_value=args[0]->null_value))
++    return 0;
++  return value*mul+add;
++}
++
++
++void Item_func_min_max::fix_length_and_dec()
++{
++  int max_int_part=0;
++  bool datetime_found= FALSE;
++  decimals=0;
++  max_length=0;
++  maybe_null=0;
++  cmp_type=args[0]->result_type();
++
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    set_if_bigger(max_length, args[i]->max_length);
++    set_if_bigger(decimals, args[i]->decimals);
++    set_if_bigger(max_int_part, args[i]->decimal_int_part());
++    if (args[i]->maybe_null)
++      maybe_null=1;
++    cmp_type=item_cmp_type(cmp_type,args[i]->result_type());
++    if (args[i]->result_type() != ROW_RESULT && args[i]->is_datetime())
++    {
++      datetime_found= TRUE;
++      if (!datetime_item || args[i]->field_type() == MYSQL_TYPE_DATETIME)
++        datetime_item= args[i];
++    }
++  }
++  if (cmp_type == STRING_RESULT)
++  {
++    agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1);
++    if (datetime_found)
++    {
++      thd= current_thd;
++      compare_as_dates= TRUE;
++    }
++  }
++  else if ((cmp_type == DECIMAL_RESULT) || (cmp_type == INT_RESULT))
++    max_length= my_decimal_precision_to_length_no_truncation(max_int_part +
++                                                             decimals, decimals,
++                                                             unsigned_flag);
++  else if (cmp_type == REAL_RESULT)
++    max_length= float_length(decimals);
++  cached_field_type= agg_field_type(args, arg_count);
++}
++
++
++/*
++  Compare item arguments in the DATETIME context.
++
++  SYNOPSIS
++    cmp_datetimes()
++    value [out]   found least/greatest DATE/DATETIME value
++
++  DESCRIPTION
++    Compare item arguments as DATETIME values and return the index of the
++    least/greatest argument in the arguments array.
++    The correct integer DATE/DATETIME value of the found argument is
++    stored into the value pointer, if the latter is provided.
++
++  RETURN
++   0	If one of the arguments is NULL or there was an execution error
++   #	index of the least/greatest argument
++*/
++
++uint Item_func_min_max::cmp_datetimes(ulonglong *value)
++{
++  longlong UNINIT_VAR(min_max);
++  uint min_max_idx= 0;
++
++  for (uint i=0; i < arg_count ; i++)
++  {
++    Item **arg= args + i;
++    bool is_null;
++    longlong res= get_datetime_value(thd, &arg, 0, datetime_item, &is_null);
++
++    /* Check if we need to stop (because of an error or KILL) and, if so, stop the loop */
++    if (thd->is_error())
++    {
++      null_value= 1;
++      return 0;
++    }
++
++    if ((null_value= args[i]->null_value))
++      return 0;
++    if (i == 0 || (res < min_max ? cmp_sign : -cmp_sign) > 0)
++    {
++      min_max= res;
++      min_max_idx= i;
++    }
++  }
++  if (value)
++  {
++    *value= min_max;
++    if (datetime_item->field_type() == MYSQL_TYPE_DATE)
++      *value/= 1000000L;
++  }
++  return min_max_idx;
++}
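
cmp_datetimes() relies on get_datetime_value() returning datetimes packed as
decimal YYYYMMDDhhmmss integers, which is why a DATE argument's value is
recovered by dividing by 1000000. A small worked example of that convention
(plain arithmetic only, no server code):

#include <cstdio>

int main()
{
  // '2011-05-10 18:00:36' packed as an integer, as assumed by the code above.
  unsigned long long dt = 20110510180036ULL;

  unsigned long long date_part = dt / 1000000ULL;   // 20110510 -> YYYYMMDD
  unsigned long long time_part = dt % 1000000ULL;   // 180036   -> hhmmss
  printf("datetime=%llu date=%llu time=%llu\n", dt, date_part, time_part);

  // Because the packing is lexicographic (year, month, day, ...), plain
  // integer comparison orders two values the same way LEAST()/GREATEST() do.
  unsigned long long other = 20110510175959ULL;     // '2011-05-10 17:59:59'
  printf("%llu %s %llu\n", dt, (dt > other ? ">" : "<="), other);
  return 0;
}
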
++
++
++String *Item_func_min_max::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  if (compare_as_dates)
++  {
++    String *str_res;
++    uint min_max_idx= cmp_datetimes(NULL);
++    if (null_value)
++      return 0;
++    str_res= args[min_max_idx]->val_str(str);
++    if (args[min_max_idx]->null_value)
++    {
++      // check if the call to val_str() above returns a NULL value
++      null_value= 1;
++      return NULL;
++    }
++    str_res->set_charset(collation.collation);
++    return str_res;
++  }
++  switch (cmp_type) {
++  case INT_RESULT:
++  {
++    longlong nr=val_int();
++    if (null_value)
++      return 0;
++    str->set_int(nr, unsigned_flag, &my_charset_bin);
++    return str;
++  }
++  case DECIMAL_RESULT:
++  {
++    my_decimal dec_buf, *dec_val= val_decimal(&dec_buf);
++    if (null_value)
++      return 0;
++    my_decimal2string(E_DEC_FATAL_ERROR, dec_val, 0, 0, 0, str);
++    return str;
++  }
++  case REAL_RESULT:
++  {
++    double nr= val_real();
++    if (null_value)
++      return 0; /* purecov: inspected */
++    str->set_real(nr,decimals,&my_charset_bin);
++    return str;
++  }
++  case STRING_RESULT:
++  {
++    String *UNINIT_VAR(res);
++    for (uint i=0; i < arg_count ; i++)
++    {
++      if (i == 0)
++	res=args[i]->val_str(str);
++      else
++      {
++	String *res2;
++	res2= args[i]->val_str(res == str ? &tmp_value : str);
++	if (res2)
++	{
++	  int cmp= sortcmp(res,res2,collation.collation);
++	  if ((cmp_sign < 0 ? cmp : -cmp) < 0)
++	    res=res2;
++	}
++      }
++      if ((null_value= args[i]->null_value))
++        return 0;
++    }
++    res->set_charset(collation.collation);
++    return res;
++  }
++  case ROW_RESULT:
++  default:
++    // This case should never be chosen
++    DBUG_ASSERT(0);
++    return 0;
++  }
++  return 0;					// Keep compiler happy
++}
++
++
++double Item_func_min_max::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  double value=0.0;
++  if (compare_as_dates)
++  {
++    ulonglong result= 0;
++    (void)cmp_datetimes(&result);
++    return (double)result;
++  }
++  for (uint i=0; i < arg_count ; i++)
++  {
++    if (i == 0)
++      value= args[i]->val_real();
++    else
++    {
++      double tmp= args[i]->val_real();
++      if (!args[i]->null_value && (tmp < value ? cmp_sign : -cmp_sign) > 0)
++	value=tmp;
++    }
++    if ((null_value= args[i]->null_value))
++      break;
++  }
++  return value;
++}
++
++
++longlong Item_func_min_max::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong value=0;
++  if (compare_as_dates)
++  {
++    ulonglong result= 0;
++    (void)cmp_datetimes(&result);
++    return (longlong)result;
++  }
++  for (uint i=0; i < arg_count ; i++)
++  {
++    if (i == 0)
++      value=args[i]->val_int();
++    else
++    {
++      longlong tmp=args[i]->val_int();
++      if (!args[i]->null_value && (tmp < value ? cmp_sign : -cmp_sign) > 0)
++	value=tmp;
++    }
++    if ((null_value= args[i]->null_value))
++      break;
++  }
++  return value;
++}
++
++
++my_decimal *Item_func_min_max::val_decimal(my_decimal *dec)
++{
++  DBUG_ASSERT(fixed == 1);
++  my_decimal tmp_buf, *tmp, *UNINIT_VAR(res);
++
++  if (compare_as_dates)
++  {
++    ulonglong value= 0;
++    (void)cmp_datetimes(&value);
++    ulonglong2decimal(value, dec);
++    return dec;
++  }
++  for (uint i=0; i < arg_count ; i++)
++  {
++    if (i == 0)
++      res= args[i]->val_decimal(dec);
++    else
++    {
++      tmp= args[i]->val_decimal(&tmp_buf);      // Zero if NULL
++      if (tmp && (my_decimal_cmp(tmp, res) * cmp_sign) < 0)
++      {
++        if (tmp == &tmp_buf)
++        {
++          /* Move value out of tmp_buf as this will be reused on next loop */
++          my_decimal2decimal(tmp, dec);
++          res= dec;
++        }
++        else
++          res= tmp;
++      }
++    }
++    if ((null_value= args[i]->null_value))
++    {
++      res= 0;
++      break;
++    }
++  }
++  return res;
++}
++
++
++longlong Item_func_length::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  return (longlong) res->length();
++}
++
++
++longlong Item_func_char_length::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  return (longlong) res->numchars();
++}
++
++
++longlong Item_func_coercibility::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  null_value= 0;
++  return (longlong) args[0]->collation.derivation;
++}
++
++
++void Item_func_locate::fix_length_and_dec()
++{
++  max_length= MY_INT32_NUM_DECIMAL_DIGITS;
++  agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1);
++}
++
++
++longlong Item_func_locate::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *a=args[0]->val_str(&value1);
++  String *b=args[1]->val_str(&value2);
++  if (!a || !b)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  /* must be longlong to avoid truncation */
++  longlong start=  0; 
++  longlong start0= 0;
++  my_match_t match;
++
++  if (arg_count == 3)
++  {
++    start0= start= args[2]->val_int() - 1;
++
++    if ((start < 0) || (start > a->length()))
++      return 0;
++
++    /* start is now sufficiently valid to pass to charpos function */
++    start= a->charpos((int) start);
++
++    if (start + b->length() > a->length())
++      return 0;
++  }
++
++  if (!b->length())				// Found empty string at start
++    return start + 1;
++  
++  if (!cmp_collation.collation->coll->instr(cmp_collation.collation,
++                                            a->ptr()+start,
++                                            (uint) (a->length()-start),
++                                            b->ptr(), b->length(),
++                                            &match, 1))
++    return 0;
++  return (longlong) match.mb_len + start0 + 1;
++}
++
++
++void Item_func_locate::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("locate("));
++  args[1]->print(str, query_type);
++  str->append(',');
++  args[0]->print(str, query_type);
++  if (arg_count == 3)
++  {
++    str->append(',');
++    args[2]->print(str, query_type);
++  }
++  str->append(')');
++}
++
++
++longlong Item_func_field::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++
++  if (cmp_type == STRING_RESULT)
++  {
++    String *field;
++    if (!(field= args[0]->val_str(&value)))
++      return 0;
++    for (uint i=1 ; i < arg_count ; i++)
++    {
++      String *tmp_value=args[i]->val_str(&tmp);
++      if (tmp_value && !sortcmp(field,tmp_value,cmp_collation.collation))
++        return (longlong) (i);
++    }
++  }
++  else if (cmp_type == INT_RESULT)
++  {
++    longlong val= args[0]->val_int();
++    if (args[0]->null_value)
++      return 0;
++    for (uint i=1; i < arg_count ; i++)
++    {
++      if (val == args[i]->val_int() && !args[i]->null_value)
++        return (longlong) (i);
++    }
++  }
++  else if (cmp_type == DECIMAL_RESULT)
++  {
++    my_decimal dec_arg_buf, *dec_arg,
++               dec_buf, *dec= args[0]->val_decimal(&dec_buf);
++    if (args[0]->null_value)
++      return 0;
++    for (uint i=1; i < arg_count; i++)
++    {
++      dec_arg= args[i]->val_decimal(&dec_arg_buf);
++      if (!args[i]->null_value && !my_decimal_cmp(dec_arg, dec))
++        return (longlong) (i);
++    }
++  }
++  else
++  {
++    double val= args[0]->val_real();
++    if (args[0]->null_value)
++      return 0;
++    for (uint i=1; i < arg_count ; i++)
++    {
++      if (val == args[i]->val_real() && !args[i]->null_value)
++        return (longlong) (i);
++    }
++  }
++  return 0;
++}
++
++
++void Item_func_field::fix_length_and_dec()
++{
++  maybe_null=0; max_length=3;
++  cmp_type= args[0]->result_type();
++  for (uint i=1; i < arg_count ; i++)
++    cmp_type= item_cmp_type(cmp_type, args[i]->result_type());
++  if (cmp_type == STRING_RESULT)
++    agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1);
++}
++
++
++longlong Item_func_ascii::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  return (longlong) (res->length() ? (uchar) (*res)[0] : (uchar) 0);
++}
++
++longlong Item_func_ord::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  if (!res->length()) return 0;
++#ifdef USE_MB
++  if (use_mb(res->charset()))
++  {
++    register const char *str=res->ptr();
++    register uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length());
++    if (!l)
++      return (longlong)((uchar) *str);
++    while (l--)
++      n=(n<<8)|(uint32)((uchar) *str++);
++    return (longlong) n;
++  }
++#endif
++  return (longlong) ((uchar) (*res)[0]);
++}
++
++	/* Search for a string in a string of strings separated by ',' */
++	/* Returns the position (>= 1) of the found string, or 0 if not found */
++	/* This optimizes searching in SET columns to bit testing! */
++
++void Item_func_find_in_set::fix_length_and_dec()
++{
++  decimals=0;
++  max_length=3;					// 1-999
++  if (args[0]->const_item() && args[1]->type() == FIELD_ITEM)
++  {
++    Field *field= ((Item_field*) args[1])->field;
++    if (field->real_type() == MYSQL_TYPE_SET)
++    {
++      String *find=args[0]->val_str(&value);
++      if (find)
++      {
++	enum_value= find_type(((Field_enum*) field)->typelib,find->ptr(),
++			      find->length(), 0);
++	enum_bit=0;
++	if (enum_value)
++	  enum_bit=LL(1) << (enum_value-1);
++      }
++    }
++  }
++  agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1);
++}
++
++static const char separator=',';
++
++longlong Item_func_find_in_set::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  if (enum_value)
++  {
++    ulonglong tmp=(ulonglong) args[1]->val_int();
++    if (!(null_value=args[1]->null_value || args[0]->null_value))
++    {
++      if (tmp & enum_bit)
++	return enum_value;
++    }
++    return 0L;
++  }
++
++  String *find=args[0]->val_str(&value);
++  String *buffer=args[1]->val_str(&value2);
++  if (!find || !buffer)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++
++  int diff;
++  if ((diff=buffer->length() - find->length()) >= 0)
++  {
++    my_wc_t wc= 0;
++    CHARSET_INFO *cs= cmp_collation.collation;
++    const char *str_begin= buffer->ptr();
++    const char *str_end= buffer->ptr();
++    const char *real_end= str_end+buffer->length();
++    const uchar *find_str= (const uchar *) find->ptr();
++    uint find_str_len= find->length();
++    int position= 0;
++    while (1)
++    {
++      int symbol_len;
++      if ((symbol_len= cs->cset->mb_wc(cs, &wc, (uchar*) str_end, 
++                                       (uchar*) real_end)) > 0)
++      {
++        const char *substr_end= str_end + symbol_len;
++        bool is_last_item= (substr_end == real_end);
++        bool is_separator= (wc == (my_wc_t) separator);
++        if (is_separator || is_last_item)
++        {
++          position++;
++          if (is_last_item && !is_separator)
++            str_end= substr_end;
++          if (!my_strnncoll(cs, (const uchar *) str_begin,
++                            (uint) (str_end - str_begin),
++                            find_str, find_str_len))
++            return (longlong) position;
++          else
++            str_begin= substr_end;
++        }
++        str_end= substr_end;
++      }
++      else if (str_end - str_begin == 0 &&
++               find_str_len == 0 &&
++               wc == (my_wc_t) separator)
++        return (longlong) ++position;
++      else
++        return LL(0);
++    }
++  }
++  return 0;
++}
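
When the first argument is constant and the second is a SET column,
fix_length_and_dec() above precomputes enum_value and enum_bit so that
val_int() reduces to a single bit test against the column's stored bitmap.
A worked sketch of that arithmetic with made-up values, assuming the usual
SET encoding where member N occupies bit N-1:

#include <cstdio>

int main()
{
  // Hypothetical SET('a','b','c') column and the call FIND_IN_SET('b', col).
  unsigned enum_value = 2;                                  // 'b' is member 2
  unsigned long long enum_bit = 1ULL << (enum_value - 1);   // bit 1

  unsigned long long stored = 0x6;                          // column holds 'b','c'

  // This is all Item_func_find_in_set::val_int() has to do in this case:
  long long result = (stored & enum_bit) ? (long long) enum_value : 0;
  printf("FIND_IN_SET('b', col) = %lld\n", result);         // prints 2
  return 0;
}
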
++
++longlong Item_func_bit_count::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  ulonglong value= (ulonglong) args[0]->val_int();
++  if ((null_value= args[0]->null_value))
++    return 0; /* purecov: inspected */
++  return (longlong) my_count_bits(value);
++}
++
++
++/****************************************************************************
++** Functions to handle dynamic loadable functions
++** Original source by: Alexis Mikhailov <root@medinf.chuvashia.su>
++** Rewritten by monty.
++****************************************************************************/
++
++#ifdef HAVE_DLOPEN
++
++void udf_handler::cleanup()
++{
++  if (!not_original)
++  {
++    if (initialized)
++    {
++      if (u_d->func_deinit != NULL)
++      {
++        Udf_func_deinit deinit= u_d->func_deinit;
++        (*deinit)(&initid);
++      }
++      free_udf(u_d);
++      initialized= FALSE;
++    }
++    if (buffers)				// Because of bug in ecc
++      delete [] buffers;
++    buffers= 0;
++  }
++}
++
++
++bool
++udf_handler::fix_fields(THD *thd, Item_result_field *func,
++			uint arg_count, Item **arguments)
++{
++#ifndef EMBEDDED_LIBRARY			// Avoid compiler warning
++  uchar buff[STACK_BUFF_ALLOC];			// Max argument in function
++#endif
++  DBUG_ENTER("Item_udf_func::fix_fields");
++
++  if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
++    DBUG_RETURN(TRUE);				// Fatal error flag is set!
++
++  udf_func *tmp_udf=find_udf(u_d->name.str,(uint) u_d->name.length,1);
++
++  if (!tmp_udf)
++  {
++    my_error(ER_CANT_FIND_UDF, MYF(0), u_d->name.str, errno);
++    DBUG_RETURN(TRUE);
++  }
++  u_d=tmp_udf;
++  args=arguments;
++
++  /* Fix all arguments */
++  func->maybe_null=0;
++  used_tables_cache=0;
++  const_item_cache=1;
++
++  if ((f_args.arg_count=arg_count))
++  {
++    if (!(f_args.arg_type= (Item_result*)
++	  sql_alloc(f_args.arg_count*sizeof(Item_result))))
++
++    {
++      free_udf(u_d);
++      DBUG_RETURN(TRUE);
++    }
++    uint i;
++    Item **arg,**arg_end;
++    for (i=0, arg=arguments, arg_end=arguments+arg_count;
++	 arg != arg_end ;
++	 arg++,i++)
++    {
++      if (!(*arg)->fixed &&
++          (*arg)->fix_fields(thd, arg))
++	DBUG_RETURN(1);
++      // we can't assign 'item' before, because fix_fields() can change arg
++      Item *item= *arg;
++      if (item->check_cols(1))
++	DBUG_RETURN(TRUE);
++      /*
++	TODO: We should think about this. It is not always the
++	right way just to set a UDF result to return my_charset_bin
++	if one argument has a binary sorting order.
++	The result collation should be calculated according to argument
++	derivations in some cases and should not be in other cases.
++	Moreover, some arguments can represent a numeric input,
++	which doesn't affect the result character set and collation.
++	There is no general rule for UDFs. Everything depends on
++	the particular user defined function.
++      */
++      if (item->collation.collation->state & MY_CS_BINSORT)
++	func->collation.set(&my_charset_bin);
++      if (item->maybe_null)
++	func->maybe_null=1;
++      func->with_sum_func= func->with_sum_func || item->with_sum_func;
++      used_tables_cache|=item->used_tables();
++      const_item_cache&=item->const_item();
++      f_args.arg_type[i]=item->result_type();
++    }
++    //TODO: why all following memory is not allocated with 1 call of sql_alloc?
++    if (!(buffers=new String[arg_count]) ||
++	!(f_args.args= (char**) sql_alloc(arg_count * sizeof(char *))) ||
++	!(f_args.lengths= (ulong*) sql_alloc(arg_count * sizeof(long))) ||
++	!(f_args.maybe_null= (char*) sql_alloc(arg_count * sizeof(char))) ||
++	!(num_buffer= (char*) sql_alloc(arg_count *
++					ALIGN_SIZE(sizeof(double)))) ||
++	!(f_args.attributes= (char**) sql_alloc(arg_count * sizeof(char *))) ||
++	!(f_args.attribute_lengths= (ulong*) sql_alloc(arg_count *
++						       sizeof(long))))
++    {
++      free_udf(u_d);
++      DBUG_RETURN(TRUE);
++    }
++  }
++  func->fix_length_and_dec();
++  initid.max_length=func->max_length;
++  initid.maybe_null=func->maybe_null;
++  initid.const_item=const_item_cache;
++  initid.decimals=func->decimals;
++  initid.ptr=0;
++
++  if (u_d->func_init)
++  {
++    char init_msg_buff[MYSQL_ERRMSG_SIZE];
++    char *to=num_buffer;
++    for (uint i=0; i < arg_count; i++)
++    {
++      /*
++       For a constant argument i, args->args[i] points to the argument value. 
++       For non-constant, args->args[i] is NULL.
++      */
++      f_args.args[i]= NULL;         /* Non-const unless updated below. */
++
++      f_args.lengths[i]= arguments[i]->max_length;
++      f_args.maybe_null[i]= (char) arguments[i]->maybe_null;
++      f_args.attributes[i]= arguments[i]->name;
++      f_args.attribute_lengths[i]= arguments[i]->name_length;
++
++      if (arguments[i]->const_item())
++      {
++        switch (arguments[i]->result_type()) 
++        {
++        case STRING_RESULT:
++        case DECIMAL_RESULT:
++        {
++          String *res= arguments[i]->val_str(&buffers[i]);
++          if (arguments[i]->null_value)
++            continue;
++          f_args.args[i]= (char*) res->c_ptr();
++          f_args.lengths[i]= res->length();
++          break;
++        }
++        case INT_RESULT:
++          *((longlong*) to)= arguments[i]->val_int();
++          if (arguments[i]->null_value)
++            continue;
++          f_args.args[i]= to;
++          to+= ALIGN_SIZE(sizeof(longlong));
++          break;
++        case REAL_RESULT:
++          *((double*) to)= arguments[i]->val_real();
++          if (arguments[i]->null_value)
++            continue;
++          f_args.args[i]= to;
++          to+= ALIGN_SIZE(sizeof(double));
++          break;
++        case ROW_RESULT:
++        default:
++          // This case should never be chosen
++          DBUG_ASSERT(0);
++          break;
++        }
++      }
++    }
++    Udf_func_init init= u_d->func_init;
++    if ((error=(uchar) init(&initid, &f_args, init_msg_buff)))
++    {
++      my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
++               u_d->name.str, init_msg_buff);
++      free_udf(u_d);
++      DBUG_RETURN(TRUE);
++    }
++    func->max_length=MYSQL_MIN(initid.max_length,MAX_BLOB_WIDTH);
++    func->maybe_null=initid.maybe_null;
++    const_item_cache=initid.const_item;
++    /* 
++      Keep used_tables_cache in sync with const_item_cache.
++      See the comment in Item_udf_func::update_used tables.
++    */  
++    if (!const_item_cache && !used_tables_cache)
++      used_tables_cache= RAND_TABLE_BIT;
++    func->decimals=MYSQL_MIN(initid.decimals,NOT_FIXED_DEC);
++  }
++  initialized=1;
++  if (error)
++  {
++    my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
++             u_d->name.str, ER(ER_UNKNOWN_ERROR));
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
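
The initid and f_args structures prepared above are the server's half of the
documented UDF interface; a loadable function sees them from the other side
through its xxx_init()/xxx()/xxx_deinit() entry points. A minimal sketch of
such a UDF (the name my_sum_int and its behaviour are made up for
illustration; only the UDF_INIT/UDF_ARGS fields used are the documented ones,
and the exact headers and build flags depend on the server installation):

// Compile into a shared library and load with CREATE FUNCTION.
#include <mysql.h>
#include <cstring>

extern "C" {

// Called once per statement: validate arguments and fill UDF_INIT,
// mirroring what udf_handler::fix_fields() prepares on the server side.
my_bool my_sum_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  if (args->arg_count < 1)
  {
    strcpy(message, "my_sum_int() requires at least one argument");
    return 1;                          // non-zero means "refuse to run"
  }
  for (unsigned i = 0; i < args->arg_count; i++)
    args->arg_type[i] = INT_RESULT;    // ask the server to coerce to integers
  initid->maybe_null = 1;
  return 0;
}

// Called per row, with args->args[] / args->lengths[] filled the same way
// udf_handler::get_arguments() fills them above.
long long my_sum_int(UDF_INIT *initid, UDF_ARGS *args,
                     char *is_null, char *error)
{
  long long sum = 0;
  for (unsigned i = 0; i < args->arg_count; i++)
  {
    if (!args->args[i])                // NULL argument for this row
    {
      *is_null = 1;
      return 0;
    }
    sum += *((long long *) args->args[i]);
  }
  return sum;
}

void my_sum_int_deinit(UDF_INIT *initid)
{
  // Nothing was allocated in init, so nothing to free; this is what
  // udf_handler::cleanup() ends up calling through func_deinit.
}

} // extern "C"
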
++
++
++bool udf_handler::get_arguments()
++{
++  if (error)
++    return 1;					// Got an error earlier
++  char *to= num_buffer;
++  uint str_count=0;
++  for (uint i=0; i < f_args.arg_count; i++)
++  {
++    f_args.args[i]=0;
++    switch (f_args.arg_type[i]) {
++    case STRING_RESULT:
++    case DECIMAL_RESULT:
++      {
++	String *res=args[i]->val_str(&buffers[str_count++]);
++	if (!(args[i]->null_value))
++	{
++	  f_args.args[i]=    (char*) res->ptr();
++	  f_args.lengths[i]= res->length();
++	  break;
++	}
++      }
++    case INT_RESULT:
++      *((longlong*) to) = args[i]->val_int();
++      if (!args[i]->null_value)
++      {
++	f_args.args[i]=to;
++	to+= ALIGN_SIZE(sizeof(longlong));
++      }
++      break;
++    case REAL_RESULT:
++      *((double*) to)= args[i]->val_real();
++      if (!args[i]->null_value)
++      {
++	f_args.args[i]=to;
++	to+= ALIGN_SIZE(sizeof(double));
++      }
++      break;
++    case ROW_RESULT:
++    default:
++      // This case should never be chosen
++      DBUG_ASSERT(0);
++      break;
++    }
++  }
++  return 0;
++}
++
++/**
++  @return
++    (String*)NULL in case of NULL values
++*/
++String *udf_handler::val_str(String *str,String *save_str)
++{
++  uchar is_null_tmp=0;
++  ulong res_length;
++  DBUG_ENTER("udf_handler::val_str");
++
++  if (get_arguments())
++    DBUG_RETURN(0);
++  char * (*func)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)=
++    (char* (*)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *))
++    u_d->func;
++
++  if ((res_length=str->alloced_length()) < MAX_FIELD_WIDTH)
++  {						// This happens VERY seldom
++    if (str->alloc(MAX_FIELD_WIDTH))
++    {
++      error=1;
++      DBUG_RETURN(0);
++    }
++  }
++  char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length,
++		 &is_null_tmp, &error);
++  DBUG_PRINT("info", ("udf func returned, res_length: %lu", res_length));
++  if (is_null_tmp || !res || error)		// The !res is for safety
++  {
++    DBUG_PRINT("info", ("Null or error"));
++    DBUG_RETURN(0);
++  }
++  if (res == str->ptr())
++  {
++    str->length(res_length);
++    DBUG_PRINT("exit", ("str: %s", str->ptr()));
++    DBUG_RETURN(str);
++  }
++  save_str->set(res, res_length, str->charset());
++  DBUG_PRINT("exit", ("save_str: %s", save_str->ptr()));
++  DBUG_RETURN(save_str);
++}
++
++
++/*
++  For the moment, UDF functions are returning DECIMAL values as strings
++*/
++
++my_decimal *udf_handler::val_decimal(my_bool *null_value, my_decimal *dec_buf)
++{
++  char buf[DECIMAL_MAX_STR_LENGTH+1], *end;
++  ulong res_length= DECIMAL_MAX_STR_LENGTH;
++
++  if (get_arguments())
++  {
++    *null_value=1;
++    return 0;
++  }
++  char *(*func)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)=
++    (char* (*)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *))
++    u_d->func;
++
++  char *res= func(&initid, &f_args, buf, &res_length, &is_null, &error);
++  if (is_null || error)
++  {
++    *null_value= 1;
++    return 0;
++  }
++  end= res+ res_length;
++  str2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf, &end);
++  return dec_buf;
++}
++
++
++void Item_udf_func::cleanup()
++{
++  udf.cleanup();
++  Item_func::cleanup();
++}
++
++
++void Item_udf_func::print(String *str, enum_query_type query_type)
++{
++  str->append(func_name());
++  str->append('(');
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    if (i != 0)
++      str->append(',');
++    args[i]->print_item_w_name(str, query_type);
++  }
++  str->append(')');
++}
++
++
++double Item_func_udf_float::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  DBUG_ENTER("Item_func_udf_float::val");
++  DBUG_PRINT("info",("result_type: %d  arg_count: %d",
++		     args[0]->result_type(), arg_count));
++  DBUG_RETURN(udf.val(&null_value));
++}
++
++
++String *Item_func_udf_float::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  double nr= val_real();
++  if (null_value)
++    return 0;					/* purecov: inspected */
++  str->set_real(nr,decimals,&my_charset_bin);
++  return str;
++}
++
++
++longlong Item_func_udf_int::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  DBUG_ENTER("Item_func_udf_int::val_int");
++  DBUG_RETURN(udf.val_int(&null_value));
++}
++
++
++String *Item_func_udf_int::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  longlong nr=val_int();
++  if (null_value)
++    return 0;
++  str->set_int(nr, unsigned_flag, &my_charset_bin);
++  return str;
++}
++
++
++longlong Item_func_udf_decimal::val_int()
++{
++  my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf);
++  longlong result;
++  if (null_value)
++    return 0;
++  my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
++  return result;
++}
++
++
++double Item_func_udf_decimal::val_real()
++{
++  my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf);
++  double result;
++  if (null_value)
++    return 0.0;
++  my_decimal2double(E_DEC_FATAL_ERROR, dec, &result);
++  return result;
++}
++
++
++my_decimal *Item_func_udf_decimal::val_decimal(my_decimal *dec_buf)
++{
++  DBUG_ASSERT(fixed == 1);
++  DBUG_ENTER("Item_func_udf_decimal::val_decimal");
++  DBUG_PRINT("info",("result_type: %d  arg_count: %d",
++                     args[0]->result_type(), arg_count));
++
++  DBUG_RETURN(udf.val_decimal(&null_value, dec_buf));
++}
++
++
++String *Item_func_udf_decimal::val_str(String *str)
++{
++  my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf);
++  if (null_value)
++    return 0;
++  if (str->length() < DECIMAL_MAX_STR_LENGTH)
++    str->length(DECIMAL_MAX_STR_LENGTH);
++  my_decimal_round(E_DEC_FATAL_ERROR, dec, decimals, FALSE, &dec_buf);
++  my_decimal2string(E_DEC_FATAL_ERROR, &dec_buf, 0, 0, '0', str);
++  return str;
++}
++
++
++void Item_func_udf_decimal::fix_length_and_dec()
++{
++  fix_num_length_and_dec();
++}
++
++
++/* Default max_length is max argument length */
++
++void Item_func_udf_str::fix_length_and_dec()
++{
++  DBUG_ENTER("Item_func_udf_str::fix_length_and_dec");
++  max_length=0;
++  for (uint i = 0; i < arg_count; i++)
++    set_if_bigger(max_length,args[i]->max_length);
++  DBUG_VOID_RETURN;
++}
++
++String *Item_func_udf_str::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=udf.val_str(str,&str_value);
++  null_value = !res;
++  return res;
++}
++
++
++/**
++  @note
++  This has to come last in the udf_handler methods, or C for AIX
++  version 6.0.0.0 fails to compile with debugging enabled. (Yes, really.)
++*/
++
++udf_handler::~udf_handler()
++{
++  /* Everything should be properly cleaned up by this moment. */
++  DBUG_ASSERT(not_original || !(initialized || buffers));
++}
++
++#else
++bool udf_handler::get_arguments() { return 0; }
++#endif /* HAVE_DLOPEN */
++
++/*
++** User level locks
++*/
++
++pthread_mutex_t LOCK_user_locks;
++static HASH hash_user_locks;
++
++class User_level_lock
++{
++  uchar *key;
++  size_t key_length;
++
++public:
++  int count;
++  bool locked;
++  pthread_cond_t cond;
++  my_thread_id thread_id;
++  void set_thread(THD *thd) { thread_id= thd->thread_id; }
++
++  User_level_lock(const uchar *key_arg,uint length, ulong id) 
++    :key_length(length),count(1),locked(1), thread_id(id)
++  {
++    key= (uchar*) my_memdup(key_arg,length,MYF(0));
++    pthread_cond_init(&cond,NULL);
++    if (key)
++    {
++      if (my_hash_insert(&hash_user_locks,(uchar*) this))
++      {
++	my_free(key,MYF(0));
++	key=0;
++      }
++    }
++  }
++  ~User_level_lock()
++  {
++    if (key)
++    {
++      hash_delete(&hash_user_locks,(uchar*) this);
++      my_free(key, MYF(0));
++    }
++    pthread_cond_destroy(&cond);
++  }
++  inline bool initialized() { return key != 0; }
++  friend void item_user_lock_release(User_level_lock *ull);
++  friend uchar *ull_get_key(const User_level_lock *ull, size_t *length,
++                            my_bool not_used);
++};
++
++uchar *ull_get_key(const User_level_lock *ull, size_t *length,
++                   my_bool not_used __attribute__((unused)))
++{
++  *length= ull->key_length;
++  return ull->key;
++}
++
++
++static bool item_user_lock_inited= 0;
++
++void item_user_lock_init(void)
++{
++  pthread_mutex_init(&LOCK_user_locks,MY_MUTEX_INIT_SLOW);
++  hash_init(&hash_user_locks,system_charset_info,
++	    16,0,0,(hash_get_key) ull_get_key,NULL,0);
++  item_user_lock_inited= 1;
++}
++
++void item_user_lock_free(void)
++{
++  if (item_user_lock_inited)
++  {
++    item_user_lock_inited= 0;
++    hash_free(&hash_user_locks);
++    pthread_mutex_destroy(&LOCK_user_locks);
++  }
++}
++
++void item_user_lock_release(User_level_lock *ull)
++{
++  ull->locked=0;
++  ull->thread_id= 0;
++  if (--ull->count)
++    pthread_cond_signal(&ull->cond);
++  else
++    delete ull;
++}
++
++/**
++  Wait until we are at or past the given position in the master binlog
++  on the slave.
++*/
++
++longlong Item_master_pos_wait::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  THD* thd = current_thd;
++  String *log_name = args[0]->val_str(&value);
++  int event_count= 0;
++
++  null_value=0;
++  if (thd->slave_thread || !log_name || !log_name->length())
++  {
++    null_value = 1;
++    return 0;
++  }
++#ifdef HAVE_REPLICATION
++  longlong pos = (ulong)args[1]->val_int();
++  longlong timeout = (arg_count==3) ? args[2]->val_int() : 0 ;
++  if ((event_count = active_mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
++  {
++    null_value = 1;
++    event_count=0;
++  }
++#endif
++  return event_count;
++}
++
++
++/**
++  Get a user level lock. If the thread already holds an old lock, it is released first.
++
++  @retval
++    1    : Got lock
++  @retval
++    0    : Timeout
++  @retval
++    NULL : Error
++*/
++
++longlong Item_func_get_lock::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  longlong timeout=args[1]->val_int();
++  struct timespec abstime;
++  THD *thd=current_thd;
++  User_level_lock *ull;
++  int error;
++  DBUG_ENTER("Item_func_get_lock::val_int");
++
++  /*
++    In the slave thread there is no need to get locks, everything is
++    serialized. Anyway, there is no way to make GET_LOCK() work on the slave
++    like it did on the master (i.e. make it return exactly the same value),
++    because we don't have the same concurrent-thread environment. No matter
++    what we return here, it's not guaranteed to be the same as on the master.
++  */
++  if (thd->slave_thread)
++    DBUG_RETURN(1);
++
++  pthread_mutex_lock(&LOCK_user_locks);
++
++  if (!res || !res->length())
++  {
++    pthread_mutex_unlock(&LOCK_user_locks);
++    null_value=1;
++    DBUG_RETURN(0);
++  }
++  DBUG_PRINT("info", ("lock %.*s, thd=%ld", res->length(), res->ptr(),
++                      (long) thd->real_id));
++  null_value=0;
++
++  if (thd->ull)
++  {
++    item_user_lock_release(thd->ull);
++    thd->ull=0;
++  }
++
++  if (!(ull= ((User_level_lock *) hash_search(&hash_user_locks,
++                                              (uchar*) res->ptr(),
++                                              (size_t) res->length()))))
++  {
++    ull= new User_level_lock((uchar*) res->ptr(), (size_t) res->length(),
++                             thd->thread_id);
++    if (!ull || !ull->initialized())
++    {
++      delete ull;
++      pthread_mutex_unlock(&LOCK_user_locks);
++      null_value=1;				// Probably out of memory
++      DBUG_RETURN(0);
++    }
++    ull->set_thread(thd);
++    thd->ull=ull;
++    pthread_mutex_unlock(&LOCK_user_locks);
++    DBUG_PRINT("info", ("made new lock"));
++    DBUG_RETURN(1);				// Got new lock
++  }
++  ull->count++;
++  DBUG_PRINT("info", ("ull->count=%d", ull->count));
++
++  /*
++    Structure is now initialized.  Try to get the lock.
++    Set up control struct to allow others to abort locks.
++  */
++  thd_proc_info(thd, "User lock");
++  thd->mysys_var->current_mutex= &LOCK_user_locks;
++  thd->mysys_var->current_cond=  &ull->cond;
++
++  set_timespec(abstime,timeout);
++  error= 0;
++  while (ull->locked && !thd->killed)
++  {
++    DBUG_PRINT("info", ("waiting on lock"));
++    error= pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime);
++    if (error == ETIMEDOUT || error == ETIME)
++    {
++      DBUG_PRINT("info", ("lock wait timeout"));
++      break;
++    }
++    error= 0;
++  }
++
++  if (ull->locked)
++  {
++    if (!--ull->count)
++    {
++      DBUG_ASSERT(0);
++      delete ull;				// Should never happen
++    }
++    if (!error)                                 // Killed (thd->killed != 0)
++    {
++      error=1;
++      null_value=1;				// Return NULL
++    }
++  }
++  else                                          // We got the lock
++  {
++    ull->locked=1;
++    ull->set_thread(thd);
++    ull->thread_id= thd->thread_id;
++    thd->ull=ull;
++    error=0;
++    DBUG_PRINT("info", ("got the lock"));
++  }
++  pthread_mutex_unlock(&LOCK_user_locks);
++
++  pthread_mutex_lock(&thd->mysys_var->mutex);
++  thd_proc_info(thd, 0);
++  thd->mysys_var->current_mutex= 0;
++  thd->mysys_var->current_cond=  0;
++  pthread_mutex_unlock(&thd->mysys_var->mutex);
++
++  DBUG_RETURN(!error ? 1 : 0);
++}
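
The wait loop above is the standard pthread pattern: re-check the predicate
(ull->locked) after every wake-up, because pthread_cond_timedwait() can wake
spuriously, and treat a timeout as the only terminal condition. A stripped-down,
self-contained version of the same pattern (demo_lock and demo_get_lock are
illustrative names, and the KILL check is omitted):

#include <pthread.h>
#include <ctime>
#include <cerrno>

struct demo_lock {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  bool            locked;
};

// Returns true if the lock was obtained, false on timeout.
static bool demo_get_lock(demo_lock *l, long timeout_sec)
{
  timespec abstime;
  clock_gettime(CLOCK_REALTIME, &abstime);   // absolute deadline, as set_timespec() builds
  abstime.tv_sec += timeout_sec;

  pthread_mutex_lock(&l->mutex);
  int error = 0;
  while (l->locked)                          // predicate re-checked on every wake-up
  {
    error = pthread_cond_timedwait(&l->cond, &l->mutex, &abstime);
    if (error == ETIMEDOUT)
      break;                                 // give up: still locked after the deadline
    error = 0;                               // spurious wake-up or signal: loop again
  }
  bool got_it = !l->locked;
  if (got_it)
    l->locked = true;
  pthread_mutex_unlock(&l->mutex);
  return got_it;
}

int main()
{
  demo_lock l = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
  return demo_get_lock(&l, 1) ? 0 : 1;       // lock is free, so this succeeds at once
}
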
++
++
++/**
++  Release a user level lock.
++  @return
++    - 1 if lock released
++    - 0 if the lock exists but was not held by this thread
++    - (SQL) NULL if no such lock
++*/
++
++longlong Item_func_release_lock::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  User_level_lock *ull;
++  longlong result;
++  THD *thd=current_thd;
++  DBUG_ENTER("Item_func_release_lock::val_int");
++  if (!res || !res->length())
++  {
++    null_value=1;
++    DBUG_RETURN(0);
++  }
++  DBUG_PRINT("info", ("lock %.*s", res->length(), res->ptr()));
++  null_value=0;
++
++  result=0;
++  pthread_mutex_lock(&LOCK_user_locks);
++  if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
++                                             (const uchar*) res->ptr(),
++                                             (size_t) res->length()))))
++  {
++    null_value=1;
++  }
++  else
++  {
++    DBUG_PRINT("info", ("ull->locked=%d ull->thread=%lu thd=%lu", 
++                        (int) ull->locked,
++                        (long)ull->thread_id,
++                        (long)thd->thread_id));
++    if (ull->locked && current_thd->thread_id == ull->thread_id)
++    {
++      DBUG_PRINT("info", ("release lock"));
++      result=1;					// Release is ok
++      item_user_lock_release(ull);
++      thd->ull=0;
++    }
++  }
++  pthread_mutex_unlock(&LOCK_user_locks);
++  DBUG_RETURN(result);
++}
++
++
++longlong Item_func_last_insert_id::val_int()
++{
++  THD *thd= current_thd;
++  DBUG_ASSERT(fixed == 1);
++  if (arg_count)
++  {
++    longlong value= args[0]->val_int();
++    null_value= args[0]->null_value;
++    /*
++      LAST_INSERT_ID(X) must affect the client's mysql_insert_id() as
++      documented in the manual. We don't want to touch
++      first_successful_insert_id_in_cur_stmt because it would make
++      LAST_INSERT_ID(X) take precedence over a generated auto_increment
++      value for this row.
++    */
++    thd->arg_of_last_insert_id_function= TRUE;
++    thd->first_successful_insert_id_in_prev_stmt= value;
++    return value;
++  }
++  return thd->read_first_successful_insert_id_in_prev_stmt();
++}
++
++
++bool Item_func_last_insert_id::fix_fields(THD *thd, Item **ref)
++{
++  thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
++  return Item_int_func::fix_fields(thd, ref);
++}
++
++
++/* This function is just used to test the speed of different functions */
++
++longlong Item_func_benchmark::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  char buff[MAX_FIELD_WIDTH];
++  String tmp(buff,sizeof(buff), &my_charset_bin);
++  my_decimal tmp_decimal;
++  THD *thd=current_thd;
++  ulonglong loop_count;
++
++  loop_count= (ulonglong) args[0]->val_int();
++
++  if (args[0]->null_value ||
++      (!args[0]->unsigned_flag && (((longlong) loop_count) < 0)))
++  {
++    if (!args[0]->null_value)
++    {
++      char buff[22];
++      llstr(((longlong) loop_count), buff);
++      push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                          ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
++                          "count", buff, "benchmark");
++    }
++
++    null_value= 1;
++    return 0;
++  }
++
++  null_value=0;
++  for (ulonglong loop=0 ; loop < loop_count && !thd->killed; loop++)
++  {
++    switch (args[1]->result_type()) {
++    case REAL_RESULT:
++      (void) args[1]->val_real();
++      break;
++    case INT_RESULT:
++      (void) args[1]->val_int();
++      break;
++    case STRING_RESULT:
++      (void) args[1]->val_str(&tmp);
++      break;
++    case DECIMAL_RESULT:
++      (void) args[1]->val_decimal(&tmp_decimal);
++      break;
++    case ROW_RESULT:
++    default:
++      // This case should never be chosen
++      DBUG_ASSERT(0);
++      return 0;
++    }
++  }
++  return 0;
++}
++
++
++void Item_func_benchmark::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("benchmark("));
++  args[0]->print(str, query_type);
++  str->append(',');
++  args[1]->print(str, query_type);
++  str->append(')');
++}
++
++
++/** This function is just used to create tests with time gaps. */
++
++longlong Item_func_sleep::val_int()
++{
++  THD *thd= current_thd;
++  struct timespec abstime;
++  pthread_cond_t cond;
++  int error;
++
++  DBUG_ASSERT(fixed == 1);
++
++  double time= args[0]->val_real();
++  /*
++    On 64-bit OSX pthread_cond_timedwait() waits forever
++    if the passed abstime has already been exceeded by
++    the system time.
++    When given a very short timeout (< 10 microseconds), just return
++    immediately.
++    We assume that the lines between this test and the call
++    to pthread_cond_timedwait() will be executed in less than 0.00001 sec.
++  */
++  if (time < 0.00001)
++    return 0;
++    
++  set_timespec_nsec(abstime, (ulonglong)(time * ULL(1000000000)));
++
++  pthread_cond_init(&cond, NULL);
++  pthread_mutex_lock(&LOCK_user_locks);
++
++  thd_proc_info(thd, "User sleep");
++  thd->mysys_var->current_mutex= &LOCK_user_locks;
++  thd->mysys_var->current_cond=  &cond;
++
++  error= 0;
++  while (!thd->killed)
++  {
++    error= pthread_cond_timedwait(&cond, &LOCK_user_locks, &abstime);
++    if (error == ETIMEDOUT || error == ETIME)
++      break;
++    error= 0;
++  }
++  thd_proc_info(thd, 0);
++  pthread_mutex_unlock(&LOCK_user_locks);
++  pthread_mutex_lock(&thd->mysys_var->mutex);
++  thd->mysys_var->current_mutex= 0;
++  thd->mysys_var->current_cond=  0;
++  pthread_mutex_unlock(&thd->mysys_var->mutex);
++
++  pthread_cond_destroy(&cond);
++
++  return test(!error);		// Return 1 if killed, 0 if the sleep completed
++}
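
SLEEP() accepts fractional seconds, so the absolute deadline has to be built by
splitting the double into whole seconds and nanoseconds and normalizing the
carry; that is roughly what the set_timespec_nsec() macro does for the code
above. A sketch of that conversion (deadline_after() is an illustrative helper,
not the mysys macro itself):

#include <ctime>
#include <cstdio>

// Turn "now + seconds" into an absolute timespec suitable for
// pthread_cond_timedwait().
static timespec deadline_after(double seconds)
{
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);

  long long nsec = (long long) (seconds * 1000000000.0);
  ts.tv_sec  += nsec / 1000000000LL;
  ts.tv_nsec += nsec % 1000000000LL;
  if (ts.tv_nsec >= 1000000000L)             // normalize the carry
  {
    ts.tv_sec  += 1;
    ts.tv_nsec -= 1000000000L;
  }
  return ts;
}

int main()
{
  // The guard above skips waits shorter than 10 microseconds entirely,
  // so a deadline is only ever computed for "real" sleeps like this one.
  timespec t = deadline_after(1.25);
  printf("deadline: %lld s + %ld ns\n", (long long) t.tv_sec, t.tv_nsec);
  return 0;
}
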
++
++
++#define extra_size sizeof(double)
++
++static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
++				    bool create_if_not_exists)
++{
++  user_var_entry *entry;
++
++  if (!(entry = (user_var_entry*) hash_search(hash, (uchar*) name.str,
++					      name.length)) &&
++      create_if_not_exists)
++  {
++    uint size=ALIGN_SIZE(sizeof(user_var_entry))+name.length+1+extra_size;
++    if (!hash_inited(hash))
++      return 0;
++    if (!(entry = (user_var_entry*) my_malloc(size,MYF(MY_WME))))
++      return 0;
++    entry->name.str=(char*) entry+ ALIGN_SIZE(sizeof(user_var_entry))+
++      extra_size;
++    entry->name.length=name.length;
++    entry->value=0;
++    entry->length=0;
++    entry->update_query_id=0;
++    entry->collation.set(NULL, DERIVATION_IMPLICIT, 0);
++    entry->unsigned_flag= 0;
++    /*
++      If we are here, we were called from a SET or a query which sets a
++      variable. Imagine it is this:
++      INSERT INTO t SELECT @a:=10, @a:=@a+1.
++      Then we have an Item_func_get_user_var (because of the @a+1), so we
++      think we have to write the value of @a to the binlog. But before that,
++      we have an Item_func_set_user_var to create @a (@a:=10), in which we mark
++      the variable as "already logged" (line below) so that it won't be logged
++      by Item_func_get_user_var (because that's not necessary).
++    */
++    entry->used_query_id=current_thd->query_id;
++    entry->type=STRING_RESULT;
++    memcpy(entry->name.str, name.str, name.length+1);
++    if (my_hash_insert(hash,(uchar*) entry))
++    {
++      my_free((char*) entry,MYF(0));
++      return 0;
++    }
++  }
++  return entry;
++}
++
++
++void Item_func_set_user_var::cleanup()
++{
++  Item_func::cleanup();
++  entry= NULL;
++}
++
++
++bool Item_func_set_user_var::set_entry(THD *thd, bool create_if_not_exists)
++{
++  if (entry && thd->thread_id == entry_thread_id)
++    goto end; // update entry->update_query_id for PS
++  if (!(entry= get_variable(&thd->user_vars, name, create_if_not_exists)))
++  {
++    entry_thread_id= 0;
++    return TRUE;
++  }
++  entry_thread_id= thd->thread_id;
++  /* 
++     Remember the last query which updated it, this way a query can later know
++     if this variable is a constant item in the query (it is if update_query_id
++     is different from query_id).
++  */
++end:
++  entry->update_query_id= thd->query_id;
++  return FALSE;
++}
++
++
++/*
++  This is called when a user variable is updated (in a SET command or a
++  query like SELECT @a:= ).
++*/
++
++bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref)
++{
++  DBUG_ASSERT(fixed == 0);
++  /* fix_fields will call Item_func_set_user_var::fix_length_and_dec */
++  if (Item_func::fix_fields(thd, ref) || set_entry(thd, TRUE))
++    return TRUE;
++  /*
++    As it is wrong and confusing to associate any 
++    character set with NULL, @a should be latin2
++    after this query sequence:
++
++      SET @a=_latin2'string';
++      SET @a=NULL;
++
++    I.e. the second query should not change the charset
++    to the current default value, but should keep the 
++    original value assigned during the first query.
++    In order to do it, we don't copy charset
++    from the argument if the argument is NULL
++    and the variable has previously been initialized.
++  */
++  null_item= (args[0]->type() == NULL_ITEM);
++  if (!entry->collation.collation || !null_item)
++    entry->collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT);
++  collation.set(entry->collation.collation, DERIVATION_IMPLICIT);
++  cached_result_type= args[0]->result_type();
++  return FALSE;
++}
++
++
++void
++Item_func_set_user_var::fix_length_and_dec()
++{
++  maybe_null=args[0]->maybe_null;
++  max_length=args[0]->max_length;
++  decimals=args[0]->decimals;
++  unsigned_flag= args[0]->unsigned_flag;
++  collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT);
++}
++
++
++/*
++  Mark field in read_map
++
++  NOTES
++    This is used by filesort to register used fields in a temporary
++    column read set or to register used fields in a view
++*/
++
++bool Item_func_set_user_var::register_field_in_read_map(uchar *arg)
++{
++  if (result_field)
++  {
++    TABLE *table= (TABLE *) arg;
++    if (result_field->table == table || !table)
++      bitmap_set_bit(result_field->table->read_set, result_field->field_index);
++  }
++  return 0;
++}
++
++
++/**
++  Set value to user variable.
++
++  @param entry          pointer to structure representing variable
++  @param set_null       should we set NULL value ?
++  @param ptr            pointer to buffer with new value
++  @param length         length of new value
++  @param type           type of new value
++  @param cs             charset info for new value
++  @param dv             derivation for new value
++  @param unsigned_arg   indicates if a value of type INT_RESULT is unsigned
++
++  @retval
++    false   success
++  @retval
++    true    failure
++*/
++
++static bool
++update_hash(user_var_entry *entry, bool set_null, void *ptr, uint length,
++            Item_result type, CHARSET_INFO *cs, Derivation dv,
++            bool unsigned_arg)
++{
++  if (set_null)
++  {
++    char *pos= (char*) entry+ ALIGN_SIZE(sizeof(user_var_entry));
++    if (entry->value && entry->value != pos)
++      my_free(entry->value,MYF(0));
++    entry->value= 0;
++    entry->length= 0;
++  }
++  else
++  {
++    if (type == STRING_RESULT)
++      length++;					// Store strings with end \0
++    if (length <= extra_size)
++    {
++      /* Save value in value struct */
++      char *pos= (char*) entry+ ALIGN_SIZE(sizeof(user_var_entry));
++      if (entry->value != pos)
++      {
++	if (entry->value)
++	  my_free(entry->value,MYF(0));
++	entry->value=pos;
++      }
++    }
++    else
++    {
++      /* Allocate variable */
++      if (entry->length != length)
++      {
++	char *pos= (char*) entry+ ALIGN_SIZE(sizeof(user_var_entry));
++	if (entry->value == pos)
++	  entry->value=0;
++        entry->value= (char*) my_realloc(entry->value, length,
++                                         MYF(MY_ALLOW_ZERO_PTR | MY_WME));
++        if (!entry->value)
++	  return 1;
++      }
++    }
++    if (type == STRING_RESULT)
++    {
++      length--;					// Fix length change above
++      entry->value[length]= 0;			// Store end \0
++    }
++    memmove(entry->value, ptr, length);
++    if (type == DECIMAL_RESULT)
++      ((my_decimal*)entry->value)->fix_buffer_pointer();
++    entry->length= length;
++    entry->collation.set(cs, dv);
++    entry->unsigned_flag= unsigned_arg;
++  }
++  entry->type=type;
++  return 0;
++}
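
get_variable() above puts the user_var_entry header, extra_size
(sizeof(double)) spare bytes and the variable name into a single my_malloc()
block, and update_hash() stores any value of at most extra_size bytes in that
spare area, falling back to my_realloc() only for longer string/decimal
values. A self-contained sketch of the same layout trick with plain malloc
(the demo_* names are illustrative only):

#include <cstdlib>
#include <cstring>
#include <cstdio>

// Round up to a multiple of 8, standing in for the server's ALIGN_SIZE().
static size_t align_size(size_t n) { return (n + 7) & ~(size_t) 7; }

struct demo_var_entry {
  char  *name;
  char  *value;
  size_t length;
};

static const size_t extra_size = sizeof(double);   // same constant as above

// One allocation holds: [entry struct][extra_size spare bytes][name + '\0'].
static demo_var_entry *demo_get_variable(const char *name)
{
  size_t name_len = strlen(name);
  size_t size = align_size(sizeof(demo_var_entry)) + name_len + 1 + extra_size;
  demo_var_entry *e = (demo_var_entry *) malloc(size);
  if (!e) return NULL;
  e->name = (char *) e + align_size(sizeof(demo_var_entry)) + extra_size;
  memcpy(e->name, name, name_len + 1);
  e->value = NULL;
  e->length = 0;
  return e;
}

// Small values reuse the spare bytes right after the struct, like update_hash().
static void demo_store(demo_var_entry *e, const void *ptr, size_t length)
{
  char *spare = (char *) e + align_size(sizeof(demo_var_entry));
  e->value = (length <= extra_size) ? spare : (char *) malloc(length);
  memcpy(e->value, ptr, length);
  e->length = length;
}

int main()
{
  demo_var_entry *e = demo_get_variable("a");
  double v = 3.14;
  demo_store(e, &v, sizeof(v));               // fits in the spare bytes: no extra malloc
  printf("@a = %f (stored in place: %d)\n", *(double *) e->value,
         e->value == (char *) e + align_size(sizeof(demo_var_entry)));
  free(e);
  return 0;
}
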
++
++
++bool
++Item_func_set_user_var::update_hash(void *ptr, uint length,
++                                    Item_result res_type,
++                                    CHARSET_INFO *cs, Derivation dv,
++                                    bool unsigned_arg)
++{
++  /*
++    If we set a variable explicitly to NULL then keep the old
++    result type of the variable
++  */
++  if ((null_value= args[0]->null_value) && null_item)
++    res_type= entry->type;                      // Don't change type of item
++  if (::update_hash(entry, (null_value= args[0]->null_value),
++                    ptr, length, res_type, cs, dv, unsigned_arg))
++  {
++    current_thd->fatal_error();     // Probably end of memory
++    null_value= 1;
++    return 1;
++  }
++  return 0;
++}
++
++
++/** Get the value of a variable as a double. */
++
++double user_var_entry::val_real(my_bool *null_value)
++{
++  if ((*null_value= (value == 0)))
++    return 0.0;
++
++  switch (type) {
++  case REAL_RESULT:
++    return *(double*) value;
++  case INT_RESULT:
++    return (double) *(longlong*) value;
++  case DECIMAL_RESULT:
++  {
++    double result;
++    my_decimal2double(E_DEC_FATAL_ERROR, (my_decimal *)value, &result);
++    return result;
++  }
++  case STRING_RESULT:
++    return my_atof(value);                      // This is null terminated
++  case ROW_RESULT:
++    DBUG_ASSERT(1);				// Impossible
++    break;
++  }
++  return 0.0;					// Impossible
++}
++
++
++/** Get the value of a variable as an integer. */
++
++longlong user_var_entry::val_int(my_bool *null_value) const
++{
++  if ((*null_value= (value == 0)))
++    return LL(0);
++
++  switch (type) {
++  case REAL_RESULT:
++    return (longlong) *(double*) value;
++  case INT_RESULT:
++    return *(longlong*) value;
++  case DECIMAL_RESULT:
++  {
++    longlong result;
++    my_decimal2int(E_DEC_FATAL_ERROR, (my_decimal *)value, 0, &result);
++    return result;
++  }
++  case STRING_RESULT:
++  {
++    int error;
++    return my_strtoll10(value, (char**) 0, &error);// String is null terminated
++  }
++  case ROW_RESULT:
++    DBUG_ASSERT(1);				// Impossible
++    break;
++  }
++  return LL(0);					// Impossible
++}
++
++
++/** Get the value of a variable as a string. */
++
++String *user_var_entry::val_str(my_bool *null_value, String *str,
++				uint decimals)
++{
++  if ((*null_value= (value == 0)))
++    return (String*) 0;
++
++  switch (type) {
++  case REAL_RESULT:
++    str->set_real(*(double*) value, decimals, &my_charset_bin);
++    break;
++  case INT_RESULT:
++    if (!unsigned_flag)
++      str->set(*(longlong*) value, &my_charset_bin);
++    else
++      str->set(*(ulonglong*) value, &my_charset_bin);
++    break;
++  case DECIMAL_RESULT:
++    my_decimal2string(E_DEC_FATAL_ERROR, (my_decimal *)value, 0, 0, 0, str);
++    break;
++  case STRING_RESULT:
++    if (str->copy(value, length, collation.collation))
++      str= 0;					// EOM error
++  case ROW_RESULT:
++    DBUG_ASSERT(1);				// Impossible
++    break;
++  }
++  return(str);
++}
++
++/** Get the value of a variable as a decimal. */
++
++my_decimal *user_var_entry::val_decimal(my_bool *null_value, my_decimal *val)
++{
++  if ((*null_value= (value == 0)))
++    return 0;
++
++  switch (type) {
++  case REAL_RESULT:
++    double2my_decimal(E_DEC_FATAL_ERROR, *(double*) value, val);
++    break;
++  case INT_RESULT:
++    int2my_decimal(E_DEC_FATAL_ERROR, *(longlong*) value, 0, val);
++    break;
++  case DECIMAL_RESULT:
++    my_decimal2decimal((my_decimal *) value, val);
++    break;
++  case STRING_RESULT:
++    str2my_decimal(E_DEC_FATAL_ERROR, value, length, collation.collation, val);
++    break;
++  case ROW_RESULT:
++    DBUG_ASSERT(1);				// Impossible
++    break;
++  }
++  return(val);
++}
++
++/**
++  This function is invoked on SET \@variable or
++  \@variable:= expression.
++
++  Evaluate (and check expression), store results.
++
++  @note
++    For now it always returns OK. All problems with value evaluation
++    will be caught by the thd->is_error() check in sql_set_variables().
++
++  @retval
++    FALSE OK.
++*/
++
++bool
++Item_func_set_user_var::check(bool use_result_field)
++{
++  DBUG_ENTER("Item_func_set_user_var::check");
++  if (use_result_field && !result_field)
++    use_result_field= FALSE;
++
++  switch (cached_result_type) {
++  case REAL_RESULT:
++  {
++    save_result.vreal= use_result_field ? result_field->val_real() :
++                        args[0]->val_real();
++    break;
++  }
++  case INT_RESULT:
++  {
++    save_result.vint= use_result_field ? result_field->val_int() :
++                       args[0]->val_int();
++    unsigned_flag= use_result_field ? ((Field_num*)result_field)->unsigned_flag:
++                    args[0]->unsigned_flag;
++    break;
++  }
++  case STRING_RESULT:
++  {
++    save_result.vstr= use_result_field ? result_field->val_str(&value) :
++                       args[0]->val_str(&value);
++    break;
++  }
++  case DECIMAL_RESULT:
++  {
++    save_result.vdec= use_result_field ?
++                       result_field->val_decimal(&decimal_buff) :
++                       args[0]->val_decimal(&decimal_buff);
++    break;
++  }
++  case ROW_RESULT:
++  default:
++    // This case should never be chosen
++    DBUG_ASSERT(0);
++    break;
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  @brief Evaluate and store item's result.
++  This function is invoked on "SELECT ... INTO @var ...".
++  
++  @param    item    An item to get value from.
++*/
++
++void Item_func_set_user_var::save_item_result(Item *item)
++{
++  DBUG_ENTER("Item_func_set_user_var::save_item_result");
++
++  switch (cached_result_type) {
++  case REAL_RESULT:
++    save_result.vreal= item->val_result();
++    break;
++  case INT_RESULT:
++    save_result.vint= item->val_int_result();
++    unsigned_flag= item->unsigned_flag;
++    break;
++  case STRING_RESULT:
++    save_result.vstr= item->str_result(&value);
++    break;
++  case DECIMAL_RESULT:
++    save_result.vdec= item->val_decimal_result(&decimal_buff);
++    break;
++  case ROW_RESULT:
++  default:
++    // Should never happen
++    DBUG_ASSERT(0);
++    break;
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  This function is invoked on
++  SET \@variable or \@variable:= expression.
++
++  @note
++    We have to store the expression as such in the variable, independent of
++    the value method used by the user
++
++  @retval
++    0	OK
++  @retval
++    1	EOM Error
++
++*/
++
++bool
++Item_func_set_user_var::update()
++{
++  bool res= 0;
++  DBUG_ENTER("Item_func_set_user_var::update");
++
++  switch (cached_result_type) {
++  case REAL_RESULT:
++  {
++    res= update_hash((void*) &save_result.vreal,sizeof(save_result.vreal),
++		     REAL_RESULT, &my_charset_bin, DERIVATION_IMPLICIT, 0);
++    break;
++  }
++  case INT_RESULT:
++  {
++    res= update_hash((void*) &save_result.vint, sizeof(save_result.vint),
++                     INT_RESULT, &my_charset_bin, DERIVATION_IMPLICIT,
++                     unsigned_flag);
++    break;
++  }
++  case STRING_RESULT:
++  {
++    if (!save_result.vstr)					// Null value
++      res= update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin,
++		       DERIVATION_IMPLICIT, 0);
++    else
++      res= update_hash((void*) save_result.vstr->ptr(),
++		       save_result.vstr->length(), STRING_RESULT,
++		       save_result.vstr->charset(),
++		       DERIVATION_IMPLICIT, 0);
++    break;
++  }
++  case DECIMAL_RESULT:
++  {
++    if (!save_result.vdec)					// Null value
++      res= update_hash((void*) 0, 0, DECIMAL_RESULT, &my_charset_bin,
++                       DERIVATION_IMPLICIT, 0);
++    else
++      res= update_hash((void*) save_result.vdec,
++                       sizeof(my_decimal), DECIMAL_RESULT,
++                       &my_charset_bin, DERIVATION_IMPLICIT, 0);
++    break;
++  }
++  case ROW_RESULT:
++  default:
++    // This case should never be chosen
++    DBUG_ASSERT(0);
++    break;
++  }
++  DBUG_RETURN(res);
++}
++
++
++double Item_func_set_user_var::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(0);
++  update();					// Store expression
++  return entry->val_real(&null_value);
++}
++
++longlong Item_func_set_user_var::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(0);
++  update();					// Store expression
++  return entry->val_int(&null_value);
++}
++
++String *Item_func_set_user_var::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  check(0);
++  update();					// Store expression
++  return entry->val_str(&null_value, str, decimals);
++}
++
++
++my_decimal *Item_func_set_user_var::val_decimal(my_decimal *val)
++{
++  DBUG_ASSERT(fixed == 1);
++  check(0);
++  update();					// Store expression
++  return entry->val_decimal(&null_value, val);
++}
++
++
++double Item_func_set_user_var::val_result()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return entry->val_real(&null_value);
++}
++
++longlong Item_func_set_user_var::val_int_result()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return entry->val_int(&null_value);
++}
++
++bool Item_func_set_user_var::val_bool_result()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return entry->val_int(&null_value) != 0;
++}
++
++String *Item_func_set_user_var::str_result(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return entry->val_str(&null_value, str, decimals);
++}
++
++
++my_decimal *Item_func_set_user_var::val_decimal_result(my_decimal *val)
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return entry->val_decimal(&null_value, val);
++}
++
++
++bool Item_func_set_user_var::is_null_result()
++{
++  DBUG_ASSERT(fixed == 1);
++  check(TRUE);
++  update();					// Store expression
++  return is_null();
++}
++
++
++void Item_func_set_user_var::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("(@"));
++  str->append(name.str, name.length);
++  str->append(STRING_WITH_LEN(":="));
++  args[0]->print(str, query_type);
++  str->append(')');
++}
++
++
++void Item_func_set_user_var::print_as_stmt(String *str,
++                                           enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("set @"));
++  str->append(name.str, name.length);
++  str->append(STRING_WITH_LEN(":="));
++  args[0]->print(str, query_type);
++  str->append(')');
++}
++
++bool Item_func_set_user_var::send(Protocol *protocol, String *str_arg)
++{
++  if (result_field)
++  {
++    check(1);
++    update();
++    return protocol->store(result_field);
++  }
++  return Item::send(protocol, str_arg);
++}
++
++void Item_func_set_user_var::make_field(Send_field *tmp_field)
++{
++  if (result_field)
++  {
++    result_field->make_field(tmp_field);
++    DBUG_ASSERT(tmp_field->table_name != 0);
++    if (Item::name)
++      tmp_field->col_name=Item::name;               // Use user supplied name
++  }
++  else
++    Item::make_field(tmp_field);
++}
++
++
++/*
++  Save the value of a user variable into a field
++
++  SYNOPSIS
++    save_in_field()
++      field           target field to save the value to
++      no_conversion   flag indicating whether conversions are allowed
++
++  DESCRIPTION
++    Save the function value into a field and update the user variable
++    accordingly. If a result field is defined and the target field doesn't
++    coincide with it then the value from the result field will be used as
++    the new value of the user variable.
++
++    The reason to have this method rather than simply using the result
++    field in the val_xxx() methods is that the value from the result field
++    cannot always be used when the result field is defined.
++    Let's consider the following cases:
++    1) when filling a tmp table the result field is defined, but its value
++    is undefined because it has yet to be produced. Thus we can't use it.
++    2) on execution of an INSERT ... SELECT statement the save_in_field()
++    function will be called to fill the data in the new record. If the SELECT
++    part uses a tmp table then the result field is defined and should be
++    used in order to get the correct result.
++
++    The difference between the SET_USER_VAR function and regular functions
++    like CONCAT is that the Item_func objects for the regular functions are
++    replaced by Item_field objects after the values of these functions have
++    been stored in a tmp table. Yet an object of the Item_field class cannot
++    be used to update a user variable.
++    Due to this we have to handle the result field in a special way here and
++    in the Item_func_set_user_var::send() function.
++
++  RETURN VALUES
++    FALSE       Ok
++    TRUE        Error
++*/
++
++int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions,
++                                          bool can_use_result_field)
++{
++  bool use_result_field= (!can_use_result_field ? 0 :
++                          (result_field && result_field != field));
++  int error;
++
++  /* Update the value of the user variable */
++  check(use_result_field);
++  update();
++
++  if (result_type() == STRING_RESULT ||
++      (result_type() == REAL_RESULT &&
++      field->result_type() == STRING_RESULT))
++  {
++    String *result;
++    CHARSET_INFO *cs= collation.collation;
++    char buff[MAX_FIELD_WIDTH];		// Alloc buffer for small columns
++    str_value.set_quick(buff, sizeof(buff), cs);
++    result= entry->val_str(&null_value, &str_value, decimals);
++
++    if (null_value)
++    {
++      str_value.set_quick(0, 0, cs);
++      return set_field_to_null_with_conversions(field, no_conversions);
++    }
++
++    /* NOTE: If null_value == FALSE, "result" must not be NULL.  */
++
++    field->set_notnull();
++    error=field->store(result->ptr(),result->length(),cs);
++    str_value.set_quick(0, 0, cs);
++  }
++  else if (result_type() == REAL_RESULT)
++  {
++    double nr= entry->val_real(&null_value);
++    if (null_value)
++      return set_field_to_null(field);
++    field->set_notnull();
++    error=field->store(nr);
++  }
++  else if (result_type() == DECIMAL_RESULT)
++  {
++    my_decimal decimal_value;
++    my_decimal *val= entry->val_decimal(&null_value, &decimal_value);
++    if (null_value)
++      return set_field_to_null(field);
++    field->set_notnull();
++    error=field->store_decimal(val);
++  }
++  else
++  {
++    longlong nr= entry->val_int(&null_value);
++    if (null_value)
++      return set_field_to_null_with_conversions(field, no_conversions);
++    field->set_notnull();
++    error=field->store(nr, unsigned_flag);
++  }
++  return error;
++}
++
++
++String *
++Item_func_get_user_var::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  DBUG_ENTER("Item_func_get_user_var::val_str");
++  if (!var_entry)
++    DBUG_RETURN((String*) 0);			// No such variable
++  DBUG_RETURN(var_entry->val_str(&null_value, str, decimals));
++}
++
++
++double Item_func_get_user_var::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  if (!var_entry)
++    return 0.0;					// No such variable
++  return (var_entry->val_real(&null_value));
++}
++
++
++my_decimal *Item_func_get_user_var::val_decimal(my_decimal *dec)
++{
++  DBUG_ASSERT(fixed == 1);
++  if (!var_entry)
++    return 0;
++  return var_entry->val_decimal(&null_value, dec);
++}
++
++
++longlong Item_func_get_user_var::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  if (!var_entry)
++    return LL(0);				// No such variable
++  return (var_entry->val_int(&null_value));
++}
++
++
++/**
++  Get variable by name and, if necessary, put the record of variable 
++  use into the binary log.
++
++  When a user variable is invoked from an update query (INSERT, UPDATE etc),
++  stores this variable and its value in thd->user_var_events, so that it can be
++  written to the binlog (will be written just before the query is written, see
++  log.cc).
++
++  @param      thd        Current thread
++  @param      name       Variable name
++  @param[out] out_entry  variable structure or NULL. The pointer is set
++                         regardless of whether the function succeeded or not.
++
++  @retval
++    0  OK
++  @retval
++    1  Failed to put appropriate record into binary log
++
++*/
++
++int get_var_with_binlog(THD *thd, enum_sql_command sql_command,
++                        LEX_STRING &name, user_var_entry **out_entry)
++{
++  BINLOG_USER_VAR_EVENT *user_var_event;
++  user_var_entry *var_entry;
++  var_entry= get_variable(&thd->user_vars, name, 0);
++
++  /*
++    Any reference to user-defined variable which is done from stored
++    function or trigger affects their execution and the execution of the
++    calling statement. We must log all such variables even if they are 
++    not involved in table-updating statements.
++  */
++  if (!(opt_bin_log && 
++       (is_update_query(sql_command) || thd->in_sub_stmt)))
++  {
++    *out_entry= var_entry;
++    return 0;
++  }
++
++  if (!var_entry)
++  {
++    /*
++      If the variable does not exist, it's NULL, but we want to create it so
++      that it gets into the binlog (if it didn't, the slave could be
++      influenced by a variable of the same name previously set by another
++      thread).
++      We create it as if it had been explicitly set with SET before.
++      The 'new' mimics what sql_yacc.yy does when 'SET @a=10;'.
++      sql_set_variables() is what is called from 'case SQLCOM_SET_OPTION'
++      in dispatch_command(). Instead of building a one-element list to pass to
++      sql_set_variables(), we could instead manually call check() and update();
++      this would save memory and time; but calling sql_set_variables() makes
++      one unique place to maintain (sql_set_variables()). 
++
++      Manipulation with lex is necessary since free_underlaid_joins
++      is going to release memory belonging to the main query.
++    */
++
++    List<set_var_base> tmp_var_list;
++    LEX *sav_lex= thd->lex, lex_tmp;
++    thd->lex= &lex_tmp;
++    lex_start(thd);
++    tmp_var_list.push_back(new set_var_user(new Item_func_set_user_var(name,
++                                                                       new Item_null())));
++    /* Create the variable */
++    if (sql_set_variables(thd, &tmp_var_list))
++    {
++      thd->lex= sav_lex;
++      goto err;
++    }
++    thd->lex= sav_lex;
++    if (!(var_entry= get_variable(&thd->user_vars, name, 0)))
++      goto err;
++  }
++  else if (var_entry->used_query_id == thd->query_id ||
++           mysql_bin_log.is_query_in_union(thd, var_entry->used_query_id))
++  {
++    /* 
++       If this variable was already stored in user_var_events by this query
++       (because it's used in more than one place in the query), don't store
++       it.
++    */
++    *out_entry= var_entry;
++    return 0;
++  }
++
++  uint size;
++  /*
++    First we need to store the value of var_entry when a situation like
++    the following appears:
++    > set @a:=1;
++    > insert into t1 values (@a), (@a:=@a+1), (@a:=@a+1);
++    We have to write the value @a= 1 to the binlog.
++
++    We allocate the user_var_event on the user_var_events_alloc pool, not on
++    the this-statement-execution pool, because in SPs user_var_event objects
++    may need to stay valid after the current [SP] statement's execution pool
++    is destroyed.
++  */
++  size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length;
++  if (!(user_var_event= (BINLOG_USER_VAR_EVENT *)
++        alloc_root(thd->user_var_events_alloc, size)))
++    goto err;
++
++  user_var_event->value= (char*) user_var_event +
++    ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT));
++  user_var_event->user_var_event= var_entry;
++  user_var_event->type= var_entry->type;
++  user_var_event->charset_number= var_entry->collation.collation->number;
++  if (!var_entry->value)
++  {
++    /* NULL value*/
++    user_var_event->length= 0;
++    user_var_event->value= 0;
++  }
++  else
++  {
++    user_var_event->length= var_entry->length;
++    memcpy(user_var_event->value, var_entry->value,
++           var_entry->length);
++  }
++  /* Mark that this variable has been used by this query */
++  var_entry->used_query_id= thd->query_id;
++  if (insert_dynamic(&thd->user_var_events, (uchar*) &user_var_event))
++    goto err;
++
++  *out_entry= var_entry;
++  return 0;
++
++err:
++  *out_entry= var_entry;
++  return 1;
++}
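++
++/*
++  A minimal standalone sketch of the single-allocation layout used by
++  get_var_with_binlog() above: the event header and the copied variable
++  value share one block, with the value placed just past the aligned
++  header, so the copy stays valid even if the variable itself changes
++  later in the statement.  Here malloc() stands in for alloc_root(), a
++  power-of-two round-up stands in for ALIGN_SIZE(), and EventSketch is
++  an illustrative stand-in for BINLOG_USER_VAR_EVENT, not the real
++  server structure.
++*/
++struct EventSketch { char *value; size_t length; };
++
++static EventSketch *pack_event_sketch(const char *val, size_t len)
++{
++  size_t header= (sizeof(EventSketch) + 7) & ~((size_t) 7); // round up to 8
++  EventSketch *ev= (EventSketch*) malloc(header + len);
++  if (!ev)
++    return NULL;
++  ev->value= (char*) ev + header;     // payload lives right after the header
++  ev->length= len;
++  memcpy(ev->value, val, len);        // snapshot of the current value
++  return ev;
++}
++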
++
++void Item_func_get_user_var::fix_length_and_dec()
++{
++  THD *thd=current_thd;
++  int error;
++  maybe_null=1;
++  decimals=NOT_FIXED_DEC;
++  max_length=MAX_BLOB_WIDTH;
++
++  error= get_var_with_binlog(thd, thd->lex->sql_command, name, &var_entry);
++
++  /*
++    If the variable didn't exist it has been created as a STRING-type.
++    'var_entry' is NULL only if an error occurred during the call to
++    get_var_with_binlog.
++  */
++  if (var_entry)
++  {
++    m_cached_result_type= var_entry->type;
++    unsigned_flag= var_entry->unsigned_flag;
++    max_length= var_entry->length;
++
++    collation.set(var_entry->collation);
++    switch(m_cached_result_type) {
++    case REAL_RESULT:
++      max_length= DBL_DIG + 8;
++      break;
++    case INT_RESULT:
++      max_length= MAX_BIGINT_WIDTH;
++      decimals=0;
++      break;
++    case STRING_RESULT:
++      max_length= MAX_BLOB_WIDTH - 1;
++      break;
++    case DECIMAL_RESULT:
++      max_length= DECIMAL_MAX_STR_LENGTH;
++      decimals= DECIMAL_MAX_SCALE;
++      break;
++    case ROW_RESULT:                            // Keep compiler happy
++    default:
++      DBUG_ASSERT(0);
++      break;
++    }
++  }
++  else
++  {
++    collation.set(&my_charset_bin, DERIVATION_IMPLICIT);
++    null_value= 1;
++    m_cached_result_type= STRING_RESULT;
++    max_length= MAX_BLOB_WIDTH;
++  }
++
++  if (error)
++    thd->fatal_error();
++
++  return;
++}
++
++
++bool Item_func_get_user_var::const_item() const
++{
++  return (!var_entry || current_thd->query_id != var_entry->update_query_id);
++}
++
++
++enum Item_result Item_func_get_user_var::result_type() const
++{
++  return m_cached_result_type;
++}
++
++
++void Item_func_get_user_var::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("(@"));
++  str->append(name.str,name.length);
++  str->append(')');
++}
++
++
++bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const
++{
++  /* Assume we don't have rtti */
++  if (this == item)
++    return 1;					// Same item is same.
++  /* Check if other type is also a get_user_var() object */
++  if (item->type() != FUNC_ITEM ||
++      ((Item_func*) item)->functype() != functype())
++    return 0;
++  Item_func_get_user_var *other=(Item_func_get_user_var*) item;
++  return (name.length == other->name.length &&
++	  !memcmp(name.str, other->name.str, name.length));
++}
++
++
++bool Item_func_get_user_var::set_value(THD *thd,
++                                       sp_rcontext * /*ctx*/, Item **it)
++{
++  Item_func_set_user_var *suv= new Item_func_set_user_var(get_name(), *it);
++  /*
++    Item_func_set_user_var is not fixed after construction, call
++    fix_fields().
++  */
++  return (!suv || suv->fix_fields(thd, it) || suv->check(0) || suv->update());
++}
++
++
++bool Item_user_var_as_out_param::fix_fields(THD *thd, Item **ref)
++{
++  DBUG_ASSERT(fixed == 0);
++  DBUG_ASSERT(thd->lex->exchange);
++  if (Item::fix_fields(thd, ref) ||
++      !(entry= get_variable(&thd->user_vars, name, 1)))
++    return TRUE;
++  entry->type= STRING_RESULT;
++  /*
++    Let us set the same collation which is used for loading
++    of fields in LOAD DATA INFILE.
++    (Since Item_user_var_as_out_param is used only there).
++  */
++  entry->collation.set(thd->lex->exchange->cs ? 
++                       thd->lex->exchange->cs :
++                       thd->variables.collation_database);
++  entry->update_query_id= thd->query_id;
++  return FALSE;
++}
++
++
++void Item_user_var_as_out_param::set_null_value(CHARSET_INFO* cs)
++{
++  if (::update_hash(entry, TRUE, 0, 0, STRING_RESULT, cs,
++                    DERIVATION_IMPLICIT, 0 /* unsigned_arg */))
++    current_thd->fatal_error();			// Probably end of memory
++}
++
++
++void Item_user_var_as_out_param::set_value(const char *str, uint length,
++                                           CHARSET_INFO* cs)
++{
++  if (::update_hash(entry, FALSE, (void*)str, length, STRING_RESULT, cs,
++                    DERIVATION_IMPLICIT, 0 /* unsigned_arg */))
++    current_thd->fatal_error();			// Probably end of memory
++}
++
++
++double Item_user_var_as_out_param::val_real()
++{
++  DBUG_ASSERT(0);
++  return 0.0;
++}
++
++
++longlong Item_user_var_as_out_param::val_int()
++{
++  DBUG_ASSERT(0);
++  return 0;
++}
++
++
++String* Item_user_var_as_out_param::val_str(String *str)
++{
++  DBUG_ASSERT(0);
++  return 0;
++}
++
++
++my_decimal* Item_user_var_as_out_param::val_decimal(my_decimal *decimal_buffer)
++{
++  DBUG_ASSERT(0);
++  return 0;
++}
++
++
++void Item_user_var_as_out_param::print(String *str, enum_query_type query_type)
++{
++  str->append('@');
++  str->append(name.str,name.length);
++}
++
++
++Item_func_get_system_var::
++Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg,
++                       LEX_STRING *component_arg, const char *name_arg,
++                       size_t name_len_arg)
++  :var(var_arg), var_type(var_type_arg), orig_var_type(var_type_arg),
++  component(*component_arg), cache_present(0)
++{
++  /* set_name() will allocate the name */
++  set_name(name_arg, (uint) name_len_arg, system_charset_info);
++}
++
++
++bool Item_func_get_system_var::is_written_to_binlog()
++{
++  return var->is_written_to_binlog(var_type);
++}
++
++
++void Item_func_get_system_var::update_null_value()
++{
++  THD *thd= current_thd;
++  int save_no_errors= thd->no_errors;
++  thd->no_errors= TRUE;
++  Item::update_null_value();
++  thd->no_errors= save_no_errors;
++}
++
++
++void Item_func_get_system_var::fix_length_and_dec()
++{
++  char *cptr;
++  maybe_null= TRUE;
++  max_length= 0;
++
++  if (var->check_type(var_type))
++  {
++    if (var_type != OPT_DEFAULT)
++    {
++      my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0),
++               var->name, var_type == OPT_GLOBAL ? "SESSION" : "GLOBAL");
++      return;
++    }
++    /* As there was no local variable, return the global value */
++    var_type= OPT_GLOBAL;
++  }
++
++  switch (var->show_type())
++  {
++    case SHOW_LONG:
++    case SHOW_INT:
++    case SHOW_HA_ROWS:
++      unsigned_flag= TRUE;
++      max_length= MY_INT64_NUM_DECIMAL_DIGITS;
++      decimals=0;
++      break;
++    case SHOW_LONGLONG:
++      unsigned_flag= TRUE;
++      max_length= MY_INT64_NUM_DECIMAL_DIGITS;
++      decimals=0;
++      break;
++    case SHOW_CHAR:
++    case SHOW_CHAR_PTR:
++      pthread_mutex_lock(&LOCK_global_system_variables);
++      cptr= var->show_type() == SHOW_CHAR_PTR ? 
++        *(char**) var->value_ptr(current_thd, var_type, &component) :
++        (char*) var->value_ptr(current_thd, var_type, &component);
++      if (cptr)
++        max_length= strlen(cptr) * system_charset_info->mbmaxlen;
++      pthread_mutex_unlock(&LOCK_global_system_variables);
++      collation.set(system_charset_info, DERIVATION_SYSCONST);
++      decimals=NOT_FIXED_DEC;
++      break;
++    case SHOW_BOOL:
++    case SHOW_MY_BOOL:
++      unsigned_flag= FALSE;
++      max_length= 1;
++      decimals=0;
++      break;
++    case SHOW_DOUBLE:
++      unsigned_flag= FALSE;
++      decimals= 6;
++      max_length= DBL_DIG + 6;
++      break;
++    default:
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name);
++      break;
++  }
++}
++
++
++void Item_func_get_system_var::print(String *str, enum_query_type query_type)
++{
++  str->append(name, name_length);
++}
++
++
++enum Item_result Item_func_get_system_var::result_type() const
++{
++  switch (var->show_type())
++  {
++    case SHOW_BOOL:
++    case SHOW_MY_BOOL:
++    case SHOW_INT:
++    case SHOW_LONG:
++    case SHOW_LONGLONG:
++    case SHOW_HA_ROWS:
++      return INT_RESULT;
++    case SHOW_CHAR: 
++    case SHOW_CHAR_PTR: 
++      return STRING_RESULT;
++    case SHOW_DOUBLE:
++      return REAL_RESULT;
++    default:
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name);
++      return STRING_RESULT;                   // keep the compiler happy
++  }
++}
++
++
++enum_field_types Item_func_get_system_var::field_type() const
++{
++  switch (var->show_type())
++  {
++    case SHOW_BOOL:
++    case SHOW_MY_BOOL:
++    case SHOW_INT:
++    case SHOW_LONG:
++    case SHOW_LONGLONG:
++    case SHOW_HA_ROWS:
++      return MYSQL_TYPE_LONGLONG;
++    case SHOW_CHAR: 
++    case SHOW_CHAR_PTR: 
++      return MYSQL_TYPE_VARCHAR;
++    case SHOW_DOUBLE:
++      return MYSQL_TYPE_DOUBLE;
++    default:
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name);
++      return MYSQL_TYPE_VARCHAR;              // keep the compiler happy
++  }
++}
++
++
++/*
++  Uses var, var_type, component, cache_present, used_query_id, thd,
++  cached_llval, null_value, cached_null_value
++*/
++#define get_sys_var_safe(type) \
++do { \
++  type value; \
++  pthread_mutex_lock(&LOCK_global_system_variables); \
++  value= *(type*) var->value_ptr(thd, var_type, &component); \
++  pthread_mutex_unlock(&LOCK_global_system_variables); \
++  cache_present |= GET_SYS_VAR_CACHE_LONG; \
++  used_query_id= thd->query_id; \
++  cached_llval= null_value ? 0 : (longlong) value; \
++  cached_null_value= null_value; \
++  return cached_llval; \
++} while (0)
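++
++/*
++  A compact standalone sketch of the cache_present idea used by the
++  val_int()/val_real()/val_str() methods below: each cached
++  representation gets its own bit, and the cached value is trusted only
++  while the remembered query id matches the current one.  The struct and
++  constants here are illustrative stand-ins, not server types.
++*/
++struct CachedIntSketch
++{
++  enum { CACHE_LONG= 1, CACHE_DOUBLE= 2, CACHE_STRING= 4 };
++  uint flags;
++  ulonglong query_id;
++  longlong llval;
++
++  CachedIntSketch() : flags(0), query_id(0), llval(0) {}
++
++  bool lookup(ulonglong current_query, longlong *out) const
++  {
++    if (query_id != current_query || !(flags & CACHE_LONG))
++      return false;                   // stale or never cached as an int
++    *out= llval;
++    return true;
++  }
++  void store(ulonglong current_query, longlong v)
++  {
++    if (query_id != current_query)
++      flags= 0;                       // new statement: drop stale entries
++    query_id= current_query;
++    llval= v;
++    flags|= CACHE_LONG;
++  }
++};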
++
++
++longlong Item_func_get_system_var::val_int()
++{
++  THD *thd= current_thd;
++
++  if (cache_present && thd->query_id == used_query_id)
++  {
++    if (cache_present & GET_SYS_VAR_CACHE_LONG)
++    {
++      null_value= cached_null_value;
++      return cached_llval;
++    } 
++    else if (cache_present & GET_SYS_VAR_CACHE_DOUBLE)
++    {
++      null_value= cached_null_value;
++      cached_llval= (longlong) cached_dval;
++      cache_present|= GET_SYS_VAR_CACHE_LONG;
++      return cached_llval;
++    }
++    else if (cache_present & GET_SYS_VAR_CACHE_STRING)
++    {
++      null_value= cached_null_value;
++      if (!null_value)
++        cached_llval= longlong_from_string_with_check (cached_strval.charset(),
++                                                       cached_strval.c_ptr(),
++                                                       cached_strval.c_ptr() +
++                                                       cached_strval.length());
++      else
++        cached_llval= 0;
++      cache_present|= GET_SYS_VAR_CACHE_LONG;
++      return cached_llval;
++    }
++  }
++
++  switch (var->show_type())
++  {
++    case SHOW_INT:      get_sys_var_safe (uint);
++    case SHOW_LONG:     get_sys_var_safe (ulong);
++    case SHOW_LONGLONG: get_sys_var_safe (ulonglong);
++    case SHOW_HA_ROWS:  get_sys_var_safe (ha_rows);
++    case SHOW_BOOL:     get_sys_var_safe (bool);
++    case SHOW_MY_BOOL:  get_sys_var_safe (my_bool);
++    case SHOW_DOUBLE:
++      {
++        double dval= val_real();
++
++        used_query_id= thd->query_id;
++        cached_llval= (longlong) dval;
++        cache_present|= GET_SYS_VAR_CACHE_LONG;
++        return cached_llval;
++      }
++    case SHOW_CHAR:
++    case SHOW_CHAR_PTR:
++      {
++        String *str_val= val_str(NULL);
++
++        if (str_val && str_val->length())
++          cached_llval= longlong_from_string_with_check (system_charset_info,
++                                                          str_val->c_ptr(), 
++                                                          str_val->c_ptr() + 
++                                                          str_val->length());
++        else
++        {
++          null_value= TRUE;
++          cached_llval= 0;
++        }
++
++        cache_present|= GET_SYS_VAR_CACHE_LONG;
++        return cached_llval;
++      }
++
++    default:            
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name); 
++      return 0;                               // keep the compiler happy
++  }
++}
++
++
++String* Item_func_get_system_var::val_str(String* str)
++{
++  THD *thd= current_thd;
++
++  if (cache_present && thd->query_id == used_query_id)
++  {
++    if (cache_present & GET_SYS_VAR_CACHE_STRING)
++    {
++      null_value= cached_null_value;
++      return null_value ? NULL : &cached_strval;
++    }
++    else if (cache_present & GET_SYS_VAR_CACHE_LONG)
++    {
++      null_value= cached_null_value;
++      if (!null_value)
++        cached_strval.set (cached_llval, collation.collation);
++      cache_present|= GET_SYS_VAR_CACHE_STRING;
++      return null_value ? NULL : &cached_strval;
++    }
++    else if (cache_present & GET_SYS_VAR_CACHE_DOUBLE)
++    {
++      null_value= cached_null_value;
++      if (!null_value)
++        cached_strval.set_real (cached_dval, decimals, collation.collation);
++      cache_present|= GET_SYS_VAR_CACHE_STRING;
++      return null_value ? NULL : &cached_strval;
++    }
++  }
++
++  str= &cached_strval;
++  switch (var->show_type())
++  {
++    case SHOW_CHAR:
++    case SHOW_CHAR_PTR:
++    {
++      pthread_mutex_lock(&LOCK_global_system_variables);
++      char *cptr= var->show_type() == SHOW_CHAR_PTR ? 
++        *(char**) var->value_ptr(thd, var_type, &component) :
++        (char*) var->value_ptr(thd, var_type, &component);
++      if (cptr)
++      {
++        if (str->copy(cptr, strlen(cptr), collation.collation))
++        {
++          null_value= TRUE;
++          str= NULL;
++        }
++      }
++      else
++      {
++        null_value= TRUE;
++        str= NULL;
++      }
++      pthread_mutex_unlock(&LOCK_global_system_variables);
++      break;
++    }
++
++    case SHOW_INT:
++    case SHOW_LONG:
++    case SHOW_LONGLONG:
++    case SHOW_HA_ROWS:
++    case SHOW_BOOL:
++    case SHOW_MY_BOOL:
++      str->set (val_int(), collation.collation);
++      break;
++    case SHOW_DOUBLE:
++      str->set_real (val_real(), decimals, collation.collation);
++      break;
++
++    default:
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name);
++      str= NULL;
++      break;
++  }
++
++  cache_present|= GET_SYS_VAR_CACHE_STRING;
++  used_query_id= thd->query_id;
++  cached_null_value= null_value;
++  return str;
++}
++
++
++double Item_func_get_system_var::val_real()
++{
++  THD *thd= current_thd;
++
++  if (cache_present && thd->query_id == used_query_id)
++  {
++    if (cache_present & GET_SYS_VAR_CACHE_DOUBLE)
++    {
++      null_value= cached_null_value;
++      return cached_dval;
++    }
++    else if (cache_present & GET_SYS_VAR_CACHE_LONG)
++    {
++      null_value= cached_null_value;
++      cached_dval= (double)cached_llval;
++      cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
++      return cached_dval;
++    }
++    else if (cache_present & GET_SYS_VAR_CACHE_STRING)
++    {
++      null_value= cached_null_value;
++      if (!null_value)
++        cached_dval= double_from_string_with_check (cached_strval.charset(),
++                                                    cached_strval.c_ptr(),
++                                                    cached_strval.c_ptr() +
++                                                    cached_strval.length());
++      else
++        cached_dval= 0;
++      cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
++      return cached_dval;
++    }
++  }
++
++  switch (var->show_type())
++  {
++    case SHOW_DOUBLE:
++      pthread_mutex_lock(&LOCK_global_system_variables);
++      cached_dval= *(double*) var->value_ptr(thd, var_type, &component);
++      pthread_mutex_unlock(&LOCK_global_system_variables);
++      used_query_id= thd->query_id;
++      cached_null_value= null_value;
++      if (null_value)
++        cached_dval= 0;
++      cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
++      return cached_dval;
++    case SHOW_CHAR:
++    case SHOW_CHAR_PTR:
++      {
++        char *cptr;
++
++        pthread_mutex_lock(&LOCK_global_system_variables);
++        cptr= var->show_type() == SHOW_CHAR ? 
++          (char*) var->value_ptr(thd, var_type, &component) :
++          *(char**) var->value_ptr(thd, var_type, &component);
++        if (cptr)
++          cached_dval= double_from_string_with_check (system_charset_info, 
++                                                cptr, cptr + strlen (cptr));
++        else
++        {
++          null_value= TRUE;
++          cached_dval= 0;
++        }
++        pthread_mutex_unlock(&LOCK_global_system_variables);
++        used_query_id= thd->query_id;
++        cached_null_value= null_value;
++        cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
++        return cached_dval;
++      }
++    case SHOW_INT:
++    case SHOW_LONG:
++    case SHOW_LONGLONG:
++    case SHOW_HA_ROWS:
++    case SHOW_BOOL:
++    case SHOW_MY_BOOL:
++        cached_dval= (double) val_int();
++        cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
++        used_query_id= thd->query_id;
++        cached_null_value= null_value;
++        return cached_dval;
++    default:
++      my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name);
++      return 0;
++  }
++}
++
++
++bool Item_func_get_system_var::eq(const Item *item, bool binary_cmp) const
++{
++  /* Assume we don't have rtti */
++  if (this == item)
++    return 1;					// Same item is same.
++  /* Check if the other item is also a get_system_var() object */
++  if (item->type() != FUNC_ITEM ||
++      ((Item_func*) item)->functype() != functype())
++    return 0;
++  Item_func_get_system_var *other=(Item_func_get_system_var*) item;
++  return (var == other->var && var_type == other->var_type);
++}
++
++
++void Item_func_get_system_var::cleanup()
++{
++  Item_func::cleanup();
++  cache_present= 0;
++  var_type= orig_var_type;
++  cached_strval.free();
++}
++
++
++longlong Item_func_inet_aton::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  uint byte_result = 0;
++  ulonglong result = 0;			// We are ready for 64 bit addresses
++  const char *p,* end;
++  char c = '.'; // we mark c to indicate invalid IP in case length is 0
++  char buff[36];
++  int dot_count= 0;
++
++  String *s,tmp(buff,sizeof(buff),&my_charset_bin);
++  if (!(s = args[0]->val_str(&tmp)))		// If null value
++    goto err;
++  null_value=0;
++
++  end= (p = s->ptr()) + s->length();
++  while (p < end)
++  {
++    c = *p++;
++    int digit = (int) (c - '0');		// Assume ascii
++    if (digit >= 0 && digit <= 9)
++    {
++      if ((byte_result = byte_result * 10 + digit) > 255)
++	goto err;				// Wrong address
++    }
++    else if (c == '.')
++    {
++      dot_count++;
++      result= (result << 8) + (ulonglong) byte_result;
++      byte_result = 0;
++    }
++    else
++      goto err;					// Invalid character
++  }
++  if (c != '.')					// IP number can't end with '.'
++  {
++    /*
++      Handle short-form addresses according to the standard. Examples:
++      127		-> 0.0.0.127
++      127.1		-> 127.0.0.1
++      127.2.1		-> 127.2.0.1
++    */
++    switch (dot_count) {
++    case 1: result<<= 8; /* Fall through */
++    case 2: result<<= 8; /* Fall through */
++    }
++    return (result << 8) + (ulonglong) byte_result;
++  }
++
++err:
++  null_value=1;
++  return 0;
++}
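++
++/*
++  A standalone sketch of the short-form rule handled above: the groups
++  seen so far are shifted left once for each missing dot before the last
++  group is appended, so "127" becomes 0.0.0.127, "127.1" becomes
++  127.0.0.1 and "127.2.1" becomes 127.2.0.1.  Plain C++ with no server
++  types; digit validation and overflow checks are deliberately omitted.
++*/
++static ulonglong inet_aton_sketch(const char *s)
++{
++  ulonglong result= 0, byte_val= 0;
++  int dots= 0;
++  for (; *s; s++)
++  {
++    if (*s == '.')
++    {
++      result= (result << 8) + byte_val;         // close the current group
++      byte_val= 0;
++      dots++;
++    }
++    else
++      byte_val= byte_val * 10 + (ulonglong) (*s - '0');
++  }
++  if (dots == 1 || dots == 2)
++    result<<= 8;                                // pad the missing groups
++  if (dots == 1)
++    result<<= 8;
++  return (result << 8) + byte_val;              // append the last group
++}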
++
++
++void Item_func_match::init_search(bool no_order)
++{
++  DBUG_ENTER("Item_func_match::init_search");
++
++  /* Check if init_search() has been called before */
++  if (ft_handler)
++  {
++    /*
++      We should reset ft_handler as it is cleaned up
++      on destruction of FT_SELECT object
++      (necessary in case of re-execution of subquery).
++      TODO: FT_SELECT should not clean up ft_handler.
++    */
++    if (join_key)
++      table->file->ft_handler= ft_handler;
++    DBUG_VOID_RETURN;
++  }
++
++  if (key == NO_SUCH_KEY)
++  {
++    List<Item> fields;
++    fields.push_back(new Item_string(" ",1, cmp_collation.collation));
++    for (uint i=1; i < arg_count; i++)
++      fields.push_back(args[i]);
++    concat_ws=new Item_func_concat_ws(fields);
++    /*
++      The above function is used only to get a value and does not need
++      fix_fields() called on it:
++      Item_string - basic constant
++      fields - fix_fields() was already called for these arguments
++      Item_func_concat_ws - does not need fix_fields() to produce a value
++    */
++    concat_ws->quick_fix_field();
++  }
++
++  if (master)
++  {
++    join_key=master->join_key=join_key|master->join_key;
++    master->init_search(no_order);
++    ft_handler=master->ft_handler;
++    join_key=master->join_key;
++    DBUG_VOID_RETURN;
++  }
++
++  String *ft_tmp= 0;
++
++  // MATCH ... AGAINST (NULL) is meaningless, but possible
++  if (!(ft_tmp=key_item()->val_str(&value)))
++  {
++    ft_tmp= &value;
++    value.set("",0,cmp_collation.collation);
++  }
++
++  if (ft_tmp->charset() != cmp_collation.collation)
++  {
++    uint dummy_errors;
++    search_value.copy(ft_tmp->ptr(), ft_tmp->length(), ft_tmp->charset(),
++                      cmp_collation.collation, &dummy_errors);
++    ft_tmp= &search_value;
++  }
++
++  if (join_key && !no_order)
++    flags|=FT_SORTED;
++  ft_handler=table->file->ft_init_ext(flags, key, ft_tmp);
++
++  if (join_key)
++    table->file->ft_handler=ft_handler;
++
++  DBUG_VOID_RETURN;
++}
++
++
++bool Item_func_match::fix_fields(THD *thd, Item **ref)
++{
++  DBUG_ASSERT(fixed == 0);
++  Item *UNINIT_VAR(item);                        // Safe as arg_count is > 1
++
++  maybe_null=1;
++  join_key=0;
++
++  /*
++    const_item is assumed in quite a few places, so it would be difficult
++    to remove; if it is ever removed, this should include
++    modifications to find_best and auto_close as a complement to the
++    auto_init code above.
++   */
++  if (Item_func::fix_fields(thd, ref) ||
++      !args[0]->const_during_execution())
++  {
++    my_error(ER_WRONG_ARGUMENTS,MYF(0),"AGAINST");
++    return TRUE;
++  }
++
++  const_item_cache=0;
++  for (uint i=1 ; i < arg_count ; i++)
++  {
++    item=args[i];
++    if (item->type() == Item::REF_ITEM)
++      args[i]= item= *((Item_ref *)item)->ref;
++    if (item->type() != Item::FIELD_ITEM)
++    {
++      my_error(ER_WRONG_ARGUMENTS, MYF(0), "AGAINST");
++      return TRUE;
++    }
++  }
++  /*
++    Check that all columns come from the same table.
++    We've already checked that columns in MATCH are fields so
++    PARAM_TABLE_BIT can only appear from AGAINST argument.
++  */
++  if ((used_tables_cache & ~PARAM_TABLE_BIT) != item->used_tables())
++    key=NO_SUCH_KEY;
++
++  if (key == NO_SUCH_KEY && !(flags & FT_BOOL))
++  {
++    my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH");
++    return TRUE;
++  }
++  table=((Item_field *)item)->field->table;
++  if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT))
++  {
++    my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
++    return 1;
++  }
++  table->fulltext_searched=1;
++  return agg_arg_collations_for_comparison(cmp_collation,
++                                           args+1, arg_count-1, 0);
++}
++
++bool Item_func_match::fix_index()
++{
++  Item_field *item;
++  uint ft_to_key[MAX_KEY], ft_cnt[MAX_KEY], fts=0, keynr;
++  uint max_cnt=0, mkeys=0, i;
++
++  if (key == NO_SUCH_KEY)
++    return 0;
++  
++  if (!table) 
++    goto err;
++
++  for (keynr=0 ; keynr < table->s->keys ; keynr++)
++  {
++    if ((table->key_info[keynr].flags & HA_FULLTEXT) &&
++        (flags & FT_BOOL ? table->keys_in_use_for_query.is_set(keynr) :
++                           table->s->keys_in_use.is_set(keynr)))
++
++    {
++      ft_to_key[fts]=keynr;
++      ft_cnt[fts]=0;
++      fts++;
++    }
++  }
++
++  if (!fts)
++    goto err;
++
++  for (i=1; i < arg_count; i++)
++  {
++    item=(Item_field*)args[i];
++    for (keynr=0 ; keynr < fts ; keynr++)
++    {
++      KEY *ft_key=&table->key_info[ft_to_key[keynr]];
++      uint key_parts=ft_key->key_parts;
++
++      for (uint part=0 ; part < key_parts ; part++)
++      {
++	if (item->field->eq(ft_key->key_part[part].field))
++	  ft_cnt[keynr]++;
++      }
++    }
++  }
++
++  for (keynr=0 ; keynr < fts ; keynr++)
++  {
++    if (ft_cnt[keynr] > max_cnt)
++    {
++      mkeys=0;
++      max_cnt=ft_cnt[mkeys]=ft_cnt[keynr];
++      ft_to_key[mkeys]=ft_to_key[keynr];
++      continue;
++    }
++    if (max_cnt && ft_cnt[keynr] == max_cnt)
++    {
++      mkeys++;
++      ft_cnt[mkeys]=ft_cnt[keynr];
++      ft_to_key[mkeys]=ft_to_key[keynr];
++      continue;
++    }
++  }
++
++  for (keynr=0 ; keynr <= mkeys ; keynr++)
++  {
++    // partial keys don't work
++    if (max_cnt < arg_count-1 ||
++        max_cnt < table->key_info[ft_to_key[keynr]].key_parts)
++      continue;
++
++    key=ft_to_key[keynr];
++
++    return 0;
++  }
++
++err:
++  if (flags & FT_BOOL)
++  {
++    key=NO_SUCH_KEY;
++    return 0;
++  }
++  my_message(ER_FT_MATCHING_KEY_NOT_FOUND,
++             ER(ER_FT_MATCHING_KEY_NOT_FOUND), MYF(0));
++  return 1;
++}
++
++
++bool Item_func_match::eq(const Item *item, bool binary_cmp) const
++{
++  if (item->type() != FUNC_ITEM ||
++      ((Item_func*)item)->functype() != FT_FUNC ||
++      flags != ((Item_func_match*)item)->flags)
++    return 0;
++
++  Item_func_match *ifm=(Item_func_match*) item;
++
++  if (key == ifm->key && table == ifm->table &&
++      key_item()->eq(ifm->key_item(), binary_cmp))
++    return 1;
++
++  return 0;
++}
++
++
++double Item_func_match::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  DBUG_ENTER("Item_func_match::val");
++  if (ft_handler == NULL)
++    DBUG_RETURN(-1.0);
++
++  if (key != NO_SUCH_KEY && table->null_row) /* NULL row from an outer join */
++    DBUG_RETURN(0.0);
++
++  if (join_key)
++  {
++    if (table->file->ft_handler)
++      DBUG_RETURN(ft_handler->please->get_relevance(ft_handler));
++    join_key=0;
++  }
++
++  if (key == NO_SUCH_KEY)
++  {
++    String *a= concat_ws->val_str(&value);
++    if ((null_value= (a == 0)) || !a->length())
++      DBUG_RETURN(0);
++    DBUG_RETURN(ft_handler->please->find_relevance(ft_handler,
++				      (uchar *)a->ptr(), a->length()));
++  }
++  DBUG_RETURN(ft_handler->please->find_relevance(ft_handler,
++                                                 table->record[0], 0));
++}
++
++void Item_func_match::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("(match "));
++  print_args(str, 1, query_type);
++  str->append(STRING_WITH_LEN(" against ("));
++  args[0]->print(str, query_type);
++  if (flags & FT_BOOL)
++    str->append(STRING_WITH_LEN(" in boolean mode"));
++  else if (flags & FT_EXPAND)
++    str->append(STRING_WITH_LEN(" with query expansion"));
++  str->append(STRING_WITH_LEN("))"));
++}
++
++longlong Item_func_bit_xor::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  ulonglong arg1= (ulonglong) args[0]->val_int();
++  ulonglong arg2= (ulonglong) args[1]->val_int();
++  if ((null_value= (args[0]->null_value || args[1]->null_value)))
++    return 0;
++  return (longlong) (arg1 ^ arg2);
++}
++
++
++/***************************************************************************
++  System variables
++****************************************************************************/
++
++/**
++  Return the value of a system variable base[.name] as a constant item.
++
++  @param thd			Thread handler
++  @param var_type		global / session
++  @param name		        Name of base or system variable
++  @param component		Component.
++
++  @note
++    If component.str = 0 then the variable name is in 'name'
++
++  @return
++    - 0  : error
++    - #  : constant item
++*/
++
++
++Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
++		     LEX_STRING component)
++{
++  sys_var *var;
++  LEX_STRING *base_name, *component_name;
++
++  if (component.str)
++  {
++    base_name= &component;
++    component_name= &name;
++  }
++  else
++  {
++    base_name= &name;
++    component_name= &component;			// Empty string
++  }
++
++  if (!(var= find_sys_var(thd, base_name->str, base_name->length)))
++    return 0;
++  if (component.str)
++  {
++    if (!var->is_struct())
++    {
++      my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), base_name->str);
++      return 0;
++    }
++  }
++  thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
++
++  set_if_smaller(component_name->length, MAX_SYS_VAR_LENGTH);
++
++  return new Item_func_get_system_var(var, var_type, component_name,
++                                      NULL, 0);
++}
++
++
++/**
++  Check a user level lock.
++
++  Sets null_value=TRUE on error.
++
++  @retval
++    1		Available
++  @retval
++    0		Already taken, or error
++*/
++
++longlong Item_func_is_free_lock::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  User_level_lock *ull;
++
++  null_value=0;
++  if (!res || !res->length())
++  {
++    null_value=1;
++    return 0;
++  }
++  
++  pthread_mutex_lock(&LOCK_user_locks);
++  ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
++                                       (size_t) res->length());
++  pthread_mutex_unlock(&LOCK_user_locks);
++  if (!ull || !ull->locked)
++    return 1;
++  return 0;
++}
++
++longlong Item_func_is_used_lock::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  User_level_lock *ull;
++
++  null_value=1;
++  if (!res || !res->length())
++    return 0;
++  
++  pthread_mutex_lock(&LOCK_user_locks);
++  ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
++                                       (size_t) res->length());
++  pthread_mutex_unlock(&LOCK_user_locks);
++  if (!ull || !ull->locked)
++    return 0;
++
++  null_value=0;
++  return ull->thread_id;
++}
++
++
++longlong Item_func_row_count::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  THD *thd= current_thd;
++
++  return thd->row_count_func;
++}
++
++
++
++
++Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
++  :Item_func(), context(context_arg), m_name(name), m_sp(NULL), sp_result_field(NULL)
++{
++  maybe_null= 1;
++  m_name->init_qname(current_thd);
++  dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
++  dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
++}
++
++
++Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
++                           sp_name *name, List<Item> &list)
++  :Item_func(list), context(context_arg), m_name(name), m_sp(NULL),sp_result_field(NULL)
++{
++  maybe_null= 1;
++  m_name->init_qname(current_thd);
++  dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
++  dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
++}
++
++
++void
++Item_func_sp::cleanup()
++{
++  if (sp_result_field)
++  {
++    delete sp_result_field;
++    sp_result_field= NULL;
++  }
++  m_sp= NULL;
++  dummy_table->alias= NULL;
++  Item_func::cleanup();
++}
++
++const char *
++Item_func_sp::func_name() const
++{
++  THD *thd= current_thd;
++  /* Calculate the length up front so the String never needs reallocation */
++  uint len= (((m_name->m_explicit_name ? m_name->m_db.length : 0) +
++              m_name->m_name.length)*2 + //characters*quoting
++             2 +                         // ` and `
++             (m_name->m_explicit_name ?
++              3 : 0) +                   // '`', '`' and '.' for the db
++             1 +                         // end of string
++             ALIGN_SIZE(1));             // to avoid String reallocation
++  String qname((char *)alloc_root(thd->mem_root, len), len,
++               system_charset_info);
++
++  qname.length(0);
++  if (m_name->m_explicit_name)
++  {
++    append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length);
++    qname.append('.');
++  }
++  append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length);
++  return qname.ptr();
++}
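++
++/*
++  A standalone sketch of why the length estimate above doubles the
++  identifier lengths: when an identifier is quoted with backticks, every
++  backtick inside the name is written twice, so an n-character name
++  needs at most 2*n bytes plus the two surrounding quote characters.
++  quote_ident_sketch() is an illustrative helper, not the server's
++  append_identifier().
++*/
++static size_t quote_ident_sketch(const char *name, size_t len,
++                                 char *to)       // needs 2*len + 3 bytes
++{
++  char *dst= to;
++  *dst++= '`';
++  for (size_t i= 0; i < len; i++)
++  {
++    if (name[i] == '`')
++      *dst++= '`';                               // double embedded quotes
++    *dst++= name[i];
++  }
++  *dst++= '`';
++  *dst= '\0';
++  return (size_t) (dst - to);                    // at most 2*len + 2
++}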
++
++
++int my_missing_function_error(const LEX_STRING &token, const char *func_name)
++{
++  if (token.length && is_lex_native_function (&token))
++    return my_error(ER_FUNC_INEXISTENT_NAME_COLLISION, MYF(0), func_name);
++  else
++    return my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", func_name);
++}
++
++
++/**
++  @brief Initialize the result field by creating a temporary dummy table
++    and assigning it to a newly created field object. The metadata used to
++    create the field is fetched from the sp_head belonging to the stored
++    procedure found in the stored procedure function cache.
++
++  @note This function should be called from fix_fields to init the result
++    field. It is somewhat related to Item_field.
++
++  @see Item_field
++
++  @param thd A pointer to the session and thread context.
++
++  @return Function return error status.
++  @retval TRUE is returned on an error
++  @retval FALSE is returned on success.
++*/
++
++bool
++Item_func_sp::init_result_field(THD *thd)
++{
++  LEX_STRING empty_name= { C_STRING_WITH_LEN("") };
++  TABLE_SHARE *share;
++  DBUG_ENTER("Item_func_sp::init_result_field");
++
++  DBUG_ASSERT(m_sp == NULL);
++  DBUG_ASSERT(sp_result_field == NULL);
++
++  if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name,
++                               &thd->sp_func_cache, TRUE)))
++  {
++    my_missing_function_error (m_name->m_name, m_name->m_qname.str);
++    context->process_error(thd);
++    DBUG_RETURN(TRUE);
++  }
++
++  /*
++     A Field needs to be attached to a Table.
++     Below we "create" a dummy table by initializing 
++     the needed pointers.
++   */
++  
++  share= dummy_table->s;
++  dummy_table->alias = "";
++  dummy_table->maybe_null = maybe_null;
++  dummy_table->in_use= thd;
++  dummy_table->copy_blobs= TRUE;
++  share->table_cache_key = empty_name;
++  share->table_name = empty_name;
++
++  if (!(sp_result_field= m_sp->create_result_field(max_length, name,
++                                                   dummy_table)))
++  {
++   DBUG_RETURN(TRUE);
++  }
++  
++  if (sp_result_field->pack_length() > sizeof(result_buf))
++  {
++    void *tmp;
++    if (!(tmp= sql_alloc(sp_result_field->pack_length())))
++      DBUG_RETURN(TRUE);
++    sp_result_field->move_field((uchar*) tmp);
++  }
++  else
++    sp_result_field->move_field(result_buf);
++  
++  sp_result_field->null_ptr= (uchar *) &null_value;
++  sp_result_field->null_bit= 1;
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  @brief Initialize local members with values from the Field interface.
++
++  @note called from Item::fix_fields.
++*/
++
++void Item_func_sp::fix_length_and_dec()
++{
++  DBUG_ENTER("Item_func_sp::fix_length_and_dec");
++
++  DBUG_ASSERT(sp_result_field);
++  decimals= sp_result_field->decimals();
++  max_length= sp_result_field->field_length;
++  collation.set(sp_result_field->charset());
++  maybe_null= 1;
++  unsigned_flag= test(sp_result_field->flags & UNSIGNED_FLAG);
++
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  @brief Execute function & store value in field.
++
++  @return Function returns error status.
++  @retval FALSE on success.
++  @retval TRUE if an error occurred.
++*/
++
++bool
++Item_func_sp::execute()
++{
++  THD *thd= current_thd;
++  
++  /* Execute function and store the return value in the field. */
++
++  if (execute_impl(thd))
++  {
++    null_value= 1;
++    context->process_error(thd);
++    if (thd->killed)
++      thd->send_kill_message();
++    return TRUE;
++  }
++
++  /* Check that the field (the value) is not NULL. */
++
++  null_value= sp_result_field->is_null();
++
++  return null_value;
++}
++
++
++/**
++   @brief Execute function and store the return value in the field.
++
++   @note This function was intended to be the concrete implementation of
++    the interface function execute. This was never realized.
++
++   @return The error state.
++   @retval FALSE on success
++   @retval TRUE if an error occurred.
++*/
++bool
++Item_func_sp::execute_impl(THD *thd)
++{
++  bool err_status= TRUE;
++  Sub_statement_state statement_state;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  Security_context *save_security_ctx= thd->security_ctx;
++#endif
++  enum enum_sp_data_access access=
++    (m_sp->m_chistics->daccess == SP_DEFAULT_ACCESS) ?
++     SP_DEFAULT_ACCESS_MAPPING : m_sp->m_chistics->daccess;
++
++  DBUG_ENTER("Item_func_sp::execute_impl");
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (context->security_ctx)
++  {
++    /* Set view definer security context */
++    thd->security_ctx= context->security_ctx;
++  }
++#endif
++  if (sp_check_access(thd))
++    goto error;
++
++  /*
++    Throw an error if a non-deterministic function is called while
++    statement-based replication (SBR) is active.
++  */
++
++  if (!m_sp->m_chistics->detistic && !trust_function_creators &&
++      (access == SP_CONTAINS_SQL || access == SP_MODIFIES_SQL_DATA) &&
++      (mysql_bin_log.is_open() &&
++       thd->variables.binlog_format == BINLOG_FORMAT_STMT))
++  {
++    my_error(ER_BINLOG_UNSAFE_ROUTINE, MYF(0));
++    goto error;
++  }
++
++  /*
++    Disable the binlogging if this is not a SELECT statement. If this is a
++    SELECT, leave binlogging on, so execute_function() code writes the
++    function call into binlog.
++  */
++  thd->reset_sub_statement_state(&statement_state, SUB_STMT_FUNCTION);
++  err_status= m_sp->execute_function(thd, args, arg_count, sp_result_field); 
++  thd->restore_sub_statement_state(&statement_state);
++
++error:
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  thd->security_ctx= save_security_ctx;
++#endif
++
++  DBUG_RETURN(err_status);
++}
++
++
++void
++Item_func_sp::make_field(Send_field *tmp_field)
++{
++  DBUG_ENTER("Item_func_sp::make_field");
++  DBUG_ASSERT(sp_result_field);
++  sp_result_field->make_field(tmp_field);
++  if (name)
++    tmp_field->col_name= name;
++  DBUG_VOID_RETURN;
++}
++
++
++enum enum_field_types
++Item_func_sp::field_type() const
++{
++  DBUG_ENTER("Item_func_sp::field_type");
++  DBUG_ASSERT(sp_result_field);
++  DBUG_RETURN(sp_result_field->type());
++}
++
++Item_result
++Item_func_sp::result_type() const
++{
++  DBUG_ENTER("Item_func_sp::result_type");
++  DBUG_PRINT("info", ("m_sp = %p", (void *) m_sp));
++  DBUG_ASSERT(sp_result_field);
++  DBUG_RETURN(sp_result_field->result_type());
++}
++
++longlong Item_func_found_rows::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  return current_thd->found_rows();
++}
++
++
++Field *
++Item_func_sp::tmp_table_field(TABLE *t_arg)
++{
++  DBUG_ENTER("Item_func_sp::tmp_table_field");
++
++  DBUG_ASSERT(sp_result_field);
++  DBUG_RETURN(sp_result_field);
++}
++
++
++/**
++  @brief Checks whether the requested access to the function can be granted
++    to the user. If the function hasn't been found yet, it is looked up
++    first. If the function can't be found or the user doesn't have the
++    requested access, an error is raised.
++
++  @param thd thread handler
++
++  @return Indication if the access was granted or not.
++  @retval FALSE Access is granted.
++  @retval TRUE Requested access can't be granted or the function doesn't exist.
++    
++*/
++
++bool
++Item_func_sp::sp_check_access(THD *thd)
++{
++  DBUG_ENTER("Item_func_sp::sp_check_access");
++  DBUG_ASSERT(m_sp);
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (check_routine_access(thd, EXECUTE_ACL,
++			   m_sp->m_db.str, m_sp->m_name.str, 0, FALSE))
++    DBUG_RETURN(TRUE);
++#endif
++
++  DBUG_RETURN(FALSE);
++}
++
++
++bool
++Item_func_sp::fix_fields(THD *thd, Item **ref)
++{
++  bool res;
++  DBUG_ENTER("Item_func_sp::fix_fields");
++  DBUG_ASSERT(fixed == 0);
++ 
++  /*
++    We must call init_result_field before Item_func::fix_fields() 
++    to make m_sp and result_field members available to fix_length_and_dec(),
++    which is called from Item_func::fix_fields().
++  */
++  res= init_result_field(thd);
++
++  if (res)
++    DBUG_RETURN(res);
++
++  res= Item_func::fix_fields(thd, ref);
++
++  if (res)
++    DBUG_RETURN(res);
++
++  if (thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW)
++  {
++    /*
++      Here we check privileges of the stored routine only during view
++      creation, in order to validate the view.  A runtime check is
++      performed in Item_func_sp::execute(), and this method is not
++      called during context analysis.  Notice that during view
++      creation we do not look into stored routine bodies and do not
++      check the privileges of their statements, which would probably be a
++      good idea especially if the view has SQL SECURITY DEFINER and
++      the used stored procedure has SQL SECURITY DEFINER.
++    */
++    res= sp_check_access(thd);
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++    /*
++      Try to set and restore the security context to see whether it's valid
++    */
++    Security_context *save_secutiry_ctx;
++    res= set_routine_security_ctx(thd, m_sp, false, &save_secutiry_ctx);
++    if (!res)
++      m_sp->m_security_ctx.restore_security_context(thd, save_secutiry_ctx);
++    
++#endif /* ! NO_EMBEDDED_ACCESS_CHECKS */
++  }
++
++  if (!m_sp->m_chistics->detistic)
++  {
++    used_tables_cache |= RAND_TABLE_BIT;
++    const_item_cache= FALSE;
++  }
++
++  DBUG_RETURN(res);
++}
++
++
++void Item_func_sp::update_used_tables()
++{
++  Item_func::update_used_tables();
++
++  if (!m_sp->m_chistics->detistic)
++  {
++    used_tables_cache |= RAND_TABLE_BIT;
++    const_item_cache= FALSE;
++  }
++}
++
++
++/*
++  uuid_short handling.
++
++  The short uuid is defined as a longlong that contains the following bytes:
++
++  Bytes  Comment
++  1      Server_id & 255
++  4      Startup time of server in seconds
++  3      Incrementor
++
++  This means that a uuid is guaranteed to be unique
++  even in a replication environment if the following holds:
++
++  - The last byte of the server id is unique.
++  - Between two shutdowns of the server, uuid_short() is called no more
++    than an average of 2^24 = 16M times per second.
++*/
++
++ulonglong uuid_value;
++
++void uuid_short_init()
++{
++  uuid_value= ((((ulonglong) server_id) << 56) + 
++               (((ulonglong) server_start_time) << 24));
++}
++
++
++longlong Item_func_uuid_short::val_int()
++{
++  ulonglong val;
++  pthread_mutex_lock(&LOCK_uuid_generator);
++  val= uuid_value++;
++  pthread_mutex_unlock(&LOCK_uuid_generator);
++  return (longlong) val;
++}
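++
++/*
++  A worked sketch of the byte layout described above: 8 bits of server
++  id, 32 bits of server start time and a 24-bit counter packed into one
++  64-bit value, so ids stay unique as long as the counter does not pass
++  2^24 (~16.7M) increments per second of start-time difference between
++  restarts.  Plain C++; the function name and explicit masking are
++  illustrative.
++*/
++static ulonglong uuid_short_layout_sketch(ulong server_id_arg,
++                                          ulong start_time,
++                                          ulong counter)
++{
++  return (((ulonglong) (server_id_arg & 255)) << 56) |
++         (((ulonglong) start_time) << 24) |
++         ((ulonglong) (counter & 0xFFFFFFUL));
++}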
+diff -urN mysql-old/sql/item_func.h mysql/sql/item_func.h
+--- mysql-old/sql/item_func.h	2011-05-10 17:45:45.640015709 +0000
++++ mysql/sql/item_func.h	2011-05-10 17:56:01.346682377 +0000
+@@ -420,7 +420,7 @@
+   const char *func_name() const { return "cast_as_unsigned"; }
+   void fix_length_and_dec()
+   {
+-    max_length= min(args[0]->max_length, DECIMAL_MAX_PRECISION + 2);
++    max_length= MYSQL_MIN(args[0]->max_length, DECIMAL_MAX_PRECISION + 2);
+     unsigned_flag=1;
+   }
+   longlong val_int();
+diff -urN mysql-old/sql/item_strfunc.cc mysql/sql/item_strfunc.cc
+--- mysql-old/sql/item_strfunc.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/item_strfunc.cc	2011-05-10 17:56:01.350015710 +0000
+@@ -388,7 +388,7 @@
+           }
+           else
+           {
+-            uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
++            uint new_len = MYSQL_MAX(tmp_value.alloced_length() * 2, concat_len);
+ 
+             if (tmp_value.realloc(new_len))
+               goto null;
+@@ -749,7 +749,7 @@
+         }
+         else
+         {
+-          uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
++          uint new_len = MYSQL_MAX(tmp_value.alloced_length() * 2, concat_len);
+ 
+           if (tmp_value.realloc(new_len))
+             goto null;
+@@ -1250,7 +1250,7 @@
+ 
+   length= res->charpos((int) length, (uint32) start);
+   tmp_length= res->length() - start;
+-  length= min(length, tmp_length);
++  length= MYSQL_MIN(length, tmp_length);
+ 
+   if (!start && (longlong) res->length() == length)
+     return res;
+@@ -1270,7 +1270,7 @@
+     if (start < 0)
+       max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start);
+     else
+-      max_length-= min((uint)(start - 1), max_length);
++      max_length-= MYSQL_MIN((uint)(start - 1), max_length);
+   }
+   if (arg_count == 3 && args[2]->const_item())
+   {
+@@ -1961,7 +1961,7 @@
+   if ((null_value= args[0]->null_value))
+     return 0; /* purecov: inspected */
+ 
+-  if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen)))
++  if (tmp_value.alloc(MYSQL_MAX(res->length(), 4 * cs->mbminlen)))
+     return str; /* purecov: inspected */
+   char *to= (char *) tmp_value.ptr();
+   char *to_end= to + tmp_value.alloced_length();
+@@ -3113,11 +3113,11 @@
+ 
+ void Item_func_export_set::fix_length_and_dec()
+ {
+-  uint length=max(args[1]->max_length,args[2]->max_length);
++  uint length=MYSQL_MAX(args[1]->max_length,args[2]->max_length);
+   uint sep_length=(arg_count > 3 ? args[3]->max_length : 1);
+   max_length=length*64+sep_length*63;
+ 
+-  if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1,
++  if (agg_arg_charsets(collation, args+1, MYSQL_MIN(4,arg_count)-1,
+                        MY_COLL_ALLOW_CONV, 1))
+     return;
+ }
+@@ -3580,7 +3580,7 @@
+       /*
+         -1 so we won't make tv= uuid_time for nanoseq >= (tv - uuid_time)
+       */
+-      ulong delta= min(nanoseq, (ulong) (tv - uuid_time -1));
++      ulong delta= MYSQL_MIN(nanoseq, (ulong) (tv - uuid_time -1));
+       tv-= delta;
+       nanoseq-= delta;
+     }
+diff -urN mysql-old/sql/item_strfunc.cc.orig mysql/sql/item_strfunc.cc.orig
+--- mysql-old/sql/item_strfunc.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/item_strfunc.cc.orig	2011-04-12 12:11:38.000000000 +0000
+@@ -0,0 +1,3643 @@
++/* Copyright (C) 2000-2006 MySQL AB
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++
++/**
++  @file
++
++  @brief
++  This file defines all string functions
++
++  @warning
++    Some string functions don't always put an end-null on a String.
++    (This shouldn't be needed)
++*/
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation				// gcc: Class implementation
++#endif
++
++#include "mysql_priv.h"
++#include <m_ctype.h>
++#include "my_md5.h"
++#include "sha1.h"
++#include "my_aes.h"
++#include <zlib.h>
++C_MODE_START
++#include "../mysys/my_static.h"			// For soundex_map
++C_MODE_END
++
++/**
++   @todo Remove this. It is not safe to use a shared String object.
++ */
++String my_empty_string("",default_charset_info);
++
++
++/*
++  Convert an array of bytes to a hexadecimal representation.
++
++  Used to generate a hexadecimal representation of a message digest.
++*/
++static void array_to_hex(char *to, const char *str, uint len)
++{
++  const char *str_end= str + len;
++  for (; str != str_end; ++str)
++  {
++    *to++= _dig_vec_lower[((uchar) *str) >> 4];
++    *to++= _dig_vec_lower[((uchar) *str) & 0x0F];
++  }
++}
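++
++/*
++  A self-contained variant of the helper above, for reference: the same
++  nibble-by-nibble expansion, but using a local digit table instead of
++  the shared _dig_vec_lower array.  Like the function above, it does not
++  NUL-terminate the output.
++*/
++static void array_to_hex_sketch(char *to, const uchar *str, uint len)
++{
++  static const char digits[]= "0123456789abcdef";
++  for (uint i= 0; i < len; i++)
++  {
++    *to++= digits[str[i] >> 4];
++    *to++= digits[str[i] & 0x0F];
++  }
++}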
++
++
++bool Item_str_func::fix_fields(THD *thd, Item **ref)
++{
++  bool res= Item_func::fix_fields(thd, ref);
++  /*
++    In Item_str_func::check_well_formed_result() we may set null_value
++    flag on the same condition as in test() below.
++  */
++  maybe_null= (maybe_null ||
++               test(thd->variables.sql_mode &
++                    (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)));
++  return res;
++}
++
++
++my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value)
++{
++  DBUG_ASSERT(fixed == 1);
++  char buff[64];
++  String *res, tmp(buff,sizeof(buff), &my_charset_bin);
++  res= val_str(&tmp);
++  if (!res)
++    return 0;
++  (void)str2my_decimal(E_DEC_FATAL_ERROR, (char*) res->ptr(),
++                       res->length(), res->charset(), decimal_value);
++  return decimal_value;
++}
++
++
++double Item_str_func::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  int err_not_used;
++  char *end_not_used, buff[64];
++  String *res, tmp(buff,sizeof(buff), &my_charset_bin);
++  res= val_str(&tmp);
++  return res ? my_strntod(res->charset(), (char*) res->ptr(), res->length(),
++			  &end_not_used, &err_not_used) : 0.0;
++}
++
++
++longlong Item_str_func::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  int err;
++  char buff[22];
++  String *res, tmp(buff,sizeof(buff), &my_charset_bin);
++  res= val_str(&tmp);
++  return (res ?
++	  my_strntoll(res->charset(), res->ptr(), res->length(), 10, NULL,
++		      &err) :
++	  (longlong) 0);
++}
++
++
++String *Item_func_md5::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String * sptr= args[0]->val_str(str);
++  str->set_charset(&my_charset_bin);
++  if (sptr)
++  {
++    uchar digest[16];
++
++    null_value=0;
++    MY_MD5_HASH(digest,(uchar *) sptr->ptr(), sptr->length());
++    if (str->alloc(32))				// Ensure that memory is free
++    {
++      null_value=1;
++      return 0;
++    }
++    array_to_hex((char *) str->ptr(), (const char*) digest, 16);
++    str->length((uint) 32);
++    return str;
++  }
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_md5::fix_length_and_dec()
++{
++  max_length=32;
++  /*
++    The MD5() function treats its parameter as being case sensitive. Thus
++    we set binary collation on it so different instances of MD5() will be
++    compared properly.
++  */
++  args[0]->collation.set(
++      get_charset_by_csname(args[0]->collation.collation->csname,
++                            MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE);
++}
++
++
++String *Item_func_sha::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String * sptr= args[0]->val_str(str);
++  str->set_charset(&my_charset_bin);
++  if (sptr)  /* If we got value different from NULL */
++  {
++    SHA1_CONTEXT context;  /* Context used to generate SHA1 hash */
++    /* Temporary buffer to store 160bit digest */
++    uint8 digest[SHA1_HASH_SIZE];
++    mysql_sha1_reset(&context);  /* We do not have to check for error here */
++    /* No need to check error as the only case would be too long message */
++    mysql_sha1_input(&context,
++                     (const uchar *) sptr->ptr(), sptr->length());
++    /* Ensure that memory is free and we got result */
++    if (!( str->alloc(SHA1_HASH_SIZE*2) ||
++           (mysql_sha1_result(&context,digest))))
++    {
++      array_to_hex((char *) str->ptr(), (const char*) digest, SHA1_HASH_SIZE);
++      str->length((uint)  SHA1_HASH_SIZE*2);
++      null_value=0;
++      return str;
++    }
++  }
++  null_value=1;
++  return 0;
++}
++
++void Item_func_sha::fix_length_and_dec()
++{
++  max_length=SHA1_HASH_SIZE*2; // size of hex representation of hash
++  /*
++    The SHA() function treats its parameter as being case sensitive. Thus
++    we set binary collation on it so different instances of SHA() will be
++    compared properly.
++  */
++  args[0]->collation.set(
++      get_charset_by_csname(args[0]->collation.collation->csname,
++                            MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE);
++}
++
++
++/* Implementation of AES encryption routines */
++
++String *Item_func_aes_encrypt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char key_buff[80];
++  String tmp_key_value(key_buff, sizeof(key_buff), system_charset_info);
++  String *sptr= args[0]->val_str(str);			// String to encrypt
++  String *key=  args[1]->val_str(&tmp_key_value);	// key
++  int aes_length;
++  if (sptr && key) // we need both arguments to be not NULL
++  {
++    null_value=0;
++    aes_length=my_aes_get_size(sptr->length()); // Calculate result length
++
++    if (!str_value.alloc(aes_length))		// Ensure that memory is free
++    {
++      // finally encrypt directly to allocated buffer.
++      if (my_aes_encrypt(sptr->ptr(),sptr->length(), (char*) str_value.ptr(),
++			 key->ptr(), key->length()) == aes_length)
++      {
++	// We got the expected result length
++	str_value.length((uint) aes_length);
++	return &str_value;
++      }
++    }
++  }
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_aes_encrypt::fix_length_and_dec()
++{
++  max_length=my_aes_get_size(args[0]->max_length);
++}
++
++
++String *Item_func_aes_decrypt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char key_buff[80];
++  String tmp_key_value(key_buff, sizeof(key_buff), system_charset_info);
++  String *sptr, *key;
++  DBUG_ENTER("Item_func_aes_decrypt::val_str");
++
++  sptr= args[0]->val_str(str);			// String to decrypt
++  key=  args[1]->val_str(&tmp_key_value);	// Key
++  if (sptr && key)  			// Need to have both arguments not NULL
++  {
++    null_value=0;
++    if (!str_value.alloc(sptr->length()))  // Ensure that memory is free
++    {
++      // finally decrypt directly to allocated buffer.
++      int length;
++      length=my_aes_decrypt(sptr->ptr(), sptr->length(),
++			    (char*) str_value.ptr(),
++                            key->ptr(), key->length());
++      if (length >= 0)  // if we got correct data
++      {
++        str_value.length((uint) length);
++        DBUG_RETURN(&str_value);
++      }
++    }
++  }
++  // Bad parameters. No memory or bad data will all go here
++  null_value=1;
++  DBUG_RETURN(0);
++}
++
++
++void Item_func_aes_decrypt::fix_length_and_dec()
++{
++   max_length=args[0]->max_length;
++   maybe_null= 1;
++}
++
++
++/**
++  Concatenate args with the following premises:
++  If only one arg (which is ok), return value of arg;
++  Don't reallocate val_str() if not absolutely necessary.
++*/
++
++String *Item_func_concat::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res,*res2,*use_as_buff;
++  uint i;
++  bool is_const= 0;
++
++  null_value=0;
++  if (!(res=args[0]->val_str(str)))
++    goto null;
++  use_as_buff= &tmp_value;
++  /* Item_subselect in --ps-protocol mode will state it as a non-const */
++  is_const= args[0]->const_item() || !args[0]->used_tables();
++  for (i=1 ; i < arg_count ; i++)
++  {
++    if (res->length() == 0)
++    {
++      if (!(res=args[i]->val_str(str)))
++	goto null;
++      /*
++       CONCAT accumulates its result in the result of its first
++       non-empty argument. Because of this we need is_const to be 
++       evaluated only for it.
++      */
++      is_const= args[i]->const_item() || !args[i]->used_tables();
++    }
++    else
++    {
++      if (!(res2=args[i]->val_str(use_as_buff)))
++	goto null;
++      if (res2->length() == 0)
++	continue;
++      if (res->length()+res2->length() >
++	  current_thd->variables.max_allowed_packet)
++      {
++	push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			    ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			    ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
++			    current_thd->variables.max_allowed_packet);
++	goto null;
++      }
++      if (!is_const && res->alloced_length() >= res->length()+res2->length())
++      {						// Use old buffer
++	res->append(*res2);
++      }
++      else if (str->alloced_length() >= res->length()+res2->length())
++      {
++	if (str->ptr() == res2->ptr())
++	  str->replace(0,0,*res);
++	else
++	{
++	  str->copy(*res);
++	  str->append(*res2);
++	}
++        res= str;
++        use_as_buff= &tmp_value;
++      }
++      else if (res == &tmp_value)
++      {
++	if (res->append(*res2))			// Must be a blob
++	  goto null;
++      }
++      else if (res2 == &tmp_value)
++      {						// This can happen only 1 time
++	if (tmp_value.replace(0,0,*res))
++	  goto null;
++	res= &tmp_value;
++	use_as_buff=str;			// Put next arg here
++      }
++      else if (tmp_value.is_alloced() && res2->ptr() >= tmp_value.ptr() &&
++	       res2->ptr() <= tmp_value.ptr() + tmp_value.alloced_length())
++      {
++	/*
++	  This happens really seldom:
++	  In this case res2 is sub string of tmp_value.  We will
++	  now work in place in tmp_value to set it to res | res2
++	*/
++	/* Chop the last characters in tmp_value that aren't in res2 */
++	tmp_value.length((uint32) (res2->ptr() - tmp_value.ptr()) +
++			 res2->length());
++	/* Place res2 at start of tmp_value, remove chars before res2 */
++	if (tmp_value.replace(0,(uint32) (res2->ptr() - tmp_value.ptr()),
++			      *res))
++	  goto null;
++	res= &tmp_value;
++	use_as_buff=str;			// Put next arg here
++      }
++      else
++      {						// Two big const strings
++        /*
++          NOTE: We should be prudent in the initial allocation unit -- the
++          size of the arguments is a function of data distribution, which
++          can be any. Instead of overcommitting at the first row, we grow
++          the allocated amount by the factor of 2. This ensures that no
++          more than 25% of memory will be overcommitted on average.
++        */
++
++        uint concat_len= res->length() + res2->length();
++
++        if (tmp_value.alloced_length() < concat_len)
++        {
++          if (tmp_value.alloced_length() == 0)
++          {
++            if (tmp_value.alloc(concat_len))
++              goto null;
++          }
++          else
++          {
++            uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
++
++            if (tmp_value.realloc(new_len))
++              goto null;
++          }
++        }
++
++	if (tmp_value.copy(*res) || tmp_value.append(*res2))
++	  goto null;
++
++	res= &tmp_value;
++	use_as_buff=str;
++      }
++      is_const= 0;
++    }
++  }
++  res->set_charset(collation.collation);
++  return res;
++
++null:
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_concat::fix_length_and_dec()
++{
++  ulonglong max_result_length= 0;
++
++  if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
++    return;
++
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    if (args[i]->collation.collation->mbmaxlen != collation.collation->mbmaxlen)
++      max_result_length+= (args[i]->max_length /
++                           args[i]->collation.collation->mbmaxlen) *
++                           collation.collation->mbmaxlen;
++    else
++      max_result_length+= args[i]->max_length;
++  }
++
++  if (max_result_length >= MAX_BLOB_WIDTH)
++  {
++    max_result_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++  max_length= (ulong) max_result_length;
++}
++
++/**
++  @details
++  Function des_encrypt() by tonu@spam.ee & monty
++  Works only if compiled with OpenSSL library support.
++  @return
++    A binary string where first character is CHAR(128 | key-number).
++    If one uses a string key key_number is 127.
++    Encryption result is longer than original by formula:
++  @code new_length= org_length + (8-(org_length % 8))+1 @endcode
++*/
++
++String *Item_func_des_encrypt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++#ifdef HAVE_OPENSSL
++  uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE;
++  DES_cblock ivec;
++  struct st_des_keyblock keyblock;
++  struct st_des_keyschedule keyschedule;
++  const char *append_str="********";
++  uint key_number, res_length, tail;
++  String *res= args[0]->val_str(str);
++
++  if ((null_value= args[0]->null_value))
++    return 0;                                   // ENCRYPT(NULL) == NULL
++  if ((res_length=res->length()) == 0)
++    return make_empty_result();
++
++  if (arg_count == 1)
++  {
++    /* Protect against someone doing FLUSH DES_KEY_FILE */
++    VOID(pthread_mutex_lock(&LOCK_des_key_file));
++    keyschedule= des_keyschedule[key_number=des_default_key];
++    VOID(pthread_mutex_unlock(&LOCK_des_key_file));
++  }
++  else if (args[1]->result_type() == INT_RESULT)
++  {
++    key_number= (uint) args[1]->val_int();
++    if (key_number > 9)
++      goto error;
++    VOID(pthread_mutex_lock(&LOCK_des_key_file));
++    keyschedule= des_keyschedule[key_number];
++    VOID(pthread_mutex_unlock(&LOCK_des_key_file));
++  }
++  else
++  {
++    String *keystr=args[1]->val_str(&tmp_value);
++    if (!keystr)
++      goto error;
++    key_number=127;				// User key string
++
++    /* We make good 24-byte (168 bit) key from given plaintext key with MD5 */
++    bzero((char*) &ivec,sizeof(ivec));
++    EVP_BytesToKey(EVP_des_ede3_cbc(),EVP_md5(),NULL,
++		   (uchar*) keystr->ptr(), (int) keystr->length(),
++		   1, (uchar*) &keyblock,ivec);
++    DES_set_key_unchecked(&keyblock.key1,&keyschedule.ks1);
++    DES_set_key_unchecked(&keyblock.key2,&keyschedule.ks2);
++    DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3);
++  }
++
++  /*
++     The problem: DES algorithm requires original data to be in 8-bytes
++     chunks. Missing bytes get filled with '*'s and result of encryption
++     can be up to 8 bytes longer than original string. When decrypted,
++     we do not know the size of original string :(
++     We add one byte with value 0x1..0x8 as the last byte of the padded
++     string marking change of string length.
++  */
++
++  tail= 8 - (res_length % 8);                   // 1..8 marking extra length
++  res_length+=tail;
++  tmp_arg.realloc(res_length);
++  tmp_arg.length(0);
++  tmp_arg.append(res->ptr(), res->length());
++  code= ER_OUT_OF_RESOURCES;
++  if (tmp_arg.append(append_str, tail) || tmp_value.alloc(res_length+1))
++    goto error;
++  tmp_arg[res_length-1]=tail;                   // save extra length
++  tmp_value.realloc(res_length+1);
++  tmp_value.length(res_length+1);
++  tmp_value.set_charset(&my_charset_bin);
++  tmp_value[0]=(char) (128 | key_number);
++  // Real encryption
++  bzero((char*) &ivec,sizeof(ivec));
++  DES_ede3_cbc_encrypt((const uchar*) (tmp_arg.ptr()),
++		       (uchar*) (tmp_value.ptr()+1),
++		       res_length,
++		       &keyschedule.ks1,
++		       &keyschedule.ks2,
++		       &keyschedule.ks3,
++		       &ivec, TRUE);
++  return &tmp_value;
++
++error:
++  push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++                          code, ER(code),
++                          "des_encrypt");
++#else
++  push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++                      ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
++                      "des_encrypt","--with-openssl");
++#endif	/* HAVE_OPENSSL */
++  null_value=1;
++  return 0;
++}
++
++
++String *Item_func_des_decrypt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++#ifdef HAVE_OPENSSL
++  uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE;
++  DES_cblock ivec;
++  struct st_des_keyblock keyblock;
++  struct st_des_keyschedule keyschedule;
++  String *res= args[0]->val_str(str);
++  uint length,tail;
++
++  if ((null_value= args[0]->null_value))
++    return 0;
++  length= res->length();
++  if (length < 9 || (length % 8) != 1 || !((*res)[0] & 128))
++    return res;				// Skip decryption if not encrypted
++
++  if (arg_count == 1)			// If automatic uncompression
++  {
++    uint key_number=(uint) (*res)[0] & 127;
++    // Check if automatic key and that we have privilege to uncompress using it
++    if (!(current_thd->security_ctx->master_access & SUPER_ACL) ||
++        key_number > 9)
++      goto error;
++
++    VOID(pthread_mutex_lock(&LOCK_des_key_file));
++    keyschedule= des_keyschedule[key_number];
++    VOID(pthread_mutex_unlock(&LOCK_des_key_file));
++  }
++  else
++  {
++    // We make good 24-byte (168 bit) key from given plaintext key with MD5
++    String *keystr=args[1]->val_str(&tmp_value);
++    if (!keystr)
++      goto error;
++
++    bzero((char*) &ivec,sizeof(ivec));
++    EVP_BytesToKey(EVP_des_ede3_cbc(),EVP_md5(),NULL,
++		   (uchar*) keystr->ptr(),(int) keystr->length(),
++		   1,(uchar*) &keyblock,ivec);
++    // Here we set all 64-bit keys (56 effective) one by one
++    DES_set_key_unchecked(&keyblock.key1,&keyschedule.ks1);
++    DES_set_key_unchecked(&keyblock.key2,&keyschedule.ks2);
++    DES_set_key_unchecked(&keyblock.key3,&keyschedule.ks3);
++  }
++  code= ER_OUT_OF_RESOURCES;
++  if (tmp_value.alloc(length-1))
++    goto error;
++
++  bzero((char*) &ivec,sizeof(ivec));
++  DES_ede3_cbc_encrypt((const uchar*) res->ptr()+1,
++		       (uchar*) (tmp_value.ptr()),
++		       length-1,
++		       &keyschedule.ks1,
++		       &keyschedule.ks2,
++		       &keyschedule.ks3,
++		       &ivec, FALSE);
++  /* Restore old length of key */
++  if ((tail=(uint) (uchar) tmp_value[length-2]) > 8)
++    goto wrong_key;				     // Wrong key
++  tmp_value.length(length-1-tail);
++  tmp_value.set_charset(&my_charset_bin);
++  return &tmp_value;
++
++error:
++  push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++                          code, ER(code),
++                          "des_decrypt");
++wrong_key:
++#else
++  push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++                      ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
++                      "des_decrypt","--with-openssl");
++#endif	/* HAVE_OPENSSL */
++  null_value=1;
++  return 0;
++}
++
++
++/**
++  concat with separator. First arg is the separator
++  concat_ws takes at least two arguments.
++*/
++
++String *Item_func_concat_ws::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char tmp_str_buff[10];
++  String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info),
++         *sep_str, *res, *res2,*use_as_buff;
++  uint i;
++  bool is_const= 0;
++
++  null_value=0;
++  if (!(sep_str= args[0]->val_str(&tmp_sep_str)))
++    goto null;
++
++  use_as_buff= &tmp_value;
++  str->length(0);				// QQ; Should be removed
++  res=str;
++
++  // Skip until non-null argument is found.
++  // If not, return the empty string
++  for (i=1; i < arg_count; i++)
++    if ((res= args[i]->val_str(str)))
++    {
++      is_const= args[i]->const_item() || !args[i]->used_tables();
++      break;
++    }
++
++  if (i ==  arg_count)
++    return make_empty_result();
++
++  for (i++; i < arg_count ; i++)
++  {
++    if (!(res2= args[i]->val_str(use_as_buff)))
++      continue;					// Skip NULL
++
++    if (res->length() + sep_str->length() + res2->length() >
++	current_thd->variables.max_allowed_packet)
++    {
++      push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			  ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			  ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
++			  current_thd->variables.max_allowed_packet);
++      goto null;
++    }
++    if (!is_const && res->alloced_length() >=
++	res->length() + sep_str->length() + res2->length())
++    {						// Use old buffer
++      res->append(*sep_str);			// res->length() > 0 always
++      res->append(*res2);
++    }
++    else if (str->alloced_length() >=
++	     res->length() + sep_str->length() + res2->length())
++    {
++      /* We have room in str;  We can't get any errors here */
++      if (str->ptr() == res2->ptr())
++      {						// This is quite uncommon!
++	str->replace(0,0,*sep_str);
++	str->replace(0,0,*res);
++      }
++      else
++      {
++	str->copy(*res);
++	str->append(*sep_str);
++	str->append(*res2);
++      }
++      res=str;
++      use_as_buff= &tmp_value;
++    }
++    else if (res == &tmp_value)
++    {
++      if (res->append(*sep_str) || res->append(*res2))
++	goto null; // Must be a blob
++    }
++    else if (res2 == &tmp_value)
++    {						// This can happen only 1 time
++      if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
++	goto null;
++      res= &tmp_value;
++      use_as_buff=str;				// Put next arg here
++    }
++    else if (tmp_value.is_alloced() && res2->ptr() >= tmp_value.ptr() &&
++	     res2->ptr() < tmp_value.ptr() + tmp_value.alloced_length())
++    {
++      /*
++	This happens really seldom:
++	In this case res2 is sub string of tmp_value.  We will
++	now work in place in tmp_value to set it to res | sep_str | res2
++      */
++      /* Chop the last characters in tmp_value that aren't in res2 */
++      tmp_value.length((uint32) (res2->ptr() - tmp_value.ptr()) +
++		       res2->length());
++      /* Place res2 at start of tmp_value, remove chars before res2 */
++      if (tmp_value.replace(0,(uint32) (res2->ptr() - tmp_value.ptr()),
++			    *res) ||
++	  tmp_value.replace(res->length(),0, *sep_str))
++	goto null;
++      res= &tmp_value;
++      use_as_buff=str;			// Put next arg here
++    }
++    else
++    {						// Two big const strings
++      /*
++        NOTE: We should be prudent in the initial allocation unit -- the
++        size of the arguments is a function of data distribution, which can
++        be any. Instead of overcommitting at the first row, we grow the
++        allocated amount by the factor of 2. This ensures that no more than
++        25% of memory will be overcommitted on average.
++      */
++
++      uint concat_len= res->length() + sep_str->length() + res2->length();
++
++      if (tmp_value.alloced_length() < concat_len)
++      {
++        if (tmp_value.alloced_length() == 0)
++        {
++          if (tmp_value.alloc(concat_len))
++            goto null;
++        }
++        else
++        {
++          uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
++
++          if (tmp_value.realloc(new_len))
++            goto null;
++        }
++      }
++
++      if (tmp_value.copy(*res) ||
++	  tmp_value.append(*sep_str) ||
++	  tmp_value.append(*res2))
++	goto null;
++      res= &tmp_value;
++      use_as_buff=str;
++    }
++  }
++  res->set_charset(collation.collation);
++  return res;
++
++null:
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_concat_ws::fix_length_and_dec()
++{
++  ulonglong max_result_length;
++
++  if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
++    return;
++
++  /*
++     arg_count cannot be less than 2,
++     it is done on parser level in sql_yacc.yy
++     so, (arg_count - 2) is safe here.
++  */
++  max_result_length= (ulonglong) args[0]->max_length * (arg_count - 2);
++  for (uint i=1 ; i < arg_count ; i++)
++    max_result_length+=args[i]->max_length;
++
++  if (max_result_length >= MAX_BLOB_WIDTH)
++  {
++    max_result_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++  max_length= (ulong) max_result_length;
++}
++
++
++String *Item_func_reverse::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res = args[0]->val_str(str);
++  char *ptr, *end, *tmp;
++
++  if ((null_value=args[0]->null_value))
++    return 0;
++  /* An empty string is a special case as the string pointer may be null */
++  if (!res->length())
++    return make_empty_result();
++  if (tmp_value.alloced_length() < res->length() &&
++      tmp_value.realloc(res->length()))
++  {
++    null_value= 1;
++    return 0;
++  }
++  tmp_value.length(res->length());
++  tmp_value.set_charset(res->charset());
++  ptr= (char *) res->ptr();
++  end= ptr + res->length();
++  tmp= (char *) tmp_value.ptr() + tmp_value.length();
++#ifdef USE_MB
++  if (use_mb(res->charset()))
++  {
++    register uint32 l;
++    while (ptr < end)
++    {
++      if ((l= my_ismbchar(res->charset(),ptr,end)))
++      {
++        tmp-= l;
++        memcpy(tmp,ptr,l);
++        ptr+= l;
++      }
++      else
++        *--tmp= *ptr++;
++    }
++  }
++  else
++#endif /* USE_MB */
++  {
++    while (ptr < end)
++      *--tmp= *ptr++;
++  }
++  return &tmp_value;
++}
++
++
++void Item_func_reverse::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  max_length = args[0]->max_length;
++}
++
++/**
++  Replace all occurrences of string2 in string1 with string3.
++
++  Don't reallocate val_str() if not needed.
++
++  @todo
++    Fix that this works with binary strings when using USE_MB 
++*/
++
++String *Item_func_replace::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res,*res2,*res3;
++  int offset;
++  uint from_length,to_length;
++  bool alloced=0;
++#ifdef USE_MB
++  const char *ptr,*end,*strend,*search,*search_end;
++  register uint32 l;
++  bool binary_cmp;
++#endif
++
++  null_value=0;
++  res=args[0]->val_str(str);
++  if (args[0]->null_value)
++    goto null;
++  res2=args[1]->val_str(&tmp_value);
++  if (args[1]->null_value)
++    goto null;
++
++  res->set_charset(collation.collation);
++
++#ifdef USE_MB
++  binary_cmp = ((res->charset()->state & MY_CS_BINSORT) || !use_mb(res->charset()));
++#endif
++
++  if (res2->length() == 0)
++    return res;
++#ifndef USE_MB
++  if ((offset=res->strstr(*res2)) < 0)
++    return res;
++#else
++  offset=0;
++  if (binary_cmp && (offset=res->strstr(*res2)) < 0)
++    return res;
++#endif
++  if (!(res3=args[2]->val_str(&tmp_value2)))
++    goto null;
++  from_length= res2->length();
++  to_length=   res3->length();
++
++#ifdef USE_MB
++  if (!binary_cmp)
++  {
++    search=res2->ptr();
++    search_end=search+from_length;
++redo:
++    DBUG_ASSERT(res->ptr() || !offset);
++    ptr=res->ptr()+offset;
++    strend=res->ptr()+res->length();
++    /*
++      In some cases val_str() can return empty string
++      with ptr() == NULL and length() == 0.
++      Let's check strend to avoid overflow.
++    */
++    end= strend ? strend - from_length + 1 : NULL;
++    while (ptr < end)
++    {
++        if (*ptr == *search)
++        {
++          register char *i,*j;
++          i=(char*) ptr+1; j=(char*) search+1;
++          while (j != search_end)
++            if (*i++ != *j++) goto skip;
++          offset= (int) (ptr-res->ptr());
++          if (res->length()-from_length + to_length >
++	      current_thd->variables.max_allowed_packet)
++	  {
++	    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++				ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++				ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++				func_name(),
++				current_thd->variables.max_allowed_packet);
++
++            goto null;
++	  }
++          if (!alloced)
++          {
++            alloced=1;
++            res=copy_if_not_alloced(str,res,res->length()+to_length);
++          }
++          res->replace((uint) offset,from_length,*res3);
++	  offset+=(int) to_length;
++          goto redo;
++        }
++skip:
++        if ((l=my_ismbchar(res->charset(), ptr,strend))) ptr+=l;
++        else ++ptr;
++    }
++  }
++  else
++#endif /* USE_MB */
++    do
++    {
++      if (res->length()-from_length + to_length >
++	  current_thd->variables.max_allowed_packet)
++      {
++	push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			    ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			    ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
++			    current_thd->variables.max_allowed_packet);
++        goto null;
++      }
++      if (!alloced)
++      {
++        alloced=1;
++        res=copy_if_not_alloced(str,res,res->length()+to_length);
++      }
++      res->replace((uint) offset,from_length,*res3);
++      offset+=(int) to_length;
++    }
++    while ((offset=res->strstr(*res2,(uint) offset)) >= 0);
++  return res;
++
++null:
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_replace::fix_length_and_dec()
++{
++  ulonglong max_result_length= args[0]->max_length;
++  int diff=(int) (args[2]->max_length - args[1]->max_length);
++  if (diff > 0 && args[1]->max_length)
++  {						// Calculate of maxreplaces
++    ulonglong max_substrs= max_result_length/args[1]->max_length;
++    max_result_length+= max_substrs * (uint) diff;
++  }
++  if (max_result_length >= MAX_BLOB_WIDTH)
++  {
++    max_result_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++  max_length= (ulong) max_result_length;
++  
++  if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV, 1))
++    return;
++}
++
++
++String *Item_func_insert::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res,*res2;
++  longlong start, length;  /* must be longlong to avoid truncation */
++
++  null_value=0;
++  res=args[0]->val_str(str);
++  res2=args[3]->val_str(&tmp_value);
++  start= args[1]->val_int() - 1;
++  length= args[2]->val_int();
++
++  if (args[0]->null_value || args[1]->null_value || args[2]->null_value ||
++      args[3]->null_value)
++    goto null; /* purecov: inspected */
++
++  if ((start < 0) || (start > res->length()))
++    return res;                                 // Wrong param; skip insert
++  if ((length < 0) || (length > res->length()))
++    length= res->length();
++
++  /*
++    There is one exception not handled (intentionally) by the character set
++    aggregation code. If one string is strong side and is binary, and
++    another one is weak side and is a multi-byte character string,
++    then we need to operate on the second string in terms of bytes when
++    calling ::numchars() and ::charpos(), rather than in terms of characters.
++    Let's switch its character set to binary.
++  */
++  if (collation.collation == &my_charset_bin)
++  {
++    res->set_charset(&my_charset_bin);
++    res2->set_charset(&my_charset_bin);
++  }
++
++  /* start and length are now sufficiently valid to pass to charpos function */
++   start= res->charpos((int) start);
++   length= res->charpos((int) length, (uint32) start);
++
++  /* Re-testing with corrected params */
++  if (start > res->length())
++    return res; /* purecov: inspected */        // Wrong param; skip insert
++  if (length > res->length() - start)
++    length= res->length() - start;
++
++  if ((ulonglong) (res->length() - length + res2->length()) >
++      (ulonglong) current_thd->variables.max_allowed_packet)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++			func_name(), current_thd->variables.max_allowed_packet);
++    goto null;
++  }
++  res=copy_if_not_alloced(str,res,res->length());
++  res->replace((uint32) start,(uint32) length,*res2);
++  return res;
++null:
++  null_value=1;
++  return 0;
++}
++
++
++void Item_func_insert::fix_length_and_dec()
++{
++  ulonglong max_result_length;
++
++  // Handle character set for args[0] and args[3].
++  if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 3))
++    return;
++  max_result_length= ((ulonglong) args[0]->max_length+
++                      (ulonglong) args[3]->max_length);
++  if (max_result_length >= MAX_BLOB_WIDTH)
++  {
++    max_result_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++  max_length= (ulong) max_result_length;
++}
++
++
++String *Item_str_conv::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res;
++  if (!(res=args[0]->val_str(str)))
++  {
++    null_value=1; /* purecov: inspected */
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  if (multiply == 1)
++  {
++    uint len;
++    res= copy_if_not_alloced(str,res,res->length());
++    len= converter(collation.collation, (char*) res->ptr(), res->length(),
++                                        (char*) res->ptr(), res->length());
++    DBUG_ASSERT(len <= res->length());
++    res->length(len);
++  }
++  else
++  {
++    uint len= res->length() * multiply;
++    tmp_value.alloc(len);
++    tmp_value.set_charset(collation.collation);
++    len= converter(collation.collation, (char*) res->ptr(), res->length(),
++                                        (char*) tmp_value.ptr(), len);
++    tmp_value.length(len);
++    res= &tmp_value;
++  }
++  return res;
++}
++
++
++void Item_func_lcase::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  multiply= collation.collation->casedn_multiply;
++  converter= collation.collation->cset->casedn;
++  max_length= args[0]->max_length * multiply;
++}
++
++void Item_func_ucase::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  multiply= collation.collation->caseup_multiply;
++  converter= collation.collation->cset->caseup;
++  max_length= args[0]->max_length * multiply;
++}
++
++
++String *Item_func_left::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++
++  /* must be longlong to avoid truncation */
++  longlong length= args[1]->val_int();
++  uint char_pos;
++
++  if ((null_value=(args[0]->null_value || args[1]->null_value)))
++    return 0;
++
++  /* if "unsigned_flag" is set, we have a *huge* positive number. */
++  if ((length <= 0) && (!args[1]->unsigned_flag))
++    return make_empty_result();
++  if ((res->length() <= (ulonglong) length) ||
++      (res->length() <= (char_pos= res->charpos((int) length))))
++    return res;
++
++  tmp_value.set(*res, 0, char_pos);
++  return &tmp_value;
++}
++
++
++void Item_str_func::left_right_max_length()
++{
++  max_length=args[0]->max_length;
++  if (args[1]->const_item())
++  {
++    int length=(int) args[1]->val_int()*collation.collation->mbmaxlen;
++    if (length <= 0)
++      max_length=0;
++    else
++      set_if_smaller(max_length,(uint) length);
++  }
++}
++
++
++void Item_func_left::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  left_right_max_length();
++}
++
++
++String *Item_func_right::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++  /* must be longlong to avoid truncation */
++  longlong length= args[1]->val_int();
++
++  if ((null_value=(args[0]->null_value || args[1]->null_value)))
++    return 0; /* purecov: inspected */
++
++  /* if "unsigned_flag" is set, we have a *huge* positive number. */
++  if ((length <= 0) && (!args[1]->unsigned_flag))
++    return make_empty_result(); /* purecov: inspected */
++
++  if (res->length() <= (ulonglong) length)
++    return res; /* purecov: inspected */
++
++  uint start=res->numchars();
++  if (start <= (uint) length)
++    return res;
++  start=res->charpos(start - (uint) length);
++  tmp_value.set(*res,start,res->length()-start);
++  return &tmp_value;
++}
++
++
++void Item_func_right::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  left_right_max_length();
++}
++
++
++String *Item_func_substr::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res  = args[0]->val_str(str);
++  /* must be longlong to avoid truncation */
++  longlong start= args[1]->val_int();
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  /* Limit so that code sees out-of-bound value properly. */
++  longlong length= arg_count == 3 ? args[2]->val_int() : INT_MAX32;
++  longlong tmp_length;
++
++  if ((null_value=(args[0]->null_value || args[1]->null_value ||
++		   (arg_count == 3 && args[2]->null_value))))
++    return 0; /* purecov: inspected */
++
++  /* Negative or zero length, will return empty string. */
++  if ((arg_count == 3) && (length <= 0) && 
++      (length == 0 || !args[2]->unsigned_flag))
++    return make_empty_result();
++
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  /* Set here so that rest of code sees out-of-bound value as such. */
++  if ((length <= 0) || (length > INT_MAX32))
++    length= INT_MAX32;
++
++  /* if "unsigned_flag" is set, we have a *huge* positive number. */
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  if ((!args[1]->unsigned_flag && (start < INT_MIN32 || start > INT_MAX32)) ||
++      (args[1]->unsigned_flag && ((ulonglong) start > INT_MAX32)))
++    return make_empty_result();
++
++  start= ((start < 0) ? res->numchars() + start : start - 1);
++  start= res->charpos((int) start);
++  if ((start < 0) || ((uint) start + 1 > res->length()))
++    return make_empty_result();
++
++  length= res->charpos((int) length, (uint32) start);
++  tmp_length= res->length() - start;
++  length= min(length, tmp_length);
++
++  if (!start && (longlong) res->length() == length)
++    return res;
++  tmp_value.set(*res, (uint32) start, (uint32) length);
++  return &tmp_value;
++}
++
++
++void Item_func_substr::fix_length_and_dec()
++{
++  max_length=args[0]->max_length;
++
++  collation.set(args[0]->collation);
++  if (args[1]->const_item())
++  {
++    int32 start= (int32) args[1]->val_int();
++    if (start < 0)
++      max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start);
++    else
++      max_length-= min((uint)(start - 1), max_length);
++  }
++  if (arg_count == 3 && args[2]->const_item())
++  {
++    int32 length= (int32) args[2]->val_int();
++    if (length <= 0)
++      max_length=0; /* purecov: inspected */
++    else
++      set_if_smaller(max_length,(uint) length);
++  }
++  max_length*= collation.collation->mbmaxlen;
++}
++
++
++void Item_func_substr_index::fix_length_and_dec()
++{ 
++  max_length= args[0]->max_length;
++
++  if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV, 1))
++    return;
++}
++
++
++String *Item_func_substr_index::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++  String *delimiter= args[1]->val_str(&tmp_value);
++  int32 count= (int32) args[2]->val_int();
++  uint offset;
++
++  if (args[0]->null_value || args[1]->null_value || args[2]->null_value)
++  {					// string and/or delim are null
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  uint delimiter_length= delimiter->length();
++  if (!res->length() || !delimiter_length || !count)
++    return make_empty_result();		// Wrong parameters
++
++  res->set_charset(collation.collation);
++
++#ifdef USE_MB
++  if (use_mb(res->charset()))
++  {
++    const char *ptr= res->ptr();
++    const char *strend= ptr+res->length();
++    const char *end= strend-delimiter_length+1;
++    const char *search= delimiter->ptr();
++    const char *search_end= search+delimiter_length;
++    int32 n=0,c=count,pass;
++    register uint32 l;
++    for (pass=(count>0);pass<2;++pass)
++    {
++      while (ptr < end)
++      {
++        if (*ptr == *search)
++        {
++	  register char *i,*j;
++	  i=(char*) ptr+1; j=(char*) search+1;
++	  while (j != search_end)
++	    if (*i++ != *j++) goto skip;
++	  if (pass==0) ++n;
++	  else if (!--c) break;
++	  ptr+= delimiter_length;
++	  continue;
++	}
++    skip:
++        if ((l=my_ismbchar(res->charset(), ptr,strend))) ptr+=l;
++        else ++ptr;
++      } /* either not found or got total number when count<0 */
++      if (pass == 0) /* count<0 */
++      {
++        c+=n+1;
++        if (c<=0) return res; /* not found, return original string */
++        ptr=res->ptr();
++      }
++      else
++      {
++        if (c) return res; /* Not found, return original string */
++        if (count>0) /* return left part */
++        {
++	  tmp_value.set(*res,0,(ulong) (ptr-res->ptr()));
++        }
++        else /* return right part */
++        {
++	  ptr+= delimiter_length;
++	  tmp_value.set(*res,(ulong) (ptr-res->ptr()), (ulong) (strend-ptr));
++        }
++      }
++    }
++  }
++  else
++#endif /* USE_MB */
++  {
++    if (count > 0)
++    {					// start counting from the beginning
++      for (offset=0; ; offset+= delimiter_length)
++      {
++	if ((int) (offset= res->strstr(*delimiter, offset)) < 0)
++	  return res;			// Didn't find, return org string
++	if (!--count)
++	{
++	  tmp_value.set(*res,0,offset);
++	  break;
++	}
++      }
++    }
++    else
++    {
++      /*
++        Negative index, start counting at the end
++      */
++      for (offset=res->length(); offset ;)
++      {
++        /* 
++          this call will result in finding the position pointing to one 
++          address space less than where the found substring is located
++          in res
++        */
++	if ((int) (offset= res->strrstr(*delimiter, offset)) < 0)
++	  return res;			// Didn't find, return org string
++        /*
++          At this point, we've searched for the substring
++          the number of times as supplied by the index value
++        */
++	if (!++count)
++	{
++	  offset+= delimiter_length;
++	  tmp_value.set(*res,offset,res->length()- offset);
++	  break;
++	}
++      }
++    }
++  }
++  /*
++    We always mark tmp_value as const so that if val_str() is called again
++    on this object, we don't disrupt the contents of tmp_value when it was
++    derived from another String.
++  */
++  tmp_value.mark_as_const();
++  return (&tmp_value);
++}
++
++/*
++** The trim functions are an extension to ANSI SQL because they trim substrings.
++** The ltrim() and rtrim() functions are optimized for 1-byte strings.
++** They also return the original string if possible, else they return
++** a substring that points at the original string.
++*/
++
++
++String *Item_func_ltrim::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char buff[MAX_FIELD_WIDTH], *ptr, *end;
++  String tmp(buff,sizeof(buff),system_charset_info);
++  String *res, *remove_str;
++  uint remove_length;
++  LINT_INIT(remove_length);
++
++  res= args[0]->val_str(str);
++  if ((null_value=args[0]->null_value))
++    return 0;
++  remove_str= &remove;                          /* Default value. */
++  if (arg_count == 2)
++  {
++    remove_str= args[1]->val_str(&tmp);
++    if ((null_value= args[1]->null_value))
++      return 0;
++  }
++
++  if ((remove_length= remove_str->length()) == 0 ||
++      remove_length > res->length())
++    return res;
++
++  ptr= (char*) res->ptr();
++  end= ptr+res->length();
++  if (remove_length == 1)
++  {
++    char chr=(*remove_str)[0];
++    while (ptr != end && *ptr == chr)
++      ptr++;
++  }
++  else
++  {
++    const char *r_ptr=remove_str->ptr();
++    end-=remove_length;
++    while (ptr <= end && !memcmp(ptr, r_ptr, remove_length))
++      ptr+=remove_length;
++    end+=remove_length;
++  }
++  if (ptr == res->ptr())
++    return res;
++  tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr));
++  return &tmp_value;
++}
++
++
++String *Item_func_rtrim::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char buff[MAX_FIELD_WIDTH], *ptr, *end;
++  String tmp(buff, sizeof(buff), system_charset_info);
++  String *res, *remove_str;
++  uint remove_length;
++  LINT_INIT(remove_length);
++
++  res= args[0]->val_str(str);
++  if ((null_value=args[0]->null_value))
++    return 0;
++  remove_str= &remove;                          /* Default value. */
++  if (arg_count == 2)
++  {
++    remove_str= args[1]->val_str(&tmp);
++    if ((null_value= args[1]->null_value))
++      return 0;
++  }
++
++  if ((remove_length= remove_str->length()) == 0 ||
++      remove_length > res->length())
++    return res;
++
++  ptr= (char*) res->ptr();
++  end= ptr+res->length();
++#ifdef USE_MB
++  char *p=ptr;
++  register uint32 l;
++#endif
++  if (remove_length == 1)
++  {
++    char chr=(*remove_str)[0];
++#ifdef USE_MB
++    if (use_mb(res->charset()))
++    {
++      while (ptr < end)
++      {
++	if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l,p=ptr;
++	else ++ptr;
++      }
++      ptr=p;
++    }
++#endif
++    while (ptr != end  && end[-1] == chr)
++      end--;
++  }
++  else
++  {
++    const char *r_ptr=remove_str->ptr();
++#ifdef USE_MB
++    if (use_mb(res->charset()))
++    {
++  loop:
++      while (ptr + remove_length < end)
++      {
++	if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l;
++	else ++ptr;
++      }
++      if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length))
++      {
++	end-=remove_length;
++	ptr=p;
++	goto loop;
++      }
++    }
++    else
++#endif /* USE_MB */
++    {
++      while (ptr + remove_length <= end &&
++	     !memcmp(end-remove_length, r_ptr, remove_length))
++	end-=remove_length;
++    }
++  }
++  if (end == res->ptr()+res->length())
++    return res;
++  tmp_value.set(*res,0,(uint) (end-res->ptr()));
++  return &tmp_value;
++}
++
++
++String *Item_func_trim::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char buff[MAX_FIELD_WIDTH], *ptr, *end;
++  const char *r_ptr;
++  String tmp(buff, sizeof(buff), system_charset_info);
++  String *res, *remove_str;
++  uint remove_length;
++  LINT_INIT(remove_length);
++
++  res= args[0]->val_str(str);
++  if ((null_value=args[0]->null_value))
++    return 0;
++  remove_str= &remove;                          /* Default value. */
++  if (arg_count == 2)
++  {
++    remove_str= args[1]->val_str(&tmp);
++    if ((null_value= args[1]->null_value))
++      return 0;
++  }
++
++  if ((remove_length= remove_str->length()) == 0 ||
++      remove_length > res->length())
++    return res;
++
++  ptr= (char*) res->ptr();
++  end= ptr+res->length();
++  r_ptr= remove_str->ptr();
++  while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length))
++    ptr+=remove_length;
++#ifdef USE_MB
++  if (use_mb(res->charset()))
++  {
++    char *p=ptr;
++    register uint32 l;
++ loop:
++    while (ptr + remove_length < end)
++    {
++      if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l;
++      else ++ptr;
++    }
++    if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length))
++    {
++      end-=remove_length;
++      ptr=p;
++      goto loop;
++    }
++    ptr=p;
++  }
++  else
++#endif /* USE_MB */
++  {
++    while (ptr + remove_length <= end &&
++	   !memcmp(end-remove_length,r_ptr,remove_length))
++      end-=remove_length;
++  }
++  if (ptr == res->ptr() && end == ptr+res->length())
++    return res;
++  tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr));
++  return &tmp_value;
++}
++
++void Item_func_trim::fix_length_and_dec()
++{
++  max_length= args[0]->max_length;
++  if (arg_count == 1)
++  {
++    collation.set(args[0]->collation);
++    remove.set_charset(collation.collation);
++    remove.set_ascii(" ",1);
++  }
++  else
++  {
++    // Handle character set for args[1] and args[0].
++    // Note that we pass args[1] as the first item, and args[0] as the second.
++    if (agg_arg_charsets(collation, &args[1], 2, MY_COLL_CMP_CONV, -1))
++      return;
++  }
++}
++
++void Item_func_trim::print(String *str, enum_query_type query_type)
++{
++  if (arg_count == 1)
++  {
++    Item_func::print(str, query_type);
++    return;
++  }
++  str->append(Item_func_trim::func_name());
++  str->append('(');
++  str->append(mode_name());
++  str->append(' ');
++  args[1]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" from "));
++  args[0]->print(str, query_type);
++  str->append(')');
++}
++
++
++/* Item_func_password */
++
++String *Item_func_password::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str); 
++  if ((null_value=args[0]->null_value))
++    return 0;
++  if (res->length() == 0)
++    return make_empty_result();
++  my_make_scrambled_password(tmp_value, res->ptr(), res->length());
++  str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, res->charset());
++  return str;
++}
++
++char *Item_func_password::alloc(THD *thd, const char *password,
++                                size_t pass_len)
++{
++  char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1);
++  if (buff)
++    my_make_scrambled_password(buff, password, pass_len);
++  return buff;
++}
++
++/* Item_func_old_password */
++
++String *Item_func_old_password::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++  if ((null_value=args[0]->null_value))
++    return 0;
++  if (res->length() == 0)
++    return make_empty_result();
++  my_make_scrambled_password_323(tmp_value, res->ptr(), res->length());
++  str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, res->charset());
++  return str;
++}
++
++char *Item_func_old_password::alloc(THD *thd, const char *password,
++                                    size_t pass_len)
++{
++  char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1);
++  if (buff)
++    my_make_scrambled_password_323(buff, password, pass_len);
++  return buff;
++}
++
++
++#define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.')
++
++String *Item_func_encrypt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res  =args[0]->val_str(str);
++
++#ifdef HAVE_CRYPT
++  char salt[3],*salt_ptr;
++  if ((null_value=args[0]->null_value))
++    return 0;
++  if (res->length() == 0)
++    return make_empty_result();
++  if (arg_count == 1)
++  {					// generate random salt
++    time_t timestamp=current_thd->query_start();
++    salt[0] = bin_to_ascii( (ulong) timestamp & 0x3f);
++    salt[1] = bin_to_ascii(( (ulong) timestamp >> 5) & 0x3f);
++    salt[2] = 0;
++    salt_ptr=salt;
++  }
++  else
++  {					// obtain salt from the first two bytes
++    String *salt_str=args[1]->val_str(&tmp_value);
++    if ((null_value= (args[1]->null_value || salt_str->length() < 2)))
++      return 0;
++    salt_ptr= salt_str->c_ptr_safe();
++  }
++  pthread_mutex_lock(&LOCK_crypt);
++  char *tmp= crypt(res->c_ptr_safe(),salt_ptr);
++  if (!tmp)
++  {
++    pthread_mutex_unlock(&LOCK_crypt);
++    null_value= 1;
++    return 0;
++  }
++  str->set(tmp, (uint) strlen(tmp), &my_charset_bin);
++  str->copy();
++  pthread_mutex_unlock(&LOCK_crypt);
++  return str;
++#else
++  null_value=1;
++  return 0;
++#endif	/* HAVE_CRYPT */
++}
++
++bool Item_func_encode::seed()
++{
++  char buf[80];
++  ulong rand_nr[2];
++  String *key, tmp(buf, sizeof(buf), system_charset_info);
++
++  if (!(key= args[1]->val_str(&tmp)))
++    return TRUE;
++
++  hash_password(rand_nr, key->ptr(), key->length());
++  sql_crypt.init(rand_nr);
++
++  return FALSE;
++}
++
++void Item_func_encode::fix_length_and_dec()
++{
++  max_length=args[0]->max_length;
++  maybe_null=args[0]->maybe_null || args[1]->maybe_null;
++  collation.set(&my_charset_bin);
++  /* Precompute the seed state if the item is constant. */
++  seeded= args[1]->const_item() &&
++          (args[1]->result_type() == STRING_RESULT) && !seed();
++}
++
++String *Item_func_encode::val_str(String *str)
++{
++  String *res;
++  DBUG_ASSERT(fixed == 1);
++
++  if (!(res=args[0]->val_str(str)))
++  {
++    null_value= 1;
++    return NULL;
++  }
++
++  if (!seeded && seed())
++  {
++    null_value= 1;
++    return NULL;
++  }
++
++  null_value= 0;
++  res= copy_if_not_alloced(str, res, res->length());
++  crypto_transform(res);
++  sql_crypt.reinit();
++
++  return res;
++}
++
++void Item_func_encode::crypto_transform(String *res)
++{
++  sql_crypt.encode((char*) res->ptr(),res->length());
++  res->set_charset(&my_charset_bin);
++}
++
++void Item_func_decode::crypto_transform(String *res)
++{
++  sql_crypt.decode((char*) res->ptr(),res->length());
++}
++
++
++Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs)
++{
++  Item_string *conv;
++  uint conv_errors;
++  String tmp, cstr, *ostr= val_str(&tmp);
++  if (null_value)
++  {
++    Item *null_item= new Item_null((char *) fully_qualified_func_name());
++    null_item->collation.set (tocs);
++    return null_item;
++  }
++  cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
++  if (conv_errors ||
++      !(conv= new Item_static_string_func(fully_qualified_func_name(),
++                                          cstr.ptr(), cstr.length(),
++                                          cstr.charset(),
++                                          collation.derivation)))
++  {
++    return NULL;
++  }
++  conv->str_value.copy();
++  conv->str_value.mark_as_const();
++  return conv;
++}
++
++
++String *Item_func_database::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  THD *thd= current_thd;
++  if (thd->db == NULL)
++  {
++    null_value= 1;
++    return 0;
++  }
++  else
++    str->copy(thd->db, thd->db_length, system_charset_info);
++  return str;
++}
++
++
++/**
++  @note USER() is replicated correctly if binlog_format=ROW or (as of
++  BUG#28086) binlog_format=MIXED, but is incorrectly replicated to ''
++  if binlog_format=STATEMENT.
++*/
++bool Item_func_user::init(const char *user, const char *host)
++{
++  DBUG_ASSERT(fixed == 1);
++
++  // For system threads (e.g. replication SQL thread) user may be empty
++  if (user)
++  {
++    CHARSET_INFO *cs= str_value.charset();
++    size_t res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
++
++    if (str_value.alloc((uint) res_length))
++    {
++      null_value=1;
++      return TRUE;
++    }
++
++    res_length=cs->cset->snprintf(cs, (char*)str_value.ptr(), (uint) res_length,
++                                  "%s@%s", user, host);
++    str_value.length((uint) res_length);
++    str_value.mark_as_const();
++  }
++  return FALSE;
++}
++
++
++bool Item_func_user::fix_fields(THD *thd, Item **ref)
++{
++  return (Item_func_sysconst::fix_fields(thd, ref) ||
++          init(thd->main_security_ctx.user,
++               thd->main_security_ctx.host_or_ip));
++}
++
++
++bool Item_func_current_user::fix_fields(THD *thd, Item **ref)
++{
++  if (Item_func_sysconst::fix_fields(thd, ref))
++    return TRUE;
++
++  Security_context *ctx=
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++                         (context->security_ctx
++                          ? context->security_ctx : thd->security_ctx);
++#else
++                         thd->security_ctx;
++#endif /*NO_EMBEDDED_ACCESS_CHECKS*/
++  return init(ctx->priv_user, ctx->priv_host);
++}
++
++
++void Item_func_soundex::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  max_length=args[0]->max_length;
++  set_if_bigger(max_length, 4 * collation.collation->mbminlen);
++  tmp_value.set_charset(collation.collation);
++}
++
++
++/**
++  If alpha, map input letter to soundex code.
++  If not alpha and remove_garbage is set then skip to next char
++  else return 0
++*/
++
++static int soundex_toupper(int ch)
++{
++  return (ch >= 'a' && ch <= 'z') ? ch - 'a' + 'A' : ch;
++}
++
++
++static char get_scode(int wc)
++{
++  int ch= soundex_toupper(wc);
++  if (ch < 'A' || ch > 'Z')
++  {
++					// Treat extended alphabet (country specific)
++    return '0';				// as a vowel
++  }
++  return(soundex_map[ch-'A']);
++}
++
++
++static bool my_uni_isalpha(int wc)
++{
++  /*
++    Return true for all Basic Latin letters: a..z A..Z.
++    Return true for all Unicode characters with code higher than U+00C0:
++    - characters between 'z' and U+00C0 are controls and punctuations.
++    - "U+00C0 LATIN CAPITAL LETTER A WITH GRAVE" is the first letter after 'z'.
++  */
++  return (wc >= 'a' && wc <= 'z') ||
++         (wc >= 'A' && wc <= 'Z') ||
++         (wc >= 0xC0);
++}
++
++
++String *Item_func_soundex::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res  =args[0]->val_str(str);
++  char last_ch,ch;
++  CHARSET_INFO *cs= collation.collation;
++  my_wc_t wc;
++  uint nchars;
++  int rc;
++
++  if ((null_value= args[0]->null_value))
++    return 0; /* purecov: inspected */
++
++  if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen)))
++    return str; /* purecov: inspected */
++  char *to= (char *) tmp_value.ptr();
++  char *to_end= to + tmp_value.alloced_length();
++  char *from= (char *) res->ptr(), *end= from + res->length();
++  
++  for ( ; ; ) /* Skip pre-space */
++  {
++    if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0)
++      return make_empty_result(); /* EOL or invalid byte sequence */
++    
++    if (rc == 1 && cs->ctype)
++    {
++      /* Single byte letter found */
++      if (my_isalpha(cs, *from))
++      {
++        last_ch= get_scode(*from);       // Code of the first letter
++        *to++= soundex_toupper(*from++); // Copy first letter
++        break;
++      }
++      from++;
++    }
++    else
++    {
++      from+= rc;
++      if (my_uni_isalpha(wc))
++      {
++        /* Multibyte letter found */
++        wc= soundex_toupper(wc);
++        last_ch= get_scode(wc);     // Code of the first letter
++        if ((rc= cs->cset->wc_mb(cs, wc, (uchar*) to, (uchar*) to_end)) <= 0)
++        {
++          /* Extra safety - should not really happen */
++          DBUG_ASSERT(false);
++          return make_empty_result();
++        }
++        to+= rc;
++        break;
++      }
++    }
++  }
++  
++  /*
++     last_ch is now set for the first 'double-letter' check.
++     Loop on input letters until the end of input.
++  */
++  for (nchars= 1 ; ; )
++  {
++    if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0)
++      break; /* EOL or invalid byte sequence */
++
++    if (rc == 1 && cs->ctype)
++    {
++      if (!my_isalpha(cs, *from++))
++        continue;
++    }
++    else
++    {
++      from+= rc;
++      if (!my_uni_isalpha(wc))
++        continue;
++    }
++    
++    ch= get_scode(wc);
++    if ((ch != '0') && (ch != last_ch)) // if not skipped or double
++    {
++      // letter, copy to output
++      if ((rc= cs->cset->wc_mb(cs, (my_wc_t) ch,
++                               (uchar*) to, (uchar*) to_end)) <= 0)
++      {
++        // Extra safety - should not really happen
++        DBUG_ASSERT(false);
++        break;
++      }
++      to+= rc;
++      nchars++;
++      last_ch= ch;  // save code of last input letter
++    }               // for next double-letter check
++  }
++  
++  /* Pad up to 4 characters with DIGIT ZERO, if the string is shorter */
++  if (nchars < 4) 
++  {
++    uint nbytes= (4 - nchars) * cs->mbminlen;
++    cs->cset->fill(cs, to, nbytes, '0');
++    to+= nbytes;
++  }
++
++  tmp_value.length((uint) (to-tmp_value.ptr()));
++  return &tmp_value;
++}
++
++
++/**
++  Change a number to format '3,333,333,333.000'.
++
++  This should be 'internationalized' sometimes.
++*/
++
++const int FORMAT_MAX_DECIMALS= 30;
++
++Item_func_format::Item_func_format(Item *org, Item *dec)
++: Item_str_func(org, dec)
++{
++}
++
++void Item_func_format::fix_length_and_dec()
++{
++  uint char_length= args[0]->max_length/args[0]->collation.collation->mbmaxlen;
++  uint max_sep_count= char_length/3 + (decimals ? 1 : 0) + /*sign*/1;
++  collation.set(default_charset());
++  max_length= (char_length + max_sep_count + decimals) *
++    collation.collation->mbmaxlen;
++}
++
++
++/**
++  @todo
++  This needs to be fixed for multi-byte character sets, where numbers
++  are stored in more than one byte.
++*/
++
++String *Item_func_format::val_str(String *str)
++{
++  uint32 length;
++  uint32 str_length;
++  /* Number of decimal digits */
++  int dec;
++  /* Number of characters used to represent the decimals, including '.' */
++  uint32 dec_length;
++  int diff;
++  DBUG_ASSERT(fixed == 1);
++
++  dec= (int) args[1]->val_int();
++  if (args[1]->null_value)
++  {
++    null_value=1;
++    return NULL;
++  }
++
++  dec= set_zone(dec, 0, FORMAT_MAX_DECIMALS);
++  dec_length= dec ? dec+1 : 0;
++  null_value=0;
++
++  if (args[0]->result_type() == DECIMAL_RESULT ||
++      args[0]->result_type() == INT_RESULT)
++  {
++    my_decimal dec_val, rnd_dec, *res;
++    res= args[0]->val_decimal(&dec_val);
++    if ((null_value=args[0]->null_value))
++      return 0; /* purecov: inspected */
++    my_decimal_round(E_DEC_FATAL_ERROR, res, dec, false, &rnd_dec);
++    my_decimal2string(E_DEC_FATAL_ERROR, &rnd_dec, 0, 0, 0, str);
++    str_length= str->length();
++    if (rnd_dec.sign())
++      str_length--;
++  }
++  else
++  {
++    double nr= args[0]->val_real();
++    if ((null_value=args[0]->null_value))
++      return 0; /* purecov: inspected */
++    nr= my_double_round(nr, (longlong) dec, FALSE, FALSE);
++    /* Here default_charset() is right as this is not an automatic conversion */
++    str->set_real(nr, dec, default_charset());
++    if (isnan(nr))
++      return str;
++    str_length=str->length();
++    if (nr < 0)
++      str_length--;				// Don't count sign
++  }
++  /* We need this test to handle 'nan' values */
++  if (str_length >= dec_length+4)
++  {
++    char *tmp,*pos;
++    length= str->length()+(diff=((int)(str_length- dec_length-1))/3);
++    str= copy_if_not_alloced(&tmp_str,str,length);
++    str->length(length);
++    tmp= (char*) str->ptr()+length - dec_length-1;
++    for (pos= (char*) str->ptr()+length-1; pos != tmp; pos--)
++      pos[0]= pos[-diff];
++    while (diff)
++    {
++      *pos= *(pos - diff);
++      pos--;
++      *pos= *(pos - diff);
++      pos--;
++      *pos= *(pos - diff);
++      pos--;
++      pos[0]=',';
++      pos--;
++      diff--;
++    }
++  }
++  return str;
++}
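The block above right-shifts the already-rounded digits and drops a ',' in front of every group of three integer digits. The same grouping step on a plain digit string, for illustration (group3() is a hypothetical helper; it assumes the input already has exactly 'decimals' fractional digits):

  #include <string>

  // Insert ',' before every group of three digits of the integer part,
  // leaving the sign and the ".ddd" tail untouched.
  std::string group3(std::string num, unsigned decimals)
  {
    std::string::size_type end= num.size();
    if (decimals)
      end-= decimals + 1;                            // skip '.' and the decimals
    std::string::size_type start= (!num.empty() && num[0] == '-') ? 1 : 0;
    for (std::string::size_type i= end; i > start + 3; )
    {
      i-= 3;
      num.insert(i, 1, ',');
    }
    return num;                                      // "1234567.89" -> "1,234,567.89"
  }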
++
++
++void Item_func_format::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("format("));
++  args[0]->print(str, query_type);
++  str->append(',');
++  args[1]->print(str, query_type);
++  str->append(')');
++}
++
++void Item_func_elt::fix_length_and_dec()
++{
++  max_length=0;
++  decimals=0;
++
++  if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV, 1))
++    return;
++
++  for (uint i= 1 ; i < arg_count ; i++)
++  {
++    set_if_bigger(max_length,args[i]->max_length);
++    set_if_bigger(decimals,args[i]->decimals);
++  }
++  maybe_null=1;					// NULL if wrong first arg
++}
++
++
++double Item_func_elt::val_real()
++{
++  DBUG_ASSERT(fixed == 1);
++  uint tmp;
++  null_value=1;
++  if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count)
++    return 0.0;
++  double result= args[tmp]->val_real();
++  null_value= args[tmp]->null_value;
++  return result;
++}
++
++
++longlong Item_func_elt::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  uint tmp;
++  null_value=1;
++  if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count)
++    return 0;
++
++  longlong result= args[tmp]->val_int();
++  null_value= args[tmp]->null_value;
++  return result;
++}
++
++
++String *Item_func_elt::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint tmp;
++  null_value=1;
++  if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count)
++    return NULL;
++
++  String *result= args[tmp]->val_str(str);
++  if (result)
++    result->set_charset(collation.collation);
++  null_value= args[tmp]->null_value;
++  return result;
++}
++
++
++void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array,
++					List<Item> &fields)
++{
++  item->split_sum_func2(thd, ref_pointer_array, fields, &item, TRUE);
++  Item_str_func::split_sum_func(thd, ref_pointer_array, fields);
++}
++
++
++void Item_func_make_set::fix_length_and_dec()
++{
++  max_length=arg_count-1;
++
++  if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
++    return;
++  
++  for (uint i=0 ; i < arg_count ; i++)
++    max_length+=args[i]->max_length;
++
++  used_tables_cache|=	  item->used_tables();
++  not_null_tables_cache&= item->not_null_tables();
++  const_item_cache&=	  item->const_item();
++  with_sum_func= with_sum_func || item->with_sum_func;
++}
++
++
++void Item_func_make_set::update_used_tables()
++{
++  Item_func::update_used_tables();
++  item->update_used_tables();
++  used_tables_cache|=item->used_tables();
++  const_item_cache&=item->const_item();
++}
++
++
++String *Item_func_make_set::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  ulonglong bits;
++  bool first_found=0;
++  Item **ptr=args;
++  String *result=&my_empty_string;
++
++  bits=item->val_int();
++  if ((null_value=item->null_value))
++    return NULL;
++
++  if (arg_count < 64)
++    bits &= ((ulonglong) 1 << arg_count)-1;
++
++  for (; bits; bits >>= 1, ptr++)
++  {
++    if (bits & 1)
++    {
++      String *res= (*ptr)->val_str(str);
++      if (res)					// Skip nulls
++      {
++	if (!first_found)
++	{					// First argument
++	  first_found=1;
++	  if (res != str)
++	    result=res;				// Use original string
++	  else
++	  {
++	    if (tmp_str.copy(*res))		// Don't use 'str'
++              return make_empty_result();
++	    result= &tmp_str;
++	  }
++	}
++	else
++	{
++	  if (result != &tmp_str)
++	  {					// Copy data to tmp_str
++	    if (tmp_str.alloc(result->length()+res->length()+1) ||
++		tmp_str.copy(*result))
++              return make_empty_result();
++	    result= &tmp_str;
++	  }
++	  if (tmp_str.append(STRING_WITH_LEN(","), &my_charset_bin) || tmp_str.append(*res))
++            return make_empty_result();
++	}
++      }
++    }
++  }
++  return result;
++}
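MAKE_SET() above treats its first argument as a bitmask and concatenates, comma-separated, the string arguments whose bit is set (NULL arguments are skipped without adding a separator). The selection logic in isolation (make_set_demo() is illustrative only):

  #include <string>
  #include <vector>

  // Pick items[i] whenever bit i of 'bits' is set and join the picks with ','.
  std::string make_set_demo(unsigned long long bits,
                            const std::vector<std::string> &items)
  {
    std::string out;
    for (unsigned i= 0; i < items.size() && i < 64; i++)
    {
      if (!(bits & (1ULL << i)))
        continue;
      if (!out.empty())
        out+= ',';
      out+= items[i];
    }
    return out;             // make_set_demo(5, {"a","b","c"}) == "a,c"
  }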
++
++
++Item *Item_func_make_set::transform(Item_transformer transformer, uchar *arg)
++{
++  DBUG_ASSERT(!current_thd->is_stmt_prepare());
++
++  Item *new_item= item->transform(transformer, arg);
++  if (!new_item)
++    return 0;
++
++  /*
++    THD::change_item_tree() should be called only if the tree was
++    really transformed, i.e. when a new item has been created.
++    Otherwise we'll be allocating a lot of unnecessary memory for
++    change records at each execution.
++  */
++  if (item != new_item)
++    current_thd->change_item_tree(&item, new_item);
++  return Item_str_func::transform(transformer, arg);
++}
++
++
++void Item_func_make_set::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("make_set("));
++  item->print(str, query_type);
++  if (arg_count)
++  {
++    str->append(',');
++    print_args(str, 0, query_type);
++  }
++  str->append(')');
++}
++
++
++String *Item_func_char::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  str->length(0);
++  str->set_charset(collation.collation);
++  for (uint i=0 ; i < arg_count ; i++)
++  {
++    int32 num=(int32) args[i]->val_int();
++    if (!args[i]->null_value)
++    {
++      char char_num= (char) num;
++      if (num&0xFF000000L) {
++        str->append((char)(num>>24));
++        goto b2;
++      } else if (num&0xFF0000L) {
++    b2:        str->append((char)(num>>16));
++        goto b1;
++      } else if (num&0xFF00L) {
++    b1:        str->append((char)(num>>8));
++      }
++      str->append(&char_num, 1);
++    }
++  }
++  str->realloc(str->length());			// Add end 0 (for Purify)
++  return check_well_formed_result(str);
++}
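The b2/b1 goto chain above just emits each integer argument as 1 to 4 bytes, most significant non-zero byte first. A goto-free rendering of that per-argument step (append_char_bytes() is illustrative only):

  #include <string>

  // Append the bytes of 'num' high-to-low, dropping leading zero bytes but
  // always emitting at least the low byte -- the effect of the goto chain.
  void append_char_bytes(std::string &out, unsigned long num)
  {
    bool started= false;
    for (int shift= 24; shift >= 0; shift-= 8)
    {
      unsigned char byte= (unsigned char) (num >> shift);
      if (byte || started || shift == 0)
      {
        out+= (char) byte;
        started= true;
      }
    }
  }
  // append_char_bytes(s, 0x414243) appends "ABC", as the loop above would.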
++
++
++inline String* alloc_buffer(String *res,String *str,String *tmp_value,
++			    ulong length)
++{
++  if (res->alloced_length() < length)
++  {
++    if (str->alloced_length() >= length)
++    {
++      (void) str->copy(*res);
++      str->length(length);
++      return str;
++    }
++    if (tmp_value->alloc(length))
++      return 0;
++    (void) tmp_value->copy(*res);
++    tmp_value->length(length);
++    return tmp_value;
++  }
++  res->length(length);
++  return res;
++}
++
++
++void Item_func_repeat::fix_length_and_dec()
++{
++  collation.set(args[0]->collation);
++  if (args[1]->const_item())
++  {
++    /* must be longlong to avoid truncation */
++    longlong count= args[1]->val_int();
++
++    /* Assumes that the maximum length of a String is < INT_MAX32. */
++    /* Set here so that rest of code sees out-of-bound value as such. */
++    if (count > INT_MAX32)
++      count= INT_MAX32;
++
++    ulonglong max_result_length= (ulonglong) args[0]->max_length * count;
++    if (max_result_length >= MAX_BLOB_WIDTH)
++    {
++      max_result_length= MAX_BLOB_WIDTH;
++      maybe_null= 1;
++    }
++    max_length= (ulong) max_result_length;
++  }
++  else
++  {
++    max_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++}
++
++/**
++  Item_func_repeat::val_str() is carefully written to avoid reallocs
++  as much as possible, at the cost of a local buffer.
++*/
++
++String *Item_func_repeat::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint length,tot_length;
++  char *to;
++  /* must be longlong to avoid truncation */
++  longlong count= args[1]->val_int();
++  String *res= args[0]->val_str(str);
++
++  if (args[0]->null_value || args[1]->null_value)
++    goto err;				// string and/or delim are null
++  null_value= 0;
++
++  if (count <= 0 && (count == 0 || !args[1]->unsigned_flag))
++    return make_empty_result();
++
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  /* Bounds check on count:  If this is triggered, we will error. */
++  if ((ulonglong) count > INT_MAX32)
++    count= INT_MAX32;
++  if (count == 1)			// To avoid reallocs
++    return res;
++  length=res->length();
++  // Safe length check
++  if (length > current_thd->variables.max_allowed_packet / (uint) count)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++			func_name(), current_thd->variables.max_allowed_packet);
++    goto err;
++  }
++  tot_length= length*(uint) count;
++  if (!(res= alloc_buffer(res,str,&tmp_value,tot_length)))
++    goto err;
++
++  to=(char*) res->ptr()+length;
++  while (--count)
++  {
++    memcpy(to,res->ptr(),length);
++    to+=length;
++  }
++  return (res);
++
++err:
++  null_value=1;
++  return 0;
++}
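REPEAT() above sizes the result buffer once (alloc_buffer), keeps the first copy that val_str() already produced, and then fills the rest with memcpy, so nothing is reallocated inside the loop. The same idea with std::string (repeat_demo() is illustrative only):

  #include <string>

  std::string repeat_demo(const std::string &s, unsigned count)
  {
    if (count == 0 || s.empty())
      return std::string();
    std::string out;
    out.reserve(s.size() * count);   // one allocation up front
    for (unsigned i= 0; i < count; i++)
      out+= s;                       // no reallocation inside the loop
    return out;                      // repeat_demo("ab", 3) == "ababab"
  }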
++
++
++void Item_func_rpad::fix_length_and_dec()
++{
++  // Handle character set for args[0] and args[2].
++  if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2))
++    return;
++  if (args[1]->const_item())
++  {
++    ulonglong length= 0;
++
++    if (collation.collation->mbmaxlen > 0)
++    {
++      ulonglong temp= (ulonglong) args[1]->val_int();
++
++      /* Assumes that the maximum length of a String is < INT_MAX32. */
++      /* Set here so that rest of code sees out-of-bound value as such. */
++      if (temp > INT_MAX32)
++	temp = INT_MAX32;
++
++      length= temp * collation.collation->mbmaxlen;
++    }
++
++    if (length >= MAX_BLOB_WIDTH)
++    {
++      length= MAX_BLOB_WIDTH;
++      maybe_null= 1;
++    }
++    max_length= (ulong) length;
++  }
++  else
++  {
++    max_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++}
++
++
++String *Item_func_rpad::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint32 res_byte_length,res_char_length,pad_char_length,pad_byte_length;
++  char *to;
++  const char *ptr_pad;
++  /* must be longlong to avoid truncation */
++  longlong count= args[1]->val_int();
++  longlong byte_count;
++  String *res= args[0]->val_str(str);
++  String *rpad= args[2]->val_str(&rpad_str);
++
++  if (!res || args[1]->null_value || !rpad || 
++      ((count < 0) && !args[1]->unsigned_flag))
++    goto err;
++  null_value=0;
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  /* Set here so that rest of code sees out-of-bound value as such. */
++  if ((ulonglong) count > INT_MAX32)
++    count= INT_MAX32;
++  /*
++    There is one exception not handled (intentionally) by the character set
++    aggregation code. If one string is the strong side and is binary, and
++    the other one is the weak side and is a multi-byte character string,
++    then we need to operate on the second string in terms of bytes when
++    calling ::numchars() and ::charpos(), rather than in terms of characters.
++    Let's substitute its character set with binary.
++  */
++  if (collation.collation == &my_charset_bin)
++  {
++    res->set_charset(&my_charset_bin);
++    rpad->set_charset(&my_charset_bin);
++  }
++
++  if (count <= (res_char_length= res->numchars()))
++  {						// String to pad is big enough
++    res->length(res->charpos((int) count));	// Shorten result if longer
++    return (res);
++  }
++  pad_char_length= rpad->numchars();
++
++  byte_count= count * collation.collation->mbmaxlen;
++  if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++			func_name(), current_thd->variables.max_allowed_packet);
++    goto err;
++  }
++  if (args[2]->null_value || !pad_char_length)
++    goto err;
++  res_byte_length= res->length();	/* Must be done before alloc_buffer */
++  if (!(res= alloc_buffer(res,str,&tmp_value, (ulong) byte_count)))
++    goto err;
++
++  to= (char*) res->ptr()+res_byte_length;
++  ptr_pad=rpad->ptr();
++  pad_byte_length= rpad->length();
++  count-= res_char_length;
++  for ( ; (uint32) count > pad_char_length; count-= pad_char_length)
++  {
++    memcpy(to,ptr_pad,pad_byte_length);
++    to+= pad_byte_length;
++  }
++  if (count)
++  {
++    pad_byte_length= rpad->charpos((int) count);
++    memcpy(to,ptr_pad,(size_t) pad_byte_length);
++    to+= pad_byte_length;
++  }
++  res->length((uint) (to- (char*) res->ptr()));
++  return (res);
++
++ err:
++  null_value=1;
++  return 0;
++}
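RPAD() above truncates when the string already has at least 'count' characters; otherwise it appends whole copies of the pad string followed by a leading fragment of it. LPAD() below is the mirror image. A character-level sketch, assuming a single-byte character set (rpad_demo() is illustrative only):

  #include <string>

  // Right-pad (or truncate) 'value' to exactly 'target' characters.
  std::string rpad_demo(std::string value, std::string::size_type target,
                        const std::string &pad)
  {
    if (value.size() >= target)
      return value.substr(0, target);               // shorten, as above
    if (pad.empty())
      return value;                                 // the server returns NULL here
    while (value.size() + pad.size() <= target)
      value+= pad;                                  // whole copies of the pad
    value+= pad.substr(0, target - value.size());   // final partial copy
    return value;                                   // rpad_demo("hi", 5, ".") == "hi..."
  }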
++
++
++void Item_func_lpad::fix_length_and_dec()
++{
++  // Handle character set for args[0] and args[2].
++  if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2))
++    return;
++  
++  if (args[1]->const_item())
++  {
++    ulonglong length= 0;
++
++    if (collation.collation->mbmaxlen > 0)
++    {
++      ulonglong temp= (ulonglong) args[1]->val_int();
++
++      /* Assumes that the maximum length of a String is < INT_MAX32. */
++      /* Set here so that rest of code sees out-of-bound value as such. */
++      if (temp > INT_MAX32)
++        temp= INT_MAX32;
++
++      length= temp * collation.collation->mbmaxlen;
++    }
++
++    if (length >= MAX_BLOB_WIDTH)
++    {
++      length= MAX_BLOB_WIDTH;
++      maybe_null= 1;
++    }
++    max_length= (ulong) length;
++  }
++  else
++  {
++    max_length= MAX_BLOB_WIDTH;
++    maybe_null= 1;
++  }
++}
++
++
++String *Item_func_lpad::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint32 res_char_length,pad_char_length;
++  /* must be longlong to avoid truncation */
++  longlong count= args[1]->val_int();
++  longlong byte_count;
++  String *res= args[0]->val_str(&tmp_value);
++  String *pad= args[2]->val_str(&lpad_str);
++
++  if (!res || args[1]->null_value || !pad ||  
++      ((count < 0) && !args[1]->unsigned_flag))
++    goto err;  
++  null_value=0;
++  /* Assumes that the maximum length of a String is < INT_MAX32. */
++  /* Set here so that rest of code sees out-of-bound value as such. */
++  if ((ulonglong) count > INT_MAX32)
++    count= INT_MAX32;
++
++  /*
++    There is one exception not handled (intentionally) by the character set
++    aggregation code. If one string is the strong side and is binary, and
++    the other one is the weak side and is a multi-byte character string,
++    then we need to operate on the second string in terms of bytes when
++    calling ::numchars() and ::charpos(), rather than in terms of characters.
++    Let's substitute its character set with binary.
++  */
++  if (collation.collation == &my_charset_bin)
++  {
++    res->set_charset(&my_charset_bin);
++    pad->set_charset(&my_charset_bin);
++  }
++
++  res_char_length= res->numchars();
++
++  if (count <= res_char_length)
++  {
++    res->length(res->charpos((int) count));
++    return res;
++  }
++  
++  pad_char_length= pad->numchars();
++  byte_count= count * collation.collation->mbmaxlen;
++  
++  if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++			func_name(), current_thd->variables.max_allowed_packet);
++    goto err;
++  }
++
++  if (args[2]->null_value || !pad_char_length ||
++      str->alloc((uint32) byte_count))
++    goto err;
++  
++  str->length(0);
++  str->set_charset(collation.collation);
++  count-= res_char_length;
++  while (count >= pad_char_length)
++  {
++    str->append(*pad);
++    count-= pad_char_length;
++  }
++  if (count > 0)
++    str->append(pad->ptr(), pad->charpos((int) count), collation.collation);
++
++  str->append(*res);
++  null_value= 0;
++  return str;
++
++err:
++  null_value= 1;
++  return 0;
++}
++
++
++String *Item_func_conv::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++  char *endptr,ans[65],*ptr;
++  longlong dec;
++  int from_base= (int) args[1]->val_int();
++  int to_base= (int) args[2]->val_int();
++  int err;
++
++  if (args[0]->null_value || args[1]->null_value || args[2]->null_value ||
++      abs(to_base) > 36 || abs(to_base) < 2 ||
++      abs(from_base) > 36 || abs(from_base) < 2 || !(res->length()))
++  {
++    null_value= 1;
++    return NULL;
++  }
++  null_value= 0;
++  unsigned_flag= !(from_base < 0);
++
++  if (args[0]->field_type() == MYSQL_TYPE_BIT) 
++  {
++    /* 
++     Special case: The string representation of BIT doesn't resemble the
++     decimal representation, so we shouldn't change it to string and then to
++     decimal. 
++    */
++    dec= args[0]->val_int();
++  }
++  else
++  {
++    if (from_base < 0)
++      dec= my_strntoll(res->charset(), res->ptr(), res->length(),
++                       -from_base, &endptr, &err);
++    else
++      dec= (longlong) my_strntoull(res->charset(), res->ptr(), res->length(),
++                                   from_base, &endptr, &err);
++  }
++
++  ptr= longlong2str(dec, ans, to_base);
++  if (str->copy(ans, (uint32) (ptr-ans), default_charset()))
++    return make_empty_result();
++  return str;
++}
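CONV() above parses the argument in the source base with my_strntoll()/my_strntoull() and re-renders it with longlong2str() in the target base. A reduced sketch using only the C library, ignoring the signed/negative-base handling above (conv_demo() is illustrative only):

  #include <cstdlib>
  #include <string>

  // Parse 'in' as an unsigned number in base 'from' (2..36) and print it in
  // base 'to' (2..36), with uppercase digits.
  std::string conv_demo(const std::string &in, int from, int to)
  {
    unsigned long long value= std::strtoull(in.c_str(), 0, from);
    if (value == 0)
      return "0";
    static const char digits[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    std::string out;
    while (value)
    {
      out.insert(out.begin(), digits[value % to]);
      value/= to;
    }
    return out;             // conv_demo("ff", 16, 2) == "11111111"
  }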
++
++
++String *Item_func_conv_charset::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  if (use_cached_value)
++    return null_value ? 0 : &str_value;
++  String *arg= args[0]->val_str(str);
++  uint dummy_errors;
++  if (!arg)
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value= tmp_value.copy(arg->ptr(), arg->length(), arg->charset(),
++                             conv_charset, &dummy_errors);
++  return null_value ? 0 : check_well_formed_result(&tmp_value);
++}
++
++void Item_func_conv_charset::fix_length_and_dec()
++{
++  collation.set(conv_charset, DERIVATION_IMPLICIT);
++  max_length = args[0]->max_length*conv_charset->mbmaxlen;
++}
++
++void Item_func_conv_charset::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("convert("));
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" using "));
++  str->append(conv_charset->csname);
++  str->append(')');
++}
++
++String *Item_func_set_collation::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  str=args[0]->val_str(str);
++  if ((null_value=args[0]->null_value))
++    return 0;
++  str->set_charset(collation.collation);
++  return str;
++}
++
++void Item_func_set_collation::fix_length_and_dec()
++{
++  CHARSET_INFO *set_collation;
++  const char *colname;
++  String tmp, *str= args[1]->val_str(&tmp);
++  colname= str->c_ptr();
++  if (colname == binary_keyword)
++    set_collation= get_charset_by_csname(args[0]->collation.collation->csname,
++					 MY_CS_BINSORT,MYF(0));
++  else
++  {
++    if (!(set_collation= get_charset_by_name(colname,MYF(0))))
++    {
++      my_error(ER_UNKNOWN_COLLATION, MYF(0), colname);
++      return;
++    }
++  }
++
++  if (!set_collation || 
++      !my_charset_same(args[0]->collation.collation,set_collation))
++  {
++    my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
++             colname, args[0]->collation.collation->csname);
++    return;
++  }
++  collation.set(set_collation, DERIVATION_EXPLICIT,
++                args[0]->collation.repertoire);
++  max_length= args[0]->max_length;
++}
++
++
++bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const
++{
++  /* Assume we don't have rtti */
++  if (this == item)
++    return 1;
++  if (item->type() != FUNC_ITEM)
++    return 0;
++  Item_func *item_func=(Item_func*) item;
++  if (arg_count != item_func->arg_count ||
++      functype() != item_func->functype())
++    return 0;
++  Item_func_set_collation *item_func_sc=(Item_func_set_collation*) item;
++  if (collation.collation != item_func_sc->collation.collation)
++    return 0;
++  for (uint i=0; i < arg_count ; i++)
++    if (!args[i]->eq(item_func_sc->args[i], binary_cmp))
++      return 0;
++  return 1;
++}
++
++
++void Item_func_set_collation::print(String *str, enum_query_type query_type)
++{
++  str->append('(');
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" collate "));
++  DBUG_ASSERT(args[1]->basic_const_item() &&
++              args[1]->type() == Item::STRING_ITEM);
++  args[1]->str_value.print(str);
++  str->append(')');
++}
++
++String *Item_func_charset::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint dummy_errors;
++
++  CHARSET_INFO *cs= args[0]->collation.collation; 
++  null_value= 0;
++  str->copy(cs->csname, (uint) strlen(cs->csname),
++	    &my_charset_latin1, collation.collation, &dummy_errors);
++  return str;
++}
++
++String *Item_func_collation::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uint dummy_errors;
++  CHARSET_INFO *cs= args[0]->collation.collation; 
++
++  null_value= 0;
++  str->copy(cs->name, (uint) strlen(cs->name),
++	    &my_charset_latin1, collation.collation, &dummy_errors);
++  return str;
++}
++
++
++String *Item_func_hex::val_str(String *str)
++{
++  String *res;
++  DBUG_ASSERT(fixed == 1);
++  if (args[0]->result_type() != STRING_RESULT)
++  {
++    ulonglong dec;
++    char ans[65],*ptr;
++    /* Return hex of unsigned longlong value */
++    if (args[0]->result_type() == REAL_RESULT ||
++        args[0]->result_type() == DECIMAL_RESULT)
++    {
++      double val= args[0]->val_real();
++      if ((val <= (double) LONGLONG_MIN) || 
++          (val >= (double) (ulonglong) ULONGLONG_MAX))
++        dec=  ~(longlong) 0;
++      else
++        dec= (ulonglong) (val + (val > 0 ? 0.5 : -0.5));
++    }
++    else
++      dec= (ulonglong) args[0]->val_int();
++
++    if ((null_value= args[0]->null_value))
++      return 0;
++    ptr= longlong2str(dec,ans,16);
++    if (str->copy(ans,(uint32) (ptr-ans),default_charset()))
++      return make_empty_result();			// End of memory
++    return str;
++  }
++
++  /* Convert given string to a hex string, character by character */
++  res= args[0]->val_str(str);
++  if (!res || tmp_value.alloc(res->length()*2+1))
++  {
++    null_value=1;
++    return 0;
++  }
++  null_value=0;
++  tmp_value.length(res->length()*2);
++
++  octet2hex((char*) tmp_value.ptr(), res->ptr(), res->length());
++  return &tmp_value;
++}
++
++  /** Convert given hex string to a binary string. */
++
++String *Item_func_unhex::val_str(String *str)
++{
++  const char *from, *end;
++  char *to;
++  String *res;
++  uint length;
++  DBUG_ASSERT(fixed == 1);
++
++  res= args[0]->val_str(str);
++  if (!res || tmp_value.alloc(length= (1+res->length())/2))
++  {
++    null_value=1;
++    return 0;
++  }
++
++  from= res->ptr();
++  null_value= 0;
++  tmp_value.length(length);
++  to= (char*) tmp_value.ptr();
++  if (res->length() % 2)
++  {
++    int hex_char;
++    *to++= hex_char= hexchar_to_int(*from++);
++    if ((null_value= (hex_char == -1)))
++      return 0;
++  }
++  for (end=res->ptr()+res->length(); from < end ; from+=2, to++)
++  {
++    int hex_char;
++    *to= (hex_char= hexchar_to_int(from[0])) << 4;
++    if ((null_value= (hex_char == -1)))
++      return 0;
++    *to|= hex_char= hexchar_to_int(from[1]);
++    if ((null_value= (hex_char == -1)))
++      return 0;
++  }
++  return &tmp_value;
++}
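UNHEX() above walks the hex digits two at a time (with a single leading digit when the length is odd) and packs each pair into one byte, returning NULL on the first non-hex character. A standalone equivalent (hexval()/unhex_demo() are illustrative helpers):

  #include <string>

  static int hexval(char c)                         // -1 on a non-hex character
  {
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
  }

  bool unhex_demo(const std::string &in, std::string &out)
  {
    out.clear();
    std::string::size_type i= 0;
    int hi, lo;
    if (in.size() % 2)                              // odd length: implicit leading 0
    {
      if ((lo= hexval(in[i++])) < 0) return false;
      out+= (char) lo;
    }
    for (; i < in.size(); i+= 2)
    {
      if ((hi= hexval(in[i])) < 0 || (lo= hexval(in[i + 1])) < 0)
        return false;                               // the server returns NULL here
      out+= (char) ((hi << 4) | lo);
    }
    return true;            // unhex_demo("4D7953514C", s) -> s == "MySQL"
  }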
++
++
++void Item_func_binary::print(String *str, enum_query_type query_type)
++{
++  str->append(STRING_WITH_LEN("cast("));
++  args[0]->print(str, query_type);
++  str->append(STRING_WITH_LEN(" as binary)"));
++}
++
++
++#include <my_dir.h>				// For my_stat
++
++String *Item_load_file::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *file_name;
++  File file;
++  MY_STAT stat_info;
++  char path[FN_REFLEN];
++  DBUG_ENTER("load_file");
++
++  if (!(file_name= args[0]->val_str(str))
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++      || !(current_thd->security_ctx->master_access & FILE_ACL)
++#endif
++      )
++    goto err;
++
++  (void) fn_format(path, file_name->c_ptr_safe(), mysql_real_data_home, "",
++		   MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
++
++  /* Read only allowed from within dir specified by secure_file_priv */
++  if (!is_secure_file_path(path))
++    goto err;
++
++  if (!my_stat(path, &stat_info, MYF(0)))
++    goto err;
++
++  if (!(stat_info.st_mode & S_IROTH))
++  {
++    /* my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), file_name->c_ptr()); */
++    goto err;
++  }
++  if (stat_info.st_size > (long) current_thd->variables.max_allowed_packet)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_ALLOWED_PACKET_OVERFLOWED,
++			ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
++			func_name(), current_thd->variables.max_allowed_packet);
++    goto err;
++  }
++  if (tmp_value.alloc(stat_info.st_size))
++    goto err;
++  if ((file = my_open(file_name->ptr(), O_RDONLY, MYF(0))) < 0)
++    goto err;
++  if (my_read(file, (uchar*) tmp_value.ptr(), stat_info.st_size, MYF(MY_NABP)))
++  {
++    my_close(file, MYF(0));
++    goto err;
++  }
++  tmp_value.length(stat_info.st_size);
++  my_close(file, MYF(0));
++  null_value = 0;
++  DBUG_RETURN(&tmp_value);
++
++err:
++  null_value = 1;
++  DBUG_RETURN(0);
++}
++
++
++String* Item_func_export_set::val_str(String* str)
++{
++  DBUG_ASSERT(fixed == 1);
++  ulonglong the_set = (ulonglong) args[0]->val_int();
++  String yes_buf, *yes;
++  yes = args[1]->val_str(&yes_buf);
++  String no_buf, *no;
++  no = args[2]->val_str(&no_buf);
++  String *sep = NULL, sep_buf ;
++
++  uint num_set_values = 64;
++  ulonglong mask = 0x1;
++  str->length(0);
++  str->set_charset(collation.collation);
++
++  /* Check if some argument is a NULL value */
++  if (args[0]->null_value || args[1]->null_value || args[2]->null_value)
++  {
++    null_value=1;
++    return 0;
++  }
++  /*
++    Arg count can only be 3, 4 or 5 here. This is guaranteed by the
++    grammar for EXPORT_SET().
++  */
++  switch(arg_count) {
++  case 5:
++    num_set_values = (uint) args[4]->val_int();
++    if (num_set_values > 64)
++      num_set_values=64;
++    if (args[4]->null_value)
++    {
++      null_value=1;
++      return 0;
++    }
++    /* Fall through */
++  case 4:
++    if (!(sep = args[3]->val_str(&sep_buf)))	// Only true if NULL
++    {
++      null_value=1;
++      return 0;
++    }
++    break;
++  case 3:
++    {
++      /* errors is not checked - assume "," can always be converted */
++      uint errors;
++      sep_buf.copy(STRING_WITH_LEN(","), &my_charset_bin, collation.collation, &errors);
++      sep = &sep_buf;
++    }
++    break;
++  default:
++    DBUG_ASSERT(0); // cannot happen
++  }
++  null_value=0;
++
++  for (uint i = 0; i < num_set_values; i++, mask = (mask << 1))
++  {
++    if (the_set & mask)
++      str->append(*yes);
++    else
++      str->append(*no);
++    if (i != num_set_values - 1)
++      str->append(*sep);
++  }
++  return str;
++}
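EXPORT_SET() above emits, for each of up to num_set_values bits (low bit first), either the 'on' or the 'off' string, joined by the separator. The same loop in isolation (export_set_demo() is illustrative only):

  #include <string>

  std::string export_set_demo(unsigned long long bits, const std::string &on,
                              const std::string &off, const std::string &sep,
                              unsigned n_bits)
  {
    if (n_bits > 64)
      n_bits= 64;                                   // same clamp as above
    std::string out;
    for (unsigned i= 0; i < n_bits; i++)
    {
      out+= (bits & (1ULL << i)) ? on : off;
      if (i + 1 < n_bits)
        out+= sep;                                  // no separator after the last item
    }
    return out;             // export_set_demo(6, "1", "0", ",", 4) == "0,1,1,0"
  }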
++
++void Item_func_export_set::fix_length_and_dec()
++{
++  uint length=max(args[1]->max_length,args[2]->max_length);
++  uint sep_length=(arg_count > 3 ? args[3]->max_length : 1);
++  max_length=length*64+sep_length*63;
++
++  if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1,
++                       MY_COLL_ALLOW_CONV, 1))
++    return;
++}
++
++String* Item_func_inet_ntoa::val_str(String* str)
++{
++  DBUG_ASSERT(fixed == 1);
++  uchar buf[8], *p;
++  ulonglong n = (ulonglong) args[0]->val_int();
++  char num[4];
++
++  /*
++    We do not know whether args[0] is NULL until we have called
++    a val function on it (unless args[0] is a constant).
++
++    Also return null if n > 255.255.255.255
++  */
++  if ((null_value= (args[0]->null_value || n > (ulonglong) LL(4294967295))))
++    return 0;					// Null value
++
++  str->set_charset(collation.collation);
++  str->length(0);
++  int4store(buf,n);
++
++  /* Now we can assume little endian. */
++
++  num[3]='.';
++  for (p=buf+4 ; p-- > buf ; )
++  {
++    uint c = *p;
++    uint n1,n2;					// Try to avoid divisions
++    n1= c / 100;				// 100 digits
++    c-= n1*100;
++    n2= c / 10;					// 10 digits
++    c-=n2*10;					// last digit
++    num[0]=(char) n1+'0';
++    num[1]=(char) n2+'0';
++    num[2]=(char) c+'0';
++    uint length=(n1 ? 4 : n2 ? 3 : 2);		// Remove pre-zero
++
++    (void) str->append(num+4-length,length);
++  }
++  str->length(str->length()-1);			// Remove last '.';
++  return str;
++}
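INET_NTOA() above stores the value into four bytes and formats each one with hand-rolled /100 and /10 splits to avoid extra divisions; the bytes are printed most significant first. A plain snprintf() version of the same conversion (inet_ntoa_demo() is illustrative only):

  #include <cstdio>
  #include <string>

  std::string inet_ntoa_demo(unsigned long long n)
  {
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
                  (unsigned) ((n >> 24) & 0xFF), (unsigned) ((n >> 16) & 0xFF),
                  (unsigned) ((n >> 8) & 0xFF), (unsigned) (n & 0xFF));
    return buf;             // inet_ntoa_demo(3232235521ULL) == "192.168.0.1"
  }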
++
++
++#define get_esc_bit(mask, num) (1 & (*((mask) + ((num) >> 3))) >> ((num) & 7))
++
++/**
++  QUOTE() function returns argument string in single quotes suitable for
++  using in a SQL statement.
++
++  Adds a \\ before all characters that need to be escaped in a SQL string.
++  We also escape '^Z' (END-OF-FILE on Windows) to avoid problems when
++  running commands from a file on Windows.
++
++  This function is very useful when you want to generate SQL statements.
++
++  @note
++    QUOTE(NULL) returns the string 'NULL' (4 letters, without quotes).
++
++  @retval
++    str	   Quoted string
++  @retval
++    NULL	   Out of memory.
++*/
++
++String *Item_func_quote::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  /*
++    Bit mask that has 1 for set for the position of the following characters:
++    0, \, ' and ^Z
++  */
++
++  static uchar escmask[32]=
++  {
++    0x01, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++  };
++
++  char *from, *to, *end, *start;
++  String *arg= args[0]->val_str(str);
++  uint arg_length, new_length;
++  if (!arg)					// Null argument
++  {
++    /* Return the string 'NULL' */
++    str->copy(STRING_WITH_LEN("NULL"), collation.collation);
++    null_value= 0;
++    return str;
++  }
++
++  arg_length= arg->length();
++
++  if (collation.collation->mbmaxlen == 1)
++  {
++    new_length= arg_length + 2; /* for beginning and ending ' signs */
++    for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++)
++      new_length+= get_esc_bit(escmask, (uchar) *from);
++  }
++  else
++  {
++    new_length= (arg_length * 2) +  /* For string characters */
++                (2 * collation.collation->mbmaxlen); /* For quotes */
++  }
++
++  if (tmp_value.alloc(new_length))
++    goto null;
++
++  if (collation.collation->mbmaxlen > 1)
++  {
++    CHARSET_INFO *cs= collation.collation;
++    int mblen;
++    uchar *to_end;
++    to= (char*) tmp_value.ptr();
++    to_end= (uchar*) to + new_length;
++
++    /* Put leading quote */
++    if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0)
++      goto null;
++    to+= mblen;
++
++    for (start= (char*) arg->ptr(), end= start + arg_length; start < end; )
++    {
++      my_wc_t wc;
++      bool escape;
++      if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) start, (uchar*) end)) <= 0)
++        goto null;
++      start+= mblen;
++      switch (wc) {
++        case 0:      escape= 1; wc= '0'; break;
++        case '\032': escape= 1; wc= 'Z'; break;
++        case '\'':   escape= 1; break;
++        case '\\':   escape= 1; break;
++        default:     escape= 0; break;
++      }
++      if (escape)
++      {
++        if ((mblen= cs->cset->wc_mb(cs, '\\', (uchar*) to, to_end)) <= 0)
++          goto null;
++        to+= mblen;
++      }
++      if ((mblen= cs->cset->wc_mb(cs, wc, (uchar*) to, to_end)) <= 0)
++        goto null;
++      to+= mblen;
++    }
++
++    /* Put trailing quote */
++    if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0)
++      goto null;
++    to+= mblen;
++    new_length= to - tmp_value.ptr();
++    goto ret;
++  }
++
++  /*
++    We replace characters from the end to the beginning
++  */
++  to= (char*) tmp_value.ptr() + new_length - 1;
++  *to--= '\'';
++  for (start= (char*) arg->ptr(),end= start + arg_length; end-- != start; to--)
++  {
++    /*
++      We can't use the bitmask here as we want to replace \0 and ^Z with 0
++      and Z
++    */
++    switch (*end)  {
++    case 0:
++      *to--= '0';
++      *to=   '\\';
++      break;
++    case '\032':
++      *to--= 'Z';
++      *to=   '\\';
++      break;
++    case '\'':
++    case '\\':
++      *to--= *end;
++      *to=   '\\';
++      break;
++    default:
++      *to= *end;
++      break;
++    }
++  }
++  *to= '\'';
++
++ret:
++  tmp_value.length(new_length);
++  tmp_value.set_charset(collation.collation);
++  null_value= 0;
++  return &tmp_value;
++
++null:
++  null_value= 1;
++  return 0;
++}
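The escmask/get_esc_bit pair above is a 256-bit lookup table: bit n is set when byte value n (NUL, single quote, backslash, Ctrl-Z) needs a leading backslash, so the single-byte path can count the escapes in one pass. The lookup spelled out as a function (needs_escape() is illustrative only):

  // get_esc_bit(mask, n) picks byte n>>3 of the mask and tests bit n&7 of it.
  static bool needs_escape(unsigned char c)
  {
    static const unsigned char escmask[32]=
    {
      0x01, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
    };
    return (escmask[c >> 3] >> (c & 7)) & 1;
  }
  // needs_escape(0), needs_escape('\''), needs_escape('\\') and
  // needs_escape(0x1A) are true; needs_escape('a') is false.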
++
++longlong Item_func_uncompressed_length::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  if (res->is_empty()) return 0;
++
++  /*
++    If length is <= 4 bytes, data is corrupt. This is the best we can do
++    to detect garbage input without decompressing it.
++  */
++  if (res->length() <= 4)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                        ER_ZLIB_Z_DATA_ERROR,
++                        ER(ER_ZLIB_Z_DATA_ERROR));
++    null_value= 1;
++    return 0;
++  }
++
++ /*
++    Using res->ptr() is safe because we have tested that the string is at
++    least 5 bytes long.
++    res->c_ptr() is not used because:
++      - we do not need a \0 terminated string to get the first 4 bytes
++      - c_ptr() reads the byte after the string end (uninitialized memory),
++        which confuses valgrind
++  */
++  return uint4korr(res->ptr()) & 0x3FFFFFFF;
++}
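UNCOMPRESSED_LENGTH() above never inflates anything: it only reads the 4-byte little-endian header that COMPRESS() prepends (see below) and masks it to 30 bits. In isolation (stored_length() is illustrative only):

  // uint4korr(...) & 0x3FFFFFFF from the code above, written out by hand.
  static unsigned long stored_length(const unsigned char *p)
  {
    unsigned long n= (unsigned long) p[0] | ((unsigned long) p[1] << 8) |
                     ((unsigned long) p[2] << 16) | ((unsigned long) p[3] << 24);
    return n & 0x3FFFFFFF;
  }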
++
++longlong Item_func_crc32::val_int()
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res=args[0]->val_str(&value);
++  if (!res)
++  {
++    null_value=1;
++    return 0; /* purecov: inspected */
++  }
++  null_value=0;
++  return (longlong) crc32(0L, (uchar*)res->ptr(), res->length());
++}
++
++#ifdef HAVE_COMPRESS
++#include "zlib.h"
++
++String *Item_func_compress::val_str(String *str)
++{
++  int err= Z_OK, code;
++  ulong new_size;
++  String *res;
++  Byte *body;
++  char *tmp, *last_char;
++  DBUG_ASSERT(fixed == 1);
++
++  if (!(res= args[0]->val_str(str)))
++  {
++    null_value= 1;
++    return 0;
++  }
++  null_value= 0;
++  if (res->is_empty()) return res;
++
++  /*
++    Citation from zlib.h (comment for compress function):
++
++    Compresses the source buffer into the destination buffer.  sourceLen is
++    the byte length of the source buffer. Upon entry, destLen is the total
++    size of the destination buffer, which must be at least 0.1% larger than
++    sourceLen plus 12 bytes.
++    We assume here that the buffer can't grow more than .25 %.
++  */
++  new_size= res->length() + res->length() / 5 + 12;
++
++  // Check new_size overflow: new_size <= res->length()
++  if (((uint32) (new_size+5) <= res->length()) || 
++      buffer.realloc((uint32) new_size + 4 + 1))
++  {
++    null_value= 1;
++    return 0;
++  }
++
++  body= ((Byte*)buffer.ptr()) + 4;
++
++  // Since we have checked res->is_empty(), we can use ptr()
++  if ((err= compress(body, &new_size,
++		     (const Bytef*)res->ptr(), res->length())) != Z_OK)
++  {
++    code= err==Z_MEM_ERROR ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_BUF_ERROR;
++    push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code));
++    null_value= 1;
++    return 0;
++  }
++
++  tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects
++  int4store(tmp, res->length() & 0x3FFFFFFF);
++
++  /* This is to ensure that things work for CHAR fields, which trim ' ': */
++  last_char= ((char*)body)+new_size-1;
++  if (*last_char == ' ')
++  {
++    *++last_char= '.';
++    new_size++;
++  }
++
++  buffer.length((uint32)new_size + 4);
++  return &buffer;
++}
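COMPRESS() above lays the result out as: a 4-byte little-endian header with the uncompressed length (30 bits), the zlib stream, and an extra '.' when the stream ends in a space so that CHAR columns cannot trim it away. A sketch of the same layout with plain zlib, using compressBound() instead of the 0.1%+12 sizing formula quoted above (compress_demo() is illustrative; errors collapse to a bool):

  #include <string>
  #include <zlib.h>

  bool compress_demo(const std::string &in, std::string &out)
  {
    uLongf comp_len= compressBound(in.size());
    out.assign(4 + comp_len, '\0');
    if (compress((Bytef *) &out[4], &comp_len,
                 (const Bytef *) in.data(), in.size()) != Z_OK)
      return false;
    unsigned long n= in.size() & 0x3FFFFFFF;        // 30-bit length header
    out[0]= (char) (n & 0xFF);
    out[1]= (char) ((n >> 8) & 0xFF);
    out[2]= (char) ((n >> 16) & 0xFF);
    out[3]= (char) ((n >> 24) & 0xFF);
    out.resize(4 + comp_len);
    if (out[out.size() - 1] == ' ')                 // protect a trailing space
      out+= '.';
    return true;
  }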
++
++
++String *Item_func_uncompress::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  String *res= args[0]->val_str(str);
++  ulong new_size;
++  int err;
++  uint code;
++
++  if (!res)
++    goto err;
++  null_value= 0;
++  if (res->is_empty())
++    return res;
++
++  /* If length is less than 4 bytes, data is corrupt */
++  if (res->length() <= 4)
++  {
++    push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++			ER_ZLIB_Z_DATA_ERROR,
++			ER(ER_ZLIB_Z_DATA_ERROR));
++    goto err;
++  }
++
++  /* Size of uncompressed data is stored as first 4 bytes of field */
++  new_size= uint4korr(res->ptr()) & 0x3FFFFFFF;
++  if (new_size > current_thd->variables.max_allowed_packet)
++  {
++    push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
++			ER_TOO_BIG_FOR_UNCOMPRESS,
++			ER(ER_TOO_BIG_FOR_UNCOMPRESS),
++                        current_thd->variables.max_allowed_packet);
++    goto err;
++  }
++  if (buffer.realloc((uint32)new_size))
++    goto err;
++
++  if ((err= uncompress((Byte*)buffer.ptr(), &new_size,
++		       ((const Bytef*)res->ptr())+4,res->length())) == Z_OK)
++  {
++    buffer.length((uint32) new_size);
++    return &buffer;
++  }
++
++  code= ((err == Z_BUF_ERROR) ? ER_ZLIB_Z_BUF_ERROR :
++	 ((err == Z_MEM_ERROR) ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_DATA_ERROR));
++  push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code));
++
++err:
++  null_value= 1;
++  return 0;
++}
++#endif
++
++/*
++  UUID, as in
++    DCE 1.1: Remote Procedure Call,
++    Open Group Technical Standard Document Number C706, October 1997,
++    (supersedes C309 DCE: Remote Procedure Call 8/1994,
++    which was basis for ISO/IEC 11578:1996 specification)
++*/
++
++static struct rand_struct uuid_rand;
++static uint nanoseq;
++static ulonglong uuid_time=0;
++static char clock_seq_and_node_str[]="-0000-000000000000";
++
++/**
++  number of 100-nanosecond intervals between
++  1582-10-15 00:00:00.00 and 1970-01-01 00:00:00.00.
++*/
++#define UUID_TIME_OFFSET ((ulonglong) 141427 * 24 * 60 * 60 * \
++                          1000 * 1000 * 10)
++
++#define UUID_VERSION      0x1000
++#define UUID_VARIANT      0x8000
++
++static void tohex(char *to, uint from, uint len)
++{
++  to+= len;
++  while (len--)
++  {
++    *--to= _dig_vec_lower[from & 15];
++    from >>= 4;
++  }
++}
++
++static void set_clock_seq_str()
++{
++  uint16 clock_seq= ((uint)(my_rnd(&uuid_rand)*16383)) | UUID_VARIANT;
++  tohex(clock_seq_and_node_str+1, clock_seq, 4);
++  nanoseq= 0;
++}
++
++String *Item_func_uuid::val_str(String *str)
++{
++  DBUG_ASSERT(fixed == 1);
++  char *s;
++  THD *thd= current_thd;
++
++  pthread_mutex_lock(&LOCK_uuid_generator);
++  if (! uuid_time) /* first UUID() call. initializing data */
++  {
++    ulong tmp=sql_rnd_with_mutex();
++    uchar mac[6];
++    int i;
++    if (my_gethwaddr(mac))
++    {
++      /* purecov: begin inspected */
++      /*
++        Generate a random "hardware addr".
++        Because the specs explicitly state that it should NOT correlate
++        with the clock_seq value (randomly initialized below), we use a
++        separate randominit() here.
++      */
++      randominit(&uuid_rand, tmp + (ulong) thd, tmp + (ulong)global_query_id);
++      for (i=0; i < (int)sizeof(mac); i++)
++        mac[i]=(uchar)(my_rnd(&uuid_rand)*255);
++      /* purecov: end */    
++    }
++    s=clock_seq_and_node_str+sizeof(clock_seq_and_node_str)-1;
++    for (i=sizeof(mac)-1 ; i>=0 ; i--)
++    {
++      *--s=_dig_vec_lower[mac[i] & 15];
++      *--s=_dig_vec_lower[mac[i] >> 4];
++    }
++    randominit(&uuid_rand, tmp + (ulong) server_start_time,
++	       tmp + (ulong) thd->status_var.bytes_sent);
++    set_clock_seq_str();
++  }
++
++  ulonglong tv= my_getsystime() + UUID_TIME_OFFSET + nanoseq;
++
++  if (likely(tv > uuid_time))
++  {
++    /*
++      Current time is ahead of last timestamp, as it should be.
++      If we "borrowed time", give it back, just as long as we
++      stay ahead of the previous timestamp.
++    */
++    if (nanoseq)
++    {
++      DBUG_ASSERT((tv > uuid_time) && (nanoseq > 0));
++      /*
++        -1 so we won't make tv= uuid_time for nanoseq >= (tv - uuid_time)
++      */
++      ulong delta= min(nanoseq, (ulong) (tv - uuid_time -1));
++      tv-= delta;
++      nanoseq-= delta;
++    }
++  }
++  else
++  {
++    if (unlikely(tv == uuid_time))
++    {
++      /*
++        For low-res system clocks. If several requests for UUIDs
++        end up on the same tick, we add a nano-second to make them
++        different.
++        ( current_timestamp + nanoseq * calls_in_this_period )
++        may end up > next_timestamp; this is OK. Nonetheless, we'll
++        try to unwind nanoseq when we get a chance to.
++        If nanoseq overflows, we'll start over with a new numberspace
++        (so the if() below is needed so we can avoid the ++tv and thus
++        match the follow-up if() if nanoseq overflows!).
++      */
++      if (likely(++nanoseq))
++        ++tv;
++    }
++
++    if (unlikely(tv <= uuid_time))
++    {
++      /*
++        If the admin changes the system clock (or due to Daylight
++        Saving Time), the system clock may be turned *back* so we
++        go through a period once more for which we already gave out
++        UUIDs.  To avoid duplicate UUIDs despite potentially identical
++        times, we make a new random component.
++        We also come here if the nanoseq "borrowing" overflows.
++        In either case, we throw away any nanoseq borrowing since it's
++        irrelevant in the new numberspace.
++      */
++      set_clock_seq_str();
++      tv= my_getsystime() + UUID_TIME_OFFSET;
++      nanoseq= 0;
++      DBUG_PRINT("uuid",("making new numberspace"));
++    }
++  }
++
++  uuid_time=tv;
++  pthread_mutex_unlock(&LOCK_uuid_generator);
++
++  uint32 time_low=            (uint32) (tv & 0xFFFFFFFF);
++  uint16 time_mid=            (uint16) ((tv >> 32) & 0xFFFF);
++  uint16 time_hi_and_version= (uint16) ((tv >> 48) | UUID_VERSION);
++
++  str->realloc(UUID_LENGTH+1);
++  str->length(UUID_LENGTH);
++  str->set_charset(system_charset_info);
++  s=(char *) str->ptr();
++  s[8]=s[13]='-';
++  tohex(s, time_low, 8);
++  tohex(s+9, time_mid, 4);
++  tohex(s+14, time_hi_and_version, 4);
++  strmov(s+18, clock_seq_and_node_str);
++  return str;
++}
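UUID() above builds a version-1 UUID: a 60-bit count of 100-nanosecond intervals since 1582-10-15 is split into time_low (32 bits), time_mid (16) and time_hi with the version nibble, followed by the cached "-clockseq-node" tail kept in clock_seq_and_node_str. The final formatting step in isolation (format_uuid_v1() is illustrative and takes the pieces as arguments):

  #include <cstdio>
  #include <string>

  // Format "tttttttt-tttt-1ttt" from the 60-bit timestamp and append the
  // pre-built "-clockseq-node" tail, e.g. "-89ab-0123456789ab".
  std::string format_uuid_v1(unsigned long long ts_100ns,
                             const std::string &clock_seq_and_node)
  {
    unsigned long  time_low= (unsigned long) (ts_100ns & 0xFFFFFFFFULL);
    unsigned short time_mid= (unsigned short) ((ts_100ns >> 32) & 0xFFFF);
    unsigned short time_hi = (unsigned short) ((ts_100ns >> 48) | 0x1000);
    char buf[19];
    std::snprintf(buf, sizeof(buf), "%08lx-%04x-%04x",
                  time_low, (unsigned) time_mid, (unsigned) time_hi);
    return buf + clock_seq_and_node;
  }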
+diff -urN mysql-old/sql/item_strfunc.h.orig mysql/sql/item_strfunc.h.orig
+--- mysql-old/sql/item_strfunc.h.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/item_strfunc.h.orig	2011-04-12 12:11:38.000000000 +0000
+@@ -0,0 +1,868 @@
++/* Copyright (C) 2000-2003 MySQL AB
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++
++/* This file defines all string functions */
++
++#ifdef USE_PRAGMA_INTERFACE
++#pragma interface			/* gcc class implementation */
++#endif
++
++class Item_str_func :public Item_func
++{
++protected:
++  /**
++     Sets the result value of the function to an empty string, using the current
++     character set. No memory is allocated.
++     @retval A pointer to the str_value member.
++   */
++  String *make_empty_result() {
++    str_value.set("", 0, collation.collation);
++    return &str_value; 
++  }
++public:
++  Item_str_func() :Item_func() { decimals=NOT_FIXED_DEC; }
++  Item_str_func(Item *a) :Item_func(a) {decimals=NOT_FIXED_DEC; }
++  Item_str_func(Item *a,Item *b) :Item_func(a,b) { decimals=NOT_FIXED_DEC; }
++  Item_str_func(Item *a,Item *b,Item *c) :Item_func(a,b,c) { decimals=NOT_FIXED_DEC; }
++  Item_str_func(Item *a,Item *b,Item *c,Item *d) :Item_func(a,b,c,d) {decimals=NOT_FIXED_DEC; }
++  Item_str_func(Item *a,Item *b,Item *c,Item *d, Item* e) :Item_func(a,b,c,d,e) {decimals=NOT_FIXED_DEC; }
++  Item_str_func(List<Item> &list) :Item_func(list) {decimals=NOT_FIXED_DEC; }
++  longlong val_int();
++  double val_real();
++  my_decimal *val_decimal(my_decimal *);
++  enum Item_result result_type () const { return STRING_RESULT; }
++  void left_right_max_length();
++  bool fix_fields(THD *thd, Item **ref);
++};
++
++class Item_func_md5 :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_md5(Item *a) :Item_str_func(a)
++  {
++    collation.set(&my_charset_bin);
++  }
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "md5"; }
++};
++
++
++class Item_func_sha :public Item_str_func
++{
++public:
++  Item_func_sha(Item *a) :Item_str_func(a)
++  {
++    collation.set(&my_charset_bin);
++  }
++  String *val_str(String *);    
++  void fix_length_and_dec();      
++  const char *func_name() const { return "sha"; }	
++};
++
++class Item_func_aes_encrypt :public Item_str_func
++{
++public:
++  Item_func_aes_encrypt(Item *a, Item *b) :Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "aes_encrypt"; }
++};
++
++class Item_func_aes_decrypt :public Item_str_func	
++{
++public:
++  Item_func_aes_decrypt(Item *a, Item *b) :Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "aes_decrypt"; }
++};
++
++
++class Item_func_concat :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_concat(List<Item> &list) :Item_str_func(list) {}
++  Item_func_concat(Item *a,Item *b) :Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "concat"; }
++};
++
++class Item_func_concat_ws :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_concat_ws(List<Item> &list) :Item_str_func(list) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "concat_ws"; }
++  table_map not_null_tables() const { return 0; }
++};
++
++class Item_func_reverse :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_reverse(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "reverse"; }
++};
++
++
++class Item_func_replace :public Item_str_func
++{
++  String tmp_value,tmp_value2;
++public:
++  Item_func_replace(Item *org,Item *find,Item *replace)
++    :Item_str_func(org,find,replace) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "replace"; }
++};
++
++
++class Item_func_insert :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_insert(Item *org,Item *start,Item *length,Item *new_str)
++    :Item_str_func(org,start,length,new_str) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "insert"; }
++};
++
++
++class Item_str_conv :public Item_str_func
++{
++protected:
++  uint multiply;
++  my_charset_conv_case converter;
++  String tmp_value;
++public:
++  Item_str_conv(Item *item) :Item_str_func(item) {}
++  String *val_str(String *);
++};
++
++
++class Item_func_lcase :public Item_str_conv
++{
++public:
++  Item_func_lcase(Item *item) :Item_str_conv(item) {}
++  const char *func_name() const { return "lcase"; }
++  void fix_length_and_dec();
++};
++
++class Item_func_ucase :public Item_str_conv
++{
++public:
++  Item_func_ucase(Item *item) :Item_str_conv(item) {}
++  const char *func_name() const { return "ucase"; }
++  void fix_length_and_dec();
++};
++
++
++class Item_func_left :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_left(Item *a,Item *b) :Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "left"; }
++};
++
++
++class Item_func_right :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_right(Item *a,Item *b) :Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "right"; }
++};
++
++
++class Item_func_substr :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_substr(Item *a,Item *b) :Item_str_func(a,b) {}
++  Item_func_substr(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "substr"; }
++};
++
++
++class Item_func_substr_index :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_substr_index(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "substring_index"; }
++};
++
++
++class Item_func_trim :public Item_str_func
++{
++protected:
++  String tmp_value;
++  String remove;
++public:
++  Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {}
++  Item_func_trim(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "trim"; }
++  virtual void print(String *str, enum_query_type query_type);
++  virtual const char *mode_name() const { return "both"; }
++};
++
++
++class Item_func_ltrim :public Item_func_trim
++{
++public:
++  Item_func_ltrim(Item *a,Item *b) :Item_func_trim(a,b) {}
++  Item_func_ltrim(Item *a) :Item_func_trim(a) {}
++  String *val_str(String *);
++  const char *func_name() const { return "ltrim"; }
++  const char *mode_name() const { return "leading"; }
++};
++
++
++class Item_func_rtrim :public Item_func_trim
++{
++public:
++  Item_func_rtrim(Item *a,Item *b) :Item_func_trim(a,b) {}
++  Item_func_rtrim(Item *a) :Item_func_trim(a) {}
++  String *val_str(String *);
++  const char *func_name() const { return "rtrim"; }
++  const char *mode_name() const { return "trailing"; }
++};
++
++
++/*
++  Item_func_password -- new (4.1.1) PASSWORD() function implementation.
++  Returns strcat('*', octet2hex(sha1(sha1(password)))). '*' stands for the new
++  password format; sha1(sha1(password)) is the so-called hash_stage2 value.
++  The length of the returned string is always 41 bytes. To find out how the
++  entire authentication procedure works, see the comments in password.c.
++*/
++
++class Item_func_password :public Item_str_func
++{
++  char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; 
++public:
++  Item_func_password(Item *a) :Item_str_func(a) {}
++  String *val_str(String *str);
++  void fix_length_and_dec() { max_length= SCRAMBLED_PASSWORD_CHAR_LENGTH; }
++  const char *func_name() const { return "password"; }
++  static char *alloc(THD *thd, const char *password, size_t pass_len);
++};
++
++
++/*
++  Item_func_old_password -- PASSWORD() implementation used in MySQL 3.21 - 4.0
++  compatibility mode. This item is created in sql_yacc.yy when
++  'old_passwords' session variable is set, and to handle OLD_PASSWORD()
++  function.
++*/
++
++class Item_func_old_password :public Item_str_func
++{
++  char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1];
++public:
++  Item_func_old_password(Item *a) :Item_str_func(a) {}
++  String *val_str(String *str);
++  void fix_length_and_dec() { max_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; } 
++  const char *func_name() const { return "old_password"; }
++  static char *alloc(THD *thd, const char *password, size_t pass_len);
++};
++
++
++class Item_func_des_encrypt :public Item_str_func
++{
++  String tmp_value,tmp_arg;
++public:
++  Item_func_des_encrypt(Item *a) :Item_str_func(a) {}
++  Item_func_des_encrypt(Item *a, Item *b): Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    maybe_null=1;
++    /* 9 = MAX ((8- (arg_len % 8)) + 1) */
++    max_length = args[0]->max_length + 9;
++  }
++  const char *func_name() const { return "des_encrypt"; }
++};
++
++class Item_func_des_decrypt :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_des_decrypt(Item *a) :Item_str_func(a) {}
++  Item_func_des_decrypt(Item *a, Item *b): Item_str_func(a,b) {}
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    maybe_null=1;
++    /* 9 = MAX ((8- (arg_len % 8)) + 1) */
++    max_length = args[0]->max_length - 9;
++  }
++  const char *func_name() const { return "des_decrypt"; }
++};
++
++class Item_func_encrypt :public Item_str_func
++{
++  String tmp_value;
++
++  /* Encapsulate common constructor actions */
++  void constructor_helper()
++  {
++    collation.set(&my_charset_bin);
++  }
++public:
++  Item_func_encrypt(Item *a) :Item_str_func(a)
++  {
++    constructor_helper();
++  }
++  Item_func_encrypt(Item *a, Item *b): Item_str_func(a,b)
++  {
++    constructor_helper();
++  }
++  String *val_str(String *);
++  void fix_length_and_dec() { maybe_null=1; max_length = 13; }
++  const char *func_name() const { return "encrypt"; }
++};
++
++#include "sql_crypt.h"
++
++
++class Item_func_encode :public Item_str_func
++{
++private:
++  /** Whether the PRNG has already been seeded. */
++  bool seeded;
++protected:
++  SQL_CRYPT sql_crypt;
++public:
++  Item_func_encode(Item *a, Item *seed):
++    Item_str_func(a, seed) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "encode"; }
++protected:
++  virtual void crypto_transform(String *);
++private:
++  /** Provide a seed for the PRNG sequence. */
++  bool seed();
++};
++
++
++class Item_func_decode :public Item_func_encode
++{
++public:
++  Item_func_decode(Item *a, Item *seed): Item_func_encode(a, seed) {}
++  const char *func_name() const { return "decode"; }
++protected:
++  void crypto_transform(String *);
++};
++
++
++class Item_func_sysconst :public Item_str_func
++{
++public:
++  Item_func_sysconst()
++  { collation.set(system_charset_info,DERIVATION_SYSCONST); }
++  Item *safe_charset_converter(CHARSET_INFO *tocs);
++  /*
++    Used to create the correct Item name for the new converted item in
++    safe_charset_converter(); returns the string representation of this
++    function call.
++  */
++  virtual const char *fully_qualified_func_name() const = 0;
++};
++
++
++class Item_func_database :public Item_func_sysconst
++{
++public:
++  Item_func_database() :Item_func_sysconst() {}
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    max_length= MAX_FIELD_NAME * system_charset_info->mbmaxlen;
++    maybe_null=1;
++  }
++  const char *func_name() const { return "database"; }
++  const char *fully_qualified_func_name() const { return "database()"; }
++};
++
++
++class Item_func_user :public Item_func_sysconst
++{
++protected:
++  bool init (const char *user, const char *host);
++
++public:
++  Item_func_user()
++  {
++    str_value.set("", 0, system_charset_info);
++  }
++  String *val_str(String *)
++  {
++    DBUG_ASSERT(fixed == 1);
++    return (null_value ? 0 : &str_value);
++  }
++  bool fix_fields(THD *thd, Item **ref);
++  void fix_length_and_dec()
++  {
++    max_length= (USERNAME_LENGTH +
++                 (HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN);
++  }
++  const char *func_name() const { return "user"; }
++  const char *fully_qualified_func_name() const { return "user()"; }
++  int save_in_field(Field *field, bool no_conversions)
++  {
++    return save_str_value_in_field(field, &str_value);
++  }
++};
++
++
++class Item_func_current_user :public Item_func_user
++{
++  Name_resolution_context *context;
++
++public:
++  Item_func_current_user(Name_resolution_context *context_arg)
++    : context(context_arg) {}
++  bool fix_fields(THD *thd, Item **ref);
++  const char *func_name() const { return "current_user"; }
++  const char *fully_qualified_func_name() const { return "current_user()"; }
++};
++
++
++class Item_func_soundex :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_soundex(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "soundex"; }
++};
++
++
++class Item_func_elt :public Item_str_func
++{
++public:
++  Item_func_elt(List<Item> &list) :Item_str_func(list) {}
++  double val_real();
++  longlong val_int();
++  String *val_str(String *str);
++  void fix_length_and_dec();
++  const char *func_name() const { return "elt"; }
++};
++
++
++class Item_func_make_set :public Item_str_func
++{
++  Item *item;
++  String tmp_str;
++
++public:
++  Item_func_make_set(Item *a,List<Item> &list) :Item_str_func(list),item(a) {}
++  String *val_str(String *str);
++  bool fix_fields(THD *thd, Item **ref)
++  {
++    DBUG_ASSERT(fixed == 0);
++    return ((!item->fixed && item->fix_fields(thd, &item)) ||
++	    item->check_cols(1) ||
++	    Item_func::fix_fields(thd, ref));
++  }
++  void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields);
++  void fix_length_and_dec();
++  void update_used_tables();
++  const char *func_name() const { return "make_set"; }
++
++  bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
++  {
++    return item->walk(processor, walk_subquery, arg) ||
++      Item_str_func::walk(processor, walk_subquery, arg);
++  }
++  Item *transform(Item_transformer transformer, uchar *arg);
++  virtual void print(String *str, enum_query_type query_type);
++};
++
++
++class Item_func_format :public Item_str_func
++{
++  String tmp_str;
++public:
++  Item_func_format(Item *org, Item *dec);
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "format"; }
++  virtual void print(String *str, enum_query_type query_type);
++};
++
++
++class Item_func_char :public Item_str_func
++{
++public:
++  Item_func_char(List<Item> &list) :Item_str_func(list)
++  { collation.set(&my_charset_bin); }
++  Item_func_char(List<Item> &list, CHARSET_INFO *cs) :Item_str_func(list)
++  { collation.set(cs); }  
++  String *val_str(String *);
++  void fix_length_and_dec() 
++  {
++    max_length= arg_count * 4;
++  }
++  const char *func_name() const { return "char"; }
++};
++
++
++class Item_func_repeat :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_repeat(Item *arg1,Item *arg2) :Item_str_func(arg1,arg2) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "repeat"; }
++};
++
++
++class Item_func_rpad :public Item_str_func
++{
++  String tmp_value, rpad_str;
++public:
++  Item_func_rpad(Item *arg1,Item *arg2,Item *arg3)
++    :Item_str_func(arg1,arg2,arg3) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "rpad"; }
++};
++
++
++class Item_func_lpad :public Item_str_func
++{
++  String tmp_value, lpad_str;
++public:
++  Item_func_lpad(Item *arg1,Item *arg2,Item *arg3)
++    :Item_str_func(arg1,arg2,arg3) {}
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "lpad"; }
++};
++
++
++class Item_func_conv :public Item_str_func
++{
++public:
++  Item_func_conv(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
++  const char *func_name() const { return "conv"; }
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    collation.set(default_charset());
++    max_length=64;
++    maybe_null= 1;
++  }
++};
++
++
++class Item_func_hex :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_hex(Item *a) :Item_str_func(a) {}
++  const char *func_name() const { return "hex"; }
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    collation.set(default_charset());
++    decimals=0;
++    max_length=args[0]->max_length*2*collation.collation->mbmaxlen;
++  }
++};
++
++class Item_func_unhex :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_unhex(Item *a) :Item_str_func(a) 
++  { 
++    /* there can be bad hex strings */
++    maybe_null= 1; 
++  }
++  const char *func_name() const { return "unhex"; }
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    collation.set(&my_charset_bin);
++    decimals=0;
++    max_length=(1+args[0]->max_length)/2;
++  }
++};
++
++
++class Item_func_binary :public Item_str_func
++{
++public:
++  Item_func_binary(Item *a) :Item_str_func(a) {}
++  String *val_str(String *a)
++  {
++    DBUG_ASSERT(fixed == 1);
++    String *tmp=args[0]->val_str(a);
++    null_value=args[0]->null_value;
++    if (tmp)
++      tmp->set_charset(&my_charset_bin);
++    return tmp;
++  }
++  void fix_length_and_dec()
++  {
++    collation.set(&my_charset_bin);
++    max_length=args[0]->max_length;
++  }
++  virtual void print(String *str, enum_query_type query_type);
++  const char *func_name() const { return "cast_as_binary"; }
++};
++
++
++class Item_load_file :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_load_file(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  const char *func_name() const { return "load_file"; }
++  void fix_length_and_dec()
++  {
++    collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
++    maybe_null=1;
++    max_length=MAX_BLOB_WIDTH;
++  }
++};
++
++
++class Item_func_export_set: public Item_str_func
++{
++ public:
++  Item_func_export_set(Item *a,Item *b,Item* c) :Item_str_func(a,b,c) {}
++  Item_func_export_set(Item *a,Item *b,Item* c,Item* d) :Item_str_func(a,b,c,d) {}
++  Item_func_export_set(Item *a,Item *b,Item* c,Item* d,Item* e) :Item_str_func(a,b,c,d,e) {}
++  String  *val_str(String *str);
++  void fix_length_and_dec();
++  const char *func_name() const { return "export_set"; }
++};
++
++class Item_func_inet_ntoa : public Item_str_func
++{
++public:
++  Item_func_inet_ntoa(Item *a) :Item_str_func(a)
++    {
++    }
++  String* val_str(String* str);
++  const char *func_name() const { return "inet_ntoa"; }
++  void fix_length_and_dec() 
++  { 
++    decimals= 0; 
++    max_length= 3 * 8 + 7; 
++    maybe_null= 1;
++  }
++};
++
++class Item_func_quote :public Item_str_func
++{
++  String tmp_value;
++public:
++  Item_func_quote(Item *a) :Item_str_func(a) {}
++  const char *func_name() const { return "quote"; }
++  String *val_str(String *);
++  void fix_length_and_dec()
++  {
++    collation.set(args[0]->collation);
++    ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 +
++                                  2 * collation.collation->mbmaxlen;
++    max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH);
++  }
++};
++
++class Item_func_conv_charset :public Item_str_func
++{
++  bool use_cached_value;
++  String tmp_value;
++public:
++  bool safe;
++  CHARSET_INFO *conv_charset; // keep it public
++  Item_func_conv_charset(Item *a, CHARSET_INFO *cs) :Item_str_func(a) 
++  { conv_charset= cs; use_cached_value= 0; safe= 0; }
++  Item_func_conv_charset(Item *a, CHARSET_INFO *cs, bool cache_if_const) 
++    :Item_str_func(a) 
++  {
++    DBUG_ASSERT(args[0]->fixed);
++    conv_charset= cs;
++    if (cache_if_const && args[0]->const_item())
++    {
++      uint errors= 0;
++      String tmp, *str= args[0]->val_str(&tmp);
++      if (!str || str_value.copy(str->ptr(), str->length(),
++                                 str->charset(), conv_charset, &errors))
++        null_value= 1;
++      use_cached_value= 1;
++      str_value.mark_as_const();
++      safe= (errors == 0);
++    }
++    else
++    {
++      use_cached_value= 0;
++      /*
++        Conversion from and to "binary" is safe.
++        Conversion to Unicode is safe.
++        Other kind of conversions are potentially lossy.
++      */
++      safe= (args[0]->collation.collation == &my_charset_bin ||
++             cs == &my_charset_bin ||
++             (cs->state & MY_CS_UNICODE));
++    }
++  }
++  String *val_str(String *);
++  void fix_length_and_dec();
++  const char *func_name() const { return "convert"; }
++  virtual void print(String *str, enum_query_type query_type);
++};
++
++class Item_func_set_collation :public Item_str_func
++{
++public:
++  Item_func_set_collation(Item *a, Item *b) :Item_str_func(a,b) {};
++  String *val_str(String *);
++  void fix_length_and_dec();
++  bool eq(const Item *item, bool binary_cmp) const;
++  const char *func_name() const { return "collate"; }
++  enum Functype functype() const { return COLLATE_FUNC; }
++  virtual void print(String *str, enum_query_type query_type);
++  Item_field *filed_for_view_update()
++  {
++    /* this function is transparent for view updating */
++    return args[0]->filed_for_view_update();
++  }
++};
++
++class Item_func_charset :public Item_str_func
++{
++public:
++  Item_func_charset(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  const char *func_name() const { return "charset"; }
++  void fix_length_and_dec()
++  {
++     collation.set(system_charset_info);
++     max_length= 64 * collation.collation->mbmaxlen; // should be enough
++     maybe_null= 0;
++  };
++  table_map not_null_tables() const { return 0; }
++};
++
++class Item_func_collation :public Item_str_func
++{
++public:
++  Item_func_collation(Item *a) :Item_str_func(a) {}
++  String *val_str(String *);
++  const char *func_name() const { return "collation"; }
++  void fix_length_and_dec()
++  {
++     collation.set(system_charset_info);
++     max_length= 64 * collation.collation->mbmaxlen; // should be enough
++     maybe_null= 0;
++  };
++  table_map not_null_tables() const { return 0; }
++};
++
++class Item_func_crc32 :public Item_int_func
++{
++  String value;
++public:
++  Item_func_crc32(Item *a) :Item_int_func(a) { unsigned_flag= 1; }
++  const char *func_name() const { return "crc32"; }
++  void fix_length_and_dec() { max_length=10; }
++  longlong val_int();
++};
++
++class Item_func_uncompressed_length : public Item_int_func
++{
++  String value;
++public:
++  Item_func_uncompressed_length(Item *a):Item_int_func(a){}
++  const char *func_name() const{return "uncompressed_length";}
++  void fix_length_and_dec() { max_length=10; }
++  longlong val_int();
++};
++
++#ifdef HAVE_COMPRESS
++#define ZLIB_DEPENDED_FUNCTION ;
++#else
++#define ZLIB_DEPENDED_FUNCTION { null_value=1; return 0; }
++#endif
++
++class Item_func_compress: public Item_str_func
++{
++  String buffer;
++public:
++  Item_func_compress(Item *a):Item_str_func(a){}
++  void fix_length_and_dec(){max_length= (args[0]->max_length*120)/100+12;}
++  const char *func_name() const{return "compress";}
++  String *val_str(String *) ZLIB_DEPENDED_FUNCTION
++};
++
++class Item_func_uncompress: public Item_str_func
++{
++  String buffer;
++public:
++  Item_func_uncompress(Item *a): Item_str_func(a){}
++  void fix_length_and_dec(){ maybe_null= 1; max_length= MAX_BLOB_WIDTH; }
++  const char *func_name() const{return "uncompress";}
++  String *val_str(String *) ZLIB_DEPENDED_FUNCTION
++};
++
++#define UUID_LENGTH (8+1+4+1+4+1+4+1+12)
++class Item_func_uuid: public Item_str_func
++{
++public:
++  Item_func_uuid(): Item_str_func() {}
++  void fix_length_and_dec() {
++    collation.set(system_charset_info);
++    /*
++       NOTE! uuid() should be changed to use 'ascii'
++       charset when hex(), format(), md5(), etc, and implicit
++       number-to-string conversion will use 'ascii'
++    */
++    max_length= UUID_LENGTH * system_charset_info->mbmaxlen;
++  }
++  const char *func_name() const{ return "uuid"; }
++  String *val_str(String *);
++};
++
+diff -urN mysql-old/sql/item_strfunc.h.rej mysql/sql/item_strfunc.h.rej
+--- mysql-old/sql/item_strfunc.h.rej	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/item_strfunc.h.rej	2011-05-10 17:56:01.353349043 +0000
+@@ -0,0 +1,11 @@
++--- sql/item_strfunc.h	2010-08-03 17:24:28.000000000 +0000
+++++ sql/item_strfunc.h	2010-08-20 22:27:12.919596025 +0000
++@@ -705,7 +705,7 @@
++   void fix_length_and_dec()
++   {
++     ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2;
++-    max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH);
+++    max_length= (uint32) MYSQL_MIN(max_result_length, MAX_BLOB_WIDTH);
++     collation.set(args[0]->collation);
++   }
++ };
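The rejected hunk above shows the same mechanical substitution the rest of the patch applies: every bare min()/max() macro call is renamed to MYSQL_MIN()/MYSQL_MAX(), the usual workaround for the old my_global.h macros colliding with the C++ standard headers shipped with GCC 4.2 and later. The hunk that actually defines the replacement macros is not part of this excerpt, so the following is only a minimal sketch of what such definitions conventionally look like, not the patch's own text:

// Hypothetical stand-ins for the macros the renamed call sites resolve to;
// the real definitions live elsewhere in the patch and are not shown in this excerpt.
#include <cstdio>

#define MYSQL_MIN(a, b) ((a) < (b) ? (a) : (b))
#define MYSQL_MAX(a, b) ((a) > (b) ? (a) : (b))

int main() {
  // Same shape as the call sites in the hunks, e.g. tmp = MYSQL_MIN(length, sizeof(buff));
  unsigned long length = 4096, buff_size = 512;
  std::printf("%lu\n", (unsigned long) MYSQL_MIN(length, buff_size));  // prints 512
  return 0;
}

If the original min()/max() were the usual ternary macros, a plain rename like this keeps the exact same expansion at every call site, which is why the hunks below are purely textual and change no behaviour.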
+diff -urN mysql-old/sql/item_sum.cc mysql/sql/item_sum.cc
+--- mysql-old/sql/item_sum.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/item_sum.cc	2011-05-10 17:56:01.353349043 +0000
+@@ -1143,7 +1143,7 @@
+     AVG() will divide val by count. We need to reserve digits
+     after decimal point as the result can be fractional.
+   */
+-  decimals= min(decimals + prec_increment, NOT_FIXED_DEC);
++  decimals= MYSQL_MIN(decimals + prec_increment, NOT_FIXED_DEC);
+ }
+ 
+ 
+@@ -1206,16 +1206,16 @@
+   if (hybrid_type == DECIMAL_RESULT)
+   {
+     int precision= args[0]->decimal_precision() + prec_increment;
+-    decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
++    decimals= MYSQL_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+     max_length= my_decimal_precision_to_length_no_truncation(precision,
+                                                              decimals,
+                                                              unsigned_flag);
+-    f_precision= min(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION);
++    f_precision= MYSQL_MIN(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION);
+     f_scale=  args[0]->decimals;
+     dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale);
+   }
+   else {
+-    decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
++    decimals= MYSQL_MIN(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
+     max_length= args[0]->max_length + prec_increment;
+   }
+ }
+@@ -1406,13 +1406,13 @@
+   switch (args[0]->result_type()) {
+   case REAL_RESULT:
+   case STRING_RESULT:
+-    decimals= min(args[0]->decimals + 4, NOT_FIXED_DEC);
++    decimals= MYSQL_MIN(args[0]->decimals + 4, NOT_FIXED_DEC);
+     break;
+   case INT_RESULT:
+   case DECIMAL_RESULT:
+   {
+     int precision= args[0]->decimal_precision()*2 + prec_increment;
+-    decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
++    decimals= MYSQL_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+     max_length= my_decimal_precision_to_length_no_truncation(precision,
+                                                              decimals,
+                                                              unsigned_flag);
+@@ -3349,7 +3349,7 @@
+       syntax of this function). If there is no ORDER BY clause, we don't
+       create this tree.
+     */
+-    init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
++    init_tree(tree, (uint) MYSQL_MIN(thd->variables.max_heap_table_size,
+                                thd->variables.sortbuff_size/16), 0,
+               tree_key_length, 
+               group_concat_key_cmp_with_order , 0, NULL, (void*) this);
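The item_sum.cc hunks above only rename the clamping calls, but the surrounding comments carry the actual reasoning: AVG() and the variance functions reserve prec_increment extra fractional digits for the division and then cap the resulting scale. A tiny standalone illustration of that cap (the NOT_FIXED_DEC value of 31 and the increment of 4 are assumptions of this sketch, not taken from the excerpt):

// Hypothetical illustration of the scale clamping in the AVG() hunk above.
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned NOT_FIXED_DEC  = 31;  // assumed cap for a "not fixed" scale
  unsigned decimals       = 30;        // scale of the aggregated column
  unsigned prec_increment = 4;         // assumed div_precision_increment setting

  // decimals= MYSQL_MIN(decimals + prec_increment, NOT_FIXED_DEC);
  decimals = std::min(decimals + prec_increment, NOT_FIXED_DEC);

  std::printf("result scale: %u\n", decimals);  // 30 + 4 = 34 is clamped to 31
  return 0;
}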
+diff -urN mysql-old/sql/item_timefunc.cc mysql/sql/item_timefunc.cc
+--- mysql-old/sql/item_timefunc.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/item_timefunc.cc	2011-05-10 17:56:01.356682376 +0000
+@@ -308,14 +308,14 @@
+       switch (*++ptr) {
+ 	/* Year */
+       case 'Y':
+-	tmp= (char*) val + min(4, val_len);
++	tmp= (char*) val + MYSQL_MIN(4, val_len);
+ 	l_time->year= (int) my_strtoll10(val, &tmp, &error);
+         if ((int) (tmp-val) <= 2)
+           l_time->year= year_2000_handling(l_time->year);
+ 	val= tmp;
+ 	break;
+       case 'y':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->year= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+         l_time->year= year_2000_handling(l_time->year);
+@@ -324,7 +324,7 @@
+ 	/* Month */
+       case 'm':
+       case 'c':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->month= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+@@ -341,15 +341,15 @@
+ 	/* Day */
+       case 'd':
+       case 'e':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->day= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+       case 'D':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->day= (int) my_strtoll10(val, &tmp, &error);
+ 	/* Skip 'st, 'nd, 'th .. */
+-	val= tmp + min((int) (val_end-tmp), 2);
++	val= tmp + MYSQL_MIN((int) (val_end-tmp), 2);
+ 	break;
+ 
+ 	/* Hour */
+@@ -360,14 +360,14 @@
+ 	/* fall through */
+       case 'k':
+       case 'H':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->hour= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+ 
+ 	/* Minute */
+       case 'i':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->minute= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+@@ -375,7 +375,7 @@
+ 	/* Second */
+       case 's':
+       case 'S':
+-	tmp= (char*) val + min(2, val_len);
++	tmp= (char*) val + MYSQL_MIN(2, val_len);
+ 	l_time->second= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+@@ -427,7 +427,7 @@
+ 	val= tmp;
+ 	break;
+       case 'j':
+-	tmp= (char*) val + min(val_len, 3);
++	tmp= (char*) val + MYSQL_MIN(val_len, 3);
+ 	yearday= (int) my_strtoll10(val, &tmp, &error);
+ 	val= tmp;
+ 	break;
+@@ -439,7 +439,7 @@
+       case 'u':
+         sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V');
+         strict_week_number= (*ptr=='V' || *ptr=='v');
+-	tmp= (char*) val + min(val_len, 2);
++	tmp= (char*) val + MYSQL_MIN(val_len, 2);
+ 	if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
+             (strict_week_number && !week_number) ||
+             week_number > 53)
+@@ -451,7 +451,7 @@
+       case 'X':
+       case 'x':
+         strict_week_number_year_type= (*ptr=='X');
+-        tmp= (char*) val + min(4, val_len);
++        tmp= (char*) val + MYSQL_MIN(4, val_len);
+         strict_week_number_year= (int) my_strtoll10(val, &tmp, &error);
+         val= tmp;
+         break;
+@@ -596,7 +596,7 @@
+ err:
+   {
+     char buff[128];
+-    strmake(buff, val_begin, min(length, sizeof(buff)-1));
++    strmake(buff, val_begin, MYSQL_MIN(length, sizeof(buff)-1));
+     push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+                         ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
+                         date_time_type, buff, "str_to_date");
+@@ -1823,7 +1823,7 @@
+   else
+   {
+     fixed_length=0;
+-    max_length=min(arg1->max_length, MAX_BLOB_WIDTH) * 10 *
++    max_length=MYSQL_MIN(arg1->max_length, MAX_BLOB_WIDTH) * 10 *
+                    collation.collation->mbmaxlen;
+     set_if_smaller(max_length,MAX_BLOB_WIDTH);
+   }
+diff -urN mysql-old/sql/key.cc mysql/sql/key.cc
+--- mysql-old/sql/key.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/key.cc	2011-05-10 17:56:01.356682376 +0000
+@@ -125,13 +125,13 @@
+         key_part->key_part_flag & HA_VAR_LENGTH_PART)
+     {
+       key_length-= HA_KEY_BLOB_LENGTH;
+-      length= min(key_length, key_part->length);
++      length= MYSQL_MIN(key_length, key_part->length);
+       key_part->field->get_key_image(to_key, length, Field::itRAW);
+       to_key+= HA_KEY_BLOB_LENGTH;
+     }
+     else
+     {
+-      length= min(key_length, key_part->length);
++      length= MYSQL_MIN(key_length, key_part->length);
+       Field *field= key_part->field;
+       CHARSET_INFO *cs= field->charset();
+       uint bytes= field->get_key_image(to_key, length, Field::itRAW);
+@@ -215,7 +215,7 @@
+       my_ptrdiff_t ptrdiff= to_record - field->table->record[0];
+       field->move_field_offset(ptrdiff);
+       key_length-= HA_KEY_BLOB_LENGTH;
+-      length= min(key_length, key_part->length);
++      length= MYSQL_MIN(key_length, key_part->length);
+       old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set);
+       field->set_key_image(from_key, length);
+       dbug_tmp_restore_column_map(field->table->write_set, old_map);
+@@ -224,7 +224,7 @@
+     }
+     else
+     {
+-      length= min(key_length, key_part->length);
++      length= MYSQL_MIN(key_length, key_part->length);
+       /* skip the byte with 'uneven' bits, if used */
+       memcpy(to_record + key_part->offset, from_key + used_uneven_bits
+              , (size_t) length - used_uneven_bits);
+@@ -285,7 +285,7 @@
+ 	return 1;
+       continue;
+     }
+-    length= min((uint) (key_end-key), store_length);
++    length= MYSQL_MIN((uint) (key_end-key), store_length);
+     if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
+                                 FIELDFLAG_PACK)))
+     {
+@@ -351,7 +351,7 @@
+     {
+       field->val_str(&tmp);
+       if (key_part->length < field->pack_length())
+-	tmp.length(min(tmp.length(),key_part->length));
++	tmp.length(MYSQL_MIN(tmp.length(),key_part->length));
+       to->append(tmp);
+     }
+     else
+diff -urN mysql-old/sql/log.cc mysql/sql/log.cc
+--- mysql-old/sql/log.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/log.cc	2011-05-10 17:56:01.360015709 +0000
+@@ -597,11 +597,11 @@
+     t.neg= 0;
+ 
+     /* fill in query_time field */
+-    calc_time_from_sec(&t, (long) min(query_time, (longlong) TIME_MAX_VALUE_SECONDS), 0);
++    calc_time_from_sec(&t, (long) MYSQL_MIN(query_time, (longlong) TIME_MAX_VALUE_SECONDS), 0);
+     if (table->field[2]->store_time(&t, MYSQL_TIMESTAMP_TIME))
+       goto err;
+     /* lock_time */
+-    calc_time_from_sec(&t, (long) min(lock_time, (longlong) TIME_MAX_VALUE_SECONDS), 0);
++    calc_time_from_sec(&t, (long) MYSQL_MIN(lock_time, (longlong) TIME_MAX_VALUE_SECONDS), 0);
+     if (table->field[3]->store_time(&t, MYSQL_TIMESTAMP_TIME))
+       goto err;
+     /* rows_sent */
+@@ -2429,7 +2429,7 @@
+   {
+     char *p= fn_ext(log_name);
+     uint length= (uint) (p - log_name);
+-    strmake(buff, log_name, min(length, FN_REFLEN-1));
++    strmake(buff, log_name, MYSQL_MIN(length, FN_REFLEN-1));
+     return (const char*)buff;
+   }
+   return log_name;
+@@ -5236,7 +5236,7 @@
+   DBUG_ENTER("print_buffer_to_nt_eventlog");
+ 
+   /* Add ending CR/LF's to string, overwrite last chars if necessary */
+-  strmov(buffptr+min(length, buffLen-5), "\r\n\r\n");
++  strmov(buffptr+MYSQL_MIN(length, buffLen-5), "\r\n\r\n");
+ 
+   setup_windows_event_source();
+   if ((event= RegisterEventSource(NULL,"MySQL")))
+diff -urN mysql-old/sql/log_event.cc mysql/sql/log_event.cc
+--- mysql-old/sql/log_event.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/log_event.cc	2011-05-10 17:56:01.456682377 +0000
+@@ -1075,7 +1075,7 @@
+     of 13 bytes, whereas LOG_EVENT_MINIMAL_HEADER_LEN is 19 bytes (it's
+     "minimal" over the set {MySQL >=4.0}).
+   */
+-  uint header_size= min(description_event->common_header_len,
++  uint header_size= MYSQL_MIN(description_event->common_header_len,
+                         LOG_EVENT_MINIMAL_HEADER_LEN);
+ 
+   LOCK_MUTEX;
+@@ -2671,7 +2671,7 @@
+       be even bigger, but this will suffice to catch most corruption
+       errors that can lead to a crash.
+     */
+-    if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS))
++    if (status_vars_len > MYSQL_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
+     {
+       DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
+                           status_vars_len, data_len));
+@@ -5588,7 +5588,7 @@
+   char buf[UV_NAME_LEN_SIZE];
+   char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + 
+ 	    UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE];
+-  uchar buf2[max(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2;
++  uchar buf2[MYSQL_MAX(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2;
+   uint buf1_length;
+   ulong event_length;
+ 
+@@ -7315,7 +7315,7 @@
+     trigger false warnings.
+    */
+ #ifndef HAVE_purify
+-  DBUG_DUMP("row_data", row_data, min(length, 32));
++  DBUG_DUMP("row_data", row_data, MYSQL_MIN(length, 32));
+ #endif
+ 
+   DBUG_ASSERT(m_rows_buf <= m_rows_cur);
+diff -urN mysql-old/sql/log_event_old.cc mysql/sql/log_event_old.cc
+--- mysql-old/sql/log_event_old.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/log_event_old.cc	2011-05-10 17:56:01.460015710 +0000
+@@ -1420,7 +1420,7 @@
+     trigger false warnings.
+    */
+ #ifndef HAVE_purify
+-  DBUG_DUMP("row_data", row_data, min(length, 32));
++  DBUG_DUMP("row_data", row_data, MYSQL_MIN(length, 32));
+ #endif
+ 
+   DBUG_ASSERT(m_rows_buf <= m_rows_cur);
+diff -urN mysql-old/sql/mysqld.cc mysql/sql/mysqld.cc
+--- mysql-old/sql/mysqld.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/mysqld.cc	2011-05-10 17:56:01.463349043 +0000
+@@ -3383,7 +3383,7 @@
+       can't get max_connections*5 but still got no less than was
+       requested (value of wanted_files).
+     */
+-    max_open_files= max(max(wanted_files, max_connections*5),
++    max_open_files= MYSQL_MAX(MYSQL_MAX(wanted_files, max_connections*5),
+                         open_files_limit);
+     files= my_set_max_open_files(max_open_files);
+ 
+@@ -3395,15 +3395,15 @@
+           If we have requested too much file handles than we bring
+           max_connections in supported bounds.
+         */
+-        max_connections= (ulong) min(files-10-TABLE_OPEN_CACHE_MIN*2,
++        max_connections= (ulong) MYSQL_MIN(files-10-TABLE_OPEN_CACHE_MIN*2,
+                                      max_connections);
+         /*
+           Decrease table_cache_size according to max_connections, but
+-          not below TABLE_OPEN_CACHE_MIN.  Outer min() ensures that we
++          not below TABLE_OPEN_CACHE_MIN.  Outer MYSQL_MIN() ensures that we
+           never increase table_cache_size automatically (that could
+           happen if max_connections is decreased above).
+         */
+-        table_cache_size= (ulong) min(max((files-10-max_connections)/2,
++        table_cache_size= (ulong) MYSQL_MIN(MYSQL_MAX((files-10-max_connections)/2,
+                                           TABLE_OPEN_CACHE_MIN),
+                                       table_cache_size);
+ 	DBUG_PRINT("warning",
+@@ -5085,7 +5085,7 @@
+ {
+   my_socket sock,new_sock;
+   uint error_count=0;
+-  uint max_used_connection= (uint) (max(ip_sock,unix_sock)+1);
++  uint max_used_connection= (uint) (MYSQL_MAX(ip_sock,unix_sock)+1);
+   fd_set readFDs,clientFDs;
+   THD *thd;
+   struct sockaddr_in cAddr;
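The mysqld.cc hunk above is where the rename touches nested calls, and its comments explain the intent: the server first asks for a generous descriptor budget, then, if the OS grants fewer, pulls max_connections and table_cache_size back into range, never letting the cache drop below TABLE_OPEN_CACHE_MIN and never growing it past its configured value. A standalone sketch with made-up numbers (the constant, the inputs, and the surrounding condition are assumptions, since the full function is not shown here):

// Standalone sketch of the descriptor-budget clamping in the mysqld.cc hunk above.
// All values are hypothetical; the real server derives them from configuration and
// from what my_set_max_open_files() actually managed to obtain.
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned long TABLE_OPEN_CACHE_MIN = 64;  // assumed value for illustration
  unsigned long wanted_files     = 1024;
  unsigned long open_files_limit = 0;
  unsigned long max_connections  = 1000;
  unsigned long table_cache_size = 2000;

  // MYSQL_MAX(MYSQL_MAX(wanted_files, max_connections*5), open_files_limit)
  unsigned long max_open_files =
      std::max(std::max(wanted_files, max_connections * 5), open_files_limit);

  unsigned long files = 800;  // pretend the OS granted only 800 descriptors

  if (files < wanted_files) {
    // Too few descriptors: pull max_connections down to what the budget supports...
    max_connections = std::min(files - 10 - TABLE_OPEN_CACHE_MIN * 2, max_connections);
    // ...and shrink table_cache_size, never below TABLE_OPEN_CACHE_MIN and never
    // above its configured value (that is what the outer MYSQL_MIN guarantees).
    table_cache_size = std::min(
        std::max((files - 10 - max_connections) / 2, TABLE_OPEN_CACHE_MIN),
        table_cache_size);
  }

  std::printf("max_open_files=%lu max_connections=%lu table_cache_size=%lu\n",
              max_open_files, max_connections, table_cache_size);
  // Prints: max_open_files=5000 max_connections=662 table_cache_size=64
  return 0;
}

With these inputs a grant of 800 descriptors supports only 662 connections and squeezes the table cache down to its floor, which is exactly the behaviour the MYSQL_MIN()/MYSQL_MAX() pair in the hunk encodes.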
+diff -urN mysql-old/sql/mysqld.cc.orig mysql/sql/mysqld.cc.orig
+--- mysql-old/sql/mysqld.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/mysqld.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,9292 @@
++/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++#include "mysql_priv.h"
++#include <m_ctype.h>
++#include <my_dir.h>
++#include <my_bit.h>
++#include "slave.h"
++#include "rpl_mi.h"
++#include "sql_repl.h"
++#include "rpl_filter.h"
++#include "repl_failsafe.h"
++#include <my_stacktrace.h>
++#include "mysqld_suffix.h"
++#include "mysys_err.h"
++#include "events.h"
++#include "debug_sync.h"
++
++#include "../storage/myisam/ha_myisam.h"
++
++#include "rpl_injector.h"
++
++#ifdef HAVE_SYS_PRCTL_H
++#include <sys/prctl.h>
++#endif
++
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++#if defined(NOT_ENOUGH_TESTED) \
++  && defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000
++#define OPT_NDB_SHM_DEFAULT 1
++#else
++#define OPT_NDB_SHM_DEFAULT 0
++#endif
++#endif
++
++#ifndef DEFAULT_SKIP_THREAD_PRIORITY
++#define DEFAULT_SKIP_THREAD_PRIORITY 0
++#endif
++
++#include <thr_alarm.h>
++#include <ft_global.h>
++#include <errmsg.h>
++#include "sp_rcontext.h"
++#include "sp_cache.h"
++
++#define mysqld_charset &my_charset_latin1
++
++#ifdef HAVE_purify
++#define IF_PURIFY(A,B) (A)
++#else
++#define IF_PURIFY(A,B) (B)
++#endif
++
++#if SIZEOF_CHARP == 4
++#define MAX_MEM_TABLE_SIZE ~(ulong) 0
++#else
++#define MAX_MEM_TABLE_SIZE ~(ulonglong) 0
++#endif
++
++/* stack traces are only supported on linux intel */
++#if defined(__linux__)  && defined(__i386__)
++#define	HAVE_STACK_TRACE_ON_SEGV
++#endif /* __linux__ */
++
++/* We have HAVE_purify below as this speeds up the shutdown of MySQL */
++
++#if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ) || defined(HAVE_purify) && defined(__linux__)
++#define HAVE_CLOSE_SERVER_SOCK 1
++#endif
++
++extern "C" {					// Because of SCO 3.2V4.2
++#include <errno.h>
++#include <sys/stat.h>
++#ifndef __GNU_LIBRARY__
++#define __GNU_LIBRARY__				// Skip warnings in getopt.h
++#endif
++#include <my_getopt.h>
++#ifdef HAVE_SYSENT_H
++#include <sysent.h>
++#endif
++#ifdef HAVE_PWD_H
++#include <pwd.h>				// For getpwent
++#endif
++#ifdef HAVE_GRP_H
++#include <grp.h>
++#endif
++#include <my_net.h>
++
++#if !defined(__WIN__)
++#  ifndef __NETWARE__
++#include <sys/resource.h>
++#  endif /* __NETWARE__ */
++#ifdef HAVE_SYS_UN_H
++#  include <sys/un.h>
++#endif
++#include <netdb.h>
++#ifdef HAVE_SELECT_H
++#  include <select.h>
++#endif
++#ifdef HAVE_SYS_SELECT_H
++#include <sys/select.h>
++#endif
++#include <sys/utsname.h>
++#endif /* __WIN__ */
++
++#include <my_libwrap.h>
++
++#ifdef HAVE_SYS_MMAN_H
++#include <sys/mman.h>
++#endif
++
++#ifdef __WIN__ 
++#include <crtdbg.h>
++#define SIGNAL_FMT "exception 0x%x"
++#else
++#define SIGNAL_FMT "signal %d"
++#endif
++
++#ifdef __NETWARE__
++#define zVOLSTATE_ACTIVE 6
++#define zVOLSTATE_DEACTIVE 2
++#define zVOLSTATE_MAINTENANCE 3
++
++#undef __event_h__
++#include <../include/event.h>
++/*
++  This #undef exists here because both libc of NetWare and MySQL have
++  files named event.h which causes compilation errors.
++*/
++
++#include <nks/netware.h>
++#include <nks/vm.h>
++#include <library.h>
++#include <monitor.h>
++#include <zOmni.h>                              //For NEB
++#include <neb.h>                                //For NEB
++#include <nebpub.h>                             //For NEB
++#include <zEvent.h>                             //For NSS event structures
++#include <zPublics.h>
++
++static void *neb_consumer_id= NULL;             //For storing NEB consumer id
++static char datavolname[256]= {0};
++static VolumeID_t datavolid;
++static event_handle_t eh;
++static Report_t ref;
++static void *refneb= NULL;
++my_bool event_flag= FALSE;
++static int volumeid= -1;
++
++  /* NEB event callback */
++unsigned long neb_event_callback(struct EventBlock *eblock);
++static void registerwithneb();
++static void getvolumename();
++static void getvolumeID(BYTE *volumeName);
++#endif /* __NETWARE__ */
++  
++
++#ifdef _AIX41
++int initgroups(const char *,unsigned int);
++#endif
++
++#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
++#include <ieeefp.h>
++#ifdef HAVE_FP_EXCEPT				// Fix type conflict
++typedef fp_except fp_except_t;
++#endif
++#endif /* __FreeBSD__ && HAVE_IEEEFP_H */
++#ifdef HAVE_SYS_FPU_H
++/* for IRIX to use set_fpc_csr() */
++#include <sys/fpu.h>
++#endif
++#ifdef HAVE_FPU_CONTROL_H
++#include <fpu_control.h>
++#endif
++#if defined(__i386__) && !defined(HAVE_FPU_CONTROL_H)
++# define fpu_control_t unsigned int
++# define _FPU_EXTENDED 0x300
++# define _FPU_DOUBLE 0x200
++# if defined(__GNUC__) || (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
++#  define _FPU_GETCW(cw) asm volatile ("fnstcw %0" : "=m" (*&cw))
++#  define _FPU_SETCW(cw) asm volatile ("fldcw %0" : : "m" (*&cw))
++# else
++#  define _FPU_GETCW(cw) (cw= 0)
++#  define _FPU_SETCW(cw)
++# endif
++#endif
++
++extern "C" my_bool reopen_fstreams(const char *filename,
++                                   FILE *outstream, FILE *errstream);
++
++inline void setup_fpu()
++{
++#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
++  /* We can't handle floating point exceptions with threads, so disable
++     this on freebsd
++     Don't fall for overflow, underflow,divide-by-zero or loss of precision
++  */
++#if defined(__i386__)
++  fpsetmask(~(FP_X_INV | FP_X_DNML | FP_X_OFL | FP_X_UFL | FP_X_DZ |
++	      FP_X_IMP));
++#else
++  fpsetmask(~(FP_X_INV |             FP_X_OFL | FP_X_UFL | FP_X_DZ |
++              FP_X_IMP));
++#endif /* __i386__ */
++#endif /* __FreeBSD__ && HAVE_IEEEFP_H */
++
++#ifdef HAVE_FESETROUND
++    /* Set FPU rounding mode to "round-to-nearest" */
++  fesetround(FE_TONEAREST);
++#endif /* HAVE_FESETROUND */
++
++  /*
++    x86 (32-bit) requires FPU precision to be explicitly set to 64 bit
++    (double precision) for portable results of floating point operations.
++    However, there is no need to do so if compiler is using SSE2 for floating
++    point, double values will be stored and processed in 64 bits anyway.
++  */
++#if defined(__i386__) && !defined(__SSE2_MATH__)
++#if defined(_WIN32)
++#if !defined(_WIN64)
++  _control87(_PC_53, MCW_PC);
++#endif /* !_WIN64 */
++#else /* !_WIN32 */
++  fpu_control_t cw;
++  _FPU_GETCW(cw);
++  cw= (cw & ~_FPU_EXTENDED) | _FPU_DOUBLE;
++  _FPU_SETCW(cw);
++#endif /* _WIN32 && */
++#endif /* __i386__ */
++
++#if defined(__sgi) && defined(HAVE_SYS_FPU_H)
++  /* Enable denormalized DOUBLE values support for IRIX */
++  union fpc_csr n;
++  n.fc_word = get_fpc_csr();
++  n.fc_struct.flush = 0;
++  set_fpc_csr(n.fc_word);
++#endif
++}
++
++} /* cplusplus */
++
++#define MYSQL_KILL_SIGNAL SIGTERM
++
++#ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R
++#include <sys/types.h>
++#else
++#include <my_pthread.h>			// For thr_setconcurency()
++#endif
++
++#ifdef SOLARIS
++extern "C" int gethostname(char *name, int namelen);
++#endif
++
++extern "C" sig_handler handle_segfault(int sig);
++
++#if defined(__linux__)
++#define ENABLE_TEMP_POOL 1
++#else
++#define ENABLE_TEMP_POOL 0
++#endif
++
++/* Constants */
++
++const char *show_comp_option_name[]= {"YES", "NO", "DISABLED"};
++/*
++  WARNING: When adding new SQL modes don't forget to update the
++           tables definitions that stores it's value.
++           (ie: mysql.event, mysql.proc)
++*/
++static const char *sql_mode_names[]=
++{
++  "REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE",
++  "?", "ONLY_FULL_GROUP_BY", "NO_UNSIGNED_SUBTRACTION",
++  "NO_DIR_IN_CREATE",
++  "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS",
++  "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI",
++  "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES",
++  "STRICT_ALL_TABLES",
++  "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES",
++  "ERROR_FOR_DIVISION_BY_ZERO",
++  "TRADITIONAL", "NO_AUTO_CREATE_USER", "HIGH_NOT_PRECEDENCE",
++  "NO_ENGINE_SUBSTITUTION",
++  "PAD_CHAR_TO_FULL_LENGTH",
++  NullS
++};
++
++static const unsigned int sql_mode_names_len[]=
++{
++  /*REAL_AS_FLOAT*/               13,
++  /*PIPES_AS_CONCAT*/             15,
++  /*ANSI_QUOTES*/                 11,
++  /*IGNORE_SPACE*/                12,
++  /*?*/                           1,
++  /*ONLY_FULL_GROUP_BY*/          18,
++  /*NO_UNSIGNED_SUBTRACTION*/     23,
++  /*NO_DIR_IN_CREATE*/            16,
++  /*POSTGRESQL*/                  10,
++  /*ORACLE*/                      6,
++  /*MSSQL*/                       5,
++  /*DB2*/                         3,
++  /*MAXDB*/                       5,
++  /*NO_KEY_OPTIONS*/              14,
++  /*NO_TABLE_OPTIONS*/            16,
++  /*NO_FIELD_OPTIONS*/            16,
++  /*MYSQL323*/                    8,
++  /*MYSQL40*/                     7,
++  /*ANSI*/                        4,
++  /*NO_AUTO_VALUE_ON_ZERO*/       21,
++  /*NO_BACKSLASH_ESCAPES*/        20,
++  /*STRICT_TRANS_TABLES*/         19,
++  /*STRICT_ALL_TABLES*/           17,
++  /*NO_ZERO_IN_DATE*/             15,
++  /*NO_ZERO_DATE*/                12,
++  /*ALLOW_INVALID_DATES*/         19,
++  /*ERROR_FOR_DIVISION_BY_ZERO*/  26,
++  /*TRADITIONAL*/                 11,
++  /*NO_AUTO_CREATE_USER*/         19,
++  /*HIGH_NOT_PRECEDENCE*/         19,
++  /*NO_ENGINE_SUBSTITUTION*/      22,
++  /*PAD_CHAR_TO_FULL_LENGTH*/     23
++};
++
++TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"",
++			    sql_mode_names,
++                            (unsigned int *)sql_mode_names_len };
++
++static const char *optimizer_switch_names[]=
++{
++  "index_merge","index_merge_union","index_merge_sort_union", 
++  "index_merge_intersection", "default", NullS
++};
++/* Corresponding defines are named OPTIMIZER_SWITCH_XXX */
++static const unsigned int optimizer_switch_names_len[]=
++{
++  sizeof("index_merge") - 1,
++  sizeof("index_merge_union") - 1,
++  sizeof("index_merge_sort_union") - 1,
++  sizeof("index_merge_intersection") - 1,
++  sizeof("default") - 1
++};
++TYPELIB optimizer_switch_typelib= { array_elements(optimizer_switch_names)-1,"",
++                                    optimizer_switch_names,
++                                    (unsigned int *)optimizer_switch_names_len };
++
++static const char *tc_heuristic_recover_names[]=
++{
++  "COMMIT", "ROLLBACK", NullS
++};
++static TYPELIB tc_heuristic_recover_typelib=
++{
++  array_elements(tc_heuristic_recover_names)-1,"",
++  tc_heuristic_recover_names, NULL
++};
++
++static const char *thread_handling_names[]=
++{ "one-thread-per-connection", "no-threads",
++#if HAVE_POOL_OF_THREADS == 1
++  "pool-of-threads",
++#endif
++  NullS};
++
++TYPELIB thread_handling_typelib=
++{
++  array_elements(thread_handling_names) - 1, "",
++  thread_handling_names, NULL
++};
++
++const char *first_keyword= "first", *binary_keyword= "BINARY";
++const char *my_localhost= "localhost", *delayed_user= "DELAYED";
++#if SIZEOF_OFF_T > 4 && defined(BIG_TABLES)
++#define GET_HA_ROWS GET_ULL
++#else
++#define GET_HA_ROWS GET_ULONG
++#endif
++
++bool opt_large_files= sizeof(my_off_t) > 4;
++
++/*
++  Used with --help for detailed option
++*/
++static my_bool opt_help= 0, opt_verbose= 0;
++
++arg_cmp_func Arg_comparator::comparator_matrix[5][2] =
++{{&Arg_comparator::compare_string,     &Arg_comparator::compare_e_string},
++ {&Arg_comparator::compare_real,       &Arg_comparator::compare_e_real},
++ {&Arg_comparator::compare_int_signed, &Arg_comparator::compare_e_int},
++ {&Arg_comparator::compare_row,        &Arg_comparator::compare_e_row},
++ {&Arg_comparator::compare_decimal,    &Arg_comparator::compare_e_decimal}};
++
++const char *log_output_names[] = { "NONE", "FILE", "TABLE", NullS};
++static const unsigned int log_output_names_len[]= { 4, 4, 5, 0 };
++TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"",
++                             log_output_names, 
++                             (unsigned int *) log_output_names_len};
++
++/* static variables */
++
++/* the default log output is log tables */
++static bool lower_case_table_names_used= 0;
++static bool max_long_data_size_used= false;
++static bool volatile select_thread_in_use, signal_thread_in_use;
++static bool volatile ready_to_exit;
++static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
++static my_bool opt_short_log_format= 0;
++static uint kill_cached_threads, wake_thread;
++static ulong killed_threads, thread_created;
++static ulong max_used_connections;
++static ulong my_bind_addr;			/**< the address we bind to */
++static volatile ulong cached_thread_count= 0;
++static const char *sql_mode_str= "OFF";
++/* Text representation for OPTIMIZER_SWITCH_DEFAULT */
++static const char *optimizer_switch_str="index_merge=on,index_merge_union=on,"
++                                        "index_merge_sort_union=on,"
++                                        "index_merge_intersection=on";
++static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr;
++static char *opt_init_slave, *language_ptr, *opt_init_connect;
++static char *default_character_set_name;
++static char *character_set_filesystem_name;
++static char *lc_time_names_name;
++static char *my_bind_addr_str;
++static char *default_collation_name; 
++static char *default_storage_engine_str;
++static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME;
++static I_List<THD> thread_cache;
++static double long_query_time;
++
++static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
++
++/* Global variables */
++
++bool opt_update_log, opt_bin_log, opt_ignore_builtin_innodb= 0;
++my_bool opt_log, opt_slow_log;
++ulong log_output_options;
++my_bool opt_log_queries_not_using_indexes= 0;
++bool opt_error_log= IF_WIN(1,0);
++bool opt_disable_networking=0, opt_skip_show_db=0;
++bool opt_skip_name_resolve=0;
++my_bool opt_character_set_client_handshake= 1;
++bool server_id_supplied = 0;
++bool opt_endinfo, using_udf_functions;
++my_bool locked_in_memory;
++bool opt_using_transactions;
++bool volatile abort_loop;
++bool volatile shutdown_in_progress;
++/*
++  True if the bootstrap thread is running. Protected by LOCK_thread_count,
++  just like thread_count.
++  Used in bootstrap() function to determine if the bootstrap thread
++  has completed. Note, that we can't use 'thread_count' instead,
++  since in 5.1, in presence of the Event Scheduler, there may be
++  event threads running in parallel, so it's impossible to know
++  what value of 'thread_count' is a sign of completion of the
++  bootstrap thread.
++
++  At the same time, we can't start the event scheduler after
++  bootstrap either, since we want to be able to process event-related
++  SQL commands in the init file and in --bootstrap mode.
++*/
++bool in_bootstrap= FALSE;
++/**
++   @brief 'grant_option' is used to indicate if privileges needs
++   to be checked, in which case the lock, LOCK_grant, is used
++   to protect access to the grant table.
++   @note This flag is dropped in 5.1 
++   @see grant_init()
++ */
++bool volatile grant_option;
++
++my_bool opt_skip_slave_start = 0; ///< If set, slave is not autostarted
++my_bool opt_reckless_slave = 0;
++my_bool opt_enable_named_pipe= 0;
++my_bool opt_local_infile, opt_slave_compressed_protocol;
++my_bool opt_safe_user_create = 0, opt_no_mix_types = 0;
++my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0;
++my_bool opt_log_slave_updates= 0;
++bool slave_warning_issued = false; 
++
++/*
++  Legacy global handlerton. These will be removed (please do not add more).
++*/
++handlerton *heap_hton;
++handlerton *myisam_hton;
++handlerton *partition_hton;
++
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++const char *opt_ndbcluster_connectstring= 0;
++const char *opt_ndb_connectstring= 0;
++char opt_ndb_constrbuf[1024]= {0};
++unsigned opt_ndb_constrbuf_len= 0;
++my_bool	opt_ndb_shm, opt_ndb_optimized_node_selection;
++ulong opt_ndb_cache_check_time;
++const char *opt_ndb_mgmd;
++ulong opt_ndb_nodeid;
++ulong ndb_extra_logging;
++#ifdef HAVE_NDB_BINLOG
++ulong ndb_report_thresh_binlog_epoch_slip;
++ulong ndb_report_thresh_binlog_mem_usage;
++#endif
++
++extern const char *ndb_distribution_names[];
++extern TYPELIB ndb_distribution_typelib;
++extern const char *opt_ndb_distribution;
++extern enum ndb_distribution opt_ndb_distribution_id;
++#endif
++my_bool opt_readonly, use_temp_pool, relay_log_purge;
++my_bool opt_sync_frm, opt_allow_suspicious_udfs;
++my_bool opt_secure_auth= 0;
++char* opt_secure_file_priv= 0;
++my_bool opt_log_slow_admin_statements= 0;
++my_bool opt_log_slow_slave_statements= 0;
++my_bool lower_case_file_system= 0;
++my_bool opt_large_pages= 0;
++my_bool opt_myisam_use_mmap= 0;
++uint    opt_large_page_size= 0;
++#if defined(ENABLED_DEBUG_SYNC)
++uint    opt_debug_sync_timeout= 0;
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++my_bool opt_old_style_user_limits= 0, trust_function_creators= 0;
++/*
++  True if there is at least one per-hour limit for some user, so we should
++  check them before each query (and possibly reset counters when hour is
++  changed). False otherwise.
++*/
++volatile bool mqh_used = 0;
++my_bool opt_noacl;
++my_bool sp_automatic_privileges= 1;
++
++ulong opt_binlog_rows_event_max_size;
++const char *binlog_format_names[]= {"MIXED", "STATEMENT", "ROW", NullS};
++TYPELIB binlog_format_typelib=
++  { array_elements(binlog_format_names) - 1, "",
++    binlog_format_names, NULL };
++ulong opt_binlog_format_id= (ulong) BINLOG_FORMAT_UNSPEC;
++const char *opt_binlog_format= binlog_format_names[opt_binlog_format_id];
++#ifdef HAVE_INITGROUPS
++static bool calling_initgroups= FALSE; /**< Used in SIGSEGV handler. */
++#endif
++uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
++uint mysqld_port_timeout;
++uint delay_key_write_options, protocol_version;
++uint lower_case_table_names;
++uint tc_heuristic_recover= 0;
++uint volatile thread_count, thread_running;
++ulonglong thd_startup_options;
++ulong back_log, connect_timeout, concurrency, server_id;
++ulong table_cache_size, table_def_size;
++ulong what_to_log;
++ulong query_buff_size, slow_launch_time, slave_open_temp_tables;
++ulong open_files_limit, max_binlog_size, max_relay_log_size;
++ulong slave_net_timeout, slave_trans_retries;
++ulong slave_exec_mode_options;
++static const char *slave_exec_mode_str= "STRICT";
++ulong thread_cache_size=0, thread_pool_size= 0;
++ulong binlog_cache_size=0;
++ulonglong  max_binlog_cache_size=0;
++ulong query_cache_size=0;
++ulong refresh_version;  /* Increments on each reload */
++query_id_t global_query_id;
++ulong aborted_threads, aborted_connects;
++ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
++ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use;
++ulong delayed_insert_errors,flush_time;
++ulong specialflag=0;
++ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
++ulong max_connections, max_connect_errors;
++/*
++  Maximum length of parameter value which can be set through
++  mysql_send_long_data() call.
++*/
++ulong max_long_data_size;
++uint  max_user_connections= 0;
++/**
++  Limit of the total number of prepared statements in the server.
++  Is necessary to protect the server against out-of-memory attacks.
++*/
++ulong max_prepared_stmt_count;
++/**
++  Current total number of prepared statements in the server. This number
++  is exact, and therefore may not be equal to the difference between
++  `com_stmt_prepare' and `com_stmt_close' (global status variables), as
++  the latter ones account for all registered attempts to prepare
++  a statement (including unsuccessful ones).  Prepared statements are
++  currently connection-local: if the same SQL query text is prepared in
++  two different connections, this counts as two distinct prepared
++  statements.
++*/
++ulong prepared_stmt_count=0;
++ulong thread_id=1L,current_pid;
++ulong slow_launch_threads = 0, sync_binlog_period;
++ulong expire_logs_days = 0;
++ulong rpl_recovery_rank=0;
++const char *log_output_str= "FILE";
++
++time_t server_start_time, flush_status_time;
++
++char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], system_time_zone[30];
++char *default_tz_name;
++char log_error_file[FN_REFLEN], glob_hostname[FN_REFLEN];
++char mysql_real_data_home[FN_REFLEN],
++     language[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN],
++     *opt_init_file, *opt_tc_log_file,
++     def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
++char mysql_unpacked_real_data_home[FN_REFLEN];
++int mysql_unpacked_real_data_home_len;
++uint reg_ext_length;
++const key_map key_map_empty(0);
++key_map key_map_full(0);                        // Will be initialized later
++
++const char *opt_date_time_formats[3];
++
++uint mysql_data_home_len;
++char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home;
++char server_version[SERVER_VERSION_LENGTH];
++char *mysqld_unix_port, *opt_mysql_tmpdir;
++const char **errmesg;			/**< Error messages */
++const char *myisam_recover_options_str="OFF";
++const char *myisam_stats_method_str="nulls_unequal";
++
++/** name of reference on left espression in rewritten IN subquery */
++const char *in_left_expr_name= "<left expr>";
++/** name of additional condition */
++const char *in_additional_cond= "<IN COND>";
++const char *in_having_cond= "<IN HAVING>";
++
++my_decimal decimal_zero;
++/* classes for comparation parsing/processing */
++Eq_creator eq_creator;
++Ne_creator ne_creator;
++Gt_creator gt_creator;
++Lt_creator lt_creator;
++Ge_creator ge_creator;
++Le_creator le_creator;
++
++FILE *bootstrap_file;
++int bootstrap_error;
++FILE *stderror_file=0;
++
++I_List<THD> threads;
++I_List<NAMED_LIST> key_caches;
++Rpl_filter* rpl_filter;
++Rpl_filter* binlog_filter;
++
++struct system_variables global_system_variables;
++struct system_variables max_system_variables;
++struct system_status_var global_status_var;
++
++MY_TMPDIR mysql_tmpdir_list;
++MY_BITMAP temp_pool;
++
++CHARSET_INFO *system_charset_info, *files_charset_info ;
++CHARSET_INFO *national_charset_info, *table_alias_charset;
++CHARSET_INFO *character_set_filesystem;
++
++MY_LOCALE *my_default_lc_time_names;
++
++SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen, have_query_cache;
++SHOW_COMP_OPTION have_geometry, have_rtree_keys;
++SHOW_COMP_OPTION have_crypt, have_compress;
++SHOW_COMP_OPTION have_community_features;
++
++/* Thread specific variables */
++
++pthread_key(MEM_ROOT**,THR_MALLOC);
++pthread_key(THD*, THR_THD);
++pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
++		LOCK_mapped_file, LOCK_status, LOCK_global_read_lock,
++		LOCK_error_log, LOCK_uuid_generator,
++		LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
++		LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
++	        LOCK_global_system_variables,
++		LOCK_user_conn, LOCK_slave_list, LOCK_active_mi,
++                LOCK_connection_count;
++/**
++  The below lock protects access to two global server variables:
++  max_prepared_stmt_count and prepared_stmt_count. These variables
++  set the limit and hold the current total number of prepared statements
++  in the server, respectively. As PREPARE/DEALLOCATE rate in a loaded
++  server may be fairly high, we need a dedicated lock.
++*/
++pthread_mutex_t LOCK_prepared_stmt_count;
++#ifdef HAVE_OPENSSL
++pthread_mutex_t LOCK_des_key_file;
++#endif
++rw_lock_t	LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
++rw_lock_t	LOCK_system_variables_hash;
++pthread_cond_t COND_refresh, COND_thread_count, COND_global_read_lock;
++pthread_t signal_thread;
++pthread_attr_t connection_attrib;
++pthread_mutex_t  LOCK_server_started;
++pthread_cond_t  COND_server_started;
++
++int mysqld_server_started= 0;
++
++File_parser_dummy_hook file_parser_dummy_hook;
++
++/* replication parameters, if master_host is not NULL, we are a slave */
++uint master_port= MYSQL_PORT, master_connect_retry = 60;
++uint report_port= MYSQL_PORT;
++ulong master_retry_count=0;
++char *master_user, *master_password, *master_host, *master_info_file;
++char *relay_log_info_file, *report_user, *report_password, *report_host;
++char *opt_relay_logname = 0, *opt_relaylog_index_name=0;
++my_bool master_ssl;
++char *master_ssl_key, *master_ssl_cert;
++char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher;
++char *opt_logname, *opt_slow_logname;
++
++/* Static variables */
++
++static bool kill_in_progress, segfaulted;
++#ifdef HAVE_STACK_TRACE_ON_SEGV
++static my_bool opt_do_pstack;
++#endif /* HAVE_STACK_TRACE_ON_SEGV */
++static my_bool opt_bootstrap, opt_myisam_log;
++static int cleanup_done;
++static ulong opt_specialflag, opt_myisam_block_size;
++static char *opt_update_logname, *opt_binlog_index_name;
++static char *opt_tc_heuristic_recover;
++static char *mysql_home_ptr, *pidfile_name_ptr;
++static int defaults_argc;
++static char **defaults_argv;
++static char *opt_bin_logname;
++
++int orig_argc;
++char **orig_argv;
++
++static my_socket unix_sock,ip_sock;
++struct rand_struct sql_rand; ///< used by sql_class.cc:THD::THD()
++
++#ifndef EMBEDDED_LIBRARY
++struct passwd *user_info;
++static pthread_t select_thread;
++static uint thr_kill_signal;
++#endif
++
++/* OS specific variables */
++
++#ifdef __WIN__
++#undef	 getpid
++#include <process.h>
++
++static pthread_cond_t COND_handler_count;
++static uint handler_count;
++static bool start_mode=0, use_opt_args;
++static int opt_argc;
++static char **opt_argv;
++
++#if !defined(EMBEDDED_LIBRARY)
++static HANDLE hEventShutdown;
++static char shutdown_event_name[40];
++#include "nt_servc.h"
++static	 NTService  Service;	      ///< Service object for WinNT
++#endif /* EMBEDDED_LIBRARY */
++#endif /* __WIN__ */
++
++#ifdef __NT__
++static char pipe_name[512];
++static SECURITY_ATTRIBUTES saPipeSecurity;
++static SECURITY_DESCRIPTOR sdPipeDescriptor;
++static HANDLE hPipe = INVALID_HANDLE_VALUE;
++#endif
++
++#ifndef EMBEDDED_LIBRARY
++bool mysqld_embedded=0;
++#else
++bool mysqld_embedded=1;
++#endif
++
++static my_bool plugins_are_initialized= FALSE;
++
++#ifndef DBUG_OFF
++static const char* default_dbug_option;
++#endif
++#ifdef HAVE_LIBWRAP
++const char *libwrapName= NULL;
++int allow_severity = LOG_INFO;
++int deny_severity = LOG_WARNING;
++#endif
++#ifdef HAVE_QUERY_CACHE
++static ulong query_cache_limit= 0;
++ulong query_cache_min_res_unit= QUERY_CACHE_MIN_RESULT_DATA_SIZE;
++Query_cache query_cache;
++#endif
++#ifdef HAVE_SMEM
++char *shared_memory_base_name= default_shared_memory_base_name;
++my_bool opt_enable_shared_memory;
++HANDLE smem_event_connect_request= 0;
++#endif
++
++scheduler_functions thread_scheduler;
++
++#define SSL_VARS_NOT_STATIC
++#include "sslopt-vars.h"
++#ifdef HAVE_OPENSSL
++#include <openssl/crypto.h>
++#ifndef HAVE_YASSL
++typedef struct CRYPTO_dynlock_value
++{
++  rw_lock_t lock;
++} openssl_lock_t;
++
++static openssl_lock_t *openssl_stdlocks;
++static openssl_lock_t *openssl_dynlock_create(const char *, int);
++static void openssl_dynlock_destroy(openssl_lock_t *, const char *, int);
++static void openssl_lock_function(int, int, const char *, int);
++static void openssl_lock(int, openssl_lock_t *, const char *, int);
++static unsigned long openssl_id_function();
++#endif
++char *des_key_file;
++struct st_VioSSLFd *ssl_acceptor_fd;
++#endif /* HAVE_OPENSSL */
++
++/**
++  Number of currently active user connections. The variable is protected by
++  LOCK_connection_count.
++*/
++uint connection_count= 0;
++
++/* Function declarations */
++
++pthread_handler_t signal_hand(void *arg);
++static int mysql_init_variables(void);
++static int get_options(int *argc,char **argv);
++extern "C" my_bool mysqld_get_one_option(int, const struct my_option *, char *);
++static void set_server_version(void);
++static int init_thread_environment();
++static char *get_relative_path(const char *path);
++static int fix_paths(void);
++pthread_handler_t handle_connections_sockets(void *arg);
++pthread_handler_t kill_server_thread(void *arg);
++static void bootstrap(FILE *file);
++static bool read_init_file(char *file_name);
++#ifdef __NT__
++pthread_handler_t handle_connections_namedpipes(void *arg);
++#endif
++#ifdef HAVE_SMEM
++pthread_handler_t handle_connections_shared_memory(void *arg);
++#endif
++pthread_handler_t handle_slave(void *arg);
++static ulong find_bit_type(const char *x, TYPELIB *bit_lib);
++static ulong find_bit_type_or_exit(const char *x, TYPELIB *bit_lib,
++                                   const char *option, int *error);
++static void clean_up(bool print_message);
++static int test_if_case_insensitive(const char *dir_name);
++
++#ifndef EMBEDDED_LIBRARY
++static void usage(void);
++static void start_signal_handler(void);
++static void close_server_sock();
++static void clean_up_mutexes(void);
++static void wait_for_signal_thread_to_end(void);
++static void create_pid_file();
++static void end_ssl();
++#endif
++
++
++#ifndef EMBEDDED_LIBRARY
++/****************************************************************************
++** Code to end mysqld
++****************************************************************************/
++
++static void close_connections(void)
++{
++#ifdef EXTRA_DEBUG
++  int count=0;
++#endif
++  DBUG_ENTER("close_connections");
++
++  /* Clear thread cache */
++  kill_cached_threads++;
++  flush_thread_cache();
++
++  /* kill connection thread */
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  DBUG_PRINT("quit", ("waiting for select thread: 0x%lx",
++                      (ulong) select_thread));
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++
++  while (select_thread_in_use)
++  {
++    struct timespec abstime;
++    int error;
++    LINT_INIT(error);
++    DBUG_PRINT("info",("Waiting for select thread"));
++
++#ifndef DONT_USE_THR_ALARM
++    if (pthread_kill(select_thread, thr_client_alarm))
++      break;					// already dead
++#endif
++    set_timespec(abstime, 2);
++    for (uint tmp=0 ; tmp < 10 && select_thread_in_use; tmp++)
++    {
++      error=pthread_cond_timedwait(&COND_thread_count,&LOCK_thread_count,
++				   &abstime);
++      if (error != EINTR)
++	break;
++    }
++#ifdef EXTRA_DEBUG
++    if (error != 0 && !count++)
++      sql_print_error("Got error %d from pthread_cond_timedwait",error);
++#endif
++    close_server_sock();
++  }
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++#endif /* __WIN__ */
++
++
++  /* Abort listening to new connections */
++  DBUG_PRINT("quit",("Closing sockets"));
++  if (!opt_disable_networking )
++  {
++    if (ip_sock != INVALID_SOCKET)
++    {
++      (void) shutdown(ip_sock, SHUT_RDWR);
++      (void) closesocket(ip_sock);
++      ip_sock= INVALID_SOCKET;
++    }
++  }
++#ifdef __NT__
++  if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
++  {
++    HANDLE temp;
++    DBUG_PRINT("quit", ("Closing named pipes") );
++
++    /* Create a connection to the named pipe handler to break the loop */
++    if ((temp = CreateFile(pipe_name,
++			   GENERIC_READ | GENERIC_WRITE,
++			   0,
++			   NULL,
++			   OPEN_EXISTING,
++			   0,
++			   NULL )) != INVALID_HANDLE_VALUE)
++    {
++      WaitNamedPipe(pipe_name, 1000);
++      DWORD dwMode = PIPE_READMODE_BYTE | PIPE_WAIT;
++      SetNamedPipeHandleState(temp, &dwMode, NULL, NULL);
++      CancelIo(temp);
++      DisconnectNamedPipe(temp);
++      CloseHandle(temp);
++    }
++  }
++#endif
++#ifdef HAVE_SYS_UN_H
++  if (unix_sock != INVALID_SOCKET)
++  {
++    (void) shutdown(unix_sock, SHUT_RDWR);
++    (void) closesocket(unix_sock);
++    (void) unlink(mysqld_unix_port);
++    unix_sock= INVALID_SOCKET;
++  }
++#endif
++  end_thr_alarm(0);			 // Abort old alarms.
++
++  /*
++    First signal all threads that it's time to die
++    This will give the threads some time to gracefully abort their
++    statements and inform their clients that the server is about to die.
++  */
++
++  THD *tmp;
++  (void) pthread_mutex_lock(&LOCK_thread_count); // For unlink from list
++
++  I_List_iterator<THD> it(threads);
++  while ((tmp=it++))
++  {
++    DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
++		       tmp->thread_id));
++    /* We skip slave threads & scheduler on this first loop through. */
++    if (tmp->slave_thread)
++      continue;
++
++    tmp->killed= THD::KILL_CONNECTION;
++    thread_scheduler.post_kill_notification(tmp);
++    if (tmp->mysys_var)
++    {
++      tmp->mysys_var->abort=1;
++      pthread_mutex_lock(&tmp->mysys_var->mutex);
++      if (tmp->mysys_var->current_cond)
++      {
++	pthread_mutex_lock(tmp->mysys_var->current_mutex);
++	pthread_cond_broadcast(tmp->mysys_var->current_cond);
++	pthread_mutex_unlock(tmp->mysys_var->current_mutex);
++      }
++      pthread_mutex_unlock(&tmp->mysys_var->mutex);
++    }
++  }
++  (void) pthread_mutex_unlock(&LOCK_thread_count); // For unlink from list
++
++  Events::deinit();
++  end_slave();
++
++  if (thread_count)
++    sleep(2);					// Give threads time to die
++
++  /*
++    Force remaining threads to die by closing the connection to the client
++    This will ensure that threads that are waiting for a command from the
++    client on a blocking read call are aborted.
++  */
++
++  for (;;)
++  {
++    DBUG_PRINT("quit",("Locking LOCK_thread_count"));
++    (void) pthread_mutex_lock(&LOCK_thread_count); // For unlink from list
++    if (!(tmp=threads.get()))
++    {
++      DBUG_PRINT("quit",("Unlocking LOCK_thread_count"));
++      (void) pthread_mutex_unlock(&LOCK_thread_count);
++      break;
++    }
++#ifndef __bsdi__				// Bug in BSDI kernel
++    if (tmp->vio_ok())
++    {
++      if (global_system_variables.log_warnings)
++        sql_print_warning(ER(ER_FORCING_CLOSE),my_progname,
++                          tmp->thread_id,
++                          (tmp->main_security_ctx.user ?
++                           tmp->main_security_ctx.user : ""));
++      close_connection(tmp,0,0);
++    }
++#endif
++    DBUG_PRINT("quit",("Unlocking LOCK_thread_count"));
++    (void) pthread_mutex_unlock(&LOCK_thread_count);
++  }
++  /* All threads have now been aborted */
++  DBUG_PRINT("quit",("Waiting for threads to die (count=%u)",thread_count));
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  while (thread_count)
++  {
++    (void) pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
++    DBUG_PRINT("quit",("One thread died (count=%u)",thread_count));
++  }
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++
++  close_active_mi();
++  DBUG_PRINT("quit",("close_connections thread"));
++  DBUG_VOID_RETURN;
++}
++
++
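++/**
++  Shut down the TCP/IP and unix listening sockets.
++
++  Used during shutdown to wake up an accept() that is blocking in the
++  connection handling thread.
++*/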
++static void close_server_sock()
++{
++#ifdef HAVE_CLOSE_SERVER_SOCK
++  DBUG_ENTER("close_server_sock");
++  my_socket tmp_sock;
++  tmp_sock=ip_sock;
++  if (tmp_sock != INVALID_SOCKET)
++  {
++    ip_sock=INVALID_SOCKET;
++    DBUG_PRINT("info",("calling shutdown on TCP/IP socket"));
++    VOID(shutdown(tmp_sock, SHUT_RDWR));
++#if defined(__NETWARE__)
++    /*
++      The following code is disabled for normal systems as it causes MySQL
++      to hang on AIX 4.3 during shutdown
++    */
++    DBUG_PRINT("info",("calling closesocket on TCP/IP socket"));
++    VOID(closesocket(tmp_sock));
++#endif
++  }
++  tmp_sock=unix_sock;
++  if (tmp_sock != INVALID_SOCKET)
++  {
++    unix_sock=INVALID_SOCKET;
++    DBUG_PRINT("info",("calling shutdown on unix socket"));
++    VOID(shutdown(tmp_sock, SHUT_RDWR));
++#if defined(__NETWARE__)
++    /*
++      The following code is disabled for normal systems as it may cause MySQL
++      to hang on AIX 4.3 during shutdown
++    */
++    DBUG_PRINT("info",("calling closesocket on unix/IP socket"));
++    VOID(closesocket(tmp_sock));
++#endif
++    VOID(unlink(mysqld_unix_port));
++  }
++  DBUG_VOID_RETURN;
++#endif
++}
++
++#endif /*EMBEDDED_LIBRARY*/
++
++
++void kill_mysql(void)
++{
++  DBUG_ENTER("kill_mysql");
++
++#if defined(SIGNALS_DONT_BREAK_READ) && !defined(EMBEDDED_LIBRARY)
++  abort_loop=1;					// Break connection loops
++  close_server_sock();				// Force accept to wake up
++#endif
++
++#if defined(__WIN__)
++#if !defined(EMBEDDED_LIBRARY)
++  {
++    if (!SetEvent(hEventShutdown))
++    {
++      DBUG_PRINT("error",("Got error: %ld from SetEvent",GetLastError()));
++    }
++    /*
++      or:
++      HANDLE hEvent=OpenEvent(0, FALSE, "MySqlShutdown");
++      SetEvent(hEventShutdown);
++      CloseHandle(hEvent);
++    */
++  }
++#endif
++#elif defined(HAVE_PTHREAD_KILL)
++  if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL))
++  {
++    DBUG_PRINT("error",("Got error %d from pthread_kill",errno)); /* purecov: inspected */
++  }
++#elif !defined(SIGNALS_DONT_BREAK_READ)
++  kill(current_pid, MYSQL_KILL_SIGNAL);
++#endif
++  DBUG_PRINT("quit",("After pthread_kill"));
++  shutdown_in_progress=1;			// Safety if kill didn't work
++#ifdef SIGNALS_DONT_BREAK_READ
++  if (!kill_in_progress)
++  {
++    pthread_t tmp;
++    abort_loop=1;
++    if (pthread_create(&tmp,&connection_attrib, kill_server_thread,
++			   (void*) 0))
++      sql_print_error("Can't create thread to kill server");
++  }
++#endif
++  DBUG_VOID_RETURN;
++}
++
++/**
++  Force server down. Kill all connections and threads and exit.
++
++  @param  sig_ptr       Signal number that caused kill_server to be called.
++
++  @note
++    A signal number of 0 means that the function was not called
++    from a signal handler and there is thus no signal to block
++    or stop; we just want to kill the server.
++*/
++
++#if defined(__NETWARE__)
++extern "C" void kill_server(int sig_ptr)
++#define RETURN_FROM_KILL_SERVER return
++#elif !defined(__WIN__)
++static void *kill_server(void *sig_ptr)
++#define RETURN_FROM_KILL_SERVER return 0
++#else
++static void __cdecl kill_server(int sig_ptr)
++#define RETURN_FROM_KILL_SERVER return
++#endif
++{
++  DBUG_ENTER("kill_server");
++#ifndef EMBEDDED_LIBRARY
++  int sig=(int) (long) sig_ptr;			// This is passed an int
++  // If another signal arrives while the kill is in progress, ignore it
++  if (kill_in_progress)				// Safety
++  {
++    DBUG_LEAVE;
++    RETURN_FROM_KILL_SERVER;
++  }
++  kill_in_progress=TRUE;
++  abort_loop=1;					// This should be set
++  if (sig != 0) // 0 is not a valid signal number
++    my_sigset(sig, SIG_IGN);                    /* purify inspected */
++  if (sig == MYSQL_KILL_SIGNAL || sig == 0)
++    sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname);
++  else
++    sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
++
++#if defined(HAVE_SMEM) && defined(__WIN__)    
++  /*    
++   Send event to smem_event_connect_request for aborting    
++   */    
++  if (!SetEvent(smem_event_connect_request))    
++  {      
++	  DBUG_PRINT("error",
++		("Got error: %ld from SetEvent of smem_event_connect_request",
++		 GetLastError()));    
++  }
++#endif  
++  
++  close_connections();
++  if (sig != MYSQL_KILL_SIGNAL &&
++      sig != 0)
++    unireg_abort(1);				/* purecov: inspected */
++  else
++    unireg_end();
++
++  /* purecov: begin deadcode */
++#ifdef __NETWARE__
++  if (!event_flag)
++    pthread_join(select_thread, NULL);		// wait for main thread
++#endif /* __NETWARE__ */
++
++  DBUG_LEAVE;                                   // Must match DBUG_ENTER()
++  my_thread_end();
++  pthread_exit(0);
++  /* purecov: end */
++
++  RETURN_FROM_KILL_SERVER;                      // Avoid compiler warnings
++
++#else /* EMBEDDED_LIBRARY*/
++
++  DBUG_LEAVE;
++  RETURN_FROM_KILL_SERVER;
++
++#endif /* EMBEDDED_LIBRARY */
++}
++
++
++#if defined(USE_ONE_SIGNAL_HAND) || (defined(__NETWARE__) && defined(SIGNALS_DONT_BREAK_READ))
++pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
++{
++  my_thread_init();				// Initialize new thread
++  kill_server(0);
++  /* purecov: begin deadcode */
++  my_thread_end();
++  pthread_exit(0);
++  return 0;
++  /* purecov: end */
++}
++#endif
++
++
++extern "C" sig_handler print_signal_warning(int sig)
++{
++  if (global_system_variables.log_warnings)
++    sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id());
++#ifdef SIGNAL_HANDLER_RESET_ON_DELIVERY
++  my_sigset(sig,print_signal_warning);		/* int. thread system calls */
++#endif
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  if (sig == SIGALRM)
++    alarm(2);					/* reschedule alarm */
++#endif
++}
++
++#ifndef EMBEDDED_LIBRARY
++
++/**
++  Clean up all memory and end the program nicely.
++
++    If SIGNALS_DONT_BREAK_READ is defined, this function is called
++    by the main thread. To get MySQL to shut down nicely in this case
++    (Mac OS X) we have to call exit() instead of pthread_exit().
++
++  @note
++    This function never returns.
++*/
++void unireg_end(void)
++{
++  clean_up(1);
++  my_thread_end();
++#if defined(SIGNALS_DONT_BREAK_READ) && !defined(__NETWARE__)
++  exit(0);
++#else
++  pthread_exit(0);				// Exit is in main thread
++#endif
++}
++
++extern "C" void unireg_abort(int exit_code)
++{
++  DBUG_ENTER("unireg_abort");
++
++  if (opt_help)
++    usage();
++  if (exit_code)
++    sql_print_error("Aborting\n");
++  clean_up(!opt_help && (exit_code || !opt_bootstrap)); /* purecov: inspected */
++  DBUG_PRINT("quit",("done with cleanup in unireg_abort"));
++  wait_for_signal_thread_to_end();
++  clean_up_mutexes();
++  my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
++  exit(exit_code); /* purecov: inspected */
++}
++
++#endif /*EMBEDDED_LIBRARY*/
++
++
++void clean_up(bool print_message)
++{
++  DBUG_PRINT("exit",("clean_up"));
++  if (cleanup_done++)
++    return; /* purecov: inspected */
++
++  stop_handle_manager();
++  release_ddl_log();
++
++  /*
++    make sure that handlers finish up
++    what they have that is dependent on the binlog
++  */
++  ha_binlog_end(current_thd);
++
++  logger.cleanup_base();
++
++  injector::free_instance();
++  mysql_bin_log.cleanup();
++
++#ifdef HAVE_REPLICATION
++  if (use_slave_mask)
++    bitmap_free(&slave_error_mask);
++#endif
++  my_tz_free();
++  my_database_names_free();
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  servers_free(1);
++  acl_free(1);
++  grant_free();
++#endif
++  query_cache_destroy();
++  table_cache_free();
++  table_def_free();
++  hostname_cache_free();
++  item_user_lock_free();
++  lex_free();				/* Free some memory */
++  item_create_cleanup();
++  set_var_free();
++  free_charsets();
++  if (!opt_noacl)
++  {
++#ifdef HAVE_DLOPEN
++    udf_free();
++#endif
++  }
++  plugin_shutdown();
++  ha_end();
++  if (tc_log)
++    tc_log->close();
++  xid_cache_free();
++  delete_elements(&key_caches, (void (*)(const char*, uchar*)) free_key_cache);
++  multi_keycache_free();
++  free_status_vars();
++  end_thr_alarm(1);			/* Free allocated memory */
++  my_free_open_file_info();
++  my_free((char*) global_system_variables.date_format,
++	  MYF(MY_ALLOW_ZERO_PTR));
++  my_free((char*) global_system_variables.time_format,
++	  MYF(MY_ALLOW_ZERO_PTR));
++  my_free((char*) global_system_variables.datetime_format,
++	  MYF(MY_ALLOW_ZERO_PTR));
++  if (defaults_argv)
++    free_defaults(defaults_argv);
++  my_free(sys_init_connect.value, MYF(MY_ALLOW_ZERO_PTR));
++  my_free(sys_init_slave.value, MYF(MY_ALLOW_ZERO_PTR));
++  my_free(sys_var_general_log_path.value, MYF(MY_ALLOW_ZERO_PTR));
++  my_free(sys_var_slow_log_path.value, MYF(MY_ALLOW_ZERO_PTR));
++  free_tmpdir(&mysql_tmpdir_list);
++#ifdef HAVE_REPLICATION
++  my_free(slave_load_tmpdir,MYF(MY_ALLOW_ZERO_PTR));
++#endif
++  x_free(opt_bin_logname);
++  x_free(opt_relay_logname);
++  x_free(opt_secure_file_priv);
++  bitmap_free(&temp_pool);
++  free_max_user_conn();
++#ifdef HAVE_REPLICATION
++  end_slave_list();
++#endif
++  delete binlog_filter;
++  delete rpl_filter;
++#ifndef EMBEDDED_LIBRARY
++  end_ssl();
++#endif
++  vio_end();
++#ifdef USE_REGEX
++  my_regex_end();
++#endif
++#if defined(ENABLED_DEBUG_SYNC)
++  /* End the debug sync facility. See debug_sync.cc. */
++  debug_sync_end();
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++
++#if !defined(EMBEDDED_LIBRARY)
++  if (!opt_bootstrap)
++    (void) my_delete(pidfile_name,MYF(0));	// This may not always exist
++#endif
++  if (print_message && errmesg && server_start_time)
++    sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname);
++  thread_scheduler.end();
++  finish_client_errs();
++  my_free((uchar*) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST),
++          MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
++  DBUG_PRINT("quit", ("Error messages freed"));
++  /* Tell main we are ready */
++  logger.cleanup_end();
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  DBUG_PRINT("quit", ("got thread count lock"));
++  ready_to_exit=1;
++  /* do the broadcast inside the lock to ensure that my_end() is not called */
++  (void) pthread_cond_broadcast(&COND_thread_count);
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++
++  /*
++    The following lines may never be executed as the main thread may have
++    killed us
++  */
++  DBUG_PRINT("quit", ("done with cleanup"));
++} /* clean_up */
++
++
++#ifndef EMBEDDED_LIBRARY
++
++/**
++  This is mainly needed when running with purify, but it's still nice to
++  know that all child threads have died when mysqld exits.
++*/
++static void wait_for_signal_thread_to_end()
++{
++#ifndef __NETWARE__
++  uint i;
++  /*
++    Wait up to 10 seconds for signal thread to die. We use this mainly to
++    avoid getting warnings that my_thread_end has not been called
++  */
++  for (i= 0 ; i < 100 && signal_thread_in_use; i++)
++  {
++    if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL) != ESRCH)
++      break;
++    my_sleep(100);				// Give it time to die
++  }
++#endif
++}
++
++
++static void clean_up_mutexes()
++{
++  (void) pthread_mutex_destroy(&LOCK_mysql_create_db);
++  (void) pthread_mutex_destroy(&LOCK_lock_db);
++  (void) pthread_mutex_destroy(&LOCK_Acl);
++  (void) rwlock_destroy(&LOCK_grant);
++  (void) pthread_mutex_destroy(&LOCK_open);
++  (void) pthread_mutex_destroy(&LOCK_thread_count);
++  (void) pthread_mutex_destroy(&LOCK_mapped_file);
++  (void) pthread_mutex_destroy(&LOCK_status);
++  (void) pthread_mutex_destroy(&LOCK_error_log);
++  (void) pthread_mutex_destroy(&LOCK_delayed_insert);
++  (void) pthread_mutex_destroy(&LOCK_delayed_status);
++  (void) pthread_mutex_destroy(&LOCK_delayed_create);
++  (void) pthread_mutex_destroy(&LOCK_manager);
++  (void) pthread_mutex_destroy(&LOCK_crypt);
++  (void) pthread_mutex_destroy(&LOCK_bytes_sent);
++  (void) pthread_mutex_destroy(&LOCK_bytes_received);
++  (void) pthread_mutex_destroy(&LOCK_user_conn);
++  (void) pthread_mutex_destroy(&LOCK_connection_count);
++  Events::destroy_mutexes();
++#ifdef HAVE_OPENSSL
++  (void) pthread_mutex_destroy(&LOCK_des_key_file);
++#ifndef HAVE_YASSL
++  for (int i= 0; i < CRYPTO_num_locks(); ++i)
++    (void) rwlock_destroy(&openssl_stdlocks[i].lock);
++  OPENSSL_free(openssl_stdlocks);
++#endif
++#endif
++#ifdef HAVE_REPLICATION
++  (void) pthread_mutex_destroy(&LOCK_rpl_status);
++  (void) pthread_cond_destroy(&COND_rpl_status);
++#endif
++  (void) pthread_mutex_destroy(&LOCK_active_mi);
++  (void) rwlock_destroy(&LOCK_sys_init_connect);
++  (void) rwlock_destroy(&LOCK_sys_init_slave);
++  (void) pthread_mutex_destroy(&LOCK_global_system_variables);
++  (void) rwlock_destroy(&LOCK_system_variables_hash);
++  (void) pthread_mutex_destroy(&LOCK_global_read_lock);
++  (void) pthread_mutex_destroy(&LOCK_uuid_generator);
++  (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count);
++  (void) pthread_cond_destroy(&COND_thread_count);
++  (void) pthread_cond_destroy(&COND_refresh);
++  (void) pthread_cond_destroy(&COND_global_read_lock);
++  (void) pthread_cond_destroy(&COND_thread_cache);
++  (void) pthread_cond_destroy(&COND_flush_thread_cache);
++  (void) pthread_cond_destroy(&COND_manager);
++}
++
++#endif /*EMBEDDED_LIBRARY*/
++
++
++/****************************************************************************
++** Init IP and UNIX socket
++****************************************************************************/
++
++#ifndef EMBEDDED_LIBRARY
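++/**
++  Set the TCP port and unix socket / named pipe name to listen on.
++
++  Uses the compiled-in defaults unless they were given on the command line
++  or through the MYSQL_TCP_PORT / MYSQL_UNIX_PORT environment variables.
++*/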
++static void set_ports()
++{
++  char	*env;
++  if (!mysqld_port && !opt_disable_networking)
++  {					// Get port if not from commandline
++    mysqld_port= MYSQL_PORT;
++
++    /*
++      if builder specifically requested a default port, use that
++      (even if it coincides with our factory default).
++      only if they didn't do we check /etc/services (and, failing
++      on that, fall back to the factory default of 3306).
++      either default can be overridden by the environment variable
++      MYSQL_TCP_PORT, which in turn can be overridden with command
++      line options.
++    */
++
++#if MYSQL_PORT_DEFAULT == 0
++    struct  servent *serv_ptr;
++    if ((serv_ptr= getservbyname("mysql", "tcp")))
++      mysqld_port= ntohs((u_short) serv_ptr->s_port); /* purecov: inspected */
++#endif
++    if ((env = getenv("MYSQL_TCP_PORT")))
++      mysqld_port= (uint) atoi(env);		/* purecov: inspected */
++  }
++  if (!mysqld_unix_port)
++  {
++#ifdef __WIN__
++    mysqld_unix_port= (char*) MYSQL_NAMEDPIPE;
++#else
++    mysqld_unix_port= (char*) MYSQL_UNIX_ADDR;
++#endif
++    if ((env = getenv("MYSQL_UNIX_PORT")))
++      mysqld_unix_port= env;			/* purecov: inspected */
++  }
++}
++
++/* Change to run as another user if started with --user */
++
++static struct passwd *check_user(const char *user)
++{
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  struct passwd *tmp_user_info;
++  uid_t user_id= geteuid();
++
++  // Don't bother if we aren't superuser
++  if (user_id)
++  {
++    if (user)
++    {
++      /* Don't give a warning if the real user is the same as the one given with --user */
++      /* purecov: begin tested */
++      tmp_user_info= getpwnam(user);
++      if ((!tmp_user_info || user_id != tmp_user_info->pw_uid) &&
++	  global_system_variables.log_warnings)
++        sql_print_warning(
++                    "One can only use the --user switch if running as root\n");
++      /* purecov: end */
++    }
++    return NULL;
++  }
++  if (!user)
++  {
++    if (!opt_bootstrap)
++    {
++      sql_print_error("Fatal error: Please read \"Security\" section of the manual to find out how to run mysqld as root!\n");
++      unireg_abort(1);
++    }
++    return NULL;
++  }
++  /* purecov: begin tested */
++  if (!strcmp(user,"root"))
++    return NULL;                        // Avoid problem with dynamic libraries
++
++  if (!(tmp_user_info= getpwnam(user)))
++  {
++    // Allow a numeric uid to be used
++    const char *pos;
++    for (pos= user; my_isdigit(mysqld_charset,*pos); pos++) ;
++    if (*pos)                                   // Not numeric id
++      goto err;
++    if (!(tmp_user_info= getpwuid(atoi(user))))
++      goto err;
++  }
++  return tmp_user_info;
++  /* purecov: end */
++
++err:
++  sql_print_error("Fatal error: Can't change to run as user '%s' ;  Please check that the user exists!\n",user);
++  unireg_abort(1);
++
++#ifdef PR_SET_DUMPABLE
++  if (test_flags & TEST_CORE_ON_SIGNAL)
++  {
++    /* inform kernel that process is dumpable */
++    (void) prctl(PR_SET_DUMPABLE, 1);
++  }
++#endif
++
++#endif
++  return NULL;
++}
++
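++/**
++  Switch to the given user: set the supplementary groups, the group id and
++  the user id. Aborts the server if any of these calls fail.
++*/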
++static void set_user(const char *user, struct passwd *user_info_arg)
++{
++  /* purecov: begin tested */
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  DBUG_ASSERT(user_info_arg != 0);
++#ifdef HAVE_INITGROUPS
++  /*
++    We can get a SIGSEGV when calling initgroups() on some systems when NSS
++    is configured to use LDAP and the server is statically linked.  We set
++    calling_initgroups as a flag to the SIGSEGV handler that is then used to
++    output a specific message to help the user resolve this problem.
++  */
++  calling_initgroups= TRUE;
++  initgroups((char*) user, user_info_arg->pw_gid);
++  calling_initgroups= FALSE;
++#endif
++  if (setgid(user_info_arg->pw_gid) == -1)
++  {
++    sql_perror("setgid");
++    unireg_abort(1);
++  }
++  if (setuid(user_info_arg->pw_uid) == -1)
++  {
++    sql_perror("setuid");
++    unireg_abort(1);
++  }
++#endif
++  /* purecov: end */
++}
++
++
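++/**
++  Change only the effective user and group ids, keeping the real ids.
++  Aborts the server if setregid()/setreuid() fail.
++*/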
++static void set_effective_user(struct passwd *user_info_arg)
++{
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  DBUG_ASSERT(user_info_arg != 0);
++  if (setregid((gid_t)-1, user_info_arg->pw_gid) == -1)
++  {
++    sql_perror("setregid");
++    unireg_abort(1);
++  }
++  if (setreuid((uid_t)-1, user_info_arg->pw_uid) == -1)
++  {
++    sql_perror("setreuid");
++    unireg_abort(1);
++  }
++#endif
++}
++
++
++/** Change root user if started with @c --chroot . */
++static void set_root(const char *path)
++{
++#if !defined(__WIN__) && !defined(__NETWARE__)
++  if (chroot(path) == -1)
++  {
++    sql_perror("chroot");
++    unireg_abort(1);
++  }
++  my_setwd("/", MYF(0));
++#endif
++}
++
++static void network_init(void)
++{
++  struct sockaddr_in	IPaddr;
++#ifdef HAVE_SYS_UN_H
++  struct sockaddr_un	UNIXaddr;
++#endif
++  int	arg=1;
++  int   ret;
++  uint  waited;
++  uint  this_wait;
++  uint  retry;
++  DBUG_ENTER("network_init");
++  LINT_INIT(ret);
++
++  if (thread_scheduler.init())
++    unireg_abort(1);			/* purecov: inspected */
++
++  set_ports();
++
++  if (mysqld_port != 0 && !opt_disable_networking && !opt_bootstrap)
++  {
++    DBUG_PRINT("general",("IP Socket is %d",mysqld_port));
++    ip_sock = socket(AF_INET, SOCK_STREAM, 0);
++    if (ip_sock == INVALID_SOCKET)
++    {
++      DBUG_PRINT("error",("Got error: %d from socket()",socket_errno));
++      sql_perror(ER(ER_IPSOCK_ERROR));		/* purecov: tested */
++      unireg_abort(1);				/* purecov: tested */
++    }
++    bzero((char*) &IPaddr, sizeof(IPaddr));
++    IPaddr.sin_family = AF_INET;
++    IPaddr.sin_addr.s_addr = my_bind_addr;
++    IPaddr.sin_port = (unsigned short) htons((unsigned short) mysqld_port);
++
++#ifndef __WIN__
++    /*
++      We should not use SO_REUSEADDR on windows as this would enable a
++      user to open two mysqld servers with the same TCP/IP port.
++    */
++    (void) setsockopt(ip_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg,sizeof(arg));
++#endif /* __WIN__ */
++    /*
++      Sometimes the port is not released fast enough when stopping and
++      restarting the server. This happens quite often with the test suite
++      on busy Linux systems. Retry to bind the address at these intervals:
++      Sleep intervals: 1, 2, 4,  6,  9, 13, 17, 22, ...
++      Retry at second: 1, 3, 7, 13, 22, 35, 52, 74, ...
++      Limit the sequence by mysqld_port_timeout (set --port-open-timeout=#).
++    */
++    for (waited= 0, retry= 1; ; retry++, waited+= this_wait)
++    {
++      if (((ret= bind(ip_sock, my_reinterpret_cast(struct sockaddr *) (&IPaddr),
++                      sizeof(IPaddr))) >= 0) ||
++          (socket_errno != SOCKET_EADDRINUSE) ||
++          (waited >= mysqld_port_timeout))
++        break;
++      sql_print_information("Retrying bind on TCP/IP port %u", mysqld_port);
++      this_wait= retry * retry / 3 + 1;
++      sleep(this_wait);
++    }
++    if (ret < 0)
++    {
++      DBUG_PRINT("error",("Got error: %d from bind",socket_errno));
++      sql_perror("Can't start server: Bind on TCP/IP port");
++      sql_print_error("Do you already have another mysqld server running on port: %d ?",mysqld_port);
++      unireg_abort(1);
++    }
++    if (listen(ip_sock,(int) back_log) < 0)
++    {
++      sql_perror("Can't start server: listen() on TCP/IP port");
++      sql_print_error("listen() on TCP/IP failed with error %d",
++		      socket_errno);
++      unireg_abort(1);
++    }
++  }
++
++#ifdef __NT__
++  /* create named pipe */
++  if (Service.IsNT() && mysqld_unix_port[0] && !opt_bootstrap &&
++      opt_enable_named_pipe)
++  {
++    
++    strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\",
++	     mysqld_unix_port, NullS);
++    bzero((char*) &saPipeSecurity, sizeof(saPipeSecurity));
++    bzero((char*) &sdPipeDescriptor, sizeof(sdPipeDescriptor));
++    if (!InitializeSecurityDescriptor(&sdPipeDescriptor,
++				      SECURITY_DESCRIPTOR_REVISION))
++    {
++      sql_perror("Can't start server : Initialize security descriptor");
++      unireg_abort(1);
++    }
++    if (!SetSecurityDescriptorDacl(&sdPipeDescriptor, TRUE, NULL, FALSE))
++    {
++      sql_perror("Can't start server : Set security descriptor");
++      unireg_abort(1);
++    }
++    saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES);
++    saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor;
++    saPipeSecurity.bInheritHandle = FALSE;
++    if ((hPipe= CreateNamedPipe(pipe_name,
++				PIPE_ACCESS_DUPLEX|FILE_FLAG_OVERLAPPED,
++				PIPE_TYPE_BYTE |
++				PIPE_READMODE_BYTE |
++				PIPE_WAIT,
++				PIPE_UNLIMITED_INSTANCES,
++				(int) global_system_variables.net_buffer_length,
++				(int) global_system_variables.net_buffer_length,
++				NMPWAIT_USE_DEFAULT_WAIT,
++				&saPipeSecurity)) == INVALID_HANDLE_VALUE)
++      {
++	LPVOID lpMsgBuf;
++	int error=GetLastError();
++	FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
++		      FORMAT_MESSAGE_FROM_SYSTEM,
++		      NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
++		      (LPTSTR) &lpMsgBuf, 0, NULL );
++	sql_perror((char *)lpMsgBuf);
++	LocalFree(lpMsgBuf);
++	unireg_abort(1);
++      }
++  }
++#endif
++
++#if defined(HAVE_SYS_UN_H)
++  /*
++  ** Create the UNIX socket
++  */
++  if (mysqld_unix_port[0] && !opt_bootstrap)
++  {
++    DBUG_PRINT("general",("UNIX Socket is %s",mysqld_unix_port));
++
++    if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
++    {
++      sql_print_error("The socket file path is too long (> %u): %s",
++                      (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
++      unireg_abort(1);
++    }
++    if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
++    {
++      sql_perror("Can't start server : UNIX Socket "); /* purecov: inspected */
++      unireg_abort(1);				/* purecov: inspected */
++    }
++    bzero((char*) &UNIXaddr, sizeof(UNIXaddr));
++    UNIXaddr.sun_family = AF_UNIX;
++    strmov(UNIXaddr.sun_path, mysqld_unix_port);
++    (void) unlink(mysqld_unix_port);
++    (void) setsockopt(unix_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg,
++		      sizeof(arg));
++    umask(0);
++    if (bind(unix_sock, my_reinterpret_cast(struct sockaddr *) (&UNIXaddr),
++	     sizeof(UNIXaddr)) < 0)
++    {
++      sql_perror("Can't start server : Bind on unix socket"); /* purecov: tested */
++      sql_print_error("Do you already have another mysqld server running on socket: %s ?",mysqld_unix_port);
++      unireg_abort(1);					/* purecov: tested */
++    }
++    umask(((~my_umask) & 0666));
++#if defined(S_IFSOCK) && defined(SECURE_SOCKETS)
++    (void) chmod(mysqld_unix_port,S_IFSOCK);	/* Fix solaris 2.6 bug */
++#endif
++    if (listen(unix_sock,(int) back_log) < 0)
++      sql_print_warning("listen() on Unix socket failed with error %d",
++		      socket_errno);
++  }
++#endif
++  DBUG_PRINT("info",("server started"));
++  DBUG_VOID_RETURN;
++}
++
++#endif /*!EMBEDDED_LIBRARY*/
++
++
++#ifndef EMBEDDED_LIBRARY
++/**
++  Close a connection.
++
++  @param thd		Thread handle
++  @param errcode	Error code to print to console
++  @param lock	        1 if we have have to lock LOCK_thread_count
++
++  @note
++    For the connection that is doing shutdown, this is called twice
++*/
++void close_connection(THD *thd, uint errcode, bool lock)
++{
++  st_vio *vio;
++  DBUG_ENTER("close_connection");
++  DBUG_PRINT("enter",("fd: %s  error: '%s'",
++		      thd->net.vio ? vio_description(thd->net.vio) :
++		      "(not connected)",
++		      errcode ? ER(errcode) : ""));
++  if (lock)
++    (void) pthread_mutex_lock(&LOCK_thread_count);
++  thd->killed= THD::KILL_CONNECTION;
++  if ((vio= thd->net.vio) != 0)
++  {
++    if (errcode)
++      net_send_error(thd, errcode, ER(errcode)); /* purecov: inspected */
++    vio_close(vio);			/* vio is freed in delete thd */
++  }
++  if (lock)
++    (void) pthread_mutex_unlock(&LOCK_thread_count);
++  DBUG_VOID_RETURN;
++}
++#endif /* EMBEDDED_LIBRARY */
++
++
++/** Called when a thread is aborted. */
++/* ARGSUSED */
++extern "C" sig_handler end_thread_signal(int sig __attribute__((unused)))
++{
++  THD *thd=current_thd;
++  DBUG_ENTER("end_thread_signal");
++  if (thd && ! thd->bootstrap)
++  {
++    statistic_increment(killed_threads, &LOCK_status);
++    thread_scheduler.end_thread(thd,0);		/* purecov: inspected */
++  }
++  DBUG_VOID_RETURN;				/* purecov: deadcode */
++}
++
++
++/*
++  Unlink thd from global list of available connections and free thd
++
++  SYNOPSIS
++    unlink_thd()
++    thd		 Thread handler
++
++  NOTES
++    LOCK_thread_count is locked and left locked
++*/
++
++void unlink_thd(THD *thd)
++{
++  DBUG_ENTER("unlink_thd");
++  DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
++  thd->cleanup();
++
++  pthread_mutex_lock(&LOCK_connection_count);
++  --connection_count;
++  pthread_mutex_unlock(&LOCK_connection_count);
++
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  thread_count--;
++  delete thd;
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++  Store thread in cache for reuse by new connections
++
++  SYNOPSIS
++    cache_thread()
++
++  NOTES
++    LOCK_thread_count has to be locked
++
++  RETURN
++    0  Thread was not put in cache
++    1  Thread is to be reused by new connection.
++       (ie, caller should return, not abort with pthread_exit())
++*/
++
++
++static bool cache_thread()
++{
++  safe_mutex_assert_owner(&LOCK_thread_count);
++  if (cached_thread_count < thread_cache_size &&
++      ! abort_loop && !kill_cached_threads)
++  {
++    /* Don't kill the thread, just put it in cache for reuse */
++    DBUG_PRINT("info", ("Adding thread to cache"));
++    cached_thread_count++;
++    while (!abort_loop && ! wake_thread && ! kill_cached_threads)
++      (void) pthread_cond_wait(&COND_thread_cache, &LOCK_thread_count);
++    cached_thread_count--;
++    if (kill_cached_threads)
++      pthread_cond_signal(&COND_flush_thread_cache);
++    if (wake_thread)
++    {
++      THD *thd;
++      wake_thread--;
++      thd= thread_cache.get();
++      thd->thread_stack= (char*) &thd;          // For store_globals
++      (void) thd->store_globals();
++      /*
++        THD::mysys_var::abort is associated with physical thread rather
++        than with THD object. So we need to reset this flag before using
++        this thread for handling of new THD object/connection.
++      */
++      thd->mysys_var->abort= 0;
++      thd->thr_create_utime= my_micro_time();
++      threads.append(thd);
++      return(1);
++    }
++  }
++  return(0);
++}
++
++
++/*
++  End thread for the current connection
++
++  SYNOPSIS
++    one_thread_per_connection_end()
++    thd		  Thread handler
++    put_in_cache  Store thread in cache, if there is room in it
++                  Normally this is true in all cases except when we got
++                  out of resources initializing the current thread
++
++  NOTES
++    If thread is cached, we will wait until thread is scheduled to be
++    reused and then we will return.
++    If thread is not cached, we end the thread.
++
++  RETURN
++    0    Signal to handle_one_connection to reuse connection
++*/
++
++bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
++{
++  DBUG_ENTER("one_thread_per_connection_end");
++  unlink_thd(thd);
++  if (put_in_cache)
++    put_in_cache= cache_thread();
++  pthread_mutex_unlock(&LOCK_thread_count);
++  if (put_in_cache)
++    DBUG_RETURN(0);                             // Thread is reused
++
++  /* It's safe to broadcast outside a lock (COND... is not deleted here) */
++  DBUG_PRINT("signal", ("Broadcasting COND_thread_count"));
++  DBUG_LEAVE;                                   // Must match DBUG_ENTER()
++  my_thread_end();
++  (void) pthread_cond_broadcast(&COND_thread_count);
++
++  pthread_exit(0);
++  return 0;                                     // Avoid compiler warnings
++}
++
++
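++/**
++  Wake up all threads sitting in the thread cache and wait until the
++  cache is empty. Used by close_connections() at shutdown.
++*/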
++void flush_thread_cache()
++{
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  kill_cached_threads++;
++  while (cached_thread_count)
++  {
++    pthread_cond_broadcast(&COND_thread_cache);
++    pthread_cond_wait(&COND_flush_thread_cache,&LOCK_thread_count);
++  }
++  kill_cached_threads--;
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++}
++
++
++#ifdef THREAD_SPECIFIC_SIGPIPE
++/**
++  Aborts a thread nicely. Comes here on SIGPIPE.
++
++  @todo
++    One should have to fix that thr_alarm know about this thread too.
++*/
++extern "C" sig_handler abort_thread(int sig __attribute__((unused)))
++{
++  THD *thd=current_thd;
++  DBUG_ENTER("abort_thread");
++  if (thd)
++    thd->killed= THD::KILL_CONNECTION;
++  DBUG_VOID_RETURN;
++}
++#endif
++
++
++/******************************************************************************
++  Set up a signal thread that handles all signals.
++  Because Linux doesn't support schemas use a mutex to check that
++  the signal thread is ready before continuing
++******************************************************************************/
++
++#if defined(__WIN__)
++
++
++/*
++  On Windows, we use the native SetConsoleCtrlHandler to handle events like
++  Ctrl-C with a graceful shutdown.
++  Also, we do not use signal(), but SetUnhandledExceptionFilter instead - it
++  makes it possible to pass the exception to a just-in-time debugger, collect
++  dumps and potentially also use the exception and thread context to output
++  a callstack.
++*/
++
++static BOOL WINAPI console_event_handler( DWORD type ) 
++{
++  DBUG_ENTER("console_event_handler");
++#ifndef EMBEDDED_LIBRARY
++  if(type == CTRL_C_EVENT)
++  {
++     /*
++       Do not shut down before startup is finished and the shutdown
++       thread is initialized. Otherwise there is a race condition
++       between the main thread doing initialization and the CTRL-C thread
++       doing cleanup, which can result in a crash.
++     */
++#ifndef EMBEDDED_LIBRARY
++     if(hEventShutdown)
++       kill_mysql();
++     else
++#endif
++       sql_print_warning("CTRL-C ignored during startup");
++     DBUG_RETURN(TRUE);
++  }
++#endif
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  In Visual Studio 2005 and later, the default SIGABRT handler will overwrite
++  any unhandled exception filter set by the application and will try to
++  call the JIT debugger. This is not what we want, thus we call __debugbreak
++  to stop in the debugger if the process is being debugged, or to generate
++  EXCEPTION_BREAKPOINT so that handle_segfault will do its magic.
++*/
++
++#if (_MSC_VER >= 1400)
++static void my_sigabrt_handler(int sig)
++{
++  __debugbreak();
++}
++#endif /*_MSC_VER >=1400 */
++
++void win_install_sigabrt_handler(void)
++{
++#if (_MSC_VER >=1400)
++  /*abort() should not override our exception filter*/
++  _set_abort_behavior(0,_CALL_REPORTFAULT);
++  signal(SIGABRT,my_sigabrt_handler);
++#endif /* _MSC_VER >=1400 */
++}
++
++#ifdef DEBUG_UNHANDLED_EXCEPTION_FILTER
++#define DEBUGGER_ATTACH_TIMEOUT 120
++/*
++  Wait for a debugger to attach and break into it. If no debugger attaches,
++  resume after the timeout.
++*/
++static void wait_for_debugger(int timeout_sec)
++{
++   if(!IsDebuggerPresent())
++   {
++     int i;
++     printf("Waiting for debugger to attach, pid=%u\n",GetCurrentProcessId());
++     fflush(stdout);
++     for(i= 0; i < timeout_sec; i++)
++     {
++       Sleep(1000);
++       if(IsDebuggerPresent())
++       {
++         /* Break into debugger */
++         __debugbreak();
++         return;
++       }
++     }
++     printf("pid=%u, debugger not attached after %d seconds, resuming\n",GetCurrentProcessId(),
++       timeout_sec);
++     fflush(stdout);
++   }
++}
++#endif /* DEBUG_UNHANDLED_EXCEPTION_FILTER */
++
++LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers)
++{
++   static BOOL first_time= TRUE;
++   if(!first_time)
++   {
++     /*
++       This routine can be called twice, typically
++       when detaching in JIT debugger.
++       Return EXCEPTION_EXECUTE_HANDLER to terminate process.
++     */
++     return EXCEPTION_EXECUTE_HANDLER;
++   }
++   first_time= FALSE;
++#ifdef DEBUG_UNHANDLED_EXCEPTION_FILTER
++   /*
++    Unfortunately there is no clean way to debug unhandled exception filters,
++    as the debugger does not stop there (this is also documented in MSDN).
++    To work around this, one could put up a MessageBox, but that will not work
++    in a service. A better solution is to print an error message and sleep a
++    few minutes until a debugger is attached.
++  */
++  wait_for_debugger(DEBUGGER_ATTACH_TIMEOUT);
++#endif /* DEBUG_UNHANDLED_EXCEPTION_FILTER */
++  __try
++  {
++    my_set_exception_pointers(ex_pointers);
++    handle_segfault(ex_pointers->ExceptionRecord->ExceptionCode);
++  }
++  __except(EXCEPTION_EXECUTE_HANDLER)
++  {
++    DWORD written;
++    const char msg[] = "Got exception in exception handler!\n";
++    WriteFile(GetStdHandle(STD_OUTPUT_HANDLE),msg, sizeof(msg)-1, 
++      &written,NULL);
++  }
++  /*
++    Return EXCEPTION_CONTINUE_SEARCH to give JIT debugger
++    (drwtsn32 or vsjitdebugger) possibility to attach,
++    if JIT debugger is configured.
++    Windows Error reporting might generate a dump here.
++  */
++  return EXCEPTION_CONTINUE_SEARCH;
++}
++
++
++static void init_signals(void)
++{
++  win_install_sigabrt_handler();
++  if(opt_console)
++    SetConsoleCtrlHandler(console_event_handler,TRUE);
++
++    /* Avoid MessageBox()es*/
++  _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
++  _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
++  _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
++  _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
++  _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
++  _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
++
++   /*
++     Do not use SEM_NOGPFAULTERRORBOX in the following SetErrorMode (),
++     because it would prevent JIT debugger and Windows error reporting
++     from working. We need WER or JIT-debugging, since our own unhandled
++     exception filter is not guaranteed to work in all situations
++     (like heap corruption or stack overflow).
++   */
++  SetErrorMode(SetErrorMode(0) | SEM_FAILCRITICALERRORS
++                               | SEM_NOOPENFILEERRORBOX);
++  SetUnhandledExceptionFilter(my_unhandler_exception_filter);
++}
++
++
++static void start_signal_handler(void)
++{
++#ifndef EMBEDDED_LIBRARY
++  // Save vm id of this process
++  if (!opt_bootstrap)
++    create_pid_file();
++#endif /* EMBEDDED_LIBRARY */
++}
++
++
++static void check_data_home(const char *path)
++{}
++
++
++#elif defined(__NETWARE__)
++
++/// down server event callback.
++void mysql_down_server_cb(void *, void *)
++{
++  event_flag= TRUE;
++  kill_server(0);
++}
++
++
++/// destroy callback resources.
++void mysql_cb_destroy(void *)
++{
++  UnRegisterEventNotification(eh);  // cleanup down event notification
++  NX_UNWRAP_INTERFACE(ref);
++  /* Deregister NSS volume deactivation event */
++  NX_UNWRAP_INTERFACE(refneb);
++  if (neb_consumer_id)
++    UnRegisterConsumer(neb_consumer_id, NULL);
++}
++
++
++/// initialize callbacks.
++void mysql_cb_init()
++{
++  // register for down server event
++  void *handle = getnlmhandle();
++  rtag_t rt= AllocateResourceTag(handle, "MySQL Down Server Callback",
++                                 EventSignature);
++  NX_WRAP_INTERFACE((void *)mysql_down_server_cb, 2, (void **)&ref);
++  eh= RegisterForEventNotification(rt, EVENT_PRE_DOWN_SERVER,
++                                   EVENT_PRIORITY_APPLICATION,
++                                   NULL, ref, NULL);
++
++  /*
++    Register for volume deactivation event
++    Wrap the callback function, as it is called by non-LibC thread
++  */
++  (void *) NX_WRAP_INTERFACE(neb_event_callback, 1, &refneb);
++  registerwithneb();
++
++  NXVmRegisterExitHandler(mysql_cb_destroy, NULL);  // clean-up
++}
++
++
++/** Get the name of the NetWare volume that holds the MySQL data folder. */
++static void getvolumename()
++{
++  char *p;
++  /*
++    We assume that the data path is already set.
++    If it is not, we won't get here. Terminate after the volume name.
++  */
++  if ((p= strchr(mysql_real_data_home, ':')))
++    strmake(datavolname, mysql_real_data_home,
++            (uint) (p - mysql_real_data_home));
++}
++
++
++/**
++  Registering with NEB for NSS Volume Deactivation event.
++*/
++
++static void registerwithneb()
++{
++
++  ConsumerRegistrationInfo reg_info;
++    
++  /* Clear NEB registration structure */
++  bzero((char*) &reg_info, sizeof(struct ConsumerRegistrationInfo));
++
++  /* Fill the NEB consumer information structure */
++  reg_info.CRIVersion= 1;  	            // NEB version
++  /* NEB Consumer name */
++  reg_info.CRIConsumerName= (BYTE *) "MySQL Database Server";
++  /* Event of interest */
++  reg_info.CRIEventName= (BYTE *) "NSS.ChangeVolState.Enter";
++  reg_info.CRIUserParameter= NULL;	    // Consumer Info
++  reg_info.CRIEventFlags= 0;	            // Event flags
++  /* Consumer NLM handle */
++  reg_info.CRIOwnerID= (LoadDefinitionStructure *)getnlmhandle();
++  reg_info.CRIConsumerESR= NULL;	    // No consumer ESR required
++  reg_info.CRISecurityToken= 0;	            // No security token for the event
++  reg_info.CRIConsumerFlags= 0;             // SMP_ENABLED_BIT;	
++  reg_info.CRIFilterName= 0;	            // No event filtering
++  reg_info.CRIFilterDataLength= 0;          // No filtering data
++  reg_info.CRIFilterData= 0;	            // No filtering data
++  /* Callback function for the event */
++  (void *)reg_info.CRIConsumerCallback= (void *) refneb;
++  reg_info.CRIOrder= 0;	                    // Event callback order
++  reg_info.CRIConsumerType= CHECK_CONSUMER; // Consumer type
++
++  /* Register for the event with NEB */
++  if (RegisterConsumer(&reg_info))
++  {
++    consoleprintf("Failed to register for NSS Volume Deactivation event \n");
++    return;
++  }
++  /* This ID is required for deregistration */
++  neb_consumer_id= reg_info.CRIConsumerID;
++
++  /* Get MySQL data volume name, stored in global variable datavolname */
++  getvolumename();
++
++  /*
++    Get the NSS volume ID of the MySQL Data volume.
++    Volume ID is stored in a global variable
++  */
++  getvolumeID((BYTE*) datavolname);	
++}
++
++
++/**
++  Callback for NSS Volume Deactivation event.
++*/
++
++ulong neb_event_callback(struct EventBlock *eblock)
++{
++  EventChangeVolStateEnter_s *voldata;
++  extern bool nw_panic;
++
++  voldata= (EventChangeVolStateEnter_s *)eblock->EBEventData;
++
++  /* Deactivation of a volume */
++  if ((voldata->oldState == zVOLSTATE_ACTIVE &&
++       voldata->newState == zVOLSTATE_DEACTIVE ||
++       voldata->newState == zVOLSTATE_MAINTENANCE))
++  {
++    /*
++      Ensure that we bring down MySQL server only for MySQL data
++      volume deactivation
++    */
++    if (!memcmp(&voldata->volID, &datavolid, sizeof(VolumeID_t)))
++    {
++      consoleprintf("MySQL data volume is deactivated, shutting down MySQL Server \n");
++      event_flag= TRUE;
++      nw_panic = TRUE;
++      event_flag= TRUE;
++      kill_server(0);
++    }
++  }
++  return 0;
++}
++
++
++#define ADMIN_VOL_PATH					"_ADMIN:/Volumes/"
++
++/**
++  Function to get NSS volume ID of the MySQL data.
++*/
++static void getvolumeID(BYTE *volumeName)
++{
++  char path[zMAX_FULL_NAME];
++  Key_t rootKey= 0, fileKey= 0;
++  QUAD getInfoMask;
++  zInfo_s info;
++  STATUS status;
++
++  /* Get the root key */
++  if ((status= zRootKey(0, &rootKey)) != zOK)
++  {
++    consoleprintf("\nGetNSSVolumeProperties - Failed to get root key, status: %d\n.", (int) status);
++    goto exit;
++  }
++
++  /*
++    Get the file key. This is the key to the volume object in the
++    NSS admin volumes directory.
++  */
++
++  strxmov(path, (const char *) ADMIN_VOL_PATH, (const char *) volumeName,
++          NullS);
++  if ((status= zOpen(rootKey, zNSS_TASK, zNSPACE_LONG|zMODE_UTF8, 
++                     (BYTE *) path, zRR_READ_ACCESS, &fileKey)) != zOK)
++  {
++    consoleprintf("\nGetNSSVolumeProperties - Failed to get file, status: %d\n.", (int) status);
++    goto exit;
++  }
++
++  getInfoMask= zGET_IDS | zGET_VOLUME_INFO ;
++  if ((status= zGetInfo(fileKey, getInfoMask, sizeof(info), 
++                        zINFO_VERSION_A, &info)) != zOK)
++  {
++    consoleprintf("\nGetNSSVolumeProperties - Failed in zGetInfo, status: %d\n.", (int) status);
++    goto exit;
++  }
++
++  /* Copy the data to global variable */
++  datavolid.timeLow= info.vol.volumeID.timeLow;
++  datavolid.timeMid= info.vol.volumeID.timeMid;
++  datavolid.timeHighAndVersion= info.vol.volumeID.timeHighAndVersion;
++  datavolid.clockSeqHighAndReserved= info.vol.volumeID.clockSeqHighAndReserved;
++  datavolid.clockSeqLow= info.vol.volumeID.clockSeqLow;
++  /* This is guaranteed to be 6 bytes long (but sizeof() would be better) */
++  memcpy(datavolid.node, info.vol.volumeID.node, (unsigned int) 6);
++
++exit:
++  if (rootKey)
++    zClose(rootKey);
++  if (fileKey)
++    zClose(fileKey);
++}
++
++
++static void init_signals(void)
++{
++  int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT};
++
++  for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++)
++    signal(signals[i], kill_server);
++  mysql_cb_init();  // initialize callbacks
++
++}
++
++
++static void start_signal_handler(void)
++{
++  // Save vm id of this process
++  if (!opt_bootstrap)
++    create_pid_file();
++  // no signal handler
++}
++
++
++/**
++  Warn if the data is on a Traditional volume.
++
++  @note
++    Already done by mysqld_safe
++*/
++
++static void check_data_home(const char *path)
++{
++}
++
++#endif /*__WIN__ || __NETWARE */
++
++#ifdef HAVE_LINUXTHREADS
++#define UNSAFE_DEFAULT_LINUX_THREADS 200
++#endif
++
++
++#if BACKTRACE_DEMANGLE
++#include <cxxabi.h>
++extern "C" char *my_demangle(const char *mangled_name, int *status)
++{
++  return abi::__cxa_demangle(mangled_name, NULL, NULL, status);
++}
++#endif
++
++
++extern "C" sig_handler handle_segfault(int sig)
++{
++  time_t curr_time;
++  struct tm tm;
++
++  /*
++    Strictly speaking, one needs a mutex here
++    but since we have got SIGSEGV already, things are a mess
++    so not having the mutex is not as bad as possibly using a buggy
++    mutex - so we keep things simple
++  */
++  if (segfaulted)
++  {
++    fprintf(stderr, "Fatal " SIGNAL_FMT " while backtracing\n", sig);
++    exit(1);
++  }
++
++  segfaulted = 1;
++
++  curr_time= my_time(0);
++  localtime_r(&curr_time, &tm);
++
++  fprintf(stderr,"\
++%02d%02d%02d %2d:%02d:%02d - mysqld got " SIGNAL_FMT " ;\n\
++This could be because you hit a bug. It is also possible that this binary\n\
++or one of the libraries it was linked against is corrupt, improperly built,\n\
++or misconfigured. This error can also be caused by malfunctioning hardware.\n",
++          tm.tm_year % 100, tm.tm_mon+1, tm.tm_mday,
++          tm.tm_hour, tm.tm_min, tm.tm_sec,
++	  sig);
++  fprintf(stderr, "\
++We will try our best to scrape up some info that will hopefully help diagnose\n\
++the problem, but since we have already crashed, something is definitely wrong\n\
++and this may fail.\n\n");
++  fprintf(stderr, "key_buffer_size=%lu\n",
++          (ulong) dflt_key_cache->key_cache_mem_size);
++  fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size);
++  fprintf(stderr, "max_used_connections=%lu\n", max_used_connections);
++  fprintf(stderr, "max_threads=%u\n", thread_scheduler.max_threads);
++  fprintf(stderr, "threads_connected=%u\n", thread_count);
++  fprintf(stderr, "It is possible that mysqld could use up to \n\
++key_buffer_size + (read_buffer_size + sort_buffer_size)*max_threads = %lu K\n\
++bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size +
++		     (global_system_variables.read_buff_size +
++		      global_system_variables.sortbuff_size) *
++		     thread_scheduler.max_threads +
++                     max_connections * sizeof(THD)) / 1024);
++  fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
++
++#if defined(HAVE_LINUXTHREADS)
++  if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
++  {
++    fprintf(stderr, "\
++You seem to be running 32-bit Linux and have %d concurrent connections.\n\
++If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\
++yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\
++the thread stack. Please read http://dev.mysql.com/doc/mysql/en/linux.html\n\n",
++	    thread_count);
++  }
++#endif /* HAVE_LINUXTHREADS */
++
++#ifdef HAVE_STACKTRACE
++  THD *thd=current_thd;
++
++  if (!(test_flags & TEST_NO_STACKTRACE))
++  {
++    fprintf(stderr, "Thread pointer: 0x%lx\n", (long) thd);
++    fprintf(stderr, "Attempting backtrace. You can use the following "
++                    "information to find out\nwhere mysqld died. If "
++                    "you see no messages after this, something went\n"
++                    "terribly wrong...\n");
++    my_print_stacktrace(thd ? (uchar*) thd->thread_stack : NULL,
++                        my_thread_stack_size);
++  }
++  if (thd)
++  {
++    const char *kreason= "UNKNOWN";
++    switch (thd->killed) {
++    case THD::NOT_KILLED:
++      kreason= "NOT_KILLED";
++      break;
++    case THD::KILL_BAD_DATA:
++      kreason= "KILL_BAD_DATA";
++      break;
++    case THD::KILL_CONNECTION:
++      kreason= "KILL_CONNECTION";
++      break;
++    case THD::KILL_QUERY:
++      kreason= "KILL_QUERY";
++      break;
++    case THD::KILLED_NO_VALUE:
++      kreason= "KILLED_NO_VALUE";
++      break;
++    }
++    fprintf(stderr, "\nTrying to get some variables.\n"
++                    "Some pointers may be invalid and cause the dump to abort.\n");
++    fprintf(stderr, "Query (%p): ", thd->query());
++    my_safe_print_str(thd->query(), MYSQL_MIN(1024, thd->query_length()));
++    fprintf(stderr, "Connection ID (thread ID): %lu\n", (ulong) thd->thread_id);
++    fprintf(stderr, "Status: %s\n", kreason);
++    fputc('\n', stderr);
++  }
++  fprintf(stderr, "\
++The manual page at http://dev.mysql.com/doc/mysql/en/crashing.html contains\n\
++information that should help you find out what is causing the crash.\n");
++  fflush(stderr);
++#endif /* HAVE_STACKTRACE */
++
++#ifdef HAVE_INITGROUPS
++  if (calling_initgroups)
++    fprintf(stderr, "\n\
++This crash occurred while the server was calling initgroups(). This is\n\
++often due to the use of a mysqld that is statically linked against glibc\n\
++and configured to use LDAP in /etc/nsswitch.conf. You will need to either\n\
++upgrade to a version of glibc that does not have this problem (2.3.4 or\n\
++later when used with nscd), disable LDAP in your nsswitch.conf, or use a\n\
++mysqld that is not statically linked.\n");
++#endif
++
++#ifdef HAVE_NPTL
++  if (thd_lib_detected == THD_LIB_LT && !getenv("LD_ASSUME_KERNEL"))
++    fprintf(stderr,"\n\
++You are running a statically-linked LinuxThreads binary on an NPTL system.\n\
++This can result in crashes on some distributions due to LT/NPTL conflicts.\n\
++You should either build a dynamically-linked binary, or force LinuxThreads\n\
++to be used with the LD_ASSUME_KERNEL environment variable. Please consult\n\
++the documentation for your distribution on how to do that.\n");
++#endif
++  
++  if (locked_in_memory)
++  {
++    fprintf(stderr, "\n\
++The \"--memlock\" argument, which was enabled, uses system calls that are\n\
++unreliable and unstable on some operating systems and operating-system\n\
++versions (notably, some versions of Linux).  This crash could be due to use\n\
++of those buggy OS calls.  You should consider whether you really need the\n\
++\"--memlock\" parameter and/or consult the OS distributer about \"mlockall\"\n\
++bugs.\n");
++  }
++
++#ifdef HAVE_WRITE_CORE
++  if (test_flags & TEST_CORE_ON_SIGNAL)
++  {
++    fprintf(stderr, "Writing a core file\n");
++    fflush(stderr);
++    my_write_core(sig);
++  }
++#endif
++
++#ifndef __WIN__
++  /* On Windows, do not terminate, but pass control to exception filter */
++  exit(1);
++#endif
++}
++
++#if !defined(__WIN__) && !defined(__NETWARE__)
++#ifndef SA_RESETHAND
++#define SA_RESETHAND 0
++#endif
++#ifndef SA_NODEFER
++#define SA_NODEFER 0
++#endif
++
++#ifndef EMBEDDED_LIBRARY
++
++static void init_signals(void)
++{
++  sigset_t set;
++  struct sigaction sa;
++  DBUG_ENTER("init_signals");
++
++  my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called!
++
++  if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL))
++  {
++    sa.sa_flags = SA_RESETHAND | SA_NODEFER;
++    sigemptyset(&sa.sa_mask);
++    sigprocmask(SIG_SETMASK,&sa.sa_mask,NULL);
++
++#ifdef HAVE_STACKTRACE
++    my_init_stacktrace();
++#endif
++#if defined(__amiga__)
++    sa.sa_handler=(void(*)())handle_segfault;
++#else
++    sa.sa_handler=handle_segfault;
++#endif
++    sigaction(SIGSEGV, &sa, NULL);
++    sigaction(SIGABRT, &sa, NULL);
++#ifdef SIGBUS
++    sigaction(SIGBUS, &sa, NULL);
++#endif
++    sigaction(SIGILL, &sa, NULL);
++    sigaction(SIGFPE, &sa, NULL);
++  }
++
++#ifdef HAVE_GETRLIMIT
++  if (test_flags & TEST_CORE_ON_SIGNAL)
++  {
++    /* Change limits so that we will get a core file */
++    STRUCT_RLIMIT rl;
++    rl.rlim_cur = rl.rlim_max = RLIM_INFINITY;
++    if (setrlimit(RLIMIT_CORE, &rl) && global_system_variables.log_warnings)
++      sql_print_warning("setrlimit could not change the size of core files to 'infinity';  We may not be able to generate a core file on signals");
++  }
++#endif
++  (void) sigemptyset(&set);
++  my_sigset(SIGPIPE,SIG_IGN);
++  sigaddset(&set,SIGPIPE);
++#ifndef IGNORE_SIGHUP_SIGQUIT
++  sigaddset(&set,SIGQUIT);
++  sigaddset(&set,SIGHUP);
++#endif
++  sigaddset(&set,SIGTERM);
++
++  /* Fix signals if blocked by parents (can happen on Mac OS X) */
++  sigemptyset(&sa.sa_mask);
++  sa.sa_flags = 0;
++  sa.sa_handler = print_signal_warning;
++  sigaction(SIGTERM, &sa, (struct sigaction*) 0);
++  sa.sa_flags = 0;
++  sa.sa_handler = print_signal_warning;
++  sigaction(SIGHUP, &sa, (struct sigaction*) 0);
++#ifdef SIGTSTP
++  sigaddset(&set,SIGTSTP);
++#endif
++  if (thd_lib_detected != THD_LIB_LT)
++    sigaddset(&set,THR_SERVER_ALARM);
++  if (test_flags & TEST_SIGINT)
++  {
++    my_sigset(thr_kill_signal, end_thread_signal);
++    // May be SIGINT
++    sigdelset(&set, thr_kill_signal);
++  }
++  else
++    sigaddset(&set,SIGINT);
++  sigprocmask(SIG_SETMASK,&set,NULL);
++  pthread_sigmask(SIG_SETMASK,&set,NULL);
++  DBUG_VOID_RETURN;
++}
++
++
++static void start_signal_handler(void)
++{
++  int error;
++  pthread_attr_t thr_attr;
++  DBUG_ENTER("start_signal_handler");
++
++  (void) pthread_attr_init(&thr_attr);
++#if !defined(HAVE_DEC_3_2_THREADS)
++  pthread_attr_setscope(&thr_attr,PTHREAD_SCOPE_SYSTEM);
++  (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
++  if (!(opt_specialflag & SPECIAL_NO_PRIOR))
++    my_pthread_attr_setprio(&thr_attr,INTERRUPT_PRIOR);
++#if defined(__ia64__) || defined(__ia64)
++  /*
++    Peculiar things with ia64 platforms - it seems we only have half the
++    stack size in reality, so we have to double it here
++  */
++  pthread_attr_setstacksize(&thr_attr,my_thread_stack_size*2);
++#else
++  pthread_attr_setstacksize(&thr_attr,my_thread_stack_size);
++#endif
++#endif
++
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  if ((error=pthread_create(&signal_thread,&thr_attr,signal_hand,0)))
++  {
++    sql_print_error("Can't create interrupt-thread (error %d, errno: %d)",
++		    error,errno);
++    exit(1);
++  }
++  (void) pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
++  pthread_mutex_unlock(&LOCK_thread_count);
++
++  (void) pthread_attr_destroy(&thr_attr);
++  DBUG_VOID_RETURN;
++}
++
++
++/** This threads handles all signals and alarms. */
++/* ARGSUSED */
++pthread_handler_t signal_hand(void *arg __attribute__((unused)))
++{
++  sigset_t set;
++  int sig;
++  my_thread_init();				// Init new thread
++  DBUG_ENTER("signal_hand");
++  signal_thread_in_use= 1;
++
++  /*
++    Setup alarm handler
++    This should actually be '+ max_number_of_slaves' instead of +10,
++    but the +10 should be quite safe.
++  */
++  init_thr_alarm(thread_scheduler.max_threads +
++		 global_system_variables.max_insert_delayed_threads + 10);
++  if (thd_lib_detected != THD_LIB_LT && (test_flags & TEST_SIGINT))
++  {
++    (void) sigemptyset(&set);			// Setup up SIGINT for debug
++    (void) sigaddset(&set,SIGINT);		// For debugging
++    (void) pthread_sigmask(SIG_UNBLOCK,&set,NULL);
++  }
++  (void) sigemptyset(&set);			// Setup up SIGINT for debug
++#ifdef USE_ONE_SIGNAL_HAND
++  (void) sigaddset(&set,THR_SERVER_ALARM);	// For alarms
++#endif
++#ifndef IGNORE_SIGHUP_SIGQUIT
++  (void) sigaddset(&set,SIGQUIT);
++  (void) sigaddset(&set,SIGHUP);
++#endif
++  (void) sigaddset(&set,SIGTERM);
++  (void) sigaddset(&set,SIGTSTP);
++
++  /* Save pid to this process (or thread on Linux) */
++  if (!opt_bootstrap)
++    create_pid_file();
++
++  /*
++    signal to start_signal_handler that we are ready
++    This works by waiting for start_signal_handler to free mutex,
++    after which we signal it that we are ready.
++    At this point there are no other threads running, so there
++    should not be any other pthread_cond_signal() calls.
++  */
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++  (void) pthread_cond_broadcast(&COND_thread_count);
++
++  (void) pthread_sigmask(SIG_BLOCK,&set,NULL);
++  for (;;)
++  {
++    int error;					// Used when debugging
++    if (shutdown_in_progress && !abort_loop)
++    {
++      sig= SIGTERM;
++      error=0;
++    }
++    else
++      while ((error=my_sigwait(&set,&sig)) == EINTR) ;
++    if (cleanup_done)
++    {
++      DBUG_PRINT("quit",("signal_handler: calling my_thread_end()"));
++      my_thread_end();
++      signal_thread_in_use= 0;
++      DBUG_LEAVE;                               // Must match DBUG_ENTER()
++      pthread_exit(0);				// Safety
++      return 0;                                 // Avoid compiler warnings
++    }
++    switch (sig) {
++    case SIGTERM:
++    case SIGQUIT:
++    case SIGKILL:
++#ifdef EXTRA_DEBUG
++      sql_print_information("Got signal %d to shutdown mysqld",sig);
++#endif
++      /* switch to the old log message processing */
++      logger.set_handlers(LOG_FILE, opt_slow_log ? LOG_FILE:LOG_NONE,
++                          opt_log ? LOG_FILE:LOG_NONE);
++      DBUG_PRINT("info",("Got signal: %d  abort_loop: %d",sig,abort_loop));
++      if (!abort_loop)
++      {
++	abort_loop=1;				// mark abort for threads
++#ifdef USE_ONE_SIGNAL_HAND
++	pthread_t tmp;
++	if (!(opt_specialflag & SPECIAL_NO_PRIOR))
++	  my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR);
++	if (pthread_create(&tmp,&connection_attrib, kill_server_thread,
++			   (void*) &sig))
++	  sql_print_error("Can't create thread to kill server");
++#else
++	kill_server((void*) sig);	// MIT THREAD has an alarm thread
++#endif
++      }
++      break;
++    case SIGHUP:
++      if (!abort_loop)
++      {
++        int not_used;
++	mysql_print_status();		// Print some debug info
++	reload_acl_and_cache((THD*) 0,
++			     (REFRESH_LOG | REFRESH_TABLES | REFRESH_FAST |
++			      REFRESH_GRANT |
++			      REFRESH_THREADS | REFRESH_HOSTS),
++			     (TABLE_LIST*) 0, &not_used); // Flush logs
++      }
++      /* reenable logs after the options were reloaded */
++      if (log_output_options & LOG_NONE)
++      {
++        logger.set_handlers(LOG_FILE,
++                            opt_slow_log ? LOG_TABLE : LOG_NONE,
++                            opt_log ? LOG_TABLE : LOG_NONE);
++      }
++      else
++      {
++        logger.set_handlers(LOG_FILE,
++                            opt_slow_log ? log_output_options : LOG_NONE,
++                            opt_log ? log_output_options : LOG_NONE);
++      }
++      break;
++#ifdef USE_ONE_SIGNAL_HAND
++    case THR_SERVER_ALARM:
++      process_alarm(sig);			// Trigger alarms.
++      break;
++#endif
++    default:
++#ifdef EXTRA_DEBUG
++      sql_print_warning("Got signal: %d  error: %d",sig,error); /* purecov: tested */
++#endif
++      break;					/* purecov: tested */
++    }
++  }
++  return(0);					/* purecov: deadcode */
++}
++
++static void check_data_home(const char *path)
++{}
++
++#endif /*!EMBEDDED_LIBRARY*/
++#endif	/* __WIN__*/
++
++
++/**
++  All global error messages are sent here where the first one is stored
++  for the client.
++*/
++/* ARGSUSED */
++extern "C" int my_message_sql(uint error, const char *str, myf MyFlags);
++
++int my_message_sql(uint error, const char *str, myf MyFlags)
++{
++  THD *thd;
++  DBUG_ENTER("my_message_sql");
++  DBUG_PRINT("error", ("error: %u  message: '%s'", error, str));
++
++  DBUG_ASSERT(str != NULL);
++  /*
++    An error should have a valid error number (!= 0), so it can be caught
++    in stored procedures by SQL exception handlers.
++    Calling my_error() with error == 0 is a bug.
++    Remaining known places to fix:
++    - storage/myisam/mi_create.c, my_printf_error()
++    TODO:
++    DBUG_ASSERT(error != 0);
++  */
++
++  if (error == 0)
++  {
++    /* At least, prevent new abuse ... */
++    DBUG_ASSERT(strncmp(str, "MyISAM table", 12) == 0);
++    error= ER_UNKNOWN_ERROR;
++  }
++
++  if ((thd= current_thd))
++  {
++    /*
++      TODO: There are two exception mechanisms (THD and sp_rcontext);
++      this could be improved by having a common stack of handlers.
++    */
++    if (thd->handle_error(error, str,
++                          MYSQL_ERROR::WARN_LEVEL_ERROR))
++      DBUG_RETURN(0);
++
++    thd->is_slave_error=  1; // needed to catch query errors during replication
++
++    /*
++      thd->lex->current_select == 0 if lex structure is not inited
++      (not query command (COM_QUERY))
++    */
++    if (thd->lex->current_select &&
++	thd->lex->current_select->no_error && !thd->is_fatal_error)
++    {
++      DBUG_PRINT("error",
++                 ("Error converted to warning: current_select: no_error %d  "
++                  "fatal_error: %d",
++                  (thd->lex->current_select ?
++                   thd->lex->current_select->no_error : 0),
++                  (int) thd->is_fatal_error));
++    }
++    else
++    {
++      if (! thd->main_da.is_error())            // Return only first message
++      {
++        thd->main_da.set_error_status(thd, error, str);
++      }
++      query_cache_abort(&thd->net);
++    }
++    /*
++      If a continue handler is found, the error message will be cleared
++      by the stored procedures code.
++    */
++    if (thd->spcont &&
++        ! (MyFlags & ME_NO_SP_HANDLER) &&
++        thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd))
++    {
++      /*
++        Do not push any warnings, a handled error must be completely
++        silenced.
++      */
++      DBUG_RETURN(0);
++    }
++
++    /* When simulating OOM, skip writing to error log to avoid mtr errors */
++    DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_RETURN(0););
++
++    if (!thd->no_warnings_for_error &&
++        !(MyFlags & ME_NO_WARNING_FOR_ERROR))
++    {
++      /*
++        Suppress infinite recursion if there is a memory allocation error
++        inside push_warning.
++      */
++      thd->no_warnings_for_error= TRUE;
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str);
++      thd->no_warnings_for_error= FALSE;
++    }
++  }
++
++  /* When simulating OOM, skip writing to error log to avoid mtr errors */
++  DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_RETURN(0););
++
++  if (!thd || MyFlags & ME_NOREFRESH)
++    sql_print_error("%s: %s",my_progname,str); /* purecov: inspected */
++  DBUG_RETURN(0);
++}
++
++
++#ifndef EMBEDDED_LIBRARY
++extern "C" void *my_str_malloc_mysqld(size_t size);
++extern "C" void my_str_free_mysqld(void *ptr);
++
++void *my_str_malloc_mysqld(size_t size)
++{
++  return my_malloc(size, MYF(MY_FAE));
++}
++
++
++void my_str_free_mysqld(void *ptr)
++{
++  my_free((uchar*)ptr, MYF(MY_FAE));
++}
++#endif /* EMBEDDED_LIBRARY */
++
++
++#ifdef __WIN__
++
++pthread_handler_t handle_shutdown(void *arg)
++{
++  MSG msg;
++  my_thread_init();
++
++  /* this call should create the message queue for this thread */
++  PeekMessage(&msg, NULL, 1, 65534,PM_NOREMOVE);
++#if !defined(EMBEDDED_LIBRARY)
++  if (WaitForSingleObject(hEventShutdown,INFINITE)==WAIT_OBJECT_0)
++#endif /* EMBEDDED_LIBRARY */
++     kill_server(MYSQL_KILL_SIGNAL);
++  return 0;
++}
++#endif
++
++const char *load_default_groups[]= {
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++"mysql_cluster",
++#endif
++"mysqld","server", MYSQL_BASE_VERSION, 0, 0};
++
++#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
++static const int load_default_groups_sz=
++sizeof(load_default_groups)/sizeof(load_default_groups[0]);
++#endif
++
++
++#ifndef EMBEDDED_LIBRARY
++static
++int
++check_enough_stack_size()
++{
++  uchar stack_top;
++
++  return check_stack_overrun(current_thd, STACK_MIN_SIZE,
++                             &stack_top);
++}
++#endif
++
++
++/**
++  Initialize one of the global date/time format variables.
++
++  @param format_type		What kind of format should be supported
++  @param var_ptr		Pointer to variable that should be updated
++
++  @note
++    The default value is taken from either opt_date_time_formats[] or
++    the ISO format (ANSI SQL)
++
++  @retval
++    0 ok
++  @retval
++    1 error
++*/
++
++static bool init_global_datetime_format(timestamp_type format_type,
++                                        DATE_TIME_FORMAT **var_ptr)
++{
++  /* Get command line option */
++  const char *str= opt_date_time_formats[format_type];
++
++  if (!str)					// No specified format
++  {
++    str= get_date_time_format_str(&known_date_time_formats[ISO_FORMAT],
++				  format_type);
++    /*
++      Set the "command line" option to point to the generated string so
++      that we can set global formats back to default
++    */
++    opt_date_time_formats[format_type]= str;
++  }
++  if (!(*var_ptr= date_time_format_make(format_type, str, strlen(str))))
++  {
++    fprintf(stderr, "Wrong date/time format specifier: %s\n", str);
++    return 1;
++  }
++  return 0;
++}
++
++SHOW_VAR com_status_vars[]= {
++  {"admin_commands",       (char*) offsetof(STATUS_VAR, com_other), SHOW_LONG_STATUS},
++  {"assign_to_keycache",   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ASSIGN_TO_KEYCACHE]), SHOW_LONG_STATUS},
++  {"alter_db",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS},
++  {"alter_db_upgrade",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB_UPGRADE]), SHOW_LONG_STATUS},
++  {"alter_event",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_EVENT]), SHOW_LONG_STATUS},
++  {"alter_function",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_FUNCTION]), SHOW_LONG_STATUS},
++  {"alter_procedure",      (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_PROCEDURE]), SHOW_LONG_STATUS},
++  {"alter_server",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_SERVER]), SHOW_LONG_STATUS},
++  {"alter_table",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS},
++  {"alter_tablespace",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLESPACE]), SHOW_LONG_STATUS},
++  {"analyze",              (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS},
++  {"backup_table",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS},
++  {"begin",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS},
++  {"binlog",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BINLOG_BASE64_EVENT]), SHOW_LONG_STATUS},
++  {"call_procedure",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CALL]), SHOW_LONG_STATUS},
++  {"change_db",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS},
++  {"change_master",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS},
++  {"check",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS},
++  {"checksum",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS},
++  {"commit",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS},
++  {"create_db",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS},
++  {"create_event",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_EVENT]), SHOW_LONG_STATUS},
++  {"create_function",      (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_SPFUNCTION]), SHOW_LONG_STATUS},
++  {"create_index",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS},
++  {"create_procedure",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_PROCEDURE]), SHOW_LONG_STATUS},
++  {"create_server",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_SERVER]), SHOW_LONG_STATUS},
++  {"create_table",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS},
++  {"create_trigger",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TRIGGER]), SHOW_LONG_STATUS},
++  {"create_udf",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS},
++  {"create_user",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_USER]), SHOW_LONG_STATUS},
++  {"create_view",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_VIEW]), SHOW_LONG_STATUS},
++  {"dealloc_sql",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS},
++  {"delete",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS},
++  {"delete_multi",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS},
++  {"do",                   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS},
++  {"drop_db",              (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS},
++  {"drop_event",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_EVENT]), SHOW_LONG_STATUS},
++  {"drop_function",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS},
++  {"drop_index",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS},
++  {"drop_procedure",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_PROCEDURE]), SHOW_LONG_STATUS},
++  {"drop_server",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_SERVER]), SHOW_LONG_STATUS},
++  {"drop_table",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS},
++  {"drop_trigger",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TRIGGER]), SHOW_LONG_STATUS},
++  {"drop_user",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS},
++  {"drop_view",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_VIEW]), SHOW_LONG_STATUS},
++  {"empty_query",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EMPTY_QUERY]), SHOW_LONG_STATUS},
++  {"execute_sql",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS},
++  {"flush",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS},
++  {"grant",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS},
++  {"ha_close",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS},
++  {"ha_open",              (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS},
++  {"ha_read",              (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS},
++  {"help",                 (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS},
++  {"insert",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS},
++  {"insert_select",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS},
++  {"install_plugin",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSTALL_PLUGIN]), SHOW_LONG_STATUS},
++  {"kill",                 (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS},
++  {"load",                 (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS},
++  {"load_master_data",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS},
++  {"load_master_table",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS},
++  {"lock_tables",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS},
++  {"optimize",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS},
++  {"preload_keys",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS},
++  {"prepare_sql",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS},
++  {"purge",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS},
++  {"purge_before_date",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS},
++  {"release_savepoint",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RELEASE_SAVEPOINT]), SHOW_LONG_STATUS},
++  {"rename_table",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS},
++  {"rename_user",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_USER]), SHOW_LONG_STATUS},
++  {"repair",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS},
++  {"replace",              (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS},
++  {"replace_select",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS},
++  {"reset",                (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS},
++  {"restore_table",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS},
++  {"revoke",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS},
++  {"revoke_all",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS},
++  {"rollback",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS},
++  {"rollback_to_savepoint",(char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK_TO_SAVEPOINT]), SHOW_LONG_STATUS},
++  {"savepoint",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS},
++  {"select",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS},
++  {"set_option",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS},
++  {"show_authors",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_AUTHORS]), SHOW_LONG_STATUS},
++  {"show_binlog_events",   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS},
++  {"show_binlogs",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS},
++  {"show_charsets",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS},
++  {"show_collations",      (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
++  {"show_column_types",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
++  {"show_contributors",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CONTRIBUTORS]), SHOW_LONG_STATUS},
++  {"show_create_db",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
++  {"show_create_event",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_EVENT]), SHOW_LONG_STATUS},
++  {"show_create_func",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_FUNC]), SHOW_LONG_STATUS},
++  {"show_create_proc",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_PROC]), SHOW_LONG_STATUS},
++  {"show_create_table",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS},
++  {"show_create_trigger",  (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_TRIGGER]), SHOW_LONG_STATUS},
++  {"show_databases",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS},
++  {"show_engine_logs",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS},
++  {"show_engine_mutex",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS},
++  {"show_engine_status",   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
++  {"show_events",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EVENTS]), SHOW_LONG_STATUS},
++  {"show_errors",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
++  {"show_fields",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
++#ifndef DBUG_OFF
++  {"show_function_code",   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FUNC_CODE]), SHOW_LONG_STATUS},
++#endif
++  {"show_function_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS_FUNC]), SHOW_LONG_STATUS},
++  {"show_grants",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
++  {"show_keys",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
++  {"show_master_status",   (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
++  {"show_new_master",      (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
++  {"show_open_tables",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
++  {"show_plugins",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PLUGINS]), SHOW_LONG_STATUS},
++  {"show_privileges",      (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS},
++#ifndef DBUG_OFF
++  {"show_procedure_code",  (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROC_CODE]), SHOW_LONG_STATUS},
++#endif
++  {"show_procedure_status",(char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS_PROC]), SHOW_LONG_STATUS},
++  {"show_processlist",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS},
++  {"show_profile",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROFILE]), SHOW_LONG_STATUS},
++  {"show_profiles",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROFILES]), SHOW_LONG_STATUS},
++  {"show_slave_hosts",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS},
++  {"show_slave_status",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS},
++  {"show_status",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
++  {"show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS},
++  {"show_table_status",    (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLE_STATUS]), SHOW_LONG_STATUS},
++  {"show_tables",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS},
++  {"show_triggers",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS},
++  {"show_variables",       (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
++  {"show_warnings",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
++  {"slave_start",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
++  {"slave_stop",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS},
++  {"stmt_close",           (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS},
++  {"stmt_execute",         (char*) offsetof(STATUS_VAR, com_stmt_execute), SHOW_LONG_STATUS},
++  {"stmt_fetch",           (char*) offsetof(STATUS_VAR, com_stmt_fetch), SHOW_LONG_STATUS},
++  {"stmt_prepare",         (char*) offsetof(STATUS_VAR, com_stmt_prepare), SHOW_LONG_STATUS},
++  {"stmt_reprepare",       (char*) offsetof(STATUS_VAR, com_stmt_reprepare), SHOW_LONG_STATUS},
++  {"stmt_reset",           (char*) offsetof(STATUS_VAR, com_stmt_reset), SHOW_LONG_STATUS},
++  {"stmt_send_long_data",  (char*) offsetof(STATUS_VAR, com_stmt_send_long_data), SHOW_LONG_STATUS},
++  {"truncate",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS},
++  {"uninstall_plugin",     (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNINSTALL_PLUGIN]), SHOW_LONG_STATUS},
++  {"unlock_tables",        (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS},
++  {"update",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS},
++  {"update_multi",         (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS},
++  {"xa_commit",            (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_COMMIT]),SHOW_LONG_STATUS},
++  {"xa_end",               (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_END]),SHOW_LONG_STATUS},
++  {"xa_prepare",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_PREPARE]),SHOW_LONG_STATUS},
++  {"xa_recover",           (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_RECOVER]),SHOW_LONG_STATUS},
++  {"xa_rollback",          (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_ROLLBACK]),SHOW_LONG_STATUS},
++  {"xa_start",             (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_START]),SHOW_LONG_STATUS},
++  {NullS, NullS, SHOW_LONG}
++};
++
++static int init_common_variables(const char *conf_file_name, int argc,
++				 char **argv, const char **groups)
++{
++  char buff[FN_REFLEN], *s;
++  umask(((~my_umask) & 0666));
++  my_decimal_set_zero(&decimal_zero); // set decimal_zero constant;
++  tzset();			// Set tzname
++
++  max_system_variables.pseudo_thread_id= (ulong)~0;
++  server_start_time= flush_status_time= my_time(0);
++
++  rpl_filter= new Rpl_filter;
++  binlog_filter= new Rpl_filter;
++  if (!rpl_filter || !binlog_filter)
++  {
++    sql_perror("Could not allocate replication and binlog filters");
++    return 1;
++  }
++
++  if (init_thread_environment() ||
++      mysql_init_variables())
++    return 1;
++
++#ifdef HAVE_TZNAME
++  {
++    struct tm tm_tmp;
++    localtime_r(&server_start_time,&tm_tmp);
++    strmake(system_time_zone, tzname[tm_tmp.tm_isdst != 0 ? 1 : 0],
++            sizeof(system_time_zone)-1);
++
++ }
++#endif
++  /*
++    We set SYSTEM time zone as reasonable default and
++    also for failure of my_tz_init() and bootstrap mode.
++    If user explicitly set time zone with --default-time-zone
++    option we will change this value in my_tz_init().
++  */
++  global_system_variables.time_zone= my_tz_SYSTEM;
++
++  /*
++    Init mutexes for the global MYSQL_BIN_LOG objects.
++    As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of
++    global MYSQL_BIN_LOGs in their constructors, because then they would be
++    inited before MY_INIT(). So we do it here.
++  */
++  mysql_bin_log.init_pthread_objects();
++
++  /* TODO: remove this when my_time_t is 64 bit compatible */
++  if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time))
++  {
++    sql_print_error("This MySQL server doesn't support dates later than 2038");
++    return 1;
++  }
++
++  if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0)
++  {
++    strmake(glob_hostname, STRING_WITH_LEN("localhost"));
++    sql_print_warning("gethostname failed, using '%s' as hostname",
++                      glob_hostname);
++    strmake(pidfile_name, STRING_WITH_LEN("mysql"));
++  }
++  else
++  strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5);
++  strmov(fn_ext(pidfile_name),".pid");		// Add proper extension
++
++  /*
++    Add server status variables to the dynamic list of
++    status variables that is shown by SHOW STATUS.
++    Later, in plugin_init, and mysql_install_plugin
++    new entries could be added to that list.
++  */
++  if (add_status_vars(status_vars))
++    return 1; // an error was already reported
++
++#ifndef DBUG_OFF
++  /*
++    We have few debug-only commands in com_status_vars, only visible in debug
++    builds. for simplicity we enable the assert only in debug builds
++
++    There are 8 Com_ variables which don't have corresponding SQLCOM_ values:
++    (TODO strictly speaking they shouldn't be here, should not have Com_ prefix
++    that is. Perhaps Stmt_ ? Comstmt_ ? Prepstmt_ ?)
++
++      Com_admin_commands       => com_other
++      Com_stmt_close           => com_stmt_close
++      Com_stmt_execute         => com_stmt_execute
++      Com_stmt_fetch           => com_stmt_fetch
++      Com_stmt_prepare         => com_stmt_prepare
++      Com_stmt_reprepare       => com_stmt_reprepare
++      Com_stmt_reset           => com_stmt_reset
++      Com_stmt_send_long_data  => com_stmt_send_long_data
++
++    With this correction the number of Com_ variables (number of elements in
++    the array, excluding the last element - terminator) must match the number
++    of SQLCOM_ constants.
++  */
++  compile_time_assert(sizeof(com_status_vars)/sizeof(com_status_vars[0]) - 1 ==
++                     SQLCOM_END + 8);
++#endif
++
++  orig_argc=argc;
++  orig_argv=argv;
++  load_defaults(conf_file_name, groups, &argc, &argv);
++  defaults_argv=argv;
++  defaults_argc=argc;
++  if (get_options(&defaults_argc, defaults_argv))
++    return 1;
++  set_server_version();
++
++  DBUG_PRINT("info",("%s  Ver %s for %s on %s\n",my_progname,
++		     server_version, SYSTEM_TYPE,MACHINE_TYPE));
++
++#ifdef HAVE_LARGE_PAGES
++  /* Initialize large page size */
++  if (opt_large_pages && (opt_large_page_size= my_get_large_page_size()))
++  {
++      my_use_large_pages= 1;
++      my_large_page_size= opt_large_page_size;
++  }
++#endif /* HAVE_LARGE_PAGES */
++
++  /* connections and databases needs lots of files */
++  {
++    uint files, wanted_files, max_open_files;
++
++    /* MyISAM requires two file handles per table. */
++    wanted_files= 10+max_connections+table_cache_size*2;
++    /*
++      We are trying to allocate no less than max_connections*5 file
++      handles (i.e. we are trying to set the limit so that they will
++      be available).  In addition, we allocate no less than how much
++      was already allocated.  However below we report a warning and
++      recompute values only if we got less file handles than were
++      explicitly requested.  No warning and re-computation occur if we
++      can't get max_connections*5 but still got no less than was
++      requested (value of wanted_files).
++    */
++    max_open_files= MYSQL_MAX(MYSQL_MAX(wanted_files, max_connections*5),
++                        open_files_limit);
++    files= my_set_max_open_files(max_open_files);
++
++    if (files < wanted_files)
++    {
++      if (!open_files_limit)
++      {
++        /*
++          If we have requested too many file handles, we bring
++          max_connections into supported bounds.
++        */
++        max_connections= (ulong) MYSQL_MIN(files-10-TABLE_OPEN_CACHE_MIN*2,
++                                     max_connections);
++        /*
++          Decrease table_cache_size according to max_connections, but
++          not below TABLE_OPEN_CACHE_MIN.  Outer min() ensures that we
++          never increase table_cache_size automatically (that could
++          happen if max_connections is decreased above).
++        */
++        table_cache_size= (ulong) MYSQL_MIN(MYSQL_MAX((files-10-max_connections)/2,
++                                          TABLE_OPEN_CACHE_MIN),
++                                      table_cache_size);
++	DBUG_PRINT("warning",
++		   ("Changed limits: max_open_files: %u  max_connections: %ld  table_cache: %ld",
++		    files, max_connections, table_cache_size));
++	if (global_system_variables.log_warnings)
++	  sql_print_warning("Changed limits: max_open_files: %u  max_connections: %ld  table_cache: %ld",
++			files, max_connections, table_cache_size);
++      }
++      else if (global_system_variables.log_warnings)
++	sql_print_warning("Could not increase number of max_open_files to more than %u (request: %u)", files, wanted_files);
++    }
++    open_files_limit= files;
++  }
++  unireg_init(opt_specialflag); /* Set up extern variables */
++  if (init_errmessage())	/* Read error messages from file */
++    return 1;
++  init_client_errs();
++  lex_init();
++  if (item_create_init())
++    return 1;
++  item_init();
++  if (set_var_init())
++    return 1;
++#ifdef HAVE_REPLICATION
++  if (init_replication_sys_vars())
++    return 1;
++#endif
++  mysys_uses_curses=0;
++#ifdef USE_REGEX
++#ifndef EMBEDDED_LIBRARY
++  my_regex_init(&my_charset_latin1, check_enough_stack_size);
++#else
++  my_regex_init(&my_charset_latin1, NULL);
++#endif
++#endif
++  /*
++    Process a comma-separated character set list and choose
++    the first available character set. This is mostly for
++    test purposes, to be able to start "mysqld" even if
++    the requested character set is not available (see bug#18743).
++  */
++  for (;;)
++  {
++    char *next_character_set_name= strchr(default_character_set_name, ',');
++    if (next_character_set_name)
++      *next_character_set_name++= '\0';
++    if (!(default_charset_info=
++          get_charset_by_csname(default_character_set_name,
++                                MY_CS_PRIMARY, MYF(MY_WME))))
++    {
++      if (next_character_set_name)
++      {
++        default_character_set_name= next_character_set_name;
++        default_collation_name= 0;          // Ignore collation
++      }
++      else
++        return 1;                           // End of the list
++    }
++    else
++      break;
++  }
++
++  if (default_collation_name)
++  {
++    CHARSET_INFO *default_collation;
++    default_collation= get_charset_by_name(default_collation_name, MYF(0));
++    if (!default_collation)
++    {
++      sql_print_error(ER(ER_UNKNOWN_COLLATION), default_collation_name);
++      return 1;
++    }
++    if (!my_charset_same(default_charset_info, default_collation))
++    {
++      sql_print_error(ER(ER_COLLATION_CHARSET_MISMATCH),
++		      default_collation_name,
++		      default_charset_info->csname);
++      return 1;
++    }
++    default_charset_info= default_collation;
++  }
++  /* Set collations that depend on the default collation */
++  global_system_variables.collation_server=	 default_charset_info;
++  global_system_variables.collation_database=	 default_charset_info;
++  global_system_variables.collation_connection=  default_charset_info;
++  global_system_variables.character_set_results= default_charset_info;
++  global_system_variables.character_set_client= default_charset_info;
++
++  if (!(character_set_filesystem= 
++        get_charset_by_csname(character_set_filesystem_name,
++                              MY_CS_PRIMARY, MYF(MY_WME))))
++    return 1;
++  global_system_variables.character_set_filesystem= character_set_filesystem;
++
++  if (!(my_default_lc_time_names=
++        my_locale_by_name(lc_time_names_name)))
++  {
++    sql_print_error("Unknown locale: '%s'", lc_time_names_name);
++    return 1;
++  }
++  global_system_variables.lc_time_names= my_default_lc_time_names;
++  
++  sys_init_connect.value_length= 0;
++  if ((sys_init_connect.value= opt_init_connect))
++    sys_init_connect.value_length= strlen(opt_init_connect);
++  else
++    sys_init_connect.value=my_strdup("",MYF(0));
++  sys_init_connect.is_os_charset= TRUE;
++
++  sys_init_slave.value_length= 0;
++  if ((sys_init_slave.value= opt_init_slave))
++    sys_init_slave.value_length= strlen(opt_init_slave);
++  else
++    sys_init_slave.value=my_strdup("",MYF(0));
++  sys_init_slave.is_os_charset= TRUE;
++
++  /* check log options and issue warnings if needed */
++  if (opt_log && opt_logname && !(log_output_options & LOG_FILE) &&
++      !(log_output_options & LOG_NONE))
++    sql_print_warning("Although a path was specified for the "
++                      "--log option, log tables are used. "
++                      "To enable logging to files use the --log-output option.");
++
++  if (opt_slow_log && opt_slow_logname && !(log_output_options & LOG_FILE)
++      && !(log_output_options & LOG_NONE))
++    sql_print_warning("Although a path was specified for the "
++                      "--log_slow_queries option, log tables are used. "
++                      "To enable logging to files use the --log-output=file option.");
++
++  s= opt_logname ? opt_logname : make_default_log_name(buff, ".log");
++  sys_var_general_log_path.value= my_strdup(s, MYF(0));
++  sys_var_general_log_path.value_length= strlen(s);
++
++  s= opt_slow_logname ? opt_slow_logname : make_default_log_name(buff, "-slow.log");
++  sys_var_slow_log_path.value= my_strdup(s, MYF(0));
++  sys_var_slow_log_path.value_length= strlen(s);
++
++#if defined(ENABLED_DEBUG_SYNC)
++  /* Initialize the debug sync facility. See debug_sync.cc. */
++  if (debug_sync_init())
++    return 1; /* purecov: tested */
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++
++#if (ENABLE_TEMP_POOL)
++  if (use_temp_pool && bitmap_init(&temp_pool,0,1024,1))
++    return 1;
++#else
++  use_temp_pool= 0;
++#endif
++
++  if (my_database_names_init())
++    return 1;
++
++  /*
++    Ensure that lower_case_table_names is set on systems where we have case
++    insensitive names.  If this is not done, the user's MyISAM tables will
++    get corrupted if accessed with names of different case.
++  */
++  DBUG_PRINT("info", ("lower_case_table_names: %d", lower_case_table_names));
++  lower_case_file_system= test_if_case_insensitive(mysql_real_data_home);
++  if (!lower_case_table_names && lower_case_file_system == 1)
++  {
++    if (lower_case_table_names_used)
++    {
++      if (global_system_variables.log_warnings)
++	sql_print_warning("\
++You have forced lower_case_table_names to 0 through a command-line \
++option, even though your file system '%s' is case insensitive.  This means \
++that you can corrupt a MyISAM table by accessing it with different cases. \
++You should consider changing lower_case_table_names to 1 or 2",
++			mysql_real_data_home);
++    }
++    else
++    {
++      if (global_system_variables.log_warnings)
++	sql_print_warning("Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home);
++      lower_case_table_names= 2;
++    }
++  }
++  else if (lower_case_table_names == 2 &&
++           !(lower_case_file_system=
++             (test_if_case_insensitive(mysql_real_data_home) == 1)))
++  {
++    if (global_system_variables.log_warnings)
++      sql_print_warning("lower_case_table_names was set to 2, even though "
++                        "the file system '%s' is case sensitive.  Now setting "
++                        "lower_case_table_names to 0 to avoid future problems.",
++			mysql_real_data_home);
++    lower_case_table_names= 0;
++  }
++  else
++  {
++    lower_case_file_system=
++      (test_if_case_insensitive(mysql_real_data_home) == 1);
++  }
++
++  /* Reset table_alias_charset, now that lower_case_table_names is set. */
++  table_alias_charset= (lower_case_table_names ?
++			files_charset_info :
++			&my_charset_bin);
++
++  return 0;
++}
++
++
++static int init_thread_environment()
++{
++  (void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW);
++  (void) pthread_mutex_init(&LOCK_lock_db,MY_MUTEX_INIT_SLOW);
++  (void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW);
++  (void) pthread_mutex_init(&LOCK_open, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW);
++  (void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_error_log,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_delayed_insert,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_delayed_status,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_delayed_create,MY_MUTEX_INIT_SLOW);
++  (void) pthread_mutex_init(&LOCK_manager,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_crypt,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_bytes_sent,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_bytes_received,MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
++  (void) my_rwlock_init(&LOCK_system_variables_hash, NULL);
++  (void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
++  (void) pthread_mutex_init(&LOCK_connection_count, MY_MUTEX_INIT_FAST);
++#ifdef HAVE_OPENSSL
++  (void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
++#ifndef HAVE_YASSL
++  openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() *
++                                                     sizeof(openssl_lock_t));
++  for (int i= 0; i < CRYPTO_num_locks(); ++i)
++    (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL); 
++  CRYPTO_set_dynlock_create_callback(openssl_dynlock_create);
++  CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy);
++  CRYPTO_set_dynlock_lock_callback(openssl_lock);
++  CRYPTO_set_locking_callback(openssl_lock_function);
++  CRYPTO_set_id_callback(openssl_id_function);
++#endif
++#endif
++  (void) my_rwlock_init(&LOCK_sys_init_connect, NULL);
++  (void) my_rwlock_init(&LOCK_sys_init_slave, NULL);
++  (void) my_rwlock_init(&LOCK_grant, NULL);
++  (void) pthread_cond_init(&COND_thread_count,NULL);
++  (void) pthread_cond_init(&COND_refresh,NULL);
++  (void) pthread_cond_init(&COND_global_read_lock,NULL);
++  (void) pthread_cond_init(&COND_thread_cache,NULL);
++  (void) pthread_cond_init(&COND_flush_thread_cache,NULL);
++  (void) pthread_cond_init(&COND_manager,NULL);
++#ifdef HAVE_REPLICATION
++  (void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST);
++  (void) pthread_cond_init(&COND_rpl_status, NULL);
++#endif
++  (void) pthread_mutex_init(&LOCK_server_started, MY_MUTEX_INIT_FAST);
++  (void) pthread_cond_init(&COND_server_started,NULL);
++  sp_cache_init();
++#ifdef HAVE_EVENT_SCHEDULER
++  Events::init_mutexes();
++#endif
++  /* Parameter for threads created for connections */
++  (void) pthread_attr_init(&connection_attrib);
++  (void) pthread_attr_setdetachstate(&connection_attrib,
++				     PTHREAD_CREATE_DETACHED);
++  pthread_attr_setscope(&connection_attrib, PTHREAD_SCOPE_SYSTEM);
++  if (!(opt_specialflag & SPECIAL_NO_PRIOR))
++    my_pthread_attr_setprio(&connection_attrib,WAIT_PRIOR);
++
++  if (pthread_key_create(&THR_THD,NULL) ||
++      pthread_key_create(&THR_MALLOC,NULL))
++  {
++    sql_print_error("Can't create thread-keys");
++    return 1;
++  }
++  return 0;
++}
++
++
++#if defined(HAVE_OPENSSL) && !defined(HAVE_YASSL)
++static unsigned long openssl_id_function()
++{ 
++  return (unsigned long) pthread_self();
++} 
++
++
++static openssl_lock_t *openssl_dynlock_create(const char *file, int line)
++{ 
++  openssl_lock_t *lock= new openssl_lock_t;
++  my_rwlock_init(&lock->lock, NULL);
++  return lock;
++}
++
++
++static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file, 
++				    int line)
++{
++  rwlock_destroy(&lock->lock);
++  delete lock;
++}
++
++
++static void openssl_lock_function(int mode, int n, const char *file, int line)
++{
++  if (n < 0 || n > CRYPTO_num_locks())
++  {
++    /* Lock number out of bounds. */
++    sql_print_error("Fatal: OpenSSL interface problem (n = %d)", n);
++    abort();
++  }
++  openssl_lock(mode, &openssl_stdlocks[n], file, line);
++}
++
++
++static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, 
++			 int line)
++{
++  int err;
++  char const *what;
++
++  switch (mode) {
++  case CRYPTO_LOCK|CRYPTO_READ:
++    what = "read lock";
++    err = rw_rdlock(&lock->lock);
++    break;
++  case CRYPTO_LOCK|CRYPTO_WRITE:
++    what = "write lock";
++    err = rw_wrlock(&lock->lock);
++    break;
++  case CRYPTO_UNLOCK|CRYPTO_READ:
++  case CRYPTO_UNLOCK|CRYPTO_WRITE:
++    what = "unlock";
++    err = rw_unlock(&lock->lock);
++    break;
++  default:
++    /* Unknown locking mode. */
++    sql_print_error("Fatal: OpenSSL interface problem (mode=0x%x)", mode);
++    abort();
++  }
++  if (err) 
++  {
++    sql_print_error("Fatal: can't %s OpenSSL lock", what);
++    abort();
++  }
++}
++#endif /* HAVE_OPENSSL */
++
++
++#ifndef EMBEDDED_LIBRARY
++
++static void init_ssl()
++{
++#ifdef HAVE_OPENSSL
++  if (opt_use_ssl)
++  {
++    enum enum_ssl_init_error error= SSL_INITERR_NOERROR;
++
++    /* having ssl_acceptor_fd != 0 signals the use of SSL */
++    ssl_acceptor_fd= new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert,
++					  opt_ssl_ca, opt_ssl_capath,
++					  opt_ssl_cipher, &error);
++    DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd));
++    if (!ssl_acceptor_fd)
++    {
++      sql_print_warning("Failed to setup SSL");
++      sql_print_warning("SSL error: %s", sslGetErrString(error));
++      opt_use_ssl = 0;
++      have_ssl= SHOW_OPTION_DISABLED;
++    }
++  }
++  else
++  {
++    have_ssl= SHOW_OPTION_DISABLED;
++  }
++  if (des_key_file)
++    load_des_key_file(des_key_file);
++#endif /* HAVE_OPENSSL */
++}
++
++
++static void end_ssl()
++{
++#ifdef HAVE_OPENSSL
++  if (ssl_acceptor_fd)
++  {
++    free_vio_ssl_acceptor_fd(ssl_acceptor_fd);
++    ssl_acceptor_fd= 0;
++  }
++#endif /* HAVE_OPENSSL */
++}
++
++#endif /* EMBEDDED_LIBRARY */
++
++
++static int init_server_components()
++{
++  DBUG_ENTER("init_server_components");
++  /*
++    We need to call each of these following functions to ensure that
++    all things are initialized so that unireg_abort() doesn't fail
++  */
++  if (table_cache_init() | table_def_init() | hostname_cache_init())
++    unireg_abort(1);
++
++  query_cache_result_size_limit(query_cache_limit);
++  query_cache_set_min_res_unit(query_cache_min_res_unit);
++  query_cache_init();
++  query_cache_resize(query_cache_size);
++  randominit(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2);
++  setup_fpu();
++  init_thr_lock();
++#ifdef HAVE_REPLICATION
++  init_slave_list();
++#endif
++
++  /* Setup logs */
++
++  /*
++    Enable old-fashioned error log, except when the user has requested
++    help information. Since the implementation of plugin server
++    variables the help output is now written much later.
++  */
++  if (opt_error_log && !opt_help)
++  {
++    if (!log_error_file_ptr[0])
++      fn_format(log_error_file, pidfile_name, mysql_data_home, ".err",
++                MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */
++    else
++      fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
++                MY_UNPACK_FILENAME | MY_SAFE_PATH);
++    if (!log_error_file[0])
++      opt_error_log= 1;				// Too long file name
++    else
++    {
++      my_bool res;
++#ifndef EMBEDDED_LIBRARY
++      res= reopen_fstreams(log_error_file, stdout, stderr);
++#else
++      res= reopen_fstreams(log_error_file, NULL, stderr);
++#endif
++
++      if (!res)
++        setbuf(stderr, NULL);
++    }
++  }
++
++  if (xid_cache_init())
++  {
++    sql_print_error("Out of memory");
++    unireg_abort(1);
++  }
++
++  /* need to configure logging before initializing storage engines */
++  if (opt_update_log)
++  {
++    /*
++      Update log is removed since 5.0. But we still accept the option.
++      The idea is if the user already uses the binlog and the update log,
++      we completely ignore any option/variable related to the update log, as
++      if the update log did not exist. But if the user uses only the update
++      log, then we translate everything into binlog for him (with warnings).
++      Implementation of the above :
++      - If mysqld is started with --log-update and --log-bin,
++      ignore --log-update (print a warning), push a warning when SQL_LOG_UPDATE
++      is used, and turn off --sql-bin-update-same.
++      This will completely ignore SQL_LOG_UPDATE
++      - If mysqld is started with --log-update only,
++      change it to --log-bin (with the filename passed to log-update,
++      plus '-bin') (print a warning), push a warning when SQL_LOG_UPDATE is
++      used, and turn on --sql-bin-update-same.
++      This will translate SQL_LOG_UPDATE to SQL_LOG_BIN.
++
++      Note that we tell the user that --sql-bin-update-same is deprecated and
++      does nothing, and we don't take into account if he used this option or
++      not; but internally we give this variable a value to have the behaviour
++      we want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not).
++      As sql-bin-update-same, log-update and log-bin cannot be changed by the
++      user after starting the server (they are not variables), the user will
++      not later interfere with the settings we do here.
++    */
++    if (opt_bin_log)
++    {
++      opt_sql_bin_update= 0;
++      sql_print_error("The update log is no longer supported by MySQL in \
++version 5.0 and above. It is replaced by the binary log.");
++    }
++    else
++    {
++      opt_sql_bin_update= 1;
++      opt_bin_log= 1;
++      if (opt_update_logname)
++      {
++        /* as opt_bin_log==0, no need to free opt_bin_logname */
++        if (!(opt_bin_logname= my_strdup(opt_update_logname, MYF(MY_WME))))
++        {
++          sql_print_error("Out of memory");
++          return EXIT_OUT_OF_MEMORY;
++        }
++        sql_print_error("The update log is no longer supported by MySQL in \
++version 5.0 and above. It is replaced by the binary log. Now starting MySQL \
++with --log-bin='%s' instead.",opt_bin_logname);
++      }
++      else
++        sql_print_error("The update log is no longer supported by MySQL in \
++version 5.0 and above. It is replaced by the binary log. Now starting MySQL \
++with --log-bin instead.");
++    }
++  }
++  if (opt_log_slave_updates && !opt_bin_log)
++  {
++    sql_print_error("You need to use --log-bin to make "
++                    "--log-slave-updates work.");
++    unireg_abort(1);
++  }
++  if (!opt_bin_log)
++  {
++    if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC)
++    {
++      sql_print_error("You need to use --log-bin to make "
++                      "--binlog-format work.");
++      unireg_abort(1);
++    }
++    else
++    {
++      global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
++    }
++  }
++  else
++    if (opt_binlog_format_id == BINLOG_FORMAT_UNSPEC)
++      global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
++    else
++    { 
++      DBUG_ASSERT(global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC);
++    }
++
++  /* Check that we have not let the format to unspecified at this point */
++  DBUG_ASSERT((uint)global_system_variables.binlog_format <=
++              array_elements(binlog_format_names)-1);
++
++#ifdef HAVE_REPLICATION
++  if (opt_log_slave_updates && replicate_same_server_id)
++  {
++    sql_print_error("\
++using --replicate-same-server-id in conjunction with \
++--log-slave-updates is impossible, it would lead to infinite loops in this \
++server.");
++    unireg_abort(1);
++  }
++#endif
++
++  if (opt_bin_log)
++  {
++    /* Reports an error and aborts, if the --log-bin's path 
++       is a directory.*/
++    if (opt_bin_logname && 
++        opt_bin_logname[strlen(opt_bin_logname) - 1] == FN_LIBCHAR)
++    {
++      sql_print_error("Path '%s' is a directory name, please specify \
++a file name for --log-bin option", opt_bin_logname);
++      unireg_abort(1);
++    }
++
++    /* Reports an error and aborts, if the --log-bin-index's path 
++       is a directory.*/
++    if (opt_binlog_index_name && 
++        opt_binlog_index_name[strlen(opt_binlog_index_name) - 1] 
++        == FN_LIBCHAR)
++    {
++      sql_print_error("Path '%s' is a directory name, please specify \
++a file name for --log-bin-index option", opt_binlog_index_name);
++      unireg_abort(1);
++    }
++
++    char buf[FN_REFLEN];
++    const char *ln;
++    ln= mysql_bin_log.generate_name(opt_bin_logname, "-bin", 1, buf);
++    if (!opt_bin_logname && !opt_binlog_index_name)
++    {
++      /*
++        User didn't give us info to name the binlog index file.
++        Picking `hostname`-bin.index like we did in 4.x causes replication to
++        fail if the hostname is changed later. So, we would like to instead
++        require a name. But as we don't want to break many existing setups, we
++        only give warning, not error.
++      */
++      sql_print_warning("No argument was provided to --log-bin, and "
++                        "--log-bin-index was not used; so replication "
++                        "may break when this MySQL server acts as a "
++                        "master and has its hostname changed! Please "
++                        "use '--log-bin=%s' to avoid this problem.", ln);
++    }
++    if (ln == buf)
++    {
++      my_free(opt_bin_logname, MYF(MY_ALLOW_ZERO_PTR));
++      opt_bin_logname=my_strdup(buf, MYF(0));
++    }
++    if (mysql_bin_log.open_index_file(opt_binlog_index_name, ln, TRUE))
++    {
++      unireg_abort(1);
++    }
++  }
++
++  /* call ha_init_key_cache() on all key caches to init them */
++  process_key_caches(&ha_init_key_cache);
++
++  /* Allow storage engine to give real error messages */
++  if (ha_init_errors())
++    DBUG_RETURN(1);
++
++  { 
++    if (plugin_init(&defaults_argc, defaults_argv,
++		    (opt_noacl ? PLUGIN_INIT_SKIP_PLUGIN_TABLE : 0) |
++		    (opt_help ? PLUGIN_INIT_SKIP_INITIALIZATION : 0)))
++    {
++      sql_print_error("Failed to initialize plugins.");
++      unireg_abort(1);
++    }
++    plugins_are_initialized= TRUE;  /* Don't separate from init function */
++  }
++
++  if (opt_help)
++    unireg_abort(0);
++
++  /* we do want to exit if there are any other unknown options */
++  if (defaults_argc > 1)
++  {
++    int ho_error;
++    char **tmp_argv= defaults_argv;
++    struct my_option no_opts[]=
++    {
++      {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
++    };
++    /*
++      We need to eat any 'loose' arguments first before we conclude
++      that there are unprocessed options.
++      But we need to preserve defaults_argv pointer intact for
++      free_defaults() to work. Thus we use a copy here.
++    */
++    my_getopt_skip_unknown= 0;
++
++    if ((ho_error= handle_options(&defaults_argc, &tmp_argv, no_opts,
++                                  mysqld_get_one_option)))
++      unireg_abort(ho_error);
++    my_getopt_skip_unknown= TRUE;
++
++    if (defaults_argc)
++    {
++      fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n"
++              "Use --verbose --help to get a list of available options\n",
++              my_progname, *tmp_argv);
++      unireg_abort(1);
++    }
++  }
++
++  /* If errmsg.sys is not loaded, terminate to maintain the existing behaviour */
++  if (!errmesg[0][0])
++    unireg_abort(1);
++
++  /* We have to initialize the storage engines before CSV logging */
++  if (ha_init())
++  {
++    sql_print_error("Can't init databases");
++    unireg_abort(1);
++  }
++
++#ifdef WITH_CSV_STORAGE_ENGINE
++  if (opt_bootstrap)
++    log_output_options= LOG_FILE;
++  else
++    logger.init_log_tables();
++
++  if (log_output_options & LOG_NONE)
++  {
++    /*
++      Issue a warning if additional options were specified for
++      log-output along with NONE. Probably this wasn't what the user wanted.
++    */
++    if ((log_output_options & LOG_NONE) && (log_output_options & ~LOG_NONE))
++      sql_print_warning("There were other values specified to "
++                        "log-output besides NONE. Disabling slow "
++                        "and general logs anyway.");
++    logger.set_handlers(LOG_FILE, LOG_NONE, LOG_NONE);
++  }
++  else
++  {
++    /* fall back to the log files if tables are not present */
++    LEX_STRING csv_name={C_STRING_WITH_LEN("csv")};
++    if (!plugin_is_ready(&csv_name, MYSQL_STORAGE_ENGINE_PLUGIN))
++    {
++      /* purecov: begin inspected */
++      sql_print_error("CSV engine is not present, falling back to the "
++                      "log files");
++      log_output_options= (log_output_options & ~LOG_TABLE) | LOG_FILE;
++      /* purecov: end */
++    }
++
++    logger.set_handlers(LOG_FILE, opt_slow_log ? log_output_options:LOG_NONE,
++                        opt_log ? log_output_options:LOG_NONE);
++  }
++#else
++  logger.set_handlers(LOG_FILE, opt_slow_log ? LOG_FILE:LOG_NONE,
++                      opt_log ? LOG_FILE:LOG_NONE);
++#endif
++
++  /*
++    Check that the default storage engine is actually available.
++  */
++  if (default_storage_engine_str)
++  {
++    LEX_STRING name= { default_storage_engine_str,
++                       strlen(default_storage_engine_str) };
++    plugin_ref plugin;
++    handlerton *hton;
++    
++    if ((plugin= ha_resolve_by_name(0, &name)))
++      hton= plugin_data(plugin, handlerton*);
++    else
++    {
++      sql_print_error("Unknown/unsupported table type: %s",
++                      default_storage_engine_str);
++      unireg_abort(1);
++    }
++    if (!ha_storage_engine_is_enabled(hton))
++    {
++      if (!opt_bootstrap)
++      {
++        sql_print_error("Default storage engine (%s) is not available",
++                        default_storage_engine_str);
++        unireg_abort(1);
++      }
++      DBUG_ASSERT(global_system_variables.table_plugin);
++    }
++    else
++    {
++      /*
++        Need to unlock as global_system_variables.table_plugin 
++        was acquired during plugin_init()
++      */
++      plugin_unlock(0, global_system_variables.table_plugin);
++      global_system_variables.table_plugin= plugin;
++    }
++  }
++
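++  /*
++    Pick the transaction coordinator log: with more than one 2PC-capable
++    engine registered, use the binary log if it is enabled, otherwise the
++    mmap-based log; with at most one such engine a dummy log suffices.
++  */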
++  tc_log= (total_ha_2pc > 1 ? (opt_bin_log  ?
++                               (TC_LOG *) &mysql_bin_log :
++                               (TC_LOG *) &tc_log_mmap) :
++           (TC_LOG *) &tc_log_dummy);
++
++  if (tc_log->open(opt_bin_log ? opt_bin_logname : opt_tc_log_file))
++  {
++    sql_print_error("Can't init tc log");
++    unireg_abort(1);
++  }
++
++  if (ha_recover(0))
++  {
++    unireg_abort(1);
++  }
++
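++  /* Open the binary log for writing, now that the engines have been initialized and recovered. */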
++  if (opt_bin_log && mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0,
++                                        WRITE_CACHE, 0, max_binlog_size, 0, TRUE))
++    unireg_abort(1);
++
++#ifdef HAVE_REPLICATION
++  if (opt_bin_log && expire_logs_days)
++  {
++    time_t purge_time= server_start_time - expire_logs_days*24*60*60;
++    if (purge_time >= 0)
++      mysql_bin_log.purge_logs_before_date(purge_time);
++  }
++#endif
++#ifdef __NETWARE__
++  /* Increasing stacksize of threads on NetWare */
++  pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE);
++#endif
++
++  if (opt_myisam_log)
++    (void) mi_log(1);
++
++#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && !defined(EMBEDDED_LIBRARY)
++  if (locked_in_memory && !getuid())
++  {
++    if (setreuid((uid_t)-1, 0) == -1)
++    {                        // this should never happen
++      sql_perror("setreuid");
++      unireg_abort(1);
++    }
++    if (mlockall(MCL_CURRENT))
++    {
++      if (global_system_variables.log_warnings)
++	sql_print_warning("Failed to lock memory. Errno: %d\n",errno);
++      locked_in_memory= 0;
++    }
++    if (user_info)
++      set_user(mysqld_user, user_info);
++  }
++  else
++#endif
++    locked_in_memory=0;
++
++  ft_init_stopwords();
++
++  init_max_user_conn();
++  init_update_queries();
++  DBUG_RETURN(0);
++}
++
++
++#ifndef EMBEDDED_LIBRARY
++
++static void create_shutdown_thread()
++{
++#ifdef __WIN__
++  hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name);
++  pthread_t hThread;
++  if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0))
++    sql_print_warning("Can't create thread to handle shutdown requests");
++
++  // On "Stop Service" we have to do regular shutdown
++  Service.SetShutdownEvent(hEventShutdown);
++#endif /* __WIN__ */
++}
++
++#endif /* EMBEDDED_LIBRARY */
++
++
++#if (defined(__NT__) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
++static void handle_connections_methods()
++{
++  pthread_t hThread;
++  DBUG_ENTER("handle_connections_methods");
++#ifdef __NT__
++  if (hPipe == INVALID_HANDLE_VALUE &&
++      (!have_tcpip || opt_disable_networking) &&
++      !opt_enable_shared_memory)
++  {
++    sql_print_error("TCP/IP, --shared-memory, or --named-pipe should be configured on NT OS");
++    unireg_abort(1);				// Will not return
++  }
++#endif
++
++  pthread_mutex_lock(&LOCK_thread_count);
++  (void) pthread_cond_init(&COND_handler_count,NULL);
++  handler_count=0;
++#ifdef __NT__
++  if (hPipe != INVALID_HANDLE_VALUE)
++  {
++    handler_count++;
++    if (pthread_create(&hThread,&connection_attrib,
++		       handle_connections_namedpipes, 0))
++    {
++      sql_print_warning("Can't create thread to handle named pipes");
++      handler_count--;
++    }
++  }
++#endif /* __NT__ */
++  if (have_tcpip && !opt_disable_networking)
++  {
++    handler_count++;
++    if (pthread_create(&hThread,&connection_attrib,
++		       handle_connections_sockets, 0))
++    {
++      sql_print_warning("Can't create thread to handle TCP/IP");
++      handler_count--;
++    }
++  }
++#ifdef HAVE_SMEM
++  if (opt_enable_shared_memory)
++  {
++    handler_count++;
++    if (pthread_create(&hThread,&connection_attrib,
++		       handle_connections_shared_memory, 0))
++    {
++      sql_print_warning("Can't create thread to handle shared memory");
++      handler_count--;
++    }
++  }
++#endif 
++
++  while (handler_count > 0)
++    pthread_cond_wait(&COND_handler_count,&LOCK_thread_count);
++  pthread_mutex_unlock(&LOCK_thread_count);
++  DBUG_VOID_RETURN;
++}
++
++void decrement_handler_count()
++{
++  pthread_mutex_lock(&LOCK_thread_count);
++  handler_count--;
++  pthread_cond_signal(&COND_handler_count);
++  pthread_mutex_unlock(&LOCK_thread_count);  
++  my_thread_end();
++}
++#else
++#define decrement_handler_count()
++#endif /* defined(__NT__) || defined(HAVE_SMEM) */
++
++
++#ifndef EMBEDDED_LIBRARY
++#ifndef DBUG_OFF
++/*
++  Debugging helper function to keep the locale database
++  (see sql_locale.cc) and max_month_name_length and
++  max_day_name_length variable values in a consistent state.
++*/
++static void test_lc_time_sz()
++{
++  DBUG_ENTER("test_lc_time_sz");
++  for (MY_LOCALE **loc= my_locales; *loc; loc++)
++  {
++    uint max_month_len= 0;
++    uint max_day_len = 0;
++    for (const char **month= (*loc)->month_names->type_names; *month; month++)
++    {
++      set_if_bigger(max_month_len,
++                    my_numchars_mb(&my_charset_utf8_general_ci,
++                                   *month, *month + strlen(*month)));
++    }
++    for (const char **day= (*loc)->day_names->type_names; *day; day++)
++    {
++      set_if_bigger(max_day_len,
++                    my_numchars_mb(&my_charset_utf8_general_ci,
++                                   *day, *day + strlen(*day)));
++    }
++    if ((*loc)->max_month_name_length != max_month_len ||
++        (*loc)->max_day_name_length != max_day_len)
++    {
++      DBUG_PRINT("Wrong max day name(or month name) length for locale:",
++                 ("%s", (*loc)->name));
++      DBUG_ASSERT(0);
++    }
++  }
++  DBUG_VOID_RETURN;
++}
++#endif//DBUG_OFF
++
++
++#ifdef __WIN__
++int win_main(int argc, char **argv)
++#else
++int main(int argc, char **argv)
++#endif
++{
++  MY_INIT(argv[0]);		// init my_sys library & pthreads
++  /* nothing should come before this line ^^^ */
++
++  /* Set signal used to kill MySQL */
++#if defined(SIGUSR2)
++  thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2;
++#else
++  thr_kill_signal= SIGINT;
++#endif
++
++  /*
++    Perform basic logger initialization. Should be called after
++    MY_INIT, as it initializes mutexes. Log tables are initialized later.
++  */
++  logger.init_base();
++
++#ifdef _CUSTOMSTARTUPCONFIG_
++  if (_cust_check_startup())
++  {
++    /* _cust_check_startup will report startup failure error */
++    exit(1);
++  }
++#endif
++
++#ifdef	__WIN__
++  /*
++    Before performing any socket operation (like retrieving the hostname
++    in init_common_variables) we have to call WSAStartup.
++  */
++  {
++    WSADATA WsaData;
++    if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData))
++    {
++      /* errors are not read yet, so we use english text here */
++      my_message(ER_WSAS_FAILED, "WSAStartup Failed", MYF(0));
++      unireg_abort(1);
++    }
++  }
++#endif /* __WIN__ */
++
++  if (init_common_variables(MYSQL_CONFIG_NAME,
++			    argc, argv, load_default_groups))
++    unireg_abort(1);				// Will do exit
++
++  init_signals();
++  if (!(opt_specialflag & SPECIAL_NO_PRIOR))
++    my_pthread_setprio(pthread_self(),CONNECT_PRIOR);
++#if defined(__ia64__) || defined(__ia64)
++  /*
++    Peculiar things with ia64 platforms - it seems we only have half the
++    stack size in reality, so we have to double it here
++  */
++  pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size*2);
++#else
++  pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size);
++#endif
++#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE
++  {
++    /* Retrieve used stack size;  Needed for checking stack overflows */
++    size_t stack_size= 0;
++    pthread_attr_getstacksize(&connection_attrib, &stack_size);
++#if defined(__ia64__) || defined(__ia64)
++    stack_size/= 2;
++#endif
++    /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */
++    if (stack_size && stack_size < my_thread_stack_size)
++    {
++      if (global_system_variables.log_warnings)
++	sql_print_warning("Asked for %lu thread stack, but got %ld",
++			  my_thread_stack_size, (long) stack_size);
++#if defined(__ia64__) || defined(__ia64)
++      my_thread_stack_size= stack_size*2;
++#else
++      my_thread_stack_size= stack_size;
++#endif
++    }
++  }
++#endif
++#ifdef __NETWARE__
++  /* Increasing stacksize of threads on NetWare */
++  pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE);
++#endif
++
++  (void) thr_setconcurrency(concurrency);	// 10 by default
++
++  select_thread=pthread_self();
++  select_thread_in_use=1;
++
++#ifdef HAVE_LIBWRAP
++  libwrapName= my_progname+dirname_length(my_progname);
++  openlog(libwrapName, LOG_PID, LOG_AUTH);
++#endif
++
++#ifndef DBUG_OFF
++  test_lc_time_sz();
++#endif
++
++  /*
++    We have enough space for fiddling with the argv, continue
++  */
++  check_data_home(mysql_real_data_home);
++  if (my_setwd(mysql_real_data_home,MYF(MY_WME)) && !opt_help)
++    unireg_abort(1);				/* purecov: inspected */
++  mysql_data_home= mysql_data_home_buff;
++  mysql_data_home[0]=FN_CURLIB;		// all paths are relative from here
++  mysql_data_home[1]=0;
++  mysql_data_home_len= 2;
++
++  if ((user_info= check_user(mysqld_user)))
++  {
++#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT)
++    if (locked_in_memory) // getuid() == 0 here
++      set_effective_user(user_info);
++    else
++#endif
++      set_user(mysqld_user, user_info);
++  }
++
++  if (opt_bin_log && !server_id)
++  {
++    server_id= !master_host ? 1 : 2;
++#ifdef EXTRA_DEBUG
++    switch (server_id) {
++    case 1:
++      sql_print_warning("\
++You have enabled the binary log, but you haven't set server-id to \
++a non-zero value: we force server id to 1; updates will be logged to the \
++binary log, but connections from slaves will not be accepted.");
++      break;
++    case 2:
++      sql_print_warning("\
++You should set server-id to a non-0 value if master_host is set; \
++we force server id to 2, but this MySQL server will not act as a slave.");
++      break;
++    }
++#endif
++  }
++
++  if (init_server_components())
++    unireg_abort(1);
++
++  init_ssl();
++  network_init();
++
++#ifdef __WIN__
++  if (!opt_console)
++  {
++    if (reopen_fstreams(log_error_file, stdout, stderr))
++      unireg_abort(1);
++    setbuf(stderr, NULL);
++    FreeConsole();				// Remove window
++  }
++#endif
++
++  /*
++   Initialize my_str_malloc() and my_str_free()
++  */
++  my_str_malloc= &my_str_malloc_mysqld;
++  my_str_free= &my_str_free_mysqld;
++
++  /*
++    init signals & alarm
++    After this we can't quit by a simple unireg_abort
++  */
++  error_handler_hook= my_message_sql;
++  start_signal_handler();				// Creates pidfile
++
++  if (mysql_rm_tmp_tables() || acl_init(opt_noacl) ||
++      my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
++  {
++    abort_loop=1;
++    select_thread_in_use=0;
++#ifndef __NETWARE__
++    (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL);
++#endif /* __NETWARE__ */
++
++    if (!opt_bootstrap)
++      (void) my_delete(pidfile_name,MYF(MY_WME));	// Not needed anymore
++
++    if (unix_sock != INVALID_SOCKET)
++      unlink(mysqld_unix_port);
++    exit(1);
++  }
++  if (!opt_noacl)
++    (void) grant_init();
++
++  if (!opt_bootstrap)
++    servers_init(0);
++
++  if (!opt_noacl)
++  {
++#ifdef HAVE_DLOPEN
++    udf_init();
++#endif
++  }
++
++  init_status_vars();
++  if (opt_bootstrap) /* If running with bootstrap, do not start replication. */
++    opt_skip_slave_start= 1;
++  /*
++    init_slave() must be called after the thread keys are created.
++    Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other
++    places) assume that active_mi != 0, so let's fail if it's 0 (out of
++    memory); a message has already been printed.
++  */
++  if (init_slave() && !active_mi)
++  {
++    unireg_abort(1);
++  }
++
++  execute_ddl_log_recovery();
++
++  if (Events::init(opt_noacl || opt_bootstrap))
++    unireg_abort(1);
++
++  if (opt_bootstrap)
++  {
++    select_thread_in_use= 0;                    // Allow 'kill' to work
++    bootstrap(stdin);
++    unireg_abort(bootstrap_error ? 1 : 0);
++  }
++  if (opt_init_file)
++  {
++    if (read_init_file(opt_init_file))
++      unireg_abort(1);
++  }
++
++  create_shutdown_thread();
++  start_handle_manager();
++
++  sql_print_information(ER(ER_STARTUP),my_progname,server_version,
++                        ((unix_sock == INVALID_SOCKET) ? (char*) ""
++                                                       : mysqld_unix_port),
++                         mysqld_port,
++                         MYSQL_COMPILATION_COMMENT);
++#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY)
++  Service.SetRunning();
++#endif
++
++
++  /* Signal threads waiting for server to be started */
++  pthread_mutex_lock(&LOCK_server_started);
++  mysqld_server_started= 1;
++  pthread_cond_signal(&COND_server_started);
++  pthread_mutex_unlock(&LOCK_server_started);
++
++#if defined(__NT__) || defined(HAVE_SMEM)
++  handle_connections_methods();
++#else
++#ifdef __WIN__
++  if (!have_tcpip || opt_disable_networking)
++  {
++    sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces");
++    unireg_abort(1);
++  }
++#endif
++  handle_connections_sockets(0);
++#endif /* __NT__ */
++
++  /* (void) pthread_attr_destroy(&connection_attrib); */
++  
++  DBUG_PRINT("quit",("Exiting main thread"));
++
++#ifndef __WIN__
++#ifdef EXTRA_DEBUG2
++  sql_print_error("Before Lock_thread_count");
++#endif
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  DBUG_PRINT("quit", ("Got thread_count mutex"));
++  select_thread_in_use=0;			// For close_connections
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++  (void) pthread_cond_broadcast(&COND_thread_count);
++#ifdef EXTRA_DEBUG2
++  sql_print_error("After lock_thread_count");
++#endif
++#endif /* __WIN__ */
++
++  /* Wait until cleanup is done */
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  while (!ready_to_exit)
++    pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++
++#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
++  if (Service.IsNT() && start_mode)
++    Service.Stop();
++  else
++  {
++    Service.SetShutdownEvent(0);
++    if (hEventShutdown)
++      CloseHandle(hEventShutdown);
++  }
++#endif
++  clean_up(1);
++  wait_for_signal_thread_to_end();
++  clean_up_mutexes();
++  my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
++
++  exit(0);
++  return(0);					/* purecov: deadcode */
++}
++
++#endif /* EMBEDDED_LIBRARY */
++
++
++/****************************************************************************
++  Main and thread entry function for Win32
++  (all this is needed only to run mysqld as a service on WinNT)
++****************************************************************************/
++
++#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
++int mysql_service(void *p)
++{
++  if (use_opt_args)
++    win_main(opt_argc, opt_argv);
++  else
++    win_main(Service.my_argc, Service.my_argv);
++  return 0;
++}
++
++
++/* Quote string if it contains space, else copy */
++
++static char *add_quoted_string(char *to, const char *from, char *to_end)
++{
++  uint length= (uint) (to_end-to);
++
++  if (!strchr(from, ' '))
++    return strmake(to, from, length-1);
++  return strxnmov(to, length-1, "\"", from, "\"", NullS);
++}
++
++
++/**
++  Basic handling of services, like installation and removal.
++
++  @param argv	   	        Pointer to argument list
++  @param servicename		Internal name of service
++  @param displayname		Display name of service (in taskbar ?)
++  @param file_path		Path to this program
++  @param extra_opt		Extra option to pass to mysqld (e.g. --defaults-file=file)
++  @param account_name		Name of the Windows account to run the service under
++
++  @retval
++    0		option handled
++  @retval
++    1		Could not handle option
++*/
++
++static bool
++default_service_handling(char **argv,
++			 const char *servicename,
++			 const char *displayname,
++			 const char *file_path,
++			 const char *extra_opt,
++			 const char *account_name)
++{
++  char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end;
++  const char *opt_delim;
++  end= path_and_service + sizeof(path_and_service)-3;
++
++  /* We have to quote filename if it contains spaces */
++  pos= add_quoted_string(path_and_service, file_path, end);
++  if (*extra_opt)
++  {
++    /* 
++     Add the option after file_path. There will be zero or one extra option. It's
++     assumed to be --defaults-file=file but isn't checked. The value (not
++     the option name) should be quoted if it contains spaces.
++    */
++    *pos++= ' ';
++    if ((opt_delim= strchr(extra_opt, '=')))
++    {
++      size_t length= ++opt_delim - extra_opt;
++      pos= strnmov(pos, extra_opt, length);
++    }
++    else
++      opt_delim= extra_opt;
++    
++    pos= add_quoted_string(pos, opt_delim, end);
++  }
++  /* We must have servicename last */
++  *pos++= ' ';
++  (void) add_quoted_string(pos, servicename, end);
++
++  if (Service.got_service_option(argv, "install"))
++  {
++    Service.Install(1, servicename, displayname, path_and_service,
++                    account_name);
++    return 0;
++  }
++  if (Service.got_service_option(argv, "install-manual"))
++  {
++    Service.Install(0, servicename, displayname, path_and_service,
++                    account_name);
++    return 0;
++  }
++  if (Service.got_service_option(argv, "remove"))
++  {
++    Service.Remove(servicename);
++    return 0;
++  }
++  return 1;
++}
++
++
++int main(int argc, char **argv)
++{
++  /*
++    When several instances are running on the same machine, we
++    need a unique hEventShutdown name derived from the
++    application PID, e.g.: MySQLShutdown1890; MySQLShutdown2342
++  */
++  int10_to_str((int) GetCurrentProcessId(),strmov(shutdown_event_name,
++                                                  "MySQLShutdown"), 10);
++
++  /* Must be initialized early for comparison of service name */
++  system_charset_info= &my_charset_utf8_general_ci;
++
++  if (Service.GetOS())	/* true NT family */
++  {
++    char file_path[FN_REFLEN];
++    my_path(file_path, argv[0], "");		      /* Find name in path */
++    fn_format(file_path,argv[0],file_path,"",
++	      MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_RESOLVE_SYMLINKS);
++
++    if (argc == 2)
++    {
++      if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME,
++				   file_path, "", NULL))
++	return 0;
++      if (Service.IsService(argv[1]))        /* Start an optional service */
++      {
++	/*
++	  Only add the service name to the groups read from the config file
++	  if it's not "MySQL". (The default service name should be 'mysqld'
++	  but we started a bad tradition by calling it MySQL from the start
++	  and we are now stuck with it.)
++	*/
++	if (my_strcasecmp(system_charset_info, argv[1],"mysql"))
++	  load_default_groups[load_default_groups_sz-2]= argv[1];
++        start_mode= 1;
++        Service.Init(argv[1], mysql_service);
++        return 0;
++      }
++    }
++    else if (argc == 3) /* install or remove any optional service */
++    {
++      if (!default_service_handling(argv, argv[2], argv[2], file_path, "",
++                                    NULL))
++	return 0;
++      if (Service.IsService(argv[2]))
++      {
++	/*
++	  mysqld was started as
++	  mysqld --defaults-file=my_path\my.ini service-name
++	*/
++	use_opt_args=1;
++	opt_argc= 2;				// Skip service-name
++	opt_argv=argv;
++	start_mode= 1;
++	if (my_strcasecmp(system_charset_info, argv[2],"mysql"))
++	  load_default_groups[load_default_groups_sz-2]= argv[2];
++	Service.Init(argv[2], mysql_service);
++	return 0;
++      }
++    }
++    else if (argc == 4 || argc == 5)
++    {
++      /*
++        This may seem strange, because we handle --local-service while
++        preserving 4.1's behavior of allowing any one other argument that is
++        passed to the service on startup. (The assumption is that this is
++        --defaults-file=file, but that was not enforced in 4.1, so we don't
++        enforce it here.)
++      */
++      const char *extra_opt= NullS;
++      const char *account_name = NullS;
++      int index;
++      for (index = 3; index < argc; index++)
++      {
++        if (!strcmp(argv[index], "--local-service"))
++          account_name= "NT AUTHORITY\\LocalService";
++        else
++          extra_opt= argv[index];
++      }
++
++      if (argc == 4 || account_name)
++        if (!default_service_handling(argv, argv[2], argv[2], file_path,
++                                      extra_opt, account_name))
++          return 0;
++    }
++    else if (argc == 1 && Service.IsService(MYSQL_SERVICENAME))
++    {
++      /* start the default service */
++      start_mode= 1;
++      Service.Init(MYSQL_SERVICENAME, mysql_service);
++      return 0;
++    }
++  }
++  /* Start as standalone server */
++  Service.my_argc=argc;
++  Service.my_argv=argv;
++  mysql_service(NULL);
++  return 0;
++}
++#endif
++
++
++/**
++  Execute all commands from a file. Used by the mysql_install_db script to
++  create MySQL privilege tables without having to start a full MySQL server.
++*/
++
++static void bootstrap(FILE *file)
++{
++  DBUG_ENTER("bootstrap");
++
++  THD *thd= new THD;
++  thd->bootstrap=1;
++  my_net_init(&thd->net,(st_vio*) 0);
++  thd->max_client_packet_length= thd->net.max_packet;
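++  /* Give the bootstrap thread full privileges so it can create the privilege tables. */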
++  thd->security_ctx->master_access= ~(ulong)0;
++  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
++  thread_count++;
++  in_bootstrap= TRUE;
++
++  bootstrap_file=file;
++#ifndef EMBEDDED_LIBRARY			// TODO:  Enable this
++  if (pthread_create(&thd->real_id,&connection_attrib,handle_bootstrap,
++		     (void*) thd))
++  {
++    sql_print_warning("Can't create thread to handle bootstrap");
++    bootstrap_error=-1;
++    DBUG_VOID_RETURN;
++  }
++  /* Wait for thread to die */
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  while (in_bootstrap)
++  {
++    (void) pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
++    DBUG_PRINT("quit",("One thread died (count=%u)",thread_count));
++  }
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++#else
++  thd->mysql= 0;
++  handle_bootstrap((void *)thd);
++#endif
++
++  DBUG_VOID_RETURN;
++}
++
++
++static bool read_init_file(char *file_name)
++{
++  FILE *file;
++  DBUG_ENTER("read_init_file");
++  DBUG_PRINT("enter",("name: %s",file_name));
++  if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME))))
++    DBUG_RETURN(TRUE);
++  bootstrap(file);
++  (void) my_fclose(file,MYF(MY_WME));
++  DBUG_RETURN(FALSE);
++}
++
++
++#ifndef EMBEDDED_LIBRARY
++
++/*
++   Simple scheduler that uses the main thread to handle the request
++
++   NOTES
++     This is only used for debugging, when starting mysqld with
++     --thread-handling=no-threads or --one-thread
++
++     When we enter this function, LOCK_thread_count is held!
++*/
++   
++void handle_connection_in_main_thread(THD *thd)
++{
++  safe_mutex_assert_owner(&LOCK_thread_count);
++  thread_cache_size=0;			// Safety
++  threads.append(thd);
++  pthread_mutex_unlock(&LOCK_thread_count);
++  thd->start_utime= my_micro_time();
++  handle_one_connection(thd);
++}
++
++
++/*
++  Scheduler that uses one thread per connection
++*/
++
++void create_thread_to_handle_connection(THD *thd)
++{
++  if (cached_thread_count > wake_thread)
++  {
++    /* Get thread from cache */
++    thread_cache.append(thd);
++    wake_thread++;
++    pthread_cond_signal(&COND_thread_cache);
++  }
++  else
++  {
++    char error_message_buff[MYSQL_ERRMSG_SIZE];
++    /* Create new thread to handle connection */
++    int error;
++    thread_created++;
++    threads.append(thd);
++    DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id));
++    thd->prior_thr_create_utime= thd->start_utime= my_micro_time();
++    if ((error=pthread_create(&thd->real_id,&connection_attrib,
++                              handle_one_connection,
++                              (void*) thd)))
++    {
++      /* purecov: begin inspected */
++      DBUG_PRINT("error",
++                 ("Can't create thread to handle request (error %d)",
++                  error));
++      thread_count--;
++      thd->killed= THD::KILL_CONNECTION;			// Safety
++      (void) pthread_mutex_unlock(&LOCK_thread_count);
++
++      pthread_mutex_lock(&LOCK_connection_count);
++      --connection_count;
++      pthread_mutex_unlock(&LOCK_connection_count);
++
++      statistic_increment(aborted_connects,&LOCK_status);
++      /* Can't use my_error() since store_globals has not been called. */
++      my_snprintf(error_message_buff, sizeof(error_message_buff),
++                  ER(ER_CANT_CREATE_THREAD), error);
++      net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff);
++      (void) pthread_mutex_lock(&LOCK_thread_count);
++      close_connection(thd,0,0);
++      delete thd;
++      (void) pthread_mutex_unlock(&LOCK_thread_count);
++      return;
++      /* purecov: end */
++    }
++  }
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++  DBUG_PRINT("info",("Thread created"));
++}
++
++
++/**
++  Create a new thread to handle an incoming connection.
++
++    This function will create a new thread to handle the incoming
++    connection.  If there are idle cached threads, one will be used.
++    'thd' will be pushed into 'threads'.
++
++    In single-threaded mode (\#define ONE_THREAD) the connection will be
++    handled inside this function.
++
++  @param[in,out] thd    Thread handle of future thread.
++*/
++
++static void create_new_thread(THD *thd)
++{
++  NET *net=&thd->net;
++  DBUG_ENTER("create_new_thread");
++
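++  /* Protocol versions above 9 report the error number along with error messages. */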
++  if (protocol_version > 9)
++    net->return_errno=1;
++
++  /*
++    Don't allow too many connections. We roughly check here that we allow
++    only (max_connections + 1) connections.
++  */
++
++  pthread_mutex_lock(&LOCK_connection_count);
++
++  if (connection_count >= max_connections + 1 || abort_loop)
++  {
++    pthread_mutex_unlock(&LOCK_connection_count);
++
++    DBUG_PRINT("error",("Too many connections"));
++    close_connection(thd, ER_CON_COUNT_ERROR, 1);
++    delete thd;
++    DBUG_VOID_RETURN;
++  }
++
++  ++connection_count;
++
++  if (connection_count > max_used_connections)
++    max_used_connections= connection_count;
++
++  pthread_mutex_unlock(&LOCK_connection_count);
++
++  /* Start a new thread to handle connection. */
++
++  pthread_mutex_lock(&LOCK_thread_count);
++
++  /*
++    The initialization of thread_id is done in create_embedded_thd() for
++    the embedded library.
++    TODO: refactor this to avoid code duplication there
++  */
++  thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
++
++  thread_count++;
++
++  thread_scheduler.add_connection(thd);
++
++  DBUG_VOID_RETURN;
++}
++#endif /* EMBEDDED_LIBRARY */
++
++
++#ifdef SIGNALS_DONT_BREAK_READ
++inline void kill_broken_server()
++{
++  /* hack to get around signals ignored in syscalls for problem OS's */
++  if (
++#if !defined(__NETWARE__)
++      unix_sock == INVALID_SOCKET ||
++#endif
++      (!opt_disable_networking && ip_sock == INVALID_SOCKET))
++  {
++    select_thread_in_use = 0;
++    /* The following call will never return */
++    kill_server(IF_NETWARE(MYSQL_KILL_SIGNAL, (void*) MYSQL_KILL_SIGNAL));
++  }
++}
++#define MAYBE_BROKEN_SYSCALL kill_broken_server();
++#else
++#define MAYBE_BROKEN_SYSCALL
++#endif
++
++	/* Handle new connections and spawn new threads to handle them */
++
++#ifndef EMBEDDED_LIBRARY
++pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
++{
++  my_socket sock,new_sock;
++  uint error_count=0;
++  uint max_used_connection= (uint) (MYSQL_MAX(ip_sock,unix_sock)+1);
++  fd_set readFDs,clientFDs;
++  THD *thd;
++  struct sockaddr_in cAddr;
++  int ip_flags=0,socket_flags=0,flags;
++  st_vio *vio_tmp;
++  DBUG_ENTER("handle_connections_sockets");
++
++  LINT_INIT(new_sock);
++
++  (void) my_pthread_getprio(pthread_self());		// For debugging
++
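++  /* Register the TCP and Unix-domain listening sockets in the fd set polled by select() below. */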
++  FD_ZERO(&clientFDs);
++  if (ip_sock != INVALID_SOCKET)
++  {
++    FD_SET(ip_sock,&clientFDs);
++#ifdef HAVE_FCNTL
++    ip_flags = fcntl(ip_sock, F_GETFL, 0);
++#endif
++  }
++#ifdef HAVE_SYS_UN_H
++  FD_SET(unix_sock,&clientFDs);
++#ifdef HAVE_FCNTL
++  socket_flags=fcntl(unix_sock, F_GETFL, 0);
++#endif
++#endif
++
++  DBUG_PRINT("general",("Waiting for connections."));
++  MAYBE_BROKEN_SYSCALL;
++  while (!abort_loop)
++  {
++    readFDs=clientFDs;
++#ifdef HPUX10
++    if (select(max_used_connection,(int*) &readFDs,0,0,0) < 0)
++      continue;
++#else
++    if (select((int) max_used_connection,&readFDs,0,0,0) < 0)
++    {
++      if (socket_errno != SOCKET_EINTR)
++      {
++	if (!select_errors++ && !abort_loop)	/* purecov: inspected */
++	  sql_print_error("mysqld: Got error %d from select",socket_errno); /* purecov: inspected */
++      }
++      MAYBE_BROKEN_SYSCALL
++      continue;
++    }
++#endif	/* HPUX10 */
++    if (abort_loop)
++    {
++      MAYBE_BROKEN_SYSCALL;
++      break;
++    }
++
++    /* Is this a new connection request ? */
++#ifdef HAVE_SYS_UN_H
++    if (FD_ISSET(unix_sock,&readFDs))
++    {
++      sock = unix_sock;
++      flags= socket_flags;
++    }
++    else
++#endif
++    {
++      sock = ip_sock;
++      flags= ip_flags;
++    }
++
++#if !defined(NO_FCNTL_NONBLOCK)
++    if (!(test_flags & TEST_BLOCKING))
++    {
++#if defined(O_NONBLOCK)
++      fcntl(sock, F_SETFL, flags | O_NONBLOCK);
++#elif defined(O_NDELAY)
++      fcntl(sock, F_SETFL, flags | O_NDELAY);
++#endif
++    }
++#endif /* NO_FCNTL_NONBLOCK */
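++    /* Retry accept() a few times; EINTR/EAGAIN are expected on a non-blocking listener. */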
++    for (uint retry=0; retry < MAX_ACCEPT_RETRY; retry++)
++    {
++      size_socket length=sizeof(struct sockaddr_in);
++      new_sock = accept(sock, my_reinterpret_cast(struct sockaddr *) (&cAddr),
++			&length);
++#ifdef __NETWARE__ 
++      // TODO: temporary fix, waiting for TCP/IP fix - DEFECT000303149
++      if ((new_sock == INVALID_SOCKET) && (socket_errno == EINVAL))
++      {
++        kill_server(SIGTERM);
++      }
++#endif
++      if (new_sock != INVALID_SOCKET ||
++	  (socket_errno != SOCKET_EINTR && socket_errno != SOCKET_EAGAIN))
++	break;
++      MAYBE_BROKEN_SYSCALL;
++#if !defined(NO_FCNTL_NONBLOCK)
++      if (!(test_flags & TEST_BLOCKING))
++      {
++	if (retry == MAX_ACCEPT_RETRY - 1)
++	  fcntl(sock, F_SETFL, flags);		// Try without O_NONBLOCK
++      }
++#endif
++    }
++#if !defined(NO_FCNTL_NONBLOCK)
++    if (!(test_flags & TEST_BLOCKING))
++      fcntl(sock, F_SETFL, flags);
++#endif
++    if (new_sock == INVALID_SOCKET)
++    {
++      if ((error_count++ & 255) == 0)		// This can happen often
++	sql_perror("Error in accept");
++      MAYBE_BROKEN_SYSCALL;
++      if (socket_errno == SOCKET_ENFILE || socket_errno == SOCKET_EMFILE)
++	sleep(1);				// Give other threads some time
++      continue;
++    }
++
++#ifdef HAVE_LIBWRAP
++    {
++      if (sock == ip_sock)
++      {
++	struct request_info req;
++	signal(SIGCHLD, SIG_DFL);
++	request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE, new_sock, NULL);
++	my_fromhost(&req);
++	if (!my_hosts_access(&req))
++	{
++	  /*
++	    This may be stupid but refuse() includes an exit(0)
++	    which we surely don't want...
++	    clean_exit() - same stupid thing ...
++	  */
++	  syslog(deny_severity, "refused connect from %s",
++		 my_eval_client(&req));
++
++	  /*
++	    C++ sucks (the gibberish in front just translates the supplied
++	    sink function pointer in the req structure from a void (*sink)()
++	    to a void (*sink)(int)); if you omit the cast, the C++ compiler
++	    will cry...
++	  */
++	  if (req.sink)
++	    ((void (*)(int))req.sink)(req.fd);
++
++	  (void) shutdown(new_sock, SHUT_RDWR);
++	  (void) closesocket(new_sock);
++	  continue;
++	}
++      }
++    }
++#endif /* HAVE_LIBWRAP */
++
++    {
++      size_socket dummyLen;
++      struct sockaddr dummy;
++      dummyLen = sizeof(struct sockaddr);
++      if (getsockname(new_sock,&dummy, &dummyLen) < 0)
++      {
++	sql_perror("Error on new connection socket");
++	(void) shutdown(new_sock, SHUT_RDWR);
++	(void) closesocket(new_sock);
++	continue;
++      }
++    }
++
++    /*
++    ** Don't allow too many connections
++    */
++
++    if (!(thd= new THD))
++    {
++      (void) shutdown(new_sock, SHUT_RDWR);
++      VOID(closesocket(new_sock));
++      continue;
++    }
++    if (!(vio_tmp=vio_new(new_sock,
++			  sock == unix_sock ? VIO_TYPE_SOCKET :
++			  VIO_TYPE_TCPIP,
++			  sock == unix_sock ? VIO_LOCALHOST: 0)) ||
++	my_net_init(&thd->net,vio_tmp))
++    {
++      /*
++        Only delete the temporary vio if we didn't already attach it to the
++        NET object. The destructor in THD will delete any initialized net
++        structure.
++      */
++      if (vio_tmp && thd->net.vio != vio_tmp)
++        vio_delete(vio_tmp);
++      else
++      {
++	(void) shutdown(new_sock, SHUT_RDWR);
++	(void) closesocket(new_sock);
++      }
++      delete thd;
++      continue;
++    }
++    if (sock == unix_sock)
++      thd->security_ctx->host=(char*) my_localhost;
++
++    create_new_thread(thd);
++  }
++  DBUG_LEAVE;
++  decrement_handler_count();
++  return 0;
++}
++
++
++#ifdef __NT__
++pthread_handler_t handle_connections_namedpipes(void *arg)
++{
++  HANDLE hConnectedPipe;
++  OVERLAPPED connectOverlapped= {0};
++  THD *thd;
++  my_thread_init();
++  DBUG_ENTER("handle_connections_namedpipes");
++  connectOverlapped.hEvent= CreateEvent(NULL, TRUE, FALSE, NULL);
++  if (!connectOverlapped.hEvent)
++  {
++    sql_print_error("Can't create event, last error=%u", GetLastError());
++    unireg_abort(1);
++  }
++  DBUG_PRINT("general",("Waiting for named pipe connections."));
++  while (!abort_loop)
++  {
++    /* wait for named pipe connection */
++    BOOL fConnected= ConnectNamedPipe(hPipe, &connectOverlapped);
++    if (!fConnected && (GetLastError() == ERROR_IO_PENDING))
++    {
++        /*
++          ERROR_IO_PENDING says async IO has started but not yet finished.
++          GetOverlappedResult will wait for completion.
++        */
++        DWORD bytes;
++        fConnected= GetOverlappedResult(hPipe, &connectOverlapped,&bytes, TRUE);
++    }
++    if (abort_loop)
++      break;
++    if (!fConnected)
++      fConnected = GetLastError() == ERROR_PIPE_CONNECTED;
++    if (!fConnected)
++    {
++      CloseHandle(hPipe);
++      if ((hPipe= CreateNamedPipe(pipe_name,
++                                  PIPE_ACCESS_DUPLEX |
++                                  FILE_FLAG_OVERLAPPED,
++                                  PIPE_TYPE_BYTE |
++                                  PIPE_READMODE_BYTE |
++                                  PIPE_WAIT,
++                                  PIPE_UNLIMITED_INSTANCES,
++                                  (int) global_system_variables.
++                                  net_buffer_length,
++                                  (int) global_system_variables.
++                                  net_buffer_length,
++                                  NMPWAIT_USE_DEFAULT_WAIT,
++                                  &saPipeSecurity)) ==
++	  INVALID_HANDLE_VALUE)
++      {
++	sql_perror("Can't create new named pipe!");
++	break;					// Abort
++      }
++    }
++    hConnectedPipe = hPipe;
++    /* create new pipe for new connection */
++    if ((hPipe = CreateNamedPipe(pipe_name,
++                 PIPE_ACCESS_DUPLEX |
++                 FILE_FLAG_OVERLAPPED,
++				 PIPE_TYPE_BYTE |
++				 PIPE_READMODE_BYTE |
++				 PIPE_WAIT,
++				 PIPE_UNLIMITED_INSTANCES,
++				 (int) global_system_variables.net_buffer_length,
++				 (int) global_system_variables.net_buffer_length,
++				 NMPWAIT_USE_DEFAULT_WAIT,
++				 &saPipeSecurity)) ==
++	INVALID_HANDLE_VALUE)
++    {
++      sql_perror("Can't create new named pipe!");
++      hPipe=hConnectedPipe;
++      continue;					// We have to try again
++    }
++
++    if (!(thd = new THD))
++    {
++      DisconnectNamedPipe(hConnectedPipe);
++      CloseHandle(hConnectedPipe);
++      continue;
++    }
++    if (!(thd->net.vio= vio_new_win32pipe(hConnectedPipe)) ||
++	my_net_init(&thd->net, thd->net.vio))
++    {
++      close_connection(thd, ER_OUT_OF_RESOURCES, 1);
++      delete thd;
++      continue;
++    }
++    /* Host is unknown */
++    thd->security_ctx->host= my_strdup(my_localhost, MYF(0));
++    create_new_thread(thd);
++  }
++  CloseHandle(connectOverlapped.hEvent);
++  DBUG_LEAVE;
++  decrement_handler_count();
++  return 0;
++}
++#endif /* __NT__ */
++
++
++#ifdef HAVE_SMEM
++
++/**
++  Thread servicing shared memory connections.
++
++  @param arg                              Thread arguments
++*/
++pthread_handler_t handle_connections_shared_memory(void *arg)
++{
++  /* file-mapping object, used to create shared memory */
++  HANDLE handle_connect_file_map= 0;
++  char  *handle_connect_map= 0;                 // pointer to shared memory
++  HANDLE event_connect_answer= 0;
++  ulong smem_buffer_length= shared_memory_buffer_length + 4;
++  ulong connect_number= 1;
++  char *tmp= NULL;
++  char *suffix_pos;
++  char connect_number_char[22], *p;
++  const char *errmsg= 0;
++  SECURITY_ATTRIBUTES *sa_event= 0, *sa_mapping= 0;
++  my_thread_init();
++  DBUG_ENTER("handle_connections_shared_memorys");
++  DBUG_PRINT("general",("Waiting for allocated shared memory."));
++
++  /*
++     get enough space for base-name + '_' + the longest suffix we might ever send
++   */
++  if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L, MYF(MY_FAE))))
++    goto error;
++
++  if (my_security_attr_create(&sa_event, &errmsg,
++                              GENERIC_ALL, SYNCHRONIZE | EVENT_MODIFY_STATE))
++    goto error;
++
++  if (my_security_attr_create(&sa_mapping, &errmsg,
++                             GENERIC_ALL, FILE_MAP_READ | FILE_MAP_WRITE))
++    goto error;
++
++  /*
++    The names of the event and file-mapping objects are built according to
++    the following rule:
++      shared_memory_base_name+unique_part
++    Where:
++      shared_memory_base_name is a unique value for each server
++      unique_part is a unique value for each object (events and file-mapping)
++  */
++  suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS);
++  strmov(suffix_pos, "CONNECT_REQUEST");
++  if ((smem_event_connect_request= CreateEvent(sa_event,
++                                               FALSE, FALSE, tmp)) == 0)
++  {
++    errmsg= "Could not create request event";
++    goto error;
++  }
++  strmov(suffix_pos, "CONNECT_ANSWER");
++  if ((event_connect_answer= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
++  {
++    errmsg="Could not create answer event";
++    goto error;
++  }
++  strmov(suffix_pos, "CONNECT_DATA");
++  if ((handle_connect_file_map=
++       CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
++                         PAGE_READWRITE, 0, sizeof(connect_number), tmp)) == 0)
++  {
++    errmsg= "Could not create file mapping";
++    goto error;
++  }
++  if ((handle_connect_map= (char *)MapViewOfFile(handle_connect_file_map,
++						  FILE_MAP_WRITE,0,0,
++						  sizeof(DWORD))) == 0)
++  {
++    errmsg= "Could not create shared memory service";
++    goto error;
++  }
++
++  while (!abort_loop)
++  {
++    /* Wait for a request from a client */
++    WaitForSingleObject(smem_event_connect_request,INFINITE);
++
++    /*
++       this can happen after a shutdown command
++    */
++    if (abort_loop)
++      goto error;
++
++    HANDLE handle_client_file_map= 0;
++    char  *handle_client_map= 0;
++    HANDLE event_client_wrote= 0;
++    HANDLE event_client_read= 0;    // for transfer data server <-> client
++    HANDLE event_server_wrote= 0;
++    HANDLE event_server_read= 0;
++    HANDLE event_conn_closed= 0;
++    THD *thd= 0;
++
++    p= int10_to_str(connect_number, connect_number_char, 10);
++    /*
++      The names of the event and file-mapping objects are built according to
++      the following rule:
++        shared_memory_base_name+unique_part+number_of_connection
++        Where:
++	  shared_memory_base_name is a unique value for each server
++	  unique_part is a unique value for each object (events and file-mapping)
++	  number_of_connection is the connection number between server and client
++    */
++    suffix_pos= strxmov(tmp,shared_memory_base_name,"_",connect_number_char,
++			 "_",NullS);
++    strmov(suffix_pos, "DATA");
++    if ((handle_client_file_map=
++         CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
++                           PAGE_READWRITE, 0, smem_buffer_length, tmp)) == 0)
++    {
++      errmsg= "Could not create file mapping";
++      goto errorconn;
++    }
++    if ((handle_client_map= (char*)MapViewOfFile(handle_client_file_map,
++						  FILE_MAP_WRITE,0,0,
++						  smem_buffer_length)) == 0)
++    {
++      errmsg= "Could not create memory map";
++      goto errorconn;
++    }
++    strmov(suffix_pos, "CLIENT_WROTE");
++    if ((event_client_wrote= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
++    {
++      errmsg= "Could not create client write event";
++      goto errorconn;
++    }
++    strmov(suffix_pos, "CLIENT_READ");
++    if ((event_client_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
++    {
++      errmsg= "Could not create client read event";
++      goto errorconn;
++    }
++    strmov(suffix_pos, "SERVER_READ");
++    if ((event_server_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
++    {
++      errmsg= "Could not create server read event";
++      goto errorconn;
++    }
++    strmov(suffix_pos, "SERVER_WROTE");
++    if ((event_server_wrote= CreateEvent(sa_event,
++                                         FALSE, FALSE, tmp)) == 0)
++    {
++      errmsg= "Could not create server write event";
++      goto errorconn;
++    }
++    strmov(suffix_pos, "CONNECTION_CLOSED");
++    if ((event_conn_closed= CreateEvent(sa_event,
++                                        TRUE, FALSE, tmp)) == 0)
++    {
++      errmsg= "Could not create closed connection event";
++      goto errorconn;
++    }
++    if (abort_loop)
++      goto errorconn;
++    if (!(thd= new THD))
++      goto errorconn;
++    /* Send the connection number to the client */
++    int4store(handle_connect_map, connect_number);
++    if (!SetEvent(event_connect_answer))
++    {
++      errmsg= "Could not send answer event";
++      goto errorconn;
++    }
++    /* Set event that client should receive data */
++    if (!SetEvent(event_client_read))
++    {
++      errmsg= "Could not set client to read mode";
++      goto errorconn;
++    }
++    if (!(thd->net.vio= vio_new_win32shared_memory(handle_client_file_map,
++                                                   handle_client_map,
++                                                   event_client_wrote,
++                                                   event_client_read,
++                                                   event_server_wrote,
++                                                   event_server_read,
++                                                   event_conn_closed)) ||
++                        my_net_init(&thd->net, thd->net.vio))
++    {
++      close_connection(thd, ER_OUT_OF_RESOURCES, 1);
++      errmsg= 0;
++      goto errorconn;
++    }
++    thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); /* Host is unknown */
++    create_new_thread(thd);
++    connect_number++;
++    continue;
++
++errorconn:
++    /* Could not form connection; free used handles/memory and retry */
++    if (errmsg)
++    {
++      char buff[180];
++      strxmov(buff, "Can't create shared memory connection: ", errmsg, ".",
++	      NullS);
++      sql_perror(buff);
++    }
++    if (handle_client_file_map) 
++      CloseHandle(handle_client_file_map);
++    if (handle_client_map)
++      UnmapViewOfFile(handle_client_map);
++    if (event_server_wrote)
++      CloseHandle(event_server_wrote);
++    if (event_server_read)
++      CloseHandle(event_server_read);
++    if (event_client_wrote)
++      CloseHandle(event_client_wrote);
++    if (event_client_read)
++      CloseHandle(event_client_read);
++    if (event_conn_closed)
++      CloseHandle(event_conn_closed);
++    delete thd;
++  }
++
++  /* End shared memory handling */
++error:
++  if (tmp)
++    my_free(tmp, MYF(0));
++
++  if (errmsg)
++  {
++    char buff[180];
++    strxmov(buff, "Can't create shared memory service: ", errmsg, ".", NullS);
++    sql_perror(buff);
++  }
++  my_security_attr_free(sa_event);
++  my_security_attr_free(sa_mapping);
++  if (handle_connect_map)	UnmapViewOfFile(handle_connect_map);
++  if (handle_connect_file_map)	CloseHandle(handle_connect_file_map);
++  if (event_connect_answer)	CloseHandle(event_connect_answer);
++  if (smem_event_connect_request) CloseHandle(smem_event_connect_request);
++  DBUG_LEAVE;
++  decrement_handler_count();
++  return 0;
++}
++#endif /* HAVE_SMEM */
++#endif /* EMBEDDED_LIBRARY */
++
++
++/****************************************************************************
++  Handle start options
++******************************************************************************/
++
++enum options_mysqld
++{
++  OPT_ISAM_LOG=256,            OPT_SKIP_NEW, 
++  OPT_SKIP_GRANT,              OPT_SKIP_LOCK, 
++  OPT_ENABLE_LOCK,             OPT_USE_LOCKING,
++  OPT_SOCKET,                  OPT_UPDATE_LOG,
++  OPT_BIN_LOG,                 OPT_SKIP_RESOLVE,
++  OPT_SKIP_NETWORKING,         OPT_BIN_LOG_INDEX,
++  OPT_BIND_ADDRESS,            OPT_PID_FILE,
++  OPT_SKIP_PRIOR,              OPT_BIG_TABLES,
++  OPT_STANDALONE,              OPT_ONE_THREAD,
++  OPT_CONSOLE,                 OPT_LOW_PRIORITY_UPDATES,
++  OPT_SKIP_HOST_CACHE,         OPT_SHORT_LOG_FORMAT,
++  OPT_FLUSH,                   OPT_SAFE,
++  OPT_BOOTSTRAP,               OPT_SKIP_SHOW_DB,
++  OPT_STORAGE_ENGINE,          OPT_INIT_FILE,
++  OPT_DELAY_KEY_WRITE_ALL,     OPT_SLOW_QUERY_LOG,
++  OPT_DELAY_KEY_WRITE,	       OPT_CHARSETS_DIR,
++  OPT_MASTER_HOST,             OPT_MASTER_USER,
++  OPT_MASTER_PASSWORD,         OPT_MASTER_PORT,
++  OPT_MASTER_INFO_FILE,        OPT_MASTER_CONNECT_RETRY,
++  OPT_MASTER_RETRY_COUNT,      OPT_LOG_TC, OPT_LOG_TC_SIZE,
++  OPT_MASTER_SSL,              OPT_MASTER_SSL_KEY,
++  OPT_MASTER_SSL_CERT,         OPT_MASTER_SSL_CAPATH,
++  OPT_MASTER_SSL_CIPHER,       OPT_MASTER_SSL_CA,
++  OPT_SQL_BIN_UPDATE_SAME,     OPT_REPLICATE_DO_DB,
++  OPT_REPLICATE_IGNORE_DB,     OPT_LOG_SLAVE_UPDATES,
++  OPT_BINLOG_DO_DB,            OPT_BINLOG_IGNORE_DB,
++  OPT_BINLOG_FORMAT,
++#ifndef DBUG_OFF
++  OPT_BINLOG_SHOW_XID,
++#endif
++  OPT_BINLOG_ROWS_EVENT_MAX_SIZE, 
++  OPT_WANT_CORE,               OPT_CONCURRENT_INSERT,
++  OPT_MEMLOCK,                 OPT_MYISAM_RECOVER,
++  OPT_REPLICATE_REWRITE_DB,    OPT_SERVER_ID,
++  OPT_SKIP_SLAVE_START,        OPT_SAFE_SHOW_DB, 
++  OPT_SAFEMALLOC_MEM_LIMIT,    OPT_REPLICATE_DO_TABLE,
++  OPT_REPLICATE_IGNORE_TABLE,  OPT_REPLICATE_WILD_DO_TABLE,
++  OPT_REPLICATE_WILD_IGNORE_TABLE, OPT_REPLICATE_SAME_SERVER_ID,
++  OPT_DISCONNECT_SLAVE_EVENT_COUNT, OPT_TC_HEURISTIC_RECOVER,
++  OPT_ABORT_SLAVE_EVENT_COUNT,
++  OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
++  OPT_LOG_BIN_TRUST_FUNCTION_CREATORS_OLD,
++  OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDB_CONNECTSTRING, 
++  OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS,
++  OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
++  OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
++  OPT_NDB_MGMD, OPT_NDB_NODEID,
++  OPT_NDB_DISTRIBUTION,
++  OPT_NDB_INDEX_STAT_ENABLE,
++  OPT_NDB_EXTRA_LOGGING,
++  OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
++  OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
++  OPT_NDB_USE_COPYING_ALTER_TABLE,
++  OPT_SKIP_SAFEMALLOC,
++  OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
++  OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
++  OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
++  OPT_SAFE_USER_CREATE, OPT_SQL_MODE,
++  OPT_HAVE_NAMED_PIPE,
++  OPT_DO_PSTACK, OPT_EVENT_SCHEDULER, OPT_REPORT_HOST,
++  OPT_REPORT_USER, OPT_REPORT_PASSWORD, OPT_REPORT_PORT,
++  OPT_SHOW_SLAVE_AUTH_INFO,
++  OPT_SLAVE_LOAD_TMPDIR, OPT_NO_MIX_TYPE,
++  OPT_RPL_RECOVERY_RANK,OPT_INIT_RPL_ROLE,
++  OPT_RELAY_LOG, OPT_RELAY_LOG_INDEX, OPT_RELAY_LOG_INFO_FILE,
++  OPT_SLAVE_SKIP_ERRORS, OPT_DES_KEY_FILE, OPT_LOCAL_INFILE,
++  OPT_SSL_SSL, OPT_SSL_KEY, OPT_SSL_CERT, OPT_SSL_CA,
++  OPT_SSL_CAPATH, OPT_SSL_CIPHER,
++  OPT_BACK_LOG, OPT_BINLOG_CACHE_SIZE,
++  OPT_CONNECT_TIMEOUT, OPT_DELAYED_INSERT_TIMEOUT,
++  OPT_DELAYED_INSERT_LIMIT, OPT_DELAYED_QUEUE_SIZE,
++  OPT_FLUSH_TIME, OPT_FT_MIN_WORD_LEN, OPT_FT_BOOLEAN_SYNTAX,
++  OPT_FT_MAX_WORD_LEN, OPT_FT_QUERY_EXPANSION_LIMIT, OPT_FT_STOPWORD_FILE,
++  OPT_INTERACTIVE_TIMEOUT, OPT_JOIN_BUFF_SIZE,
++  OPT_KEY_BUFFER_SIZE, OPT_KEY_CACHE_BLOCK_SIZE,
++  OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_AGE_THRESHOLD,
++  OPT_LONG_QUERY_TIME,
++  OPT_LOWER_CASE_TABLE_NAMES, OPT_MAX_ALLOWED_PACKET,
++  OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
++  OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS,
++  OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE,
++  OPT_MAX_JOIN_SIZE, OPT_MAX_PREPARED_STMT_COUNT,
++  OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
++  OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS,
++  OPT_MAX_LENGTH_FOR_SORT_DATA,
++  OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE,
++  OPT_MAX_ERROR_COUNT, OPT_MULTI_RANGE_COUNT, OPT_MYISAM_DATA_POINTER_SIZE,
++  OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE,
++  OPT_MYISAM_MAX_SORT_FILE_SIZE, OPT_MYISAM_SORT_BUFFER_SIZE,
++  OPT_MYISAM_USE_MMAP, OPT_MYISAM_REPAIR_THREADS,
++  OPT_MYISAM_MMAP_SIZE,
++  OPT_MYISAM_STATS_METHOD,
++  OPT_NET_BUFFER_LENGTH, OPT_NET_RETRY_COUNT,
++  OPT_NET_READ_TIMEOUT, OPT_NET_WRITE_TIMEOUT,
++  OPT_OPEN_FILES_LIMIT,
++  OPT_PRELOAD_BUFFER_SIZE,
++  OPT_QUERY_CACHE_LIMIT, OPT_QUERY_CACHE_MIN_RES_UNIT, OPT_QUERY_CACHE_SIZE,
++  OPT_QUERY_CACHE_TYPE, OPT_QUERY_CACHE_WLOCK_INVALIDATE, OPT_RECORD_BUFFER,
++  OPT_RECORD_RND_BUFFER, OPT_DIV_PRECINCREMENT, OPT_RELAY_LOG_SPACE_LIMIT,
++  OPT_RELAY_LOG_PURGE,
++  OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME,
++  OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING,
++  OPT_SORT_BUFFER, OPT_TABLE_OPEN_CACHE, OPT_TABLE_DEF_CACHE,
++  OPT_THREAD_CONCURRENCY, OPT_THREAD_CACHE_SIZE,
++  OPT_TMP_TABLE_SIZE, OPT_THREAD_STACK,
++  OPT_WAIT_TIMEOUT,
++  OPT_ERROR_LOG_FILE,
++  OPT_DEFAULT_WEEK_FORMAT,
++  OPT_RANGE_ALLOC_BLOCK_SIZE, OPT_ALLOW_SUSPICIOUS_UDFS,
++  OPT_QUERY_ALLOC_BLOCK_SIZE, OPT_QUERY_PREALLOC_SIZE,
++  OPT_TRANS_ALLOC_BLOCK_SIZE, OPT_TRANS_PREALLOC_SIZE,
++  OPT_SYNC_FRM, OPT_SYNC_BINLOG,
++  OPT_SYNC_REPLICATION,
++  OPT_SYNC_REPLICATION_SLAVE_ID,
++  OPT_SYNC_REPLICATION_TIMEOUT,
++  OPT_ENABLE_SHARED_MEMORY,
++  OPT_SHARED_MEMORY_BASE_NAME,
++  OPT_OLD_PASSWORDS,
++  OPT_OLD_ALTER_TABLE,
++  OPT_EXPIRE_LOGS_DAYS,
++  OPT_GROUP_CONCAT_MAX_LEN,
++  OPT_DEFAULT_COLLATION,
++  OPT_DEFAULT_COLLATION_OLD,
++  OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
++  OPT_CHARACTER_SET_FILESYSTEM,
++  OPT_LC_TIME_NAMES,
++  OPT_INIT_CONNECT,
++  OPT_INIT_SLAVE,
++  OPT_SECURE_AUTH,
++  OPT_DATE_FORMAT,
++  OPT_TIME_FORMAT,
++  OPT_DATETIME_FORMAT,
++  OPT_LOG_QUERIES_NOT_USING_INDEXES,
++  OPT_DEFAULT_TIME_ZONE,
++  OPT_SYSDATE_IS_NOW,
++  OPT_OPTIMIZER_SEARCH_DEPTH,
++  OPT_OPTIMIZER_PRUNE_LEVEL,
++  OPT_OPTIMIZER_SWITCH,
++  OPT_UPDATABLE_VIEWS_WITH_LIMIT,
++  OPT_SP_AUTOMATIC_PRIVILEGES,
++  OPT_MAX_SP_RECURSION_DEPTH,
++  OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET,
++  OPT_ENABLE_LARGE_PAGES,
++  OPT_TIMED_MUTEXES,
++  OPT_OLD_STYLE_USER_LIMITS,
++  OPT_LOG_SLOW_ADMIN_STATEMENTS,
++  OPT_TABLE_LOCK_WAIT_TIMEOUT,
++  OPT_PLUGIN_LOAD,
++  OPT_PLUGIN_DIR,
++  OPT_SYMBOLIC_LINKS,
++  OPT_WARNINGS,
++  OPT_RECORD_BUFFER_OLD,
++  OPT_LOG_OUTPUT,
++  OPT_PORT_OPEN_TIMEOUT,
++  OPT_PROFILING,
++  OPT_KEEP_FILES_ON_CREATE,
++  OPT_GENERAL_LOG,
++  OPT_SLOW_LOG,
++  OPT_THREAD_HANDLING,
++  OPT_INNODB_ROLLBACK_ON_TIMEOUT,
++  OPT_SECURE_FILE_PRIV,
++  OPT_MIN_EXAMINED_ROW_LIMIT,
++  OPT_LOG_SLOW_SLAVE_STATEMENTS,
++#if defined(ENABLED_DEBUG_SYNC)
++  OPT_DEBUG_SYNC_TIMEOUT,
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++  OPT_OLD_MODE,
++  OPT_SLAVE_EXEC_MODE,
++  OPT_GENERAL_LOG_FILE,
++  OPT_SLOW_QUERY_LOG_FILE,
++  OPT_IGNORE_BUILTIN_INNODB,
++  OPT_BINLOG_DIRECT_NON_TRANS_UPDATE,
++  OPT_DEFAULT_CHARACTER_SET_OLD,
++  OPT_MAX_LONG_DATA_SIZE
++};
++
++
++#define LONG_TIMEOUT ((ulong) 3600L*24L*365L)
++
++struct my_option my_long_options[] =
++{
++  {"help", '?', "Display this help and exit.", 
++   &opt_help, &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
++   0, 0},
++#ifdef HAVE_REPLICATION
++  {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT,
++   "Option used by mysql-test for debugging and testing of replication.",
++   &abort_slave_event_count,  &abort_slave_event_count,
++   0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#endif /* HAVE_REPLICATION */
++  {"allow-suspicious-udfs", OPT_ALLOW_SUSPICIOUS_UDFS,
++   "Allows use of UDFs consisting of only one symbol xxx() "
++   "without corresponding xxx_init() or xxx_deinit(). That also means "
++   "that one can load any function from any library, for example exit() "
++   "from libc.so",
++   &opt_allow_suspicious_udfs, &opt_allow_suspicious_udfs,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode "
++   "will also set transaction isolation level 'serializable'.", 0, 0, 0,
++   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"auto-increment-increment", OPT_AUTO_INCREMENT,
++   "Auto-increment columns are incremented by this.",
++   &global_system_variables.auto_increment_increment,
++   &max_system_variables.auto_increment_increment, 0, GET_ULONG,
++   OPT_ARG, 1, 1, 65535, 0, 1, 0 },
++  {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET,
++   "Offset added to Auto-increment columns. Used when auto-increment-increment != 1.",
++   &global_system_variables.auto_increment_offset,
++   &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG,
++   1, 1, 65535, 0, 1, 0 },
++  {"automatic-sp-privileges", OPT_SP_AUTOMATIC_PRIVILEGES,
++   "Creating and dropping stored procedures alters ACLs. Disable with --skip-automatic-sp-privileges.",
++   &sp_automatic_privileges, &sp_automatic_privileges,
++   0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
++  {"basedir", 'b',
++   "Path to installation directory. All paths are usually resolved relative to this.",
++   &mysql_home_ptr, &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"big-tables", OPT_BIG_TABLES,
++   "Allow big result sets by saving all temporary sets on file (solves most 'table full' errors).",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
++   &my_bind_addr_str, &my_bind_addr_str, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"binlog_format", OPT_BINLOG_FORMAT,
++   "Does not have any effect without '--log-bin'. "
++   "Tell the master the form of binary logging to use: either 'row' for "
++   "row-based binary logging, 'statement' for statement-based binary "
++   "logging, or 'mixed'. 'mixed' is statement-based binary logging except "
++   "for statements where only row-based is correct: Statements that involve "
++   "user-defined functions (i.e., UDFs) or the UUID() function."
++#ifdef HAVE_NDB_BINLOG
++   "If ndbcluster is enabled and binlog_format is `mixed', the format switches"
++   " to 'row' and back implicitly per each query accessing a NDB table."
++#endif
++   , &opt_binlog_format, &opt_binlog_format,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"binlog-do-db", OPT_BINLOG_DO_DB,
++   "Tells the master it should log updates for the specified database, "
++   "and exclude all others not explicitly mentioned.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB,
++   "Tells the master that updates to the given database should not be logged to the binary log.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"binlog-row-event-max-size", OPT_BINLOG_ROWS_EVENT_MAX_SIZE,
++   "The maximum size of a row-based binary log event in bytes. Rows will be "
++   "grouped into events smaller than this size if possible. "
++   "The value has to be a multiple of 256.",
++   &opt_binlog_rows_event_max_size, &opt_binlog_rows_event_max_size,
++   0, GET_ULONG, REQUIRED_ARG,
++   /* def_value */ 1024, /* min_value */  256, /* max_value */ ULONG_MAX, 
++   /* sub_size */     0, /* block_size */ 256, 
++   /* app_type */ 0
++  },
++#ifndef DISABLE_GRANT_OPTIONS
++  {"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0,
++   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"character-set-client-handshake", OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
++   "Don't ignore client side character set value sent during handshake.",
++   &opt_character_set_client_handshake,
++   &opt_character_set_client_handshake,
++    0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
++  {"character-set-filesystem", OPT_CHARACTER_SET_FILESYSTEM,
++   "Set the filesystem character set.",
++   &character_set_filesystem_name,
++   &character_set_filesystem_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"character-set-server", 'C', "Set the default character set.",
++   &default_character_set_name, &default_character_set_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"character-sets-dir", OPT_CHARSETS_DIR,
++   "Directory where character sets are.", &charsets_dir,
++   &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"chroot", 'r', "Chroot mysqld daemon during startup.",
++   &mysqld_chroot, &mysqld_chroot, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.",
++   &default_collation_name, &default_collation_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"completion-type", OPT_COMPLETION_TYPE, "Default completion type.",
++   &global_system_variables.completion_type,
++   &max_system_variables.completion_type, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, 2, 0, 1, 0},
++  {"concurrent-insert", OPT_CONCURRENT_INSERT,
++   "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0.",
++   &myisam_concurrent_insert, &myisam_concurrent_insert,
++   0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0},
++  {"console", OPT_CONSOLE, "Write error output on screen; don't remove the console window on windows.",
++   &opt_console, &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0,
++   0, 0, 0},
++  {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"datadir", 'h', "Path to the database root.", &mysql_data_home,
++   &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef DBUG_OFF
++  {"debug", '#', "Debug log.", &default_dbug_option,
++   &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"default-character-set", OPT_DEFAULT_CHARACTER_SET_OLD, 
++   "Set the default character set (deprecated option, use --character-set-server instead).",
++   &default_character_set_name, &default_character_set_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"default-collation", OPT_DEFAULT_COLLATION_OLD, "Set the default collation "
++   "(deprecated option, use --collation-server instead).",
++   &default_collation_name, &default_collation_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"default-storage-engine", OPT_STORAGE_ENGINE,
++   "Set the default storage engine (table type) for tables.",
++   &default_storage_engine_str, &default_storage_engine_str,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"default-table-type", OPT_STORAGE_ENGINE,
++   "(deprecated) Use --default-storage-engine.",
++   &default_storage_engine_str, &default_storage_engine_str,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.",
++   &default_tz_name, &default_tz_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"delay-key-write", OPT_DELAY_KEY_WRITE, "Type of DELAY_KEY_WRITE.",
++   0,0,0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL,
++   "Don't flush key buffers between writes for any MyISAM table. "
++   "(Deprecated option, use --delay-key-write=all instead.)",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_OPENSSL
++  {"des-key-file", OPT_DES_KEY_FILE,
++   "Load keys for des_encrypt() and des_encrypt from given file.",
++   &des_key_file, &des_key_file, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++#endif /* HAVE_OPENSSL */
++#ifdef HAVE_REPLICATION
++  {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT,
++   "Option used by mysql-test for debugging and testing of replication.",
++   &disconnect_slave_event_count, &disconnect_slave_event_count,
++   0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#endif /* HAVE_REPLICATION */
++  {"enable-locking", OPT_ENABLE_LOCK,
++   "Deprecated option, use --external-locking instead.",
++   &opt_external_locking, &opt_external_locking,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef __NT__
++  {"enable-named-pipe", OPT_HAVE_NAMED_PIPE, "Enable the named pipe (NT).",
++   &opt_enable_named_pipe, &opt_enable_named_pipe, 0, GET_BOOL,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++#ifdef HAVE_STACK_TRACE_ON_SEGV
++  {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure. "
++   "This option is deprecated and has no effect; a symbolic stack trace will "
++   "be printed after a crash whenever possible.", &opt_do_pstack, &opt_do_pstack,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif /* HAVE_STACK_TRACE_ON_SEGV */
++  {"engine-condition-pushdown",
++   OPT_ENGINE_CONDITION_PUSHDOWN,
++   "Push supported query conditions to the storage engine.",
++   &global_system_variables.engine_condition_pushdown,
++   &global_system_variables.engine_condition_pushdown,
++   0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
++  /* See how it's handled in get_one_option() */
++  {"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.",
++   NULL,  NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"exit-info", 'T', "Used for debugging. Use at your own risk.", 0, 0, 0,
++   GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"external-locking", OPT_USE_LOCKING, "Use system (external) locking "
++   "(disabled by default).  With this option enabled you can run myisamchk "
++   "to test (not repair) tables while the MySQL server is running. "
++   "Disable with --skip-external-locking.",
++   &opt_external_locking, &opt_external_locking,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0,
++   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  /* We must always support the next option to make scripts like mysqltest
++     easier to do */
++  {"gdb", OPT_DEBUGGING,
++   "Set up signals usable for debugging.",
++   &opt_debugging, &opt_debugging,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"general_log", OPT_GENERAL_LOG,
++   "Enable/disable general log.", &opt_log,
++   &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_LARGE_PAGES
++  {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. "
++   "Disable with --skip-large-pages.", &opt_large_pages, &opt_large_pages,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"ignore-builtin-innodb", OPT_IGNORE_BUILTIN_INNODB ,
++   "Disable initialization of builtin InnoDB plugin.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"init-connect", OPT_INIT_CONNECT, 
++   "Command(s) that are executed for each new connection.",
++   &opt_init_connect, &opt_init_connect, 0, GET_STR_ALLOC,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef DISABLE_GRANT_OPTIONS
++  {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.",
++   &opt_init_file, &opt_init_file, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++#endif
++  {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0,
++   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed by a slave server \
++each time the SQL thread starts.",
++   &opt_init_slave, &opt_init_slave, 0, GET_STR_ALLOC,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"language", 'L',
++   "Client error messages in given language. May be given as a full path.",
++   &language_ptr, &language_ptr, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"lc-time-names", OPT_LC_TIME_NAMES,
++   "Set the language used for the month names and the days of the week.",
++   &lc_time_names_name, &lc_time_names_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
++  {"local-infile", OPT_LOCAL_INFILE,
++   "Enable/disable LOAD DATA LOCAL INFILE (takes values 1 or 0).",
++   &opt_local_infile, &opt_local_infile, 0, GET_BOOL, OPT_ARG,
++   1, 0, 0, 0, 0, 0},
++  {"log", 'l', "Log connections and queries to file (deprecated option, use "
++   "--general_log/--general_log_file instead).", &opt_logname,
++   &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"general_log_file", OPT_GENERAL_LOG_FILE,
++   "Log connections and queries to given file.", &opt_logname,
++   &opt_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-bin", OPT_BIN_LOG,
++   "Log update queries in binary format. Optional (but strongly recommended "
++   "to avoid replication problems if server's hostname changes) argument "
++   "should be the chosen location for the binary log files.",
++   &opt_bin_logname, &opt_bin_logname, 0, GET_STR_ALLOC,
++   OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-bin-index", OPT_BIN_LOG_INDEX,
++   "File that holds the names for last binary log files.",
++   &opt_binlog_index_name, &opt_binlog_index_name, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef TO_BE_REMOVED_IN_5_1_OR_6_0
++  /*
++    In 5.0.6 we introduced the below option, then in 5.0.16 we renamed it to
++    log-bin-trust-function-creators but kept also the old name for
++    compatibility; the behaviour was also changed to apply only to functions
++    (and triggers). In a future release this old name could be removed.
++  */
++  {"log-bin-trust-routine-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS_OLD,
++   "(deprecated) Use log-bin-trust-function-creators.",
++   &trust_function_creators, &trust_function_creators, 0,
++   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  /*
++    This option starts with "log-bin" to emphasize that it is specific of
++    binary logging.
++  */
++  {"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS,
++   "If equal to 0 (the default), then when --log-bin is used, creation of "
++   "a stored function (or trigger) is allowed only to users having the SUPER "
++   "privilege, and only if this stored function (trigger) may not break "
++   "binary logging."
++   "Note that if ALL connections to this server ALWAYS use row-based binary "
++   "logging, the security issues do not exist and the binary logging cannot "
++   "break, so you can safely set this to 1."
++   ,&trust_function_creators, &trust_function_creators, 0,
++   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-error", OPT_ERROR_LOG_FILE, "Error log file.",
++   &log_error_file_ptr, &log_error_file_ptr, 0, GET_STR,
++   OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.",
++   &myisam_log_filename, &myisam_log_filename, 0, GET_STR,
++   OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-long-format", '0',
++   "Log some extra information to update log. Please note that this option "
++   "is deprecated; see --log-short-format option.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef WITH_CSV_STORAGE_ENGINE
++  {"log-output", OPT_LOG_OUTPUT,
++   "Syntax: log-output[=value[,value...]], where \"value\" could be TABLE, "
++   "FILE or NONE.",
++   &log_output_str, &log_output_str, 0,
++   GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES,
++   "Log queries that are executed without benefit of any index to the slow log if it is open.",
++   &opt_log_queries_not_using_indexes, &opt_log_queries_not_using_indexes,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-short-format", OPT_SHORT_LOG_FORMAT,
++   "Don't log extra information to update and slow-query logs.",
++   &opt_short_log_format, &opt_short_log_format,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-slave-updates", OPT_LOG_SLAVE_UPDATES,
++   "Tells the slave to log the updates from the slave thread to the binary log. "
++   "You will need to turn it on if you plan to daisy-chain the slaves.",
++   &opt_log_slave_updates, &opt_log_slave_updates, 0, GET_BOOL,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-slow-admin-statements", OPT_LOG_SLOW_ADMIN_STATEMENTS,
++   "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements "
++   "to the slow log if it is open.", &opt_log_slow_admin_statements,
++   &opt_log_slow_admin_statements, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++ {"log-slow-slave-statements", OPT_LOG_SLOW_SLAVE_STATEMENTS,
++  "Log slow statements executed by slave thread to the slow log if it is open.",
++  &opt_log_slow_slave_statements,
++  &opt_log_slow_slave_statements,
++  0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"log_slow_queries", OPT_SLOW_QUERY_LOG,
++    "Log slow queries to a table or log file. Defaults logging to table "
++    "mysql.slow_log or hostname-slow.log if --log-output=file is used. "
++    "Must be enabled to activate other slow log options. "
++    "(deprecated option, use --slow_query_log/--slow_query_log_file instead)",
++   &opt_slow_logname, &opt_slow_logname, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"slow_query_log_file", OPT_SLOW_QUERY_LOG_FILE,
++    "Log slow queries to given log file. Defaults logging to hostname-slow.log. "
++    "Must be enabled to activate other slow log options.",
++   &opt_slow_logname, &opt_slow_logname, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-tc", OPT_LOG_TC,
++   "Path to transaction coordinator log (used for transactions that affect "
++   "more than one storage engine, when binary log is disabled).",
++   &opt_tc_log_file, &opt_tc_log_file, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_MMAP
++  {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.",
++   &opt_tc_log_size, &opt_tc_log_size, 0, GET_ULONG,
++   REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ULONG_MAX, 0,
++   TC_LOG_PAGE_SIZE, 0},
++#endif
++  {"log-update", OPT_UPDATE_LOG,
++   "The update log is deprecated since version 5.0, is replaced by the binary "
++   "log and this option just turns on --log-bin instead.",
++   &opt_update_logname, &opt_update_logname, 0, GET_STR,
++   OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"log-warnings", 'W', "Log some not critical warnings to the log file.",
++   &global_system_variables.log_warnings,
++   &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0,
++   0, 0, 0},
++  {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES,
++   "INSERT/DELETE/UPDATE has lower priority than selects.",
++   &global_system_variables.low_priority_updates,
++   &max_system_variables.low_priority_updates,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"master-connect-retry", OPT_MASTER_CONNECT_RETRY,
++   "The number of seconds the slave thread will sleep before retrying to "
++   "connect to the master, in case the master goes down or the connection "
++   "is lost.",
++   &master_connect_retry, &master_connect_retry, 0, GET_UINT,
++   REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
++  {"master-host", OPT_MASTER_HOST,
++   "Master hostname or IP address for replication. If not set, the slave "
++   "thread will not be started. Note that the setting of master-host will "
++   "be ignored if there exists a valid master.info file.",
++   &master_host, &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
++   0, 0, 0, 0},
++  {"master-info-file", OPT_MASTER_INFO_FILE,
++   "The location and name of the file that remembers the master and where "
++   "the I/O replication thread is in the master's binlogs.",
++   &master_info_file, &master_info_file, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"master-password", OPT_MASTER_PASSWORD,
++   "The password the slave thread will authenticate with when connecting to "
++   "the master. If not set, an empty password is assumed. The value in "
++   "master.info will take precedence if it can be read.",
++   &master_password, &master_password, 0,
++   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"master-port", OPT_MASTER_PORT,
++   "The port the master is listening on. If not set, the compiled setting of "
++   "MYSQL_PORT is assumed. If you have not tinkered with configure options, "
++   "this should be 3306. The value in master.info will take precedence if it "
++   "can be read.", &master_port, &master_port, 0, GET_UINT, REQUIRED_ARG,
++   MYSQL_PORT, 0, 0, 0, 0, 0},
++  {"master-retry-count", OPT_MASTER_RETRY_COUNT,
++   "The number of tries the slave will make to connect to the master before giving up.",
++   &master_retry_count, &master_retry_count, 0, GET_ULONG,
++   REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0},
++  {"master-ssl", OPT_MASTER_SSL,
++   "Enable the slave to connect to the master using SSL.",
++   &master_ssl, &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
++   0, 0},
++  {"master-ssl-ca", OPT_MASTER_SSL_CA,
++   "Master SSL CA file. Only applies if you have enabled master-ssl.",
++   &master_ssl_ca, &master_ssl_ca, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"master-ssl-capath", OPT_MASTER_SSL_CAPATH,
++   "Master SSL CA path. Only applies if you have enabled master-ssl.",
++   &master_ssl_capath, &master_ssl_capath, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"master-ssl-cert", OPT_MASTER_SSL_CERT,
++   "Master SSL certificate file name. Only applies if you have enabled "
++   "master-ssl.",
++   &master_ssl_cert, &master_ssl_cert, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"master-ssl-cipher", OPT_MASTER_SSL_CIPHER,
++   "Master SSL cipher. Only applies if you have enabled master-ssl.",
++   &master_ssl_cipher, &master_ssl_cipher, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"master-ssl-key", OPT_MASTER_SSL_KEY,
++   "Master SSL keyfile name. Only applies if you have enabled master-ssl.",
++   &master_ssl_key, &master_ssl_key, 0, GET_STR, OPT_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"master-user", OPT_MASTER_USER,
++   "The username the slave thread will use for authentication when "
++   "connecting to the master. The user must have FILE privilege. "
++   "If the master user is not set, user test is assumed. The value "
++   "in master.info will take precedence if it can be read.",
++   &master_user, &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0,
++   0, 0, 0, 0},
++#ifdef HAVE_REPLICATION
++  {"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS,
++   "Option used by mysql-test for debugging and testing of replication.",
++   &max_binlog_dump_events, &max_binlog_dump_events, 0,
++   GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#endif /* HAVE_REPLICATION */
++  {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", &locked_in_memory,
++   &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"myisam-recover", OPT_MYISAM_RECOVER,
++   "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.",
++   &myisam_recover_options_str, &myisam_recover_options_str, 0,
++   GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++  {"ndb-connectstring", OPT_NDB_CONNECTSTRING,
++   "Connect string for ndbcluster.",
++   &opt_ndb_connectstring, &opt_ndb_connectstring,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"ndb-mgmd-host", OPT_NDB_MGMD,
++   "Set host and port for ndb_mgmd. Syntax: hostname[:port]",
++   &opt_ndb_mgmd, &opt_ndb_mgmd,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"ndb-nodeid", OPT_NDB_NODEID,
++   "Nodeid for this mysqlserver in the cluster.",
++   &opt_ndb_nodeid,
++   &opt_ndb_nodeid,
++   0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
++   "Specify number of autoincrement values that are prefetched.",
++   &global_system_variables.ndb_autoincrement_prefetch_sz,
++   &max_system_variables.ndb_autoincrement_prefetch_sz,
++   0, GET_ULONG, REQUIRED_ARG, 1, 1, 256, 0, 0, 0},
++  {"ndb-force-send", OPT_NDB_FORCE_SEND,
++   "Force send of buffers to ndb immediately without waiting for "
++   "other threads.",
++   &global_system_variables.ndb_force_send,
++   &global_system_variables.ndb_force_send,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb_force_send", OPT_NDB_FORCE_SEND,
++   "same as --ndb-force-send.",
++   &global_system_variables.ndb_force_send,
++   &global_system_variables.ndb_force_send,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb-extra-logging", OPT_NDB_EXTRA_LOGGING,
++   "Turn on more logging in the error log.",
++   &ndb_extra_logging,
++   &ndb_extra_logging,
++   0, GET_INT, OPT_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_NDB_BINLOG
++  {"ndb-report-thresh-binlog-epoch-slip", OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
++   "Threshold on number of epochs to be behind before reporting binlog status. "
++   "E.g., 3 means that if the difference between what epoch has been received "
++   "from the storage nodes and what has been applied to the binlog is 3 or more, "
++   "a status message will be sent to the cluster log.",
++   &ndb_report_thresh_binlog_epoch_slip,
++   &ndb_report_thresh_binlog_epoch_slip,
++   0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0},
++  {"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
++   "Threshold on percentage of free memory before reporting binlog status. E.g., "
++   "10 means that if amount of available memory for receiving binlog data from "
++   "the storage nodes goes below 10%, "
++   "a status message will be sent to the cluster log.",
++   &ndb_report_thresh_binlog_mem_usage,
++   &ndb_report_thresh_binlog_mem_usage,
++   0, GET_ULONG, REQUIRED_ARG, 10, 0, 100, 0, 0, 0},
++#endif
++  {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT,
++   "Use exact records count during query planning and for fast "
++   "select count(*), disable for faster queries.",
++   &global_system_variables.ndb_use_exact_count,
++   &global_system_variables.ndb_use_exact_count,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT,
++   "Same as --ndb-use-exact-count.",
++   &global_system_variables.ndb_use_exact_count,
++   &global_system_variables.ndb_use_exact_count,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS,
++   "Use transactions for large inserts, if enabled then large "
++   "inserts will be split into several smaller transactions",
++   &global_system_variables.ndb_use_transactions,
++   &global_system_variables.ndb_use_transactions,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS,
++   "Same as --ndb-use-transactions.",
++   &global_system_variables.ndb_use_transactions,
++   &global_system_variables.ndb_use_transactions,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  {"ndb-shm", OPT_NDB_SHM,
++   "Use shared memory connections when available.",
++   &opt_ndb_shm, &opt_ndb_shm,
++   0, GET_BOOL, OPT_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0},
++  {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION,
++   "Select nodes for transactions in a more optimal way.",
++   &opt_ndb_optimized_node_selection,
++   &opt_ndb_optimized_node_selection,
++   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
++  { "ndb-cache-check-time", OPT_NDB_CACHE_CHECK_TIME,
++    "A dedicated thread is created to, at the given milliseconds interval, "
++    "invalidate the query cache if another MySQL server in the cluster has "
++    "changed the data in the database.",
++    &opt_ndb_cache_check_time, &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG,
++    0, 0, LONG_TIMEOUT, 0, 1, 0},
++  {"ndb-index-stat-enable", OPT_NDB_INDEX_STAT_ENABLE,
++   "Use ndb index statistics in query optimization.",
++   &global_system_variables.ndb_index_stat_enable,
++   &max_system_variables.ndb_index_stat_enable,
++   0, GET_BOOL, OPT_ARG, 0, 0, 1, 0, 0, 0},
++#endif
++  {"ndb-use-copying-alter-table",
++   OPT_NDB_USE_COPYING_ALTER_TABLE,
++   "Force ndbcluster to always copy tables at alter table "
++   "(should only be used if on-line alter table fails).",
++   &global_system_variables.ndb_use_copying_alter_table,
++   &global_system_variables.ndb_use_copying_alter_table,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},  
++  {"new", 'n', "Use very new, possibly 'unsafe', functions.",
++   &global_system_variables.new_mode,
++   &max_system_variables.new_mode,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef NOT_YET
++  {"no-mix-table-types", OPT_NO_MIX_TYPE, 
++   "Don't allow commands that use two different table types.",
++   &opt_no_mix_types, &opt_no_mix_types, 0, GET_BOOL, NO_ARG,
++   0, 0, 0, 0, 0, 0},
++#endif
++  {"old-alter-table", OPT_OLD_ALTER_TABLE,
++   "Use old, non-optimized alter table.",
++   &global_system_variables.old_alter_table,
++   &max_system_variables.old_alter_table, 0, GET_BOOL, NO_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"old-passwords", OPT_OLD_PASSWORDS, "Use old password "
++   "encryption method (needed for 4.0 and older clients).",
++   &global_system_variables.old_passwords,
++   &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"one-thread", OPT_ONE_THREAD,
++   "(Deprecated): Only use one thread (for debugging under Linux). Use "
++   "thread-handling=no-threads instead.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS,
++   "Enable old-style user limits (before 5.0.3, user resources were counted "
++   "per each user+host vs. per account).",
++   &opt_old_style_user_limits, &opt_old_style_user_limits,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.",
++   &pidfile_name_ptr, &pidfile_name_ptr, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"port", 'P', "Port number to use for connection or 0 for default to, in "
++   "order of preference, my.cnf, $MYSQL_TCP_PORT, "
++#if MYSQL_PORT_DEFAULT == 0
++   "/etc/services, "
++#endif
++   "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
++   &mysqld_port,
++   &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"port-open-timeout", OPT_PORT_OPEN_TIMEOUT,
++   "Maximum time in seconds to wait for the port to become free. "
++   "(Default: No wait).", &mysqld_port_timeout,
++   &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++  {"profiling_history_size", OPT_PROFILING, "Limit of query profiling memory.",
++   &global_system_variables.profiling_history_size,
++   &max_system_variables.profiling_history_size,
++   0, GET_ULONG, REQUIRED_ARG, 15, 0, 100, 0, 0, 0},
++#endif
++  {"relay-log", OPT_RELAY_LOG,
++   "The location and name to use for relay logs.",
++   &opt_relay_logname, &opt_relay_logname, 0,
++   GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"relay-log-index", OPT_RELAY_LOG_INDEX,
++   "The location and name to use for the file that keeps a list of the last \
++relay logs.",
++   &opt_relaylog_index_name, &opt_relaylog_index_name, 0,
++   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE,
++   "The location and name of the file that remembers where the SQL replication \
++thread is in the relay logs.",
++   &relay_log_info_file, &relay_log_info_file, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-do-db", OPT_REPLICATE_DO_DB,
++   "Tells the slave thread to restrict replication to the specified database. "
++   "To specify more than one database, use the directive multiple times, "
++   "once for each database. Note that this will only work if you do not use "
++   "cross-database queries such as UPDATE some_db.some_table SET foo='bar' "
++   "while having selected a different or no database. If you need cross "
++   "database updates to work, make sure you have 3.23.28 or later, and use "
++   "replicate-wild-do-table=db_name.%.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-do-table", OPT_REPLICATE_DO_TABLE,
++   "Tells the slave thread to restrict replication to the specified table. "
++   "To specify more than one table, use the directive multiple times, once "
++   "for each table. This will work for cross-database updates, in contrast "
++   "to replicate-do-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB,
++   "Tells the slave thread to not replicate to the specified database. To "
++   "specify more than one database to ignore, use the directive multiple "
++   "times, once for each database. This option will not work if you use "
++   "cross database updates. If you need cross database updates to work, "
++   "make sure you have 3.23.28 or later, and use replicate-wild-ignore-"
++   "table=db_name.%. ", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE,
++   "Tells the slave thread to not replicate to the specified table. To specify "
++   "more than one table to ignore, use the directive multiple times, once for "
++   "each table. This will work for cross-database updates, in contrast to "
++   "replicate-ignore-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB,
++   "Updates to a database with a different name than the original. Example: "
++   "replicate-rewrite-db=master_db_name->slave_db_name.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_REPLICATION
++  {"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID,
++   "In replication, if set to 1, do not skip events having our server id. "
++   "Default value is 0 (to break infinite loops in circular replication). "
++   "Can't be set to 1 if --log-slave-updates is used.",
++   &replicate_same_server_id, &replicate_same_server_id,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE,
++   "Tells the slave thread to restrict replication to the tables that match "
++   "the specified wildcard pattern. To specify more than one table, use the "
++   "directive multiple times, once for each table. This will work for cross-"
++   "database updates. Example: replicate-wild-do-table=foo%.bar% will "
++   "replicate only updates to tables in all databases that start with foo "
++   "and whose table names start with bar.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE,
++   "Tells the slave thread to not replicate to the tables that match the "
++   "given wildcard pattern. To specify more than one table to ignore, use "
++   "the directive multiple times, once for each table. This will work for "
++   "cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% "
++   "will not do updates to tables in databases that start with foo and whose "
++   "table names start with bar.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  // In replication, we may need to tell the other servers how to connect
++  {"report-host", OPT_REPORT_HOST,
++   "Hostname or IP of the slave to be reported to the master during slave "
++   "registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset "
++   "if you do not want the slave to register itself with the master. Note that "
++   "it is not sufficient for the master to simply read the IP of the slave "
++   "from the socket once the slave connects. Due to NAT and other routing "
++   "issues, that IP may not be valid for connecting to the slave from the "
++   "master or other hosts.",
++   &report_host, &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
++   0, 0, 0, 0},
++  {"report-password", OPT_REPORT_PASSWORD, "Undocumented.",
++   &report_password, &report_password, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"report-port", OPT_REPORT_PORT,
++   "Port for connecting to slave reported to the master during slave "
++   "registration. Set it only if the slave is listening on a non-default "
++   "port or if you have a special tunnel from the master or other clients "
++   "to the slave. If not sure, leave this option unset.",
++   &report_port, &report_port, 0, GET_UINT, REQUIRED_ARG,
++   MYSQL_PORT, 0, 0, 0, 0, 0},
++  {"report-user", OPT_REPORT_USER, "Undocumented.", &report_user,
++   &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented.",
++   &rpl_recovery_rank, &rpl_recovery_rank, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef TO_BE_DELETED
++  {"safe-show-database", OPT_SAFE_SHOW_DB,
++   "Deprecated option; use GRANT SHOW DATABASES instead.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"safe-user-create", OPT_SAFE_USER_CREATE,
++   "Don't allow new user creation by the user who has no write privileges to the mysql.user table.",
++   &opt_safe_user_create, &opt_safe_user_create, 0, GET_BOOL,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT,
++   "Simulate memory shortage when compiled with the --with-debug=full option.",
++   0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.",
++   &opt_secure_auth, &opt_secure_auth, 0, GET_BOOL, NO_ARG,
++   my_bool(0), 0, 0, 0, 0, 0},
++  {"secure-file-priv", OPT_SECURE_FILE_PRIV,
++   "Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory.",
++   &opt_secure_file_priv, &opt_secure_file_priv, 0,
++   GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"server-id",	OPT_SERVER_ID,
++   "Uniquely identifies the server instance in the community of replication partners.",
++   &server_id, &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, UINT_MAX32,
++   0, 0, 0},
++  {"set-variable", 'O',
++   "Change the value of a variable. Please note that this option is deprecated; "
++   "you can set variables directly with --variable-name=value.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_SMEM
++  {"shared-memory", OPT_ENABLE_SHARED_MEMORY,
++   "Enable the shared memory.",&opt_enable_shared_memory, &opt_enable_shared_memory,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++#ifdef HAVE_SMEM
++  {"shared-memory-base-name",OPT_SHARED_MEMORY_BASE_NAME,
++   "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"show-slave-auth-info", OPT_SHOW_SLAVE_AUTH_INFO,
++   "Show user and password in SHOW SLAVE HOSTS on this master.",
++   &opt_show_slave_auth_info, &opt_show_slave_auth_info, 0,
++   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef DISABLE_GRANT_OPTIONS
++  {"skip-grant-tables", OPT_SKIP_GRANT,
++   "Start without grant tables. This gives all users FULL ACCESS to all tables.",
++   &opt_noacl, &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
++   0},
++#endif
++  {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0,
++   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"skip-locking", OPT_SKIP_LOCK,
++   "Deprecated option, use --skip-external-locking instead.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"skip-name-resolve", OPT_SKIP_RESOLVE,
++   "Don't resolve hostnames. All hostnames are IP's or 'localhost'.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"skip-networking", OPT_SKIP_NETWORKING,
++   "Don't allow connection with TCP/IP.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0,
++   0, 0, 0},
++  {"skip-new", OPT_SKIP_NEW, "Don't use new, possibly wrong routines.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++#ifndef DBUG_OFF
++#ifdef SAFEMALLOC
++  {"skip-safemalloc", OPT_SKIP_SAFEMALLOC,
++   "Don't use the memory allocation checking.", 0, 0, 0, GET_NO_ARG, NO_ARG,
++   0, 0, 0, 0, 0, 0},
++#endif
++#endif
++  {"skip-show-database", OPT_SKIP_SHOW_DB,
++   "Don't allow 'SHOW DATABASE' commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
++   0, 0, 0, 0},
++  {"skip-slave-start", OPT_SKIP_SLAVE_START,
++   "If set, slave is not autostarted.", &opt_skip_slave_start,
++   &opt_skip_slave_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"skip-stack-trace", OPT_SKIP_STACK_TRACE,
++   "Don't print a stack trace on failure.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
++   0, 0, 0, 0},
++  {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. "
++  "Deprecated option. Use --skip-symbolic-links instead.",
++   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"skip-thread-priority", OPT_SKIP_PRIOR,
++   "Don't give threads different priorities. Deprecated option.", 0, 0, 0, GET_NO_ARG, NO_ARG,
++   DEFAULT_SKIP_THREAD_PRIORITY, 0, 0, 0, 0, 0},
++#ifdef HAVE_REPLICATION
++  {"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR,
++   "The location where the slave should put its temporary files when "
++   "replicating a LOAD DATA INFILE command.",
++   &slave_load_tmpdir, &slave_load_tmpdir, 0, GET_STR_ALLOC,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"slave-skip-errors", OPT_SLAVE_SKIP_ERRORS,
++   "Tells the slave thread to continue replication when a query event returns an error from the provided list.",
++   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"slave-exec-mode", OPT_SLAVE_EXEC_MODE,
++   "Modes for how replication events should be executed. Legal values are "
++   "STRICT (default) and IDEMPOTENT. In IDEMPOTENT mode, replication will "
++   "not stop for operations that are idempotent. In STRICT mode, replication "
++   "will stop on any unexpected difference between the master and the slave.",
++   &slave_exec_mode_str, &slave_exec_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"slow-query-log", OPT_SLOW_LOG,
++   "Enable/disable slow query log.", &opt_slow_log,
++   &opt_slow_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"socket", OPT_SOCKET, "Socket file to use for connection.",
++   &mysqld_unix_port, &mysqld_unix_port, 0, GET_STR,
++   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#ifdef HAVE_REPLICATION
++  {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL,
++   "Option used by mysql-test for debugging and testing of replication.",
++   &opt_sporadic_binlog_dump_fail,
++   &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
++   0},
++#endif /* HAVE_REPLICATION */
++  {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME,
++   "The update log is deprecated since version 5.0, is replaced by the "
++   "binary log and this option does nothing anymore.",
++   0, 0, 0, GET_DISABLED, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"sql-mode", OPT_SQL_MODE,
++   "Syntax: sql-mode=option[,option[,option...]] where option can be one "
++   "of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, "
++   "ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.",
++   &sql_mode_str, &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0,
++   0, 0, 0, 0, 0},
++#ifdef HAVE_OPENSSL
++#include "sslopt-longopts.h"
++#endif
++#ifdef __WIN__
++  {"standalone", OPT_STANDALONE,
++  "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++#endif
++  {"symbolic-links", 's', "Enable symbolic link support.",
++   &my_use_symdir, &my_use_symdir, 0, GET_BOOL, NO_ARG,
++   /*
++     The system call realpath() produces warnings under valgrind and
++     purify. These are not suppressed: instead we disable symlinks
++     option if compiled with valgrind support.
++   */
++   IF_PURIFY(0,1), 0, 0, 0, 0, 0},
++  {"sysdate-is-now", OPT_SYSDATE_IS_NOW,
++   "Non-default option to alias SYSDATE() to NOW() to make it safe-replicable. "
++   "Since 5.0, SYSDATE() returns a `dynamic' value different for different "
++   "invocations, even within the same statement.",
++   &global_system_variables.sysdate_is_now,
++   0, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
++  {"tc-heuristic-recover", OPT_TC_HEURISTIC_RECOVER,
++   "Decision to use in heuristic recover process. Possible values are COMMIT "
++   "or ROLLBACK.", &opt_tc_heuristic_recover, &opt_tc_heuristic_recover,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++#if defined(ENABLED_DEBUG_SYNC)
++  {"debug-sync-timeout", OPT_DEBUG_SYNC_TIMEOUT,
++   "Enable the debug sync facility "
++   "and optionally specify a default wait timeout in seconds. "
++   "A zero value keeps the facility disabled.",
++   &opt_debug_sync_timeout, 0,
++   0, GET_UINT, OPT_ARG, 0, 0, UINT_MAX, 0, 0, 0},
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++  {"temp-pool", OPT_TEMP_POOL,
++#if (ENABLE_TEMP_POOL)
++   "Using this option will cause most temporary files created to use a small "
++   "set of names, rather than a unique name for each new file.",
++#else
++   "This option is ignored on this OS.",
++#endif
++   &use_temp_pool, &use_temp_pool, 0, GET_BOOL, NO_ARG, 1,
++   0, 0, 0, 0, 0},
++  {"timed_mutexes", OPT_TIMED_MUTEXES,
++   "Specify whether to time mutexes (only InnoDB mutexes are currently supported).",
++   &timed_mutexes, &timed_mutexes, 0, GET_BOOL, NO_ARG, 0,
++    0, 0, 0, 0, 0},
++  {"tmpdir", 't',
++   "Path for temporary files. Several paths may be specified, separated by a "
++#if defined(__WIN__) || defined(__NETWARE__)
++   "semicolon (;)"
++#else
++   "colon (:)"
++#endif
++   ", in this case they are used in a round-robin fashion.",
++   &opt_mysql_tmpdir, &opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"transaction-isolation", OPT_TX_ISOLATION,
++   "Default transaction isolation level.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0,
++   0, 0, 0, 0, 0},
++  {"use-symbolic-links", OPT_SYMBOLIC_LINKS, "Enable symbolic link support. "
++   "Deprecated option; use --symbolic-links instead.",
++   &my_use_symdir, &my_use_symdir, 0, GET_BOOL, NO_ARG,
++   IF_PURIFY(0,1), 0, 0, 0, 0, 0},
++  {"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"verbose", 'v', "Used with --help option for detailed help.",
++   &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
++   NO_ARG, 0, 0, 0, 0, 0, 0},
++  {"warnings", OPT_WARNINGS, "Deprecated; use --log-warnings instead.",
++   &global_system_variables.log_warnings,
++   &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG,
++   1, 0, ULONG_MAX, 0, 0, 0},
++  {"back_log", OPT_BACK_LOG,
++   "The number of outstanding connection requests MySQL can have. This "
++   "comes into play when the main MySQL thread gets very many connection "
++   "requests in a very short time.", &back_log, &back_log, 0, GET_ULONG,
++   REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
++  {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
++   "The size of the cache to hold the SQL statements for the binary log "
++   "during a transaction. If you often use big, multi-statement "
++   "transactions you can increase this to get more performance.",
++   &binlog_cache_size, &binlog_cache_size, 0, GET_ULONG,
++   REQUIRED_ARG, 32*1024L, IO_SIZE, ULONG_MAX, 0, IO_SIZE, 0},
++  {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE,
++   "Size of tree cache used in bulk insert optimization. Note that this "
++   "is a limit per thread.", &global_system_variables.bulk_insert_buff_size,
++   &max_system_variables.bulk_insert_buff_size,
++   0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ULONG_MAX, 0, 1, 0},
++  {"connect_timeout", OPT_CONNECT_TIMEOUT,
++   "The number of seconds the mysqld server is waiting for a connect packet "
++   "before responding with 'Bad handshake'.", &connect_timeout, &connect_timeout,
++   0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 },
++  { "date_format", OPT_DATE_FORMAT,
++    "The DATE format (for future).",
++    &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
++    &opt_date_time_formats[MYSQL_TIMESTAMP_DATE],
++    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  { "datetime_format", OPT_DATETIME_FORMAT,
++    "The DATETIME/TIMESTAMP format (for future).",
++    &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
++    &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME],
++    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  { "default_week_format", OPT_DEFAULT_WEEK_FORMAT,
++    "The default week format used by WEEK() functions.",
++    &global_system_variables.default_week_format,
++    &max_system_variables.default_week_format,
++    0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0},
++  {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT,
++   "After inserting delayed_insert_limit rows, the INSERT DELAYED handler "
++   "will check if there are any SELECT statements pending. If so, it allows "
++   "these to execute before continuing.",
++    &delayed_insert_limit, &delayed_insert_limit, 0, GET_ULONG,
++    REQUIRED_ARG, DELAYED_LIMIT, 1, ULONG_MAX, 0, 1, 0},
++  {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
++   "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
++   &delayed_insert_timeout, &delayed_insert_timeout, 0,
++   GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
++  { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE,
++    "What size queue (in rows) should be allocated for handling INSERT DELAYED. "
++    "If the queue becomes full, any client that does INSERT DELAYED will wait "
++    "until there is room in the queue again.",
++    &delayed_queue_size, &delayed_queue_size, 0, GET_ULONG,
++    REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ULONG_MAX, 0, 1, 0},
++  {"div_precision_increment", OPT_DIV_PRECINCREMENT,
++   "Precision of the result of '/' operator will be increased on that value.",
++   &global_system_variables.div_precincrement,
++   &max_system_variables.div_precincrement, 0, GET_ULONG,
++   REQUIRED_ARG, 4, 0, DECIMAL_MAX_SCALE, 0, 0, 0},
++  {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS,
++   "If non-zero, binary logs will be purged after expire_logs_days "
++   "days; possible purges happen at startup and at binary log rotation.",
++   &expire_logs_days, &expire_logs_days, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, 99, 0, 1, 0},
++  { "flush_time", OPT_FLUSH_TIME,
++    "A dedicated thread is created to flush all tables at the given interval.",
++    &flush_time, &flush_time, 0, GET_ULONG, REQUIRED_ARG,
++    FLUSH_TIME, 0, LONG_TIMEOUT, 0, 1, 0},
++  { "ft_boolean_syntax", OPT_FT_BOOLEAN_SYNTAX,
++    "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE).",
++    0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  { "ft_max_word_len", OPT_FT_MAX_WORD_LEN,
++    "The maximum length of the word to be included in a FULLTEXT index. "
++    "Note: FULLTEXT indexes must be rebuilt after changing this variable.",
++    &ft_max_word_len, &ft_max_word_len, 0, GET_ULONG,
++    REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0},
++  { "ft_min_word_len", OPT_FT_MIN_WORD_LEN,
++    "The minimum length of the word to be included in a FULLTEXT index. "
++    "Note: FULLTEXT indexes must be rebuilt after changing this variable.",
++    &ft_min_word_len, &ft_min_word_len, 0, GET_ULONG,
++    REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0},
++  { "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT,
++    "Number of best matches to use for query expansion.",
++    &ft_query_expansion_limit, &ft_query_expansion_limit, 0, GET_ULONG,
++    REQUIRED_ARG, 20, 0, 1000, 0, 1, 0},
++  { "ft_stopword_file", OPT_FT_STOPWORD_FILE,
++    "Use stopwords from this file instead of built-in list.",
++    &ft_stopword_file, &ft_stopword_file, 0, GET_STR,
++    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN,
++    "The maximum length of the result of function group_concat.",
++    &global_system_variables.group_concat_max_len,
++    &max_system_variables.group_concat_max_len, 0, GET_ULONG,
++    REQUIRED_ARG, 1024, 4, ULONG_MAX, 0, 1, 0},
++  {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT,
++   "The number of seconds the server waits for activity on an interactive "
++   "connection before closing it.",
++   &global_system_variables.net_interactive_timeout,
++   &max_system_variables.net_interactive_timeout, 0,
++   GET_ULONG, REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
++  {"join_buffer_size", OPT_JOIN_BUFF_SIZE,
++   "The size of the buffer that is used for full joins.",
++   &global_system_variables.join_buff_size,
++   &max_system_variables.join_buff_size, 0, GET_ULONG,
++   REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ULONG_MAX,
++   MALLOC_OVERHEAD, IO_SIZE, 0},
++  {"keep_files_on_create", OPT_KEEP_FILES_ON_CREATE,
++   "Don't overwrite stale .MYD and .MYI even if no directory is specified.",
++   &global_system_variables.keep_files_on_create,
++   &max_system_variables.keep_files_on_create,
++   0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
++  {"key_buffer_size", OPT_KEY_BUFFER_SIZE,
++   "The size of the buffer used for index blocks for MyISAM tables. Increase "
++   "this to get better index handling (for all reads and multiple writes) to "
++   "as much as you can afford; 1GB on a 4GB machine that mainly runs MySQL is "
++   "quite common.",
++   &dflt_key_cache_var.param_buff_size, NULL, NULL, (GET_ULL | GET_ASK_ADDR),
++   REQUIRED_ARG, KEY_CACHE_SIZE, 0, SIZE_T_MAX, MALLOC_OVERHEAD,
++   IO_SIZE, 0},
++  {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
++   "This characterizes the number of hits a hot block has to be untouched "
++   "until it is considered aged enough to be downgraded to a warm block. "
++   "This specifies the percentage ratio of that number of hits to the total "
++   "number of blocks in key cache.",
++   &dflt_key_cache_var.param_age_threshold, NULL, NULL,
++   (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, 300, 100, ULONG_MAX, 0, 100, 0},
++  {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
++   "The default size of key cache blocks.",
++   &dflt_key_cache_var.param_block_size, NULL, NULL, (GET_ULONG | GET_ASK_ADDR),
++   REQUIRED_ARG, KEY_CACHE_BLOCK_SIZE, 512, 1024 * 16, 0, 512, 0},
++  {"key_cache_division_limit", OPT_KEY_CACHE_DIVISION_LIMIT,
++   "The minimum percentage of warm blocks in key cache.",
++   &dflt_key_cache_var.param_division_limit, NULL, NULL,
++   (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100, 1, 100, 0, 1, 0},
++  {"long_query_time", OPT_LONG_QUERY_TIME,
++   "Log all queries that have taken more than long_query_time seconds to "
++   "execute. The argument will be treated as a decimal value with "
++   "microsecond precision.",
++   &long_query_time, &long_query_time, 0, GET_DOUBLE,
++   REQUIRED_ARG, 10, 0, LONG_TIMEOUT, 0, 0, 0},
++  {"lower_case_table_names", OPT_LOWER_CASE_TABLE_NAMES,
++   "If set to 1, table names are stored in lowercase on disk and table names "
++   "will be case-insensitive.  Should be set to 2 if you are using a case-"
++   "insensitive file system.",
++   &lower_case_table_names, &lower_case_table_names, 0, GET_UINT, OPT_ARG,
++#ifdef FN_NO_CASE_SENCE
++    1
++#else
++    0
++#endif
++   , 0, 2, 0, 1, 0},
++  {"max_allowed_packet", OPT_MAX_ALLOWED_PACKET,
++   "The maximum packet length to send to or receive from server.",
++   &global_system_variables.max_allowed_packet,
++   &max_system_variables.max_allowed_packet, 0, GET_ULONG,
++   REQUIRED_ARG, 1024*1024L, 1024, 1024L*1024L*1024L, MALLOC_OVERHEAD, 1024, 0},
++  {"max_binlog_cache_size", OPT_MAX_BINLOG_CACHE_SIZE,
++   "Can be used to restrict the total size used to cache a multi-transaction query.",
++   &max_binlog_cache_size, &max_binlog_cache_size, 0,
++   GET_ULL, REQUIRED_ARG, ULONG_MAX, IO_SIZE, ULONGLONG_MAX, 0, IO_SIZE, 0},
++  {"max_binlog_size", OPT_MAX_BINLOG_SIZE,
++   "Binary log will be rotated automatically when the size exceeds this "
++   "value. Will also apply to relay logs if max_relay_log_size is 0. "
++   "The minimum value for this variable is 4096.",
++   &max_binlog_size, &max_binlog_size, 0, GET_ULONG,
++   REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0},
++  {"max_connect_errors", OPT_MAX_CONNECT_ERRORS,
++   "If there is more than this number of interrupted connections from a host "
++   "this host will be blocked from further connections.",
++   &max_connect_errors, &max_connect_errors, 0, GET_ULONG,
++   REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ULONG_MAX, 0, 1, 0},
++  // Default max_connections of 151 is larger than Apache's default max
++  // children, to avoid "too many connections" error in a common setup
++  {"max_connections", OPT_MAX_CONNECTIONS,
++   "The number of simultaneous clients allowed.", &max_connections,
++   &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 100000, 0, 1, 0},
++  {"max_delayed_threads", OPT_MAX_DELAYED_THREADS,
++   "Don't start more than this number of threads to handle INSERT DELAYED "
++   "statements. If set to zero, which means INSERT DELAYED is not used.",
++   &global_system_variables.max_insert_delayed_threads,
++   &max_system_variables.max_insert_delayed_threads,
++   0, GET_ULONG, REQUIRED_ARG, 20, 0, 16384, 0, 1, 0},
++  {"max_error_count", OPT_MAX_ERROR_COUNT,
++   "Max number of errors/warnings to store for a statement.",
++   &global_system_variables.max_error_count,
++   &max_system_variables.max_error_count,
++   0, GET_ULONG, REQUIRED_ARG, DEFAULT_ERROR_COUNT, 0, 65535, 0, 1, 0},
++  {"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE,
++   "Don't allow creation of heap tables bigger than this.",
++   &global_system_variables.max_heap_table_size,
++   &max_system_variables.max_heap_table_size, 0, GET_ULL,
++   REQUIRED_ARG, 16*1024*1024L, 16384, MAX_MEM_TABLE_SIZE,
++   MALLOC_OVERHEAD, 1024, 0},
++  {"max_join_size", OPT_MAX_JOIN_SIZE,
++   "Joins that are probably going to read more than max_join_size records return an error.",
++   &global_system_variables.max_join_size,
++   &max_system_variables.max_join_size, 0, GET_HA_ROWS, REQUIRED_ARG,
++   HA_POS_ERROR, 1, HA_POS_ERROR, 0, 1, 0},
++   {"max_length_for_sort_data", OPT_MAX_LENGTH_FOR_SORT_DATA,
++    "Max number of bytes in sorted records.",
++    &global_system_variables.max_length_for_sort_data,
++    &max_system_variables.max_length_for_sort_data, 0, GET_ULONG,
++    REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
++  {"max_long_data_size", OPT_MAX_LONG_DATA_SIZE,
++   "The maximum size of prepared statement parameter which can be provided "
++   "through mysql_send_long_data() API call. "
++   "Deprecated option; use max_allowed_packet instead.",
++   &max_long_data_size,
++   &max_long_data_size, 0, GET_ULONG,
++   REQUIRED_ARG, 1024*1024L, 1024, UINT_MAX32, MALLOC_OVERHEAD, 1, 0},
++  {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT,
++   "Maximum number of prepared statements in the server.",
++   &max_prepared_stmt_count, &max_prepared_stmt_count,
++   0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0},
++  {"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE,
++   "If non-zero: relay log will be rotated automatically when the size "
++   "exceeds this value; if zero (the default): when the size exceeds "
++   "max_binlog_size. 0 excepted, the minimum value for this variable is 4096.",
++   &max_relay_log_size, &max_relay_log_size, 0, GET_ULONG,
++   REQUIRED_ARG, 0L, 0L, 1024*1024L*1024L, 0, IO_SIZE, 0},
++  { "max_seeks_for_key", OPT_MAX_SEEKS_FOR_KEY,
++    "Limit assumed max number of seeks when looking up rows based on a key.",
++    &global_system_variables.max_seeks_for_key,
++    &max_system_variables.max_seeks_for_key, 0, GET_ULONG,
++    REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0 },
++  {"max_sort_length", OPT_MAX_SORT_LENGTH,
++   "The number of bytes to use when sorting BLOB or TEXT values (only the "
++   "first max_sort_length bytes of each value are used; the rest are ignored).",
++   &global_system_variables.max_sort_length,
++   &max_system_variables.max_sort_length, 0, GET_ULONG,
++   REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
++  {"max_sp_recursion_depth", OPT_MAX_SP_RECURSION_DEPTH,
++   "Maximum stored procedure recursion depth. (discussed with docs).",
++   &global_system_variables.max_sp_recursion_depth,
++   &max_system_variables.max_sp_recursion_depth, 0, GET_ULONG,
++   OPT_ARG, 0, 0, 255, 0, 1, 0 },
++  {"max_tmp_tables", OPT_MAX_TMP_TABLES,
++   "Maximum number of temporary tables a client can keep open at a time.",
++   &global_system_variables.max_tmp_tables,
++   &max_system_variables.max_tmp_tables, 0, GET_ULONG,
++   REQUIRED_ARG, 32, 1, ULONG_MAX, 0, 1, 0},
++  {"max_user_connections", OPT_MAX_USER_CONNECTIONS,
++   "The maximum number of active connections for a single user (0 = no limit).",
++   &max_user_connections, &max_user_connections, 0, GET_UINT,
++   REQUIRED_ARG, 0, 0, UINT_MAX, 0, 1, 0},
++  {"max_write_lock_count", OPT_MAX_WRITE_LOCK_COUNT,
++   "After this many write locks, allow some read locks to run in between.",
++   &max_write_lock_count, &max_write_lock_count, 0, GET_ULONG,
++   REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0},
++  {"min_examined_row_limit", OPT_MIN_EXAMINED_ROW_LIMIT,
++   "Don't log queries which examine less than min_examined_row_limit rows to file.",
++   &global_system_variables.min_examined_row_limit,
++   &max_system_variables.min_examined_row_limit, 0, GET_ULONG,
++  REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1L, 0},
++  {"multi_range_count", OPT_MULTI_RANGE_COUNT,
++   "Number of key ranges to request at once.",
++   &global_system_variables.multi_range_count,
++   &max_system_variables.multi_range_count, 0,
++   GET_ULONG, REQUIRED_ARG, 256, 1, ULONG_MAX, 0, 1, 0},
++  {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE,
++   "Block size to be used for MyISAM index pages.",
++   &opt_myisam_block_size, &opt_myisam_block_size, 0, GET_ULONG, REQUIRED_ARG,
++   MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH,
++   0, MI_MIN_KEY_BLOCK_LENGTH, 0},
++  {"myisam_data_pointer_size", OPT_MYISAM_DATA_POINTER_SIZE,
++   "Default pointer size to be used for MyISAM tables.",
++   &myisam_data_pointer_size,
++   &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG,
++   6, 2, 7, 0, 1, 0},
++  {"myisam_max_extra_sort_file_size", OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE,
++   "This is a deprecated option that does nothing anymore. "
++   "It will be removed in MySQL " VER_CELOSIA,
++   &global_system_variables.myisam_max_extra_sort_file_size,
++   &max_system_variables.myisam_max_extra_sort_file_size,
++   0, GET_ULL, REQUIRED_ARG, (ulonglong) INT_MAX32,
++   0, (ulonglong) MAX_FILE_SIZE, 0, 1, 0},
++  {"myisam_max_sort_file_size", OPT_MYISAM_MAX_SORT_FILE_SIZE,
++   "Don't use the fast sort index method to created index if the temporary "
++   "file would get bigger than this.",
++   &global_system_variables.myisam_max_sort_file_size,
++   &max_system_variables.myisam_max_sort_file_size, 0,
++   GET_ULL, REQUIRED_ARG, (longlong) LONG_MAX, 0, (ulonglong) MAX_FILE_SIZE,
++   0, 1024*1024, 0},
++  {"myisam_mmap_size", OPT_MYISAM_MMAP_SIZE,
++   "Can be used to restrict the total memory used for memory mmaping of myisam files",
++   &myisam_mmap_size, &myisam_mmap_size, 0,
++   GET_ULL, REQUIRED_ARG, SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 0, 1, 0},
++  {"myisam_repair_threads", OPT_MYISAM_REPAIR_THREADS,
++   "Specifies whether several threads should be used when repairing MyISAM "
++   "tables. For values > 1, one thread is used per index. The value of 1 "
++   "disables parallel repair.",
++   &global_system_variables.myisam_repair_threads,
++   &max_system_variables.myisam_repair_threads, 0,
++   GET_ULONG, REQUIRED_ARG, 1, 1, ULONG_MAX, 0, 1, 0},
++  {"myisam_sort_buffer_size", OPT_MYISAM_SORT_BUFFER_SIZE,
++   "The buffer that is allocated when sorting the index when doing a REPAIR "
++   "or when creating indexes with CREATE INDEX or ALTER TABLE.",
++   &global_system_variables.myisam_sort_buff_size,
++   &max_system_variables.myisam_sort_buff_size, 0,
++   GET_ULONG, REQUIRED_ARG, 8192 * 1024, 4096, ~0L, 0, 1, 0},
++  {"myisam_use_mmap", OPT_MYISAM_USE_MMAP,
++   "Use memory mapping for reading and writing MyISAM tables.",
++   &opt_myisam_use_mmap, &opt_myisam_use_mmap, 0, GET_BOOL, NO_ARG,
++   0, 0, 0, 0, 0, 0},
++  {"myisam_stats_method", OPT_MYISAM_STATS_METHOD,
++   "Specifies how MyISAM index statistics collection code should threat NULLs. "
++   "Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), "
++   "\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".",
++   &myisam_stats_method_str, &myisam_stats_method_str, 0,
++    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"net_buffer_length", OPT_NET_BUFFER_LENGTH,
++   "Buffer length for TCP/IP and socket communication.",
++   &global_system_variables.net_buffer_length,
++   &max_system_variables.net_buffer_length, 0, GET_ULONG,
++   REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0},
++  {"net_read_timeout", OPT_NET_READ_TIMEOUT,
++   "Number of seconds to wait for more data from a connection before aborting the read.",
++   &global_system_variables.net_read_timeout,
++   &max_system_variables.net_read_timeout, 0, GET_ULONG,
++   REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
++  {"net_retry_count", OPT_NET_RETRY_COUNT,
++   "If a read on a communication port is interrupted, retry this many times before giving up.",
++   &global_system_variables.net_retry_count,
++   &max_system_variables.net_retry_count,0,
++   GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ULONG_MAX, 0, 1, 0},
++  {"net_write_timeout", OPT_NET_WRITE_TIMEOUT,
++   "Number of seconds to wait for a block to be written to a connection before "
++   "aborting the write.",
++   &global_system_variables.net_write_timeout,
++   &max_system_variables.net_write_timeout, 0, GET_ULONG,
++   REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
++  { "old", OPT_OLD_MODE, "Use compatible behavior.", 
++    &global_system_variables.old_mode,
++    &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, 
++    0, 0, 0, 0, 0, 0},
++  {"open_files_limit", OPT_OPEN_FILES_LIMIT,
++   "If this is not 0, then mysqld will use this value to reserve file "
++   "descriptors to use with setrlimit(). If this value is 0 then mysqld "
++   "will reserve max_connections*5 or max_connections + table_cache*2 "
++   "(whichever is larger) number of files.",
++   &open_files_limit, &open_files_limit, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0},
++  {"optimizer_prune_level", OPT_OPTIMIZER_PRUNE_LEVEL,
++   "Controls the heuristic(s) applied during query optimization to prune "
++   "less-promising partial plans from the optimizer search space. Meaning: "
++   "0 - do not apply any heuristic, thus perform exhaustive search; 1 - "
++   "prune plans based on number of retrieved rows.",
++   &global_system_variables.optimizer_prune_level,
++   &max_system_variables.optimizer_prune_level,
++   0, GET_ULONG, OPT_ARG, 1, 0, 1, 0, 1, 0},
++  {"optimizer_search_depth", OPT_OPTIMIZER_SEARCH_DEPTH,
++   "Maximum depth of search performed by the query optimizer. Values larger "
++   "than the number of relations in a query result in better query plans, "
++   "but take longer to compile a query. Smaller values than the number of "
++   "tables in a relation result in faster optimization, but may produce "
++   "very bad query plans. If set to 0, the system will automatically pick "
++   "a reasonable value; if set to MAX_TABLES+2, the optimizer will switch "
++   "to the original find_best (used for testing/comparison).",
++   &global_system_variables.optimizer_search_depth,
++   &max_system_variables.optimizer_search_depth,
++   0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0},
++  {"optimizer_switch", OPT_OPTIMIZER_SWITCH,
++   "optimizer_switch=option=val[,option=val...], where option={index_merge, "
++   "index_merge_union, index_merge_sort_union, index_merge_intersection} and "
++   "val={on, off, default}.",
++   &optimizer_switch_str, &optimizer_switch_str, 0, GET_STR, REQUIRED_ARG,
++   /*OPTIMIZER_SWITCH_DEFAULT*/0, 0, 0, 0, 0, 0},
++  {"plugin_dir", OPT_PLUGIN_DIR,
++   "Directory for plugins.",
++   &opt_plugin_dir_ptr, &opt_plugin_dir_ptr, 0,
++   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"plugin-load", OPT_PLUGIN_LOAD,
++   "Optional semicolon-separated list of plugins to load, where each plugin is "
++   "identified as name=library, where name is the plugin name and library "
++   "is the plugin library in plugin_dir.",
++   &opt_plugin_load, &opt_plugin_load, 0,
++   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE,
++   "The size of the buffer that is allocated when preloading indexes.",
++   &global_system_variables.preload_buff_size,
++   &max_system_variables.preload_buff_size, 0, GET_ULONG,
++   REQUIRED_ARG, 32*1024L, 1024, 1024*1024*1024L, 0, 1, 0},
++  {"query_alloc_block_size", OPT_QUERY_ALLOC_BLOCK_SIZE,
++   "Allocation block size for query parsing and execution.",
++   &global_system_variables.query_alloc_block_size,
++   &max_system_variables.query_alloc_block_size, 0, GET_ULONG,
++   REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0},
++#ifdef HAVE_QUERY_CACHE
++  {"query_cache_limit", OPT_QUERY_CACHE_LIMIT,
++   "Don't cache results that are bigger than this.",
++   &query_cache_limit, &query_cache_limit, 0, GET_ULONG,
++   REQUIRED_ARG, 1024*1024L, 0, ULONG_MAX, 0, 1, 0},
++  {"query_cache_min_res_unit", OPT_QUERY_CACHE_MIN_RES_UNIT,
++   "Minimal size of unit in which space for results is allocated (last unit "
++   "will be trimmed after writing all result data).",
++   &query_cache_min_res_unit, &query_cache_min_res_unit,
++   0, GET_ULONG, REQUIRED_ARG, QUERY_CACHE_MIN_RESULT_DATA_SIZE,
++   0, ULONG_MAX, 0, 1, 0},
++#endif /*HAVE_QUERY_CACHE*/
++  {"query_cache_size", OPT_QUERY_CACHE_SIZE,
++   "The memory allocated to store results from old queries.",
++   &query_cache_size, &query_cache_size, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1024, 0},
++#ifdef HAVE_QUERY_CACHE
++  {"query_cache_type", OPT_QUERY_CACHE_TYPE,
++   "0 = OFF = Don't cache or retrieve results. 1 = ON = Cache all results "
++   "except SELECT SQL_NO_CACHE ... queries. 2 = DEMAND = Cache only SELECT "
++   "SQL_CACHE ... queries.", &global_system_variables.query_cache_type,
++   &max_system_variables.query_cache_type,
++   0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0},
++  {"query_cache_wlock_invalidate", OPT_QUERY_CACHE_WLOCK_INVALIDATE,
++   "Invalidate queries in query cache on LOCK for write.",
++   &global_system_variables.query_cache_wlock_invalidate,
++   &max_system_variables.query_cache_wlock_invalidate,
++   0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
++#endif /*HAVE_QUERY_CACHE*/
++  {"query_prealloc_size", OPT_QUERY_PREALLOC_SIZE,
++   "Persistent buffer for query parsing and execution.",
++   &global_system_variables.query_prealloc_size,
++   &max_system_variables.query_prealloc_size, 0, GET_ULONG,
++   REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE,
++   ULONG_MAX, 0, 1024, 0},
++  {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE,
++   "Allocation block size for storing ranges during optimization.",
++   &global_system_variables.range_alloc_block_size,
++   &max_system_variables.range_alloc_block_size, 0, GET_ULONG,
++   REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE, ULONG_MAX,
++   0, 1024, 0},
++  {"read_buffer_size", OPT_RECORD_BUFFER,
++   "Each thread that does a sequential scan allocates a buffer of this size "
++   "for each table it scans. If you do many sequential scans, you may want "
++   "to increase this value.", &global_system_variables.read_buff_size,
++   &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
++   128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE,
++   0},
++  {"read_only", OPT_READONLY,
++   "Make all non-temporary tables read-only, with the exception of replication "
++   "(slave) threads and users with the SUPER privilege.",
++   &opt_readonly,
++   &opt_readonly,
++   0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
++  {"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER,
++   "When reading rows in sorted order after a sort, the rows are read through "
++   "this buffer to avoid disk seeks. If not set, then it's set to the value of "
++   "record_buffer.",
++   &global_system_variables.read_rnd_buff_size,
++   &max_system_variables.read_rnd_buff_size, 0,
++   GET_ULONG, REQUIRED_ARG, 256*1024L, IO_SIZE*2+MALLOC_OVERHEAD,
++   INT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0},
++  {"record_buffer", OPT_RECORD_BUFFER_OLD,
++   "Alias for read_buffer_size. This variable is deprecated and will be removed in a future release.",
++   &global_system_variables.read_buff_size,
++   &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG,
++   128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0},
++#ifdef HAVE_REPLICATION
++  {"relay_log_purge", OPT_RELAY_LOG_PURGE,
++   "0 = do not purge relay logs. 1 = purge them as soon as they are no more needed.",
++   &relay_log_purge,
++   &relay_log_purge, 0, GET_BOOL, NO_ARG,
++   1, 0, 1, 0, 1, 0},
++  {"relay_log_space_limit", OPT_RELAY_LOG_SPACE_LIMIT,
++   "Maximum space to use for all relay logs.",
++   &relay_log_space_limit,
++   &relay_log_space_limit, 0, GET_ULL, REQUIRED_ARG, 0L, 0L,
++   (longlong) ULONG_MAX, 0, 1, 0},
++  {"slave_compressed_protocol", OPT_SLAVE_COMPRESSED_PROTOCOL,
++   "Use compression on master/slave protocol.",
++   &opt_slave_compressed_protocol,
++   &opt_slave_compressed_protocol,
++   0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
++  {"slave_net_timeout", OPT_SLAVE_NET_TIMEOUT,
++   "Number of seconds to wait for more data from a master/slave connection before aborting the read.",
++   &slave_net_timeout, &slave_net_timeout, 0,
++   GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0},
++  {"slave_transaction_retries", OPT_SLAVE_TRANS_RETRIES,
++   "Number of times the slave SQL thread will retry a transaction in case "
++   "it failed with a deadlock or elapsed lock wait timeout, "
++   "before giving up and stopping.",
++   &slave_trans_retries, &slave_trans_retries, 0,
++   GET_ULONG, REQUIRED_ARG, 10L, 0L, (longlong) ULONG_MAX, 0, 1, 0},
++#endif /* HAVE_REPLICATION */
++  {"slow_launch_time", OPT_SLOW_LAUNCH_TIME,
++   "If creating the thread takes longer than this value (in seconds), "
++   "the Slow_launch_threads counter will be incremented.",
++   &slow_launch_time, &slow_launch_time, 0, GET_ULONG,
++   REQUIRED_ARG, 2L, 0L, LONG_TIMEOUT, 0, 1, 0},
++  {"sort_buffer_size", OPT_SORT_BUFFER,
++   "Each thread that needs to do a sort allocates a buffer of this size.",
++   &global_system_variables.sortbuff_size,
++   &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
++   MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
++   1, 0},
++  {"sync-binlog", OPT_SYNC_BINLOG,
++   "Synchronously flush binary log to disk after every #th event. "
++   "Use 0 (default) to disable synchronous flushing.",
++   &sync_binlog_period, &sync_binlog_period, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1, 0},
++  {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.",
++   &opt_sync_frm, &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
++   0, 0, 0, 0},
++  {"table_cache", OPT_TABLE_OPEN_CACHE,
++   "Deprecated; use --table_open_cache instead.",
++   &table_cache_size, &table_cache_size, 0, GET_ULONG,
++   REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
++  {"table_definition_cache", OPT_TABLE_DEF_CACHE,
++   "The number of cached table definitions.",
++   &table_def_size, &table_def_size,
++   0, GET_ULONG, REQUIRED_ARG, TABLE_DEF_CACHE_DEFAULT, TABLE_DEF_CACHE_MIN,
++   512*1024L, 0, 1, 0},
++  {"table_open_cache", OPT_TABLE_OPEN_CACHE,
++   "The number of cached open tables.",
++   &table_cache_size, &table_cache_size, 0, GET_ULONG,
++   REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
++  {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT,
++   "Timeout in seconds to wait for a table level lock before returning an "
++   "error. Used only if the connection has active cursors.",
++   &table_lock_wait_timeout, &table_lock_wait_timeout,
++   0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
++  {"thread_cache_size", OPT_THREAD_CACHE_SIZE,
++   "How many threads we should keep in a cache for reuse.",
++   &thread_cache_size, &thread_cache_size, 0, GET_ULONG,
++   REQUIRED_ARG, 0, 0, 16384, 0, 1, 0},
++  {"thread_concurrency", OPT_THREAD_CONCURRENCY,
++   "Permits the application to give the threads system a hint for the "
++   "desired number of threads that should be run at the same time.",
++   &concurrency, &concurrency, 0, GET_ULONG, REQUIRED_ARG,
++   DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0},
++#if HAVE_POOL_OF_THREADS == 1
++  {"thread_pool_size", OPT_THREAD_CACHE_SIZE,
++   "How many threads we should create to handle query requests in case of "
++   "'thread_handling=pool-of-threads'.",
++   &thread_pool_size, &thread_pool_size, 0, GET_ULONG,
++   REQUIRED_ARG, 20, 1, 16384, 0, 1, 0},
++#endif
++  {"thread_stack", OPT_THREAD_STACK,
++   "The stack size for each thread.", &my_thread_stack_size,
++   &my_thread_stack_size, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK,
++   1024L*128L, ULONG_MAX, 0, 1024, 0},
++  { "time_format", OPT_TIME_FORMAT,
++    "The TIME format (for future).",
++    &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
++    &opt_date_time_formats[MYSQL_TIMESTAMP_TIME],
++    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"tmp_table_size", OPT_TMP_TABLE_SIZE,
++   "If an internal in-memory temporary table exceeds this size, MySQL will"
++   " automatically convert it to an on-disk MyISAM table.",
++   &global_system_variables.tmp_table_size,
++   &max_system_variables.tmp_table_size, 0, GET_ULL,
++   REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0},
++  {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
++   "Allocation block size for transactions to be stored in binary log.",
++   &global_system_variables.trans_alloc_block_size,
++   &max_system_variables.trans_alloc_block_size, 0, GET_ULONG,
++   REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0},
++  {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE,
++   "Persistent buffer for transactions to be stored in binary log.",
++   &global_system_variables.trans_prealloc_size,
++   &max_system_variables.trans_prealloc_size, 0, GET_ULONG,
++   REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ULONG_MAX, 0, 1024, 0},
++  {"thread_handling", OPT_THREAD_HANDLING,
++   "Define threads usage for handling queries: "
++   "one-thread-per-connection or no-threads.", 0, 0,
++   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
++  {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT,
++   "1 = YES = Don't issue an error message (warning only) if a VIEW without "
++   "presence of a key of the underlying table is used in queries with a "
++   "LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which "
++   "does not contain a key of the underlying table and the query uses a "
++   "LIMIT clause (usually get from GUI tools).",
++   &global_system_variables.updatable_views_with_limit,
++   &max_system_variables.updatable_views_with_limit,
++   0, GET_ULONG, REQUIRED_ARG, 1, 0, 1, 0, 1, 0},
++  {"wait_timeout", OPT_WAIT_TIMEOUT,
++   "The number of seconds the server waits for activity on a connection before closing it.",
++   &global_system_variables.net_wait_timeout,
++   &max_system_variables.net_wait_timeout, 0, GET_ULONG,
++   REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
++   0, 1, 0},
++  {"binlog-direct-non-transactional-updates", OPT_BINLOG_DIRECT_NON_TRANS_UPDATE,
++   "Causes updates to non-transactional engines using statement format to be "
++   "written directly to binary log. Before using this option, make sure that "
++   "there are no dependencies between transactional and non-transactional "
++   "tables such as in the statement INSERT INTO t_myisam SELECT * FROM "
++   "t_innodb; otherwise, slaves may diverge from the master.",
++   &global_system_variables.binlog_direct_non_trans_update,
++   &max_system_variables.binlog_direct_non_trans_update,
++   0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
++  {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
++};
++
++
++static int show_queries(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONGLONG;
++  var->value= (char *)&thd->query_id;
++  return 0;
++}
++
++
++static int show_net_compression(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_MY_BOOL;
++  var->value= (char *)&thd->net.compress;
++  return 0;
++}
++
++static int show_starttime(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (long) (thd->query_start() - server_start_time);
++  return 0;
++}
++
++#ifdef COMMUNITY_SERVER
++static int show_flushstatustime(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (long) (thd->query_start() - flush_status_time);
++  return 0;
++}
++#endif
++
++#ifdef HAVE_REPLICATION
++static int show_rpl_status(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_CHAR;
++  var->value= const_cast<char*>(rpl_status_type[(int)rpl_status]);
++  return 0;
++}
++
++static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_MY_BOOL;
++  pthread_mutex_lock(&LOCK_active_mi);
++  var->value= buff;
++  *((my_bool *)buff)= (my_bool) (active_mi && 
++                                 active_mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
++                                 active_mi->rli.slave_running);
++  pthread_mutex_unlock(&LOCK_active_mi);
++  return 0;
++}
++
++static int show_slave_retried_trans(THD *thd, SHOW_VAR *var, char *buff)
++{
++  /*
++    TODO: with multimaster, have one such counter per line in
++    SHOW SLAVE STATUS, and have the sum over all lines here.
++  */
++  pthread_mutex_lock(&LOCK_active_mi);
++  if (active_mi)
++  {
++    var->type= SHOW_LONG;
++    var->value= buff;
++    pthread_mutex_lock(&active_mi->rli.data_lock);
++    *((long *)buff)= (long)active_mi->rli.retried_trans;
++    pthread_mutex_unlock(&active_mi->rli.data_lock);
++  }
++  else
++    var->type= SHOW_UNDEF;
++  pthread_mutex_unlock(&LOCK_active_mi);
++  return 0;
++}
++#endif /* HAVE_REPLICATION */
++
++static int show_open_tables(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (long)cached_open_tables();
++  return 0;
++}
++
++static int show_prepared_stmt_count(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  pthread_mutex_lock(&LOCK_prepared_stmt_count);
++  *((long *)buff)= (long)prepared_stmt_count;
++  pthread_mutex_unlock(&LOCK_prepared_stmt_count);
++  return 0;
++}
++
++static int show_table_definitions(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (long)cached_table_definitions();
++  return 0;
++}
++
++#ifdef HAVE_OPENSSL
++/* Functions relying on CTX */
++static int show_ssl_ctx_sess_accept(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_accept(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_accept_good(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_accept_good(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_connect_good(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_connect_good(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_accept_renegotiate(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_connect_renegotiate(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_cb_hits(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_cb_hits(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_hits(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_hits(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_cache_full(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_cache_full(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_misses(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_misses(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_timeouts(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_number(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_connect(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_sess_get_cache_size(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  *((long *)buff)= (!ssl_acceptor_fd ? 0 :
++                     SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context));
++  return 0;
++}
++
++static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_CHAR;
++  if (!ssl_acceptor_fd)
++    var->value= const_cast<char*>("NONE");
++  else
++    switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context))
++    {
++    case SSL_SESS_CACHE_OFF:
++      var->value= const_cast<char*>("OFF"); break;
++    case SSL_SESS_CACHE_CLIENT:
++      var->value= const_cast<char*>("CLIENT"); break;
++    case SSL_SESS_CACHE_SERVER:
++      var->value= const_cast<char*>("SERVER"); break;
++    case SSL_SESS_CACHE_BOTH:
++      var->value= const_cast<char*>("BOTH"); break;
++    case SSL_SESS_CACHE_NO_AUTO_CLEAR:
++      var->value= const_cast<char*>("NO_AUTO_CLEAR"); break;
++    case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
++      var->value= const_cast<char*>("NO_INTERNAL_LOOKUP"); break;
++    default:
++      var->value= const_cast<char*>("Unknown"); break;
++    }
++  return 0;
++}
++
++/*
++   Functions relying on SSL 
++   Note: In the show_ssl_* functions, we need to check if we have a
++         valid vio-object since this isn't always true, specifically
++         when session_status or global_status is requested from
++         inside an Event.
++ */
++static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_CHAR;
++  if( thd->vio_ok() && thd->net.vio->ssl_arg )
++    var->value= const_cast<char*>(SSL_get_version((SSL*) thd->net.vio->ssl_arg));
++  else
++    var->value= (char *)"";
++  return 0;
++}
++
++static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  if( thd->vio_ok() && thd->net.vio->ssl_arg )
++    *((long *)buff)= (long)SSL_session_reused((SSL*) thd->net.vio->ssl_arg);
++  else
++    *((long *)buff)= 0;
++  return 0;
++}
++
++static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  if( thd->vio_ok() && thd->net.vio->ssl_arg )
++    *((long *)buff)= (long)SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg);
++  else
++    *((long *)buff)= 0;
++  return 0;
++}
++
++static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  if( thd->net.vio && thd->net.vio->ssl_arg )
++    *((long *)buff)= (long)SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg);
++  else
++    *((long *)buff)= 0;
++  return 0;
++}
++
++static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_LONG;
++  var->value= buff;
++  if( thd->vio_ok() && thd->net.vio->ssl_arg )
++    *((long *)buff)= (long)SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg);
++  else
++    *((long *)buff)= 0;
++  return 0;
++}
++
++static int show_ssl_get_cipher(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_CHAR;
++  if( thd->vio_ok() && thd->net.vio->ssl_arg )
++    var->value= const_cast<char*>(SSL_get_cipher((SSL*) thd->net.vio->ssl_arg));
++  else
++    var->value= (char *)"";
++  return 0;
++}
++
++static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff)
++{
++  var->type= SHOW_CHAR;
++  var->value= buff;
++  if (thd->vio_ok() && thd->net.vio->ssl_arg)
++  {
++    int i;
++    const char *p;
++    char *end= buff + SHOW_VAR_FUNC_BUFF_SIZE;
++    for (i=0; (p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i)) &&
++               buff < end; i++)
++    {
++      buff= strnmov(buff, p, end-buff-1);
++      *buff++= ':';
++    }
++    if (i)
++      buff--;
++  }
++  *buff=0;
++  return 0;
++}
++
++#endif /* HAVE_OPENSSL */
++
++
++/*
++  Variables shown by SHOW STATUS in alphabetical order
++*/
++
++SHOW_VAR status_vars[]= {
++  {"Aborted_clients",          (char*) &aborted_threads,        SHOW_LONG},
++  {"Aborted_connects",         (char*) &aborted_connects,       SHOW_LONG},
++  {"Binlog_cache_disk_use",    (char*) &binlog_cache_disk_use,  SHOW_LONG},
++  {"Binlog_cache_use",         (char*) &binlog_cache_use,       SHOW_LONG},
++  {"Bytes_received",           (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONGLONG_STATUS},
++  {"Bytes_sent",               (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS},
++  {"Com",                      (char*) com_status_vars, SHOW_ARRAY},
++  {"Compression",              (char*) &show_net_compression, SHOW_FUNC},
++  {"Connections",              (char*) &thread_id,              SHOW_LONG_NOFLUSH},
++  {"Created_tmp_disk_tables",  (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS},
++  {"Created_tmp_files",	       (char*) &my_tmp_file_created,	SHOW_LONG},
++  {"Created_tmp_tables",       (char*) offsetof(STATUS_VAR, created_tmp_tables), SHOW_LONG_STATUS},
++  {"Delayed_errors",           (char*) &delayed_insert_errors,  SHOW_LONG},
++  {"Delayed_insert_threads",   (char*) &delayed_insert_threads, SHOW_LONG_NOFLUSH},
++  {"Delayed_writes",           (char*) &delayed_insert_writes,  SHOW_LONG},
++  {"Flush_commands",           (char*) &refresh_version,        SHOW_LONG_NOFLUSH},
++  {"Handler_commit",           (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
++  {"Handler_delete",           (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS},
++  {"Handler_discover",         (char*) offsetof(STATUS_VAR, ha_discover_count), SHOW_LONG_STATUS},
++  {"Handler_prepare",          (char*) offsetof(STATUS_VAR, ha_prepare_count),  SHOW_LONG_STATUS},
++  {"Handler_read_first",       (char*) offsetof(STATUS_VAR, ha_read_first_count), SHOW_LONG_STATUS},
++  {"Handler_read_key",         (char*) offsetof(STATUS_VAR, ha_read_key_count), SHOW_LONG_STATUS},
++  {"Handler_read_next",        (char*) offsetof(STATUS_VAR, ha_read_next_count), SHOW_LONG_STATUS},
++  {"Handler_read_prev",        (char*) offsetof(STATUS_VAR, ha_read_prev_count), SHOW_LONG_STATUS},
++  {"Handler_read_rnd",         (char*) offsetof(STATUS_VAR, ha_read_rnd_count), SHOW_LONG_STATUS},
++  {"Handler_read_rnd_next",    (char*) offsetof(STATUS_VAR, ha_read_rnd_next_count), SHOW_LONG_STATUS},
++  {"Handler_rollback",         (char*) offsetof(STATUS_VAR, ha_rollback_count), SHOW_LONG_STATUS},
++  {"Handler_savepoint",        (char*) offsetof(STATUS_VAR, ha_savepoint_count), SHOW_LONG_STATUS},
++  {"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS},
++  {"Handler_update",           (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
++  {"Handler_write",            (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
++  {"Key_blocks_not_flushed",   (char*) offsetof(KEY_CACHE, global_blocks_changed), SHOW_KEY_CACHE_LONG},
++  {"Key_blocks_unused",        (char*) offsetof(KEY_CACHE, blocks_unused), SHOW_KEY_CACHE_LONG},
++  {"Key_blocks_used",          (char*) offsetof(KEY_CACHE, blocks_used), SHOW_KEY_CACHE_LONG},
++  {"Key_read_requests",        (char*) offsetof(KEY_CACHE, global_cache_r_requests), SHOW_KEY_CACHE_LONGLONG},
++  {"Key_reads",                (char*) offsetof(KEY_CACHE, global_cache_read), SHOW_KEY_CACHE_LONGLONG},
++  {"Key_write_requests",       (char*) offsetof(KEY_CACHE, global_cache_w_requests), SHOW_KEY_CACHE_LONGLONG},
++  {"Key_writes",               (char*) offsetof(KEY_CACHE, global_cache_write), SHOW_KEY_CACHE_LONGLONG},
++  {"Last_query_cost",          (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
++  {"Max_used_connections",     (char*) &max_used_connections,  SHOW_LONG},
++  {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use,    SHOW_LONG_NOFLUSH},
++  {"Open_files",               (char*) &my_file_opened,         SHOW_LONG_NOFLUSH},
++  {"Open_streams",             (char*) &my_stream_opened,       SHOW_LONG_NOFLUSH},
++  {"Open_table_definitions",   (char*) &show_table_definitions, SHOW_FUNC},
++  {"Open_tables",              (char*) &show_open_tables,       SHOW_FUNC},
++  {"Opened_files",             (char*) &my_file_total_opened, SHOW_LONG_NOFLUSH},
++  {"Opened_tables",            (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
++  {"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS},
++  {"Prepared_stmt_count",      (char*) &show_prepared_stmt_count, SHOW_FUNC},
++#ifdef HAVE_QUERY_CACHE
++  {"Qcache_free_blocks",       (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
++  {"Qcache_free_memory",       (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},
++  {"Qcache_hits",              (char*) &query_cache.hits,       SHOW_LONG},
++  {"Qcache_inserts",           (char*) &query_cache.inserts,    SHOW_LONG},
++  {"Qcache_lowmem_prunes",     (char*) &query_cache.lowmem_prunes, SHOW_LONG},
++  {"Qcache_not_cached",        (char*) &query_cache.refused,    SHOW_LONG},
++  {"Qcache_queries_in_cache",  (char*) &query_cache.queries_in_cache, SHOW_LONG_NOFLUSH},
++  {"Qcache_total_blocks",      (char*) &query_cache.total_blocks, SHOW_LONG_NOFLUSH},
++#endif /*HAVE_QUERY_CACHE*/
++  {"Queries",                  (char*) &show_queries,            SHOW_FUNC},
++  {"Questions",                (char*) offsetof(STATUS_VAR, questions), SHOW_LONG_STATUS},
++#ifdef HAVE_REPLICATION
++  {"Rpl_status",               (char*) &show_rpl_status,          SHOW_FUNC},
++#endif
++  {"Select_full_join",         (char*) offsetof(STATUS_VAR, select_full_join_count), SHOW_LONG_STATUS},
++  {"Select_full_range_join",   (char*) offsetof(STATUS_VAR, select_full_range_join_count), SHOW_LONG_STATUS},
++  {"Select_range",             (char*) offsetof(STATUS_VAR, select_range_count), SHOW_LONG_STATUS},
++  {"Select_range_check",       (char*) offsetof(STATUS_VAR, select_range_check_count), SHOW_LONG_STATUS},
++  {"Select_scan",	       (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS},
++  {"Slave_open_temp_tables",   (char*) &slave_open_temp_tables, SHOW_LONG},
++#ifdef HAVE_REPLICATION
++  {"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC},
++  {"Slave_running",            (char*) &show_slave_running,     SHOW_FUNC},
++#endif
++  {"Slow_launch_threads",      (char*) &slow_launch_threads,    SHOW_LONG},
++  {"Slow_queries",             (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS},
++  {"Sort_merge_passes",	       (char*) offsetof(STATUS_VAR, filesort_merge_passes), SHOW_LONG_STATUS},
++  {"Sort_range",	       (char*) offsetof(STATUS_VAR, filesort_range_count), SHOW_LONG_STATUS},
++  {"Sort_rows",		       (char*) offsetof(STATUS_VAR, filesort_rows), SHOW_LONG_STATUS},
++  {"Sort_scan",		       (char*) offsetof(STATUS_VAR, filesort_scan_count), SHOW_LONG_STATUS},
++#ifdef HAVE_OPENSSL
++  {"Ssl_accept_renegotiates",  (char*) &show_ssl_ctx_sess_accept_renegotiate, SHOW_FUNC},
++  {"Ssl_accepts",              (char*) &show_ssl_ctx_sess_accept, SHOW_FUNC},
++  {"Ssl_callback_cache_hits",  (char*) &show_ssl_ctx_sess_cb_hits, SHOW_FUNC},
++  {"Ssl_cipher",               (char*) &show_ssl_get_cipher, SHOW_FUNC},
++  {"Ssl_cipher_list",          (char*) &show_ssl_get_cipher_list, SHOW_FUNC},
++  {"Ssl_client_connects",      (char*) &show_ssl_ctx_sess_connect, SHOW_FUNC},
++  {"Ssl_connect_renegotiates", (char*) &show_ssl_ctx_sess_connect_renegotiate, SHOW_FUNC},
++  {"Ssl_ctx_verify_depth",     (char*) &show_ssl_ctx_get_verify_depth, SHOW_FUNC},
++  {"Ssl_ctx_verify_mode",      (char*) &show_ssl_ctx_get_verify_mode, SHOW_FUNC},
++  {"Ssl_default_timeout",      (char*) &show_ssl_get_default_timeout, SHOW_FUNC},
++  {"Ssl_finished_accepts",     (char*) &show_ssl_ctx_sess_accept_good, SHOW_FUNC},
++  {"Ssl_finished_connects",    (char*) &show_ssl_ctx_sess_connect_good, SHOW_FUNC},
++  {"Ssl_session_cache_hits",   (char*) &show_ssl_ctx_sess_hits, SHOW_FUNC},
++  {"Ssl_session_cache_misses", (char*) &show_ssl_ctx_sess_misses, SHOW_FUNC},
++  {"Ssl_session_cache_mode",   (char*) &show_ssl_ctx_get_session_cache_mode, SHOW_FUNC},
++  {"Ssl_session_cache_overflows", (char*) &show_ssl_ctx_sess_cache_full, SHOW_FUNC},
++  {"Ssl_session_cache_size",   (char*) &show_ssl_ctx_sess_get_cache_size, SHOW_FUNC},
++  {"Ssl_session_cache_timeouts", (char*) &show_ssl_ctx_sess_timeouts, SHOW_FUNC},
++  {"Ssl_sessions_reused",      (char*) &show_ssl_session_reused, SHOW_FUNC},
++  {"Ssl_used_session_cache_entries",(char*) &show_ssl_ctx_sess_number, SHOW_FUNC},
++  {"Ssl_verify_depth",         (char*) &show_ssl_get_verify_depth, SHOW_FUNC},
++  {"Ssl_verify_mode",          (char*) &show_ssl_get_verify_mode, SHOW_FUNC},
++  {"Ssl_version",              (char*) &show_ssl_get_version, SHOW_FUNC},
++#endif /* HAVE_OPENSSL */
++  {"Table_locks_immediate",    (char*) &locks_immediate,        SHOW_LONG},
++  {"Table_locks_waited",       (char*) &locks_waited,           SHOW_LONG},
++#ifdef HAVE_MMAP
++  {"Tc_log_max_pages_used",    (char*) &tc_log_max_pages_used,  SHOW_LONG},
++  {"Tc_log_page_size",         (char*) &tc_log_page_size,       SHOW_LONG},
++  {"Tc_log_page_waits",        (char*) &tc_log_page_waits,      SHOW_LONG},
++#endif
++  {"Threads_cached",           (char*) &cached_thread_count,    SHOW_LONG_NOFLUSH},
++  {"Threads_connected",        (char*) &thread_count,           SHOW_INT},
++  {"Threads_created",	       (char*) &thread_created,		SHOW_LONG_NOFLUSH},
++  {"Threads_running",          (char*) &thread_running,         SHOW_INT},
++  {"Uptime",                   (char*) &show_starttime,         SHOW_FUNC},
++#ifdef COMMUNITY_SERVER
++  {"Uptime_since_flush_status",(char*) &show_flushstatustime,   SHOW_FUNC},
++#endif
++  {NullS, NullS, SHOW_LONG}
++};
++
++#ifndef EMBEDDED_LIBRARY
++static void print_version(void)
++{
++  set_server_version();
++  /*
++    Note: the instance manager keys off the string 'Ver' so it can find the
++    version from the output of 'mysqld --version', so don't change it!
++  */
++  printf("%s  Ver %s for %s on %s (%s)\n",my_progname,
++	 server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT);
++}
++
++static void usage(void)
++{
++  if (!(default_charset_info= get_charset_by_csname(default_character_set_name,
++					           MY_CS_PRIMARY,
++						   MYF(MY_WME))))
++    exit(1);
++  if (!default_collation_name)
++    default_collation_name= (char*) default_charset_info->name;
++  print_version();
++  puts("\
++Copyright (C) 2000-2008 MySQL AB, by Monty and others.\n\
++Copyright (C) 2008 Sun Microsystems, Inc.\n\
++This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\
++and you are welcome to modify and redistribute it under the GPL license\n\n\
++Starts the MySQL database server.\n");
++
++  printf("Usage: %s [OPTIONS]\n", my_progname);
++  if (!opt_verbose)
++    puts("\nFor more help options (several pages), use mysqld --verbose --help.");
++  else
++  {
++#ifdef __WIN__
++  puts("NT and Win32 specific options:\n\
++  --install                     Install the default service (NT).\n\
++  --install-manual              Install the default service started manually (NT).\n\
++  --install service_name        Install an optional service (NT).\n\
++  --install-manual service_name Install an optional service started manually (NT).\n\
++  --remove                      Remove the default service from the service list (NT).\n\
++  --remove service_name         Remove the service_name from the service list (NT).\n\
++  --enable-named-pipe           Only to be used for the default server (NT).\n\
++  --standalone                  Dummy option to start as a standalone server (NT).\
++");
++  puts("");
++#endif
++  print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
++  puts("");
++  set_ports();
++
++  /* Print out all the options including plugin supplied options */
++  my_print_help_inc_plugins(my_long_options, sizeof(my_long_options)/sizeof(my_option));
++
++  if (! plugins_are_initialized)
++  {
++    puts("\n\
++Plugins have parameters that are not reflected in this list\n\
++because execution stopped before plugins were initialized.");
++  }
++
++  puts("\n\
++To see what values a running MySQL server is using, type\n\
++'mysqladmin variables' instead of 'mysqld --verbose --help'.");
++  }
++}
++#endif /*!EMBEDDED_LIBRARY*/
++
++
++/**
++  Initialize all MySQL global variables to default values.
++
++  We don't need to set numeric variables referred to in my_long_options
++  as these are initialized by my_getopt.
++
++  @note
++    The reason to set a lot of global variables to zero is to allow one to
++    restart the embedded server with a clean environment.
++    It's also needed on some exotic platforms where global variables are
++    not set to 0 when a program starts.
++
++    We don't need to set numeric variables referred to in my_long_options
++    as these are initialized by my_getopt.
++*/
++
++static int mysql_init_variables(void)
++{
++  int error;
++  /* Things reset to zero */
++  opt_skip_slave_start= opt_reckless_slave = 0;
++  mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0;
++  myisam_test_invalid_symlink= test_if_data_home_dir;
++  opt_log= opt_slow_log= 0;
++  opt_update_log= 0;
++  log_output_options= find_bit_type(log_output_str, &log_output_typelib);
++  opt_bin_log= 0;
++  opt_disable_networking= opt_skip_show_db=0;
++  opt_skip_name_resolve= 0;
++  opt_ignore_builtin_innodb= 0;
++  opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0;
++  opt_tc_log_file= (char *)"tc.log";      // no hostname in tc_log file name !
++  opt_secure_auth= 0;
++  opt_secure_file_priv= 0;
++  opt_bootstrap= opt_myisam_log= 0;
++  mqh_used= 0;
++  segfaulted= kill_in_progress= 0;
++  cleanup_done= 0;
++  defaults_argc= 0;
++  defaults_argv= 0;
++  server_id_supplied= 0;
++  test_flags= select_errors= dropping_tables= ha_open_options=0;
++  thread_count= thread_running= kill_cached_threads= wake_thread=0;
++  slave_open_temp_tables= 0;
++  cached_thread_count= 0;
++  opt_endinfo= using_udf_functions= 0;
++  opt_using_transactions= 0;
++  abort_loop= select_thread_in_use= signal_thread_in_use= 0;
++  ready_to_exit= shutdown_in_progress= grant_option= 0;
++  aborted_threads= aborted_connects= 0;
++  delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0;
++  delayed_insert_errors= thread_created= 0;
++  specialflag= 0;
++  binlog_cache_use=  binlog_cache_disk_use= 0;
++  max_used_connections= slow_launch_threads = 0;
++  mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0;
++  prepared_stmt_count= 0;
++  errmesg= 0;
++  mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS;
++  bzero((uchar*) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list));
++  bzero((char *) &global_status_var, sizeof(global_status_var));
++  opt_large_pages= 0;
++#if defined(ENABLED_DEBUG_SYNC)
++  opt_debug_sync_timeout= 0;
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++  key_map_full.set_all();
++
++  /* Character sets */
++  system_charset_info= &my_charset_utf8_general_ci;
++  files_charset_info= &my_charset_utf8_general_ci;
++  national_charset_info= &my_charset_utf8_general_ci;
++  table_alias_charset= &my_charset_bin;
++  character_set_filesystem= &my_charset_bin;
++
++  opt_date_time_formats[0]= opt_date_time_formats[1]= opt_date_time_formats[2]= 0;
++
++  /* Things with default values that are not zero */
++  delay_key_write_options= (uint) DELAY_KEY_WRITE_ON;
++  slave_exec_mode_options= find_bit_type_or_exit(slave_exec_mode_str,
++                                                 &slave_exec_mode_typelib,
++                                                 NULL, &error);
++  /* Default mode string must not yield an error. */
++  DBUG_ASSERT(!error);
++  if (error)
++    return 1;
++  opt_specialflag= SPECIAL_ENGLISH;
++  unix_sock= ip_sock= INVALID_SOCKET;
++  mysql_home_ptr= mysql_home;
++  pidfile_name_ptr= pidfile_name;
++  log_error_file_ptr= log_error_file;
++  language_ptr= language;
++  mysql_data_home= mysql_real_data_home;
++  thd_startup_options= (OPTION_AUTO_IS_NULL | OPTION_BIN_LOG |
++                        OPTION_QUOTE_SHOW_CREATE | OPTION_SQL_NOTES);
++  protocol_version= PROTOCOL_VERSION;
++  what_to_log= ~ (1L << (uint) COM_TIME);
++  refresh_version= 1L;	/* Increments on each reload */
++  global_query_id= thread_id= 1L;
++  strmov(server_version, MYSQL_SERVER_VERSION);
++  myisam_recover_options_str= sql_mode_str= "OFF";
++  myisam_stats_method_str= "nulls_unequal";
++  my_bind_addr = htonl(INADDR_ANY);
++  threads.empty();
++  thread_cache.empty();
++  key_caches.empty();
++  if (!(dflt_key_cache= get_or_create_key_cache(default_key_cache_base.str,
++                                                default_key_cache_base.length)))
++  {
++    sql_print_error("Cannot allocate the keycache");
++    return 1;
++  }
++  /* set key_cache_hash.default_value = dflt_key_cache */
++  multi_keycache_init();
++
++  /* Set directory paths */
++  strmake(language, LANGUAGE, sizeof(language)-1);
++  strmake(mysql_real_data_home, get_relative_path(MYSQL_DATADIR),
++	  sizeof(mysql_real_data_home)-1);
++  mysql_data_home_buff[0]=FN_CURLIB;	// all paths are relative from here
++  mysql_data_home_buff[1]=0;
++  mysql_data_home_len= 2;
++
++  /* Replication parameters */
++  master_user= (char*) "test";
++  master_password= master_host= 0;
++  master_info_file= (char*) "master.info",
++    relay_log_info_file= (char*) "relay-log.info";
++  master_ssl_key= master_ssl_cert= master_ssl_ca=
++    master_ssl_capath= master_ssl_cipher= 0;
++  report_user= report_password = report_host= 0;	/* TO BE DELETED */
++  opt_relay_logname= opt_relaylog_index_name= 0;
++
++  /* Variables in libraries */
++  charsets_dir= 0;
++  default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME;
++  default_collation_name= compiled_default_collation_name;
++  sys_charset_system.value= (char*) system_charset_info->csname;
++  character_set_filesystem_name= (char*) "binary";
++  lc_time_names_name= (char*) "en_US";
++  /* Set default values for some option variables */
++  default_storage_engine_str= (char*) "MyISAM";
++  global_system_variables.table_plugin= NULL;
++  global_system_variables.tx_isolation= ISO_REPEATABLE_READ;
++  global_system_variables.select_limit= (ulonglong) HA_POS_ERROR;
++  max_system_variables.select_limit=    (ulonglong) HA_POS_ERROR;
++  global_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
++  max_system_variables.max_join_size=   (ulonglong) HA_POS_ERROR;
++  global_system_variables.old_passwords= 0;
++  global_system_variables.old_alter_table= 0;
++  global_system_variables.binlog_format= BINLOG_FORMAT_UNSPEC;
++  /*
++    Default behavior for 4.1 and 5.0 is to treat NULL values as unequal
++    when collecting index statistics for MyISAM tables.
++  */
++  global_system_variables.myisam_stats_method= MI_STATS_METHOD_NULLS_NOT_EQUAL;
++  
++  global_system_variables.optimizer_switch= OPTIMIZER_SWITCH_DEFAULT;
++  /* Variables that depends on compile options */
++#ifndef DBUG_OFF
++  default_dbug_option=IF_WIN("d:t:i:O,\\mysqld.trace",
++			     "d:t:i:o,/tmp/mysqld.trace");
++#endif
++  opt_error_log= IF_WIN(1,0);
++#ifdef COMMUNITY_SERVER
++    have_community_features = SHOW_OPTION_YES;
++#else
++    have_community_features = SHOW_OPTION_NO;
++#endif
++  global_system_variables.ndb_index_stat_enable=FALSE;
++  max_system_variables.ndb_index_stat_enable=TRUE;
++  global_system_variables.ndb_index_stat_cache_entries=32;
++  max_system_variables.ndb_index_stat_cache_entries=~0L;
++  global_system_variables.ndb_index_stat_update_freq=20;
++  max_system_variables.ndb_index_stat_update_freq=~0L;
++#ifdef HAVE_OPENSSL
++  have_ssl=SHOW_OPTION_YES;
++#else
++  have_ssl=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_BROKEN_REALPATH
++  have_symlink=SHOW_OPTION_NO;
++#else
++  have_symlink=SHOW_OPTION_YES;
++#endif
++#ifdef HAVE_DLOPEN
++  have_dlopen=SHOW_OPTION_YES;
++#else
++  have_dlopen=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_QUERY_CACHE
++  have_query_cache=SHOW_OPTION_YES;
++#else
++  have_query_cache=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_SPATIAL
++  have_geometry=SHOW_OPTION_YES;
++#else
++  have_geometry=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_RTREE_KEYS
++  have_rtree_keys=SHOW_OPTION_YES;
++#else
++  have_rtree_keys=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_CRYPT
++  have_crypt=SHOW_OPTION_YES;
++#else
++  have_crypt=SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_COMPRESS
++  have_compress= SHOW_OPTION_YES;
++#else
++  have_compress= SHOW_OPTION_NO;
++#endif
++#ifdef HAVE_LIBWRAP
++  libwrapName= NullS;
++#endif
++#ifdef HAVE_OPENSSL
++  des_key_file = 0;
++  ssl_acceptor_fd= 0;
++#endif
++#ifdef HAVE_SMEM
++  shared_memory_base_name= default_shared_memory_base_name;
++#endif
++#if !defined(my_pthread_setprio) && !defined(HAVE_PTHREAD_SETSCHEDPARAM)
++  opt_specialflag |= SPECIAL_NO_PRIOR;
++#endif
++
++#if defined(__WIN__) || defined(__NETWARE__)
++  /* Allow Win32 and NetWare users to move MySQL anywhere */
++  {
++    char prg_dev[LIBLEN];
++#if defined __WIN__
++	char executing_path_name[LIBLEN];
++	if (!test_if_hard_path(my_progname))
++	{
++		// we don't want to use GetModuleFileName inside of my_path since
++		// my_path is a generic path dereferencing function and here we care
++		// only about the executing binary.
++		GetModuleFileName(NULL, executing_path_name, sizeof(executing_path_name));
++		my_path(prg_dev, executing_path_name, NULL);
++	}
++	else
++#endif
++    my_path(prg_dev,my_progname,"mysql/bin");
++    strcat(prg_dev,"/../");			// Remove 'bin' to get base dir
++    cleanup_dirname(mysql_home,prg_dev);
++  }
++#else
++  const char *tmpenv;
++  if (!(tmpenv = getenv("MY_BASEDIR_VERSION")))
++    tmpenv = DEFAULT_MYSQL_HOME;
++  (void) strmake(mysql_home, tmpenv, sizeof(mysql_home)-1);
++#endif
++  return 0;
++}
++
++
++my_bool
++mysqld_get_one_option(int optid,
++                      const struct my_option *opt __attribute__((unused)),
++                      char *argument)
++{
++  int error;
++
++  switch(optid) {
++  case '#':
++#ifndef DBUG_OFF
++    DBUG_SET_INITIAL(argument ? argument : default_dbug_option);
++#endif
++    opt_endinfo=1;				/* unireg: memory allocation */
++    break;
++  case '0':
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--log-long-format", "--log-short-format");
++    break;
++  case 'a':
++    global_system_variables.sql_mode= fix_sql_mode(MODE_ANSI);
++    global_system_variables.tx_isolation= ISO_SERIALIZABLE;
++    break;
++  case 'b':
++    strmake(mysql_home,argument,sizeof(mysql_home)-1);
++    break;
++  case OPT_DEFAULT_CHARACTER_SET_OLD: // --default-character-set
++    WARN_DEPRECATED(NULL, VER_CELOSIA, 
++                    "--default-character-set",
++                    "--character-set-server");
++    /* Fall through */
++  case 'C':
++    if (default_collation_name == compiled_default_collation_name)
++      default_collation_name= 0;
++    break;
++  case 'l':
++    WARN_DEPRECATED(NULL, "7.0", "--log", "'--general_log'/'--general_log_file'");
++    opt_log=1;
++    break;
++  case 'h':
++    strmake(mysql_real_data_home,argument, sizeof(mysql_real_data_home)-1);
++    /* Correct pointer set by my_getopt (for embedded library) */
++    mysql_data_home= mysql_real_data_home;
++    mysql_data_home_len= strlen(mysql_data_home);
++    break;
++  case 'u':
++    if (!mysqld_user || !strcmp(mysqld_user, argument))
++      mysqld_user= argument;
++    else
++      sql_print_warning("Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user);
++    break;
++  case 'L':
++    strmake(language, argument, sizeof(language)-1);
++    break;
++  case 'O':
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--set-variable", "--variable-name=value");
++    break;
++#ifdef HAVE_REPLICATION
++  case OPT_SLAVE_SKIP_ERRORS:
++    init_slave_skip_errors(argument);
++    break;
++  case OPT_SLAVE_EXEC_MODE:
++    slave_exec_mode_options= find_bit_type_or_exit(argument,
++                                                   &slave_exec_mode_typelib,
++                                                   "", &error);
++    if (error)
++      return 1;
++    break;
++#endif
++  case OPT_SAFEMALLOC_MEM_LIMIT:
++#if !defined(DBUG_OFF) && defined(SAFEMALLOC)
++    sf_malloc_mem_limit = atoi(argument);
++#endif
++    break;
++#include <sslopt-case.h>
++#ifndef EMBEDDED_LIBRARY
++  case 'V':
++    print_version();
++    exit(0);
++#endif /*EMBEDDED_LIBRARY*/
++  case OPT_WARNINGS:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--warnings", "--log-warnings");
++    /* Note: fall-through to 'W' */
++  case 'W':
++    if (!argument)
++      global_system_variables.log_warnings++;
++    else if (argument == disabled_my_option)
++      global_system_variables.log_warnings= 0L;
++    else
++      global_system_variables.log_warnings= atoi(argument);
++    break;
++  case 'T':
++    test_flags= argument ? (uint) atoi(argument) : 0;
++    opt_endinfo=1;
++    break;
++  case (int) OPT_DEFAULT_COLLATION_OLD:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--default-collation", "--collation-server");
++    break;
++  case (int) OPT_SAFE_SHOW_DB:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--safe-show-database", "GRANT SHOW DATABASES");
++    break;
++  case (int) OPT_LOG_BIN_TRUST_FUNCTION_CREATORS_OLD:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--log-bin-trust-routine-creators", "--log-bin-trust-function-creators");
++    break;
++  case (int) OPT_ENABLE_LOCK:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--enable-locking", "--external-locking");
++    break;
++  case (int) OPT_BIG_TABLES:
++    thd_startup_options|=OPTION_BIG_TABLES;
++    break;
++  case (int) OPT_IGNORE_BUILTIN_INNODB:
++    opt_ignore_builtin_innodb= 1;
++    break;
++  case (int) OPT_ISAM_LOG:
++    opt_myisam_log=1;
++    break;
++  case (int) OPT_UPDATE_LOG:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--log-update", "--log-bin");
++    opt_update_log=1;
++    break;
++  case (int) OPT_BIN_LOG:
++    opt_bin_log= test(argument != disabled_my_option);
++    break;
++  case (int) OPT_ERROR_LOG_FILE:
++    opt_error_log= 1;
++    break;
++#ifdef HAVE_REPLICATION
++  case (int) OPT_INIT_RPL_ROLE:
++  {
++    int role;
++    role= find_type_or_exit(argument, &rpl_role_typelib, opt->name);
++    rpl_status = (role == 1) ?  RPL_AUTH_MASTER : RPL_IDLE_SLAVE;
++    break;
++  }
++  case (int)OPT_REPLICATE_IGNORE_DB:
++  {
++    rpl_filter->add_ignore_db(argument);
++    break;
++  }
++  case (int)OPT_REPLICATE_DO_DB:
++  {
++    rpl_filter->add_do_db(argument);
++    break;
++  }
++  case (int)OPT_REPLICATE_REWRITE_DB:
++  {
++    char* key = argument,*p, *val;
++
++    if (!(p= strstr(argument, "->")))
++    {
++      sql_print_error("Bad syntax in replicate-rewrite-db - missing '->'!\n");
++      return 1;
++    }
++    val= p--;
++    while (my_isspace(mysqld_charset, *p) && p > argument)
++      *p-- = 0;
++    if (p == argument)
++    {
++      sql_print_error("Bad syntax in replicate-rewrite-db - empty FROM db!\n");
++      return 1;
++    }
++    *val= 0;
++    val+= 2;
++    while (*val && my_isspace(mysqld_charset, *val))
++      val++;
++    if (!*val)
++    {
++      sql_print_error("Bad syntax in replicate-rewrite-db - empty TO db!\n");
++      return 1;
++    }
++
++    rpl_filter->add_db_rewrite(key, val);
++    break;
++  }
++
++  case (int)OPT_BINLOG_IGNORE_DB:
++  {
++    binlog_filter->add_ignore_db(argument);
++    break;
++  }
++  case OPT_BINLOG_FORMAT:
++  {
++    int id;
++    id= find_type_or_exit(argument, &binlog_format_typelib, opt->name);
++    global_system_variables.binlog_format= opt_binlog_format_id= id - 1;
++    break;
++  }
++  case (int)OPT_BINLOG_DO_DB:
++  {
++    binlog_filter->add_do_db(argument);
++    break;
++  }
++  case (int)OPT_REPLICATE_DO_TABLE:
++  {
++    if (rpl_filter->add_do_table(argument))
++    {
++      sql_print_error("Could not add do table rule '%s'!\n", argument);
++      return 1;
++    }
++    break;
++  }
++  case (int)OPT_REPLICATE_WILD_DO_TABLE:
++  {
++    if (rpl_filter->add_wild_do_table(argument))
++    {
++      sql_print_error("Could not add do table rule '%s'!\n", argument);
++      return 1;
++    }
++    break;
++  }
++  case (int)OPT_REPLICATE_WILD_IGNORE_TABLE:
++  {
++    if (rpl_filter->add_wild_ignore_table(argument))
++    {
++      sql_print_error("Could not add ignore table rule '%s'!\n", argument);
++      return 1;
++    }
++    break;
++  }
++  case (int)OPT_REPLICATE_IGNORE_TABLE:
++  {
++    if (rpl_filter->add_ignore_table(argument))
++    {
++      sql_print_error("Could not add ignore table rule '%s'!\n", argument);
++      return 1;
++    }
++    break;
++  }
++#endif /* HAVE_REPLICATION */
++  case (int) OPT_SLOW_QUERY_LOG:
++    WARN_DEPRECATED(NULL, "7.0", "--log_slow_queries", "'--slow_query_log'/'--slow_query_log_file'");
++    opt_slow_log= 1;
++    break;
++#ifdef WITH_CSV_STORAGE_ENGINE
++  case  OPT_LOG_OUTPUT:
++  {
++    if (!argument || !argument[0])
++    {
++      log_output_options= LOG_FILE;
++      log_output_str= log_output_typelib.type_names[1];
++    }
++    else
++    {
++      log_output_str= argument;
++      log_output_options=
++        find_bit_type_or_exit(argument, &log_output_typelib, opt->name, &error);
++      if (error)
++        return 1;
++    }
++    break;
++  }
++#endif
++  case OPT_EVENT_SCHEDULER:
++#ifndef HAVE_EVENT_SCHEDULER
++    sql_perror("Event scheduler is not supported in embedded build.");
++#else
++    if (Events::set_opt_event_scheduler(argument))
++      return 1;
++#endif
++    break;
++  case (int) OPT_SKIP_NEW:
++    opt_specialflag|= SPECIAL_NO_NEW_FUNC;
++    delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE;
++    myisam_concurrent_insert=0;
++    myisam_recover_options= HA_RECOVER_NONE;
++    sp_automatic_privileges=0;
++    my_use_symdir=0;
++    ha_open_options&= ~(HA_OPEN_ABORT_IF_CRASHED | HA_OPEN_DELAY_KEY_WRITE);
++#ifdef HAVE_QUERY_CACHE
++    query_cache_size=0;
++#endif
++    break;
++  case (int) OPT_SAFE:
++    opt_specialflag|= SPECIAL_SAFE_MODE;
++    delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE;
++    myisam_recover_options= HA_RECOVER_DEFAULT;
++    ha_open_options&= ~(HA_OPEN_DELAY_KEY_WRITE);
++    break;
++  case (int) OPT_SKIP_PRIOR:
++    opt_specialflag|= SPECIAL_NO_PRIOR;
++    sql_print_warning("The --skip-thread-priority startup option is deprecated "
++                      "and will be removed in MySQL 7.0. MySQL 6.0 and up do not "
++                      "give threads different priorities.");
++    break;
++  case (int) OPT_SKIP_LOCK:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--skip-locking", "--skip-external-locking");
++    opt_external_locking=0;
++    break;
++  case (int) OPT_SQL_BIN_UPDATE_SAME:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--sql-bin-update-same", "the binary log");
++    break;
++  case (int) OPT_RECORD_BUFFER_OLD:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "record_buffer", "read_buffer_size");
++    break;
++  case (int) OPT_SYMBOLIC_LINKS:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--use-symbolic-links", "--symbolic-links");
++    break;
++  case (int) OPT_SKIP_HOST_CACHE:
++    opt_specialflag|= SPECIAL_NO_HOST_CACHE;
++    break;
++  case (int) OPT_SKIP_RESOLVE:
++    opt_skip_name_resolve= 1;
++    opt_specialflag|=SPECIAL_NO_RESOLVE;
++    break;
++  case (int) OPT_SKIP_NETWORKING:
++#if defined(__NETWARE__)
++    sql_perror("Can't start server: skip-networking option is currently not supported on NetWare");
++    return 1;
++#endif
++    opt_disable_networking=1;
++    mysqld_port=0;
++    break;
++  case (int) OPT_SKIP_SHOW_DB:
++    opt_skip_show_db=1;
++    opt_specialflag|=SPECIAL_SKIP_SHOW_DB;
++    break;
++  case (int) OPT_WANT_CORE:
++    test_flags |= TEST_CORE_ON_SIGNAL;
++    break;
++  case (int) OPT_SKIP_STACK_TRACE:
++    test_flags|=TEST_NO_STACKTRACE;
++    break;
++  case (int) OPT_SKIP_SYMLINKS:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--skip-symlink", "--skip-symbolic-links");
++    my_use_symdir=0;
++    break;
++  case (int) OPT_BIND_ADDRESS:
++    if ((my_bind_addr= (ulong) inet_addr(argument)) == INADDR_NONE)
++    {
++      struct hostent *ent;
++      if (argument[0])
++	ent=gethostbyname(argument);
++      else
++      {
++	char myhostname[255];
++	if (gethostname(myhostname,sizeof(myhostname)) < 0)
++	{
++	  sql_perror("Can't start server: cannot get my own hostname!");
++          return 1;
++	}
++	ent=gethostbyname(myhostname);
++      }
++      if (!ent)
++      {
++	sql_perror("Can't start server: cannot resolve hostname!");
++        return 1;
++      }
++      my_bind_addr = (ulong) ((in_addr*)ent->h_addr_list[0])->s_addr;
++    }
++    break;
++  case (int) OPT_PID_FILE:
++    strmake(pidfile_name, argument, sizeof(pidfile_name)-1);
++    break;
++#ifdef __WIN__
++  case (int) OPT_STANDALONE:		/* Dummy option for NT */
++    break;
++#endif
++  /*
++    The following change issues a deprecation warning if the slave
++    configuration is specified either in the my.cnf file or on
++    the command-line. See BUG#21490.
++  */
++  case OPT_MASTER_HOST:
++  case OPT_MASTER_USER:
++  case OPT_MASTER_PASSWORD:
++  case OPT_MASTER_PORT:
++  case OPT_MASTER_CONNECT_RETRY:
++  case OPT_MASTER_SSL:          
++  case OPT_MASTER_SSL_KEY:
++  case OPT_MASTER_SSL_CERT:       
++  case OPT_MASTER_SSL_CAPATH:
++  case OPT_MASTER_SSL_CIPHER:
++  case OPT_MASTER_SSL_CA:
++    if (!slave_warning_issued)                 //only show the warning once
++    {
++      slave_warning_issued = true;   
++      WARN_DEPRECATED(NULL, "6.0", "for replication startup options", 
++        "'CHANGE MASTER'");
++    }
++    break;
++  case OPT_CONSOLE:
++    if (opt_console)
++      opt_error_log= 0;			// Force logs to stdout
++    break;
++  case (int) OPT_FLUSH:
++    myisam_flush=1;
++    flush_time=0;			// No auto flush
++    break;
++  case OPT_LOW_PRIORITY_UPDATES:
++    thr_upgraded_concurrent_insert_lock= TL_WRITE_LOW_PRIORITY;
++    global_system_variables.low_priority_updates=1;
++    break;
++  case OPT_BOOTSTRAP:
++    opt_noacl=opt_bootstrap=1;
++    break;
++  case OPT_SERVER_ID:
++    server_id_supplied = 1;
++    break;
++  case OPT_DELAY_KEY_WRITE_ALL:
++    WARN_DEPRECATED(NULL, VER_CELOSIA, 
++                    "--delay-key-write-for-all-tables",
++                    "--delay-key-write=ALL");
++    if (argument != disabled_my_option)
++      argument= (char*) "ALL";
++    /* Fall through */
++  case OPT_DELAY_KEY_WRITE:
++    if (argument == disabled_my_option)
++      delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE;
++    else if (! argument)
++      delay_key_write_options= (uint) DELAY_KEY_WRITE_ON;
++    else
++    {
++      int type;
++      type= find_type_or_exit(argument, &delay_key_write_typelib, opt->name);
++      delay_key_write_options= (uint) type-1;
++    }
++    break;
++  case OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE:
++    sql_print_warning("--myisam_max_extra_sort_file_size is deprecated and "
++                      "does nothing in this version.  It will be removed in "
++                      "a future release.");
++    break;
++  case OPT_CHARSETS_DIR:
++    strmake(mysql_charsets_dir, argument, sizeof(mysql_charsets_dir)-1);
++    charsets_dir = mysql_charsets_dir;
++    break;
++  case OPT_TX_ISOLATION:
++  {
++    int type;
++    type= find_type_or_exit(argument, &tx_isolation_typelib, opt->name);
++    global_system_variables.tx_isolation= (type-1);
++    break;
++  }
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++  case OPT_NDB_MGMD:
++  case OPT_NDB_NODEID:
++  {
++    int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len,
++			 sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len,
++			 "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"",
++			 optid == OPT_NDB_NODEID ? "nodeid=" : "",
++			 argument);
++    opt_ndb_constrbuf_len+= len;
++  }
++  /* fall through to add the connectstring to the end
++   * and set opt_ndbcluster_connectstring
++   */
++  case OPT_NDB_CONNECTSTRING:
++    if (opt_ndb_connectstring && opt_ndb_connectstring[0])
++      my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len,
++		  sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len,
++		  "%s%s", opt_ndb_constrbuf_len > 0 ? ",":"",
++		  opt_ndb_connectstring);
++    else
++      opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0;
++    opt_ndbcluster_connectstring= opt_ndb_constrbuf;
++    break;
++  case OPT_NDB_DISTRIBUTION:
++    int id;
++    id= find_type_or_exit(argument, &ndb_distribution_typelib, opt->name);
++    opt_ndb_distribution_id= (enum ndb_distribution)(id-1);
++    break;
++  case OPT_NDB_EXTRA_LOGGING:
++    if (!argument)
++      ndb_extra_logging++;
++    else if (argument == disabled_my_option)
++      ndb_extra_logging= 0L;
++    else
++      ndb_extra_logging= atoi(argument);
++    break;
++#endif
++  case OPT_MYISAM_RECOVER:
++  {
++    if (!argument)
++    {
++      myisam_recover_options=    HA_RECOVER_DEFAULT;
++      myisam_recover_options_str= myisam_recover_typelib.type_names[0];
++    }
++    else if (!argument[0])
++    {
++      myisam_recover_options= HA_RECOVER_NONE;
++      myisam_recover_options_str= "OFF";
++    }
++    else
++    {
++      myisam_recover_options_str=argument;
++      myisam_recover_options=
++        find_bit_type_or_exit(argument, &myisam_recover_typelib, opt->name,
++                              &error);
++      if (error)
++        return 1;
++    }
++    ha_open_options|=HA_OPEN_ABORT_IF_CRASHED;
++    break;
++  }
++  case OPT_CONCURRENT_INSERT:
++    /* The following code is mainly here to emulate old behavior */
++    if (!argument)                      /* --concurrent-insert */
++      myisam_concurrent_insert= 1;
++    else if (argument == disabled_my_option)
++      myisam_concurrent_insert= 0;      /* --skip-concurrent-insert */
++    break;
++  case OPT_TC_HEURISTIC_RECOVER:
++    tc_heuristic_recover= find_type_or_exit(argument,
++                                            &tc_heuristic_recover_typelib,
++                                            opt->name);
++    break;
++  case OPT_MYISAM_STATS_METHOD:
++  {
++    ulong method_conv;
++    int method;
++    LINT_INIT(method_conv);
++
++    myisam_stats_method_str= argument;
++    method= find_type_or_exit(argument, &myisam_stats_method_typelib,
++                              opt->name);
++    switch (method-1) {
++    case 2:
++      method_conv= MI_STATS_METHOD_IGNORE_NULLS;
++      break;
++    case 1:
++      method_conv= MI_STATS_METHOD_NULLS_EQUAL;
++      break;
++    case 0:
++    default:
++      method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL;
++      break;
++    }
++    global_system_variables.myisam_stats_method= method_conv;
++    break;
++  }
++  case OPT_SQL_MODE:
++  {
++    sql_mode_str= argument;
++    global_system_variables.sql_mode=
++      find_bit_type_or_exit(argument, &sql_mode_typelib, opt->name, &error);
++    if (error)
++      return 1;
++    global_system_variables.sql_mode= fix_sql_mode(global_system_variables.
++						   sql_mode);
++    break;
++  }
++  case OPT_OPTIMIZER_SWITCH:
++  {
++    bool not_used;
++    char *error= 0;
++    uint error_len= 0;
++    optimizer_switch_str= argument;
++    global_system_variables.optimizer_switch=
++      (ulong)find_set_from_flags(&optimizer_switch_typelib, 
++                                 optimizer_switch_typelib.count, 
++                                 global_system_variables.optimizer_switch,
++                                 global_system_variables.optimizer_switch,
++                                 argument, strlen(argument), NULL,
++                                 &error, &error_len, &not_used);
++     if (error)
++     {
++       char buf[512];
++       char *cbuf= buf;
++       cbuf += my_snprintf(buf, 512, "Error in parsing optimizer_switch setting near %*s\n", error_len, error);
++       sql_perror(buf);
++       return 1;
++     }
++    break;
++  }
++  case OPT_ONE_THREAD:
++    global_system_variables.thread_handling=
++      SCHEDULER_ONE_THREAD_PER_CONNECTION;
++    break;
++  case OPT_THREAD_HANDLING:
++  {
++    global_system_variables.thread_handling=
++      find_type_or_exit(argument, &thread_handling_typelib, opt->name)-1;
++    break;
++  }
++  case OPT_FT_BOOLEAN_SYNTAX:
++    if (ft_boolean_check_syntax_string((uchar*) argument))
++    {
++      sql_print_error("Invalid ft-boolean-syntax string: %s\n", argument);
++      return 1;
++    }
++    strmake(ft_boolean_syntax, argument, sizeof(ft_boolean_syntax)-1);
++    break;
++  case OPT_SKIP_SAFEMALLOC:
++#ifdef SAFEMALLOC
++    sf_malloc_quick=1;
++#endif
++    break;
++  case OPT_LOWER_CASE_TABLE_NAMES:
++    lower_case_table_names= argument ? atoi(argument) : 1;
++    lower_case_table_names_used= 1;
++    break;
++#ifdef HAVE_STACK_TRACE_ON_SEGV
++  case OPT_DO_PSTACK:
++    sql_print_warning("'--enable-pstack' is deprecated and will be removed "
++                      "in a future release. A symbolic stack trace will be "
++                      "printed after a crash whenever possible.");
++    break;
++#endif
++#if defined(ENABLED_DEBUG_SYNC)
++  case OPT_DEBUG_SYNC_TIMEOUT:
++    /*
++      Debug Sync Facility. See debug_sync.cc.
++      Default timeout for WAIT_FOR action.
++      Default value is zero (facility disabled).
++      If option is given without an argument, supply a non-zero value.
++    */
++    if (!argument)
++    {
++      /* purecov: begin tested */
++      opt_debug_sync_timeout= DEBUG_SYNC_DEFAULT_WAIT_TIMEOUT;
++      /* purecov: end */
++    }
++    break;
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++  case OPT_MAX_LONG_DATA_SIZE:
++    max_long_data_size_used= true;
++    WARN_DEPRECATED(NULL, VER_CELOSIA, "--max_long_data_size", "--max_allowed_packet");
++    break;
++  }
++  return 0;
++}
++
++
++/** Handle arguments for multiple key caches. */
++C_MODE_START
++static void* mysql_getopt_value(const char *, uint,
++                                const struct my_option *, int *);
++C_MODE_END
++
++static void*
++mysql_getopt_value(const char *keyname, uint key_length,
++		   const struct my_option *option, int *error)
++{
++  if (error)
++    *error= 0;
++  switch (option->id) {
++  case OPT_KEY_BUFFER_SIZE:
++  case OPT_KEY_CACHE_BLOCK_SIZE:
++  case OPT_KEY_CACHE_DIVISION_LIMIT:
++  case OPT_KEY_CACHE_AGE_THRESHOLD:
++  {
++    KEY_CACHE *key_cache;
++    if (!(key_cache= get_or_create_key_cache(keyname, key_length)))
++    {
++      if (error)
++        *error= EXIT_OUT_OF_MEMORY;
++      return 0;
++    }
++    switch (option->id) {
++    case OPT_KEY_BUFFER_SIZE:
++      return &key_cache->param_buff_size;
++    case OPT_KEY_CACHE_BLOCK_SIZE:
++      return &key_cache->param_block_size;
++    case OPT_KEY_CACHE_DIVISION_LIMIT:
++      return &key_cache->param_division_limit;
++    case OPT_KEY_CACHE_AGE_THRESHOLD:
++      return &key_cache->param_age_threshold;
++    }
++  }
++  }
++  return option->value;
++}
++
++
++extern "C" void option_error_reporter(enum loglevel level, const char *format, ...);
++
++void option_error_reporter(enum loglevel level, const char *format, ...)
++{
++  va_list args;
++  va_start(args, format);
++
++  /* Don't print warnings for --loose options during bootstrap */
++  if (level == ERROR_LEVEL || !opt_bootstrap ||
++      global_system_variables.log_warnings)
++  {
++    vprint_msg_to_log(level, format, args);
++  }
++  va_end(args);
++}
++
++
++/**
++  @todo
++  - FIXME add EXIT_TOO_MANY_ARGUMENTS to "mysys_err.h" and return that code?
++*/
++static int get_options(int *argc,char **argv)
++{
++  int ho_error;
++
++  my_getopt_register_get_addr(mysql_getopt_value);
++  strmake(def_ft_boolean_syntax, ft_boolean_syntax,
++	  sizeof(ft_boolean_syntax)-1);
++  my_getopt_error_reporter= option_error_reporter;
++
++  /* Skip unknown options so that they may be processed later by plugins */
++  my_getopt_skip_unknown= TRUE;
++
++  if ((ho_error= handle_options(argc, &argv, my_long_options,
++                                mysqld_get_one_option)))
++    return ho_error;
++  (*argc)++; /* add back one for the progname handle_options removes */
++             /* no need to do this for argv as we are discarding it. */
++
++  if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes ||
++       opt_log_slow_slave_statements) &&
++      !opt_slow_log)
++    sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set");
++  if (global_system_variables.net_buffer_length > 
++      global_system_variables.max_allowed_packet)
++  {
++    sql_print_warning("net_buffer_length (%lu) is set to be larger "
++                      "than max_allowed_packet (%lu). Please rectify.",
++                      global_system_variables.net_buffer_length, 
++                      global_system_variables.max_allowed_packet);
++  }
++
++#if defined(HAVE_BROKEN_REALPATH)
++  my_use_symdir=0;
++  my_disable_symlinks=1;
++  have_symlink=SHOW_OPTION_NO;
++#else
++  if (!my_use_symdir)
++  {
++    my_disable_symlinks=1;
++    have_symlink=SHOW_OPTION_DISABLED;
++  }
++#endif
++  if (opt_debugging)
++  {
++    /* Allow break with SIGINT, no core or stack trace */
++    test_flags|= TEST_SIGINT | TEST_NO_STACKTRACE;
++    test_flags&= ~TEST_CORE_ON_SIGNAL;
++  }
++  /* Set global MyISAM variables from delay_key_write_options */
++  fix_delay_key_write((THD*) 0, OPT_GLOBAL);
++  /* Set global slave_exec_mode from its option */
++  fix_slave_exec_mode();
++
++#ifndef EMBEDDED_LIBRARY
++  if (mysqld_chroot)
++    set_root(mysqld_chroot);
++#else
++  global_system_variables.thread_handling = SCHEDULER_NO_THREADS;
++  max_allowed_packet= global_system_variables.max_allowed_packet;
++  net_buffer_length= global_system_variables.net_buffer_length;
++#endif
++  if (fix_paths())
++    return 1;
++
++  /*
++    Set some global variables from the global_system_variables
++    In most cases the global variables will not be used
++  */
++  my_disable_locking= myisam_single_user= test(opt_external_locking == 0);
++  my_default_record_cache_size=global_system_variables.read_buff_size;
++  myisam_max_temp_length=
++    (my_off_t) global_system_variables.myisam_max_sort_file_size;
++
++  /* Set global variables based on startup options */
++  myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size);
++
++  /* long_query_time is in microseconds */
++  global_system_variables.long_query_time= max_system_variables.long_query_time=
++    (longlong) (long_query_time * 1000000.0);
++
++  if (opt_short_log_format)
++    opt_specialflag|= SPECIAL_SHORT_LOG_FORMAT;
++
++  if (init_global_datetime_format(MYSQL_TIMESTAMP_DATE,
++				  &global_system_variables.date_format) ||
++      init_global_datetime_format(MYSQL_TIMESTAMP_TIME,
++				  &global_system_variables.time_format) ||
++      init_global_datetime_format(MYSQL_TIMESTAMP_DATETIME,
++				  &global_system_variables.datetime_format))
++    return 1;
++
++#ifdef EMBEDDED_LIBRARY
++  one_thread_scheduler(&thread_scheduler);
++#else
++  if (global_system_variables.thread_handling <=
++      SCHEDULER_ONE_THREAD_PER_CONNECTION)
++    one_thread_per_connection_scheduler(&thread_scheduler);
++  else if (global_system_variables.thread_handling == SCHEDULER_NO_THREADS)
++    one_thread_scheduler(&thread_scheduler);
++  else
++    pool_of_threads_scheduler(&thread_scheduler);  /* purecov: tested */
++#endif
++
++  /*
++    If max_long_data_size is not specified explicitly use
++    value of max_allowed_packet.
++  */
++  if (!max_long_data_size_used)
++    max_long_data_size= global_system_variables.max_allowed_packet;
++
++  return 0;
++}
++
++
++/*
++  Create version name for running mysqld version
++  We automatically add suffixes -debug, -embedded and -log to the version
++  name to make the version more descriptive.
++  (MYSQL_SERVER_SUFFIX is set by the compilation environment)
++*/
++
++static void set_server_version(void)
++{
++  char *end= strxmov(server_version, MYSQL_SERVER_VERSION,
++                     MYSQL_SERVER_SUFFIX_STR, NullS);
++#ifdef EMBEDDED_LIBRARY
++  end= strmov(end, "-embedded");
++#endif
++#ifndef DBUG_OFF
++  if (!strstr(MYSQL_SERVER_SUFFIX_STR, "-debug"))
++    end= strmov(end, "-debug");
++#endif
++  if (opt_log || opt_update_log || opt_slow_log || opt_bin_log)
++    strmov(end, "-log");                        // This may slow down system
++}
++
++
++static char *get_relative_path(const char *path)
++{
++  if (test_if_hard_path(path) &&
++      is_prefix(path,DEFAULT_MYSQL_HOME) &&
++      strcmp(DEFAULT_MYSQL_HOME,FN_ROOTDIR))
++  {
++    path+=(uint) strlen(DEFAULT_MYSQL_HOME);
++    while (*path == FN_LIBCHAR)
++      path++;
++  }
++  return (char*) path;
++}
++
++
++/**
++  Fix filename and replace extension where 'dir' is relative to
++  mysql_real_data_home.
++  @return
++    1 if len(path) > FN_REFLEN
++*/
++
++bool
++fn_format_relative_to_data_home(char * to, const char *name,
++				const char *dir, const char *extension)
++{
++  char tmp_path[FN_REFLEN];
++  if (!test_if_hard_path(dir))
++  {
++    strxnmov(tmp_path,sizeof(tmp_path)-1, mysql_real_data_home,
++	     dir, NullS);
++    dir=tmp_path;
++  }
++  return !fn_format(to, name, dir, extension,
++		    MY_APPEND_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH);
++}
++
++
++/**
++  Test a file path to determine if the path is compatible with the secure file
++  path restriction.
++ 
++  @param path null terminated character string
++
++  @return
++    @retval TRUE The path is secure
++    @retval FALSE The path isn't secure
++*/
++
++bool is_secure_file_path(char *path)
++{
++  char buff1[FN_REFLEN], buff2[FN_REFLEN];
++  /*
++    All paths are secure if opt_secure_file_priv is 0
++  */
++  if (!opt_secure_file_priv)
++    return TRUE;
++
++  if (strlen(path) >= FN_REFLEN)
++    return FALSE;
++
++  if (my_realpath(buff1, path, 0))
++  {
++    /*
++      The supplied file path might have been a file and not a directory.
++    */
++    int length= (int)dirname_length(path);
++    if (length >= FN_REFLEN)
++      return FALSE;
++    memcpy(buff2, path, length);
++    buff2[length]= '\0';
++    if (length == 0 || my_realpath(buff1, buff2, 0))
++      return FALSE;
++  }
++  convert_dirname(buff2, buff1, NullS);
++  if (strncmp(opt_secure_file_priv, buff2, strlen(opt_secure_file_priv)))
++    return FALSE;
++  return TRUE;
++}
++
++static int fix_paths(void)
++{
++  char buff[FN_REFLEN],*pos;
++  convert_dirname(mysql_home,mysql_home,NullS);
++  /* Resolve symlinks to allow 'mysql_home' to be a relative symlink */
++  my_realpath(mysql_home,mysql_home,MYF(0));
++  /* Ensure that mysql_home ends in FN_LIBCHAR */
++  pos=strend(mysql_home);
++  if (pos[-1] != FN_LIBCHAR)
++  {
++    pos[0]= FN_LIBCHAR;
++    pos[1]= 0;
++  }
++  convert_dirname(language,language,NullS);
++  convert_dirname(mysql_real_data_home,mysql_real_data_home,NullS);
++  (void) my_load_path(mysql_home,mysql_home,""); // Resolve current dir
++  (void) my_load_path(mysql_real_data_home,mysql_real_data_home,mysql_home);
++  (void) my_load_path(pidfile_name,pidfile_name,mysql_real_data_home);
++  (void) my_load_path(opt_plugin_dir, opt_plugin_dir_ptr ? opt_plugin_dir_ptr :
++                                      get_relative_path(PLUGINDIR), mysql_home);
++  opt_plugin_dir_ptr= opt_plugin_dir;
++
++  my_realpath(mysql_unpacked_real_data_home, mysql_real_data_home, MYF(0));
++  mysql_unpacked_real_data_home_len= 
++    (int) strlen(mysql_unpacked_real_data_home);
++  if (mysql_unpacked_real_data_home[mysql_unpacked_real_data_home_len-1] == FN_LIBCHAR)
++    --mysql_unpacked_real_data_home_len;
++
++  char *sharedir=get_relative_path(SHAREDIR);
++  if (test_if_hard_path(sharedir))
++    strmake(buff,sharedir,sizeof(buff)-1);		/* purecov: tested */
++  else
++    strxnmov(buff,sizeof(buff)-1,mysql_home,sharedir,NullS);
++  convert_dirname(buff,buff,NullS);
++  (void) my_load_path(language,language,buff);
++
++  /* If --character-sets-dir isn't given, use shared library dir */
++  if (charsets_dir != mysql_charsets_dir)
++  {
++    strxnmov(mysql_charsets_dir, sizeof(mysql_charsets_dir)-1, buff,
++	     CHARSET_DIR, NullS);
++  }
++  (void) my_load_path(mysql_charsets_dir, mysql_charsets_dir, buff);
++  convert_dirname(mysql_charsets_dir, mysql_charsets_dir, NullS);
++  charsets_dir=mysql_charsets_dir;
++
++  if (init_tmpdir(&mysql_tmpdir_list, opt_mysql_tmpdir))
++    return 1;
++#ifdef HAVE_REPLICATION
++  if (!slave_load_tmpdir)
++  {
++    if (!(slave_load_tmpdir = (char*) my_strdup(mysql_tmpdir, MYF(MY_FAE))))
++      return 1;
++  }
++#endif /* HAVE_REPLICATION */
++  /*
++    Convert the secure-file-priv option to system format, allowing
++    a quick strcmp to check if read or write is in an allowed dir
++   */
++  if (opt_secure_file_priv)
++  {
++    if (*opt_secure_file_priv == 0)
++    {
++      opt_secure_file_priv= 0;
++    }
++    else
++    {
++      if (strlen(opt_secure_file_priv) >= FN_REFLEN)
++        opt_secure_file_priv[FN_REFLEN-1]= '\0';
++      if (my_realpath(buff, opt_secure_file_priv, 0))
++      {
++        sql_print_warning("Failed to normalize the argument for --secure-file-priv.");
++        return 1;
++      }
++      char *secure_file_real_path= (char *)my_malloc(FN_REFLEN, MYF(MY_FAE));
++      convert_dirname(secure_file_real_path, buff, NullS);
++      my_free(opt_secure_file_priv, MYF(0));
++      opt_secure_file_priv= secure_file_real_path;
++    }
++  }
++  
++  return 0;
++}
++
++
++static ulong find_bit_type_or_exit(const char *x, TYPELIB *bit_lib,
++                                   const char *option, int *error)
++{
++  ulong result;
++  const char **ptr;
++  
++  *error= 0;
++  if ((result= find_bit_type(x, bit_lib)) == ~(ulong) 0)
++  {
++    char *buff= (char *) my_alloca(2048);
++    char *cbuf;
++    ptr= bit_lib->type_names;
++    cbuf= buff + ((!*x) ?
++      my_snprintf(buff, 2048, "No option given to %s\n", option) :
++      my_snprintf(buff, 2048, "Wrong option to %s. Option(s) given: %s\n",
++                  option, x));
++    cbuf+= my_snprintf(cbuf, 2048 - (cbuf-buff), "Alternatives are: '%s'", *ptr);
++    while (*++ptr)
++      cbuf+= my_snprintf(cbuf, 2048 - (cbuf-buff), ",'%s'", *ptr);
++    my_snprintf(cbuf, 2048 - (cbuf-buff), "\n");
++    sql_perror(buff);
++    *error= 1;
++    my_afree(buff);
++    return 0;
++  }
++
++  return result;
++}
++
++
++/**
++  @return
++    a bitfield from a string of substrings separated by ','
++    or
++    ~(ulong) 0 on error.
++*/
++
++static ulong find_bit_type(const char *x, TYPELIB *bit_lib)
++{
++  bool found_end;
++  int  found_count;
++  const char *end,*i,*j;
++  const char **array, *pos;
++  ulong found,found_int,bit;
++  DBUG_ENTER("find_bit_type");
++  DBUG_PRINT("enter",("x: '%s'",x));
++
++  found=0;
++  found_end= 0;
++  pos=(char *) x;
++  while (*pos == ' ') pos++;
++  found_end= *pos == 0;
++  while (!found_end)
++  {
++    if (!*(end=strcend(pos,',')))		/* Let end point at fieldend */
++    {
++      while (end > pos && end[-1] == ' ')
++	end--;					/* Skip end-space */
++      found_end=1;
++    }
++    found_int=0; found_count=0;
++    for (array=bit_lib->type_names, bit=1 ; (i= *array++) ; bit<<=1)
++    {
++      j=pos;
++      while (j != end)
++      {
++	if (my_toupper(mysqld_charset,*i++) !=
++            my_toupper(mysqld_charset,*j++))
++	  goto skip;
++      }
++      found_int=bit;
++      if (! *i)
++      {
++	found_count=1;
++	break;
++      }
++      else if (j != pos)			// Half field found
++      {
++	found_count++;				// Could be one of two values
++      }
++skip: ;
++    }
++    if (found_count != 1)
++      DBUG_RETURN(~(ulong) 0);				// No unique value
++    found|=found_int;
++    pos=end+1;
++  }
++
++  DBUG_PRINT("exit",("bit-field: %ld",(ulong) found));
++  DBUG_RETURN(found);
++} /* find_bit_type */
++
++
++/**
++  Check if file system used for databases is case insensitive.
++
++  @param dir_name			Directory to test
++
++  @retval
++    -1  Don't know (Test failed)
++  @retval
++    0   File system is case sensitive
++  @retval
++    1   File system is case insensitive
++*/
++
++static int test_if_case_insensitive(const char *dir_name)
++{
++  int result= 0;
++  File file;
++  char buff[FN_REFLEN], buff2[FN_REFLEN];
++  MY_STAT stat_info;
++  DBUG_ENTER("test_if_case_insensitive");
++
++  fn_format(buff, glob_hostname, dir_name, ".lower-test",
++	    MY_UNPACK_FILENAME | MY_REPLACE_EXT | MY_REPLACE_DIR);
++  fn_format(buff2, glob_hostname, dir_name, ".LOWER-TEST",
++	    MY_UNPACK_FILENAME | MY_REPLACE_EXT | MY_REPLACE_DIR);
++  (void) my_delete(buff2, MYF(0));
++  if ((file= my_create(buff, 0666, O_RDWR, MYF(0))) < 0)
++  {
++    sql_print_warning("Can't create test file %s", buff);
++    DBUG_RETURN(-1);
++  }
++  my_close(file, MYF(0));
++  if (my_stat(buff2, &stat_info, MYF(0)))
++    result= 1;					// Can access file
++  (void) my_delete(buff, MYF(MY_WME));
++  DBUG_PRINT("exit", ("result: %d", result));
++  DBUG_RETURN(result);
++}
++
++
++#ifndef EMBEDDED_LIBRARY
++
++/**
++  Create file to store pid number.
++*/
++static void create_pid_file()
++{
++  File file;
++  if ((file = my_create(pidfile_name,0664,
++			O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0)
++  {
++    char buff[21], *end;
++    end= int10_to_str((long) getpid(), buff, 10);
++    *end++= '\n';
++    if (!my_write(file, (uchar*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP)))
++    {
++      (void) my_close(file, MYF(0));
++      return;
++    }
++    (void) my_close(file, MYF(0));
++  }
++  sql_perror("Can't start server: can't create PID file");
++  exit(1);
++}
++#endif /* EMBEDDED_LIBRARY */
++
++/** Clear most status variables. */
++void refresh_status(THD *thd)
++{
++  pthread_mutex_lock(&LOCK_status);
++
++  /* Add thread's status variables to global status */
++  add_to_status(&global_status_var, &thd->status_var);
++
++  /* Reset thread's status variables */
++  bzero((uchar*) &thd->status_var, sizeof(thd->status_var));
++
++  /* Reset some global variables */
++  reset_status_vars();
++
++  /* Reset the counters of all key caches (default and named). */
++  process_key_caches(reset_key_cache_counters);
++#ifdef COMMUNITY_SERVER
++  flush_status_time= time((time_t*) 0);
++#endif
++  pthread_mutex_unlock(&LOCK_status);
++
++  /*
++    Set max_used_connections to the number of currently open
++    connections.  Lock LOCK_thread_count out of LOCK_status to avoid
++    deadlocks.  Status reset becomes not atomic, but status data is
++    not exact anyway.
++  */
++  pthread_mutex_lock(&LOCK_thread_count);
++  max_used_connections= thread_count-delayed_insert_threads;
++  pthread_mutex_unlock(&LOCK_thread_count);
++}
++
++
++/*****************************************************************************
++  Instantiate variables for missing storage engines
++  This section should go away soon
++*****************************************************************************/
++
++#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
++ulong ndb_cache_check_time;
++ulong ndb_extra_logging;
++#endif
++
++/*****************************************************************************
++  Instantiate templates
++*****************************************************************************/
++
++#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
++/* Used templates */
++template class I_List<THD>;
++template class I_List_iterator<THD>;
++template class I_List<i_string>;
++template class I_List<i_string_pair>;
++template class I_List<NAMED_LIST>;
++template class I_List<Statement>;
++template class I_List_iterator<Statement>;
++#endif
+diff -urN mysql-old/sql/net_serv.cc mysql/sql/net_serv.cc
+--- mysql-old/sql/net_serv.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/net_serv.cc	2011-05-10 17:56:01.466682376 +0000
+@@ -755,7 +755,7 @@
+   {
+     while (remain > 0)
+     {
+-      size_t length= min(remain, net->max_packet);
++      size_t length= MYSQL_MIN(remain, net->max_packet);
+       if (net_safe_read(net, net->buff, length, alarmed))
+ 	DBUG_RETURN(1);
+       update_statistics(thd_increment_bytes_received(length));
+@@ -946,7 +946,7 @@
+ 	len=uint3korr(net->buff+net->where_b);
+ 	if (!len)				/* End of big multi-packet */
+ 	  goto end;
+-	helping = max(len,*complen) + net->where_b;
++	helping = MYSQL_MAX(len,*complen) + net->where_b;
+ 	/* The necessary size of net->buff */
+ 	if (helping >= net->max_packet)
+ 	{
+diff -urN mysql-old/sql/opt_range.cc mysql/sql/opt_range.cc
+--- mysql-old/sql/opt_range.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/opt_range.cc	2011-05-10 17:56:01.470015709 +0000
+@@ -2347,7 +2347,7 @@
+     group_trp= get_best_group_min_max(&param, tree);
+     if (group_trp)
+     {
+-      param.table->quick_condition_rows= min(group_trp->records,
++      param.table->quick_condition_rows= MYSQL_MIN(group_trp->records,
+                                              head->file->stats.records);
+       if (group_trp->read_cost < best_read_time)
+       {
+@@ -3823,7 +3823,7 @@
+     {
+       imerge_trp->read_cost= imerge_cost;
+       imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
+-      imerge_trp->records= min(imerge_trp->records,
++      imerge_trp->records= MYSQL_MIN(imerge_trp->records,
+                                param->table->file->stats.records);
+       imerge_trp->range_scans= range_scans;
+       imerge_trp->range_scans_end= range_scans + n_child_scans;
+@@ -7471,7 +7471,7 @@
+       param->table->quick_key_parts[key]=param->max_key_part+1;
+       param->table->quick_n_ranges[key]= param->n_ranges;
+       param->table->quick_condition_rows=
+-        min(param->table->quick_condition_rows, records);
++        MYSQL_MIN(param->table->quick_condition_rows, records);
+     }
+     /*
+       Need to save quick_rows in any case as it is used when calculating
+@@ -7540,7 +7540,7 @@
+   uchar *tmp_min_key, *tmp_max_key;
+   uint8 save_first_null_comp= param->first_null_comp;
+ 
+-  param->max_key_part=max(param->max_key_part,key_tree->part);
++  param->max_key_part=MYSQL_MAX(param->max_key_part,key_tree->part);
+   if (key_tree->left != &null_element)
+   {
+     /*
+@@ -8462,13 +8462,13 @@
+   /* Do not allocate the buffers twice. */
+   if (multi_range_length)
+   {
+-    DBUG_ASSERT(multi_range_length == min(multi_range_count, ranges.elements));
++    DBUG_ASSERT(multi_range_length == MYSQL_MIN(multi_range_count, ranges.elements));
+     DBUG_RETURN(0);
+   }
+ 
+   /* Allocate the ranges array. */
+   DBUG_ASSERT(ranges.elements);
+-  multi_range_length= min(multi_range_count, ranges.elements);
++  multi_range_length= MYSQL_MIN(multi_range_count, ranges.elements);
+   DBUG_ASSERT(multi_range_length > 0);
+   while (multi_range_length && ! (multi_range= (KEY_MULTI_RANGE*)
+                                   my_malloc(multi_range_length *
+@@ -8487,7 +8487,7 @@
+   /* Allocate the handler buffer if necessary.  */
+   if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
+   {
+-    mrange_bufsiz= min(multi_range_bufsiz,
++    mrange_bufsiz= MYSQL_MIN(multi_range_bufsiz,
+                        ((uint)QUICK_SELECT_I::records + 1)* head->s->reclength);
+ 
+     while (mrange_bufsiz &&
+@@ -8568,7 +8568,7 @@
+         goto end;
+     }
+ 
+-    uint count= min(multi_range_length, ranges.elements -
++    uint count= MYSQL_MIN(multi_range_length, ranges.elements -
+                     (cur_range - (QUICK_RANGE**) ranges.buffer));
+     if (count == 0)
+     {
+@@ -9270,7 +9270,7 @@
+ 
+   TODO
+   - What happens if the query groups by the MIN/MAX field, and there is no
+-    other field as in: "select min(a) from t1 group by a" ?
++    other field as in: "select MYSQL_MIN(a) from t1 group by a" ?
+   - We assume that the general correctness of the GROUP-BY query was checked
+     before this point. Is this correct, or do we have to check it completely?
+   - Lift the limitation in condition (B3), that is, make this access method 
+@@ -9496,7 +9496,7 @@
+         cur_group_prefix_len+= cur_part->store_length;
+         used_key_parts_map.set_bit(key_part_nr);
+         ++cur_group_key_parts;
+-        max_key_part= max(max_key_part,key_part_nr);
++        max_key_part= MYSQL_MAX(max_key_part,key_part_nr);
+       }
+       /*
+         Check that used key parts forms a prefix of the index.
+@@ -10132,9 +10132,9 @@
+     {
+       double blocks_per_group= (double) num_blocks / (double) num_groups;
+       p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
+-      p_overlap= min(p_overlap, 1.0);
++      p_overlap= MYSQL_MIN(p_overlap, 1.0);
+     }
+-    io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks);
++    io_cost= (double) MYSQL_MIN(num_groups * (1 + p_overlap), num_blocks);
+   }
+   else
+     io_cost= (keys_per_group > keys_per_block) ?
+diff -urN mysql-old/sql/opt_range.h mysql/sql/opt_range.h
+--- mysql-old/sql/opt_range.h	2011-05-10 17:45:45.640015709 +0000
++++ mysql/sql/opt_range.h	2011-05-10 17:56:01.476682376 +0000
+@@ -83,7 +83,7 @@
+   void make_min_endpoint(key_range *kr, uint prefix_length, 
+                          key_part_map keypart_map) {
+     make_min_endpoint(kr);
+-    kr->length= min(kr->length, prefix_length);
++    kr->length= MYSQL_MIN(kr->length, prefix_length);
+     kr->keypart_map&= keypart_map;
+   }
+   
+@@ -121,7 +121,7 @@
+   void make_max_endpoint(key_range *kr, uint prefix_length, 
+                          key_part_map keypart_map) {
+     make_max_endpoint(kr);
+-    kr->length= min(kr->length, prefix_length);
++    kr->length= MYSQL_MIN(kr->length, prefix_length);
+     kr->keypart_map&= keypart_map;
+   }
+ 
+diff -urN mysql-old/sql/protocol.cc mysql/sql/protocol.cc
+--- mysql-old/sql/protocol.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/protocol.cc	2011-05-10 17:56:01.476682376 +0000
+@@ -167,7 +167,7 @@
+     pos+=2;
+ 
+     /* We can only return up to 65535 warnings in two bytes */
+-    uint tmp= min(total_warn_count, 65535);
++    uint tmp= MYSQL_MIN(total_warn_count, 65535);
+     int2store(pos, tmp);
+     pos+= 2;
+   }
+@@ -262,7 +262,7 @@
+       Don't send warn count during SP execution, as the warn_list
+       is cleared between substatements, and mysqltest gets confused
+     */
+-    uint tmp= min(total_warn_count, 65535);
++    uint tmp= MYSQL_MIN(total_warn_count, 65535);
+     buff[0]= 254;
+     int2store(buff+1, tmp);
+     /*
+diff -urN mysql-old/sql/rpl_record.cc mysql/sql/rpl_record.cc
+--- mysql-old/sql/rpl_record.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/rpl_record.cc	2011-05-10 17:56:01.476682376 +0000
+@@ -285,7 +285,7 @@
+   /*
+     throw away master's extra fields
+   */
+-  uint max_cols= min(tabledef->size(), cols->n_bits);
++  uint max_cols= MYSQL_MIN(tabledef->size(), cols->n_bits);
+   for (; i < max_cols; i++)
+   {
+     if (bitmap_is_set(cols, i))
+diff -urN mysql-old/sql/rpl_rli.cc mysql/sql/rpl_rli.cc
+--- mysql-old/sql/rpl_rli.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/rpl_rli.cc	2011-05-10 17:56:01.476682376 +0000
+@@ -686,7 +686,7 @@
+   ulong log_name_extension;
+   char log_name_tmp[FN_REFLEN]; //make a char[] from String
+ 
+-  strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1));
++  strmake(log_name_tmp, log_name->ptr(), MYSQL_MIN(log_name->length(), FN_REFLEN-1));
+ 
+   char *p= fn_ext(log_name_tmp);
+   char *p_end;
+@@ -696,7 +696,7 @@
+     goto err;
+   }
+   // Convert 0-3 to 4
+-  log_pos= max(log_pos, BIN_LOG_HEADER_SIZE);
++  log_pos= MYSQL_MAX(log_pos, BIN_LOG_HEADER_SIZE);
+   /* p points to '.' */
+   log_name_extension= strtoul(++p, &p_end, 10);
+   /*
+diff -urN mysql-old/sql/rpl_utility.cc mysql/sql/rpl_utility.cc
+--- mysql-old/sql/rpl_utility.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/rpl_utility.cc	2011-05-10 17:56:01.480015710 +0000
+@@ -180,7 +180,7 @@
+   /*
+     We only check the initial columns for the tables.
+   */
+-  uint const cols_to_check= min(table->s->fields, size());
++  uint const cols_to_check= MYSQL_MIN(table->s->fields, size());
+   int error= 0;
+   Relay_log_info const *rli= const_cast<Relay_log_info*>(rli_arg);
+ 
+diff -urN mysql-old/sql/rpl_utility.h mysql/sql/rpl_utility.h
+--- mysql-old/sql/rpl_utility.h	2011-05-10 17:45:45.640015709 +0000
++++ mysql/sql/rpl_utility.h	2011-05-10 17:56:01.480015710 +0000
+@@ -295,7 +295,7 @@
+   do {                                             \
+     char buf[256];                                 \
+     uint i;                                        \
+-    for (i = 0 ; i < min(sizeof(buf) - 1, (BS)->n_bits) ; i++) \
++    for (i = 0 ; i < MYSQL_MIN(sizeof(buf) - 1, (BS)->n_bits) ; i++) \
+       buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \
+     buf[i] = '\0';                                 \
+     DBUG_PRINT((N), ((FRM), buf));                 \
+diff -urN mysql-old/sql/set_var.cc mysql/sql/set_var.cc
+--- mysql-old/sql/set_var.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/set_var.cc	2011-05-10 17:56:01.480015710 +0000
+@@ -1845,7 +1845,7 @@
+ 					    &not_used));
+     if (error_len)
+     {
+-      strmake(buff, error, min(sizeof(buff) - 1, error_len));
++      strmake(buff, error, MYSQL_MIN(sizeof(buff) - 1, error_len));
+       goto err;
+     }
+   }
+@@ -4030,7 +4030,7 @@
+                                &error, &error_len, &not_used);
+   if (error_len)
+   {
+-    strmake(buff, error, min(sizeof(buff) - 1, error_len));
++    strmake(buff, error, MYSQL_MIN(sizeof(buff) - 1, error_len));
+     goto err;
+   }
+   return FALSE;
+diff -urN mysql-old/sql/set_var.cc.orig mysql/sql/set_var.cc.orig
+--- mysql-old/sql/set_var.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/set_var.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,4357 @@
++/* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++/**
++  @file
++
++  @brief
++  Handling of MySQL SQL variables
++
++  @details
++  To add a new variable, one has to do the following:
++
++  - Use one of the 'sys_var...' classes from set_var.h or write a specific
++    one for the variable type.
++  - Define it in the 'variable definition list' in this file.
++  - If the variable is thread specific, add it to 'system_variables' struct.
++    If not, add it to mysqld.cc and add a declaration in 'mysql_priv.h'
++  - If the variable should be changed from the command line, add a definition
++    of it in the my_option structure list in mysqld.cc
++  - Don't forget to initialize new fields in global_system_variables and
++    max_system_variables!
++
++  @todo
++    Add full support for the variable character_set (for 4.1)
++
++  @todo
++    When updating myisam_delay_key_write, we should do a 'flush tables'
++    of all MyISAM tables to ensure that they are reopen with the
++    new attribute.
++
++  @note
++    Be careful with var->save_result: sys_var::check() only updates
++    ulonglong_value; so other members of the union are garbage then; to use
++    them you must first assign a value to them (in specific ::check() for
++    example).
++*/
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation				// gcc: Class implementation
++#endif
++
++#include "mysql_priv.h"
++#include <mysql.h>
++#include "slave.h"
++#include "rpl_mi.h"
++#include <my_getopt.h>
++#include <thr_alarm.h>
++#include <myisam.h>
++#include <my_dir.h>
++
++#include "events.h"
++
++/* WITH_NDBCLUSTER_STORAGE_ENGINE */
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++extern ulong ndb_cache_check_time;
++extern char opt_ndb_constrbuf[];
++extern ulong ndb_extra_logging;
++#endif
++
++#ifdef HAVE_NDB_BINLOG
++extern ulong ndb_report_thresh_binlog_epoch_slip;
++extern ulong ndb_report_thresh_binlog_mem_usage;
++#endif
++
++extern CHARSET_INFO *character_set_filesystem;
++
++
++static HASH system_variable_hash;
++
++const char *bool_type_names[]= { "OFF", "ON", NullS };
++TYPELIB bool_typelib=
++{
++  array_elements(bool_type_names)-1, "", bool_type_names, NULL
++};
++
++const char *delay_key_write_type_names[]= { "OFF", "ON", "ALL", NullS };
++TYPELIB delay_key_write_typelib=
++{
++  array_elements(delay_key_write_type_names)-1, "",
++  delay_key_write_type_names, NULL
++};
++
++static const char *slave_exec_mode_names[]= { "STRICT", "IDEMPOTENT", NullS };
++static unsigned int slave_exec_mode_names_len[]= { sizeof("STRICT") - 1,
++                                                   sizeof("IDEMPOTENT") - 1, 0 };
++TYPELIB slave_exec_mode_typelib=
++{
++  array_elements(slave_exec_mode_names)-1, "",
++  slave_exec_mode_names, slave_exec_mode_names_len
++};
++
++static int  sys_check_ftb_syntax(THD *thd,  set_var *var);
++static bool sys_update_ftb_syntax(THD *thd, set_var * var);
++static void sys_default_ftb_syntax(THD *thd, enum_var_type type);
++static bool sys_update_init_connect(THD*, set_var*);
++static void sys_default_init_connect(THD*, enum_var_type type);
++static bool sys_update_init_slave(THD*, set_var*);
++static void sys_default_init_slave(THD*, enum_var_type type);
++static bool set_option_bit(THD *thd, set_var *var);
++static bool set_option_log_bin_bit(THD *thd, set_var *var);
++static bool set_option_autocommit(THD *thd, set_var *var);
++static int  check_log_update(THD *thd, set_var *var);
++static bool set_log_update(THD *thd, set_var *var);
++static int  check_pseudo_thread_id(THD *thd, set_var *var);
++void fix_binlog_format_after_update(THD *thd, enum_var_type type);
++static void fix_low_priority_updates(THD *thd, enum_var_type type);
++static int check_tx_isolation(THD *thd, set_var *var);
++static void fix_tx_isolation(THD *thd, enum_var_type type);
++static int check_completion_type(THD *thd, set_var *var);
++static void fix_completion_type(THD *thd, enum_var_type type);
++static void fix_net_read_timeout(THD *thd, enum_var_type type);
++static void fix_net_write_timeout(THD *thd, enum_var_type type);
++static void fix_net_retry_count(THD *thd, enum_var_type type);
++static void fix_max_join_size(THD *thd, enum_var_type type);
++static void fix_query_cache_size(THD *thd, enum_var_type type);
++static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type);
++static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type);
++static void fix_max_binlog_size(THD *thd, enum_var_type type);
++static void fix_max_relay_log_size(THD *thd, enum_var_type type);
++static void fix_max_connections(THD *thd, enum_var_type type);
++static int check_max_delayed_threads(THD *thd, set_var *var);
++static void fix_thd_mem_root(THD *thd, enum_var_type type);
++static void fix_trans_mem_root(THD *thd, enum_var_type type);
++static void fix_server_id(THD *thd, enum_var_type type);
++bool throw_bounds_warning(THD *thd, bool fixed, bool unsignd,
++                          const char *name, longlong val);
++static KEY_CACHE *create_key_cache(const char *name, uint length);
++void fix_sql_mode_var(THD *thd, enum_var_type type);
++static uchar *get_error_count(THD *thd);
++static uchar *get_warning_count(THD *thd);
++static uchar *get_tmpdir(THD *thd);
++static int  sys_check_log_path(THD *thd,  set_var *var);
++static bool sys_update_general_log_path(THD *thd, set_var * var);
++static void sys_default_general_log_path(THD *thd, enum_var_type type);
++static bool sys_update_slow_log_path(THD *thd, set_var * var);
++static void sys_default_slow_log_path(THD *thd, enum_var_type type);
++static uchar *get_myisam_mmap_size(THD *thd);
++static int check_max_allowed_packet(THD *thd,  set_var *var);
++static int check_net_buffer_length(THD *thd,  set_var *var);
++
++/*
++  Variable definition list
++
++  These are variables that can be set from the command line, in
++  alphabetic order.
++
++  The variables are linked into the list. A variable is added to
++  it in the constructor (see sys_var class for details).
++*/
++
++static sys_var_chain vars = { NULL, NULL };
++
++static sys_var_thd_ulong
++sys_auto_increment_increment(&vars, "auto_increment_increment",
++                             &SV::auto_increment_increment, NULL, NULL,
++                             sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_thd_ulong
++sys_auto_increment_offset(&vars, "auto_increment_offset",
++                          &SV::auto_increment_offset, NULL, NULL,
++                          sys_var::SESSION_VARIABLE_IN_BINLOG);
++
++static sys_var_bool_ptr	sys_automatic_sp_privileges(&vars, "automatic_sp_privileges",
++					      &sp_automatic_privileges);
++
++static sys_var_const            sys_back_log(&vars, "back_log",
++                                             OPT_GLOBAL, SHOW_LONG,
++                                             (uchar*) &back_log);
++static sys_var_const_os_str       sys_basedir(&vars, "basedir", mysql_home);
++static sys_var_long_ptr	sys_binlog_cache_size(&vars, "binlog_cache_size",
++					      &binlog_cache_size);
++static sys_var_thd_binlog_format sys_binlog_format(&vars, "binlog_format",
++                                            &SV::binlog_format);
++static sys_var_thd_bool sys_binlog_direct_non_trans_update(&vars, "binlog_direct_non_transactional_updates",
++                                                           &SV::binlog_direct_non_trans_update);
++static sys_var_thd_ulong	sys_bulk_insert_buff_size(&vars, "bulk_insert_buffer_size",
++						  &SV::bulk_insert_buff_size);
++static sys_var_const_os         sys_character_sets_dir(&vars,
++                                                       "character_sets_dir",
++                                                       OPT_GLOBAL, SHOW_CHAR,
++                                                       (uchar*)
++                                                       mysql_charsets_dir);
++static sys_var_character_set_sv
++sys_character_set_server(&vars, "character_set_server",
++                         &SV::collation_server, &default_charset_info, 0,
++                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++sys_var_const_str       sys_charset_system(&vars, "character_set_system",
++                                           (char *)my_charset_utf8_general_ci.name);
++static sys_var_character_set_database
++sys_character_set_database(&vars, "character_set_database",
++                           sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_character_set_client
++sys_character_set_client(&vars, "character_set_client",
++                         &SV::character_set_client,
++                         &default_charset_info,
++                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_character_set_sv
++sys_character_set_connection(&vars, "character_set_connection",
++                             &SV::collation_connection,
++                             &default_charset_info, 0,
++                             sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_character_set_sv sys_character_set_results(&vars, "character_set_results",
++                                        &SV::character_set_results,
++                                        &default_charset_info, true);
++static sys_var_character_set_sv sys_character_set_filesystem(&vars, "character_set_filesystem",
++                                        &SV::character_set_filesystem,
++                                        &character_set_filesystem);
++static sys_var_thd_ulong	sys_completion_type(&vars, "completion_type",
++					 &SV::completion_type,
++					 check_completion_type,
++					 fix_completion_type);
++static sys_var_collation_sv
++sys_collation_connection(&vars, "collation_connection",
++                         &SV::collation_connection, &default_charset_info,
++                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_collation_sv
++sys_collation_database(&vars, "collation_database", &SV::collation_database,
++                       &default_charset_info,
++                       sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_collation_sv
++sys_collation_server(&vars, "collation_server", &SV::collation_server,
++                     &default_charset_info,
++                     sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_long_ptr	sys_concurrent_insert(&vars, "concurrent_insert",
++                                              &myisam_concurrent_insert);
++static sys_var_long_ptr	sys_connect_timeout(&vars, "connect_timeout",
++					    &connect_timeout);
++static sys_var_const_os_str       sys_datadir(&vars, "datadir", mysql_real_data_home);
++#ifndef DBUG_OFF
++static sys_var_thd_dbug        sys_dbug(&vars, "debug");
++#endif
++static sys_var_enum		sys_delay_key_write(&vars, "delay_key_write",
++					    &delay_key_write_options,
++					    &delay_key_write_typelib,
++					    fix_delay_key_write);
++static sys_var_long_ptr	sys_delayed_insert_limit(&vars, "delayed_insert_limit",
++						 &delayed_insert_limit);
++static sys_var_long_ptr	sys_delayed_insert_timeout(&vars, "delayed_insert_timeout",
++						   &delayed_insert_timeout);
++static sys_var_long_ptr	sys_delayed_queue_size(&vars, "delayed_queue_size",
++					       &delayed_queue_size);
++
++#ifdef HAVE_EVENT_SCHEDULER
++static sys_var_event_scheduler sys_event_scheduler(&vars, "event_scheduler");
++#endif
++
++static sys_var_long_ptr	sys_expire_logs_days(&vars, "expire_logs_days",
++					     &expire_logs_days);
++static sys_var_bool_ptr	sys_flush(&vars, "flush", &myisam_flush);
++static sys_var_long_ptr	sys_flush_time(&vars, "flush_time", &flush_time);
++static sys_var_str      sys_ft_boolean_syntax(&vars, "ft_boolean_syntax",
++                                              sys_check_ftb_syntax,
++                                              sys_update_ftb_syntax,
++                                              sys_default_ftb_syntax,
++                                              ft_boolean_syntax);
++static sys_var_const    sys_ft_max_word_len(&vars, "ft_max_word_len",
++                                            OPT_GLOBAL, SHOW_LONG,
++                                            (uchar*) &ft_max_word_len);
++static sys_var_const    sys_ft_min_word_len(&vars, "ft_min_word_len",
++                                            OPT_GLOBAL, SHOW_LONG,
++                                            (uchar*) &ft_min_word_len);
++static sys_var_const    sys_ft_query_expansion_limit(&vars,
++                                                     "ft_query_expansion_limit",
++                                                     OPT_GLOBAL, SHOW_LONG,
++                                                     (uchar*)
++                                                     &ft_query_expansion_limit);
++static sys_var_const    sys_ft_stopword_file(&vars, "ft_stopword_file",
++                                             OPT_GLOBAL, SHOW_CHAR_PTR,
++                                             (uchar*) &ft_stopword_file);
++
++static sys_var_const    sys_ignore_builtin_innodb(&vars, "ignore_builtin_innodb",
++                                                  OPT_GLOBAL, SHOW_BOOL,
++                                                  (uchar*) &opt_ignore_builtin_innodb);
++
++sys_var_str             sys_init_connect(&vars, "init_connect", 0,
++                                         sys_update_init_connect,
++                                         sys_default_init_connect,0);
++static sys_var_const    sys_init_file(&vars, "init_file",
++                                      OPT_GLOBAL, SHOW_CHAR_PTR,
++                                      (uchar*) &opt_init_file);
++sys_var_str             sys_init_slave(&vars, "init_slave", 0,
++                                       sys_update_init_slave,
++                                       sys_default_init_slave,0);
++static sys_var_thd_ulong	sys_interactive_timeout(&vars, "interactive_timeout",
++						&SV::net_interactive_timeout);
++static sys_var_thd_ulong	sys_join_buffer_size(&vars, "join_buffer_size",
++					     &SV::join_buff_size);
++static sys_var_key_buffer_size	sys_key_buffer_size(&vars, "key_buffer_size");
++static sys_var_key_cache_long  sys_key_cache_block_size(&vars, "key_cache_block_size",
++						 offsetof(KEY_CACHE,
++							  param_block_size));
++static sys_var_key_cache_long	sys_key_cache_division_limit(&vars, "key_cache_division_limit",
++						     offsetof(KEY_CACHE,
++							      param_division_limit));
++static sys_var_key_cache_long  sys_key_cache_age_threshold(&vars, "key_cache_age_threshold",
++						     offsetof(KEY_CACHE,
++							      param_age_threshold));
++static sys_var_const    sys_language(&vars, "language",
++                                     OPT_GLOBAL, SHOW_CHAR,
++                                     (uchar*) language);
++static sys_var_const    sys_large_files_support(&vars, "large_files_support",
++                                                OPT_GLOBAL, SHOW_BOOL,
++                                                (uchar*) &opt_large_files);
++static sys_var_const    sys_large_page_size(&vars, "large_page_size",
++                                            OPT_GLOBAL, SHOW_INT,
++                                            (uchar*) &opt_large_page_size);
++static sys_var_const    sys_large_pages(&vars, "large_pages",
++                                        OPT_GLOBAL, SHOW_MY_BOOL,
++                                        (uchar*) &opt_large_pages);
++static sys_var_bool_ptr	sys_local_infile(&vars, "local_infile",
++					 &opt_local_infile);
++#ifdef HAVE_MLOCKALL
++static sys_var_const    sys_locked_in_memory(&vars, "locked_in_memory",
++                                             OPT_GLOBAL, SHOW_MY_BOOL,
++                                             (uchar*) &locked_in_memory);
++#endif
++static sys_var_const    sys_log_bin(&vars, "log_bin",
++                                    OPT_GLOBAL, SHOW_BOOL,
++                                    (uchar*) &opt_bin_log);
++static sys_var_trust_routine_creators
++sys_trust_routine_creators(&vars, "log_bin_trust_routine_creators",
++                           &trust_function_creators);
++static sys_var_bool_ptr       
++sys_trust_function_creators(&vars, "log_bin_trust_function_creators",
++                            &trust_function_creators);
++static sys_var_const    sys_log_error(&vars, "log_error",
++                                      OPT_GLOBAL, SHOW_CHAR,
++                                      (uchar*) log_error_file);
++static sys_var_bool_ptr
++  sys_log_queries_not_using_indexes(&vars, "log_queries_not_using_indexes",
++                                    &opt_log_queries_not_using_indexes);
++static sys_var_thd_ulong	sys_log_warnings(&vars, "log_warnings", &SV::log_warnings);
++static sys_var_microseconds	sys_var_long_query_time(&vars, "long_query_time",
++                                                        &SV::long_query_time);
++static sys_var_thd_bool	sys_low_priority_updates(&vars, "low_priority_updates",
++						 &SV::low_priority_updates,
++						 fix_low_priority_updates);
++#ifndef TO_BE_DELETED	/* Alias for low_priority_updates */
++static sys_var_thd_bool	sys_sql_low_priority_updates(&vars, "sql_low_priority_updates",
++						     &SV::low_priority_updates,
++						     fix_low_priority_updates);
++#endif
++static sys_var_const    sys_lower_case_file_system(&vars,
++                                                   "lower_case_file_system",
++                                                   OPT_GLOBAL, SHOW_MY_BOOL,
++                                                   (uchar*)
++                                                   &lower_case_file_system);
++static sys_var_const    sys_lower_case_table_names(&vars,
++                                                   "lower_case_table_names",
++                                                   OPT_GLOBAL, SHOW_INT,
++                                                   (uchar*)
++                                                   &lower_case_table_names);
++static sys_var_thd_ulong_session_readonly sys_max_allowed_packet(&vars, "max_allowed_packet",
++					       &SV::max_allowed_packet,
++                                               check_max_allowed_packet);
++static sys_var_ulonglong_ptr sys_max_binlog_cache_size(&vars, "max_binlog_cache_size",
++                                                       &max_binlog_cache_size);
++static sys_var_long_ptr	sys_max_binlog_size(&vars, "max_binlog_size",
++					    &max_binlog_size,
++                                            fix_max_binlog_size);
++static sys_var_long_ptr	sys_max_connections(&vars, "max_connections",
++					    &max_connections,
++                                            fix_max_connections);
++static sys_var_long_ptr	sys_max_connect_errors(&vars, "max_connect_errors",
++					       &max_connect_errors);
++static sys_var_thd_ulong       sys_max_insert_delayed_threads(&vars, "max_insert_delayed_threads",
++						       &SV::max_insert_delayed_threads,
++                                                       check_max_delayed_threads,
++                                                       fix_max_connections);
++static sys_var_thd_ulong	sys_max_delayed_threads(&vars, "max_delayed_threads",
++						&SV::max_insert_delayed_threads,
++                                                check_max_delayed_threads,
++                                                fix_max_connections);
++static sys_var_thd_ulong	sys_max_error_count(&vars, "max_error_count",
++					    &SV::max_error_count);
++static sys_var_thd_ulonglong	sys_max_heap_table_size(&vars, "max_heap_table_size",
++						&SV::max_heap_table_size);
++static sys_var_thd_ulong sys_pseudo_thread_id(&vars, "pseudo_thread_id",
++                                              &SV::pseudo_thread_id,
++                                              check_pseudo_thread_id, 0,
++                                              sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_thd_ha_rows	sys_max_join_size(&vars, "max_join_size",
++					  &SV::max_join_size,
++					  fix_max_join_size);
++static sys_var_thd_ulong	sys_max_seeks_for_key(&vars, "max_seeks_for_key",
++					      &SV::max_seeks_for_key);
++static sys_var_thd_ulong   sys_max_length_for_sort_data(&vars, "max_length_for_sort_data",
++                                                 &SV::max_length_for_sort_data);
++static sys_var_const    sys_max_long_data_size(&vars,
++                                               "max_long_data_size",
++                                               OPT_GLOBAL, SHOW_LONG,
++                                               (uchar*)
++                                               &max_long_data_size);
++
++#ifndef TO_BE_DELETED	/* Alias for max_join_size */
++static sys_var_thd_ha_rows	sys_sql_max_join_size(&vars, "sql_max_join_size",
++					      &SV::max_join_size,
++					      fix_max_join_size);
++#endif
++static sys_var_long_ptr_global
++sys_max_prepared_stmt_count(&vars, "max_prepared_stmt_count",
++                            &max_prepared_stmt_count,
++                            &LOCK_prepared_stmt_count);
++static sys_var_long_ptr	sys_max_relay_log_size(&vars, "max_relay_log_size",
++                                               &max_relay_log_size,
++                                               fix_max_relay_log_size);
++static sys_var_thd_ulong	sys_max_sort_length(&vars, "max_sort_length",
++					    &SV::max_sort_length);
++static sys_var_thd_ulong	sys_max_sp_recursion_depth(&vars, "max_sp_recursion_depth",
++                                                   &SV::max_sp_recursion_depth);
++static sys_var_max_user_conn   sys_max_user_connections(&vars, "max_user_connections");
++static sys_var_thd_ulong	sys_max_tmp_tables(&vars, "max_tmp_tables",
++					   &SV::max_tmp_tables);
++static sys_var_long_ptr	sys_max_write_lock_count(&vars, "max_write_lock_count",
++						 &max_write_lock_count);
++static sys_var_thd_ulong       sys_min_examined_row_limit(&vars, "min_examined_row_limit",
++                                                          &SV::min_examined_row_limit);
++static sys_var_thd_ulong       sys_multi_range_count(&vars, "multi_range_count",
++                                              &SV::multi_range_count);
++static sys_var_long_ptr	sys_myisam_data_pointer_size(&vars, "myisam_data_pointer_size",
++                                                    &myisam_data_pointer_size);
++static sys_var_thd_ulonglong	sys_myisam_max_sort_file_size(&vars, "myisam_max_sort_file_size", &SV::myisam_max_sort_file_size, fix_myisam_max_sort_file_size, 1);
++static sys_var_const sys_myisam_recover_options(&vars, "myisam_recover_options",
++                                                OPT_GLOBAL, SHOW_CHAR_PTR,
++                                                (uchar*)
++                                                &myisam_recover_options_str);
++static sys_var_thd_ulong       sys_myisam_repair_threads(&vars, "myisam_repair_threads", &SV::myisam_repair_threads);
++static sys_var_thd_ulong	sys_myisam_sort_buffer_size(&vars, "myisam_sort_buffer_size", &SV::myisam_sort_buff_size);
++static sys_var_bool_ptr	sys_myisam_use_mmap(&vars, "myisam_use_mmap",
++                                            &opt_myisam_use_mmap);
++
++static sys_var_thd_enum         sys_myisam_stats_method(&vars, "myisam_stats_method",
++                                                &SV::myisam_stats_method,
++                                                &myisam_stats_method_typelib,
++                                                NULL);
++
++#ifdef __NT__
++/* purecov: begin inspected */
++static sys_var_const            sys_named_pipe(&vars, "named_pipe",
++                                               OPT_GLOBAL, SHOW_MY_BOOL,
++                                               (uchar*) &opt_enable_named_pipe);
++/* purecov: end */
++#endif
++static sys_var_thd_ulong_session_readonly sys_net_buffer_length(&vars, "net_buffer_length",
++					      &SV::net_buffer_length,
++                                              check_net_buffer_length);
++static sys_var_thd_ulong	sys_net_read_timeout(&vars, "net_read_timeout",
++					     &SV::net_read_timeout,
++					     0, fix_net_read_timeout);
++static sys_var_thd_ulong	sys_net_write_timeout(&vars, "net_write_timeout",
++					      &SV::net_write_timeout,
++					      0, fix_net_write_timeout);
++static sys_var_thd_ulong	sys_net_retry_count(&vars, "net_retry_count",
++					    &SV::net_retry_count,
++					    0, fix_net_retry_count);
++static sys_var_thd_bool	sys_new_mode(&vars, "new", &SV::new_mode);
++static sys_var_bool_ptr_readonly sys_old_mode(&vars, "old",
++                                       &global_system_variables.old_mode);
++/* these two cannot be static */
++sys_var_thd_bool                sys_old_alter_table(&vars, "old_alter_table",
++                                            &SV::old_alter_table);
++sys_var_thd_bool                sys_old_passwords(&vars, "old_passwords", &SV::old_passwords);
++static sys_var_const            sys_open_files_limit(&vars, "open_files_limit",
++                                                     OPT_GLOBAL, SHOW_LONG,
++                                                     (uchar*)
++                                                     &open_files_limit);
++static sys_var_thd_ulong        sys_optimizer_prune_level(&vars, "optimizer_prune_level",
++                                                  &SV::optimizer_prune_level);
++static sys_var_thd_ulong        sys_optimizer_search_depth(&vars, "optimizer_search_depth",
++                                                   &SV::optimizer_search_depth);
++static sys_var_thd_optimizer_switch   sys_optimizer_switch(&vars, "optimizer_switch",
++                                     &SV::optimizer_switch);
++static sys_var_const            sys_pid_file(&vars, "pid_file",
++                                             OPT_GLOBAL, SHOW_CHAR,
++                                             (uchar*) pidfile_name);
++static sys_var_const_os         sys_plugin_dir(&vars, "plugin_dir",
++                                               OPT_GLOBAL, SHOW_CHAR,
++                                               (uchar*) opt_plugin_dir);
++static sys_var_const            sys_port(&vars, "port",
++                                         OPT_GLOBAL, SHOW_INT,
++                                         (uchar*) &mysqld_port);
++static sys_var_thd_ulong        sys_preload_buff_size(&vars, "preload_buffer_size",
++                                              &SV::preload_buff_size);
++static sys_var_const            sys_protocol_version(&vars, "protocol_version",
++                                                     OPT_GLOBAL, SHOW_INT,
++                                                     (uchar*)
++                                                     &protocol_version);
++static sys_var_thd_ulong	sys_read_buff_size(&vars, "read_buffer_size",
++					   &SV::read_buff_size);
++static sys_var_opt_readonly	sys_readonly(&vars, "read_only", &opt_readonly);
++static sys_var_thd_ulong	sys_read_rnd_buff_size(&vars, "read_rnd_buffer_size",
++					       &SV::read_rnd_buff_size);
++static sys_var_thd_ulong	sys_div_precincrement(&vars, "div_precision_increment",
++                                              &SV::div_precincrement);
++static sys_var_long_ptr	sys_rpl_recovery_rank(&vars, "rpl_recovery_rank",
++					      &rpl_recovery_rank);
++static sys_var_long_ptr	sys_query_cache_size(&vars, "query_cache_size",
++					     &query_cache_size,
++					     fix_query_cache_size);
++
++static sys_var_thd_ulong	sys_range_alloc_block_size(&vars, "range_alloc_block_size",
++						   &SV::range_alloc_block_size);
++static sys_var_thd_ulong	sys_query_alloc_block_size(&vars, "query_alloc_block_size",
++						   &SV::query_alloc_block_size,
++						   0, fix_thd_mem_root);
++static sys_var_thd_ulong	sys_query_prealloc_size(&vars, "query_prealloc_size",
++						&SV::query_prealloc_size,
++						0, fix_thd_mem_root);
++#ifdef HAVE_SMEM
++/* purecov: begin tested */
++static sys_var_const    sys_shared_memory(&vars, "shared_memory",
++                                          OPT_GLOBAL, SHOW_MY_BOOL,
++                                          (uchar*)
++                                          &opt_enable_shared_memory);
++static sys_var_const    sys_shared_memory_base_name(&vars,
++                                                    "shared_memory_base_name",
++                                                    OPT_GLOBAL, SHOW_CHAR_PTR,
++                                                    (uchar*)
++                                                    &shared_memory_base_name);
++/* purecov: end */
++#endif
++static sys_var_const    sys_skip_external_locking(&vars,
++                                                  "skip_external_locking",
++                                                  OPT_GLOBAL, SHOW_MY_BOOL,
++                                                  (uchar*)
++                                                  &my_disable_locking);
++static sys_var_const    sys_skip_networking(&vars, "skip_networking",
++                                            OPT_GLOBAL, SHOW_BOOL,
++                                            (uchar*) &opt_disable_networking);
++static sys_var_const    sys_skip_show_database(&vars, "skip_show_database",
++                                            OPT_GLOBAL, SHOW_BOOL,
++                                            (uchar*) &opt_skip_show_db);
++
++static sys_var_const    sys_skip_name_resolve(&vars, "skip_name_resolve",
++                                            OPT_GLOBAL, SHOW_BOOL,
++                                            (uchar*) &opt_skip_name_resolve);
++
++static sys_var_const    sys_socket(&vars, "socket",
++                                   OPT_GLOBAL, SHOW_CHAR_PTR,
++                                   (uchar*) &mysqld_unix_port);
++
++#ifdef HAVE_THR_SETCONCURRENCY
++/* purecov: begin tested */
++static sys_var_const    sys_thread_concurrency(&vars, "thread_concurrency",
++                                               OPT_GLOBAL, SHOW_LONG,
++                                               (uchar*) &concurrency);
++/* purecov: end */
++#endif
++static sys_var_const    sys_thread_stack(&vars, "thread_stack",
++                                         OPT_GLOBAL, SHOW_LONG,
++                                         (uchar*) &my_thread_stack_size);
++static sys_var_readonly_os      sys_tmpdir(&vars, "tmpdir", OPT_GLOBAL, SHOW_CHAR, get_tmpdir);
++static sys_var_thd_ulong	sys_trans_alloc_block_size(&vars, "transaction_alloc_block_size",
++						   &SV::trans_alloc_block_size,
++						   0, fix_trans_mem_root);
++static sys_var_thd_ulong	sys_trans_prealloc_size(&vars, "transaction_prealloc_size",
++						&SV::trans_prealloc_size,
++						0, fix_trans_mem_root);
++sys_var_enum_const      sys_thread_handling(&vars, "thread_handling",
++                                            &SV::thread_handling,
++                                            &thread_handling_typelib,
++                                            NULL);
++
++#ifdef HAVE_QUERY_CACHE
++static sys_var_long_ptr	sys_query_cache_limit(&vars, "query_cache_limit",
++					      &query_cache.query_cache_limit);
++static sys_var_long_ptr        sys_query_cache_min_res_unit(&vars, "query_cache_min_res_unit",
++						     &query_cache_min_res_unit,
++						     fix_query_cache_min_res_unit);
++static sys_var_thd_enum	sys_query_cache_type(&vars, "query_cache_type",
++					     &SV::query_cache_type,
++					     &query_cache_type_typelib);
++static sys_var_thd_bool
++sys_query_cache_wlock_invalidate(&vars, "query_cache_wlock_invalidate",
++				 &SV::query_cache_wlock_invalidate);
++#endif /* HAVE_QUERY_CACHE */
++static sys_var_bool_ptr	sys_secure_auth(&vars, "secure_auth", &opt_secure_auth);
++static sys_var_const_str_ptr sys_secure_file_priv(&vars, "secure_file_priv",
++                                             &opt_secure_file_priv);
++static sys_var_long_ptr	sys_server_id(&vars, "server_id", &server_id, fix_server_id);
++static sys_var_bool_ptr	sys_slave_compressed_protocol(&vars, "slave_compressed_protocol",
++						      &opt_slave_compressed_protocol);
++static sys_var_set_slave_mode slave_exec_mode(&vars,
++                                              "slave_exec_mode",
++                                              &slave_exec_mode_options,
++                                              &slave_exec_mode_typelib,
++                                              0);
++static sys_var_long_ptr	sys_slow_launch_time(&vars, "slow_launch_time",
++					     &slow_launch_time);
++static sys_var_thd_ulong	sys_sort_buffer(&vars, "sort_buffer_size",
++					&SV::sortbuff_size);
++/*
++  sql_mode should *not* have binlog_mode=SESSION_VARIABLE_IN_BINLOG:
++  even though it is written to the binlog, the slave ignores the
++  MODE_NO_DIR_IN_CREATE variable, so the slave's value differs from the
++  master's (see log_event.cc: Query_log_event::do_apply_event()).
++*/
++static sys_var_thd_sql_mode    sys_sql_mode(&vars, "sql_mode",
++                                            &SV::sql_mode);
++#ifdef HAVE_OPENSSL
++extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
++            *opt_ssl_key;
++static sys_var_const_os_str_ptr	sys_ssl_ca(&vars, "ssl_ca", &opt_ssl_ca);
++static sys_var_const_os_str_ptr	sys_ssl_capath(&vars, "ssl_capath", &opt_ssl_capath);
++static sys_var_const_os_str_ptr	sys_ssl_cert(&vars, "ssl_cert", &opt_ssl_cert);
++static sys_var_const_os_str_ptr	sys_ssl_cipher(&vars, "ssl_cipher", &opt_ssl_cipher);
++static sys_var_const_os_str_ptr	sys_ssl_key(&vars, "ssl_key", &opt_ssl_key);
++#else
++static sys_var_const_os_str	sys_ssl_ca(&vars, "ssl_ca", NULL);
++static sys_var_const_os_str	sys_ssl_capath(&vars, "ssl_capath", NULL);
++static sys_var_const_os_str	sys_ssl_cert(&vars, "ssl_cert", NULL);
++static sys_var_const_os_str	sys_ssl_cipher(&vars, "ssl_cipher", NULL);
++static sys_var_const_os_str	sys_ssl_key(&vars, "ssl_key", NULL);
++#endif
++static sys_var_thd_enum
++sys_updatable_views_with_limit(&vars, "updatable_views_with_limit",
++                               &SV::updatable_views_with_limit,
++                               &updatable_views_with_limit_typelib);
++
++static sys_var_thd_table_type  sys_table_type(&vars, "table_type",
++				       &SV::table_plugin);
++static sys_var_thd_storage_engine sys_storage_engine(&vars, "storage_engine",
++				       &SV::table_plugin);
++static sys_var_bool_ptr	sys_sync_frm(&vars, "sync_frm", &opt_sync_frm);
++static sys_var_const_str	sys_system_time_zone(&vars, "system_time_zone",
++                                             system_time_zone);
++static sys_var_long_ptr	sys_table_def_size(&vars, "table_definition_cache",
++                                           &table_def_size);
++static sys_var_long_ptr	sys_table_cache_size(&vars, "table_open_cache",
++					     &table_cache_size);
++static sys_var_long_ptr	sys_table_lock_wait_timeout(&vars, "table_lock_wait_timeout",
++                                                    &table_lock_wait_timeout);
++
++#if defined(ENABLED_DEBUG_SYNC)
++/* Debug Sync Facility. Implemented in debug_sync.cc. */
++static sys_var_debug_sync sys_debug_sync(&vars, "debug_sync");
++#endif /* defined(ENABLED_DEBUG_SYNC) */
++
++static sys_var_long_ptr	sys_thread_cache_size(&vars, "thread_cache_size",
++					      &thread_cache_size);
++#if HAVE_POOL_OF_THREADS == 1
++sys_var_long_ptr	sys_thread_pool_size(&vars, "thread_pool_size",
++					      &thread_pool_size);
++#endif
++static sys_var_thd_enum	sys_tx_isolation(&vars, "tx_isolation",
++					 &SV::tx_isolation,
++					 &tx_isolation_typelib,
++					 fix_tx_isolation,
++					 check_tx_isolation);
++static sys_var_thd_ulonglong	sys_tmp_table_size(&vars, "tmp_table_size",
++					   &SV::tmp_table_size);
++static sys_var_bool_ptr  sys_timed_mutexes(&vars, "timed_mutexes",
++                                    &timed_mutexes);
++static sys_var_const_str	sys_version(&vars, "version", server_version);
++static sys_var_const_str	sys_version_comment(&vars, "version_comment",
++                                            MYSQL_COMPILATION_COMMENT);
++static sys_var_const_str	sys_version_compile_machine(&vars, "version_compile_machine",
++                                                    MACHINE_TYPE);
++static sys_var_const_str	sys_version_compile_os(&vars, "version_compile_os",
++                                               SYSTEM_TYPE);
++static sys_var_thd_ulong	sys_net_wait_timeout(&vars, "wait_timeout",
++					     &SV::net_wait_timeout);
++
++/* Condition pushdown to storage engine */
++static sys_var_thd_bool
++sys_engine_condition_pushdown(&vars, "engine_condition_pushdown",
++			      &SV::engine_condition_pushdown);
++
++#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
++/* ndb thread specific variable settings */
++static sys_var_thd_ulong
++sys_ndb_autoincrement_prefetch_sz(&vars, "ndb_autoincrement_prefetch_sz",
++				  &SV::ndb_autoincrement_prefetch_sz);
++static sys_var_thd_bool
++sys_ndb_force_send(&vars, "ndb_force_send", &SV::ndb_force_send);
++#ifdef HAVE_NDB_BINLOG
++static sys_var_long_ptr
++sys_ndb_report_thresh_binlog_epoch_slip(&vars, "ndb_report_thresh_binlog_epoch_slip",
++                                        &ndb_report_thresh_binlog_epoch_slip);
++static sys_var_long_ptr
++sys_ndb_report_thresh_binlog_mem_usage(&vars, "ndb_report_thresh_binlog_mem_usage",
++                                       &ndb_report_thresh_binlog_mem_usage);
++#endif
++static sys_var_thd_bool
++sys_ndb_use_exact_count(&vars, "ndb_use_exact_count", &SV::ndb_use_exact_count);
++static sys_var_thd_bool
++sys_ndb_use_transactions(&vars, "ndb_use_transactions", &SV::ndb_use_transactions);
++static sys_var_long_ptr
++sys_ndb_cache_check_time(&vars, "ndb_cache_check_time", &ndb_cache_check_time);
++static sys_var_const_str
++sys_ndb_connectstring(&vars, "ndb_connectstring", opt_ndb_constrbuf);
++static sys_var_thd_bool
++sys_ndb_index_stat_enable(&vars, "ndb_index_stat_enable",
++                          &SV::ndb_index_stat_enable);
++static sys_var_thd_ulong
++sys_ndb_index_stat_cache_entries(&vars, "ndb_index_stat_cache_entries",
++                                 &SV::ndb_index_stat_cache_entries);
++static sys_var_thd_ulong
++sys_ndb_index_stat_update_freq(&vars, "ndb_index_stat_update_freq",
++                               &SV::ndb_index_stat_update_freq);
++static sys_var_long_ptr
++sys_ndb_extra_logging(&vars, "ndb_extra_logging", &ndb_extra_logging);
++static sys_var_thd_bool
++sys_ndb_use_copying_alter_table(&vars, "ndb_use_copying_alter_table", &SV::ndb_use_copying_alter_table);
++#endif //WITH_NDBCLUSTER_STORAGE_ENGINE
++
++/* Time/date/datetime formats */
++
++static sys_var_thd_date_time_format sys_time_format(&vars, "time_format",
++					     &SV::time_format,
++					     MYSQL_TIMESTAMP_TIME);
++static sys_var_thd_date_time_format sys_date_format(&vars, "date_format",
++					     &SV::date_format,
++					     MYSQL_TIMESTAMP_DATE);
++static sys_var_thd_date_time_format sys_datetime_format(&vars, "datetime_format",
++						 &SV::datetime_format,
++						 MYSQL_TIMESTAMP_DATETIME);
++
++/* Variables that are bits in THD */
++
++sys_var_thd_bit sys_autocommit(&vars, "autocommit", 0,
++                               set_option_autocommit,
++                               OPTION_NOT_AUTOCOMMIT,
++                               1);
++static sys_var_thd_bit	sys_big_tables(&vars, "big_tables", 0,
++				       set_option_bit,
++				       OPTION_BIG_TABLES);
++#ifndef TO_BE_DELETED	/* Alias for big_tables */
++static sys_var_thd_bit	sys_sql_big_tables(&vars, "sql_big_tables", 0,
++					   set_option_bit,
++					   OPTION_BIG_TABLES);
++#endif
++static sys_var_thd_bit	sys_big_selects(&vars, "sql_big_selects", 0,
++					set_option_bit,
++					OPTION_BIG_SELECTS);
++static sys_var_thd_bit	sys_log_off(&vars, "sql_log_off",
++				    check_log_update,
++				    set_option_bit,
++				    OPTION_LOG_OFF);
++static sys_var_thd_bit	sys_log_update(&vars, "sql_log_update",
++                                       check_log_update,
++				       set_log_update,
++				       OPTION_BIN_LOG);
++static sys_var_thd_bit	sys_log_binlog(&vars, "sql_log_bin",
++                                       check_log_update,
++                                       set_option_log_bin_bit,
++				       OPTION_BIN_LOG);
++static sys_var_thd_bit	sys_sql_warnings(&vars, "sql_warnings", 0,
++					 set_option_bit,
++					 OPTION_WARNINGS);
++static sys_var_thd_bit	sys_sql_notes(&vars, "sql_notes", 0,
++					 set_option_bit,
++					 OPTION_SQL_NOTES);
++static sys_var_thd_bit	sys_auto_is_null(&vars, "sql_auto_is_null", 0,
++					 set_option_bit,
++                                         OPTION_AUTO_IS_NULL, 0,
++                                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_thd_bit	sys_safe_updates(&vars, "sql_safe_updates", 0,
++					 set_option_bit,
++					 OPTION_SAFE_UPDATES);
++static sys_var_thd_bit	sys_buffer_results(&vars, "sql_buffer_result", 0,
++					   set_option_bit,
++					   OPTION_BUFFER_RESULT);
++static sys_var_thd_bit	sys_quote_show_create(&vars, "sql_quote_show_create", 0,
++					      set_option_bit,
++					      OPTION_QUOTE_SHOW_CREATE);
++static sys_var_thd_bit	sys_foreign_key_checks(&vars, "foreign_key_checks", 0,
++					       set_option_bit,
++					       OPTION_NO_FOREIGN_KEY_CHECKS,
++                                               1, sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_thd_bit	sys_unique_checks(&vars, "unique_checks", 0,
++					  set_option_bit,
++					  OPTION_RELAXED_UNIQUE_CHECKS,
++                                          1,
++                                          sys_var::SESSION_VARIABLE_IN_BINLOG);
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++static sys_var_thd_bit  sys_profiling(&vars, "profiling", NULL, 
++                                      set_option_bit,
++                                      ulonglong(OPTION_PROFILING));
++static sys_var_thd_ulong	sys_profiling_history_size(&vars, "profiling_history_size",
++					      &SV::profiling_history_size);
++#endif
++
++/* Local state variables */
++
++static sys_var_thd_ha_rows	sys_select_limit(&vars, "sql_select_limit",
++						 &SV::select_limit);
++static sys_var_timestamp sys_timestamp(&vars, "timestamp",
++                                       sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_last_insert_id
++sys_last_insert_id(&vars, "last_insert_id",
++                   sys_var::SESSION_VARIABLE_IN_BINLOG);
++/*
++  identity is an alias for last_insert_id(), so that we are compatible
++  with Sybase
++*/
++static sys_var_last_insert_id
++sys_identity(&vars, "identity", sys_var::SESSION_VARIABLE_IN_BINLOG);
++
++static sys_var_thd_lc_time_names
++sys_lc_time_names(&vars, "lc_time_names", sys_var::SESSION_VARIABLE_IN_BINLOG);
++
++/*
++  insert_id should *not* be marked as written to the binlog (i.e., it
++  should *not* have binlog_status==SESSION_VARIABLE_IN_BINLOG),
++  because we want any statement that refers to insert_id explicitly to
++  be unsafe.  (By "explicitly", we mean using @@session.insert_id,
++  whereas insert_id is used "implicitly" when a NULL value is inserted
++  into an auto_increment column).
++
++  We want statements referring explicitly to @@session.insert_id to be
++  unsafe, because insert_id is modified internally by the slave SQL
++  thread when NULL values are inserted into an AUTO_INCREMENT column.
++  This modification interferes with the value of the
++  @@session.insert_id variable if @@session.insert_id is referred
++  explicitly by an insert statement (as is seen by executing "SET
++  @@session.insert_id=0; CREATE TABLE t (a INT, b INT KEY
++  AUTO_INCREMENT); INSERT INTO t(a) VALUES (@@session.insert_id);" in
++  statement-based logging mode: t will be different on master and
++  slave).
++*/
++static sys_var_insert_id sys_insert_id(&vars, "insert_id");
++static sys_var_readonly		sys_error_count(&vars, "error_count",
++						OPT_SESSION,
++						SHOW_LONG,
++						get_error_count);
++static sys_var_readonly		sys_warning_count(&vars, "warning_count",
++						  OPT_SESSION,
++						  SHOW_LONG,
++						  get_warning_count);
++
++static sys_var_rand_seed1 sys_rand_seed1(&vars, "rand_seed1",
++                                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++static sys_var_rand_seed2 sys_rand_seed2(&vars, "rand_seed2",
++                                         sys_var::SESSION_VARIABLE_IN_BINLOG);
++
++static sys_var_thd_ulong        sys_default_week_format(&vars, "default_week_format",
++					                &SV::default_week_format);
++
++sys_var_thd_ulong               sys_group_concat_max_len(&vars, "group_concat_max_len",
++                                                         &SV::group_concat_max_len);
++
++sys_var_thd_time_zone sys_time_zone(&vars, "time_zone",
++                                    sys_var::SESSION_VARIABLE_IN_BINLOG);
++
++/* Global read-only variable containing hostname */
++static sys_var_const_str        sys_hostname(&vars, "hostname", glob_hostname);
++
++#ifndef EMBEDDED_LIBRARY
++static sys_var_const_str_ptr    sys_repl_report_host(&vars, "report_host", &report_host);
++static sys_var_const_str_ptr    sys_repl_report_user(&vars, "report_user", &report_user);
++static sys_var_const_str_ptr    sys_repl_report_password(&vars, "report_password", &report_password);
++
++static uchar *slave_get_report_port(THD *thd)
++{
++  thd->sys_var_tmp.long_value= report_port;
++  return (uchar*) &thd->sys_var_tmp.long_value;
++}
++
++static sys_var_readonly    sys_repl_report_port(&vars, "report_port", OPT_GLOBAL, SHOW_LONG, slave_get_report_port);
++
++#endif
++
++sys_var_thd_bool  sys_keep_files_on_create(&vars, "keep_files_on_create", 
++                                           &SV::keep_files_on_create);
++/* Read only variables */
++
++static sys_var_have_variable sys_have_compress(&vars, "have_compress", &have_compress);
++static sys_var_have_variable sys_have_crypt(&vars, "have_crypt", &have_crypt);
++static sys_var_have_plugin sys_have_csv(&vars, "have_csv", C_STRING_WITH_LEN("csv"), MYSQL_STORAGE_ENGINE_PLUGIN);
++static sys_var_have_variable sys_have_dlopen(&vars, "have_dynamic_loading", &have_dlopen);
++static sys_var_have_variable sys_have_geometry(&vars, "have_geometry", &have_geometry);
++static sys_var_have_plugin sys_have_innodb(&vars, "have_innodb", C_STRING_WITH_LEN("innodb"), MYSQL_STORAGE_ENGINE_PLUGIN);
++static sys_var_have_plugin sys_have_ndbcluster(&vars, "have_ndbcluster", C_STRING_WITH_LEN("ndbcluster"), MYSQL_STORAGE_ENGINE_PLUGIN);
++static sys_var_have_variable sys_have_openssl(&vars, "have_openssl", &have_ssl);
++static sys_var_have_variable sys_have_ssl(&vars, "have_ssl", &have_ssl);
++static sys_var_have_plugin sys_have_partition_db(&vars, "have_partitioning", C_STRING_WITH_LEN("partition"), MYSQL_STORAGE_ENGINE_PLUGIN);
++static sys_var_have_variable sys_have_query_cache(&vars, "have_query_cache",
++                                           &have_query_cache);
++static sys_var_have_variable sys_have_community_features(&vars, "have_community_features", &have_community_features);
++static sys_var_have_variable sys_have_rtree_keys(&vars, "have_rtree_keys", &have_rtree_keys);
++static sys_var_have_variable sys_have_symlink(&vars, "have_symlink", &have_symlink);
++/* Global read-only variable describing server license */
++static sys_var_const_str	sys_license(&vars, "license", STRINGIFY_ARG(LICENSE));
++/* Global variables which enable|disable logging */
++static sys_var_log_state sys_var_general_log(&vars, "general_log", &opt_log,
++                                      QUERY_LOG_GENERAL);
++/* Synonym of "general_log" for consistency with SHOW VARIABLES output */
++static sys_var_log_state sys_var_log(&vars, "log", &opt_log,
++                                      QUERY_LOG_GENERAL);
++static sys_var_log_state sys_var_slow_query_log(&vars, "slow_query_log", &opt_slow_log,
++                                         QUERY_LOG_SLOW);
++/* Synonym of "slow_query_log" for consistency with SHOW VARIABLES output */
++static sys_var_log_state sys_var_log_slow(&vars, "log_slow_queries",
++                                          &opt_slow_log, QUERY_LOG_SLOW);
++sys_var_str sys_var_general_log_path(&vars, "general_log_file", sys_check_log_path,
++				     sys_update_general_log_path,
++				     sys_default_general_log_path,
++				     opt_logname);
++sys_var_str sys_var_slow_log_path(&vars, "slow_query_log_file", sys_check_log_path,
++				  sys_update_slow_log_path, 
++				  sys_default_slow_log_path,
++				  opt_slow_logname);
++static sys_var_log_output sys_var_log_output_state(&vars, "log_output", &log_output_options,
++					    &log_output_typelib, 0);
++static sys_var_readonly         sys_myisam_mmap_size(&vars, "myisam_mmap_size",
++                                                     OPT_GLOBAL,
++                                                     SHOW_LONGLONG,
++                                                     get_myisam_mmap_size);
++
++
++bool sys_var::check(THD *thd, set_var *var)
++{
++  var->save_result.ulonglong_value= var->value->val_int();
++  return 0;
++}
++
++bool sys_var_str::check(THD *thd, set_var *var)
++{
++  int res;
++  if (!check_func)
++    return 0;
++
++  if ((res=(*check_func)(thd, var)) < 0)
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0),
++             name, var->value->str_value.ptr());
++  return res;
++}
++
++/*
++  Functions to check and update variables
++*/
++
++
++/*
++  Update the variables 'init_connect' and 'init_slave'.
++
++  In case of a 'DEFAULT' value
++  (for example: 'set GLOBAL init_connect=DEFAULT')
++  the 'var' parameter is a NULL pointer.
++*/
++
++bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
++			set_var *var)
++{
++  char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0);
++  uint new_length= (var ? var->value->str_value.length() : 0);
++  if (!old_value)
++    old_value= (char*) "";
++  if (!(res= my_strndup(old_value, new_length, MYF(0))))
++    return 1;
++  /*
++    Replace the old value in such a way that any thread using
++    the value will keep working.
++  */
++  rw_wrlock(var_mutex);
++  old_value= var_str->value;
++  var_str->value= res;
++  var_str->value_length= new_length;
++  var_str->is_os_charset= FALSE;
++  rw_unlock(var_mutex);
++  my_free(old_value, MYF(MY_ALLOW_ZERO_PTR));
++  return 0;
++}
++
++
++static bool sys_update_init_connect(THD *thd, set_var *var)
++{
++  return update_sys_var_str(&sys_init_connect, &LOCK_sys_init_connect, var);
++}
++
++
++static void sys_default_init_connect(THD* thd, enum_var_type type)
++{
++  update_sys_var_str(&sys_init_connect, &LOCK_sys_init_connect, 0);
++}
++
++
++static bool sys_update_init_slave(THD *thd, set_var *var)
++{
++  return update_sys_var_str(&sys_init_slave, &LOCK_sys_init_slave, var);
++}
++
++
++static void sys_default_init_slave(THD* thd, enum_var_type type)
++{
++  update_sys_var_str(&sys_init_slave, &LOCK_sys_init_slave, 0);
++}
++
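++/*
++  Changing ft_boolean_syntax requires the SUPER privilege; the new string
++  is syntax-checked before it is accepted.
++*/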
++static int sys_check_ftb_syntax(THD *thd,  set_var *var)
++{
++  if (thd->security_ctx->master_access & SUPER_ACL)
++    return (ft_boolean_check_syntax_string((uchar*)
++                                           var->value->str_value.c_ptr()) ?
++            -1 : 0);
++  else
++  {
++    my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
++    return 1;
++  }
++}
++
++static bool sys_update_ftb_syntax(THD *thd, set_var * var)
++{
++  strmake(ft_boolean_syntax, var->value->str_value.c_ptr(),
++	  sizeof(ft_boolean_syntax)-1);
++
++#ifdef HAVE_QUERY_CACHE
++  query_cache.flush();
++#endif /* HAVE_QUERY_CACHE */
++
++  return 0;
++}
++
++static void sys_default_ftb_syntax(THD *thd, enum_var_type type)
++{
++  strmake(ft_boolean_syntax, def_ft_boolean_syntax,
++	  sizeof(ft_boolean_syntax)-1);
++}
++
++
++/**
++  If one sets the LOW_PRIORITY UPDATES flag, we must also change the
++  lock type used.
++*/
++
++static void fix_low_priority_updates(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    thr_upgraded_concurrent_insert_lock= 
++      (global_system_variables.low_priority_updates ?
++       TL_WRITE_LOW_PRIORITY : TL_WRITE);
++  else
++    thd->update_lock_default= (thd->variables.low_priority_updates ?
++			       TL_WRITE_LOW_PRIORITY : TL_WRITE);
++}
++
++
++static void
++fix_myisam_max_sort_file_size(THD *thd, enum_var_type type)
++{
++  myisam_max_temp_length=
++    (my_off_t) global_system_variables.myisam_max_sort_file_size;
++}
++
++/**
++  Set the OPTION_BIG_SELECTS flag if max_join_size == HA_POS_ERROR.
++*/
++
++static void fix_max_join_size(THD *thd, enum_var_type type)
++{
++  if (type != OPT_GLOBAL)
++  {
++    if (thd->variables.max_join_size == HA_POS_ERROR)
++      thd->options|= OPTION_BIG_SELECTS;
++    else
++      thd->options&= ~OPTION_BIG_SELECTS;
++  }
++}
++
++
++/**
++  Can't change the 'next' tx_isolation while we are already in
++  a transaction
++*/
++static int check_tx_isolation(THD *thd, set_var *var)
++{
++  if (var->type == OPT_DEFAULT && (thd->server_status & SERVER_STATUS_IN_TRANS))
++  {
++    my_error(ER_CANT_CHANGE_TX_ISOLATION, MYF(0));
++    return 1;
++  }
++  return 0;
++}
++
++/*
++  If one doesn't use the SESSION modifier, the isolation level
++  is only active for the next command.
++*/
++static void fix_tx_isolation(THD *thd, enum_var_type type)
++{
++  if (type == OPT_SESSION)
++    thd->session_tx_isolation= ((enum_tx_isolation)
++				thd->variables.tx_isolation);
++}
++
++static void fix_completion_type(THD *thd __attribute__((unused)),
++				enum_var_type type __attribute__((unused))) {}
++
++static int check_completion_type(THD *thd, set_var *var)
++{
++  longlong val= var->value->val_int();
++  if (val < 0 || val > 2)
++  {
++    char buf[64];
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name, llstr(val, buf));
++    return 1;
++  }
++  return 0;
++}
++
++
++/*
++  If we are changing the thread variable, we have to copy it to NET too
++*/
++
++#ifdef HAVE_REPLICATION
++static void fix_net_read_timeout(THD *thd, enum_var_type type)
++{
++  if (type != OPT_GLOBAL)
++    my_net_set_read_timeout(&thd->net, thd->variables.net_read_timeout);
++}
++
++
++static void fix_net_write_timeout(THD *thd, enum_var_type type)
++{
++  if (type != OPT_GLOBAL)
++    my_net_set_write_timeout(&thd->net, thd->variables.net_write_timeout);
++}
++
++static void fix_net_retry_count(THD *thd, enum_var_type type)
++{
++  if (type != OPT_GLOBAL)
++    thd->net.retry_count=thd->variables.net_retry_count;
++}
++#else /* HAVE_REPLICATION */
++static void fix_net_read_timeout(THD *thd __attribute__((unused)),
++				 enum_var_type type __attribute__((unused)))
++{}
++static void fix_net_write_timeout(THD *thd __attribute__((unused)),
++				  enum_var_type type __attribute__((unused)))
++{}
++static void fix_net_retry_count(THD *thd __attribute__((unused)),
++				enum_var_type type __attribute__((unused)))
++{}
++#endif /* HAVE_REPLICATION */
++
++
++static void fix_query_cache_size(THD *thd, enum_var_type type)
++{
++#ifdef HAVE_QUERY_CACHE
++  ulong new_cache_size= query_cache.resize(query_cache_size);
++
++  /*
++     Note: query_cache_size is a global variable reflecting the 
++     requested cache size. See also query_cache_size_arg
++  */
++
++  if (query_cache_size != new_cache_size)
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			ER_WARN_QC_RESIZE, ER(ER_WARN_QC_RESIZE),
++			query_cache_size, new_cache_size);
++  
++  query_cache_size= new_cache_size;
++#endif
++}
++
++
++#ifdef HAVE_QUERY_CACHE
++static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type)
++{
++  query_cache_min_res_unit= 
++    query_cache.set_min_res_unit(query_cache_min_res_unit);
++}
++#endif
++
++
++extern void fix_delay_key_write(THD *thd, enum_var_type type)
++{
++  switch ((enum_delay_key_write) delay_key_write_options) {
++  case DELAY_KEY_WRITE_NONE:
++    myisam_delay_key_write=0;
++    break;
++  case DELAY_KEY_WRITE_ON:
++    myisam_delay_key_write=1;
++    break;
++  case DELAY_KEY_WRITE_ALL:
++    myisam_delay_key_write=1;
++    ha_open_options|= HA_OPEN_DELAY_KEY_WRITE;
++    break;
++  }
++}
++
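++/* Store the resolved SET value (a bitmap of set members) into the variable. */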
++bool sys_var_set::update(THD *thd, set_var *var)
++{
++  *value= var->save_result.ulong_value;
++  return 0;
++}
++
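++/*
++  Render the current SET value as a comma-separated list of the enabled
++  member names; the resulting string is allocated on the THD.
++*/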
++uchar *sys_var_set::value_ptr(THD *thd, enum_var_type type,
++                              LEX_STRING *base)
++{
++  char buff[256];
++  String tmp(buff, sizeof(buff), &my_charset_latin1);
++  ulong length;
++  ulong val= *value;
++
++  tmp.length(0);
++  for (uint i= 0; val; val>>= 1, i++)
++  {
++    if (val & 1)
++    {
++      tmp.append(enum_names->type_names[i],
++                 enum_names->type_lengths[i]);
++      tmp.append(',');
++    }
++  }
++
++  if ((length= tmp.length()))
++    length--;
++  return (uchar*) thd->strmake(tmp.ptr(), length);
++}
++
++void sys_var_set_slave_mode::set_default(THD *thd, enum_var_type type)
++{
++  slave_exec_mode_options= SLAVE_EXEC_MODE_STRICT;
++}
++
++bool sys_var_set_slave_mode::check(THD *thd, set_var *var)
++{
++  bool rc=  sys_var_set::check(thd, var);
++  if (!rc && (var->save_result.ulong_value & SLAVE_EXEC_MODE_STRICT) &&
++      (var->save_result.ulong_value & SLAVE_EXEC_MODE_IDEMPOTENT))
++  {
++    rc= true;
++    my_error(ER_SLAVE_AMBIGOUS_EXEC_MODE, MYF(0), "");
++  }
++  return rc;
++}
++
++bool sys_var_set_slave_mode::update(THD *thd, set_var *var)
++{
++  bool rc;
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  rc= sys_var_set::update(thd, var);
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return rc;
++}
++
++void fix_slave_exec_mode(void)
++{
++  DBUG_ENTER("fix_slave_exec_mode");
++
++  if ((slave_exec_mode_options & SLAVE_EXEC_MODE_STRICT) &&
++      (slave_exec_mode_options & SLAVE_EXEC_MODE_IDEMPOTENT))
++  {
++    sql_print_error("Ambiguous slave modes combination. STRICT will be used");
++    slave_exec_mode_options&= ~SLAVE_EXEC_MODE_IDEMPOTENT;
++  }
++  if (!(slave_exec_mode_options & SLAVE_EXEC_MODE_IDEMPOTENT))
++    slave_exec_mode_options|= SLAVE_EXEC_MODE_STRICT;
++  DBUG_VOID_RETURN;
++}
++
++
++bool sys_var_thd_binlog_format::check(THD *thd, set_var *var) {
++  /*
++    All variables that affect writing to binary log (either format or
++    turning logging on and off) use the same checking. We call the
++    superclass ::check function to assign the variable correctly, and
++    then check the value.
++   */
++  bool result= sys_var_thd_enum::check(thd, var);
++  if (!result)
++    result= check_log_update(thd, var);
++  return result;
++}
++
++
++bool sys_var_thd_binlog_format::is_readonly() const
++{
++  /*
++    Under certain circumstances, the variable is read-only (unchangeable):
++  */
++  THD *thd= current_thd;
++  /*
++    If we are using RBR and have open temporary tables, their CREATE TABLE
++    may not be in the binlog, so we can't toggle to SBR in this connection.
++    The test below will also prevent SET GLOBAL (it was not easy to test
++    here whether the change is global or not).
++    And this test will also prevent switching from RBR to RBR (a no-op which
++    should not happen too often).
++
++    If we don't have row-based replication compiled in, the variable
++    is always read-only.
++  */
++  if ((thd->variables.binlog_format == BINLOG_FORMAT_ROW) &&
++      thd->temporary_tables)
++  {
++    my_error(ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR, MYF(0));
++    return 1;
++  }
++  /*
++    if in a stored function/trigger, it's too late to change mode
++  */
++  if (thd->in_sub_stmt)
++  {
++    my_error(ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, MYF(0));
++    return 1;    
++  }
++  return sys_var_thd_enum::is_readonly();
++}
++
++
++void fix_binlog_format_after_update(THD *thd, enum_var_type type)
++{
++  thd->reset_current_stmt_binlog_row_based();
++}
++
++
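++/*
++  Push the new max_binlog_size down to the binary log; if
++  max_relay_log_size is 0, the relay log follows the binlog limit as well.
++*/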
++static void fix_max_binlog_size(THD *thd, enum_var_type type)
++{
++  DBUG_ENTER("fix_max_binlog_size");
++  DBUG_PRINT("info",("max_binlog_size=%lu max_relay_log_size=%lu",
++                     max_binlog_size, max_relay_log_size));
++  mysql_bin_log.set_max_size(max_binlog_size);
++#ifdef HAVE_REPLICATION
++  if (!max_relay_log_size)
++    active_mi->rli.relay_log.set_max_size(max_binlog_size);
++#endif
++  DBUG_VOID_RETURN;
++}
++
++static void fix_max_relay_log_size(THD *thd, enum_var_type type)
++{
++  DBUG_ENTER("fix_max_relay_log_size");
++  DBUG_PRINT("info",("max_binlog_size=%lu max_relay_log_size=%lu",
++                     max_binlog_size, max_relay_log_size));
++#ifdef HAVE_REPLICATION
++  active_mi->rli.relay_log.set_max_size(max_relay_log_size ?
++                                        max_relay_log_size: max_binlog_size);
++#endif
++  DBUG_VOID_RETURN;
++}
++
++
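++/*
++  A session value of max_delayed_threads may only be 0 (disabled) or equal
++  to the current global value; anything else is rejected.
++*/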
++static int check_max_delayed_threads(THD *thd, set_var *var)
++{
++  longlong val= var->value->val_int();
++  if (var->type != OPT_GLOBAL && val != 0 &&
++      val != (longlong) global_system_variables.max_insert_delayed_threads)
++  {
++    char buf[64];
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name, llstr(val, buf));
++    return 1;
++  }
++  return 0;
++}
++
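++/*
++  Resize the thread alarm queue so it can serve the new connection limit
++  plus the delayed-insert threads (not needed in the embedded server).
++*/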
++static void fix_max_connections(THD *thd, enum_var_type type)
++{
++#ifndef EMBEDDED_LIBRARY
++  resize_thr_alarm(max_connections + 
++		   global_system_variables.max_insert_delayed_threads + 10);
++#endif
++}
++
++
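++/*
++  For session-level changes, re-apply query_alloc_block_size and
++  query_prealloc_size to the connection's memory root.
++*/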
++static void fix_thd_mem_root(THD *thd, enum_var_type type)
++{
++  if (type != OPT_GLOBAL)
++    reset_root_defaults(thd->mem_root,
++                        thd->variables.query_alloc_block_size,
++                        thd->variables.query_prealloc_size);
++}
++
++
++static void fix_trans_mem_root(THD *thd, enum_var_type type)
++{
++#ifdef USING_TRANSACTIONS
++  if (type != OPT_GLOBAL)
++    reset_root_defaults(&thd->transaction.mem_root,
++                        thd->variables.trans_alloc_block_size,
++                        thd->variables.trans_prealloc_size);
++#endif
++}
++
++
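++/*
++  Mark server_id as explicitly supplied and copy the new value into the
++  current THD.
++*/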
++static void fix_server_id(THD *thd, enum_var_type type)
++{
++  server_id_supplied = 1;
++  thd->server_id= server_id;
++}
++
++
++/**
++  Throw a warning (an error in STRICT mode) if the value for a variable
++  needed bounding. Only call this from check(), not update(), because an
++  error in update() would be bad mojo. The plug-in interface also uses this.
++
++  @param thd      thread handle
++  @param fixed    did we have to correct the value? (throw warn/err if so)
++  @param unsignd  is value's type unsigned?
++  @param name     variable's name
++  @param val      variable's value
++
++  @retval         TRUE on error, FALSE otherwise (warning or OK)
++ */
++bool throw_bounds_warning(THD *thd, bool fixed, bool unsignd,
++                          const char *name, longlong val)
++{
++  if (fixed)
++  {
++    char buf[22];
++
++    if (unsignd)
++      ullstr((ulonglong) val, buf);
++    else
++      llstr(val, buf);
++
++    if (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES)
++    {
++      my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf);
++      return TRUE;
++    }
++
++    push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                        ER_TRUNCATED_WRONG_VALUE,
++                        ER(ER_TRUNCATED_WRONG_VALUE), name, buf);
++  }
++  return FALSE;
++}
++
++
++/**
++  Get an unsigned system variable.
++  A negative value does not wrap around; it becomes zero.
++  Check the user-supplied value for a system variable against its bounds.
++  If we needed to adjust the value, throw a warning or error depending
++  on SQL-mode.
++
++  @param thd             thread handle
++  @param var             the system-variable to get
++  @param user_max        a limit given with --maximum-variable-name=... or 0
++  @param var_type        type (e.g. GET_ULONG) the value is bounded to where necessary
++
++  @retval                TRUE on error, FALSE otherwise (warning or OK)
++ */
++static bool get_unsigned(THD *thd, set_var *var, ulonglong user_max,
++                         ulong var_type)
++{
++  int                     warnings= 0;
++  ulonglong               unadjusted;
++  const struct my_option *limits= var->var->option_limits;
++  struct my_option        fallback;
++
++  /* get_unsigned() */
++  if (var->value->unsigned_flag)
++    var->save_result.ulonglong_value= (ulonglong) var->value->val_int();
++  else
++  {
++    longlong v= var->value->val_int();
++    var->save_result.ulonglong_value= (ulonglong) ((v < 0) ? 0 : v);
++    if (v < 0)
++    {
++      warnings++;
++      if (throw_bounds_warning(thd, TRUE, FALSE, var->var->name, v))
++        return TRUE;  /* warning was promoted to error, give up */
++    }
++  }
++
++  unadjusted= var->save_result.ulonglong_value;
++
++  /* max, if any */
++
++  if ((user_max > 0) && (unadjusted > user_max))
++  {
++    var->save_result.ulonglong_value= user_max;
++
++    if ((warnings == 0) && throw_bounds_warning(thd, TRUE, TRUE,
++                                                var->var->name,
++                                                (longlong) unadjusted))
++      return TRUE;
++
++    warnings++;
++  }
++
++  /*
++    if the sysvar doesn't have a proper bounds record but the check
++    function would like bounding to ULONG where its size differs from
++    that of ULONGLONG, we make up a bogus limits record here and let
++    the usual suspects handle the actual limiting.
++  */
++
++  if (!limits && var_type != GET_ULL)
++  {
++    bzero(&fallback, sizeof(fallback));
++    fallback.var_type= var_type;
++    limits= &fallback;
++  }
++
++  /* fix_unsigned() */
++  if (limits)
++  {
++    my_bool   fixed;
++
++    var->save_result.ulonglong_value= getopt_ull_limit_value(var->save_result.
++                                                             ulonglong_value,
++                                                             limits, &fixed);
++
++    if ((warnings == 0) && throw_bounds_warning(thd, fixed, TRUE,
++                                                var->var->name,
++                                                (longlong) unadjusted))
++      return TRUE;
++  }
++
++  return FALSE;
++}
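++
++/*
++  Usage sketch (illustrative only, not part of the patched source; the
++  name check_example_ulong below is hypothetical):
++
++    static bool check_example_ulong(THD *thd, set_var *var)
++    {
++      // bound to ULONG range; user_max 0 means no --maximum-... limit
++      return get_unsigned(thd, var, 0, GET_ULONG);
++    }
++
++  With sql_mode containing STRICT_ALL_TABLES, an out-of-range SET fails
++  with ER_WRONG_VALUE_FOR_VAR; otherwise the value is clipped and
++  throw_bounds_warning() pushes an ER_TRUNCATED_WRONG_VALUE warning.
++*/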
++
++
++sys_var_long_ptr::
++sys_var_long_ptr(sys_var_chain *chain, const char *name_arg, ulong *value_ptr_arg,
++                 sys_after_update_func after_update_arg)
++  :sys_var_long_ptr_global(chain, name_arg, value_ptr_arg,
++                           &LOCK_global_system_variables, after_update_arg)
++{}
++
++
++bool sys_var_long_ptr_global::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, 0, GET_ULONG);
++}
++
++bool sys_var_long_ptr_global::update(THD *thd, set_var *var)
++{
++  pthread_mutex_lock(guard);
++  *value= (ulong) var->save_result.ulonglong_value;
++  pthread_mutex_unlock(guard);
++  return 0;
++}
++
++
++void sys_var_long_ptr_global::set_default(THD *thd, enum_var_type type)
++{
++  my_bool not_used;
++  pthread_mutex_lock(guard);
++  *value= (ulong) getopt_ull_limit_value((ulong) option_limits->def_value,
++                                         option_limits, &not_used);
++  pthread_mutex_unlock(guard);
++}
++
++
++bool sys_var_ulonglong_ptr::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, 0, GET_ULL);
++}
++
++
++bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var)
++{
++  ulonglong tmp= var->save_result.ulonglong_value;
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  *value= (ulonglong) tmp;
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return 0;
++}
++
++
++void sys_var_ulonglong_ptr::set_default(THD *thd, enum_var_type type)
++{
++  my_bool not_used;
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  *value= getopt_ull_limit_value((ulonglong) option_limits->def_value,
++                                 option_limits, &not_used);
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++}
++
++
++bool sys_var_bool_ptr::update(THD *thd, set_var *var)
++{
++  *value= (my_bool) var->save_result.ulong_value;
++  return 0;
++}
++
++
++void sys_var_bool_ptr::set_default(THD *thd, enum_var_type type)
++{
++  *value= (my_bool) option_limits->def_value;
++}
++
++
++bool sys_var_enum::update(THD *thd, set_var *var)
++{
++  *value= (uint) var->save_result.ulong_value;
++  return 0;
++}
++
++
++uchar *sys_var_enum::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
++{
++  return (uchar*) enum_names->type_names[*value];
++}
++
++
++uchar *sys_var_enum_const::value_ptr(THD *thd, enum_var_type type,
++                                     LEX_STRING *base)
++{
++  return (uchar*) enum_names->type_names[global_system_variables.*offset];
++}
++
++bool sys_var_thd_ulong::check(THD *thd, set_var *var)
++{
++  if (get_unsigned(thd, var, max_system_variables.*offset, GET_ULONG))
++    return TRUE;
++  DBUG_ASSERT(var->save_result.ulonglong_value <= ULONG_MAX);
++  return ((check_func && (*check_func)(thd, var)));
++}
++
++bool sys_var_thd_ulong::update(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    global_system_variables.*offset= (ulong) var->save_result.ulonglong_value;
++  else
++    thd->variables.*offset= (ulong) var->save_result.ulonglong_value;
++
++  return 0;
++}
++
++
++void sys_var_thd_ulong::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++  {
++    my_bool not_used;
++    /* We will not come here if option_limits is not set */
++    global_system_variables.*offset=
++      (ulong) getopt_ull_limit_value((ulong) option_limits->def_value,
++                                     option_limits, &not_used);
++  }
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++uchar *sys_var_thd_ulong::value_ptr(THD *thd, enum_var_type type,
++				   LEX_STRING *base)
++{
++  if (type == OPT_GLOBAL)
++    return (uchar*) &(global_system_variables.*offset);
++  return (uchar*) &(thd->variables.*offset);
++}
++
++
++bool sys_var_thd_ha_rows::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, max_system_variables.*offset,
++#ifdef BIG_TABLES
++                      GET_ULL
++#else
++                      GET_ULONG
++#endif
++                     );
++}
++
++
++bool sys_var_thd_ha_rows::update(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++  {
++    /* Lock is needed to make things safe on 32 bit systems */
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.*offset= (ha_rows)
++                                     var->save_result.ulonglong_value;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= (ha_rows) var->save_result.ulonglong_value;
++  return 0;
++}
++
++
++void sys_var_thd_ha_rows::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++  {
++    my_bool not_used;
++    /* We will not come here if option_limits is not set */
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.*offset=
++      (ha_rows) getopt_ull_limit_value((ha_rows) option_limits->def_value,
++                                       option_limits, &not_used);
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++uchar *sys_var_thd_ha_rows::value_ptr(THD *thd, enum_var_type type,
++				     LEX_STRING *base)
++{
++  if (type == OPT_GLOBAL)
++    return (uchar*) &(global_system_variables.*offset);
++  return (uchar*) &(thd->variables.*offset);
++}
++
++bool sys_var_thd_ulonglong::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, max_system_variables.*offset, GET_ULL);
++}
++
++bool sys_var_thd_ulonglong::update(THD *thd,  set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++  {
++    /* Lock is needed to make things safe on 32 bit systems */
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.*offset= (ulonglong)
++                                     var->save_result.ulonglong_value;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= (ulonglong) var->save_result.ulonglong_value;
++  return 0;
++}
++
++
++void sys_var_thd_ulonglong::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++  {
++    my_bool not_used;
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.*offset=
++      getopt_ull_limit_value((ulonglong) option_limits->def_value,
++                             option_limits, &not_used);
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++uchar *sys_var_thd_ulonglong::value_ptr(THD *thd, enum_var_type type,
++				       LEX_STRING *base)
++{
++  if (type == OPT_GLOBAL)
++    return (uchar*) &(global_system_variables.*offset);
++  return (uchar*) &(thd->variables.*offset);
++}
++
++
++bool sys_var_thd_bool::update(THD *thd,  set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    global_system_variables.*offset= (my_bool) var->save_result.ulong_value;
++  else
++    thd->variables.*offset= (my_bool) var->save_result.ulong_value;
++  return 0;
++}
++
++
++void sys_var_thd_bool::set_default(THD *thd,  enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= (my_bool) option_limits->def_value;
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++uchar *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type,
++				  LEX_STRING *base)
++{
++  if (type == OPT_GLOBAL)
++    return (uchar*) &(global_system_variables.*offset);
++  return (uchar*) &(thd->variables.*offset);
++}
++
++
++bool sys_var::check_enum(THD *thd, set_var *var, const TYPELIB *enum_names)
++{
++  char buff[STRING_BUFFER_USUAL_SIZE];
++  const char *value;
++  String str(buff, sizeof(buff), system_charset_info), *res;
++
++  if (var->value->result_type() == STRING_RESULT)
++  {
++    if (!(res=var->value->val_str(&str)) ||
++	((long) (var->save_result.ulong_value=
++		 (ulong) find_type(enum_names, res->ptr(),
++				   res->length(),1)-1)) < 0)
++    {
++      value= res ? res->c_ptr() : "NULL";
++      goto err;
++    }
++  }
++  else
++  {
++    ulonglong tmp=var->value->val_int();
++    if (tmp >= enum_names->count)
++    {
++      llstr(tmp,buff);
++      value=buff;				// Wrong value is here
++      goto err;
++    }
++    var->save_result.ulong_value= (ulong) tmp;	// Save for update
++  }
++  return 0;
++
++err:
++  my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, value);
++  return 1;
++}
++
++
++bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names)
++{
++  bool not_used;
++  char buff[STRING_BUFFER_USUAL_SIZE], *error= 0;
++  uint error_len= 0;
++  String str(buff, sizeof(buff), system_charset_info), *res;
++
++  if (var->value->result_type() == STRING_RESULT)
++  {
++    if (!(res= var->value->val_str(&str)))
++    {
++      strmov(buff, "NULL");
++      goto err;
++    }
++
++    if (!m_allow_empty_value &&
++        res->length() == 0)
++    {
++      buff[0]= 0;
++      goto err;
++    }
++
++    var->save_result.ulong_value= ((ulong)
++				   find_set(enum_names, res->c_ptr_safe(),
++					    res->length(),
++                                            NULL,
++                                            &error, &error_len,
++					    &not_used));
++    if (error_len)
++    {
++      strmake(buff, error, MYSQL_MIN(sizeof(buff) - 1, error_len));
++      goto err;
++    }
++  }
++  else
++  {
++    ulonglong tmp= var->value->val_int();
++
++    if (!m_allow_empty_value &&
++        tmp == 0)
++    {
++      buff[0]= '0';
++      buff[1]= 0;
++      goto err;
++    }
++
++    /*
++      For when the enum is made to contain 64 elements, as 1ULL<<64 is
++      undefined, we guard with a "count<64" test.
++    */
++    if (unlikely((tmp >= ((ULL(1)) << enum_names->count)) &&
++                 (enum_names->count < 64)))
++    {
++      llstr(tmp, buff);
++      goto err;
++    }
++    var->save_result.ulong_value= (ulong) tmp;  // Save for update
++  }
++  return 0;
++
++err:
++  my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buff);
++  return 1;
++}
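++
++/*
++  Behaviour sketch for check_set() above (illustrative, not part of the
++  patched source).  Assume a SET-typed variable whose TYPELIB lists
++  {"a","b","c"} in that order:
++
++    SET ... = 'a,c';   -- find_set() yields bits 0 and 2 -> ulong_value 5
++    SET ... = 6;       -- bits 1 and 2 -> accepted as-is
++    SET ... = 8;       -- >= 1<<3, rejected with ER_WRONG_VALUE_FOR_VAR
++    SET ... = '';      -- rejected unless m_allow_empty_value is set
++
++  The "count < 64" guard only matters for typelibs with exactly 64
++  elements, where 1ULL << 64 would be undefined.
++*/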
++
++
++CHARSET_INFO *sys_var::charset(THD *thd)
++{
++  return is_os_charset ? thd->variables.character_set_filesystem : 
++    system_charset_info;
++}
++
++
++bool sys_var_thd_enum::update(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    global_system_variables.*offset= var->save_result.ulong_value;
++  else
++    thd->variables.*offset= var->save_result.ulong_value;
++  return 0;
++}
++
++
++void sys_var_thd_enum::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= (ulong) option_limits->def_value;
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++uchar *sys_var_thd_enum::value_ptr(THD *thd, enum_var_type type,
++				  LEX_STRING *base)
++{
++  ulong tmp= ((type == OPT_GLOBAL) ?
++	      global_system_variables.*offset :
++	      thd->variables.*offset);
++  return (uchar*) enum_names->type_names[tmp];
++}
++
++bool sys_var_thd_bit::check(THD *thd, set_var *var)
++{
++  return (check_enum(thd, var, &bool_typelib) ||
++          (check_func && (*check_func)(thd, var)));
++}
++
++bool sys_var_thd_bit::update(THD *thd, set_var *var)
++{
++  int res= (*update_func)(thd, var);
++  return res;
++}
++
++
++uchar *sys_var_thd_bit::value_ptr(THD *thd, enum_var_type type,
++				 LEX_STRING *base)
++{
++  /*
++    If reverse is 0 (default) return 1 if bit is set.
++    If reverse is 1, return 0 if bit is set
++  */
++  thd->sys_var_tmp.my_bool_value= ((thd->options & bit_flag) ?
++				   !reverse : reverse);
++  return (uchar*) &thd->sys_var_tmp.my_bool_value;
++}
++
++
++/** Update a date_time format variable based on given value. */
++
++void sys_var_thd_date_time_format::update2(THD *thd, enum_var_type type,
++					   DATE_TIME_FORMAT *new_value)
++{
++  DATE_TIME_FORMAT *old;
++  DBUG_ENTER("sys_var_date_time_format::update2");
++  DBUG_DUMP("positions", (uchar*) new_value->positions,
++	    sizeof(new_value->positions));
++
++  if (type == OPT_GLOBAL)
++  {
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    old= (global_system_variables.*offset);
++    (global_system_variables.*offset)= new_value;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++  {
++    old= (thd->variables.*offset);
++    (thd->variables.*offset)= new_value;
++  }
++  my_free((char*) old, MYF(MY_ALLOW_ZERO_PTR));
++  DBUG_VOID_RETURN;
++}
++
++
++bool sys_var_thd_date_time_format::update(THD *thd, set_var *var)
++{
++  DATE_TIME_FORMAT *new_value;
++  /* We must make a copy of the last value to get it into normal memory */
++  new_value= date_time_format_copy((THD*) 0,
++				   var->save_result.date_time_format);
++  if (!new_value)
++    return 1;					// Out of memory
++  update2(thd, var->type, new_value);		// Can't fail
++  return 0;
++}
++
++
++bool sys_var_thd_date_time_format::check(THD *thd, set_var *var)
++{
++  char buff[STRING_BUFFER_USUAL_SIZE];
++  String str(buff,sizeof(buff), system_charset_info), *res;
++  DATE_TIME_FORMAT *format;
++
++  if (!(res=var->value->val_str(&str)))
++    res= &my_empty_string;
++
++  if (!(format= date_time_format_make(date_time_type,
++				      res->ptr(), res->length())))
++  {
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, res->c_ptr());
++    return 1;
++  }
++  
++  /*
++    We must copy result to thread space to not get a memory leak if
++    update is aborted
++  */
++  var->save_result.date_time_format= date_time_format_copy(thd, format);
++  my_free((char*) format, MYF(0));
++  return var->save_result.date_time_format == 0;
++}
++
++
++void sys_var_thd_date_time_format::set_default(THD *thd, enum_var_type type)
++{
++  DATE_TIME_FORMAT *res= 0;
++
++  if (type == OPT_GLOBAL)
++  {
++    const char *format;
++    if ((format= opt_date_time_formats[date_time_type]))
++      res= date_time_format_make(date_time_type, format, strlen(format));
++  }
++  else
++  {
++    /* Make copy with malloc */
++    res= date_time_format_copy((THD *) 0, global_system_variables.*offset);
++  }
++
++  if (res)					// Should always be true
++    update2(thd, type, res);
++}
++
++
++uchar *sys_var_thd_date_time_format::value_ptr(THD *thd, enum_var_type type,
++					      LEX_STRING *base)
++{
++  if (type == OPT_GLOBAL)
++  {
++    char *res;
++    /*
++      We do a copy here just to be sure things will work even if someone
++      is modifying the original string while the copy is accessed
++      (Can't happen now in SQL SHOW, but this is a good safety for the future)
++    */
++    res= thd->strmake((global_system_variables.*offset)->format.str,
++		      (global_system_variables.*offset)->format.length);
++    return (uchar*) res;
++  }
++  return (uchar*) (thd->variables.*offset)->format.str;
++}
++
++
++typedef struct old_names_map_st
++{
++  const char *old_name;
++  const char *new_name;
++} my_old_conv;
++
++static my_old_conv old_conv[]= 
++{
++  {	"cp1251_koi8"		,	"cp1251"	},
++  {	"cp1250_latin2"		,	"cp1250"	},
++  {	"kam_latin2"		,	"keybcs2"	},
++  {	"mac_latin2"		,	"MacRoman"	},
++  {	"macce_latin2"		,	"MacCE"		},
++  {	"pc2_latin2"		,	"pclatin2"	},
++  {	"vga_latin2"		,	"pclatin1"	},
++  {	"koi8_cp1251"		,	"koi8r"		},
++  {	"win1251ukr_koi8_ukr"	,	"win1251ukr"	},
++  {	"koi8_ukr_win1251ukr"	,	"koi8u"		},
++  {	NULL			,	NULL		}
++};
++
++CHARSET_INFO *get_old_charset_by_name(const char *name)
++{
++  my_old_conv *conv;
++ 
++  for (conv= old_conv; conv->old_name; conv++)
++  {
++    if (!my_strcasecmp(&my_charset_latin1, name, conv->old_name))
++      return get_charset_by_csname(conv->new_name, MY_CS_PRIMARY, MYF(0));
++  }
++  return NULL;
++}
++
++
++bool sys_var_collation::check(THD *thd, set_var *var)
++{
++  CHARSET_INFO *tmp;
++  LINT_INIT(tmp);
++
++  if (var->value->result_type() == STRING_RESULT)
++  {
++    char buff[STRING_BUFFER_USUAL_SIZE];
++    String str(buff,sizeof(buff), system_charset_info), *res;
++    if (!(res=var->value->val_str(&str)))
++    {
++      my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL");
++      return 1;
++    }
++    if (!(tmp=get_charset_by_name(res->c_ptr(),MYF(0))))
++    {
++      my_error(ER_UNKNOWN_COLLATION, MYF(0), res->c_ptr());
++      return 1;
++    }
++  }
++  else // INT_RESULT
++  {
++    if (!(tmp=get_charset((int) var->value->val_int(),MYF(0))))
++    {
++      char buf[20];
++      int10_to_str((int) var->value->val_int(), buf, -10);
++      my_error(ER_UNKNOWN_COLLATION, MYF(0), buf);
++      return 1;
++    }
++  }
++  var->save_result.charset= tmp;	// Save for update
++  return 0;
++}
++
++
++bool sys_var_character_set::check(THD *thd, set_var *var)
++{
++  CHARSET_INFO *tmp;
++  LINT_INIT(tmp);
++
++  if (var->value->result_type() == STRING_RESULT)
++  {
++    char buff[STRING_BUFFER_USUAL_SIZE];
++    String str(buff,sizeof(buff), system_charset_info), *res;
++    if (!(res=var->value->val_str(&str)))
++    {
++      if (!nullable)
++      {
++        my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL");
++        return 1;
++      }
++      tmp= NULL;
++    }
++    else if (!(tmp=get_charset_by_csname(res->c_ptr(),MY_CS_PRIMARY,MYF(0))) &&
++             !(tmp=get_old_charset_by_name(res->c_ptr())))
++    {
++      my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), res->c_ptr());
++      return 1;
++    }
++  }
++  else // INT_RESULT
++  {
++    if (!(tmp=get_charset((int) var->value->val_int(),MYF(0))))
++    {
++      char buf[20];
++      int10_to_str((int) var->value->val_int(), buf, -10);
++      my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), buf);
++      return 1;
++    }
++  }
++  var->save_result.charset= tmp;	// Save for update
++  return 0;
++}
++
++
++bool sys_var_character_set::update(THD *thd, set_var *var)
++{
++  ci_ptr(thd,var->type)[0]= var->save_result.charset;
++  thd->update_charset();
++  return 0;
++}
++
++
++uchar *sys_var_character_set::value_ptr(THD *thd, enum_var_type type,
++				       LEX_STRING *base)
++{
++  CHARSET_INFO *cs= ci_ptr(thd,type)[0];
++  return cs ? (uchar*) cs->csname : (uchar*) NULL;
++}
++
++
++void sys_var_character_set_sv::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= *global_default;
++  else
++  {
++    thd->variables.*offset= global_system_variables.*offset;
++    thd->update_charset();
++  }
++}
++CHARSET_INFO **sys_var_character_set_sv::ci_ptr(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    return &(global_system_variables.*offset);
++  else
++    return &(thd->variables.*offset);
++}
++
++
++bool sys_var_character_set_client::check(THD *thd, set_var *var)
++{
++  if (sys_var_character_set_sv::check(thd, var))
++    return 1;
++  /* Currently, UCS-2 cannot be used as a client character set */
++  if (!is_supported_parser_charset(var->save_result.charset))
++  {
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, 
++             var->save_result.charset->csname);
++    return 1;
++  }
++  return 0;
++}
++
++
++CHARSET_INFO ** sys_var_character_set_database::ci_ptr(THD *thd,
++						       enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    return &global_system_variables.collation_database;
++  else
++    return &thd->variables.collation_database;
++}
++
++
++void sys_var_character_set_database::set_default(THD *thd, enum_var_type type)
++{
++ if (type == OPT_GLOBAL)
++    global_system_variables.collation_database= default_charset_info;
++  else
++  {
++    thd->variables.collation_database= thd->db_charset;
++    thd->update_charset();
++  }
++}
++
++
++bool sys_var_collation_sv::update(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    global_system_variables.*offset= var->save_result.charset;
++  else
++  {
++    thd->variables.*offset= var->save_result.charset;
++    thd->update_charset();
++  }
++  return 0;
++}
++
++
++void sys_var_collation_sv::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= *global_default;
++  else
++  {
++    thd->variables.*offset= global_system_variables.*offset;
++    thd->update_charset();
++  }
++}
++
++
++uchar *sys_var_collation_sv::value_ptr(THD *thd, enum_var_type type,
++                                       LEX_STRING *base)
++{
++  CHARSET_INFO *cs= ((type == OPT_GLOBAL) ?
++		     global_system_variables.*offset : thd->variables.*offset);
++  return cs ? (uchar*) cs->name : (uchar*) "NULL";
++}
++
++
++LEX_STRING default_key_cache_base= {(char *) "default", 7 };
++
++static KEY_CACHE zero_key_cache;
++
++KEY_CACHE *get_key_cache(LEX_STRING *cache_name)
++{
++  safe_mutex_assert_owner(&LOCK_global_system_variables);
++  if (!cache_name || ! cache_name->length)
++    cache_name= &default_key_cache_base;
++  return ((KEY_CACHE*) find_named(&key_caches,
++                                      cache_name->str, cache_name->length, 0));
++}
++
++
++uchar *sys_var_key_cache_param::value_ptr(THD *thd, enum_var_type type,
++					 LEX_STRING *base)
++{
++  KEY_CACHE *key_cache= get_key_cache(base);
++  if (!key_cache)
++    key_cache= &zero_key_cache;
++  return (uchar*) key_cache + offset ;
++}
++
++
++bool sys_var_key_buffer_size::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, 0, GET_ULL);
++}
++
++
++bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
++{
++  ulonglong tmp= var->save_result.ulonglong_value;
++  LEX_STRING *base_name= &var->base;
++  KEY_CACHE *key_cache;
++  bool error= 0;
++
++  /* If no basename, assume it's for the key cache named 'default' */
++  if (!base_name->length)
++    base_name= &default_key_cache_base;
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  key_cache= get_key_cache(base_name);
++
++  if (!key_cache)
++  {
++    /* Key cache didn't exist */
++    if (!tmp)					// Tried to delete cache
++      goto end;					// Ok, nothing to do
++    if (!(key_cache= create_key_cache(base_name->str, base_name->length)))
++    {
++      error= 1;
++      goto end;
++    }
++  }
++
++  /*
++    Abort if some other thread is changing the key cache
++    TODO: This should be changed so that we wait until the previous
++    assignment is done and then do the new assign
++  */
++  if (key_cache->in_init)
++    goto end;
++
++  if (!tmp)					// Zero size means delete
++  {
++    if (key_cache == dflt_key_cache)
++    {
++      error= 1;
++      my_error(ER_WARN_CANT_DROP_DEFAULT_KEYCACHE, MYF(0));
++      goto end;					// Ignore default key cache
++    }
++
++    if (key_cache->key_cache_inited)		// If initied
++    {
++      /*
++	Move tables using this key cache to the default key cache
++	and clear the old key cache.
++      */
++      NAMED_LIST *list; 
++      key_cache= (KEY_CACHE *) find_named(&key_caches, base_name->str,
++					      base_name->length, &list);
++      key_cache->in_init= 1;
++      pthread_mutex_unlock(&LOCK_global_system_variables);
++      error= reassign_keycache_tables(thd, key_cache, dflt_key_cache);
++      pthread_mutex_lock(&LOCK_global_system_variables);
++      key_cache->in_init= 0;
++    }
++    /*
++      We don't delete the key cache as some running threads may still be
++      in the key cache code with a pointer to the deleted (empty) key cache
++    */
++    goto end;
++  }
++
++  key_cache->param_buff_size= (ulonglong) tmp;
++
++  /* If key cache didn't exist initialize it, else resize it */
++  key_cache->in_init= 1;
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++
++  if (!key_cache->key_cache_inited)
++    error= (bool) (ha_init_key_cache("", key_cache));
++  else
++    error= (bool)(ha_resize_key_cache(key_cache));
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  key_cache->in_init= 0;  
++
++end:
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++
++  var->save_result.ulonglong_value= SIZE_T_MAX;
++
++  return error;
++}
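++
++/*
++  Usage note for the update() above (illustrative, not part of the patched
++  source; "hot_cache" is a hypothetical cache name):
++
++    SET GLOBAL hot_cache.key_buffer_size= 128*1024*1024;  -- create/resize
++    CACHE INDEX t1 IN hot_cache;
++    SET GLOBAL hot_cache.key_buffer_size= 0;  -- "delete": tables are first
++                                              -- reassigned to the default
++                                              -- key cache
++
++  Setting the default key cache itself to 0 is refused with
++  ER_WARN_CANT_DROP_DEFAULT_KEYCACHE.
++*/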
++
++
++bool sys_var_key_cache_long::check(THD *thd, set_var *var)
++{
++  return get_unsigned(thd, var, 0, GET_ULONG);
++}
++
++
++/**
++  @todo
++  Abort if some other thread is changing the key cache.
++  This should be changed so that we wait until the previous
++  assignment is done and then do the new assign
++*/
++bool sys_var_key_cache_long::update(THD *thd, set_var *var)
++{
++  LEX_STRING *base_name= &var->base;
++  bool error= 0;
++
++  if (!base_name->length)
++    base_name= &default_key_cache_base;
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  KEY_CACHE *key_cache= get_key_cache(base_name);
++
++  if (!key_cache && !(key_cache= create_key_cache(base_name->str,
++				                  base_name->length)))
++  {
++    error= 1;
++    goto end;
++  }
++
++  /*
++    Abort if some other thread is changing the key cache
++    TODO: This should be changed so that we wait until the previous
++    assignment is done and then do the new assign
++  */
++  if (key_cache->in_init)
++    goto end;
++
++  *((ulong*) (((char*) key_cache) + offset))= (ulong)
++                                              var->save_result.ulonglong_value;
++
++  /*
++    Don't create a new key cache if it didn't exist
++    (key_caches are created only when the user sets block_size)
++  */
++  key_cache->in_init= 1;
++
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++
++  error= (bool) (ha_resize_key_cache(key_cache));
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  key_cache->in_init= 0;  
++
++end:
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return error;
++}
++
++
++bool sys_var_log_state::update(THD *thd, set_var *var)
++{
++  bool res;
++
++  if (this == &sys_var_log)
++    WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
++  else if (this == &sys_var_log_slow)
++    WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  if (!var->save_result.ulong_value)
++  {
++    logger.deactivate_log_handler(thd, log_type);
++    res= false;
++  }
++  else
++    res= logger.activate_log_handler(thd, log_type);
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return res;
++}
++
++void sys_var_log_state::set_default(THD *thd, enum_var_type type)
++{
++  if (this == &sys_var_log)
++    WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
++  else if (this == &sys_var_log_slow)
++    WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  logger.deactivate_log_handler(thd, log_type);
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++}
++
++
++static int  sys_check_log_path(THD *thd,  set_var *var)
++{
++  char path[FN_REFLEN], buff[FN_REFLEN];
++  MY_STAT f_stat;
++  String str(buff, sizeof(buff), system_charset_info), *res;
++  const char *log_file_str;
++  size_t path_length;
++
++  if (!(res= var->value->val_str(&str)))
++    goto err;
++
++  log_file_str= res->c_ptr();
++  bzero(&f_stat, sizeof(MY_STAT));
++
++  path_length= unpack_filename(path, log_file_str);
++
++  if (!path_length)
++  {
++    /* File name is empty. */
++
++    goto err;
++  }
++
++  if (my_stat(path, &f_stat, MYF(0)))
++  {
++    /*
++      A file system object exists. Check if argument is a file and we have
++      'write' permission.
++    */
++
++    if (!MY_S_ISREG(f_stat.st_mode) ||
++        !(f_stat.st_mode & MY_S_IWRITE))
++      goto err;
++
++    return 0;
++  }
++
++  /* Get dirname of the file path. */
++  (void) dirname_part(path, log_file_str, &path_length);
++
++  /* Dirname is empty if file path is relative. */
++  if (!path_length)
++    return 0;
++
++  /*
++    Check if directory exists and we have permission to create file and
++    write to file.
++  */
++  if (my_access(path, (F_OK|W_OK)))
++    goto err;
++
++  return 0;
++
++err:
++  my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name, 
++           res ? log_file_str : "NULL");
++  return 1;
++}
++
++
++bool update_sys_var_str_path(THD *thd, sys_var_str *var_str,
++			     set_var *var, const char *log_ext,
++			     bool log_state, uint log_type)
++{
++  MYSQL_QUERY_LOG *file_log;
++  char buff[FN_REFLEN];
++  char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0);
++  bool result= 0;
++  uint str_length= (var ? var->value->str_value.length() : 0);
++
++  switch (log_type) {
++  case QUERY_LOG_SLOW:
++    file_log= logger.get_slow_log_file_handler();
++    break;
++  case QUERY_LOG_GENERAL:
++    file_log= logger.get_log_file_handler();
++    break;
++  default:
++    MY_ASSERT_UNREACHABLE();
++  }
++
++  if (!old_value)
++  {
++    old_value= make_default_log_name(buff, log_ext);
++    str_length= strlen(old_value);
++  }
++  if (!(res= my_strndup(old_value, str_length, MYF(MY_FAE+MY_WME))))
++  {
++    result= 1;
++    goto err;
++  }
++
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  logger.lock_exclusive();
++
++  if (file_log && log_state)
++    file_log->close(0);
++  old_value= var_str->value;
++  var_str->value= res;
++  var_str->value_length= str_length;
++  my_free(old_value, MYF(MY_ALLOW_ZERO_PTR));
++  if (file_log && log_state)
++  {
++    switch (log_type) {
++    case QUERY_LOG_SLOW:
++      file_log->open_slow_log(sys_var_slow_log_path.value);
++      break;
++    case QUERY_LOG_GENERAL:
++      file_log->open_query_log(sys_var_general_log_path.value);
++      break;
++    default:
++      DBUG_ASSERT(0);
++    }
++  }
++
++  logger.unlock();
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++
++err:
++  return result;
++}
++
++
++static bool sys_update_general_log_path(THD *thd, set_var * var)
++{
++  return update_sys_var_str_path(thd, &sys_var_general_log_path, 
++				 var, ".log", opt_log, QUERY_LOG_GENERAL);
++}
++
++
++static void sys_default_general_log_path(THD *thd, enum_var_type type)
++{
++  (void) update_sys_var_str_path(thd, &sys_var_general_log_path,
++				 0, ".log", opt_log, QUERY_LOG_GENERAL);
++}
++
++
++static bool sys_update_slow_log_path(THD *thd, set_var * var)
++{
++  return update_sys_var_str_path(thd, &sys_var_slow_log_path,
++				 var, "-slow.log", opt_slow_log,
++                                 QUERY_LOG_SLOW);
++}
++
++
++static void sys_default_slow_log_path(THD *thd, enum_var_type type)
++{
++  (void) update_sys_var_str_path(thd, &sys_var_slow_log_path,
++				 0, "-slow.log", opt_slow_log,
++                                 QUERY_LOG_SLOW);
++}
++
++
++bool sys_var_log_output::update(THD *thd, set_var *var)
++{
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  logger.lock_exclusive();
++  logger.init_slow_log(var->save_result.ulong_value);
++  logger.init_general_log(var->save_result.ulong_value);
++  *value= var->save_result.ulong_value;
++  logger.unlock();
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return 0;
++}
++
++
++void sys_var_log_output::set_default(THD *thd, enum_var_type type)
++{
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  logger.lock_exclusive();
++  logger.init_slow_log(LOG_FILE);
++  logger.init_general_log(LOG_FILE);
++  *value= LOG_FILE;
++  logger.unlock();
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++}
++
++
++uchar *sys_var_log_output::value_ptr(THD *thd, enum_var_type type,
++                                    LEX_STRING *base)
++{
++  char buff[256];
++  String tmp(buff, sizeof(buff), &my_charset_latin1);
++  ulong length;
++  ulong val= *value;
++
++  tmp.length(0);
++  for (uint i= 0; val; val>>= 1, i++)
++  {
++    if (val & 1)
++    {
++      tmp.append(log_output_typelib.type_names[i],
++                 log_output_typelib.type_lengths[i]);
++      tmp.append(',');
++    }
++  }
++
++  if ((length= tmp.length()))
++    length--;
++  return (uchar*) thd->strmake(tmp.ptr(), length);
++}
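++
++/*
++  Example for the conversion above (illustrative, not part of the patched
++  source; assumes log_output_typelib lists NONE, FILE, TABLE in that
++  order): a stored value of 6 has bits 1 and 2 set, so the loop appends
++  "FILE," and "TABLE," and the final length-- drops the trailing comma,
++  yielding "FILE,TABLE".
++*/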
++
++
++/*****************************************************************************
++  Functions to handle SET NAMES and SET CHARACTER SET
++*****************************************************************************/
++
++int set_var_collation_client::check(THD *thd)
++{
++  /* Currently, UCS-2 cannot be used as a client character set */
++  if (character_set_client->mbminlen > 1)
++  {
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client",
++             character_set_client->csname);
++    return 1;
++  }
++  return 0;
++}
++
++int set_var_collation_client::update(THD *thd)
++{
++  thd->variables.character_set_client= character_set_client;
++  thd->variables.character_set_results= character_set_results;
++  thd->variables.collation_connection= collation_connection;
++  thd->update_charset();
++  thd->protocol_text.init(thd);
++  thd->protocol_binary.init(thd);
++  return 0;
++}
++
++/****************************************************************************/
++
++bool sys_var_timestamp::check(THD *thd, set_var *var)
++{
++  longlong val;
++  var->save_result.ulonglong_value= var->value->val_int();
++  val= (longlong) var->save_result.ulonglong_value;
++  if (val != 0 &&          // this is how you set the default value
++      (val < TIMESTAMP_MIN_VALUE || val > TIMESTAMP_MAX_VALUE))
++  {
++    char buf[64];
++    my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "timestamp", llstr(val, buf));
++    return TRUE;
++  }
++  return FALSE;
++}
++
++
++bool sys_var_timestamp::update(THD *thd,  set_var *var)
++{
++  thd->set_time((time_t) var->save_result.ulonglong_value);
++  return FALSE;
++}
++
++
++void sys_var_timestamp::set_default(THD *thd, enum_var_type type)
++{
++  thd->user_time=0;
++}
++
++
++uchar *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type,
++				   LEX_STRING *base)
++{
++  thd->sys_var_tmp.long_value= (long) thd->start_time;
++  return (uchar*) &thd->sys_var_tmp.long_value;
++}
++
++
++bool sys_var_last_insert_id::update(THD *thd, set_var *var)
++{
++  thd->first_successful_insert_id_in_prev_stmt= 
++    var->save_result.ulonglong_value;
++  return 0;
++}
++
++
++uchar *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type,
++					LEX_STRING *base)
++{
++  /*
++    this tmp var makes it robust against change of type of
++    read_first_successful_insert_id_in_prev_stmt().
++  */
++  thd->sys_var_tmp.ulonglong_value= 
++    thd->read_first_successful_insert_id_in_prev_stmt();
++  return (uchar*) &thd->sys_var_tmp.ulonglong_value;
++}
++
++
++bool sys_var_insert_id::update(THD *thd, set_var *var)
++{
++  thd->force_one_auto_inc_interval(var->save_result.ulonglong_value);
++  return 0;
++}
++
++
++uchar *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type,
++				   LEX_STRING *base)
++{
++  thd->sys_var_tmp.ulonglong_value= 
++    thd->auto_inc_intervals_forced.minimum();
++  return (uchar*) &thd->sys_var_tmp.ulonglong_value;
++}
++
++
++bool sys_var_rand_seed1::update(THD *thd, set_var *var)
++{
++  thd->rand.seed1= (ulong) var->save_result.ulonglong_value;
++  return 0;
++}
++
++bool sys_var_rand_seed2::update(THD *thd, set_var *var)
++{
++  thd->rand.seed2= (ulong) var->save_result.ulonglong_value;
++  return 0;
++}
++
++
++bool sys_var_thd_time_zone::check(THD *thd, set_var *var)
++{
++  char buff[MAX_TIME_ZONE_NAME_LENGTH];
++  String str(buff, sizeof(buff), &my_charset_latin1);
++  String *res= var->value->val_str(&str);
++
++  if (!(var->save_result.time_zone= my_tz_find(thd, res)))
++  {
++    my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL");
++    return 1;
++  }
++  return 0;
++}
++
++
++bool sys_var_thd_time_zone::update(THD *thd, set_var *var)
++{
++  /* We are using Time_zone object found during check() phase. */
++  if (var->type == OPT_GLOBAL)
++  {
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.time_zone= var->save_result.time_zone;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.time_zone= var->save_result.time_zone;
++  return 0;
++}
++
++
++uchar *sys_var_thd_time_zone::value_ptr(THD *thd, enum_var_type type,
++				       LEX_STRING *base)
++{
++  /* 
++    We can use ptr() instead of c_ptr() here because the String containing
++    the time zone name is guaranteed to be zero-terminated.
++  */
++  if (type == OPT_GLOBAL)
++    return (uchar *)(global_system_variables.time_zone->get_name()->ptr());
++  else
++  {
++    /*
++      This is an ugly fix for replication: we don't replicate properly queries
++      invoking system variables' values to update tables; but
++      CONVERT_TZ(,,@@session.time_zone) is so popular that we make it
++      replicable (i.e. we tell the binlog code to store the session
++      timezone). If it's the global value which was used we can't replicate
++      (binlog code stores session value only).
++    */
++    thd->time_zone_used= 1;
++    return (uchar *)(thd->variables.time_zone->get_name()->ptr());
++  }
++}
++
++
++void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type)
++{
++ pthread_mutex_lock(&LOCK_global_system_variables);
++ if (type == OPT_GLOBAL)
++ {
++   if (default_tz_name)
++   {
++     String str(default_tz_name, &my_charset_latin1);
++     /*
++       We are guaranteed to find this time zone since its existence
++       is checked during start-up.
++     */
++     global_system_variables.time_zone= my_tz_find(thd, &str);
++   }
++   else
++     global_system_variables.time_zone= my_tz_SYSTEM;
++ }
++ else
++   thd->variables.time_zone= global_system_variables.time_zone;
++ pthread_mutex_unlock(&LOCK_global_system_variables);
++}
++
++
++bool sys_var_max_user_conn::check(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    return sys_var_thd::check(thd, var);
++  else
++  {
++    /*
++      Per-session values of max_user_connections can't be set directly.
++      Maybe we should have a separate error message for this?
++    */
++    my_error(ER_GLOBAL_VARIABLE, MYF(0), name);
++    return TRUE;
++  }
++}
++
++bool sys_var_max_user_conn::update(THD *thd, set_var *var)
++{
++  DBUG_ASSERT(var->type == OPT_GLOBAL);
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  max_user_connections= (uint)var->save_result.ulonglong_value;
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return 0;
++}
++
++
++void sys_var_max_user_conn::set_default(THD *thd, enum_var_type type)
++{
++  DBUG_ASSERT(type == OPT_GLOBAL);
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  max_user_connections= (ulong) option_limits->def_value;
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++}
++
++
++uchar *sys_var_max_user_conn::value_ptr(THD *thd, enum_var_type type,
++                                       LEX_STRING *base)
++{
++  if (type != OPT_GLOBAL &&
++      thd->user_connect && thd->user_connect->user_resources.user_conn)
++    return (uchar*) &(thd->user_connect->user_resources.user_conn);
++  return (uchar*) &(max_user_connections);
++}
++
++
++bool sys_var_thd_ulong_session_readonly::check(THD *thd, set_var *var)
++{
++  if (var->type != OPT_GLOBAL)
++  {
++    my_error(ER_VARIABLE_IS_READONLY, MYF(0), "SESSION", name, "GLOBAL");
++    return TRUE;
++  }
++
++  return sys_var_thd_ulong::check(thd, var);
++}
++
++
++bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var)
++{
++  MY_LOCALE *locale_match;
++
++  if (var->value->result_type() == INT_RESULT)
++  {
++    if (!(locale_match= my_locale_by_number((uint) var->value->val_int())))
++    {
++      char buf[20];
++      int10_to_str((int) var->value->val_int(), buf, -10);
++      my_printf_error(ER_UNKNOWN_ERROR, "Unknown locale: '%s'", MYF(0), buf);
++      return 1;
++    }
++  }
++  else // STRING_RESULT
++  {
++    char buff[6]; 
++    String str(buff, sizeof(buff), &my_charset_latin1), *res;
++    if (!(res=var->value->val_str(&str)))
++    {
++      my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL");
++      return 1;
++    }
++    const char *locale_str= res->c_ptr_safe();
++    if (!(locale_match= my_locale_by_name(locale_str)))
++    {
++      my_printf_error(ER_UNKNOWN_ERROR,
++                      "Unknown locale: '%s'", MYF(0), locale_str);
++      return 1;
++    }
++  }
++
++  var->save_result.locale_value= locale_match;
++  return 0;
++}
++
++
++bool sys_var_thd_lc_time_names::update(THD *thd, set_var *var)
++{
++  if (var->type == OPT_GLOBAL)
++    global_system_variables.lc_time_names= var->save_result.locale_value;
++  else
++    thd->variables.lc_time_names= var->save_result.locale_value;
++  return 0;
++}
++
++
++uchar *sys_var_thd_lc_time_names::value_ptr(THD *thd, enum_var_type type,
++					  LEX_STRING *base)
++{
++  return type == OPT_GLOBAL ?
++                 (uchar *) global_system_variables.lc_time_names->name :
++                 (uchar *) thd->variables.lc_time_names->name;
++}
++
++
++void sys_var_thd_lc_time_names::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.lc_time_names= my_default_lc_time_names;
++  else
++    thd->variables.lc_time_names= global_system_variables.lc_time_names;
++}
++
++/*
++  Handling of microseconds given as seconds.part_seconds
++
++  NOTES
++    The argument (for example to long_query_time) is given in seconds as a
++    decimal value and is converted to a ulonglong integer holding
++    microseconds for storage; see the worked example after update() below.
++    This is used for handling long_query_time.
++*/
++
++bool sys_var_microseconds::update(THD *thd, set_var *var)
++{
++  double num= var->value->val_real();
++  longlong microseconds;
++  if (num > (double) option_limits->max_value)
++    num= (double) option_limits->max_value;
++  if (num < (double) option_limits->min_value)
++    num= (double) option_limits->min_value;
++  microseconds= (longlong) (num * 1000000.0 + 0.5);
++  if (var->type == OPT_GLOBAL)
++  {
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    (global_system_variables.*offset)= microseconds;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= microseconds;
++  return 0;
++}
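++
++/*
++  Worked example for the conversion above (illustrative, not part of the
++  patched source):
++
++    SET GLOBAL long_query_time= 0.5;
++      num          = 0.5        (clamped to option_limits if out of range)
++      microseconds = (longlong) (0.5 * 1000000.0 + 0.5) = 500000
++
++  value_ptr() performs the inverse division by 1000000.0, so the value is
++  reported in seconds (0.5) again.
++*/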
++
++
++void sys_var_microseconds::set_default(THD *thd, enum_var_type type)
++{
++  longlong microseconds= (longlong) (option_limits->def_value * 1000000.0);
++  if (type == OPT_GLOBAL)
++  {
++    pthread_mutex_lock(&LOCK_global_system_variables);
++    global_system_variables.*offset= microseconds;
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++  }
++  else
++    thd->variables.*offset= microseconds;
++}
++
++
++uchar *sys_var_microseconds::value_ptr(THD *thd, enum_var_type type,
++                                          LEX_STRING *base)
++{
++  thd->tmp_double_value= (double) ((type == OPT_GLOBAL) ?
++                                   global_system_variables.*offset :
++                                   thd->variables.*offset) / 1000000.0;
++  return (uchar*) &thd->tmp_double_value;
++}
++
++
++/*
++  Functions to update thd->options bits
++*/
++
++static bool set_option_bit(THD *thd, set_var *var)
++{
++  sys_var_thd_bit *sys_var= ((sys_var_thd_bit*) var->var);
++  if ((var->save_result.ulong_value != 0) == sys_var->reverse)
++    thd->options&= ~sys_var->bit_flag;
++  else
++    thd->options|= sys_var->bit_flag;
++  return 0;
++}
++
++/*
++  Functions to be used only to update the OPTION_BIN_LOG bit of thd->options
++*/
++static bool set_option_log_bin_bit(THD *thd, set_var *var)
++{
++  set_option_bit(thd, var);
++  if (!thd->in_sub_stmt)
++    thd->sql_log_bin_toplevel= thd->options & OPTION_BIN_LOG;
++  return 0;
++}
++
++static bool set_option_autocommit(THD *thd, set_var *var)
++{
++  /* The test is negative as the flag we use is NOT autocommit */
++
++  ulonglong org_options= thd->options;
++
++  if (var->save_result.ulong_value != 0)
++    thd->options&= ~((sys_var_thd_bit*) var->var)->bit_flag;
++  else
++    thd->options|= ((sys_var_thd_bit*) var->var)->bit_flag;
++
++  if ((org_options ^ thd->options) & OPTION_NOT_AUTOCOMMIT)
++  {
++    if ((org_options & OPTION_NOT_AUTOCOMMIT))
++    {
++      /* We changed to auto_commit mode */
++      if (thd->transaction.xid_state.xa_state != XA_NOTR)
++      {
++        thd->options= org_options;
++        my_error(ER_XAER_RMFAIL, MYF(0),
++                 xa_state_names[thd->transaction.xid_state.xa_state]);
++        return 1;
++      }
++      thd->options&= ~(ulonglong) (OPTION_BEGIN | OPTION_KEEP_LOG);
++      thd->transaction.all.modified_non_trans_table= FALSE;
++      thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
++      if (ha_commit(thd))
++	return 1;
++    }
++    else
++    {
++      thd->transaction.all.modified_non_trans_table= FALSE;
++      thd->server_status&= ~SERVER_STATUS_AUTOCOMMIT;
++    }
++  }
++  return 0;
++}
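++
++/*
++  Transition sketch for set_option_autocommit() above (illustrative, not
++  part of the patched source):
++
++    SET autocommit=1 while OPTION_NOT_AUTOCOMMIT was set:
++      - refused with ER_XAER_RMFAIL if an XA transaction is active,
++      - otherwise OPTION_BEGIN/OPTION_KEEP_LOG are cleared and any pending
++        transaction is committed via ha_commit().
++    SET autocommit=0 while autocommit was on:
++      - SERVER_STATUS_AUTOCOMMIT is cleared; nothing is committed.
++    Re-setting the current value is a no-op (the XOR test above).
++*/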
++
++static int check_log_update(THD *thd, set_var *var)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (!(thd->security_ctx->master_access & SUPER_ACL))
++  {
++    my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
++    return 1;
++  }
++#endif
++  return 0;
++}
++
++static bool set_log_update(THD *thd, set_var *var)
++{
++  /*
++    The update log is not supported anymore since 5.0.
++    See the comments in function init_server_components() in sql/mysqld.cc
++    for an explanation of the different warnings we send below.
++  */
++
++  if (opt_sql_bin_update)
++  {
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                 ER_UPDATE_LOG_DEPRECATED_TRANSLATED,
++                 ER(ER_UPDATE_LOG_DEPRECATED_TRANSLATED));
++  }
++  else
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                 ER_UPDATE_LOG_DEPRECATED_IGNORED,
++                 ER(ER_UPDATE_LOG_DEPRECATED_IGNORED));
++  set_option_bit(thd, var);
++  return 0;
++}
++
++
++static int check_pseudo_thread_id(THD *thd, set_var *var)
++{
++  var->save_result.ulonglong_value= var->value->val_int();
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (thd->security_ctx->master_access & SUPER_ACL)
++    return 0;
++  else
++  {
++    my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
++    return 1;
++  }
++#else
++  return 0;
++#endif
++}
++
++static uchar *get_warning_count(THD *thd)
++{
++  thd->sys_var_tmp.long_value=
++    (thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] +
++     thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] +
++     thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]);
++  return (uchar*) &thd->sys_var_tmp.long_value;
++}
++
++static uchar *get_error_count(THD *thd)
++{
++  thd->sys_var_tmp.long_value= 
++    thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR];
++  return (uchar*) &thd->sys_var_tmp.long_value;
++}
++
++
++/**
++  Get the tmpdir that was specified or chosen by default.
++
++  This is necessary because if the user does not specify a temporary
++  directory via the command line, one is chosen based on the environment
++  or system defaults.  But we can't just always use mysql_tmpdir, because
++  that is actually a call to my_tmpdir() which cycles among possible
++  temporary directories.
++
++  @param thd		thread handle
++
++  @retval
++    ptr		pointer to NUL-terminated string
++*/
++static uchar *get_tmpdir(THD *thd)
++{
++  if (opt_mysql_tmpdir)
++    return (uchar *)opt_mysql_tmpdir;
++  return (uchar*)mysql_tmpdir;
++}
++
++static uchar *get_myisam_mmap_size(THD *thd)
++{
++  return (uchar *)&myisam_mmap_size;
++}
++
++
++/****************************************************************************
++  Main handling of variables:
++  - Initialisation
++  - Searching during parsing
++  - Update loop
++****************************************************************************/
++
++/**
++  Find variable name in option my_getopt structure used for
++  command line args.
++
++  @param opt	option structure array to search in
++  @param name	variable name
++
++  @retval
++    0		Error
++  @retval
++    ptr		pointer to option structure
++*/
++
++static struct my_option *find_option(struct my_option *opt, const char *name) 
++{
++  uint length=strlen(name);
++  for (; opt->name; opt++)
++  {
++    if (!getopt_compare_strings(opt->name, name, length) &&
++	!opt->name[length])
++    {
++      /*
++	Only accept the option if one can set values through it.
++	If not, there is no default value or limits in the option.
++      */
++      return (opt->value) ? opt : 0;
++    }
++  }
++  return 0;
++}
++
++
++/**
++  Return variable name and length for hashing of variables.
++*/
++
++static uchar *get_sys_var_length(const sys_var *var, size_t *length,
++                                 my_bool first)
++{
++  *length= var->name_length;
++  return (uchar*) var->name;
++}
++
++
++/*
++  Add variables to the dynamic hash of system variables
++  
++  SYNOPSIS
++    mysql_add_sys_var_chain()
++    first       Pointer to first system variable to add
++    long_options (optional) command line options; matching entries supply the variables' limit checks.
++  
++  RETURN VALUES
++    0           SUCCESS
++    otherwise   FAILURE
++*/
++
++
++int mysql_add_sys_var_chain(sys_var *first, struct my_option *long_options)
++{
++  sys_var *var;
++  
++  /* A write lock should be held on LOCK_system_variables_hash */
++  
++  for (var= first; var; var= var->next)
++  {
++    var->name_length= strlen(var->name);
++    /* this fails if there is a conflicting variable name. see HASH_UNIQUE */
++    if (my_hash_insert(&system_variable_hash, (uchar*) var))
++      goto error;
++    if (long_options)
++      var->option_limits= find_option(long_options, var->name);
++  }
++  return 0;
++
++error:
++  for (; first != var; first= first->next)
++    hash_delete(&system_variable_hash, (uchar*) first);
++  return 1;
++}
++ 
++ 
++/*
++  Remove variables from the dynamic hash of system variables
++   
++  SYNOPSIS
++    mysql_del_sys_var_chain()
++    first       Pointer to first system variable to remove
++   
++  RETURN VALUES
++    0           SUCCESS
++    otherwise   FAILURE
++*/
++ 
++int mysql_del_sys_var_chain(sys_var *first)
++{
++  int result= 0;
++ 
++  /* A write lock should be held on LOCK_system_variables_hash */
++   
++  for (sys_var *var= first; var; var= var->next)
++    result|= hash_delete(&system_variable_hash, (uchar*) var);
++
++  return result;
++}
++ 
++ 
++static int show_cmp(SHOW_VAR *a, SHOW_VAR *b)
++{
++  return strcmp(a->name, b->name);
++}
++ 
++ 
++/*
++  Constructs an array of system variables for display to the user.
++  
++  SYNOPSIS
++    enumerate_sys_vars()
++    thd         current thread
++    sorted      If TRUE, the system variables should be sorted
++  
++  RETURN VALUES
++    pointer     Array of SHOW_VAR elements for display
++    NULL        FAILURE
++*/
++
++SHOW_VAR* enumerate_sys_vars(THD *thd, bool sorted)
++{
++  int count= system_variable_hash.records, i;
++  int size= sizeof(SHOW_VAR) * (count + 1);
++  SHOW_VAR *result= (SHOW_VAR*) thd->alloc(size);
++
++  if (result)
++  {
++    SHOW_VAR *show= result;
++
++    for (i= 0; i < count; i++)
++    {
++      sys_var *var= (sys_var*) hash_element(&system_variable_hash, i);
++      show->name= var->name;
++      show->value= (char*) var;
++      show->type= SHOW_SYS;
++      show++;
++    }
++
++    /* sort into order */
++    if (sorted)
++      my_qsort(result, count, sizeof(SHOW_VAR),
++               (qsort_cmp) show_cmp);
++    
++    /* make last element empty */
++    bzero(show, sizeof(SHOW_VAR));
++  }
++  return result;
++}
++
++
++/*
++  Initialize the system variables
++  
++  SYNOPSIS
++    set_var_init()
++  
++  RETURN VALUES
++    0           SUCCESS
++    otherwise   FAILURE
++*/
++
++int set_var_init()
++{
++  uint count= 0;
++  DBUG_ENTER("set_var_init");
++  
++  for (sys_var *var=vars.first; var; var= var->next, count++) ;
++
++  if (hash_init(&system_variable_hash, system_charset_info, count, 0,
++                0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
++    goto error;
++
++  vars.last->next= NULL;
++  if (mysql_add_sys_var_chain(vars.first, my_long_options))
++    goto error;
++
++  /*
++    Special cases
++    Needed because MySQL can't find the limits for a variable if it has
++    a different name than the command line option.
++    As these variables are deprecated, this code will disappear soon...
++  */
++  sys_sql_max_join_size.option_limits= sys_max_join_size.option_limits;
++
++  DBUG_RETURN(0);
++
++error:
++  fprintf(stderr, "failed to initialize system variables");
++  DBUG_RETURN(1);
++}
++
++
++void set_var_free()
++{
++  hash_free(&system_variable_hash);
++}
++
++
++/**
++  Find a user-settable system variable.
++
++  @param str	   Name of system variable to find
++  @param length    Length of variable.  zero means that we should use strlen()
++                   on the variable
++  @param no_error  If TRUE, don't emit an error when the variable is not found.
++
++  @retval
++    pointer	pointer to variable definitions
++  @retval
++    0		Unknown variable (error message is given)
++*/
++
++sys_var *intern_find_sys_var(const char *str, uint length, bool no_error)
++{
++  sys_var *var;
++
++  /*
++    This function is only called from sql_plugin.cc.
++    A lock on LOCK_system_variables_hash should be held
++  */
++  var= (sys_var*) hash_search(&system_variable_hash,
++			      (uchar*) str, length ? length : strlen(str));
++  if (!(var || no_error))
++    my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str);
++
++  return var;
++}
++
++
++/**
++  Execute update of all variables.
++
++  First run a check on all variables to make sure all updates will go OK.
++  If yes, then execute all updates, returning an error if any one failed.
++
++  This should ensure that in all normal cases either none or all of the
++  variables are updated.
++
++  @param thd		Thread handler
++  @param var_list       List of variables to update
++
++  @retval
++    0	ok
++  @retval
++    1	ERROR, message sent (normally no variables were updated)
++  @retval
++    -1  ERROR, message not sent
++*/
++
++int sql_set_variables(THD *thd, List<set_var_base> *var_list)
++{
++  int error;
++  List_iterator_fast<set_var_base> it(*var_list);
++  DBUG_ENTER("sql_set_variables");
++
++  set_var_base *var;
++  while ((var=it++))
++  {
++    if ((error= var->check(thd)))
++      goto err;
++  }
++  if (!(error= test(thd->is_error())))
++  {
++    it.rewind();
++    while ((var= it++))
++      error|= var->update(thd);         // Returns 0, -1 or 1
++  }
++
++err:
++  free_underlaid_joins(thd, &thd->lex->select_lex);
++  DBUG_RETURN(error);
++}
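++
++/*
++  Behaviour sketch (illustrative, not part of the patched source):
++
++    SET SESSION sort_buffer_size= 262144, sql_mode= 'NO_SUCH_MODE';
++
++  check() runs for every assignment before any update(): the invalid
++  sql_mode value fails with ER_WRONG_VALUE_FOR_VAR, so sort_buffer_size
++  keeps its old value even though it is listed first.
++*/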
++
++
++/**
++  Say if all variables set by a SET support the ONE_SHOT keyword
++  (currently, only character set and collation do; later timezones
++  will).
++
++  @param var_list	List of variables to update
++
++  @note
++    It has a "not_" because it makes faster tests (no need to "!")
++
++  @retval
++    0	all variables of the list support ONE_SHOT
++  @retval
++    1	at least one does not support ONE_SHOT
++*/
++
++bool not_all_support_one_shot(List<set_var_base> *var_list)
++{
++  List_iterator_fast<set_var_base> it(*var_list);
++  set_var_base *var;
++  while ((var= it++))
++  {
++    if (var->no_support_one_shot())
++      return 1;
++  }
++  return 0;
++}
++
++
++/*****************************************************************************
++  Functions to handle SET mysql_internal_variable=const_expr
++*****************************************************************************/
++
++int set_var::check(THD *thd)
++{
++  if (var->is_readonly())
++  {
++    my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), var->name, "read only");
++    return -1;
++  }
++  if (var->check_type(type))
++  {
++    int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE;
++    my_error(err, MYF(0), var->name);
++    return -1;
++  }
++  if ((type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL)))
++    return 1;
++  /* value is a NULL pointer if we are using SET ... = DEFAULT */
++  if (!value)
++  {
++    if (var->check_default(type))
++    {
++      my_error(ER_NO_DEFAULT, MYF(0), var->name);
++      return -1;
++    }
++    return 0;
++  }
++
++  if ((!value->fixed &&
++       value->fix_fields(thd, &value)) || value->check_cols(1))
++    return -1;
++  if (var->check_update_type(value->result_type()))
++  {
++    my_error(ER_WRONG_TYPE_FOR_VAR, MYF(0), var->name);
++    return -1;
++  }
++  return var->check(thd, this) ? -1 : 0;
++}
++
++
++/**
++  Check variable, but without assigning value (used by PS).
++
++  @param thd		thread handler
++
++  @retval
++    0	ok
++  @retval
++    1	ERROR, message sent (normally no variables were updated)
++  @retval
++    -1   ERROR, message not sent
++*/
++int set_var::light_check(THD *thd)
++{
++  if (var->check_type(type))
++  {
++    int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE;
++    my_error(err, MYF(0), var->name);
++    return -1;
++  }
++  if (type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL))
++    return 1;
++
++  if (value && ((!value->fixed && value->fix_fields(thd, &value)) ||
++                value->check_cols(1)))
++    return -1;
++  return 0;
++}
++
++/**
++  Update variable
++
++  @param   thd    thread handler
++  @returns 0|1    ok or	ERROR
++
++  @note ERROR can only be due to abnormal operations involving
++  the server's execution environment, such as running out of memory
++  or a hard disk failure.
++  Use the set_var::check() method if there is a need to return
++  an error due to logic.
++*/
++int set_var::update(THD *thd)
++{
++  if (!value)
++    var->set_default(thd, type);
++  else if (var->update(thd, this))
++    return -1;				// should never happen
++  if (var->after_update)
++    (*var->after_update)(thd, type);
++  return 0;
++}
++
++
++/*****************************************************************************
++  Functions to handle SET @user_variable=const_expr
++*****************************************************************************/
++
++int set_var_user::check(THD *thd)
++{
++  /*
++    Item_func_set_user_var can't be substituted with something else in its
++    place, so 0 can be passed as the last argument (reference to the item)
++  */
++  return (user_var_item->fix_fields(thd, (Item**) 0) ||
++	  user_var_item->check(0)) ? -1 : 0;
++}
++
++
++/**
++  Check variable, but without assigning value (used by PS).
++
++  @param thd		thread handler
++
++  @retval
++    0	ok
++  @retval
++    1	ERROR, message sent (normally no variables were updated)
++  @retval
++    -1   ERROR, message not sent
++*/
++int set_var_user::light_check(THD *thd)
++{
++  /*
++    Item_func_set_user_var can't be substituted with something else in its
++    place, so 0 can be passed as the last argument (reference to the item)
++  */
++  return (user_var_item->fix_fields(thd, (Item**) 0));
++}
++
++
++int set_var_user::update(THD *thd)
++{
++  if (user_var_item->update())
++  {
++    /* Give an error if it's not given already */
++    my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY), MYF(0));
++    return -1;
++  }
++  return 0;
++}
++
++
++/*****************************************************************************
++  Functions to handle SET PASSWORD
++*****************************************************************************/
++
++int set_var_password::check(THD *thd)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (!user->host.str)
++  {
++    DBUG_ASSERT(thd->security_ctx->priv_host);
++    if (*thd->security_ctx->priv_host != 0)
++    {
++      user->host.str= (char *) thd->security_ctx->priv_host;
++      user->host.length= strlen(thd->security_ctx->priv_host);
++    }
++    else
++    {
++      user->host.str= (char *)"%";
++      user->host.length= 1;
++    }
++  }
++  if (!user->user.str)
++  {
++    DBUG_ASSERT(thd->security_ctx->priv_user);
++    user->user.str= (char *) thd->security_ctx->priv_user;
++    user->user.length= strlen(thd->security_ctx->priv_user);
++  }
++  /* Returns 1 as the function sends error to client */
++  return check_change_password(thd, user->host.str, user->user.str,
++                               password, strlen(password)) ? 1 : 0;
++#else
++  return 0;
++#endif
++}
++
++int set_var_password::update(THD *thd)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  /* Returns 1 as the function sends error to client */
++  return change_password(thd, user->host.str, user->user.str, password) ?
++	  1 : 0;
++#else
++  return 0;
++#endif
++}
++
++/****************************************************************************
++ Functions to handle table_type
++****************************************************************************/
++
++/* Based upon sys_var::check_enum() */
++
++bool sys_var_thd_storage_engine::check(THD *thd, set_var *var)
++{
++  char buff[STRING_BUFFER_USUAL_SIZE];
++  const char *value;
++  String str(buff, sizeof(buff), &my_charset_latin1), *res;
++
++  var->save_result.plugin= NULL;
++  if (var->value->result_type() == STRING_RESULT)
++  {
++    LEX_STRING engine_name;
++    handlerton *hton;
++    if (!(res=var->value->val_str(&str)) ||
++        !(engine_name.str= (char *)res->ptr()) ||
++        !(engine_name.length= res->length()) ||
++	!(var->save_result.plugin= ha_resolve_by_name(thd, &engine_name)) ||
++        !(hton= plugin_data(var->save_result.plugin, handlerton *)) ||
++        ha_checktype(thd, ha_legacy_type(hton), 1, 0) != hton)
++    {
++      value= res ? res->c_ptr() : "NULL";
++      goto err;
++    }
++    return 0;
++  }
++  value= "unknown";
++
++err:
++  my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), value);
++  return 1;
++}
++
++
++uchar *sys_var_thd_storage_engine::value_ptr(THD *thd, enum_var_type type,
++					    LEX_STRING *base)
++{
++  uchar* result;
++  handlerton *hton;
++  LEX_STRING *engine_name;
++  plugin_ref plugin= thd->variables.*offset;
++  if (type == OPT_GLOBAL)
++    plugin= my_plugin_lock(thd, &(global_system_variables.*offset));
++  hton= plugin_data(plugin, handlerton*);
++  engine_name= &hton2plugin[hton->slot]->name;
++  result= (uchar *) thd->strmake(engine_name->str, engine_name->length);
++  if (type == OPT_GLOBAL)
++    plugin_unlock(thd, plugin);
++  return result;
++}
++
++
++void sys_var_thd_storage_engine::set_default(THD *thd, enum_var_type type)
++{
++  plugin_ref old_value, new_value, *value;
++  if (type == OPT_GLOBAL)
++  {
++    value= &(global_system_variables.*offset);
++    new_value= ha_lock_engine(NULL, myisam_hton);
++  }
++  else
++  {
++    value= &(thd->variables.*offset);
++    new_value= my_plugin_lock(NULL, &(global_system_variables.*offset));
++  }
++  DBUG_ASSERT(new_value);
++  old_value= *value;
++  *value= new_value;
++  plugin_unlock(NULL, old_value);
++}
++
++
++bool sys_var_thd_storage_engine::update(THD *thd, set_var *var)
++{
++  plugin_ref *value= &(global_system_variables.*offset), old_value;
++   if (var->type != OPT_GLOBAL)
++     value= &(thd->variables.*offset);
++  old_value= *value;
++  if (old_value != var->save_result.plugin)
++  {
++    *value= my_plugin_lock(NULL, &var->save_result.plugin);
++    plugin_unlock(NULL, old_value);
++  }
++  return 0;
++}
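
   The update above is essentially a locked swap of reference-counted plugin
   handles, releasing the old reference only when the value actually changes.
   A rough sketch of that shape, with std::shared_ptr standing in for
   plugin_ref (this is not the server's plugin locking API):

    #include <memory>
    #include <string>

    struct Engine { std::string name; };
    using EngineRef= std::shared_ptr<Engine>;   // stand-in for plugin_ref

    // Same shape as sys_var_thd_storage_engine::update(): only swap (and
    // drop the old reference) when the value actually changes.
    static void set_engine(EngineRef *slot, const EngineRef &new_value)
    {
      if (*slot != new_value)
        *slot= new_value;        // old reference released here
    }

    int main()
    {
      EngineRef current= std::make_shared<Engine>(Engine{"MyISAM"});
      EngineRef wanted=  std::make_shared<Engine>(Engine{"InnoDB"});
      set_engine(&current, wanted);
      return current->name == "InnoDB" ? 0 : 1;
    }
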
++
++void sys_var_thd_table_type::warn_deprecated(THD *thd)
++{
++  WARN_DEPRECATED(thd, "6.0", "@@table_type", "'@@storage_engine'");
++}
++
++void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type)
++{
++  warn_deprecated(thd);
++  sys_var_thd_storage_engine::set_default(thd, type);
++}
++
++bool sys_var_thd_table_type::update(THD *thd, set_var *var)
++{
++  warn_deprecated(thd);
++  return sys_var_thd_storage_engine::update(thd, var);
++}
++
++
++/****************************************************************************
++ Functions to handle sql_mode
++****************************************************************************/
++
++/**
++  Make string representation of mode.
++
++  @param[in]  thd    thread handler
++  @param[in]  val    sql_mode value
++  @param[out] len    pointer on length of string
++
++  @return
++    pointer to string with sql_mode representation
++*/
++
++bool
++sys_var_thd_sql_mode::
++symbolic_mode_representation(THD *thd, ulonglong val, LEX_STRING *rep)
++{
++  char buff[STRING_BUFFER_USUAL_SIZE*8];
++  String tmp(buff, sizeof(buff), &my_charset_latin1);
++
++  tmp.length(0);
++
++  for (uint i= 0; val; val>>= 1, i++)
++  {
++    if (val & 1)
++    {
++      tmp.append(sql_mode_typelib.type_names[i],
++                 sql_mode_typelib.type_lengths[i]);
++      tmp.append(',');
++    }
++  }
++
++  if (tmp.length())
++    tmp.length(tmp.length() - 1); /* trim the trailing comma */
++
++  rep->str= thd->strmake(tmp.ptr(), tmp.length());
++
++  rep->length= rep->str ? tmp.length() : 0;
++
++  return rep->length != tmp.length();
++}
++
++
++uchar *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type,
++				      LEX_STRING *base)
++{
++  LEX_STRING sql_mode;
++  ulonglong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
++                  thd->variables.*offset);
++  (void) symbolic_mode_representation(thd, val, &sql_mode);
++  return (uchar *) sql_mode.str;
++}
++
++
++void sys_var_thd_sql_mode::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= 0;
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++
++void fix_sql_mode_var(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.sql_mode=
++      fix_sql_mode(global_system_variables.sql_mode);
++  else
++  {
++    thd->variables.sql_mode= fix_sql_mode(thd->variables.sql_mode);
++    /*
++      Update thd->server_status
++     */
++    if (thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
++      thd->server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
++    else
++      thd->server_status&= ~SERVER_STATUS_NO_BACKSLASH_ESCAPES;
++  }
++}
++
++/** Map database specific bits to function bits. */
++
++ulong fix_sql_mode(ulong sql_mode)
++{
++  /*
++    Note that we don't set
++    MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | MODE_NO_FIELD_OPTIONS
++    to allow one to get full use of MySQL in this mode.
++  */
++
++  if (sql_mode & MODE_ANSI)
++  {
++    sql_mode|= (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE);
++    /* 
++      MODE_ONLY_FULL_GROUP_BY removed from ANSI mode because it is currently
++      overly restrictive (see BUG#8510).
++    */
++  }
++  if (sql_mode & MODE_ORACLE)
++    sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE |
++		MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS |
++		MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER);
++  if (sql_mode & MODE_MSSQL)
++    sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE |
++		MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS |
++		MODE_NO_FIELD_OPTIONS);
++  if (sql_mode & MODE_POSTGRESQL)
++    sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE |
++		MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS |
++		MODE_NO_FIELD_OPTIONS);
++  if (sql_mode & MODE_DB2)
++    sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE |
++		MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS |
++		MODE_NO_FIELD_OPTIONS);
++  if (sql_mode & MODE_MAXDB)
++    sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
++		MODE_IGNORE_SPACE |
++		MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS |
++		MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER);
++  if (sql_mode & MODE_MYSQL40)
++    sql_mode|= MODE_HIGH_NOT_PRECEDENCE;
++  if (sql_mode & MODE_MYSQL323)
++    sql_mode|= MODE_HIGH_NOT_PRECEDENCE;
++  if (sql_mode & MODE_TRADITIONAL)
++    sql_mode|= (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES |
++                MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
++                MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER);
++  return sql_mode;
++}
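
   The composite modes handled above are unions of elementary bit flags.
   A reduced sketch of that expansion step in isolation, using made-up flag
   values (the real MODE_* constants come from the server headers):

    #include <cstdio>

    // Made-up flag values for illustration only.
    enum : unsigned long {
      MODE_PIPES_AS_CONCAT= 1UL << 0,
      MODE_ANSI_QUOTES=     1UL << 1,
      MODE_IGNORE_SPACE=    1UL << 2,
      MODE_ORACLE=          1UL << 3
    };

    // Same shape as fix_sql_mode(): a named "umbrella" mode pulls in the
    // elementary flags it implies.
    static unsigned long expand_mode(unsigned long mode)
    {
      if (mode & MODE_ORACLE)
        mode|= MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE;
      return mode;
    }

    int main()
    {
      printf("0x%lx\n", expand_mode(MODE_ORACLE));  // prints 0xf
      return 0;
    }
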
++
++
++bool
++sys_var_thd_optimizer_switch::
++symbolic_mode_representation(THD *thd, ulonglong val, LEX_STRING *rep)
++{
++  char buff[STRING_BUFFER_USUAL_SIZE*8];
++  String tmp(buff, sizeof(buff), &my_charset_latin1);
++  int i;
++  ulonglong bit;
++  tmp.length(0);
++ 
++  for (i= 0, bit=1; bit != OPTIMIZER_SWITCH_LAST; i++, bit= bit << 1)
++  {
++    tmp.append(optimizer_switch_typelib.type_names[i],
++               optimizer_switch_typelib.type_lengths[i]);
++    tmp.append('=');
++    tmp.append((val & bit)? "on":"off");
++    tmp.append(',');
++  }
++
++  if (tmp.length())
++    tmp.length(tmp.length() - 1); /* trim the trailing comma */
++
++  rep->str= thd->strmake(tmp.ptr(), tmp.length());
++
++  rep->length= rep->str ? tmp.length() : 0;
++
++  return rep->length != tmp.length();
++}
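
   The formatting step above turns a bitmask into a comma-separated
   name=on/off list. A self-contained version of the same idea, with
   placeholder switch names (the server takes these from
   optimizer_switch_typelib instead):

    #include <string>
    #include <cstdio>

    // Placeholder switch names for illustration.
    static const char *names[]= { "index_merge", "index_merge_union",
                                  "index_merge_sort_union" };

    static std::string switches_to_string(unsigned long long val)
    {
      std::string out;
      for (unsigned i= 0; i < 3; i++)
      {
        if (!out.empty())
          out+= ',';
        out+= names[i];
        out+= (val & (1ULL << i)) ? "=on" : "=off";
      }
      return out;
    }

    int main()
    {
      printf("%s\n", switches_to_string(0x5).c_str());
      // index_merge=on,index_merge_union=off,index_merge_sort_union=on
      return 0;
    }
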
++
++
++uchar *sys_var_thd_optimizer_switch::value_ptr(THD *thd, enum_var_type type,
++				               LEX_STRING *base)
++{
++  LEX_STRING opts;
++  ulonglong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
++                  thd->variables.*offset);
++  (void) symbolic_mode_representation(thd, val, &opts);
++  return (uchar *) opts.str;
++}
++
++
++/*
++  Check (and actually parse) string representation of @@optimizer_switch.
++*/
++
++bool sys_var_thd_optimizer_switch::check(THD *thd, set_var *var)
++{
++  bool not_used;
++  char buff[STRING_BUFFER_USUAL_SIZE], *error= 0;
++  uint error_len= 0;
++  String str(buff, sizeof(buff), system_charset_info), *res;
++
++  if (!(res= var->value->val_str(&str)))
++  {
++    strmov(buff, "NULL");
++    goto err;
++  }
++  
++  if (res->length() == 0)
++  {
++    buff[0]= 0;
++    goto err;
++  }
++
++  var->save_result.ulong_value= 
++    (ulong)find_set_from_flags(&optimizer_switch_typelib, 
++                               optimizer_switch_typelib.count, 
++                               thd->variables.optimizer_switch,
++                               global_system_variables.optimizer_switch,
++                               res->c_ptr_safe(), res->length(), NULL,
++                               &error, &error_len, &not_used);
++  if (error_len)
++  {
++    strmake(buff, error, min(sizeof(buff) - 1, error_len));
++    goto err;
++  }
++  return FALSE;
++err:
++  my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buff);
++  return TRUE;
++}
++
++
++void sys_var_thd_optimizer_switch::set_default(THD *thd, enum_var_type type)
++{
++  if (type == OPT_GLOBAL)
++    global_system_variables.*offset= OPTIMIZER_SWITCH_DEFAULT;
++  else
++    thd->variables.*offset= global_system_variables.*offset;
++}
++
++/****************************************************************************
++  Named list handling
++****************************************************************************/
++
++uchar* find_named(I_List<NAMED_LIST> *list, const char *name, uint length,
++		NAMED_LIST **found)
++{
++  I_List_iterator<NAMED_LIST> it(*list);
++  NAMED_LIST *element;
++  while ((element= it++))
++  {
++    if (element->cmp(name, length))
++    {
++      if (found)
++        *found= element;
++      return element->data;
++    }
++  }
++  return 0;
++}
++
++
++void delete_elements(I_List<NAMED_LIST> *list,
++		     void (*free_element)(const char *name, uchar*))
++{
++  NAMED_LIST *element;
++  DBUG_ENTER("delete_elements");
++  while ((element= list->get()))
++  {
++    (*free_element)(element->name, element->data);
++    delete element;
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/* Key cache functions */
++
++static KEY_CACHE *create_key_cache(const char *name, uint length)
++{
++  KEY_CACHE *key_cache;
++  DBUG_ENTER("create_key_cache");
++  DBUG_PRINT("enter",("name: %.*s", length, name));
++  
++  if ((key_cache= (KEY_CACHE*) my_malloc(sizeof(KEY_CACHE),
++					     MYF(MY_ZEROFILL | MY_WME))))
++  {
++    if (!new NAMED_LIST(&key_caches, name, length, (uchar*) key_cache))
++    {
++      my_free((char*) key_cache, MYF(0));
++      key_cache= 0;
++    }
++    else
++    {
++      /*
++	Set default values for a key cache
++	The values in dflt_key_cache_var are set by my_getopt() at startup
++
++	We don't set 'buff_size' as this is used to enable the key cache
++      */
++      key_cache->param_block_size=     dflt_key_cache_var.param_block_size;
++      key_cache->param_division_limit= dflt_key_cache_var.param_division_limit;
++      key_cache->param_age_threshold=  dflt_key_cache_var.param_age_threshold;
++    }
++  }
++  DBUG_RETURN(key_cache);
++}
++
++
++KEY_CACHE *get_or_create_key_cache(const char *name, uint length)
++{
++  LEX_STRING key_cache_name;
++  KEY_CACHE *key_cache;
++
++  key_cache_name.str= (char *) name;
++  key_cache_name.length= length;
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  if (!(key_cache= get_key_cache(&key_cache_name)))
++    key_cache= create_key_cache(name, length);
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  return key_cache;
++}
++
++
++void free_key_cache(const char *name, KEY_CACHE *key_cache)
++{
++  ha_end_key_cache(key_cache);
++  my_free((char*) key_cache, MYF(0));
++}
++
++
++bool process_key_caches(process_key_cache_t func)
++{
++  I_List_iterator<NAMED_LIST> it(key_caches);
++  NAMED_LIST *element;
++
++  while ((element= it++))
++  {
++    KEY_CACHE *key_cache= (KEY_CACHE *) element->data;
++    func(element->name, key_cache);
++  }
++  return 0;
++}
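
   The key cache bookkeeping above is a name-to-struct registry with lazy
   creation. A reduced sketch of the same contract, using a std::map in
   place of I_List<NAMED_LIST>; the real function also holds
   LOCK_global_system_variables, which is omitted here:

    #include <map>
    #include <string>

    struct KeyCacheCfg          // stand-in for KEY_CACHE's parameter block
    {
      unsigned long block_size= 1024;
      unsigned long division_limit= 100;
    };

    static std::map<std::string, KeyCacheCfg> caches;

    // Same contract as get_or_create_key_cache(): look the name up and
    // create a default-initialised entry on first use.
    static KeyCacheCfg *get_or_create(const std::string &name)
    {
      return &caches[name];      // operator[] inserts defaults if missing
    }

    int main()
    {
      KeyCacheCfg *hot= get_or_create("hot_cache");
      hot->block_size= 4096;
      return get_or_create("hot_cache")->block_size == 4096 ? 0 : 1;
    }
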
++
++
++void sys_var_trust_routine_creators::warn_deprecated(THD *thd)
++{
++  WARN_DEPRECATED(thd, VER_CELOSIA, "@@log_bin_trust_routine_creators",
++                      "'@@log_bin_trust_function_creators'");
++}
++
++void sys_var_trust_routine_creators::set_default(THD *thd, enum_var_type type)
++{
++  warn_deprecated(thd);
++  sys_var_bool_ptr::set_default(thd, type);
++}
++
++bool sys_var_trust_routine_creators::update(THD *thd, set_var *var)
++{
++  warn_deprecated(thd);
++  return sys_var_bool_ptr::update(thd, var);
++}
++
++bool sys_var_opt_readonly::update(THD *thd, set_var *var)
++{
++  bool result;
++
++  DBUG_ENTER("sys_var_opt_readonly::update");
++
++  /* Prevent self dead-lock */
++  if (thd->locked_tables || thd->active_transaction())
++  {
++    my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
++    DBUG_RETURN(true);
++  }
++
++  if (thd->global_read_lock)
++  {
++    /*
++      This connection already holds the global read lock.
++      This can be the case with:
++      - FLUSH TABLES WITH READ LOCK
++      - SET GLOBAL READ_ONLY = 1
++    */
++    result= sys_var_bool_ptr::update(thd, var);
++    DBUG_RETURN(result);
++  }
++
++  /*
++    Perform a 'FLUSH TABLES WITH READ LOCK'.
++    This is a 3 step process:
++    - [1] lock_global_read_lock()
++    - [2] close_cached_tables()
++    - [3] make_global_read_lock_block_commit()
++    [1] prevents new connections from obtaining tables locked for write.
++    [2] waits until all existing connections close their tables.
++    [3] prevents transactions from being committed.
++  */
++
++  if (lock_global_read_lock(thd))
++    DBUG_RETURN(true);
++
++  /*
++    This call will be blocked by any connection holding a READ or WRITE lock.
++    Ideally, we want to wait only for pending WRITE locks, but since:
++    con 1> LOCK TABLE T FOR READ;
++    con 2> LOCK TABLE T FOR WRITE; (blocked by con 1)
++    con 3> SET GLOBAL READ ONLY=1; (blocked by con 2)
++    this can cause a wait on a read lock, the client application is required
++    to unlock everything, and it is acceptable for the server to wait on all locks.
++  */
++  if ((result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE)))
++    goto end_with_read_lock;
++
++  if ((result= make_global_read_lock_block_commit(thd)))
++    goto end_with_read_lock;
++
++  /* Change the opt_readonly system variable, safe because the lock is held */
++  result= sys_var_bool_ptr::update(thd, var);
++
++end_with_read_lock:
++  /* Release the lock */
++  unlock_global_read_lock(thd);
++  DBUG_RETURN(result);
++}
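
   A skeleton of the same lock / flush / block-commit sequence with stubbed
   lock primitives, showing the goto-based unwind used above (the stubs only
   illustrate control flow, they do nothing):

    #include <cstdio>

    // Stubs standing in for the server's global read lock primitives.
    static bool lock_global_read_lock()              { return false; }
    static bool close_cached_tables()                { return false; }
    static bool make_global_read_lock_block_commit() { return false; }
    static void unlock_global_read_lock()            {}

    static bool set_read_only(bool value, bool *opt_readonly)
    {
      bool result;
      if (lock_global_read_lock())                   // step 1: block writers
        return true;
      if ((result= close_cached_tables()))           // step 2: wait for open tables
        goto end_with_read_lock;
      if ((result= make_global_read_lock_block_commit()))  // step 3: block commits
        goto end_with_read_lock;
      *opt_readonly= value;                          // safe: the lock is held
      result= false;
    end_with_read_lock:
      unlock_global_read_lock();
      return result;
    }

    int main()
    {
      bool ro= false;
      return set_read_only(true, &ro) || !ro;
    }
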
++
++
++#ifndef DBUG_OFF
++/* even session variable here requires SUPER, because of -#o,file */
++bool sys_var_thd_dbug::check(THD *thd, set_var *var)
++{
++  return check_global_access(thd, SUPER_ACL);
++}
++
++bool sys_var_thd_dbug::update(THD *thd, set_var *var)
++{
++  char buf[256];
++  String str(buf, sizeof(buf), system_charset_info), *res;
++
++  res= var->value->val_str(&str);
++
++  if (var->type == OPT_GLOBAL)
++    DBUG_SET_INITIAL(res ? res->c_ptr() : "");
++  else
++    DBUG_SET(res ? res->c_ptr() : "");
++
++  return 0;
++}
++
++
++uchar *sys_var_thd_dbug::value_ptr(THD *thd, enum_var_type type, LEX_STRING *b)
++{
++  char buf[256];
++  if (type == OPT_GLOBAL)
++    DBUG_EXPLAIN_INITIAL(buf, sizeof(buf));
++  else
++    DBUG_EXPLAIN(buf, sizeof(buf));
++  return (uchar*) thd->strdup(buf);
++}
++#endif /* DBUG_OFF */
++
++
++#ifdef HAVE_EVENT_SCHEDULER
++bool sys_var_event_scheduler::check(THD *thd, set_var *var)
++{
++  return check_enum(thd, var, &Events::var_typelib);
++}
++
++/*
++   The update method of the global variable event_scheduler.
++   If event_scheduler is switched from 0 to 1, the scheduler main
++   thread is resumed; if it is switched from 1 to 0, the scheduler thread is suspended.
++
++   SYNOPSIS
++     sys_var_event_scheduler::update()
++       thd  Thread context (unused)
++       var  The new value
++
++   Returns
++     FALSE  OK
++     TRUE   Error
++*/
++
++bool
++sys_var_event_scheduler::update(THD *thd, set_var *var)
++{
++  int res;
++  /* here start the thread if not running. */
++  DBUG_ENTER("sys_var_event_scheduler::update");
++  DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
++
++  enum Events::enum_opt_event_scheduler
++    new_state=
++    (enum Events::enum_opt_event_scheduler) var->save_result.ulong_value;
++
++  res= Events::switch_event_scheduler_state(new_state);
++
++  DBUG_RETURN((bool) res);
++}
++
++
++uchar *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type,
++                                         LEX_STRING *base)
++{
++  return (uchar *) Events::get_opt_event_scheduler_str();
++}
++#endif
++
++
++int 
++check_max_allowed_packet(THD *thd,  set_var *var)
++{
++  longlong val= var->value->val_int();
++  if (val < (longlong) global_system_variables.net_buffer_length)
++  {
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++                        ER_UNKNOWN_ERROR, 
++                        "The value of 'max_allowed_packet' should be no less than "
++                        "the value of 'net_buffer_length'");
++  }
++  return 0;
++}
++
++
++int 
++check_net_buffer_length(THD *thd,  set_var *var)
++{
++  longlong val= var->value->val_int();
++  if (val > (longlong) global_system_variables.max_allowed_packet)
++  {
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++                        ER_UNKNOWN_ERROR, 
++                        "The value of 'max_allowed_packet' should be no less than "
++                        "the value of 'net_buffer_length'");
++  }
++  return 0;
++}
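
   Both checks above enforce the same invariant, net_buffer_length <=
   max_allowed_packet. A standalone version of that validation, with the
   push_warning reduced to a printf and a hypothetical NetLimits holder:

    #include <cstdio>

    // Hypothetical holder for the two related limits.
    struct NetLimits {
      long long net_buffer_length;
      long long max_allowed_packet;
    };

    // Returns true and warns when the invariant would be violated.
    static bool warn_if_inconsistent(const NetLimits &l)
    {
      if (l.max_allowed_packet < l.net_buffer_length)
      {
        fprintf(stderr, "warning: 'max_allowed_packet' should be no less "
                        "than 'net_buffer_length'\n");
        return true;
      }
      return false;
    }

    int main()
    {
      NetLimits l= { 16384, 1048576 };
      return warn_if_inconsistent(l) ? 1 : 0;
    }
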
++
++/****************************************************************************
++  Used templates
++****************************************************************************/
++
++#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
++template class List<set_var_base>;
++template class List_iterator_fast<set_var_base>;
++template class I_List_iterator<NAMED_LIST>;
++#endif
+diff -urN mysql-old/sql/slave.cc mysql/sql/slave.cc
+--- mysql-old/sql/slave.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/slave.cc	2011-05-10 17:56:01.483349044 +0000
+@@ -1737,7 +1737,7 @@
+         special marker to say "consider we have caught up".
+       */
+       protocol->store((longlong)(mi->rli.last_master_timestamp ?
+-                                 max(0, time_diff) : 0));
++                                 MYSQL_MAX(0, time_diff) : 0));
+     }
+     else
+     {
+@@ -2354,7 +2354,7 @@
+             exec_res= 0;
+             rli->cleanup_context(thd, 1);
+             /* chance for concurrent connection to get more locks */
+-            safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
++            safe_sleep(thd, MYSQL_MIN(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
+                        (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
+             pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS
+             rli->trans_retries++;
+@@ -4050,7 +4050,7 @@
+     relay_log_pos       Current log pos
+     pending             Number of bytes already processed from the event
+   */
+-  rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
++  rli->event_relay_log_pos= MYSQL_MAX(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
+   my_b_seek(cur_log,rli->event_relay_log_pos);
+   DBUG_RETURN(cur_log);
+ }
+diff -urN mysql-old/sql/spatial.h mysql/sql/spatial.h
+--- mysql-old/sql/spatial.h	2011-05-10 17:45:45.640015709 +0000
++++ mysql/sql/spatial.h	2011-05-10 17:56:01.486682377 +0000
+@@ -180,8 +180,8 @@
+     if (d != mbr->dimension() || d <= 0 || contains(mbr) || within(mbr))
+       return 0;
+ 
+-    MBR intersection(max(xmin, mbr->xmin), max(ymin, mbr->ymin),
+-                     min(xmax, mbr->xmax), min(ymax, mbr->ymax));
++    MBR intersection(MYSQL_MAX(xmin, mbr->xmin), MYSQL_MAX(ymin, mbr->ymin),
++                     MYSQL_MIN(xmax, mbr->xmax), MYSQL_MIN(ymax, mbr->ymax));
+ 
+     return (d == intersection.dimension());
+   }
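
   Since the whole patch replaces min()/max() with MYSQL_MIN()/MYSQL_MAX(),
   here is a small standalone check of the spatial.h hunk above, assuming
   the macros are the usual ternary definitions (the actual definitions come
   from the patched MySQL headers):

    #include <cstdio>

    // Assumed macro definitions for this sketch only.
    #define MYSQL_MIN(a,b) ((a) < (b) ? (a) : (b))
    #define MYSQL_MAX(a,b) ((a) > (b) ? (a) : (b))

    struct MBR { double xmin, ymin, xmax, ymax; };

    // Same expression as the hunk: the overlap of two bounding boxes is a
    // componentwise max of the lower corners and min of the upper corners.
    static MBR intersection(const MBR &a, const MBR &b)
    {
      MBR r= { MYSQL_MAX(a.xmin, b.xmin), MYSQL_MAX(a.ymin, b.ymin),
               MYSQL_MIN(a.xmax, b.xmax), MYSQL_MIN(a.ymax, b.ymax) };
      return r;
    }

    int main()
    {
      MBR a= { 0, 0, 4, 4 }, b= { 2, 2, 6, 6 };
      MBR r= intersection(a, b);
      printf("%g %g %g %g\n", r.xmin, r.ymin, r.xmax, r.ymax);  // 2 2 4 4
      return 0;
    }
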
+diff -urN mysql-old/sql/sp_head.cc mysql/sql/sp_head.cc
+--- mysql-old/sql/sp_head.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sp_head.cc	2011-05-10 17:56:01.486682377 +0000
+@@ -2453,7 +2453,7 @@
+ 
+     Item_empty_string *stmt_fld=
+       new Item_empty_string(col3_caption,
+-                            max(m_defstr.length, 1024));
++                            MYSQL_MAX(m_defstr.length, 1024));
+ 
+     stmt_fld->maybe_null= TRUE;
+ 
+@@ -2654,7 +2654,7 @@
+   field_list.push_back(new Item_uint("Pos", 9));
+   // 1024 is for not to confuse old clients
+   field_list.push_back(new Item_empty_string("Instruction",
+-					     max(buffer.length(), 1024)));
++					     MYSQL_MAX(buffer.length(), 1024)));
+   if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
+                                          Protocol::SEND_EOF))
+     DBUG_RETURN(1);
+diff -urN mysql-old/sql/sql_acl.cc mysql/sql/sql_acl.cc
+--- mysql-old/sql/sql_acl.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/sql_acl.cc	2011-05-10 17:56:01.490015710 +0000
+@@ -824,7 +824,7 @@
+         chars= 128;                             // Marker that chars existed
+       }
+     }
+-    sort= (sort << 8) + (wild_pos ? min(wild_pos, 127) : chars);
++    sort= (sort << 8) + (wild_pos ? MYSQL_MIN(wild_pos, 127) : chars);
+   }
+   va_end(args);
+   return sort;
+diff -urN mysql-old/sql/sql_analyse.cc mysql/sql/sql_analyse.cc
+--- mysql-old/sql/sql_analyse.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_analyse.cc	2011-05-10 17:56:01.493349043 +0000
+@@ -280,16 +280,16 @@
+   {
+     if (((longlong) info->ullval) < 0)
+       return 0; // Impossible to store as a negative number
+-    ev_info->llval =  -(longlong) max((ulonglong) -ev_info->llval, 
++    ev_info->llval =  -(longlong) MYSQL_MAX((ulonglong) -ev_info->llval, 
+ 				      info->ullval);
+-    ev_info->min_dval = (double) -max(-ev_info->min_dval, info->dval);
++    ev_info->min_dval = (double) -MYSQL_MAX(-ev_info->min_dval, info->dval);
+   }
+   else		// ulonglong is as big as bigint in MySQL
+   {
+     if ((check_ulonglong(num, info->integers) == DECIMAL_NUM))
+       return 0;
+-    ev_info->ullval = (ulonglong) max(ev_info->ullval, info->ullval);
+-    ev_info->max_dval =  (double) max(ev_info->max_dval, info->dval);
++    ev_info->ullval = (ulonglong) MYSQL_MAX(ev_info->ullval, info->ullval);
++    ev_info->max_dval =  (double) MYSQL_MAX(ev_info->max_dval, info->dval);
+   }
+   return 1;
+ } // get_ev_num_info
+@@ -1043,7 +1043,7 @@
+   my_decimal_div(E_DEC_FATAL_ERROR, &avg_val, sum+cur_sum, &num, prec_increment);
+   /* TODO remove this after decimal_div returns proper frac */
+   my_decimal_round(E_DEC_FATAL_ERROR, &avg_val,
+-                   min(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE),
++                   MYSQL_MIN(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE),
+                    FALSE,&rounded_avg);
+   my_decimal2string(E_DEC_FATAL_ERROR, &rounded_avg, 0, 0, '0', s);
+   return s;
+@@ -1068,7 +1068,7 @@
+   my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment);
+   my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr);
+   s->set_real(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)),
+-         min(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
++         MYSQL_MIN(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
+ 
+   return s;
+ }
+@@ -1185,7 +1185,7 @@
+   func_items[8] = new Item_proc_string("Std", 255);
+   func_items[8]->maybe_null = 1;
+   func_items[9] = new Item_proc_string("Optimal_fieldtype",
+-				       max(64, output_str_length));
++				       MYSQL_MAX(64, output_str_length));
+ 
+   for (uint i = 0; i < array_elements(func_items); i++)
+     field_list.push_back(func_items[i]);
+diff -urN mysql-old/sql/sql_cache.cc mysql/sql/sql_cache.cc
+--- mysql-old/sql/sql_cache.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_cache.cc	2011-05-10 17:56:01.493349043 +0000
+@@ -1004,7 +1004,7 @@
+     }
+     last_result_block= header->result()->prev;
+     allign_size= ALIGN_SIZE(last_result_block->used);
+-    len= max(query_cache.min_allocation_unit, allign_size);
++    len= MYSQL_MAX(query_cache.min_allocation_unit, allign_size);
+     if (last_result_block->length >= query_cache.min_allocation_unit + len)
+       query_cache.split_block(last_result_block,len);
+ 
+@@ -2425,7 +2425,7 @@
+   DBUG_ENTER("Query_cache::write_block_data");
+   DBUG_PRINT("qcache", ("data: %ld, header: %ld, all header: %ld",
+ 		      data_len, header_len, all_headers_len));
+-  Query_cache_block *block= allocate_block(max(align_len,
++  Query_cache_block *block= allocate_block(MYSQL_MAX(align_len,
+                                            min_allocation_unit),1, 0);
+   if (block != 0)
+   {
+@@ -2480,7 +2480,7 @@
+   ulong append_min = get_min_append_result_data_size();
+   if (last_block_free_space < data_len &&
+       append_next_free_block(last_block,
+-			     max(tail, append_min)))
++			     MYSQL_MAX(tail, append_min)))
+     last_block_free_space = last_block->length - last_block->used;
+   // If no space in last block (even after join) allocate new block
+   if (last_block_free_space < data_len)
+@@ -2508,7 +2508,7 @@
+   // Now finally write data to the last block
+   if (success && last_block_free_space > 0)
+   {
+-    ulong to_copy = min(data_len,last_block_free_space);
++    ulong to_copy = MYSQL_MIN(data_len,last_block_free_space);
+     DBUG_PRINT("qcache", ("use free space %lub at block 0x%lx to copy %lub",
+ 			last_block_free_space, (ulong)last_block, to_copy));
+     memcpy((uchar*) last_block + last_block->used, data, to_copy);
+@@ -2596,8 +2596,8 @@
+   if (queries_in_cache < QUERY_CACHE_MIN_ESTIMATED_QUERIES_NUMBER)
+     return min_result_data_size;
+   ulong avg_result = (query_cache_size - free_memory) / queries_in_cache;
+-  avg_result = min(avg_result, query_cache_limit);
+-  return max(min_result_data_size, avg_result);
++  avg_result = MYSQL_MIN(avg_result, query_cache_limit);
++  return MYSQL_MAX(min_result_data_size, avg_result);
+ }
+ 
+ inline ulong Query_cache::get_min_append_result_data_size()
+@@ -2629,7 +2629,7 @@
+     ulong len= data_len + all_headers_len;
+     ulong align_len= ALIGN_SIZE(len);
+ 
+-    if (!(new_block= allocate_block(max(min_size, align_len),
++    if (!(new_block= allocate_block(MYSQL_MAX(min_size, align_len),
+ 				    min_result_data_size == 0,
+ 				    all_headers_len + min_result_data_size)))
+     {
+@@ -2638,7 +2638,7 @@
+     }
+ 
+     new_block->n_tables = 0;
+-    new_block->used = min(len, new_block->length);
++    new_block->used = MYSQL_MIN(len, new_block->length);
+     new_block->type = Query_cache_block::RES_INCOMPLETE;
+     new_block->next = new_block->prev = new_block;
+     Query_cache_result *header = new_block->result();
+@@ -3061,7 +3061,7 @@
+   DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu",
+              len, not_less,min));
+ 
+-  if (len >= min(query_cache_size, query_cache_limit))
++  if (len >= MYSQL_MIN(query_cache_size, query_cache_limit))
+   {
+     DBUG_PRINT("qcache", ("Query cache hase only %lu memory and limit %lu",
+ 			query_cache_size, query_cache_limit));
+diff -urN mysql-old/sql/sql_class.cc mysql/sql/sql_class.cc
+--- mysql-old/sql/sql_class.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/sql_class.cc	2011-05-10 17:56:01.500015709 +0000
+@@ -416,7 +416,7 @@
+     if (max_query_len < 1)
+       len= thd->query_length();
+     else
+-      len= min(thd->query_length(), max_query_len);
++      len= MYSQL_MIN(thd->query_length(), max_query_len);
+     str.append('\n');
+     str.append(thd->query(), len);
+   }
+@@ -431,7 +431,7 @@
+     was reallocated to a larger buffer to be able to fit.
+   */
+   DBUG_ASSERT(buffer != NULL);
+-  length= min(str.length(), length-1);
++  length= MYSQL_MIN(str.length(), length-1);
+   memcpy(buffer, str.c_ptr_quick(), length);
+   /* Make sure that the new string is null terminated */
+   buffer[length]= '\0';
+@@ -2082,7 +2082,7 @@
+     else
+     {
+       if (fixed_row_size)
+-	used_length=min(res->length(),item->max_length);
++	used_length=MYSQL_MIN(res->length(),item->max_length);
+       else
+ 	used_length=res->length();
+       if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
+diff -urN mysql-old/sql/sql_client.cc mysql/sql/sql_client.cc
+--- mysql-old/sql/sql_client.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_client.cc	2011-05-10 17:56:01.500015709 +0000
+@@ -34,7 +34,7 @@
+                            (uint)global_system_variables.net_write_timeout);
+ 
+   net->retry_count=  (uint) global_system_variables.net_retry_count;
+-  net->max_packet_size= max(global_system_variables.net_buffer_length,
++  net->max_packet_size= MYSQL_MAX(global_system_variables.net_buffer_length,
+ 			    global_system_variables.max_allowed_packet);
+ #endif
+ }
+diff -urN mysql-old/sql/sql_connect.cc mysql/sql/sql_connect.cc
+--- mysql-old/sql/sql_connect.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/sql_connect.cc	2011-05-10 17:56:01.503349042 +0000
+@@ -789,7 +789,7 @@
+       if (thd->main_security_ctx.host)
+       {
+         if (thd->main_security_ctx.host != my_localhost)
+-          thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host),
++          thd->main_security_ctx.host[MYSQL_MIN(strlen(thd->main_security_ctx.host),
+                                           HOSTNAME_LENGTH)]= 0;
+         thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host;
+       }
+diff -urN mysql-old/sql/sql_connect.cc.orig mysql/sql/sql_connect.cc.orig
+--- mysql-old/sql/sql_connect.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/sql_connect.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,1315 @@
++/* Copyright (C) 2007 MySQL AB
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++
++/*
++  Functions to authenticate and handle requests for a connection
++*/
++
++#include "mysql_priv.h"
++
++#ifdef HAVE_OPENSSL
++/*
++  Without SSL the handshake consists of one packet. This packet
++  has both the client capabilities and the scrambled password.
++  With SSL the handshake might consist of two packets. If the first
++  packet (client capabilities) has CLIENT_SSL flag set, we have to
++  switch to SSL and read the second packet. The scrambled password
++  is in the second packet and the client_capabilities field will be ignored.
++  Maybe it is better to accept flags other than CLIENT_SSL from the
++  second packet?
++*/
++#define SSL_HANDSHAKE_SIZE      2
++#define NORMAL_HANDSHAKE_SIZE   6
++#define MIN_HANDSHAKE_SIZE      2
++#else
++#define MIN_HANDSHAKE_SIZE      6
++#endif /* HAVE_OPENSSL */
++
++#ifdef __WIN__
++extern void win_install_sigabrt_handler();
++#endif
++
++/*
++  Get structure for logging connection data for the current user
++*/
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++static HASH hash_user_connections;
++
++static int get_or_create_user_conn(THD *thd, const char *user,
++				   const char *host,
++				   USER_RESOURCES *mqh)
++{
++  int return_val= 0;
++  size_t temp_len, user_len;
++  char temp_user[USER_HOST_BUFF_SIZE];
++  struct  user_conn *uc;
++
++  DBUG_ASSERT(user != 0);
++  DBUG_ASSERT(host != 0);
++
++  user_len= strlen(user);
++  temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1;
++  (void) pthread_mutex_lock(&LOCK_user_conn);
++  if (!(uc = (struct  user_conn *) hash_search(&hash_user_connections,
++					       (uchar*) temp_user, temp_len)))
++  {
++    /* First connection for user; Create a user connection object */
++    if (!(uc= ((struct user_conn*)
++	       my_malloc(sizeof(struct user_conn) + temp_len+1,
++			 MYF(MY_WME)))))
++    {
++      /* MY_WME ensures an error is set in THD. */
++      return_val= 1;
++      goto end;
++    }
++    uc->user=(char*) (uc+1);
++    memcpy(uc->user,temp_user,temp_len+1);
++    uc->host= uc->user + user_len +  1;
++    uc->len= temp_len;
++    uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0;
++    uc->user_resources= *mqh;
++    uc->reset_utime= thd->thr_create_utime;
++    if (my_hash_insert(&hash_user_connections, (uchar*) uc))
++    {
++      /* The only possible error is out of memory, MY_WME sets an error. */
++      my_free((char*) uc,0);
++      return_val= 1;
++      goto end;
++    }
++  }
++  thd->user_connect=uc;
++  uc->connections++;
++end:
++  (void) pthread_mutex_unlock(&LOCK_user_conn);
++  return return_val;
++
++}
++
++
++/*
++  Check if the user already has too many connections
++  
++  SYNOPSIS
++  check_for_max_user_connections()
++  thd			Thread handle
++  uc			User connect object
++
++  NOTES
++    If check fails, we decrease user connection count, which means one
++    shouldn't call decrease_user_connections() after this function.
++
++  RETURN
++    0	ok
++    1	error
++*/
++
++static
++int check_for_max_user_connections(THD *thd, USER_CONN *uc)
++{
++  int error=0;
++  DBUG_ENTER("check_for_max_user_connections");
++
++  (void) pthread_mutex_lock(&LOCK_user_conn);
++  if (max_user_connections && !uc->user_resources.user_conn &&
++      max_user_connections < (uint) uc->connections)
++  {
++    my_error(ER_TOO_MANY_USER_CONNECTIONS, MYF(0), uc->user);
++    error=1;
++    goto end;
++  }
++  time_out_user_resource_limits(thd, uc);
++  if (uc->user_resources.user_conn &&
++      uc->user_resources.user_conn < uc->connections)
++  {
++    my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user,
++             "max_user_connections",
++             (long) uc->user_resources.user_conn);
++    error= 1;
++    goto end;
++  }
++  if (uc->user_resources.conn_per_hour &&
++      uc->user_resources.conn_per_hour <= uc->conn_per_hour)
++  {
++    my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user,
++             "max_connections_per_hour",
++             (long) uc->user_resources.conn_per_hour);
++    error=1;
++    goto end;
++  }
++  uc->conn_per_hour++;
++
++end:
++  if (error)
++    uc->connections--; // no need for decrease_user_connections() here
++  (void) pthread_mutex_unlock(&LOCK_user_conn);
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Decrease user connection count
++
++  SYNOPSIS
++    decrease_user_connections()
++    uc			User connection object
++
++  NOTES
++    If there is a user connection object for a connection
++    (which only happens if 'max_user_connections' is defined or
++    if someone has created a resource grant for a user), then
++    the connection count is always incremented on connect.
++
++    The user connect object is not freed if some user has
++    'max connections per hour' defined, as we need to be able to keep
++    the count over the lifetime of the connection.
++*/
++
++void decrease_user_connections(USER_CONN *uc)
++{
++  DBUG_ENTER("decrease_user_connections");
++  (void) pthread_mutex_lock(&LOCK_user_conn);
++  DBUG_ASSERT(uc->connections);
++  if (!--uc->connections && !mqh_used)
++  {
++    /* Last connection for user; Delete it */
++    (void) hash_delete(&hash_user_connections,(uchar*) uc);
++  }
++  (void) pthread_mutex_unlock(&LOCK_user_conn);
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++  Reset per-hour user resource limits when it has been more than
++  an hour since they were last checked
++
++  SYNOPSIS:
++    time_out_user_resource_limits()
++    thd			Thread handler
++    uc			User connection details
++
++  NOTE:
++    This assumes that the LOCK_user_conn mutex has been acquired, so it is
++    safe to test and modify members of the USER_CONN structure.
++*/
++
++void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
++{
++  ulonglong check_time= thd->start_utime;
++  DBUG_ENTER("time_out_user_resource_limits");
++
++  /* If more than an hour since the last check, reset resource checking */
++  if (check_time  - uc->reset_utime >= LL(3600000000))
++  {
++    uc->questions=1;
++    uc->updates=0;
++    uc->conn_per_hour=0;
++    uc->reset_utime= check_time;
++  }
++
++  DBUG_VOID_RETURN;
++}
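
   A compact sketch of the hourly window reset done above, with a plain
   integer standing in for the server's microsecond clock and a reduced
   stand-in for the USER_CONN counters:

    #include <cstdint>

    // Stand-in for the per-user resource counters kept in USER_CONN.
    struct UserCounters {
      uint64_t reset_utime;        // start of the current accounting window
      unsigned questions, updates, conn_per_hour;
    };

    static const uint64_t HOUR_USEC= 3600000000ULL;

    // Same rule as time_out_user_resource_limits(): once an hour has
    // passed, restart the window and zero the counters.
    static void maybe_reset(UserCounters *uc, uint64_t now_usec)
    {
      if (now_usec - uc->reset_utime >= HOUR_USEC)
      {
        uc->questions= 1;
        uc->updates= 0;
        uc->conn_per_hour= 0;
        uc->reset_utime= now_usec;
      }
    }

    int main()
    {
      UserCounters uc= { 0, 10, 5, 3 };
      maybe_reset(&uc, HOUR_USEC);      // exactly one hour later
      return (uc.questions == 1 && uc.updates == 0) ? 0 : 1;
    }
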
++
++/*
++  Check if maximum queries per hour limit has been reached
++  returns 0 if OK.
++*/
++
++bool check_mqh(THD *thd, uint check_command)
++{
++  bool error= 0;
++  USER_CONN *uc=thd->user_connect;
++  DBUG_ENTER("check_mqh");
++  DBUG_ASSERT(uc != 0);
++
++  (void) pthread_mutex_lock(&LOCK_user_conn);
++
++  time_out_user_resource_limits(thd, uc);
++
++  /* Check that we have not done too many questions / hour */
++  if (uc->user_resources.questions &&
++      uc->questions++ >= uc->user_resources.questions)
++  {
++    my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_questions",
++             (long) uc->user_resources.questions);
++    error=1;
++    goto end;
++  }
++  if (check_command < (uint) SQLCOM_END)
++  {
++    /* Check that we have not done too many updates / hour */
++    if (uc->user_resources.updates &&
++        (sql_command_flags[check_command] & CF_CHANGES_DATA) &&
++	uc->updates++ >= uc->user_resources.updates)
++    {
++      my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_updates",
++               (long) uc->user_resources.updates);
++      error=1;
++      goto end;
++    }
++  }
++end:
++  (void) pthread_mutex_unlock(&LOCK_user_conn);
++  DBUG_RETURN(error);
++}
++
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++
++
++/**
++  Check if user exist and password supplied is correct.
++
++  @param  thd         thread handle, thd->security_ctx->{host,user,ip} are used
++  @param  command     originator of the check: now check_user is called
++                      during connect and change user procedures; used for
++                      logging.
++  @param  passwd      scrambled password received from client
++  @param  passwd_len  length of scrambled password
++  @param  db          database name to connect to, may be NULL
++  @param  check_count TRUE if establishing a new connection. In this case
++                      check that we have not exceeded the global
++                      max_connections limit
++
++  @note Host, user and passwd may point to communication buffer.
++  Current implementation does not depend on that, but future changes
++  should be done with this in mind; 'thd' is INOUT, all other params
++  are 'IN'.
++
++  @retval  0  OK; thd->security_ctx->user/master_access/priv_user/db_access and
++              thd->db are updated; OK is sent to the client.
++  @retval  1  error, e.g. access denied or handshake error, not sent to
++              the client. A message is pushed into the error stack.
++*/
++
++int
++check_user(THD *thd, enum enum_server_command command,
++	       const char *passwd, uint passwd_len, const char *db,
++	       bool check_count)
++{
++  DBUG_ENTER("check_user");
++  LEX_STRING db_str= { (char *) db, db ? strlen(db) : 0 };
++
++  /*
++    Clear thd->db as it points to something, that will be freed when
++    connection is closed. We don't want to accidentally free a wrong
++    pointer if connect failed. Also in case of 'CHANGE USER' failure,
++    current database will be switched to 'no database selected'.
++  */
++  thd->reset_db(NULL, 0);
++
++#ifdef NO_EMBEDDED_ACCESS_CHECKS
++  thd->main_security_ctx.master_access= GLOBAL_ACLS;       // Full rights
++  /* Change database if necessary */
++  if (db && db[0])
++  {
++    if (mysql_change_db(thd, &db_str, FALSE))
++      DBUG_RETURN(1);
++  }
++  my_ok(thd);
++  DBUG_RETURN(0);
++#else
++
++  my_bool opt_secure_auth_local;
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  opt_secure_auth_local= opt_secure_auth;
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  
++  /*
++    If the server is running in secure auth mode, short scrambles are 
++    forbidden.
++  */
++  if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323)
++  {
++    my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0));
++    general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
++    DBUG_RETURN(1);
++  }
++  if (passwd_len != 0 &&
++      passwd_len != SCRAMBLE_LENGTH &&
++      passwd_len != SCRAMBLE_LENGTH_323)
++  {
++    my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++    DBUG_RETURN(1);
++  }
++
++  USER_RESOURCES ur;
++  int res= acl_getroot(thd, &ur, passwd, passwd_len);
++#ifndef EMBEDDED_LIBRARY
++  if (res == -1)
++  {
++    /*
++      This happens when client (new) sends password scrambled with
++      scramble(), but database holds old value (scrambled with
++      scramble_323()). Here we ask the client to send the scrambled password
++      in the old format.
++    */
++    NET *net= &thd->net;
++    if (opt_secure_auth_local)
++    {
++      my_error(ER_SERVER_IS_IN_SECURE_AUTH_MODE, MYF(0),
++               thd->main_security_ctx.user,
++               thd->main_security_ctx.host_or_ip);
++      general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE),
++                        thd->main_security_ctx.user,
++                        thd->main_security_ctx.host_or_ip);
++      DBUG_RETURN(1);
++    }
++    /* We have to read very specific packet size */
++    if (send_old_password_request(thd) ||
++        my_net_read(net) != SCRAMBLE_LENGTH_323 + 1)
++    {
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      DBUG_RETURN(1);
++    }
++    /* Final attempt to check the user based on reply */
++    /* Since passwd is short, errcode is always >= 0 */
++    res= acl_getroot(thd, &ur, (char *) net->read_pos, SCRAMBLE_LENGTH_323);
++  }
++#endif /*EMBEDDED_LIBRARY*/
++  /* here res is always >= 0 */
++  if (res == 0)
++  {
++    if (!(thd->main_security_ctx.master_access &
++          NO_ACCESS)) // authentication is OK
++    {
++      DBUG_PRINT("info",
++                 ("Capabilities: %lu  packet_length: %ld  Host: '%s'  "
++                  "Login user: '%s' Priv_user: '%s'  Using password: %s "
++                  "Access: %lu  db: '%s'",
++                  thd->client_capabilities,
++                  thd->max_client_packet_length,
++                  thd->main_security_ctx.host_or_ip,
++                  thd->main_security_ctx.user,
++                  thd->main_security_ctx.priv_user,
++                  passwd_len ? "yes": "no",
++                  thd->main_security_ctx.master_access,
++                  (thd->db ? thd->db : "*none*")));
++
++      if (check_count)
++      {
++        pthread_mutex_lock(&LOCK_connection_count);
++        bool count_ok= connection_count <= max_connections ||
++                       (thd->main_security_ctx.master_access & SUPER_ACL);
++        VOID(pthread_mutex_unlock(&LOCK_connection_count));
++
++        if (!count_ok)
++        {                                         // too many connections
++          my_error(ER_CON_COUNT_ERROR, MYF(0));
++          DBUG_RETURN(1);
++        }
++      }
++
++      /*
++        Log the command before authentication checks, so that the user can
++        check the log for the attempted login and also to detect
++        break-in attempts.
++      */
++      general_log_print(thd, command,
++                        (thd->main_security_ctx.priv_user ==
++                         thd->main_security_ctx.user ?
++                         (char*) "%s@%s on %s" :
++                         (char*) "%s@%s as anonymous on %s"),
++                        thd->main_security_ctx.user,
++                        thd->main_security_ctx.host_or_ip,
++                        db ? db : (char*) "");
++
++      /*
++        These are the default access rights for the current database.  They are
++        set to 0 here because we don't have an active database yet (and we
++        may not have an active database to set).
++      */
++      thd->main_security_ctx.db_access=0;
++
++      /* Don't allow user to connect if he has done too many queries */
++      if ((ur.questions || ur.updates || ur.conn_per_hour || ur.user_conn ||
++	   max_user_connections) &&
++	  get_or_create_user_conn(thd,
++            (opt_old_style_user_limits ? thd->main_security_ctx.user :
++             thd->main_security_ctx.priv_user),
++            (opt_old_style_user_limits ? thd->main_security_ctx.host_or_ip :
++             thd->main_security_ctx.priv_host),
++            &ur))
++      {
++        /* The error is set by get_or_create_user_conn(). */
++	DBUG_RETURN(1);
++      }
++      if (thd->user_connect &&
++	  (thd->user_connect->user_resources.conn_per_hour ||
++	   thd->user_connect->user_resources.user_conn ||
++	   max_user_connections) &&
++	  check_for_max_user_connections(thd, thd->user_connect))
++      {
++        /* The error is set in check_for_max_user_connections(). */
++        DBUG_RETURN(1);
++      }
++
++      /* Change database if necessary */
++      if (db && db[0])
++      {
++        if (mysql_change_db(thd, &db_str, FALSE))
++        {
++          /* mysql_change_db() has pushed the error message. */
++          if (thd->user_connect)
++            decrease_user_connections(thd->user_connect);
++          DBUG_RETURN(1);
++        }
++      }
++      my_ok(thd);
++      thd->password= test(passwd_len);          // remember for error messages 
++#ifndef EMBEDDED_LIBRARY
++      /*
++        Allow the network layer to skip big packets. Although a malicious
++        authenticated session might use this to trick the server to read
++        big packets indefinitely, this is a previously established behavior
++        that needs to be preserved as to not break backwards compatibility.
++      */
++      thd->net.skip_big_packet= TRUE;
++#endif
++      /* Ready to handle queries */
++      DBUG_RETURN(0);
++    }
++  }
++  else if (res == 2) // client gave short hash, server has long hash
++  {
++    my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0));
++    general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE));
++    DBUG_RETURN(1);
++  }
++  my_error(ER_ACCESS_DENIED_ERROR, MYF(0),
++           thd->main_security_ctx.user,
++           thd->main_security_ctx.host_or_ip,
++           passwd_len ? ER(ER_YES) : ER(ER_NO));
++  general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR),
++                    thd->main_security_ctx.user,
++                    thd->main_security_ctx.host_or_ip,
++                    passwd_len ? ER(ER_YES) : ER(ER_NO));
++  DBUG_RETURN(1);
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++}
++
++
++/*
++  Check for the maximum allowable user connections, if the mysqld server is
++  started with the corresponding variable set to a value greater than 0.
++*/
++
++extern "C" uchar *get_key_conn(user_conn *buff, size_t *length,
++			      my_bool not_used __attribute__((unused)))
++{
++  *length= buff->len;
++  return (uchar*) buff->user;
++}
++
++
++extern "C" void free_user(struct user_conn *uc)
++{
++  my_free((char*) uc,MYF(0));
++}
++
++
++void init_max_user_conn(void)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  (void) hash_init(&hash_user_connections,system_charset_info,max_connections,
++		   0,0,
++		   (hash_get_key) get_key_conn, (hash_free_key) free_user,
++		   0);
++#endif
++}
++
++
++void free_max_user_conn(void)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  hash_free(&hash_user_connections);
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++}
++
++
++void reset_mqh(LEX_USER *lu, bool get_them= 0)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  (void) pthread_mutex_lock(&LOCK_user_conn);
++  if (lu)  // for GRANT
++  {
++    USER_CONN *uc;
++    uint temp_len=lu->user.length+lu->host.length+2;
++    char temp_user[USER_HOST_BUFF_SIZE];
++
++    memcpy(temp_user,lu->user.str,lu->user.length);
++    memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length);
++    temp_user[lu->user.length]='\0'; temp_user[temp_len-1]=0;
++    if ((uc = (struct  user_conn *) hash_search(&hash_user_connections,
++						(uchar*) temp_user, temp_len)))
++    {
++      uc->questions=0;
++      get_mqh(temp_user,&temp_user[lu->user.length+1],uc);
++      uc->updates=0;
++      uc->conn_per_hour=0;
++    }
++  }
++  else
++  {
++    /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */
++    for (uint idx=0;idx < hash_user_connections.records; idx++)
++    {
++      USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections,
++						      idx);
++      if (get_them)
++	get_mqh(uc->user,uc->host,uc);
++      uc->questions=0;
++      uc->updates=0;
++      uc->conn_per_hour=0;
++    }
++  }
++  (void) pthread_mutex_unlock(&LOCK_user_conn);
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++}
++
++
++/**
++  Set thread character set variables from the given ID
++
++  @param  thd         thread handle
++  @param  cs_number   character set and collation ID
++
++  @retval  0  OK; character_set_client, collation_connection and
++              character_set_results are set to the new value,
++              or to the default global values.
++
++  @retval  1  error, e.g. the given ID is not supported by parser.
++              Corresponding SQL error is sent.
++*/
++
++bool thd_init_client_charset(THD *thd, uint cs_number)
++{
++  CHARSET_INFO *cs;
++  /*
++   Use server character set and collation if
++   - opt_character_set_client_handshake is not set
++   - client has not specified a character set
++   - the client character set is the same as the server's
++   - the client character set doesn't exist in the server
++  */
++  if (!opt_character_set_client_handshake ||
++      !(cs= get_charset(cs_number, MYF(0))) ||
++      !my_strcasecmp(&my_charset_latin1,
++                     global_system_variables.character_set_client->name,
++                     cs->name))
++  {
++    thd->variables.character_set_client=
++      global_system_variables.character_set_client;
++    thd->variables.collation_connection=
++      global_system_variables.collation_connection;
++    thd->variables.character_set_results=
++      global_system_variables.character_set_results;
++  }
++  else
++  {
++    if (!is_supported_parser_charset(cs))
++    {
++      /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */
++      my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client",
++               cs->csname);
++      return true;
++    }
++    thd->variables.character_set_results=
++      thd->variables.collation_connection= 
++      thd->variables.character_set_client= cs;
++  }
++  return false;
++}
++
++
++/*
++  Initialize connection threads
++*/
++
++bool init_new_connection_handler_thread()
++{
++  pthread_detach_this_thread();
++#if defined(__WIN__)
++  win_install_sigabrt_handler();
++#else
++  /* Win32 calls this in pthread_create */
++  if (my_thread_init())
++    return 1;
++#endif /* __WIN__ */
++  return 0;
++}
++
++#ifndef EMBEDDED_LIBRARY
++/**
++  Get a null character terminated string from a user-supplied buffer.
++
++  @param buffer[in, out]    Pointer to the buffer to be scanned.
++  @param max_bytes_available[in, out]  Limit the bytes to scan.
++  @param string_length[out] The number of characters scanned not including
++                            the null character.
++
++  @remark The string_length does not include the terminating null character.
++          However, after the call, the buffer is increased by string_length+1
++          bytes, beyond the null character if there are still available bytes to
++          scan.
++
++  @return pointer to beginning of the string scanned.
++    @retval NULL The buffer content is malformed
++*/
++
++static
++char *get_null_terminated_string(char **buffer,
++                                 size_t *max_bytes_available,
++                                 size_t *string_length)
++{
++  char *str= (char *)memchr(*buffer, '\0', *max_bytes_available);
++
++  if (str == NULL)
++    return NULL;
++
++  *string_length= (size_t)(str - *buffer);
++  *max_bytes_available-= *string_length + 1;
++  str= *buffer;
++  *buffer += *string_length + 1;  
++
++  return str;
++}
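++/*
++  [Editor's aside -- illustration added for this posting, not part of the
++  original patch] Example of the scan above: with *buffer pointing at the
++  bytes {'r','o','o','t','\0','x'} and *max_bytes_available == 6, the call
++  returns a pointer to "root", sets *string_length to 4, advances *buffer
++  past the terminating '\0' (now pointing at 'x') and leaves
++  *max_bytes_available == 1. If no '\0' is found within the limit, NULL is
++  returned and nothing is consumed.
++*/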
++
++
++/**
++  Get a length encoded string from a user-supplied buffer.
++
++  @param buffer[in, out] The buffer to scan; updates position after scan.
++  @param max_bytes_available[in, out] Limit the number of bytes to scan
++  @param string_length[out] Number of characters scanned
++  
++  @remark In case the length is zero, then the total size of the string is
++    considered to be 1 byte; the size byte.
++
++  @return pointer to first byte after the header in buffer.
++    @retval NULL The buffer content is malformed
++*/
++
++static
++char *get_length_encoded_string(char **buffer,
++                                size_t *max_bytes_available,
++                                size_t *string_length)
++{
++  if (*max_bytes_available == 0)
++    return NULL;
++
++  /* Do double cast to prevent overflow from signed / unsigned conversion */
++  size_t str_len= (size_t)(unsigned char)**buffer;
++
++  /*
++    If the length-encoded string has length 0,
++    the total size of the string is only one byte long (the size byte).
++  */
++  if (str_len == 0)
++  {
++    ++*buffer;
++    *string_length= 0;
++    /*
++      Return a pointer to the 0 character so the return value will be
++      an empty string.
++    */
++    return *buffer-1;
++  }
++
++  if (str_len >= *max_bytes_available)
++    return NULL;
++
++  char *str= *buffer+1;
++  *string_length= str_len;
++  *max_bytes_available-= *string_length + 1;
++  *buffer+= *string_length + 1;
++  return str;
++}
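++/*
++  [Editor's aside -- illustration added for this posting, not part of the
++  original patch] Example of the single-byte length encoding handled above:
++  for the bytes {0x03,'a','b','c', ...} with *max_bytes_available >= 4, the
++  call returns a pointer to "abc", sets *string_length to 3 and consumes 4
++  bytes (length byte + payload). For the single byte {0x00} it advances
++  *buffer by one byte and returns a pointer to that zero byte, i.e. an
++  empty string, without touching *max_bytes_available. Only a single length
++  byte is read here; multi-byte length encodings are not interpreted by
++  this helper.
++*/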
++
++
++/*
++  Perform handshake, authorize client and update thd ACL variables.
++
++  SYNOPSIS
++    check_connection()
++    thd  thread handle
++
++  RETURN
++     0  success, OK is sent to user, thd is updated.
++    -1  error, which is sent to user
++   > 0  error code (not sent to user)
++*/
++
++static int check_connection(THD *thd)
++{
++  uint connect_errors= 0;
++  NET *net= &thd->net;
++  ulong pkt_len= 0;
++  char *end;
++
++  DBUG_PRINT("info",
++             ("New connection received on %s", vio_description(net->vio)));
++#ifdef SIGNAL_WITH_VIO_CLOSE
++  thd->set_active_vio(net->vio);
++#endif
++
++  if (!thd->main_security_ctx.host)         // If TCP/IP connection
++  {
++    char ip[30];
++
++    if (vio_peer_addr(net->vio, ip, &thd->peer_port))
++    {
++      my_error(ER_BAD_HOST_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++    if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(MY_WME))))
++      return 1; /* The error is set by my_strdup(). */
++    thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip;
++    vio_in_addr(net->vio,&thd->remote.sin_addr);
++    if (!(specialflag & SPECIAL_NO_RESOLVE))
++    {
++      vio_in_addr(net->vio,&thd->remote.sin_addr);
++      thd->main_security_ctx.host=
++        ip_to_hostname(&thd->remote.sin_addr, &connect_errors);
++      /* Cut very long hostnames to avoid possible overflows */
++      if (thd->main_security_ctx.host)
++      {
++        if (thd->main_security_ctx.host != my_localhost)
++          thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host),
++                                          HOSTNAME_LENGTH)]= 0;
++        thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host;
++      }
++      if (connect_errors > max_connect_errors)
++      {
++        my_error(ER_HOST_IS_BLOCKED, MYF(0), thd->main_security_ctx.host_or_ip);
++        return 1;
++      }
++    }
++    DBUG_PRINT("info",("Host: %s  ip: %s",
++		       (thd->main_security_ctx.host ?
++                        thd->main_security_ctx.host : "unknown host"),
++		       (thd->main_security_ctx.ip ?
++                        thd->main_security_ctx.ip : "unknown ip")));
++    if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip))
++    {
++      my_error(ER_HOST_NOT_PRIVILEGED, MYF(0),
++               thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++  }
++  else /* Hostname given means that the connection was on a socket */
++  {
++    DBUG_PRINT("info",("Host: %s", thd->main_security_ctx.host));
++    thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host;
++    thd->main_security_ctx.ip= 0;
++    /* Reset sin_addr */
++    bzero((char*) &thd->remote, sizeof(thd->remote));
++  }
++  vio_keepalive(net->vio, TRUE);
++  
++  ulong server_capabilites;
++  {
++    /* buff[] needs to be big enough to hold the server_version variable */
++    char buff[SERVER_VERSION_LENGTH + 1 + SCRAMBLE_LENGTH + 1 + 64];
++    server_capabilites= CLIENT_BASIC_FLAGS;
++
++    if (opt_using_transactions)
++      server_capabilites|= CLIENT_TRANSACTIONS;
++#ifdef HAVE_COMPRESS
++    server_capabilites|= CLIENT_COMPRESS;
++#endif /* HAVE_COMPRESS */
++#ifdef HAVE_OPENSSL
++    if (ssl_acceptor_fd)
++    {
++      server_capabilites |= CLIENT_SSL;       /* Wow, SSL is available! */
++      server_capabilites |= CLIENT_SSL_VERIFY_SERVER_CERT;
++    }
++#endif /* HAVE_OPENSSL */
++
++    end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1;
++    int4store((uchar*) end, thd->thread_id);
++    end+= 4;
++    /*
++      Since check_connection() is the only entry point to the authorization
++      procedure, the scramble is set here. This gives us a new scramble for
++      each handshake.
++    */
++    create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand);
++    /*
++      Old clients do not understand long scrambles, but can ignore the packet
++      tail: that's why the first part of the scramble is placed here, and the
++      second part at the end of the packet.
++    */
++    end= strmake(end, thd->scramble, SCRAMBLE_LENGTH_323) + 1;
++   
++    int2store(end, server_capabilites);
++    /* write server characteristics: up to 16 bytes allowed */
++    end[2]=(char) default_charset_info->number;
++    int2store(end+3, thd->server_status);
++    bzero(end+5, 13);
++    end+= 18;
++    /* write scramble tail */
++    end= strmake(end, thd->scramble + SCRAMBLE_LENGTH_323, 
++                 SCRAMBLE_LENGTH - SCRAMBLE_LENGTH_323) + 1;
++
++    /* At this point we write connection message and read reply */
++    if (net_write_command(net, (uchar) protocol_version, (uchar*) "", 0,
++                          (uchar*) buff, (size_t) (end-buff)) ||
++	(pkt_len= my_net_read(net)) == packet_error ||
++	pkt_len < MIN_HANDSHAKE_SIZE)
++    {
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0),
++               thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++  }
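++  /*
++    [Editor's aside -- illustration added for this posting, not part of the
++    original patch] Rough layout of the greeting written above (sent via
++    net_write_command with protocol_version as the command byte):
++
++      server_version string + '\0'
++      4 bytes   thread_id
++      8 bytes   first part of the scramble (SCRAMBLE_LENGTH_323) + '\0'
++      2 bytes   lower 16 bits of server_capabilites
++      1 byte    default character set number
++      2 bytes   server status
++      13 bytes  zero filler
++      12 bytes  scramble tail (SCRAMBLE_LENGTH - SCRAMBLE_LENGTH_323) + '\0'
++
++    Old clients simply stop reading after the 8-byte scramble, which is why
++    the scramble is split in two.
++  */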
++#ifdef _CUSTOMCONFIG_
++#include "_cust_sql_parse.h"
++#endif
++  if (connect_errors)
++    reset_host_errors(&thd->remote.sin_addr);
++  if (thd->packet.alloc(thd->variables.net_buffer_length))
++    return 1; /* The error is set by alloc(). */
++
++  thd->client_capabilities= uint2korr(net->read_pos);
++  if (thd->client_capabilities & CLIENT_PROTOCOL_41)
++  {
++    thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16;
++    thd->max_client_packet_length= uint4korr(net->read_pos+4);
++    DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
++    if (thd_init_client_charset(thd, (uint) net->read_pos[8]))
++      return 1;
++    thd->update_charset();
++    end= (char*) net->read_pos+32;
++  }
++  else
++  {
++    thd->max_client_packet_length= uint3korr(net->read_pos+2);
++    end= (char*) net->read_pos+5;
++  }
++  /*
++    Disable those bits which are not supported by the server.
++    This is a precautionary measure, if the client lies. See Bug#27944.
++  */
++  thd->client_capabilities&= server_capabilites;
++
++  if (thd->client_capabilities & CLIENT_IGNORE_SPACE)
++    thd->variables.sql_mode|= MODE_IGNORE_SPACE;
++#ifdef HAVE_OPENSSL
++  DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities));
++  if (thd->client_capabilities & CLIENT_SSL)
++  {
++    /* Do the SSL layering. */
++    if (!ssl_acceptor_fd)
++    {
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++    DBUG_PRINT("info", ("IO layer change in progress..."));
++    if (sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout))
++    {
++      DBUG_PRINT("error", ("Failed to accept new SSL connection"));
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++    DBUG_PRINT("info", ("Reading user information over SSL layer"));
++    if ((pkt_len= my_net_read(net)) == packet_error ||
++	pkt_len < NORMAL_HANDSHAKE_SIZE)
++    {
++      DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)",
++			   pkt_len));
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++  }
++#endif /* HAVE_OPENSSL */
++
++  if (end > (char *)net->read_pos + pkt_len)
++  {
++    inc_host_errors(&thd->remote.sin_addr);
++    my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++    return 1;
++  }
++
++  if (thd->client_capabilities & CLIENT_INTERACTIVE)
++    thd->variables.net_wait_timeout= thd->variables.net_interactive_timeout;
++  if ((thd->client_capabilities & CLIENT_TRANSACTIONS) &&
++      opt_using_transactions)
++    net->return_status= &thd->server_status;
++ 
++  /*
++    In order to safely scan ahead for '\0' string terminators
++    we must keep track of how many bytes remain in the allocated
++    buffer or we might read past the end of the buffer.
++  */
++  size_t bytes_remaining_in_packet= pkt_len - (end - (char *)net->read_pos);
++
++  size_t user_len;
++  char *user= get_null_terminated_string(&end, &bytes_remaining_in_packet,
++                                         &user_len);
++  if (user == NULL)
++  {
++    inc_host_errors(&thd->remote.sin_addr);
++    my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++    return 1;
++  }
++
++  /*
++    Old clients send a null-terminated string as password; new clients send
++    the size (1 byte) + string (not null-terminated). Hence in case of empty
++    password both send '\0'.
++  */
++  size_t passwd_len= 0;
++  char *passwd= NULL;
++
++  if (thd->client_capabilities & CLIENT_SECURE_CONNECTION)
++  {
++    /*
++      4.1+ password. First byte is password length.
++    */
++    passwd= get_length_encoded_string(&end, &bytes_remaining_in_packet,
++                                      &passwd_len);
++  }
++  else
++  {
++    /*
++      Old passwords are zero terminated strings.
++    */
++    passwd= get_null_terminated_string(&end, &bytes_remaining_in_packet,
++                                       &passwd_len);
++  }
++
++  if (passwd == NULL)
++  {
++    inc_host_errors(&thd->remote.sin_addr);
++    my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++    return 1;
++  }
++
++  size_t db_len= 0;
++  char *db= NULL;
++
++  if (thd->client_capabilities & CLIENT_CONNECT_WITH_DB)
++  {
++    db= get_null_terminated_string(&end, &bytes_remaining_in_packet,
++                                   &db_len);
++    if (db == NULL)
++    {
++      inc_host_errors(&thd->remote.sin_addr);
++      my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip);
++      return 1;
++    }
++  }
++
++  char db_buff[NAME_LEN + 1];           // buffer to store db in utf8
++  char user_buff[USERNAME_LENGTH + 1];	// buffer to store user in utf8
++  uint dummy_errors;
++
++  /* Since 4.1 all database names are stored in utf8 */
++  if (db)
++  {
++    db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1,
++                             system_charset_info,
++                             db, db_len,
++                             thd->charset(), &dummy_errors)]= 0;
++    db= db_buff;
++  }
++
++  user_buff[user_len= copy_and_convert(user_buff, sizeof(user_buff)-1,
++                                       system_charset_info, user, user_len,
++                                       thd->charset(), &dummy_errors)]= '\0';
++  user= user_buff;
++
++  /* If username starts and ends in "'", chop them off */
++  if (user_len > 1 && user[0] == '\'' && user[user_len - 1] == '\'')
++  {
++    user[user_len-1]= 0;
++    user++;
++    user_len-= 2;
++  }
++
++  /*
++    Clip username to allowed length in characters (not bytes).  This is
++    mostly for backward compatibility.
++  */
++  {
++    CHARSET_INFO *cs= system_charset_info;
++    int           err;
++
++    user_len= (uint) cs->cset->well_formed_len(cs, user, user + user_len,
++                                               USERNAME_CHAR_LENGTH, &err);
++    user[user_len]= '\0';
++  }
++
++  if (thd->main_security_ctx.user)
++    x_free(thd->main_security_ctx.user);
++  if (!(thd->main_security_ctx.user= my_strdup(user, MYF(MY_WME))))
++    return 1; /* The error is set by my_strdup(). */
++  return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE);
++}
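++/*
++  [Editor's aside -- illustration added for this posting, not part of the
++  original patch] For reference, the client reply parsed by
++  check_connection() above looks roughly like this for a 4.1+ client
++  (CLIENT_PROTOCOL_41 set):
++
++    4 bytes   client capability flags
++    4 bytes   max_client_packet_length
++    1 byte    client character set number
++    23 bytes  reserved/unused (end starts at read_pos + 32)
++    user name, '\0' terminated
++    password: 1 length byte + data   (CLIENT_SECURE_CONNECTION)
++              or '\0' terminated     (old-style password)
++    database, '\0' terminated        (only with CLIENT_CONNECT_WITH_DB)
++
++  Pre-4.1 clients send only 2 capability bytes and a 3-byte max packet
++  length (end starts at read_pos + 5) before the user name.
++*/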
++
++
++/*
++  Set up connection thread globals to be used with the current thread
++
++  SYNOPSIS
++    bool setup_connection_thread_globals()
++    thd    Thread/connection handler
++
++  RETURN
++    0   ok
++    1   Error (out of memory)
++        In this case we will close the connection and increment status
++*/
++
++bool setup_connection_thread_globals(THD *thd)
++{
++  if (thd->store_globals())
++  {
++    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
++    statistic_increment(aborted_connects,&LOCK_status);
++    thread_scheduler.end_thread(thd, 0);
++    return 1;                                   // Error
++  }
++  return 0;
++}
++
++
++/*
++  Authenticate user, with error reporting
++
++  SYNOPSIS
++   login_connection()
++   thd        Thread handler
++
++  NOTES
++    Connection is not closed in case of errors
++
++  RETURN
++    0    ok
++    1    error
++*/
++
++
++static bool login_connection(THD *thd)
++{
++  NET *net= &thd->net;
++  int error;
++  DBUG_ENTER("login_connection");
++  DBUG_PRINT("info", ("login_connection called by thread %lu",
++                      thd->thread_id));
++
++  /* Use "connect_timeout" value during connection phase */
++  my_net_set_read_timeout(net, connect_timeout);
++  my_net_set_write_timeout(net, connect_timeout);
++
++  error= check_connection(thd);
++  net_end_statement(thd);
++
++  if (error)
++  {						// Wrong permissions
++#ifdef __NT__
++    if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE)
++      my_sleep(1000);				/* must wait after eof() */
++#endif
++    statistic_increment(aborted_connects,&LOCK_status);
++    DBUG_RETURN(1);
++  }
++  /* Connect completed, set read/write timeouts back to default */
++  my_net_set_read_timeout(net, thd->variables.net_read_timeout);
++  my_net_set_write_timeout(net, thd->variables.net_write_timeout);
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Close an established connection
++
++  NOTES
++    This mainly updates status variables
++*/
++
++static void end_connection(THD *thd)
++{
++  NET *net= &thd->net;
++  plugin_thdvar_cleanup(thd);
++  if (thd->user_connect)
++    decrease_user_connections(thd->user_connect);
++
++  if (thd->killed || (net->error && net->vio != 0))
++  {
++    statistic_increment(aborted_threads,&LOCK_status);
++  }
++
++  if (net->error && net->vio != 0)
++  {
++    if (!thd->killed && thd->variables.log_warnings > 1)
++    {
++      Security_context *sctx= thd->security_ctx;
++
++      sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
++                        thd->thread_id,(thd->db ? thd->db : "unconnected"),
++                        sctx->user ? sctx->user : "unauthenticated",
++                        sctx->host_or_ip,
++                        (thd->main_da.is_error() ? thd->main_da.message() :
++                         ER(ER_UNKNOWN_ERROR)));
++    }
++  }
++}
++
++
++/*
++  Initialize THD to handle queries
++*/
++
++static void prepare_new_connection_state(THD* thd)
++{
++  Security_context *sctx= thd->security_ctx;
++
++#ifdef __NETWARE__
++  netware_reg_user(sctx->ip, sctx->user, "MySQL");
++#endif
++
++  if (thd->variables.max_join_size == HA_POS_ERROR)
++    thd->options |= OPTION_BIG_SELECTS;
++  if (thd->client_capabilities & CLIENT_COMPRESS)
++    thd->net.compress=1;				// Use compression
++
++  /*
++    Much of this is duplicated in create_embedded_thd() for the
++    embedded server library.
++    TODO: refactor this to avoid code duplication there
++  */
++  thd->version= refresh_version;
++  thd->proc_info= 0;
++  thd->command= COM_SLEEP;
++  thd->set_time();
++  thd->init_for_queries();
++
++  if (sys_init_connect.value_length && !(sctx->master_access & SUPER_ACL))
++  {
++    execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect);
++    if (thd->is_error())
++    {
++      thd->killed= THD::KILL_CONNECTION;
++      sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
++                        thd->thread_id,(thd->db ? thd->db : "unconnected"),
++                        sctx->user ? sctx->user : "unauthenticated",
++                        sctx->host_or_ip, "init_connect command failed");
++      sql_print_warning("%s", thd->main_da.message());
++    }
++    thd->proc_info=0;
++    thd->set_time();
++    thd->init_for_queries();
++  }
++}
++
++
++/*
++  Thread handler for a connection
++
++  SYNOPSIS
++    handle_one_connection()
++    arg		Connection object (THD)
++
++  IMPLEMENTATION
++    This function (normally) does the following:
++    - Initialize thread
++    - Initialize THD to be used with this thread
++    - Authenticate user
++    - Execute all queries sent on the connection
++    - Take connection down
++    - End thread  / Handle next connection using thread from thread cache
++*/
++
++pthread_handler_t handle_one_connection(void *arg)
++{
++  THD *thd= (THD*) arg;
++
++  thd->thr_create_utime= my_micro_time();
++
++  if (thread_scheduler.init_new_connection_thread())
++  {
++    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
++    statistic_increment(aborted_connects,&LOCK_status);
++    thread_scheduler.end_thread(thd,0);
++    return 0;
++  }
++
++  /*
++    If a thread was created to handle this connection:
++    increment slow_launch_threads counter if it took more than
++    slow_launch_time seconds to create the thread.
++  */
++  if (thd->prior_thr_create_utime)
++  {
++    ulong launch_time= (ulong) (thd->thr_create_utime -
++                                thd->prior_thr_create_utime);
++    if (launch_time >= slow_launch_time*1000000L)
++      statistic_increment(slow_launch_threads, &LOCK_status);
++    thd->prior_thr_create_utime= 0;
++  }
++
++  /*
++    handle_one_connection() is normally the only way a thread would
++    start and would always be on the very high end of the stack,
++    therefore, the thread stack always starts at the address of the
++    first local variable of handle_one_connection, which is thd. We
++    need to know the start of the stack so that we can check for
++    stack overruns.
++  */
++  thd->thread_stack= (char*) &thd;
++  if (setup_connection_thread_globals(thd))
++    return 0;
++
++  for (;;)
++  {
++    NET *net= &thd->net;
++
++    lex_start(thd);
++    if (login_connection(thd))
++      goto end_thread;
++
++    prepare_new_connection_state(thd);
++
++    while (!net->error && net->vio != 0 &&
++           !(thd->killed == THD::KILL_CONNECTION))
++    {
++      if (do_command(thd))
++	break;
++    }
++    end_connection(thd);
++   
++end_thread:
++    close_connection(thd, 0, 1);
++    if (thread_scheduler.end_thread(thd,1))
++      return 0;                                 // Probably no-threads
++
++    /*
++      If end_thread() returns, we are either running with
++      thread-handler=no-threads or this thread has been scheduled to
++      handle the next connection.
++    */
++    thd= current_thd;
++    thd->thread_stack= (char*) &thd;
++  }
++}
++#endif /* EMBEDDED_LIBRARY */
+diff -urN mysql-old/sql/sql_load.cc mysql/sql/sql_load.cc
+--- mysql-old/sql/sql_load.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_load.cc	2011-05-10 17:56:01.503349042 +0000
+@@ -1109,7 +1109,7 @@
+ 
+ 
+   /* Set of a stack for unget if long terminators */
+-  uint length=max(field_term_length,line_term_length)+1;
++  uint length=MYSQL_MAX(field_term_length,line_term_length)+1;
+   set_if_bigger(length,line_start.length());
+   stack=stack_pos=(int*) sql_alloc(sizeof(int)*length);
+ 
+diff -urN mysql-old/sql/sql_parse.cc mysql/sql/sql_parse.cc
+--- mysql-old/sql/sql_parse.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/sql_parse.cc	2011-05-10 17:56:01.510015710 +0000
+@@ -5722,7 +5722,7 @@
+     return 1;
+   }
+ #ifndef DBUG_OFF
+-  max_stack_used= max(max_stack_used, stack_used);
++  max_stack_used= MYSQL_MAX(max_stack_used, stack_used);
+ #endif
+   return 0;
+ }
+@@ -7226,7 +7226,7 @@
+     char command[80];
+     Lex_input_stream *lip= & thd->m_parser_state->m_lip;
+     strmake(command, lip->yylval->symbol.str,
+-	    min(lip->yylval->symbol.length, sizeof(command)-1));
++	    MYSQL_MIN(lip->yylval->symbol.length, sizeof(command)-1));
+     my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command);
+     return 1;
+   }
+diff -urN mysql-old/sql/sql_parse.cc.orig mysql/sql/sql_parse.cc.orig
+--- mysql-old/sql/sql_parse.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/sql_parse.cc.orig	2011-04-12 12:11:38.000000000 +0000
+@@ -0,0 +1,7997 @@
++/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++#define MYSQL_LEX 1
++#include "mysql_priv.h"
++#include "sql_repl.h"
++#include "rpl_filter.h"
++#include "repl_failsafe.h"
++#include <m_ctype.h>
++#include <myisam.h>
++#include <my_dir.h>
++
++#include "sp_head.h"
++#include "sp.h"
++#include "sp_cache.h"
++#include "events.h"
++#include "sql_trigger.h"
++#include "debug_sync.h"
++
++/**
++  @defgroup Runtime_Environment Runtime Environment
++  @{
++*/
++
++/* Used in error handling only */
++#define SP_TYPE_STRING(LP) \
++  ((LP)->sphead->m_type == TYPE_ENUM_FUNCTION ? "FUNCTION" : "PROCEDURE")
++#define SP_COM_STRING(LP) \
++  ((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \
++   (LP)->sql_command == SQLCOM_ALTER_FUNCTION || \
++   (LP)->sql_command == SQLCOM_SHOW_CREATE_FUNC || \
++   (LP)->sql_command == SQLCOM_DROP_FUNCTION ? \
++   "FUNCTION" : "PROCEDURE")
++
++static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
++static bool check_show_create_table_access(THD *thd, TABLE_LIST *table);
++
++const char *any_db="*any*";	// Special symbol for check_access
++
++const LEX_STRING command_name[]={
++  { C_STRING_WITH_LEN("Sleep") },
++  { C_STRING_WITH_LEN("Quit") },
++  { C_STRING_WITH_LEN("Init DB") },
++  { C_STRING_WITH_LEN("Query") },
++  { C_STRING_WITH_LEN("Field List") },
++  { C_STRING_WITH_LEN("Create DB") },
++  { C_STRING_WITH_LEN("Drop DB") },
++  { C_STRING_WITH_LEN("Refresh") },
++  { C_STRING_WITH_LEN("Shutdown") },
++  { C_STRING_WITH_LEN("Statistics") },
++  { C_STRING_WITH_LEN("Processlist") },
++  { C_STRING_WITH_LEN("Connect") },
++  { C_STRING_WITH_LEN("Kill") },
++  { C_STRING_WITH_LEN("Debug") },
++  { C_STRING_WITH_LEN("Ping") },
++  { C_STRING_WITH_LEN("Time") },
++  { C_STRING_WITH_LEN("Delayed insert") },
++  { C_STRING_WITH_LEN("Change user") },
++  { C_STRING_WITH_LEN("Binlog Dump") },
++  { C_STRING_WITH_LEN("Table Dump") },
++  { C_STRING_WITH_LEN("Connect Out") },
++  { C_STRING_WITH_LEN("Register Slave") },
++  { C_STRING_WITH_LEN("Prepare") },
++  { C_STRING_WITH_LEN("Execute") },
++  { C_STRING_WITH_LEN("Long Data") },
++  { C_STRING_WITH_LEN("Close stmt") },
++  { C_STRING_WITH_LEN("Reset stmt") },
++  { C_STRING_WITH_LEN("Set option") },
++  { C_STRING_WITH_LEN("Fetch") },
++  { C_STRING_WITH_LEN("Daemon") },
++  { C_STRING_WITH_LEN("Error") }  // Last command number
++};
++
++const char *xa_state_names[]={
++  "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED", "ROLLBACK ONLY"
++};
++
++/**
++  Mark an XA transaction as rollback-only if the RM unilaterally
++  rolled back the transaction branch.
++
++  @note If a rollback was requested by the RM, this function sets
++        the appropriate rollback error code and transits the state
++        to XA_ROLLBACK_ONLY.
++
++  @return TRUE if transaction was rolled back or if the transaction
++          state is XA_ROLLBACK_ONLY. FALSE otherwise.
++*/
++static bool xa_trans_rolled_back(XID_STATE *xid_state)
++{
++  if (xid_state->rm_error)
++  {
++    switch (xid_state->rm_error) {
++    case ER_LOCK_WAIT_TIMEOUT:
++      my_error(ER_XA_RBTIMEOUT, MYF(0));
++      break;
++    case ER_LOCK_DEADLOCK:
++      my_error(ER_XA_RBDEADLOCK, MYF(0));
++      break;
++    default:
++      my_error(ER_XA_RBROLLBACK, MYF(0));
++    }
++    xid_state->xa_state= XA_ROLLBACK_ONLY;
++  }
++
++  return (xid_state->xa_state == XA_ROLLBACK_ONLY);
++}
++
++/**
++  Roll back work done on behalf of a transaction branch.
++*/
++static bool xa_trans_rollback(THD *thd)
++{
++  /*
++    Resource Manager error is meaningless at this point, as we perform
++    explicit rollback request by user. We must reset rm_error before
++    calling ha_rollback(), so thd->transaction.xid structure gets reset
++    by ha_rollback()/THD::transaction::cleanup().
++  */
++  thd->transaction.xid_state.rm_error= 0;
++
++  bool status= test(ha_rollback(thd));
++
++  thd->options&= ~(ulong) OPTION_BEGIN;
++  thd->transaction.all.modified_non_trans_table= FALSE;
++  thd->server_status&= ~SERVER_STATUS_IN_TRANS;
++  xid_cache_delete(&thd->transaction.xid_state);
++  thd->transaction.xid_state.xa_state= XA_NOTR;
++
++  return status;
++}
++
++static void unlock_locked_tables(THD *thd)
++{
++  if (thd->locked_tables)
++  {
++    thd->lock=thd->locked_tables;
++    thd->locked_tables=0;			// Will be automatically closed
++    close_thread_tables(thd);			// Free tables
++  }
++}
++
++
++bool end_active_trans(THD *thd)
++{
++  int error=0;
++  DBUG_ENTER("end_active_trans");
++  if (unlikely(thd->in_sub_stmt))
++  {
++    my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
++    DBUG_RETURN(1);
++  }
++  if (thd->transaction.xid_state.xa_state != XA_NOTR)
++  {
++    my_error(ER_XAER_RMFAIL, MYF(0),
++             xa_state_names[thd->transaction.xid_state.xa_state]);
++    DBUG_RETURN(1);
++  }
++  if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN |
++		      OPTION_TABLE_LOCK))
++  {
++    DBUG_PRINT("info",("options: 0x%llx", thd->options));
++    /* Safety if one did "drop table" on locked tables */
++    if (!thd->locked_tables)
++      thd->options&= ~OPTION_TABLE_LOCK;
++    thd->server_status&= ~SERVER_STATUS_IN_TRANS;
++    if (ha_commit(thd))
++      error=1;
++  }
++  thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
++  thd->transaction.all.modified_non_trans_table= FALSE;
++  DBUG_RETURN(error);
++}
++
++
++bool begin_trans(THD *thd)
++{
++  int error=0;
++  if (unlikely(thd->in_sub_stmt))
++  {
++    my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
++    return 1;
++  }
++  if (thd->locked_tables)
++  {
++    thd->lock=thd->locked_tables;
++    thd->locked_tables=0;			// Will be automatically closed
++    close_thread_tables(thd);			// Free tables
++  }
++  if (end_active_trans(thd))
++    error= -1;
++  else
++  {
++    thd->options|= OPTION_BEGIN;
++    thd->server_status|= SERVER_STATUS_IN_TRANS;
++  }
++  return error;
++}
++
++#ifdef HAVE_REPLICATION
++/**
++  Returns true if all tables should be ignored.
++*/
++inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables)
++{
++  return rpl_filter->is_on() && tables && !thd->spcont &&
++         !rpl_filter->tables_ok(thd->db, tables);
++}
++#endif
++
++
++static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables)
++{
++  for (TABLE_LIST *table= tables; table; table= table->next_global)
++  {
++    DBUG_ASSERT(table->db && table->table_name);
++    if (table->updating &&
++        !find_temporary_table(thd, table->db, table->table_name))
++      return 1;
++  }
++  return 0;
++}
++
++
++/**
++  Mark all commands that somehow change a table.
++
++  This is used to check the number of updates per hour.
++
++  sql_command is actually set to SQLCOM_END sometimes
++  so we need the +1 to include it in the array.
++
++  See COMMAND_FLAG_xxx for the different types of commands:
++     2  - query that returns a meaningful ROW_COUNT() -
++          a number of modified rows
++*/
++
++uint sql_command_flags[SQLCOM_END+1];
++
++void init_update_queries(void)
++{
++  bzero((uchar*) &sql_command_flags, sizeof(sql_command_flags));
++
++  sql_command_flags[SQLCOM_CREATE_TABLE]=   CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_CREATE_INDEX]=   CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_ALTER_TABLE]=    CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND;
++  sql_command_flags[SQLCOM_TRUNCATE]=       CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND;
++  sql_command_flags[SQLCOM_DROP_TABLE]=     CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_LOAD]=           CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_CREATE_DB]=      CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_DROP_DB]=        CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_RENAME_TABLE]=   CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_BACKUP_TABLE]=   CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_RESTORE_TABLE]=  CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_DROP_INDEX]=     CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_CREATE_VIEW]=    CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_DROP_VIEW]=      CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_CREATE_EVENT]=   CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_ALTER_EVENT]=    CF_CHANGES_DATA;
++  sql_command_flags[SQLCOM_DROP_EVENT]=     CF_CHANGES_DATA;
++
++  sql_command_flags[SQLCOM_UPDATE]=	    CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_UPDATE_MULTI]=   CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_INSERT]=	    CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_INSERT_SELECT]=  CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_DELETE]=         CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_DELETE_MULTI]=   CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_REPLACE]=        CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_REPLACE_SELECT]= CF_CHANGES_DATA | CF_HAS_ROW_COUNT |
++                                            CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SELECT]=         CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SET_OPTION]=     CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_DO]=             CF_REEXECUTION_FRAGILE;
++
++  sql_command_flags[SQLCOM_SHOW_STATUS_PROC]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_STATUS]=      CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_DATABASES]=   CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_TRIGGERS]=    CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_EVENTS]=      CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_OPEN_TABLES]= CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_PLUGINS]=     CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_FIELDS]=      CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_KEYS]=        CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_VARIABLES]=   CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_CHARSETS]=    CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_COLLATIONS]=  CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_NEW_MASTER]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_BINLOGS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_SLAVE_HOSTS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_BINLOG_EVENTS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_COLUMN_TYPES]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_STORAGE_ENGINES]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_AUTHORS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CONTRIBUTORS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_PRIVILEGES]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_WARNS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_ERRORS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_ENGINE_STATUS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_ENGINE_MUTEX]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_ENGINE_LOGS]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_PROCESSLIST]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_GRANTS]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE_DB]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_MASTER_STAT]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_SLAVE_STAT]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE_PROC]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE_FUNC]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE_TRIGGER]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_STATUS_FUNC]=  CF_STATUS_COMMAND | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_SHOW_PROC_CODE]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_FUNC_CODE]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_CREATE_EVENT]=  CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_PROFILES]= CF_STATUS_COMMAND;
++  sql_command_flags[SQLCOM_SHOW_PROFILE]= CF_STATUS_COMMAND;
++
++   sql_command_flags[SQLCOM_SHOW_TABLES]=       (CF_STATUS_COMMAND |
++                                                 CF_SHOW_TABLE_COMMAND |
++                                                 CF_REEXECUTION_FRAGILE);
++  sql_command_flags[SQLCOM_SHOW_TABLE_STATUS]= (CF_STATUS_COMMAND |
++                                                CF_SHOW_TABLE_COMMAND |
++                                                CF_REEXECUTION_FRAGILE);
++
++  /*
++    The following is used to preserve CF_ROW_COUNT during
++    a CALL or EXECUTE statement, so the value generated by the
++    last called (or executed) statement is preserved.
++    See mysql_execute_command() for how CF_ROW_COUNT is used.
++  */
++  sql_command_flags[SQLCOM_CALL]= 		CF_HAS_ROW_COUNT | CF_REEXECUTION_FRAGILE;
++  sql_command_flags[SQLCOM_EXECUTE]= 		CF_HAS_ROW_COUNT;
++
++  /*
++    The following admin table operations are allowed
++    on log tables.
++  */
++  sql_command_flags[SQLCOM_REPAIR]=           CF_WRITE_LOGS_COMMAND;
++  sql_command_flags[SQLCOM_OPTIMIZE]=         CF_WRITE_LOGS_COMMAND;
++  sql_command_flags[SQLCOM_ANALYZE]=          CF_WRITE_LOGS_COMMAND;
++}
++
++
++bool is_update_query(enum enum_sql_command command)
++{
++  DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
++  return (sql_command_flags[command] & CF_CHANGES_DATA) != 0;
++}
++
++/**
++  Check if a sql command is allowed to write to log tables.
++  @param command The SQL command
++  @return true if writing is allowed
++*/
++bool is_log_table_write_query(enum enum_sql_command command)
++{
++  DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
++  return (sql_command_flags[command] & CF_WRITE_LOGS_COMMAND) != 0;
++}
++
++void execute_init_command(THD *thd, sys_var_str *init_command_var,
++			  rw_lock_t *var_mutex)
++{
++  Vio* save_vio;
++  ulong save_client_capabilities;
++
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++  thd->profiling.start_new_query();
++  thd->profiling.set_query_source(init_command_var->value,
++                                  init_command_var->value_length);
++#endif
++
++  thd_proc_info(thd, "Execution of init_command");
++  /*
++    We need to lock init_command_var because the value of
++    init_command_var must not change during execution of
++    the init_command query.
++  */
++  rw_rdlock(var_mutex);
++  save_client_capabilities= thd->client_capabilities;
++  thd->client_capabilities|= CLIENT_MULTI_QUERIES;
++  /*
++    We don't need to return the result of execution to the client side.
++    To prevent this we set thd->net.vio to 0.
++  */
++  save_vio= thd->net.vio;
++  thd->net.vio= 0;
++  dispatch_command(COM_QUERY, thd,
++                   init_command_var->value,
++                   init_command_var->value_length);
++  rw_unlock(var_mutex);
++  thd->client_capabilities= save_client_capabilities;
++  thd->net.vio= save_vio;
++
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++  thd->profiling.finish_current_query();
++#endif
++}
++
++
++static void handle_bootstrap_impl(THD *thd)
++{
++  FILE *file=bootstrap_file;
++  char *buff;
++  const char* found_semicolon= NULL;
++
++  DBUG_ENTER("handle_bootstrap");
++
++#ifndef EMBEDDED_LIBRARY
++  pthread_detach_this_thread();
++  thd->thread_stack= (char*) &thd;
++#endif /* EMBEDDED_LIBRARY */
++
++  if (thd->variables.max_join_size == HA_POS_ERROR)
++    thd->options |= OPTION_BIG_SELECTS;
++
++  thd_proc_info(thd, 0);
++  thd->version=refresh_version;
++  thd->security_ctx->priv_user=
++    thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME));
++  thd->security_ctx->priv_host[0]=0;
++  /*
++    Make the "client" handle multiple results. This is necessary
++    to enable stored procedures with SELECTs and Dynamic SQL
++    in init-file.
++  */
++  thd->client_capabilities|= CLIENT_MULTI_RESULTS;
++
++  buff= (char*) thd->net.buff;
++  thd->init_for_queries();
++  while (fgets(buff, thd->net.max_packet, file))
++  {
++    char *query, *res;
++    /* strlen() can't be deleted because fgets() doesn't return length */
++    ulong length= (ulong) strlen(buff);
++    while (buff[length-1] != '\n' && !feof(file))
++    {
++      /*
++        We got only a part of the current string. Will try to increase
++        net buffer then read the rest of the current string.
++      */
++      /* purecov: begin tested */
++      if (net_realloc(&(thd->net), 2 * thd->net.max_packet))
++      {
++        net_end_statement(thd);
++        bootstrap_error= 1;
++        break;
++      }
++      buff= (char*) thd->net.buff;
++      res= fgets(buff + length, thd->net.max_packet - length, file);
++      if (!res && !feof(file))
++      {
++        net_end_statement(thd);
++        bootstrap_error= 1;
++        break;
++      }
++      length+= (ulong) strlen(buff + length);
++      /* purecov: end */
++    }
++    if (bootstrap_error)
++      break;                                    /* purecov: inspected */
++
++    while (length && (my_isspace(thd->charset(), buff[length-1]) ||
++                      buff[length-1] == ';'))
++      length--;
++    buff[length]=0;
++
++    /* Skip lines starting with delimiter */
++    if (strncmp(buff, STRING_WITH_LEN("delimiter")) == 0)
++      continue;
++
++    query= (char *) thd->memdup_w_gap(buff, length + 1,
++                                      thd->db_length + 1 +
++                                      QUERY_CACHE_FLAGS_SIZE);
++    thd->set_query(query, length);
++    DBUG_PRINT("query",("%-.4096s", thd->query()));
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++    thd->profiling.start_new_query();
++    thd->profiling.set_query_source(thd->query(), length);
++#endif
++
++    /*
++      We don't need to obtain LOCK_thread_count here because in bootstrap
++      mode we have only one thread.
++    */
++    thd->query_id=next_query_id();
++    thd->set_time();
++    mysql_parse(thd, thd->query(), length, & found_semicolon);
++    close_thread_tables(thd);			// Free tables
++
++    bootstrap_error= thd->is_error();
++    net_end_statement(thd);
++
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++    thd->profiling.finish_current_query();
++#endif
++
++    if (bootstrap_error)
++      break;
++
++    free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
++#ifdef USING_TRANSACTIONS
++    free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC));
++#endif
++  }
++
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Execute commands from bootstrap_file.
++
++  Used when creating the initial grant tables.
++*/
++
++pthread_handler_t handle_bootstrap(void *arg)
++{
++  THD *thd=(THD*) arg;
++
++  /* The following must be called before DBUG_ENTER */
++  thd->thread_stack= (char*) &thd;
++  if (my_thread_init() || thd->store_globals())
++  {
++#ifndef EMBEDDED_LIBRARY
++    close_connection(thd, ER_OUT_OF_RESOURCES, 1);
++#endif
++    thd->fatal_error();
++    goto end;
++  }
++
++  handle_bootstrap_impl(thd);
++
++end:
++  net_end(&thd->net);
++  thd->cleanup();
++  delete thd;
++
++#ifndef EMBEDDED_LIBRARY
++  (void) pthread_mutex_lock(&LOCK_thread_count);
++  thread_count--;
++  in_bootstrap= FALSE;
++  (void) pthread_cond_broadcast(&COND_thread_count);
++  (void) pthread_mutex_unlock(&LOCK_thread_count);
++  my_thread_end();
++  pthread_exit(0);
++#endif
++
++  return 0;
++}
++
++
++/**
++  @brief Check access privs for a MERGE table and fix children lock types.
++
++  @param[in]        thd         thread handle
++  @param[in]        db          database name
++  @param[in,out]    table_list  list of child tables (merge_list)
++                                lock_type and optionally db set per table
++
++  @return           status
++    @retval         0           OK
++    @retval         != 0        Error
++
++  @detail
++    This function is used for write access to MERGE tables only
++    (CREATE TABLE, ALTER TABLE ... UNION=(...)). Set TL_WRITE for
++    every child. Set 'db' for every child if not present.
++*/
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++static bool check_merge_table_access(THD *thd, char *db,
++                                     TABLE_LIST *table_list)
++{
++  int error= 0;
++
++  if (table_list)
++  {
++    /* Check that all tables use the current database */
++    TABLE_LIST *tlist;
++
++    for (tlist= table_list; tlist; tlist= tlist->next_local)
++    {
++      if (!tlist->db || !tlist->db[0])
++        tlist->db= db; /* purecov: inspected */
++    }
++    error= check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
++                              table_list, UINT_MAX, FALSE);
++  }
++  return error;
++}
++#endif
++
++/* This works because items are allocated with sql_alloc() */
++
++void free_items(Item *item)
++{
++  Item *next;
++  DBUG_ENTER("free_items");
++  for (; item ; item=next)
++  {
++    next=item->next;
++    item->delete_self();
++  }
++  DBUG_VOID_RETURN;
++}
++
++/**
++   This works because items are allocated with sql_alloc().
++   @note The function also handles null pointers (empty list).
++*/
++void cleanup_items(Item *item)
++{
++  DBUG_ENTER("cleanup_items");  
++  for (; item ; item=item->next)
++    item->cleanup();
++  DBUG_VOID_RETURN;
++}
++
++/**
++  Handle COM_TABLE_DUMP command.
++
++  @param thd           thread handle
++  @param db            database name or an empty string. If empty,
++                       the current database of the connection is used
++  @param tbl_name      name of the table to dump
++
++  @note
++    This function is written to handle one specific command only.
++
++  @retval
++    0               success
++  @retval
++    1               error, the error message is set in THD
++*/
++
++static
++int mysql_table_dump(THD *thd, LEX_STRING *db, LEX_STRING *table_name)
++{
++  TABLE* table;
++  TABLE_LIST* table_list;
++  int error = 0;
++  DBUG_ENTER("mysql_table_dump");
++  if (db->length == 0)
++  {
++    db->str= thd->db;            /* purecov: inspected */
++    db->length= thd->db_length;  /* purecov: inspected */
++  }
++  if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST))))
++    DBUG_RETURN(1); // out of memory
++  table_list->db= db->str;
++  table_list->table_name= table_list->alias= table_name->str;
++  table_list->lock_type= TL_READ_NO_INSERT;
++  table_list->prev_global= &table_list;	// can be removed after merge with 4.1
++
++  if (check_db_name(db))
++  {
++    /* purecov: begin inspected */
++    my_error(ER_WRONG_DB_NAME ,MYF(0), db->str ? db->str : "NULL");
++    goto err;
++    /* purecov: end */
++  }
++  if (!table_name->length ||
++      check_table_name(table_name->str, table_name->length, TRUE))
++  {
++    my_error(ER_WRONG_TABLE_NAME, MYF(0),
++             table_name->str ? table_name->str : "NULL");
++    error= 1;
++    goto err;
++  }
++  if (lower_case_table_names)
++    my_casedn_str(files_charset_info, table_name->str);
++
++  if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT, 0)))
++    DBUG_RETURN(1);
++
++  if (check_one_table_access(thd, SELECT_ACL, table_list))
++    goto err;
++  thd->free_list = 0;
++  thd->set_query(table_name->str, table_name->length);
++  if ((error = mysqld_dump_create_info(thd, table_list, -1)))
++  {
++    my_error(ER_GET_ERRNO, MYF(0), my_errno);
++    goto err;
++  }
++  net_flush(&thd->net);
++  if ((error= table->file->dump(thd,-1)))
++    my_error(ER_GET_ERRNO, MYF(0), error);
++
++err:
++  DBUG_RETURN(error);
++}
++
++/**
++  Ends the current transaction and (maybe) begin the next.
++
++  @param thd            Current thread
++  @param completion     Completion type
++
++  @retval
++    0   OK
++*/
++
++int end_trans(THD *thd, enum enum_mysql_completiontype completion)
++{
++  bool do_release= 0;
++  int res= 0;
++  DBUG_ENTER("end_trans");
++
++  if (unlikely(thd->in_sub_stmt))
++  {
++    my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
++    DBUG_RETURN(1);
++  }
++  if (thd->transaction.xid_state.xa_state != XA_NOTR)
++  {
++    my_error(ER_XAER_RMFAIL, MYF(0),
++             xa_state_names[thd->transaction.xid_state.xa_state]);
++    DBUG_RETURN(1);
++  }
++  switch (completion) {
++  case COMMIT:
++    /*
++     We don't use end_active_trans() here to ensure that this works
++     even if there is a problem with the OPTION_AUTO_COMMIT flag
++     (Which of course should never happen...)
++    */
++    thd->server_status&= ~SERVER_STATUS_IN_TRANS;
++    res= ha_commit(thd);
++    thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
++    thd->transaction.all.modified_non_trans_table= FALSE;
++    break;
++  case COMMIT_RELEASE:
++    do_release= 1; /* fall through */
++  case COMMIT_AND_CHAIN:
++    res= end_active_trans(thd);
++    if (!res && completion == COMMIT_AND_CHAIN)
++      res= begin_trans(thd);
++    break;
++  case ROLLBACK_RELEASE:
++    do_release= 1; /* fall through */
++  case ROLLBACK:
++  case ROLLBACK_AND_CHAIN:
++  {
++    thd->server_status&= ~SERVER_STATUS_IN_TRANS;
++    if (ha_rollback(thd))
++      res= -1;
++    thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
++    thd->transaction.all.modified_non_trans_table= FALSE;
++    if (!res && (completion == ROLLBACK_AND_CHAIN))
++      res= begin_trans(thd);
++    break;
++  }
++  default:
++    res= -1;
++    my_error(ER_UNKNOWN_COM_ERROR, MYF(0));
++    DBUG_RETURN(-1);
++  }
++
++  if (res < 0)
++    my_error(thd->killed_errno(), MYF(0));
++  else if ((res == 0) && do_release)
++    thd->killed= THD::KILL_CONNECTION;
++
++  DBUG_RETURN(res);
++}
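++/*
++  [Editor's aside -- illustration added for this posting, not part of the
++  original patch] Sketch, assuming the standard mapping done by the parser,
++  of how the completion values above correspond to the SQL-level variants
++  of COMMIT/ROLLBACK:
++
++    COMMIT              -> COMMIT             (plain commit)
++    COMMIT AND CHAIN    -> COMMIT_AND_CHAIN   (commit, then begin_trans)
++    COMMIT RELEASE      -> COMMIT_RELEASE     (commit, then disconnect)
++    ROLLBACK AND CHAIN  -> ROLLBACK_AND_CHAIN (roll back, then begin_trans)
++    ROLLBACK RELEASE    -> ROLLBACK_RELEASE   (roll back, then disconnect)
++
++  "disconnect" here is implemented by setting thd->killed to KILL_CONNECTION
++  once the commit/rollback has succeeded.
++*/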
++
++#ifndef EMBEDDED_LIBRARY
++
++/**
++  Read one command from the connection and execute it (query or simple command).
++  This function is called in a loop from the thread function.
++
++  For profiling to work, it must never be called recursively.
++
++  @retval
++    0  success
++  @retval
++    1  request of thread shutdown (see dispatch_command() description)
++*/
++
++bool do_command(THD *thd)
++{
++  bool return_value;
++  char *packet= 0;
++  ulong packet_length;
++  NET *net= &thd->net;
++  enum enum_server_command command;
++  DBUG_ENTER("do_command");
++
++  /*
++    indicator of uninitialized lex => normal flow of errors handling
++    (see my_message_sql)
++  */
++  thd->lex->current_select= 0;
++
++  /*
++    This thread will do a blocking read from the client which
++    will be interrupted when the next command is received from
++    the client, the connection is closed or "net_wait_timeout"
++    number of seconds has passed
++  */
++  my_net_set_read_timeout(net, thd->variables.net_wait_timeout);
++
++  /*
++    XXX: this code is here only to clear possible errors of init_connect. 
++    Consider moving to init_connect() instead.
++  */
++  thd->clear_error();				// Clear error message
++  thd->main_da.reset_diagnostics_area();
++
++  net_new_transaction(net);
++
++  packet_length= my_net_read(net);
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++  thd->profiling.start_new_query();
++#endif
++  if (packet_length == packet_error)
++  {
++    DBUG_PRINT("info",("Got error %d reading command from socket %s",
++		       net->error,
++		       vio_description(net->vio)));
++
++    /* Check if we can continue without closing the connection */
++
++    /* The error must be set. */
++    DBUG_ASSERT(thd->is_error());
++    net_end_statement(thd);
++
++    if (net->error != 3)
++    {
++      return_value= TRUE;                       // We have to close it.
++      goto out;
++    }
++
++    net->error= 0;
++    return_value= FALSE;
++    goto out;
++  }
++
++  packet= (char*) net->read_pos;
++  /*
++    'packet_length' contains length of data, as it was stored in packet
++    header. In case of malformed header, my_net_read returns zero.
++    If packet_length is not zero, my_net_read ensures that the returned
++    number of bytes was actually read from network.
++    There is also an extra safety measure in my_net_read:
++    it sets packet[packet_length]= 0, but only for non-zero packets.
++  */
++  if (packet_length == 0)                       /* safety */
++  {
++    /* Initialize with COM_SLEEP packet */
++    packet[0]= (uchar) COM_SLEEP;
++    packet_length= 1;
++  }
++  /* Do not rely on my_net_read, extra safety against programming errors. */
++  packet[packet_length]= '\0';                  /* safety */
++
++  command= (enum enum_server_command) (uchar) packet[0];
++
++  if (command >= COM_END)
++    command= COM_END;				// Wrong command
++
++  DBUG_PRINT("info",("Command on %s = %d (%s)",
++                     vio_description(net->vio), command,
++                     command_name[command].str));
++
++  /* Restore read timeout value */
++  my_net_set_read_timeout(net, thd->variables.net_read_timeout);
++
++  DBUG_ASSERT(packet_length);
++  return_value= dispatch_command(command, thd, packet+1, (uint) (packet_length-1));
++
++out:
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++  thd->profiling.finish_current_query();
++#endif
++  DBUG_RETURN(return_value);
++}
++#endif  /* EMBEDDED_LIBRARY */
++
++/**
++  @brief Determine whether an attempt has been made to update a
++    non-temporary table while the read-only option was enabled.
++
++  This is a helper function to mysql_execute_command.
++
++  @note SQLCOM_MULTI_UPDATE is an exception and is dealt with elsewhere.
++
++  @see mysql_execute_command
++  @returns Status code
++    @retval TRUE The statement should be denied.
++    @retval FALSE The statement isn't updating any relevant tables.
++*/
++
++static my_bool deny_updates_if_read_only_option(THD *thd,
++                                                TABLE_LIST *all_tables)
++{
++  DBUG_ENTER("deny_updates_if_read_only_option");
++
++  if (!opt_readonly)
++    DBUG_RETURN(FALSE);
++
++  LEX *lex= thd->lex;
++
++  const my_bool user_is_super=
++    ((ulong)(thd->security_ctx->master_access & SUPER_ACL) ==
++     (ulong)SUPER_ACL);
++
++  if (user_is_super)
++    DBUG_RETURN(FALSE);
++
++  if (!(sql_command_flags[lex->sql_command] & CF_CHANGES_DATA))
++    DBUG_RETURN(FALSE);
++
++  /* Multi update is an exception and is dealt with later. */
++  if (lex->sql_command == SQLCOM_UPDATE_MULTI)
++    DBUG_RETURN(FALSE);
++
++  const my_bool create_temp_tables= 
++    (lex->sql_command == SQLCOM_CREATE_TABLE) &&
++    (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE);
++
++  const my_bool drop_temp_tables= 
++    (lex->sql_command == SQLCOM_DROP_TABLE) &&
++    lex->drop_temporary;
++
++  const my_bool update_real_tables=
++    some_non_temp_table_to_be_updated(thd, all_tables) &&
++    !(create_temp_tables || drop_temp_tables);
++
++
++  const my_bool create_or_drop_databases=
++    (lex->sql_command == SQLCOM_CREATE_DB) ||
++    (lex->sql_command == SQLCOM_DROP_DB);
++
++  if (update_real_tables || create_or_drop_databases)
++  {
++      /*
++        An attempt was made to modify one or more non-temporary tables.
++      */
++      DBUG_RETURN(TRUE);
++  }
++
++
++  /* Assuming that only temporary tables are modified. */
++  DBUG_RETURN(FALSE);
++}
++
++/**
++  Perform one connection-level (COM_XXXX) command.
++
++  @param command         type of command to perform
++  @param thd             connection handle
++  @param packet          data for the command, packet is always null-terminated
++  @param packet_length   length of packet + 1 (to show that data is
++                         null-terminated) except for COM_SLEEP, where it
++                         can be zero.
++
++  @todo
++    set thd->lex->sql_command to SQLCOM_END here.
++  @todo
++    The following has to be changed to an 8 byte integer
++
++  @retval
++    0   ok
++  @retval
++    1   request of thread shutdown, i. e. if command is
++        COM_QUIT/COM_SHUTDOWN
++*/
++bool dispatch_command(enum enum_server_command command, THD *thd,
++		      char* packet, uint packet_length)
++{
++  NET *net= &thd->net;
++  bool error= 0;
++  DBUG_ENTER("dispatch_command");
++  DBUG_PRINT("info",("packet: '%*.s'; command: %d", packet_length, packet, command));
++
++  thd->command=command;
++  /*
++    Commands which always take a long time are logged into
++    the slow log only if opt_log_slow_admin_statements is set.
++  */
++  thd->enable_slow_log= TRUE;
++  thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */
++  thd->set_time();
++  if (!thd->is_valid_time())
++  {
++    /*
++     If the time has gone past 2038 we need to shut this server down.
++     We do this by making sure every command is a shutdown and that we
++     have enough privileges to shut the server down.
++
++     TODO: remove this when we have full 64 bit my_time_t support
++    */
++    thd->security_ctx->master_access|= SHUTDOWN_ACL;
++    command= COM_SHUTDOWN;
++  }
++
++  VOID(pthread_mutex_lock(&LOCK_thread_count));
++  thd->query_id= global_query_id;
++
++  switch( command ) {
++  /* Ignore these statements. */
++  case COM_STATISTICS:
++  case COM_PING:
++    break;
++  /* Only increase id on these statements but don't count them. */
++  case COM_STMT_PREPARE: 
++  case COM_STMT_CLOSE:
++  case COM_STMT_RESET:
++    next_query_id();
++    break;
++  /* Increase id and count all other statements. */
++  default:
++    statistic_increment(thd->status_var.questions, &LOCK_status);
++    next_query_id();
++  }
++
++  thread_running++;
++  /* TODO: set thd->lex->sql_command to SQLCOM_END here */
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++
++  /**
++    Clear the set of flags that are expected to be cleared at the
++    beginning of each command.
++  */
++  thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
++  switch (command) {
++  case COM_INIT_DB:
++  {
++    LEX_STRING tmp;
++    status_var_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB]);
++    thd->convert_string(&tmp, system_charset_info,
++			packet, packet_length, thd->charset());
++    if (!mysql_change_db(thd, &tmp, FALSE))
++    {
++      general_log_write(thd, command, thd->db, thd->db_length);
++      my_ok(thd);
++    }
++    break;
++  }
++#ifdef HAVE_REPLICATION
++  case COM_REGISTER_SLAVE:
++  {
++    if (!register_slave(thd, (uchar*)packet, packet_length))
++      my_ok(thd);
++    break;
++  }
++#endif
++  case COM_TABLE_DUMP:
++  {
++    LEX_STRING db, table;
++    /* Safe because there is always a trailing \0 at the end of the packet */
++    uint db_len= *(uchar*) packet;
++    if (db_len + 1 > packet_length || db_len > NAME_LEN)
++    {
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++    /* Safe because there is always a trailing \0 at the end of the packet */
++    uint tbl_len= *(uchar*) (packet + db_len + 1);
++    if (db_len + tbl_len + 2 > packet_length || tbl_len > NAME_LEN)
++    {
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++
++    status_var_increment(thd->status_var.com_other);
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    db.str= (char*) thd->alloc(db_len + tbl_len + 2);
++    if (!db.str)
++    {
++      my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
++      break;
++    }
++    db.length= db_len;
++    table.length= tbl_len;
++    table.str= strmake(db.str, packet + 1, db_len) + 1;
++    strmake(table.str, packet + db_len + 2, tbl_len);
++    if (mysql_table_dump(thd, &db, &table) == 0)
++      thd->main_da.disable_status();
++    break;
++  }
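++  /*
++    Illustrative sketch of the COM_TABLE_DUMP packet layout parsed above
++    (the sizes in the example are made up):
++
++      offset 0            : db_len        (1 byte)
++      offset 1            : database name (db_len bytes)
++      offset db_len + 1   : tbl_len       (1 byte)
++      offset db_len + 2   : table name    (tbl_len bytes)
++
++    e.g. db_len = 4, "test", tbl_len = 2, "t1" requests a dump of
++    test.t1.  Both lengths are validated against packet_length and
++    NAME_LEN before anything is copied.
++  */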
++  case COM_CHANGE_USER:
++  {
++    status_var_increment(thd->status_var.com_other);
++    char *user= (char*) packet, *packet_end= packet + packet_length;
++    /* Safe because there is always a trailing \0 at the end of the packet */
++    char *passwd= strend(user)+1;
++
++    thd->change_user();
++    thd->clear_error();                         // if errors from rollback
++
++    /*
++      Old clients send null-terminated string ('\0' for empty string) for
++      password.  New clients send the size (1 byte) + string (not null
++      terminated, so also '\0' for empty string).
++
++      Cast *passwd to an unsigned char, so that it doesn't extend the sign
++      for *passwd > 127 and become 2**32-127 after casting to uint.
++    */
++    char db_buff[NAME_LEN+1];                 // buffer to store db in utf8
++    char *db= passwd;
++    char *save_db;
++    /*
++      If there is no password supplied, the packet must contain '\0',
++      in any type of handshake (4.1 or pre-4.1).
++     */
++    if (passwd >= packet_end)
++    {
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++    uint passwd_len= (thd->client_capabilities & CLIENT_SECURE_CONNECTION ?
++                      (uchar)(*passwd++) : strlen(passwd));
++    uint dummy_errors, save_db_length, db_length;
++    int res;
++    Security_context save_security_ctx= *thd->security_ctx;
++    USER_CONN *save_user_connect;
++
++    db+= passwd_len + 1;
++    /*
++      Database name is always NUL-terminated, so in case of empty database
++      the packet must contain at least the trailing '\0'.
++    */
++    if (db >= packet_end)
++    {
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++    db_length= strlen(db);
++
++    char *ptr= db + db_length + 1;
++    uint cs_number= 0;
++
++    if (ptr < packet_end)
++    {
++      CHARSET_INFO *cs;
++      if (ptr + 2 > packet_end)
++      {
++        my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++        break;
++      }
++
++      if ((cs_number= uint2korr(ptr)) &&
++          (cs= get_charset(cs_number, MYF(0))) &&
++          !is_supported_parser_charset(cs))
++      {
++        /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */
++        my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client",
++                 cs->csname);
++        break;
++      }        
++    }
++
++    /* Convert database name to utf8 */
++    db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1,
++                             system_charset_info, db, db_length,
++                             thd->charset(), &dummy_errors)]= 0;
++    db= db_buff;
++
++    /* Save user and privileges */
++    save_db_length= thd->db_length;
++    save_db= thd->db;
++    save_user_connect= thd->user_connect;
++
++    if (!(thd->security_ctx->user= my_strdup(user, MYF(0))))
++    {
++      thd->security_ctx->user= save_security_ctx.user;
++      my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
++      break;
++    }
++
++    /* Clear variables that are allocated */
++    thd->user_connect= 0;
++    thd->security_ctx->priv_user= thd->security_ctx->user;
++    res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, FALSE);
++
++    if (res)
++    {
++      x_free(thd->security_ctx->user);
++      *thd->security_ctx= save_security_ctx;
++      thd->user_connect= save_user_connect;
++      thd->db= save_db;
++      thd->db_length= save_db_length;
++    }
++    else
++    {
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++      /* we've authenticated new user */
++      if (save_user_connect)
++	decrease_user_connections(save_user_connect);
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++      x_free(save_db);
++      x_free(save_security_ctx.user);
++
++      if (cs_number)
++      {
++        /*
++          We have checked charset earlier,
++          so thd_init_client_charset cannot fail.
++        */
++        if (thd_init_client_charset(thd, cs_number))
++          DBUG_ASSERT(0);
++        thd->update_charset();
++      }
++    }
++    break;
++  }
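++  /*
++    Illustrative sketch of the COM_CHANGE_USER packet handled above,
++    assuming a client with CLIENT_SECURE_CONNECTION (the values are made
++    up for the example):
++
++      "root\0" "\x14" <20 scramble bytes> "test\0" "\x21\x00"
++
++    i.e. user name, NUL, 1-byte password length followed by the password,
++    database name, NUL, and an optional 2-byte character set number
++    (0x0021 = utf8_general_ci here).  Pre-4.1 clients instead send the
++    password as a NUL-terminated string, which is why passwd_len falls
++    back to strlen(passwd) when CLIENT_SECURE_CONNECTION is not set.
++  */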
++  case COM_STMT_EXECUTE:
++  {
++    mysqld_stmt_execute(thd, packet, packet_length);
++    break;
++  }
++  case COM_STMT_FETCH:
++  {
++    mysqld_stmt_fetch(thd, packet, packet_length);
++    break;
++  }
++  case COM_STMT_SEND_LONG_DATA:
++  {
++    mysql_stmt_get_longdata(thd, packet, packet_length);
++    break;
++  }
++  case COM_STMT_PREPARE:
++  {
++    mysqld_stmt_prepare(thd, packet, packet_length);
++    break;
++  }
++  case COM_STMT_CLOSE:
++  {
++    mysqld_stmt_close(thd, packet);
++    break;
++  }
++  case COM_STMT_RESET:
++  {
++    mysqld_stmt_reset(thd, packet);
++    break;
++  }
++  case COM_QUERY:
++  {
++    if (alloc_query(thd, packet, packet_length))
++      break;					// fatal error is set
++    char *packet_end= thd->query() + thd->query_length();
++    /* 'b' stands for 'buffer' parameter, special for 'my_snprintf' */
++    const char* end_of_stmt= NULL;
++
++    general_log_write(thd, command, thd->query(), thd->query_length());
++    DBUG_PRINT("query",("%-.4096s",thd->query()));
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++    thd->profiling.set_query_source(thd->query(), thd->query_length());
++#endif
++
++    if (!(specialflag & SPECIAL_NO_PRIOR))
++      my_pthread_setprio(pthread_self(),QUERY_PRIOR);
++
++    mysql_parse(thd, thd->query(), thd->query_length(), &end_of_stmt);
++
++    while (!thd->killed && (end_of_stmt != NULL) && ! thd->is_error())
++    {
++      char *beginning_of_next_stmt= (char*) end_of_stmt;
++
++      net_end_statement(thd);
++      query_cache_end_of_result(thd);
++      /*
++        Multiple queries exist; execute them individually.
++      */
++      close_thread_tables(thd);
++      ulong length= (ulong)(packet_end - beginning_of_next_stmt);
++
++      log_slow_statement(thd);
++
++      /* Remove garbage at start of query */
++      while (length > 0 && my_isspace(thd->charset(), *beginning_of_next_stmt))
++      {
++        beginning_of_next_stmt++;
++        length--;
++      }
++
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++      thd->profiling.finish_current_query();
++      thd->profiling.start_new_query("continuing");
++      thd->profiling.set_query_source(beginning_of_next_stmt, length);
++#endif
++
++      thd->set_query(beginning_of_next_stmt, length);
++      VOID(pthread_mutex_lock(&LOCK_thread_count));
++      /*
++        Count each statement from the client.
++      */
++      statistic_increment(thd->status_var.questions, &LOCK_status);
++      thd->query_id= next_query_id();
++      thd->set_time(); /* Reset the query start time. */
++      /* TODO: set thd->lex->sql_command to SQLCOM_END here */
++      VOID(pthread_mutex_unlock(&LOCK_thread_count));
++      mysql_parse(thd, beginning_of_next_stmt, length, &end_of_stmt);
++    }
++
++    if (!(specialflag & SPECIAL_NO_PRIOR))
++      my_pthread_setprio(pthread_self(),WAIT_PRIOR);
++    DBUG_PRINT("info",("query ready"));
++    break;
++  }
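++  /*
++    A minimal sketch of how the loop above splits multi-statement
++    packets (only taken when the client enabled CLIENT_MULTI_STATEMENTS):
++
++      packet: "SELECT 1; SELECT 2"
++
++    The first mysql_parse() call executes "SELECT 1" and leaves
++    end_of_stmt pointing just past the ';'.  The loop then flushes the
++    result, closes the tables, strips the leading space, bumps the query
++    id under LOCK_thread_count and re-enters mysql_parse() with
++    "SELECT 2", repeating until end_of_stmt is NULL or the thread is
++    killed or in error.
++  */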
++  case COM_FIELD_LIST:				// This isn't actually needed
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
++               MYF(0));	/* purecov: inspected */
++    break;
++#else
++  {
++    char *fields, *packet_end= packet + packet_length, *arg_end;
++    /* Locked closure of all tables */
++    TABLE_LIST table_list;
++    LEX_STRING conv_name;
++
++    /* used as fields initializer */
++    lex_start(thd);
++
++    status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS]);
++    bzero((char*) &table_list,sizeof(table_list));
++    if (thd->copy_db_to(&table_list.db, &table_list.db_length))
++      break;
++    /*
++      We have name + wildcard in packet, separated by endzero
++    */
++    arg_end= strend(packet);
++    uint arg_length= arg_end - packet;
++    
++    /* Check given table name length. */
++    if (arg_length >= packet_length || arg_length > NAME_LEN)
++    {
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++    thd->convert_string(&conv_name, system_charset_info,
++			packet, arg_length, thd->charset());
++    if (check_table_name(conv_name.str, conv_name.length, FALSE))
++    {
++      /* this is OK due to convert_string() null-terminating the string */
++      my_error(ER_WRONG_TABLE_NAME, MYF(0), conv_name.str);
++      break;
++    }
++
++    table_list.alias= table_list.table_name= conv_name.str;
++    packet= arg_end + 1;
++
++    if (is_schema_db(table_list.db, table_list.db_length))
++    {
++      ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias);
++      if (schema_table)
++        table_list.schema_table= schema_table;
++    }
++
++    uint query_length= (uint) (packet_end - packet); // Don't count end \0
++    if (!(fields= (char *) thd->memdup(packet, query_length + 1)))
++      break;
++    thd->set_query(fields, query_length);
++    general_log_print(thd, command, "%s %s", table_list.table_name, fields);
++    if (lower_case_table_names)
++      my_casedn_str(files_charset_info, table_list.table_name);
++
++    if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege,
++		     0, 0, test(table_list.schema_table)))
++      break;
++    if (check_grant(thd, SELECT_ACL, &table_list, 2, UINT_MAX, 0))
++      break;
++    /* init structures for VIEW processing */
++    table_list.select_lex= &(thd->lex->select_lex);
++
++    lex_start(thd);
++    mysql_reset_thd_for_next_command(thd);
++
++    thd->lex->
++      select_lex.table_list.link_in_list(&table_list,
++                                         &table_list.next_local);
++    thd->lex->add_to_query_tables(&table_list);
++
++    /* switch on VIEW optimisation: do not fill temporary tables */
++    thd->lex->sql_command= SQLCOM_SHOW_FIELDS;
++    mysqld_list_fields(thd,&table_list,fields);
++    thd->lex->unit.cleanup();
++    thd->cleanup_after_query();
++    break;
++  }
++#endif
++  case COM_QUIT:
++    /* We don't calculate statistics for this command */
++    general_log_print(thd, command, NullS);
++    net->error=0;				// Don't give 'abort' message
++    thd->main_da.disable_status();              // Don't send anything back
++    error=TRUE;					// End server
++    break;
++
++#ifdef REMOVED
++  case COM_CREATE_DB:				// QQ: To be removed
++    {
++      LEX_STRING db, alias;
++      HA_CREATE_INFO create_info;
++
++      status_var_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB]);
++      if (thd->make_lex_string(&db, packet, packet_length, FALSE) ||
++          thd->make_lex_string(&alias, db.str, db.length, FALSE) ||
++          check_db_name(&db))
++      {
++	my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
++	break;
++      }
++      if (check_access(thd, CREATE_ACL, db.str , 0, 1, 0,
++                       is_schema_db(db.str, db.length)))
++	break;
++      general_log_print(thd, command, "%.*s", db.length, db.str);
++      bzero(&create_info, sizeof(create_info));
++      mysql_create_db(thd, (lower_case_table_names == 2 ? alias.str : db.str),
++                      &create_info, 0);
++      break;
++    }
++  case COM_DROP_DB:				// QQ: To be removed
++    {
++      status_var_increment(thd->status_var.com_stat[SQLCOM_DROP_DB]);
++      LEX_STRING db;
++
++      if (thd->make_lex_string(&db, packet, packet_length, FALSE) ||
++          check_db_name(&db))
++      {
++	my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
++	break;
++      }
++      if (check_access(thd, DROP_ACL, db.str, 0, 1, 0,
++                            is_schema_db(db.str, db.length)))
++	break;
++      if (thd->locked_tables || thd->active_transaction())
++      {
++	my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                   ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++	break;
++      }
++      general_log_write(thd, command, "%.*s", db.length, db.str);
++      mysql_rm_db(thd, db.str, 0, 0);
++      break;
++    }
++#endif
++#ifndef EMBEDDED_LIBRARY
++  case COM_BINLOG_DUMP:
++    {
++      ulong pos;
++      ushort flags;
++      uint32 slave_server_id;
++
++      status_var_increment(thd->status_var.com_other);
++      thd->enable_slow_log= opt_log_slow_admin_statements;
++      if (check_global_access(thd, REPL_SLAVE_ACL))
++	break;
++
++      /* TODO: The following has to be changed to an 8 byte integer */
++      pos = uint4korr(packet);
++      flags = uint2korr(packet + 4);
++      thd->server_id=0; /* avoid suicide */
++      if ((slave_server_id= uint4korr(packet+6))) // mysqlbinlog.server_id==0
++	kill_zombie_dump_threads(slave_server_id);
++      thd->server_id = slave_server_id;
++
++      general_log_print(thd, command, "Log: '%s'  Pos: %ld", packet+10,
++                      (long) pos);
++      mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags);
++      unregister_slave(thd,1,1);
++      /*  fake COM_QUIT -- if we get here, the thread needs to terminate */
++      error = TRUE;
++      break;
++    }
++#endif
++  case COM_REFRESH:
++  {
++    int not_used;
++    status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
++    ulong options= (ulong) (uchar) packet[0];
++    if (check_global_access(thd,RELOAD_ACL))
++      break;
++    general_log_print(thd, command, NullS);
++#ifndef DBUG_OFF
++    bool debug_simulate= FALSE;
++    DBUG_EXECUTE_IF("simulate_detached_thread_refresh", debug_simulate= TRUE;);
++    if (debug_simulate)
++    {
++      /*
++        Simulate a reload without an attached thread session.
++        Provides an environment similar to that of when the
++        server receives a SIGHUP signal and reloads caches
++        and flushes tables.
++      */
++      bool res;
++      my_pthread_setspecific_ptr(THR_THD, NULL);
++      res= reload_acl_and_cache(NULL, options | REFRESH_FAST,
++                                NULL, &not_used);
++      my_pthread_setspecific_ptr(THR_THD, thd);
++      if (!res)
++        my_ok(thd);
++      break;
++    }
++#endif
++    if (!reload_acl_and_cache(thd, options, NULL, &not_used))
++      my_ok(thd);
++    break;
++  }
++#ifndef EMBEDDED_LIBRARY
++  case COM_SHUTDOWN:
++  {
++    status_var_increment(thd->status_var.com_other);
++    if (check_global_access(thd,SHUTDOWN_ACL))
++      break; /* purecov: inspected */
++    /*
++      If the client is < 4.1.3, it is going to send us no argument; then
++      packet_length is 0, packet[0] is the end 0 of the packet. Note that
++      SHUTDOWN_DEFAULT is 0. If client is >= 4.1.3, the shutdown level is in
++      packet[0].
++    */
++    enum mysql_enum_shutdown_level level;
++    if (!thd->is_valid_time())
++      level= SHUTDOWN_DEFAULT;
++    else
++      level= (enum mysql_enum_shutdown_level) (uchar) packet[0];
++    if (level == SHUTDOWN_DEFAULT)
++      level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable
++    else if (level != SHUTDOWN_WAIT_ALL_BUFFERS)
++    {
++      my_error(ER_NOT_SUPPORTED_YET, MYF(0), "this shutdown level");
++      break;
++    }
++    DBUG_PRINT("quit",("Got shutdown command for level %u", level));
++    general_log_print(thd, command, NullS);
++    my_eof(thd);
++    close_thread_tables(thd);			// Free before kill
++    kill_mysql();
++    error=TRUE;
++    break;
++  }
++#endif
++  case COM_STATISTICS:
++  {
++    STATUS_VAR current_global_status_var;
++    ulong uptime;
++    uint length __attribute__((unused));
++    ulonglong queries_per_second1000;
++    char buff[250];
++    uint buff_len= sizeof(buff);
++
++    general_log_print(thd, command, NullS);
++    status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS]);
++    calc_sum_of_all_status(&current_global_status_var);
++    if (!(uptime= (ulong) (thd->start_time - server_start_time)))
++      queries_per_second1000= 0;
++    else
++      queries_per_second1000= thd->query_id * LL(1000) / uptime;
++
++    length= my_snprintf(buff, buff_len - 1,
++                        "Uptime: %lu  Threads: %d  Questions: %lu  "
++                        "Slow queries: %lu  Opens: %lu  Flush tables: %lu  "
++                        "Open tables: %u  Queries per second avg: %u.%u",
++                        uptime,
++                        (int) thread_count, (ulong) thd->query_id,
++                        current_global_status_var.long_query_count,
++                        current_global_status_var.opened_tables,
++                        refresh_version,
++                        cached_open_tables(),
++                        (uint) (queries_per_second1000 / 1000),
++                        (uint) (queries_per_second1000 % 1000));
++#ifdef SAFEMALLOC
++    if (sf_malloc_cur_memory)				// Using SAFEMALLOC
++    {
++      char *end= buff + length;
++      length+= my_snprintf(end, buff_len - length - 1,
++                           end,"  Memory in use: %ldK  Max memory used: %ldK",
++                           (sf_malloc_cur_memory+1023L)/1024L,
++                           (sf_malloc_max_memory+1023L)/1024L);
++    }
++#endif
++#ifndef EMBEDDED_LIBRARY
++    VOID(my_net_write(net, (uchar*) buff, length));
++    VOID(net_flush(net));
++    thd->main_da.disable_status();
++#else
++    /* Store the buffer in permanent memory */
++    my_ok(thd, 0, 0, buff);
++#endif
++    break;
++  }
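++  /*
++    Worked example for the "Queries per second avg" figure above (the
++    numbers are hypothetical): with uptime = 200 seconds and
++    thd->query_id = 5000,
++
++      queries_per_second1000 = 5000 * 1000 / 200 = 25000
++
++    which the "%u.%u" format prints as "25.0" via the /1000 and %1000
++    split.  The factor of 1000 only exists to keep one decimal digit
++    without using floating point.
++  */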
++  case COM_PING:
++    status_var_increment(thd->status_var.com_other);
++    my_ok(thd);				// Tell client we are alive
++    break;
++  case COM_PROCESS_INFO:
++    status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_PROCESSLIST]);
++    if (!thd->security_ctx->priv_user[0] &&
++        check_global_access(thd, PROCESS_ACL))
++      break;
++    general_log_print(thd, command, NullS);
++    mysqld_list_processes(thd,
++			  thd->security_ctx->master_access & PROCESS_ACL ? 
++			  NullS : thd->security_ctx->priv_user, 0);
++    break;
++  case COM_PROCESS_KILL:
++  {
++    status_var_increment(thd->status_var.com_stat[SQLCOM_KILL]);
++    ulong id=(ulong) uint4korr(packet);
++    sql_kill(thd,id,false);
++    break;
++  }
++  case COM_SET_OPTION:
++  {
++    status_var_increment(thd->status_var.com_stat[SQLCOM_SET_OPTION]);
++    uint opt_command= uint2korr(packet);
++
++    switch (opt_command) {
++    case (int) MYSQL_OPTION_MULTI_STATEMENTS_ON:
++      thd->client_capabilities|= CLIENT_MULTI_STATEMENTS;
++      my_eof(thd);
++      break;
++    case (int) MYSQL_OPTION_MULTI_STATEMENTS_OFF:
++      thd->client_capabilities&= ~CLIENT_MULTI_STATEMENTS;
++      my_eof(thd);
++      break;
++    default:
++      my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++      break;
++    }
++    break;
++  }
++  case COM_DEBUG:
++    status_var_increment(thd->status_var.com_other);
++    if (check_global_access(thd, SUPER_ACL))
++      break;					/* purecov: inspected */
++    mysql_print_status();
++    general_log_print(thd, command, NullS);
++    my_eof(thd);
++    break;
++  case COM_SLEEP:
++  case COM_CONNECT:				// Impossible here
++  case COM_TIME:				// Impossible from client
++  case COM_DELAYED_INSERT:
++  case COM_END:
++  default:
++    my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
++    break;
++  }
++
++  /* report error issued during command execution */
++  if (thd->killed_errno())
++  {
++    if (! thd->main_da.is_set())
++      thd->send_kill_message();
++  }
++  if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA)
++  {
++    thd->killed= THD::NOT_KILLED;
++    thd->mysys_var->abort= 0;
++  }
++
++  /* If commit fails, we should be able to reset the OK status. */
++  thd->main_da.can_overwrite_status= TRUE;
++  ha_autocommit_or_rollback(thd, thd->is_error());
++  thd->main_da.can_overwrite_status= FALSE;
++
++  thd->transaction.stmt.reset();
++
++  net_end_statement(thd);
++  query_cache_end_of_result(thd);
++
++  thd->proc_info= "closing tables";
++  /* Free tables */
++  close_thread_tables(thd);
++
++  log_slow_statement(thd);
++
++  thd_proc_info(thd, "cleaning up");
++  thd->set_query(NULL, 0);
++  thd->command=COM_SLEEP;
++  VOID(pthread_mutex_lock(&LOCK_thread_count)); // For process list
++  thread_running--;
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++  thd_proc_info(thd, 0);
++  thd->packet.shrink(thd->variables.net_buffer_length);	// Reclaim some memory
++  free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
++  DBUG_RETURN(error);
++}
++
++
++void log_slow_statement(THD *thd)
++{
++  DBUG_ENTER("log_slow_statement");
++
++  /*
++    The following should never be true with our current code base,
++    but better to keep this here so we don't accidentally try to log a
++    statement in a trigger or stored function.
++  */
++  if (unlikely(thd->in_sub_stmt))
++    DBUG_VOID_RETURN;                           // Don't set time for sub stmt
++
++  /*
++    Do not log administrative statements unless the appropriate option is
++    set.
++  */
++  if (thd->enable_slow_log)
++  {
++    ulonglong end_utime_of_query= thd->current_utime();
++    thd_proc_info(thd, "logging slow query");
++
++    if (((end_utime_of_query - thd->utime_after_lock) >
++         thd->variables.long_query_time ||
++         ((thd->server_status &
++           (SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) &&
++          opt_log_queries_not_using_indexes &&
++           !(sql_command_flags[thd->lex->sql_command] & CF_STATUS_COMMAND))) &&
++        thd->examined_row_count >= thd->variables.min_examined_row_limit)
++    {
++      thd_proc_info(thd, "logging slow query");
++      thd->status_var.long_query_count++;
++      slow_log_print(thd, thd->query(), thd->query_length(), 
++                     end_utime_of_query);
++    }
++  }
++  DBUG_VOID_RETURN;
++}
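++
++/*
++  Illustrative example for the condition above (the values are
++  hypothetical): with a long_query_time of 10 seconds,
++  min_examined_row_limit = 0 and a SELECT whose execution time after the
++  lock wait is 12 seconds, (end_utime_of_query - thd->utime_after_lock)
++  exceeds the limit, so long_query_count is incremented and the statement
++  is written to the slow log.  A fast query that used no index is logged
++  as well, but only if log_queries_not_using_indexes is enabled and the
++  statement is not a status command.
++*/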
++
++
++/**
++  Create a TABLE_LIST object for an INFORMATION_SCHEMA table.
++
++    This function is used in the parser to convert a SHOW or DESCRIBE
++    table_name command to a SELECT from INFORMATION_SCHEMA.
++    It prepares a SELECT_LEX and a TABLE_LIST object to represent the
++    given command as a SELECT parse tree.
++
++  @param thd              thread handle
++  @param lex              current lex
++  @param table_ident      table alias if it's used
++  @param schema_table_idx the type of the INFORMATION_SCHEMA table to be
++                          created
++
++  @note
++    Due to the way this function works with memory and LEX it cannot
++    be used outside the parser (parse tree transformations outside
++    the parser break PS and SP).
++
++  @retval
++    0                 success
++  @retval
++    1                 out of memory or SHOW commands are not allowed
++                      in this version of the server.
++*/
++
++int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
++                         enum enum_schema_tables schema_table_idx)
++{
++  SELECT_LEX *schema_select_lex= NULL;
++  DBUG_ENTER("prepare_schema_table");
++
++  switch (schema_table_idx) {
++  case SCH_SCHEMATA:
++#if defined(DONT_ALLOW_SHOW_COMMANDS)
++    my_message(ER_NOT_ALLOWED_COMMAND,
++               ER(ER_NOT_ALLOWED_COMMAND), MYF(0));   /* purecov: inspected */
++    DBUG_RETURN(1);
++#else
++    break;
++#endif
++
++  case SCH_TABLE_NAMES:
++  case SCH_TABLES:
++  case SCH_VIEWS:
++  case SCH_TRIGGERS:
++  case SCH_EVENTS:
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND,
++               ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */
++    DBUG_RETURN(1);
++#else
++    {
++      LEX_STRING db;
++      size_t dummy;
++      if (lex->select_lex.db == NULL &&
++          lex->copy_db_to(&lex->select_lex.db, &dummy))
++      {
++        DBUG_RETURN(1);
++      }
++      schema_select_lex= new SELECT_LEX();
++      db.str= schema_select_lex->db= lex->select_lex.db;
++      schema_select_lex->table_list.first= NULL;
++      db.length= strlen(db.str);
++
++      if (check_db_name(&db))
++      {
++        my_error(ER_WRONG_DB_NAME, MYF(0), db.str);
++        DBUG_RETURN(1);
++      }
++      break;
++    }
++#endif
++  case SCH_COLUMNS:
++  case SCH_STATISTICS:
++  {
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND,
++               ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */
++    DBUG_RETURN(1);
++#else
++    DBUG_ASSERT(table_ident);
++    TABLE_LIST **query_tables_last= lex->query_tables_last;
++    schema_select_lex= new SELECT_LEX();
++    /* 'parent_lex' is used in init_query() so it must be before it. */
++    schema_select_lex->parent_lex= lex;
++    schema_select_lex->init_query();
++    if (!schema_select_lex->add_table_to_list(thd, table_ident, 0, 0, TL_READ))
++      DBUG_RETURN(1);
++    lex->query_tables_last= query_tables_last;
++    break;
++  }
++#endif
++  case SCH_PROFILES:
++    /* 
++      Mark this current profiling record to be discarded.  We don't
++      wish to have SHOW commands show up in profiling.
++    */
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++    thd->profiling.discard_current_query();
++#endif
++    break;
++  case SCH_OPEN_TABLES:
++  case SCH_VARIABLES:
++  case SCH_STATUS:
++  case SCH_PROCEDURES:
++  case SCH_CHARSETS:
++  case SCH_ENGINES:
++  case SCH_COLLATIONS:
++  case SCH_COLLATION_CHARACTER_SET_APPLICABILITY:
++  case SCH_USER_PRIVILEGES:
++  case SCH_SCHEMA_PRIVILEGES:
++  case SCH_TABLE_PRIVILEGES:
++  case SCH_COLUMN_PRIVILEGES:
++  case SCH_TABLE_CONSTRAINTS:
++  case SCH_KEY_COLUMN_USAGE:
++  default:
++    break;
++  }
++  
++  SELECT_LEX *select_lex= lex->current_select;
++  if (make_schema_select(thd, select_lex, schema_table_idx))
++  {
++    DBUG_RETURN(1);
++  }
++  TABLE_LIST *table_list= select_lex->table_list.first;
++  table_list->schema_select_lex= schema_select_lex;
++  table_list->schema_table_reformed= 1;
++  DBUG_RETURN(0);
++}
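++
++/*
++  Example (illustrative): for "SHOW TABLES FROM test" the parser calls
++  this function with schema_table_idx == SCH_TABLE_NAMES; the branch
++  above allocates a new SELECT_LEX whose db is "test" (falling back to
++  the current database when no FROM clause is given).  "SHOW COLUMNS
++  FROM test.t1" instead goes through the SCH_COLUMNS branch and adds t1
++  to the new SELECT_LEX via add_table_to_list().  In both cases the
++  statement ends up as a SELECT from the corresponding
++  INFORMATION_SCHEMA table built by make_schema_select().
++*/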
++
++
++/**
++  Read query from packet and store in thd->query.
++  Used in COM_QUERY and COM_STMT_PREPARE.
++
++    Sets the following THD variables:
++  - query
++  - query_length
++
++  @retval
++    FALSE ok
++  @retval
++    TRUE  error;  In this case thd->fatal_error is set
++*/
++
++bool alloc_query(THD *thd, const char *packet, uint packet_length)
++{
++  char *query;
++  /* Remove garbage at start and end of query */
++  while (packet_length > 0 && my_isspace(thd->charset(), packet[0]))
++  {
++    packet++;
++    packet_length--;
++  }
++  const char *pos= packet + packet_length;     // Point at end null
++  while (packet_length > 0 &&
++	 (pos[-1] == ';' || my_isspace(thd->charset() ,pos[-1])))
++  {
++    pos--;
++    packet_length--;
++  }
++  /* We must allocate some extra memory for query cache */
++  if (! (query= (char*) thd->memdup_w_gap(packet,
++                                          packet_length,
++                                          1 + thd->db_length +
++                                          QUERY_CACHE_FLAGS_SIZE)))
++      return TRUE;
++  query[packet_length]= '\0';
++  thd->set_query(query, packet_length);
++
++  /* Reclaim some memory */
++  thd->packet.shrink(thd->variables.net_buffer_length);
++  thd->convert_buffer.shrink(thd->variables.net_buffer_length);
++
++  return FALSE;
++}
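++
++/*
++  A small illustration of the trimming above (the input is made up):
++  for the packet "  SELECT 1 ;  " the leading spaces and the trailing
++  spaces and ';' are dropped, so thd->query() becomes "SELECT 1" with
++  thd->query_length() == 8.  The allocation reserves
++  1 + thd->db_length + QUERY_CACHE_FLAGS_SIZE bytes beyond the
++  terminating '\0' so that the query cache can append the current
++  database name and its flags without another copy.
++*/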
++
++static void reset_one_shot_variables(THD *thd) 
++{
++  thd->variables.character_set_client=
++    global_system_variables.character_set_client;
++  thd->variables.collation_connection=
++    global_system_variables.collation_connection;
++  thd->variables.collation_database=
++    global_system_variables.collation_database;
++  thd->variables.collation_server=
++    global_system_variables.collation_server;
++  thd->update_charset();
++  thd->variables.time_zone=
++    global_system_variables.time_zone;
++  thd->variables.lc_time_names= &my_locale_en_US;
++  thd->one_shot_set= 0;
++}
++
++
++static
++bool sp_process_definer(THD *thd)
++{
++  DBUG_ENTER("sp_process_definer");
++
++  LEX *lex= thd->lex;
++
++  /*
++    If the definer is not specified, the CREATE statement is missing the
++    DEFINER clause. The DEFINER clause can be missing in two cases:
++
++      - The user submitted a statement w/o the clause. This is a normal
++        case, we should assign CURRENT_USER as definer.
++
++      - Our slave received an update from a master that does not
++        replicate the definer for stored routines. We should also assign
++        CURRENT_USER as definer here, but also we should mark this routine
++        as NON-SUID. This is essential for the sake of backward
++        compatibility.
++
++        The problem is the slave thread is running under "special" user (@),
++        that actually does not exist. In the older versions we do not fail
++        execution of a stored routine if its definer does not exist and
++        continue the execution under the authorization of the invoker
++        (BUG#13198). And now if we try to switch to slave-current-user (@),
++        we will fail.
++
++        Actually, this leads to the inconsistent state of master and
++        slave (different definers, different SUID behaviour), but it seems
++        this is the best we can do.
++  */
++
++  if (!lex->definer)
++  {
++    Query_arena original_arena;
++    Query_arena *ps_arena= thd->activate_stmt_arena_if_needed(&original_arena);
++
++    lex->definer= create_default_definer(thd);
++
++    if (ps_arena)
++      thd->restore_active_arena(ps_arena, &original_arena);
++
++    /* Error has been already reported. */
++    if (lex->definer == NULL)
++      DBUG_RETURN(TRUE);
++
++    if (thd->slave_thread && lex->sphead)
++      lex->sphead->m_chistics->suid= SP_IS_NOT_SUID;
++  }
++  else
++  {
++    /*
++      If the specified definer differs from the current user, we
++      should check that the current user has SUPER privilege (in order
++      to create a stored routine under another user one must have
++      SUPER privilege).
++    */
++    if ((strcmp(lex->definer->user.str, thd->security_ctx->priv_user) ||
++         my_strcasecmp(system_charset_info, lex->definer->host.str,
++                       thd->security_ctx->priv_host)) &&
++        check_global_access(thd, SUPER_ACL))
++    {
++      my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
++      DBUG_RETURN(TRUE);
++    }
++  }
++
++  /* Check that the specified definer exists. Emit a warning if not. */
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (!is_acl_user(lex->definer->host.str, lex->definer->user.str))
++  {
++    push_warning_printf(thd,
++                        MYSQL_ERROR::WARN_LEVEL_NOTE,
++                        ER_NO_SUCH_USER,
++                        ER(ER_NO_SUCH_USER),
++                        lex->definer->user.str,
++                        lex->definer->host.str);
++  }
++#endif /* NO_EMBEDDED_ACCESS_CHECKS */
++
++  DBUG_RETURN(FALSE);
++}
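++
++/*
++  Example (illustrative): for
++
++    CREATE DEFINER = 'bob'@'%' PROCEDURE p1() ...
++
++  issued by a user other than bob without the SUPER privilege, the check
++  above fails with ER_SPECIFIC_ACCESS_DENIED_ERROR.  For a plain
++
++    CREATE PROCEDURE p1() ...
++
++  lex->definer is NULL, so CURRENT_USER is filled in via
++  create_default_definer(); on a slave thread the routine is additionally
++  marked SP_IS_NOT_SUID for backward compatibility with masters that do
++  not replicate the definer.
++*/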
++
++
++/**
++  Execute command saved in thd and lex->sql_command.
++
++    Before every operation that can request a write lock for a table
++    wait if a global read lock exists. However do not wait if this
++    thread has locked tables already. No new locks can be requested
++    until the other locks are released. The thread that requests the
++    global read lock waits for write locked tables to become unlocked.
++
++    Note that wait_if_global_read_lock() sets a protection against a new
++    global read lock when it succeeds. This needs to be released by
++    start_waiting_global_read_lock() after the operation.
++
++  @param thd                       Thread handle
++
++  @todo
++    - Invalidate the table in the query cache if something changed
++    after unlocking when changes become visible.
++    TODO: this is workaround. right way will be move invalidating in
++    the unlock procedure.
++    - TODO: use check_change_password()
++    - JOIN is not supported yet. TODO
++    - SUSPEND and FOR MIGRATE are not supported yet. TODO
++
++  @retval
++    FALSE       OK
++  @retval
++    TRUE        Error
++*/
++
++int
++mysql_execute_command(THD *thd)
++{
++  int res= FALSE;
++  bool need_start_waiting= FALSE; // have protection against global read lock
++  int  up_result= 0;
++  LEX  *lex= thd->lex;
++  /* first SELECT_LEX (has special meaning for many non-SELECT commands) */
++  SELECT_LEX *select_lex= &lex->select_lex;
++  /* first table of first SELECT_LEX */
++  TABLE_LIST *first_table= select_lex->table_list.first;
++  /* list of all tables in query */
++  TABLE_LIST *all_tables;
++  /* most outer SELECT_LEX_UNIT of query */
++  SELECT_LEX_UNIT *unit= &lex->unit;
++#ifdef HAVE_REPLICATION
++  /* have table map for update for multi-update statement (BUG#37051) */
++  bool have_table_map_for_update= FALSE;
++#endif
++  /* Saved variable value */
++  DBUG_ENTER("mysql_execute_command");
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  thd->work_part_info= 0;
++#endif
++
++  /*
++    In many cases the first table of the main SELECT_LEX has a special
++    meaning => check that it is the first table in the global list and
++    relink it first in the query_tables list if necessary (we need such
++    relinking only for queries with subqueries in the select list; in
++    this case the tables of the subqueries go into the global list first).
++
++    all_tables will differ from first_table only if the outermost
++    SELECT_LEX does not contain tables.
++
++    Because of the above, in places where there should be at least one
++    table in the outermost SELECT_LEX we have the following check:
++    DBUG_ASSERT(first_table == all_tables);
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++  */
++  lex->first_lists_tables_same();
++  /* should be assigned after making first tables same */
++  all_tables= lex->query_tables;
++  /* set context for commands which do not use setup_tables */
++  select_lex->
++    context.resolve_in_table_list_only(select_lex->
++                                       table_list.first);
++
++  /*
++    Reset warning count for each query that uses tables
++    A better approach would be to reset this for any command
++    that is not a SHOW command or a SELECT that only accesses local
++    variables, but for now this is probably good enough.
++    Don't reset warnings when executing a stored routine.
++  */
++  if ((all_tables || !lex->is_single_level_stmt()) && !thd->spcont)
++    mysql_reset_errors(thd, 0);
++
++#ifdef HAVE_REPLICATION
++  if (unlikely(thd->slave_thread))
++  {
++    if (lex->sql_command == SQLCOM_DROP_TRIGGER)
++    {
++      /*
++        When dropping a trigger, we need to load its table name
++        before checking slave filter rules.
++      */
++      add_table_for_trigger(thd, thd->lex->spname, 1, &all_tables);
++      
++      if (!all_tables)
++      {
++        /*
++          If the table name cannot be loaded, the trigger does not exist,
++          possibly because CREATE TRIGGER was previously skipped for this
++          trigger according to the slave filtering rules.
++          Return success without producing any errors in this case.
++        */
++        DBUG_RETURN(0);
++      }
++      
++      // force searching in slave.cc:tables_ok() 
++      all_tables->updating= 1;
++    }
++
++    /*
++      For fix of BUG#37051, the master stores the table map for update
++      in the Query_log_event, and the value is assigned to
++      thd->variables.table_map_for_update before executing the update
++      query.
++
++      If thd->variables.table_map_for_update is set, then we are
++      replicating from a new master and we can use this value to apply
++      filter rules without opening all the tables. However, if
++      thd->variables.table_map_for_update is not set, then we are
++      replicating from an old master, so we just skip this and
++      continue with the old method. And of course, the bug would still
++      exist for old masters.
++    */
++    if (lex->sql_command == SQLCOM_UPDATE_MULTI &&
++        thd->table_map_for_update)
++    {
++      have_table_map_for_update= TRUE;
++      table_map table_map_for_update= thd->table_map_for_update;
++      uint nr= 0;
++      TABLE_LIST *table;
++      for (table=all_tables; table; table=table->next_global, nr++)
++      {
++        if (table_map_for_update & ((table_map)1 << nr))
++          table->updating= TRUE;
++        else
++          table->updating= FALSE;
++      }
++
++      if (all_tables_not_ok(thd, all_tables))
++      {
++        /* we warn the slave SQL thread */
++        my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++        if (thd->one_shot_set)
++          reset_one_shot_variables(thd);
++        DBUG_RETURN(0);
++      }
++      
++      for (table=all_tables; table; table=table->next_global)
++        table->updating= TRUE;
++    }
++    
++    /*
++      Check if the statement should be skipped because of slave filtering
++      rules
++
++      Exceptions are:
++      - UPDATE MULTI: For this statement, we want to check the filtering
++        rules later in the code
++      - SET: we always execute it (Not that many SET commands exist in
++        the binary log anyway -- only 4.1 masters write SET statements,
++	in 5.0 there are no SET statements in the binary log)
++      - DROP TEMPORARY TABLE IF EXISTS: we always execute it (otherwise we
++        have stale files on slave caused by exclusion of one tmp table).
++    */
++    if (!(lex->sql_command == SQLCOM_UPDATE_MULTI) &&
++	!(lex->sql_command == SQLCOM_SET_OPTION) &&
++	!(lex->sql_command == SQLCOM_DROP_TABLE &&
++          lex->drop_temporary && lex->drop_if_exists) &&
++        all_tables_not_ok(thd, all_tables))
++    {
++      /* we warn the slave SQL thread */
++      my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++      if (thd->one_shot_set)
++      {
++        /*
++          It's ok to check thd->one_shot_set here:
++
++          The charsets in a MySQL 5.0 slave can be changed both by a
++          binlogged SET ONE_SHOT statement and by the event-internal
++          charset setting, and these two ways to change charsets do not
++          seem to work together.
++
++          At least there seem to be problems in the rli cache for
++          charsets if we are using ONE_SHOT.  Note that this is normally no
++          problem because either the >= 5.0 slave reads a 4.1 binlog (with
++          ONE_SHOT) *or* a 5.0 binlog (without ONE_SHOT) but never both.
++        */
++        reset_one_shot_variables(thd);
++      }
++      DBUG_RETURN(0);
++    }
++  }
++  else
++  {
++#endif /* HAVE_REPLICATION */
++    /*
++      When the read_only option is set, deny operations which change
++      non-temporary tables, except for the replication thread and 'super' users.
++    */
++    if (deny_updates_if_read_only_option(thd, all_tables))
++    {
++      my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
++      DBUG_RETURN(-1);
++    }
++#ifdef HAVE_REPLICATION
++  } /* endif unlikely slave */
++#endif
++  status_var_increment(thd->status_var.com_stat[lex->sql_command]);
++
++  DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table == FALSE);
++  
++  switch (lex->sql_command) {
++
++  case SQLCOM_SHOW_EVENTS:
++#ifndef HAVE_EVENT_SCHEDULER
++    my_error(ER_NOT_SUPPORTED_YET, MYF(0), "embedded server");
++    break;
++#endif
++  case SQLCOM_SHOW_STATUS_PROC:
++  case SQLCOM_SHOW_STATUS_FUNC:
++    if (!(res= check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE)))
++      res= execute_sqlcom_select(thd, all_tables);
++    break;
++  case SQLCOM_SHOW_STATUS:
++  {
++    system_status_var old_status_var= thd->status_var;
++    thd->initial_status_var= &old_status_var;
++    if (!(res= check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE)))
++      res= execute_sqlcom_select(thd, all_tables);
++    /* Don't log SHOW STATUS commands to slow query log */
++    thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED |
++                           SERVER_QUERY_NO_GOOD_INDEX_USED);
++    /*
++      restore status variables, as we don't want 'show status' to cause
++      changes
++    */
++    pthread_mutex_lock(&LOCK_status);
++    add_diff_to_status(&global_status_var, &thd->status_var,
++                       &old_status_var);
++    thd->status_var= old_status_var;
++    pthread_mutex_unlock(&LOCK_status);
++    break;
++  }
++  case SQLCOM_SHOW_DATABASES:
++  case SQLCOM_SHOW_TABLES:
++  case SQLCOM_SHOW_TRIGGERS:
++  case SQLCOM_SHOW_TABLE_STATUS:
++  case SQLCOM_SHOW_OPEN_TABLES:
++  case SQLCOM_SHOW_PLUGINS:
++  case SQLCOM_SHOW_FIELDS:
++  case SQLCOM_SHOW_KEYS:
++  case SQLCOM_SHOW_VARIABLES:
++  case SQLCOM_SHOW_CHARSETS:
++  case SQLCOM_SHOW_COLLATIONS:
++  case SQLCOM_SHOW_STORAGE_ENGINES:
++  case SQLCOM_SHOW_PROFILE:
++  case SQLCOM_SELECT:
++    thd->status_var.last_query_cost= 0.0;
++    if (all_tables)
++    {
++      res= check_table_access(thd,
++                              lex->exchange ? SELECT_ACL | FILE_ACL :
++                              SELECT_ACL,
++                              all_tables, UINT_MAX, FALSE);
++    }
++    else
++      res= check_access(thd,
++                        lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
++                        any_db, 0, 0, 0, 0);
++
++    if (res)
++      break;
++
++    if (!thd->locked_tables && lex->protect_against_global_read_lock &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      break;
++
++    res= execute_sqlcom_select(thd, all_tables);
++    break;
++  case SQLCOM_PREPARE:
++  {
++    mysql_sql_stmt_prepare(thd);
++    break;
++  }
++  case SQLCOM_EXECUTE:
++  {
++    mysql_sql_stmt_execute(thd);
++    break;
++  }
++  case SQLCOM_DEALLOCATE_PREPARE:
++  {
++    mysql_sql_stmt_close(thd);
++    break;
++  }
++  case SQLCOM_DO:
++    if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
++        open_and_lock_tables(thd, all_tables))
++      goto error;
++
++    res= mysql_do(thd, *lex->insert_list);
++    break;
++
++  case SQLCOM_EMPTY_QUERY:
++    my_ok(thd);
++    break;
++
++  case SQLCOM_HELP:
++    res= mysqld_help(thd,lex->help_arg);
++    break;
++
++#ifndef EMBEDDED_LIBRARY
++  case SQLCOM_PURGE:
++  {
++    if (check_global_access(thd, SUPER_ACL))
++      goto error;
++    /* PURGE MASTER LOGS TO 'file' */
++    res = purge_master_logs(thd, lex->to_log);
++    break;
++  }
++  case SQLCOM_PURGE_BEFORE:
++  {
++    Item *it;
++
++    if (check_global_access(thd, SUPER_ACL))
++      goto error;
++    /* PURGE MASTER LOGS BEFORE 'date' */
++    it= (Item *)lex->value_list.head();
++    if ((!it->fixed && it->fix_fields(lex->thd, &it)) ||
++        it->check_cols(1))
++    {
++      my_error(ER_WRONG_ARGUMENTS, MYF(0), "PURGE LOGS BEFORE");
++      goto error;
++    }
++    it= new Item_func_unix_timestamp(it);
++    /*
++      it is OK to only emulate fix_fields, because we only need
++      the value of the constant
++    */
++    it->quick_fix_field();
++    res = purge_master_logs_before_date(thd, (ulong)it->val_int());
++    break;
++  }
++#endif
++  case SQLCOM_SHOW_WARNS:
++  {
++    res= mysqld_show_warnings(thd, (ulong)
++			      ((1L << (uint) MYSQL_ERROR::WARN_LEVEL_NOTE) |
++			       (1L << (uint) MYSQL_ERROR::WARN_LEVEL_WARN) |
++			       (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR)
++			       ));
++    break;
++  }
++  case SQLCOM_SHOW_ERRORS:
++  {
++    res= mysqld_show_warnings(thd, (ulong)
++			      (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR));
++    break;
++  }
++  case SQLCOM_SHOW_PROFILES:
++  {
++#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
++    thd->profiling.discard_current_query();
++    res= thd->profiling.show_profiles();
++    if (res)
++      goto error;
++#else
++    my_error(ER_FEATURE_DISABLED, MYF(0), "SHOW PROFILES", "enable-profiling");
++    goto error;
++#endif
++    break;
++  }
++  case SQLCOM_SHOW_NEW_MASTER:
++  {
++    if (check_global_access(thd, REPL_SLAVE_ACL))
++      goto error;
++    /* This query doesn't work now. See comment in repl_failsafe.cc */
++#ifndef WORKING_NEW_MASTER
++    my_error(ER_NOT_SUPPORTED_YET, MYF(0), "SHOW NEW MASTER");
++    goto error;
++#else
++    res = show_new_master(thd);
++    break;
++#endif
++  }
++
++#ifdef HAVE_REPLICATION
++  case SQLCOM_SHOW_SLAVE_HOSTS:
++  {
++    if (check_global_access(thd, REPL_SLAVE_ACL))
++      goto error;
++    res = show_slave_hosts(thd);
++    break;
++  }
++  case SQLCOM_SHOW_BINLOG_EVENTS:
++  {
++    if (check_global_access(thd, REPL_SLAVE_ACL))
++      goto error;
++    res = mysql_show_binlog_events(thd);
++    break;
++  }
++#endif
++
++  case SQLCOM_BACKUP_TABLE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
++	check_global_access(thd, FILE_ACL))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res = mysql_backup_table(thd, first_table);
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++  case SQLCOM_RESTORE_TABLE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, INSERT_ACL, all_tables, UINT_MAX, FALSE) ||
++	check_global_access(thd, FILE_ACL))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res = mysql_restore_table(thd, first_table);
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++  case SQLCOM_ASSIGN_TO_KEYCACHE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_access(thd, INDEX_ACL, first_table->db,
++                     &first_table->grant.privilege, 0, 0,
++                     test(first_table->schema_table)))
++      goto error;
++    res= mysql_assign_to_keycache(thd, first_table, &lex->ident);
++    break;
++  }
++  case SQLCOM_PRELOAD_KEYS:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_access(thd, INDEX_ACL, first_table->db,
++                     &first_table->grant.privilege, 0, 0,
++                     test(first_table->schema_table)))
++      goto error;
++    res = mysql_preload_keys(thd, first_table);
++    break;
++  }
++#ifdef HAVE_REPLICATION
++  case SQLCOM_CHANGE_MASTER:
++  {
++    if (check_global_access(thd, SUPER_ACL))
++      goto error;
++    pthread_mutex_lock(&LOCK_active_mi);
++    res = change_master(thd,active_mi);
++    pthread_mutex_unlock(&LOCK_active_mi);
++    break;
++  }
++  case SQLCOM_SHOW_SLAVE_STAT:
++  {
++    /* Accept one of two privileges */
++    if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
++      goto error;
++    pthread_mutex_lock(&LOCK_active_mi);
++    if (active_mi != NULL)
++    {
++      res = show_master_info(thd, active_mi);
++    }
++    else
++    {
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   WARN_NO_MASTER_INFO, ER(WARN_NO_MASTER_INFO));
++      my_ok(thd);
++    }
++    pthread_mutex_unlock(&LOCK_active_mi);
++    break;
++  }
++  case SQLCOM_SHOW_MASTER_STAT:
++  {
++    /* Accept one of two privileges */
++    if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
++      goto error;
++    res = show_binlog_info(thd);
++    break;
++  }
++
++  case SQLCOM_LOAD_MASTER_DATA: // sync with master
++    if (check_global_access(thd, SUPER_ACL))
++      goto error;
++    if (end_active_trans(thd))
++      goto error;
++    res = load_master_data(thd);
++    break;
++#endif /* HAVE_REPLICATION */
++  case SQLCOM_SHOW_ENGINE_STATUS:
++    {
++      if (check_global_access(thd, PROCESS_ACL))
++        goto error;
++      res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_STATUS);
++      break;
++    }
++  case SQLCOM_SHOW_ENGINE_MUTEX:
++    {
++      if (check_global_access(thd, PROCESS_ACL))
++        goto error;
++      res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_MUTEX);
++      break;
++    }
++#ifdef HAVE_REPLICATION
++  case SQLCOM_LOAD_MASTER_TABLE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    DBUG_ASSERT(first_table->db); /* Must be set in the parser */
++
++    if (check_access(thd, CREATE_ACL, first_table->db,
++		     &first_table->grant.privilege, 0, 0,
++                     test(first_table->schema_table)))
++      goto error;				/* purecov: inspected */
++    /* Check that the first table has CREATE privilege */
++    if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0))
++      goto error;
++
++    pthread_mutex_lock(&LOCK_active_mi);
++    /*
++      fetch_master_table will send the error to the client on failure.
++      Give error if the table already exists.
++    */
++    if (!fetch_master_table(thd, first_table->db, first_table->table_name,
++			    active_mi, 0, 0))
++    {
++      my_ok(thd);
++    }
++    pthread_mutex_unlock(&LOCK_active_mi);
++    break;
++  }
++#endif /* HAVE_REPLICATION */
++
++  case SQLCOM_CREATE_TABLE:
++  {
++    /* If CREATE TABLE of non-temporary table, do implicit commit */
++    if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE))
++    {
++      if (end_active_trans(thd))
++      {
++	res= -1;
++	break;
++      }
++    }
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    bool link_to_local;
++    // Skip first table, which is the table we are creating
++    TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local);
++    TABLE_LIST *select_tables= lex->query_tables;
++    /*
++      Code below (especially in mysql_create_table() and select_create
++      methods) may modify HA_CREATE_INFO structure in LEX, so we have to
++      use a copy of this structure to make execution prepared statement-
++      safe. A shallow copy is enough as this code won't modify any memory
++      referenced from this structure.
++    */
++    HA_CREATE_INFO create_info(lex->create_info);
++    /*
++      We need to copy alter_info for the same reasons of re-execution
++      safety, only in case of Alter_info we have to do (almost) a deep
++      copy.
++    */
++    Alter_info alter_info(lex->alter_info, thd->mem_root);
++
++    if (thd->is_fatal_error)
++    {
++      /* If out of memory when creating a copy of alter_info. */
++      res= 1;
++      goto end_with_restore_list;
++    }
++
++    if ((res= create_table_precheck(thd, select_tables, create_table)))
++      goto end_with_restore_list;
++
++    /* Might have been updated in create_table_precheck */
++    create_info.alias= create_table->alias;
++
++#ifdef HAVE_READLINK
++    /* Fix names if symlinked tables */
++    if (append_file_to_dir(thd, &create_info.data_file_name,
++			   create_table->table_name) ||
++	append_file_to_dir(thd, &create_info.index_file_name,
++			   create_table->table_name))
++      goto end_with_restore_list;
++#endif
++    /*
++      If we are using SET CHARSET without DEFAULT, add an implicit
++      DEFAULT to not confuse old users. (This may change).
++    */
++    if ((create_info.used_fields &
++	 (HA_CREATE_USED_DEFAULT_CHARSET | HA_CREATE_USED_CHARSET)) ==
++	HA_CREATE_USED_CHARSET)
++    {
++      create_info.used_fields&= ~HA_CREATE_USED_CHARSET;
++      create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
++      create_info.default_table_charset= create_info.table_charset;
++      create_info.table_charset= 0;
++    }
++    /*
++      The create-select command will open and read-lock the select table
++      and then create, open and write-lock the new table. If a global
++      read lock steps in, we get a deadlock. The write lock waits for
++      the global read lock, while the global read lock waits for the
++      select table to be closed. So we wait until the global readlock is
++      gone before starting both steps. Note that
++      wait_if_global_read_lock() sets a protection against a new global
++      read lock when it succeeds. This needs to be released by
++      start_waiting_global_read_lock(). We protect the normal CREATE
++      TABLE in the same way. That way we avoid creating a new table
++      during a global read lock.
++    */
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++    {
++      res= 1;
++      goto end_with_restore_list;
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    {
++      partition_info *part_info= thd->lex->part_info;
++      if (part_info && !(part_info= thd->lex->part_info->get_clone()))
++      {
++        res= -1;
++        goto end_with_restore_list;
++      }
++      thd->work_part_info= part_info;
++    }
++#endif
++    if (select_lex->item_list.elements)		// With select
++    {
++      select_result *result;
++
++      /*
++        If:
++        a) we are inside an SP and there was a NAME_CONST substitution,
++        b) binlogging is on (STMT mode),
++        c) we log the SP as separate statements
++        then raise a warning, as it may cause problems
++        (see 'NAME_CONST issues' in 'Binary Logging of Stored Programs')
++       */
++      if (thd->query_name_consts && 
++          mysql_bin_log.is_open() &&
++          thd->variables.binlog_format == BINLOG_FORMAT_STMT &&
++          !mysql_bin_log.is_query_in_union(thd, thd->query_id))
++      {
++        List_iterator_fast<Item> it(select_lex->item_list);
++        Item *item;
++        uint splocal_refs= 0;
++        /* Count SP local vars in the top-level SELECT list */
++        while ((item= it++))
++        {
++          if (item->is_splocal())
++            splocal_refs++;
++        }
++        /*
++          If it differs from the number of NAME_CONST substitutions applied,
++          we may have a SOME_FUNC(NAME_CONST()) in the SELECT list,
++          which may cause a problem with the binary log (see BUG#35383),
++          so raise a warning.
++        */
++        if (splocal_refs != thd->query_name_consts)
++          push_warning(thd, 
++                       MYSQL_ERROR::WARN_LEVEL_WARN,
++                       ER_UNKNOWN_ERROR,
++"Invoked routine ran a statement that may cause problems with "
++"binary log, see 'NAME_CONST issues' in 'Binary Logging of Stored Programs' "
++"section of the manual.");
++      }
++      
++      select_lex->options|= SELECT_NO_UNLOCK;
++      unit->set_limit(select_lex);
++
++      /*
++        Disable non-empty MERGE tables with CREATE...SELECT. Too
++        complicated. See Bug #26379. Empty MERGE tables are read-only
++        and don't allow CREATE...SELECT anyway.
++      */
++      if (create_info.used_fields & HA_CREATE_USED_UNION)
++      {
++        my_error(ER_WRONG_OBJECT, MYF(0), create_table->db,
++                 create_table->table_name, "BASE TABLE");
++        res= 1;
++        goto end_with_restore_list;
++      }
++
++      if (!(create_info.options & HA_LEX_CREATE_TMP_TABLE))
++      {
++        lex->link_first_table_back(create_table, link_to_local);
++        create_table->create= TRUE;
++        /* Base table and temporary table are not in the same name space. */
++        create_table->skip_temporary= 1;
++      }
++
++      if (!(res= open_and_lock_tables(thd, lex->query_tables)))
++      {
++        /*
++          Is table which we are changing used somewhere in other parts
++          of query
++        */
++        if (!(create_info.options & HA_LEX_CREATE_TMP_TABLE))
++        {
++          TABLE_LIST *duplicate;
++          create_table= lex->unlink_first_table(&link_to_local);
++
++          if (create_table->view)
++          {
++            if (create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS)
++            {
++              push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                                  ER_TABLE_EXISTS_ERROR,
++                                  ER(ER_TABLE_EXISTS_ERROR),
++                                  create_info.alias);
++              my_ok(thd);
++            }
++            else
++            {
++              my_error(ER_TABLE_EXISTS_ERROR, MYF(0), create_info.alias);
++              res= 1;
++            }
++            goto end_with_restore_list;
++          }
++
++          if ((duplicate= unique_table(thd, create_table, select_tables, 0)))
++          {
++            update_non_unique_table_error(create_table, "CREATE", duplicate);
++            res= 1;
++            goto end_with_restore_list;
++          }
++        }
++        /* If we create merge table, we have to test tables in merge, too */
++        if (create_info.used_fields & HA_CREATE_USED_UNION)
++        {
++          TABLE_LIST *tab;
++          for (tab= create_info.merge_list.first;
++               tab;
++               tab= tab->next_local)
++          {
++            TABLE_LIST *duplicate;
++            if ((duplicate= unique_table(thd, tab, select_tables, 0)))
++            {
++              update_non_unique_table_error(tab, "CREATE", duplicate);
++              res= 1;
++              goto end_with_restore_list;
++            }
++          }
++        }
++
++        /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
++        if (create_info.options & HA_LEX_CREATE_TMP_TABLE)
++          thd->options|= OPTION_KEEP_LOG;
++
++        /*
++          select_create is currently not re-execution friendly and
++          needs to be created for every execution of a PS/SP.
++        */
++        if ((result= new select_create(create_table,
++                                       &create_info,
++                                       &alter_info,
++                                       select_lex->item_list,
++                                       lex->duplicates,
++                                       lex->ignore,
++                                       select_tables)))
++        {
++          /*
++            CREATE from SELECT give its SELECT_LEX for SELECT,
++            and item_list belong to SELECT
++          */
++          res= handle_select(thd, lex, result, 0);
++          delete result;
++        }
++      }
++      else if (!(create_info.options & HA_LEX_CREATE_TMP_TABLE))
++        create_table= lex->unlink_first_table(&link_to_local);
++
++    }
++    else
++    {
++      /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
++      if (create_info.options & HA_LEX_CREATE_TMP_TABLE)
++        thd->options|= OPTION_KEEP_LOG;
++      /* regular create */
++      if (create_info.options & HA_LEX_CREATE_TABLE_LIKE)
++        res= mysql_create_like_table(thd, create_table, select_tables,
++                                     &create_info);
++      else
++      {
++        res= mysql_create_table(thd, create_table->db,
++                                create_table->table_name, &create_info,
++                                &alter_info, 0, 0);
++      }
++      if (!res)
++	my_ok(thd);
++    }
++
++    /* put tables back for PS re-execution */
++end_with_restore_list:
++    lex->link_first_table_back(create_table, link_to_local);
++    break;
++  }
++  case SQLCOM_CREATE_INDEX:
++    /* Fall through */
++  case SQLCOM_DROP_INDEX:
++  /*
++    CREATE INDEX and DROP INDEX are implemented by calling ALTER
++    TABLE with proper arguments.
++
++    In the future ALTER TABLE will notice that the request is to
++    only add indexes and create these one by one for the existing
++    table without having to do a full rebuild.
++  */
++  {
++    /* Prepare stack copies to be re-execution safe */
++    HA_CREATE_INFO create_info;
++    Alter_info alter_info(lex->alter_info, thd->mem_root);
++
++    if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
++      goto error;
++
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_one_table_access(thd, INDEX_ACL, all_tables))
++      goto error; /* purecov: inspected */
++    if (end_active_trans(thd))
++      goto error;
++    /*
++      Currently CREATE INDEX or DROP INDEX cause a full table rebuild
++      and thus classify as slow administrative statements just like
++      ALTER TABLE.
++    */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++
++    bzero((char*) &create_info, sizeof(create_info));
++    create_info.db_type= 0;
++    create_info.row_type= ROW_TYPE_NOT_USED;
++    create_info.default_table_charset= thd->variables.collation_database;
++
++    res= mysql_alter_table(thd, first_table->db, first_table->table_name,
++                           &create_info, first_table, &alter_info,
++                           0, (ORDER*) 0, 0);
++    break;
++  }
++#ifdef HAVE_REPLICATION
++  case SQLCOM_SLAVE_START:
++  {
++    pthread_mutex_lock(&LOCK_active_mi);
++    start_slave(thd,active_mi,1 /* net report*/);
++    pthread_mutex_unlock(&LOCK_active_mi);
++    break;
++  }
++  case SQLCOM_SLAVE_STOP:
++  /*
++    If the client thread has locked tables, a deadlock is possible.
++    Assume that
++    - the client thread does LOCK TABLE t READ.
++    - then the master updates t.
++    - then the SQL slave thread wants to update t,
++      so it waits for the client thread because t is locked by it.
++    - then the client thread does SLAVE STOP.
++      SLAVE STOP waits for the SQL slave thread to terminate its
++      update t, which waits for the client thread because t is locked by it.
++    To prevent that, refuse SLAVE STOP if the
++    client thread has locked tables
++  */
++  if (thd->locked_tables || thd->active_transaction() || thd->global_read_lock)
++  {
++    my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++               ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++    goto error;
++  }
++  {
++    pthread_mutex_lock(&LOCK_active_mi);
++    stop_slave(thd,active_mi,1/* net report*/);
++    pthread_mutex_unlock(&LOCK_active_mi);
++    break;
++  }
++#endif /* HAVE_REPLICATION */
++
++  case SQLCOM_ALTER_TABLE:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    {
++      ulong priv=0;
++      ulong priv_needed= ALTER_ACL;
++      /*
++        Code in mysql_alter_table() may modify its HA_CREATE_INFO argument,
++        so we have to use a copy of this structure to make execution
++        prepared statement- safe. A shallow copy is enough as no memory
++        referenced from this structure will be modified.
++      */
++      HA_CREATE_INFO create_info(lex->create_info);
++      Alter_info alter_info(lex->alter_info, thd->mem_root);
++
++      if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
++        goto error;
++      /*
++        We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well
++        as for RENAME TO, as being done by SQLCOM_RENAME_TABLE
++      */
++      if (alter_info.flags & (ALTER_DROP_PARTITION | ALTER_RENAME))
++        priv_needed|= DROP_ACL;
++
++      /* Must be set in the parser */
++      DBUG_ASSERT(select_lex->db);
++      if (check_access(thd, priv_needed, first_table->db,
++		       &first_table->grant.privilege, 0, 0,
++                       test(first_table->schema_table)) ||
++	  check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0,
++                       is_schema_db(select_lex->db))||
++	  check_merge_table_access(thd, first_table->db,
++				   create_info.merge_list.first))
++	goto error;				/* purecov: inspected */
++      if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0))
++        goto error;
++      if (lex->name.str && !test_all_bits(priv,INSERT_ACL | CREATE_ACL))
++      { // Rename of table
++          TABLE_LIST tmp_table;
++          bzero((char*) &tmp_table,sizeof(tmp_table));
++          tmp_table.table_name= lex->name.str;
++          tmp_table.db=select_lex->db;
++          tmp_table.grant.privilege=priv;
++          if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0,
++              UINT_MAX, 0))
++            goto error;
++      }
++
++      /* Don't yet allow changing of symlinks with ALTER TABLE */
++      if (create_info.data_file_name)
++        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                            WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
++                            "DATA DIRECTORY");
++      if (create_info.index_file_name)
++        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                            WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
++                            "INDEX DIRECTORY");
++      create_info.data_file_name= create_info.index_file_name= NULL;
++      /* ALTER TABLE ends previous transaction */
++      if (end_active_trans(thd))
++	goto error;
++
++      if (!thd->locked_tables &&
++          !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      {
++        res= 1;
++        break;
++      }
++
++      thd->enable_slow_log= opt_log_slow_admin_statements;
++      res= mysql_alter_table(thd, select_lex->db, lex->name.str,
++                             &create_info,
++                             first_table,
++                             &alter_info,
++                             select_lex->order_list.elements,
++                             (ORDER *) select_lex->order_list.first,
++                             lex->ignore);
++      break;
++    }
++  case SQLCOM_RENAME_TABLE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    TABLE_LIST *table;
++    for (table= first_table; table; table= table->next_local->next_local)
++    {
++      if (check_access(thd, ALTER_ACL | DROP_ACL, table->db,
++		       &table->grant.privilege,0,0, test(table->schema_table)) ||
++	  check_access(thd, INSERT_ACL | CREATE_ACL, table->next_local->db,
++		       &table->next_local->grant.privilege, 0, 0,
++                       test(table->next_local->schema_table)))
++	goto error;
++      TABLE_LIST old_list, new_list;
++      /*
++        we do not need to initialize old_list and new_list because we
++        copy table[0] and table->next_local[0] into them below
++      */
++      old_list= table[0];
++      new_list= table->next_local[0];
++      if (check_grant(thd, ALTER_ACL | DROP_ACL, &old_list, 0, 1, 0) ||
++         (!test_all_bits(table->next_local->grant.privilege,
++                         INSERT_ACL | CREATE_ACL) &&
++          check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0)))
++        goto error;
++    }
++
++    if (end_active_trans(thd) || mysql_rename_tables(thd, first_table, 0))
++      goto error;
++    break;
++  }
++#ifndef EMBEDDED_LIBRARY
++  case SQLCOM_SHOW_BINLOGS:
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
++               MYF(0)); /* purecov: inspected */
++    goto error;
++#else
++    {
++      if (check_global_access(thd, SUPER_ACL))
++	goto error;
++      res = show_binlogs(thd);
++      break;
++    }
++#endif
++#endif /* EMBEDDED_LIBRARY */
++  case SQLCOM_SHOW_CREATE:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
++               MYF(0)); /* purecov: inspected */
++    goto error;
++#else
++    {
++      /* Ignore temporary tables if this is "SHOW CREATE VIEW" */
++      if (lex->only_view)
++        first_table->skip_temporary= 1;
++      if (check_show_create_table_access(thd, first_table))
++	goto error;
++      res= mysqld_show_create(thd, first_table);
++      break;
++    }
++#endif
++  case SQLCOM_CHECKSUM:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables,
++                           UINT_MAX, FALSE))
++      goto error; /* purecov: inspected */
++    res = mysql_checksum_table(thd, first_table, &lex->check_opt);
++    break;
++  }
++  case SQLCOM_REPAIR:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
++                           UINT_MAX, FALSE))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res= mysql_repair_table(thd, first_table, &lex->check_opt);
++    /* ! we write after unlocking the table */
++    if (!res && !lex->no_write_to_binlog)
++    {
++      /*
++        Presumably, REPAIR and binlog writing don't require synchronization
++      */
++      res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++    }
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++  case SQLCOM_CHECK:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables,
++                           UINT_MAX, FALSE))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res = mysql_check_table(thd, first_table, &lex->check_opt);
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++  case SQLCOM_ANALYZE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
++                           UINT_MAX, FALSE))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res= mysql_analyze_table(thd, first_table, &lex->check_opt);
++    /* ! we write after unlocking the table */
++    if (!res && !lex->no_write_to_binlog)
++    {
++      /*
++        Presumably, ANALYZE and binlog writing don't require synchronization
++      */
++      res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++    }
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++
++  case SQLCOM_OPTIMIZE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
++                           UINT_MAX, FALSE))
++      goto error; /* purecov: inspected */
++    thd->enable_slow_log= opt_log_slow_admin_statements;
++    res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ?
++      mysql_recreate_table(thd, first_table) :
++      mysql_optimize_table(thd, first_table, &lex->check_opt);
++    /* ! we write after unlocking the table */
++    if (!res && !lex->no_write_to_binlog)
++    {
++      /*
++        Presumably, OPTIMIZE and binlog writing don't require synchronization
++      */
++      res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++    }
++    select_lex->table_list.first= first_table;
++    lex->query_tables=all_tables;
++    break;
++  }
++  case SQLCOM_UPDATE:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (update_precheck(thd, all_tables))
++      break;
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      goto error;
++    DBUG_ASSERT(select_lex->offset_limit == 0);
++    unit->set_limit(select_lex);
++    res= (up_result= mysql_update(thd, all_tables,
++                                  select_lex->item_list,
++                                  lex->value_list,
++                                  select_lex->where,
++                                  select_lex->order_list.elements,
++                                  select_lex->order_list.first,
++                                  unit->select_limit_cnt,
++                                  lex->duplicates, lex->ignore));
++    /* mysql_update returns 2 if we need to switch to multi-update */
++    if (up_result != 2)
++      break;
++    /* Fall through */
++  case SQLCOM_UPDATE_MULTI:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    /* if we switched from normal update, rights are checked */
++    if (up_result != 2)
++    {
++      if ((res= multi_update_precheck(thd, all_tables)))
++        break;
++    }
++    else
++      res= 0;
++
++    /*
++      Protection might have already been acquired if this is a fall-through
++      from the SQLCOM_UPDATE case above.
++    */
++    if (!thd->locked_tables &&
++        lex->sql_command == SQLCOM_UPDATE_MULTI &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      goto error;
++
++    res= mysql_multi_update_prepare(thd);
++
++#ifdef HAVE_REPLICATION
++    /* Check slave filtering rules */
++    if (unlikely(thd->slave_thread && !have_table_map_for_update))
++    {
++      if (all_tables_not_ok(thd, all_tables))
++      {
++        if (res!= 0)
++        {
++          res= 0;             /* don't care about the previous failure */
++          thd->clear_error(); /* filters have the highest priority */
++        }
++        /* we warn the slave SQL thread */
++        my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
++        break;
++      }
++      if (res)
++        break;
++    }
++    else
++    {
++#endif /* HAVE_REPLICATION */
++      if (res)
++        break;
++      if (opt_readonly &&
++	  !(thd->security_ctx->master_access & SUPER_ACL) &&
++	  some_non_temp_table_to_be_updated(thd, all_tables))
++      {
++	my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
++	break;
++      }
++#ifdef HAVE_REPLICATION
++    }  /* unlikely */
++#endif
++
++    res= mysql_multi_update(thd, all_tables,
++                            &select_lex->item_list,
++                            &lex->value_list,
++                            select_lex->where,
++                            select_lex->options,
++                            lex->duplicates, lex->ignore, unit, select_lex);
++    break;
++  }
++  case SQLCOM_REPLACE:
++#ifndef DBUG_OFF
++    if (mysql_bin_log.is_open())
++    {
++      /*
++        Generate an incident log event before writing the real event
++        to the binary log.  We put this event before the statement
++        since that makes it simpler to check that the statement was
++        not executed on the slave (since incidents usually stop the
++        slave).
++
++        Observe that any row events that are generated will be
++        generated before.
++
++        This is only for testing purposes and will not be present in a
++        release build.
++      */
++
++      Incident incident= INCIDENT_NONE;
++      DBUG_PRINT("debug", ("Just before generate_incident()"));
++      DBUG_EXECUTE_IF("incident_database_resync_on_replace",
++                      incident= INCIDENT_LOST_EVENTS;);
++      if (incident)
++      {
++        Incident_log_event ev(thd, incident);
++        (void) mysql_bin_log.write(&ev);        /* error is ignored */
++        if (mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE))
++        {
++          res= 1;
++          break;
++        }
++      }
++      DBUG_PRINT("debug", ("Just after generate_incident()"));
++    }
++#endif
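++    /* Fall through: REPLACE is handled by the INSERT code path */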
++  case SQLCOM_INSERT:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if ((res= insert_precheck(thd, all_tables)))
++      break;
++
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++    {
++      res= 1;
++      break;
++    }
++
++    res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values,
++		      lex->update_list, lex->value_list,
++                      lex->duplicates, lex->ignore);
++
++    /*
++      If we have inserted into a VIEW, and the base table has
++      AUTO_INCREMENT column, but this column is not accessible through
++      a view, then we should restore LAST_INSERT_ID to the value it
++      had before the statement.
++    */
++    if (first_table->view && !first_table->contain_auto_increment)
++      thd->first_successful_insert_id_in_cur_stmt=
++        thd->first_successful_insert_id_in_prev_stmt;
++
++    DBUG_EXECUTE_IF("after_mysql_insert",
++                    {
++                      const char act[]=
++                        "now "
++                        "wait_for signal.continue";
++                      DBUG_ASSERT(opt_debug_sync_timeout > 0);
++                      DBUG_ASSERT(!debug_sync_set_action(current_thd,
++                                                         STRING_WITH_LEN(act)));
++                    };);
++    break;
++  }
++  case SQLCOM_REPLACE_SELECT:
++  case SQLCOM_INSERT_SELECT:
++  {
++    select_result *sel_result;
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if ((res= insert_precheck(thd, all_tables)))
++      break;
++
++    /* Fix lock for first table */
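++    /* (INSERT ... SELECT ignores DELAYED, hence the plain write lock) */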
++    if (first_table->lock_type == TL_WRITE_DELAYED)
++      first_table->lock_type= TL_WRITE;
++
++    /* Don't unlock tables until command is written to binary log */
++    select_lex->options|= SELECT_NO_UNLOCK;
++
++    unit->set_limit(select_lex);
++
++    if (! thd->locked_tables &&
++        ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1)))
++    {
++      res= 1;
++      break;
++    }
++
++    if (!(res= open_and_lock_tables(thd, all_tables)))
++    {
++      /* Skip first table, which is the table we are inserting in */
++      TABLE_LIST *second_table= first_table->next_local;
++      select_lex->table_list.first= second_table;
++      select_lex->context.table_list= 
++        select_lex->context.first_name_resolution_table= second_table;
++      res= mysql_insert_select_prepare(thd);
++      if (!res && (sel_result= new select_insert(first_table,
++                                                 first_table->table,
++                                                 &lex->field_list,
++                                                 &lex->update_list,
++                                                 &lex->value_list,
++                                                 lex->duplicates,
++                                                 lex->ignore)))
++      {
++	res= handle_select(thd, lex, sel_result, OPTION_SETUP_TABLES_DONE);
++        /*
++          Invalidate the table in the query cache if something changed
++          after unlocking when changes become visible.
++          TODO: this is a workaround. The right way would be to move the
++          invalidation into the unlock procedure.
++        */
++        if (!res && first_table->lock_type ==  TL_WRITE_CONCURRENT_INSERT &&
++            thd->lock)
++        {
++          /* INSERT ... SELECT should invalidate only the very first table */
++          TABLE_LIST *save_table= first_table->next_local;
++          first_table->next_local= 0;
++          query_cache_invalidate3(thd, first_table, 1);
++          first_table->next_local= save_table;
++        }
++        delete sel_result;
++      }
++      /* revert changes for SP */
++      select_lex->table_list.first= first_table;
++    }
++
++    /*
++      If we have inserted into a VIEW, and the base table has
++      AUTO_INCREMENT column, but this column is not accessible through
++      a view, then we should restore LAST_INSERT_ID to the value it
++      had before the statement.
++    */
++    if (first_table->view && !first_table->contain_auto_increment)
++      thd->first_successful_insert_id_in_cur_stmt=
++        thd->first_successful_insert_id_in_prev_stmt;
++
++    break;
++  }
++  case SQLCOM_TRUNCATE:
++    if (end_active_trans(thd))
++    {
++      res= -1;
++      break;
++    }
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_one_table_access(thd, DROP_ACL, all_tables))
++      goto error;
++    /*
++      Don't allow this within a transaction because we re-generate
++      the table.
++    */
++    if (thd->locked_tables || thd->active_transaction())
++    {
++      my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                 ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++      goto error;
++    }
++    if (!(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      goto error;
++    res= mysql_truncate(thd, first_table, 0);
++    break;
++  case SQLCOM_DELETE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if ((res= delete_precheck(thd, all_tables)))
++      break;
++    DBUG_ASSERT(select_lex->offset_limit == 0);
++    unit->set_limit(select_lex);
++
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++    {
++      res= 1;
++      break;
++    }
++
++    res = mysql_delete(thd, all_tables, select_lex->where,
++                       &select_lex->order_list,
++                       unit->select_limit_cnt, select_lex->options,
++                       FALSE);
++    break;
++  }
++  case SQLCOM_DELETE_MULTI:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first;
++    multi_delete *del_result;
++
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++    {
++      res= 1;
++      break;
++    }
++
++    if ((res= multi_delete_precheck(thd, all_tables)))
++      break;
++
++    /* condition will be TRUE on SP re-execution */
++    if (select_lex->item_list.elements != 0)
++      select_lex->item_list.empty();
++    if (add_item_to_list(thd, new Item_null()))
++      goto error;
++
++    thd_proc_info(thd, "init");
++    if ((res= open_and_lock_tables(thd, all_tables)))
++      break;
++
++    if ((res= mysql_multi_delete_prepare(thd)))
++      goto error;
++
++    if (!thd->is_fatal_error &&
++        (del_result= new multi_delete(aux_tables, lex->table_count)))
++    {
++      res= mysql_select(thd, &select_lex->ref_pointer_array,
++			select_lex->get_table_list(),
++			select_lex->with_wild,
++			select_lex->item_list,
++			select_lex->where,
++			0, (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL,
++			(ORDER *)NULL,
++			(select_lex->options | thd->options |
++			SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
++                        OPTION_SETUP_TABLES_DONE) & ~OPTION_BUFFER_RESULT,
++			del_result, unit, select_lex);
++      res|= thd->is_error();
++      if (res)
++        del_result->abort();
++      delete del_result;
++    }
++    else
++      res= TRUE;                                // Error
++    break;
++  }
++  case SQLCOM_DROP_TABLE:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (!lex->drop_temporary)
++    {
++      if (check_table_access(thd, DROP_ACL, all_tables, UINT_MAX, FALSE))
++	goto error;				/* purecov: inspected */
++      if (end_active_trans(thd))
++        goto error;
++    }
++    else
++    {
++      /* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */
++      thd->options|= OPTION_KEEP_LOG;
++    }
++    /* DDL and binlog write order protected by LOCK_open */
++    res= mysql_rm_table(thd, first_table, lex->drop_if_exists,
++			lex->drop_temporary);
++  }
++  break;
++  case SQLCOM_SHOW_PROCESSLIST:
++    if (!thd->security_ctx->priv_user[0] &&
++        check_global_access(thd,PROCESS_ACL))
++      break;
++    mysqld_list_processes(thd,
++			  (thd->security_ctx->master_access & PROCESS_ACL ?
++                           NullS :
++                           thd->security_ctx->priv_user),
++                          lex->verbose);
++    break;
++  case SQLCOM_SHOW_AUTHORS:
++    res= mysqld_show_authors(thd);
++    break;
++  case SQLCOM_SHOW_CONTRIBUTORS:
++    res= mysqld_show_contributors(thd);
++    break;
++  case SQLCOM_SHOW_PRIVILEGES:
++    res= mysqld_show_privileges(thd);
++    break;
++  case SQLCOM_SHOW_COLUMN_TYPES:
++    res= mysqld_show_column_types(thd);
++    break;
++  case SQLCOM_SHOW_ENGINE_LOGS:
++#ifdef DONT_ALLOW_SHOW_COMMANDS
++    my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
++               MYF(0));	/* purecov: inspected */
++    goto error;
++#else
++    {
++      if (check_access(thd, FILE_ACL, any_db,0,0,0,0))
++	goto error;
++      res= ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_LOGS);
++      break;
++    }
++#endif
++  case SQLCOM_CHANGE_DB:
++  {
++    LEX_STRING db_str= { (char *) select_lex->db, strlen(select_lex->db) };
++
++    if (!mysql_change_db(thd, &db_str, FALSE))
++      my_ok(thd);
++
++    break;
++  }
++
++  case SQLCOM_LOAD:
++  {
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    uint privilege= (lex->duplicates == DUP_REPLACE ?
++		     INSERT_ACL | DELETE_ACL : INSERT_ACL) |
++                    (lex->local_file ? 0 : FILE_ACL);
++
++    if (lex->local_file)
++    {
++      if (!(thd->client_capabilities & CLIENT_LOCAL_FILES) ||
++          !opt_local_infile)
++      {
++	my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), MYF(0));
++	goto error;
++      }
++    }
++
++    if (check_one_table_access(thd, privilege, all_tables))
++      goto error;
++
++    if (!thd->locked_tables &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      goto error;
++
++    res= mysql_load(thd, lex->exchange, first_table, lex->field_list,
++                    lex->update_list, lex->value_list, lex->duplicates,
++                    lex->ignore, (bool) lex->local_file);
++    break;
++  }
++
++  case SQLCOM_SET_OPTION:
++  {
++    List<set_var_base> *lex_var_list= &lex->var_list;
++
++    if (lex->autocommit && end_active_trans(thd))
++      goto error;
++
++    if ((check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
++	 open_and_lock_tables(thd, all_tables)))
++      goto error;
++    if (lex->one_shot_set && not_all_support_one_shot(lex_var_list))
++    {
++      my_error(ER_RESERVED_SYNTAX, MYF(0), "SET ONE_SHOT");
++      goto error;
++    }
++    if (!(res= sql_set_variables(thd, lex_var_list)))
++    {
++      /*
++        If the previous command was a SET ONE_SHOT, we don't want to forget
++        about the ONE_SHOT property of that SET. So we use a |= instead of = .
++      */
++      thd->one_shot_set|= lex->one_shot_set;
++      my_ok(thd);
++    }
++    else
++    {
++      /*
++        We encountered some sort of error, but no message was sent.
++        Send something semi-generic here since we don't know which
++        assignment in the list caused the error.
++      */
++      if (!thd->is_error())
++        my_error(ER_WRONG_ARGUMENTS,MYF(0),"SET");
++      goto error;
++    }
++
++    break;
++  }
++
++  case SQLCOM_UNLOCK_TABLES:
++    /*
++      It is critical for mysqldump --single-transaction --master-data that
++      UNLOCK TABLES does not implicitly commit a connection which has only
++      done FLUSH TABLES WITH READ LOCK + BEGIN. If this assumption becomes
++      false, mysqldump will not work.
++    */
++    unlock_locked_tables(thd);
++    if (thd->options & OPTION_TABLE_LOCK)
++    {
++      end_active_trans(thd);
++      thd->options&= ~(OPTION_TABLE_LOCK);
++    }
++    if (thd->global_read_lock)
++      unlock_global_read_lock(thd);
++    my_ok(thd);
++    break;
++  case SQLCOM_LOCK_TABLES:
++    unlock_locked_tables(thd);
++    /* we must end the transaction first, regardless of anything */
++    if (end_active_trans(thd))
++      goto error;
++    if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables,
++                           UINT_MAX, FALSE))
++      goto error;
++    if (lex->protect_against_global_read_lock &&
++        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
++      goto error;
++    thd->in_lock_tables=1;
++    thd->options|= OPTION_TABLE_LOCK;
++
++    if (!(res= simple_open_n_lock_tables(thd, all_tables)))
++    {
++#ifdef HAVE_QUERY_CACHE
++      if (thd->variables.query_cache_wlock_invalidate)
++	query_cache.invalidate_locked_for_write(first_table);
++#endif /*HAVE_QUERY_CACHE*/
++      thd->locked_tables=thd->lock;
++      thd->lock=0;
++      my_ok(thd);
++    }
++    else
++    {
++      /* 
++        Need to end the current transaction, so the storage engine (InnoDB)
++        can free its locks if LOCK TABLES locked some tables before finding
++        that it can't lock a table in its list
++      */
++      ha_autocommit_or_rollback(thd, 1);
++      end_active_trans(thd);
++      thd->options&= ~(OPTION_TABLE_LOCK);
++    }
++    thd->in_lock_tables=0;
++    break;
++  case SQLCOM_CREATE_DB:
++  {
++    /*
++      As mysql_create_db() may modify HA_CREATE_INFO structure passed to
++      it, we need to use a copy of LEX::create_info to make execution
++      prepared statement- safe.
++    */
++    HA_CREATE_INFO create_info(lex->create_info);
++    if (end_active_trans(thd))
++    {
++      res= -1;
++      break;
++    }
++    char *alias;
++    if (!(alias=thd->strmake(lex->name.str, lex->name.length)) ||
++        check_db_name(&lex->name))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
++      break;
++    }
++    /*
++      If in a slave thread :
++      CREATE DATABASE DB was certainly not preceded by USE DB.
++      For that reason, db_ok() in sql/slave.cc did not check the
++      do_db/ignore_db. And as this query involves no tables, tables_ok()
++      above was not called. So we have to check rules again here.
++    */
++#ifdef HAVE_REPLICATION
++    if (thd->slave_thread && 
++	(!rpl_filter->db_ok(lex->name.str) ||
++	 !rpl_filter->db_ok_with_wild_table(lex->name.str)))
++    {
++      my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++      break;
++    }
++#endif
++    if (check_access(thd,CREATE_ACL,lex->name.str, 0, 1, 0,
++                     is_schema_db(lex->name.str, lex->name.length)))
++      break;
++    res= mysql_create_db(thd,(lower_case_table_names == 2 ? alias :
++                              lex->name.str), &create_info, 0);
++    break;
++  }
++  case SQLCOM_DROP_DB:
++  {
++    if (end_active_trans(thd))
++    {
++      res= -1;
++      break;
++    }
++    if (check_db_name(&lex->name))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
++      break;
++    }
++    /*
++      If in a slave thread :
++      DROP DATABASE DB may not be preceded by USE DB.
++      For that reason, maybe db_ok() in sql/slave.cc did not check the 
++      do_db/ignore_db. And as this query involves no tables, tables_ok()
++      above was not called. So we have to check rules again here.
++    */
++#ifdef HAVE_REPLICATION
++    if (thd->slave_thread && 
++	(!rpl_filter->db_ok(lex->name.str) ||
++	 !rpl_filter->db_ok_with_wild_table(lex->name.str)))
++    {
++      my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++      break;
++    }
++#endif
++    if (check_access(thd,DROP_ACL,lex->name.str,0,1,0,
++                     is_schema_db(lex->name.str, lex->name.length)))
++      break;
++    if (thd->locked_tables || thd->active_transaction())
++    {
++      my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                 ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++      goto error;
++    }
++    res= mysql_rm_db(thd, lex->name.str, lex->drop_if_exists, 0);
++    break;
++  }
++  case SQLCOM_ALTER_DB_UPGRADE:
++  {
++    LEX_STRING *db= & lex->name;
++    if (end_active_trans(thd))
++    {
++      res= 1;
++      break;
++    }
++#ifdef HAVE_REPLICATION
++    if (thd->slave_thread && 
++       (!rpl_filter->db_ok(db->str) ||
++        !rpl_filter->db_ok_with_wild_table(db->str)))
++    {
++      res= 1;
++      my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++      break;
++    }
++#endif
++    if (check_db_name(db))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), db->str);
++      break;
++    }
++    if (check_access(thd, ALTER_ACL, db->str, 0, 1, 0,
++                     is_schema_db(db->str, db->length)) ||
++        check_access(thd, DROP_ACL, db->str, 0, 1, 0,
++                     is_schema_db(db->str, db->length)) ||
++        check_access(thd, CREATE_ACL, db->str, 0, 1, 0,
++                     is_schema_db(db->str, db->length)))
++    {
++      res= 1;
++      break;
++    }
++    if (thd->locked_tables || thd->active_transaction())
++    {
++      res= 1;
++      my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                 ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++      goto error;
++    }
++
++    res= mysql_upgrade_db(thd, db);
++    if (!res)
++      my_ok(thd);
++    break;
++  }
++  case SQLCOM_ALTER_DB:
++  {
++    LEX_STRING *db= &lex->name;
++    HA_CREATE_INFO create_info(lex->create_info);
++    if (check_db_name(db))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), db->str);
++      break;
++    }
++    /*
++      If in a slave thread :
++      ALTER DATABASE DB may not be preceded by USE DB.
++      For that reason, maybe db_ok() in sql/slave.cc did not check the
++      do_db/ignore_db. And as this query involves no tables, tables_ok()
++      above was not called. So we have to check rules again here.
++    */
++#ifdef HAVE_REPLICATION
++    if (thd->slave_thread &&
++	(!rpl_filter->db_ok(db->str) ||
++	 !rpl_filter->db_ok_with_wild_table(db->str)))
++    {
++      my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
++      break;
++    }
++#endif
++    if (check_access(thd, ALTER_ACL, db->str, 0, 1, 0,
++                     is_schema_db(db->str, db->length)))
++      break;
++    if (thd->locked_tables || thd->active_transaction())
++    {
++      my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                 ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++      goto error;
++    }
++    res= mysql_alter_db(thd, db->str, &create_info);
++    break;
++  }
++  case SQLCOM_SHOW_CREATE_DB:
++  {
++    DBUG_EXECUTE_IF("4x_server_emul",
++                    my_error(ER_UNKNOWN_ERROR, MYF(0)); goto error;);
++    if (check_db_name(&lex->name))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), lex->name.str);
++      break;
++    }
++    res= mysqld_show_create_db(thd, lex->name.str, &lex->create_info);
++    break;
++  }
++  case SQLCOM_CREATE_EVENT:
++  case SQLCOM_ALTER_EVENT:
++  #ifdef HAVE_EVENT_SCHEDULER
++  do
++  {
++    DBUG_ASSERT(lex->event_parse_data);
++    if (lex->table_or_sp_used())
++    {
++      my_error(ER_NOT_SUPPORTED_YET, MYF(0), "Usage of subqueries or stored "
++               "function calls as part of this statement");
++      break;
++    }
++
++    res= sp_process_definer(thd);
++    if (res)
++      break;
++
++    switch (lex->sql_command) {
++    case SQLCOM_CREATE_EVENT:
++    {
++      bool if_not_exists= (lex->create_info.options &
++                           HA_LEX_CREATE_IF_NOT_EXISTS);
++      res= Events::create_event(thd, lex->event_parse_data, if_not_exists);
++      break;
++    }
++    case SQLCOM_ALTER_EVENT:
++      res= Events::update_event(thd, lex->event_parse_data,
++                                lex->spname ? &lex->spname->m_db : NULL,
++                                lex->spname ? &lex->spname->m_name : NULL);
++      break;
++    default:
++      DBUG_ASSERT(0);
++    }
++    DBUG_PRINT("info",("DDL error code=%d", res));
++    if (!res)
++      my_ok(thd);
++
++  } while (0);
++  /* Don't do it if we are inside an SP */
++  if (!thd->spcont)
++  {
++    delete lex->sphead;
++    lex->sphead= NULL;
++  }
++  /* lex->unit.cleanup() is called outside, no need to call it here */
++  break;
++  case SQLCOM_SHOW_CREATE_EVENT:
++    res= Events::show_create_event(thd, lex->spname->m_db,
++                                   lex->spname->m_name);
++    break;
++  case SQLCOM_DROP_EVENT:
++    if (!(res= Events::drop_event(thd,
++                                  lex->spname->m_db, lex->spname->m_name,
++                                  lex->drop_if_exists)))
++      my_ok(thd);
++    break;
++#else
++    my_error(ER_NOT_SUPPORTED_YET,MYF(0),"embedded server");
++    break;
++#endif
++  case SQLCOM_CREATE_FUNCTION:                  // UDF function
++  {
++    if (check_access(thd,INSERT_ACL,"mysql",0,1,0,0))
++      break;
++#ifdef HAVE_DLOPEN
++    if (!(res = mysql_create_function(thd, &lex->udf)))
++      my_ok(thd);
++#else
++    my_error(ER_CANT_OPEN_LIBRARY, MYF(0), lex->udf.dl, 0, "feature disabled");
++    res= TRUE;
++#endif
++    break;
++  }
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  case SQLCOM_CREATE_USER:
++  {
++    if (check_access(thd, INSERT_ACL, "mysql", 0, 1, 1, 0) &&
++        check_global_access(thd,CREATE_USER_ACL))
++      break;
++    if (end_active_trans(thd))
++      goto error;
++    /* Conditionally writes to binlog */
++    if (!(res= mysql_create_user(thd, lex->users_list)))
++      my_ok(thd);
++    break;
++  }
++  case SQLCOM_DROP_USER:
++  {
++    if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 1, 0) &&
++        check_global_access(thd,CREATE_USER_ACL))
++      break;
++    if (end_active_trans(thd))
++      goto error;
++    /* Conditionally writes to binlog */
++    if (!(res= mysql_drop_user(thd, lex->users_list)))
++      my_ok(thd);
++    break;
++  }
++  case SQLCOM_RENAME_USER:
++  {
++    if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1, 0) &&
++        check_global_access(thd,CREATE_USER_ACL))
++      break;
++    if (end_active_trans(thd))
++      goto error;
++    /* Conditionally writes to binlog */
++    if (!(res= mysql_rename_user(thd, lex->users_list)))
++      my_ok(thd);
++    break;
++  }
++  case SQLCOM_REVOKE_ALL:
++  {
++    if (end_active_trans(thd))
++      goto error;
++    if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1, 0) &&
++        check_global_access(thd,CREATE_USER_ACL))
++      break;
++
++    /* Replicate current user as grantor */
++    thd->binlog_invoker();
++
++    /* Conditionally writes to binlog */
++    if (!(res = mysql_revoke_all(thd, lex->users_list)))
++      my_ok(thd);
++    break;
++  }
++  case SQLCOM_REVOKE:
++  case SQLCOM_GRANT:
++  {
++    if (end_active_trans(thd))
++      goto error;
++
++    if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL,
++		     first_table ?  first_table->db : select_lex->db,
++		     first_table ? &first_table->grant.privilege : 0,
++		     first_table ? 0 : 1, 0,
++                     first_table ? (bool) first_table->schema_table :
++                     select_lex->db ?
++                     is_schema_db(select_lex->db) : 0))
++      goto error;
++
++    /* Replicate current user as grantor */
++    thd->binlog_invoker();
++
++    if (thd->security_ctx->user)              // If not replication
++    {
++      LEX_USER *user, *tmp_user;
++
++      List_iterator <LEX_USER> user_list(lex->users_list);
++      while ((tmp_user= user_list++))
++      {
++        if (!(user= get_current_user(thd, tmp_user)))
++          goto error;
++        if (specialflag & SPECIAL_NO_RESOLVE &&
++            hostname_requires_resolving(user->host.str))
++          push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                              ER_WARN_HOSTNAME_WONT_WORK,
++                              ER(ER_WARN_HOSTNAME_WONT_WORK),
++                              user->host.str);
++        // Are we trying to change a password of another user
++        DBUG_ASSERT(user->host.str != 0);
++        if (strcmp(thd->security_ctx->user, user->user.str) ||
++            my_strcasecmp(system_charset_info,
++                          user->host.str, thd->security_ctx->host_or_ip))
++        {
++          // TODO: use check_change_password()
++          if (is_acl_user(user->host.str, user->user.str) &&
++              user->password.str &&
++              check_access(thd, UPDATE_ACL,"mysql",0,1,1,0))
++          {
++            my_message(ER_PASSWORD_NOT_ALLOWED,
++                       ER(ER_PASSWORD_NOT_ALLOWED), MYF(0));
++            goto error;
++          }
++        }
++      }
++    }
++    if (first_table)
++    {
++      if (lex->type == TYPE_ENUM_PROCEDURE ||
++          lex->type == TYPE_ENUM_FUNCTION)
++      {
++        uint grants= lex->all_privileges 
++		   ? (PROC_ACLS & ~GRANT_ACL) | (lex->grant & GRANT_ACL)
++		   : lex->grant;
++        if (check_grant_routine(thd, grants | GRANT_ACL, all_tables,
++                                lex->type == TYPE_ENUM_PROCEDURE, 0))
++	  goto error;
++        /* Conditionally writes to binlog */
++        res= mysql_routine_grant(thd, all_tables,
++                                 lex->type == TYPE_ENUM_PROCEDURE, 
++                                 lex->users_list, grants,
++                                 lex->sql_command == SQLCOM_REVOKE, TRUE);
++        if (!res)
++          my_ok(thd);
++      }
++      else
++      {
++	if (check_grant(thd,(lex->grant | lex->grant_tot_col | GRANT_ACL),
++                        all_tables, 0, UINT_MAX, 0))
++	  goto error;
++        /* Conditionally writes to binlog */
++        res= mysql_table_grant(thd, all_tables, lex->users_list,
++			       lex->columns, lex->grant,
++			       lex->sql_command == SQLCOM_REVOKE);
++      }
++    }
++    else
++    {
++      if (lex->columns.elements || lex->type)
++      {
++	my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE),
++                   MYF(0));
++        goto error;
++      }
++      else
++	/* Conditionally writes to binlog */
++	res = mysql_grant(thd, select_lex->db, lex->users_list, lex->grant,
++			  lex->sql_command == SQLCOM_REVOKE);
++      if (!res)
++      {
++	if (lex->sql_command == SQLCOM_GRANT)
++	{
++	  List_iterator <LEX_USER> str_list(lex->users_list);
++	  LEX_USER *user, *tmp_user;
++	  while ((tmp_user=str_list++))
++          {
++            if (!(user= get_current_user(thd, tmp_user)))
++              goto error;
++	    reset_mqh(user, 0);
++          }
++	}
++      }
++    }
++    break;
++  }
++#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
++  case SQLCOM_RESET:
++    /*
++      RESET commands are never written to the binary log, so we have to
++      initialize this variable because RESET shares the same code as FLUSH
++    */
++    lex->no_write_to_binlog= 1;
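++    /* Intentional fall through to SQLCOM_FLUSH */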
++  case SQLCOM_FLUSH:
++  {
++    int write_to_binlog;
++    if (check_global_access(thd,RELOAD_ACL))
++      goto error;
++
++    /*
++      reload_acl_and_cache() will tell us if we are allowed to write to the
++      binlog or not.
++    */
++    if (!reload_acl_and_cache(thd, lex->type, first_table, &write_to_binlog))
++    {
++      /*
++        We WANT to write and we CAN write.
++        ! we write after unlocking the table.
++      */
++      /*
++        Presumably, RESET and binlog writing don't require synchronization
++      */
++
++      if (write_to_binlog > 0)  // we should write
++      { 
++        if (!lex->no_write_to_binlog)
++          res= write_bin_log(thd, FALSE, thd->query(), thd->query_length());
++      } else if (write_to_binlog < 0) 
++      {
++        /* 
++           We should not write, but rather report error because 
++           reload_acl_and_cache binlog interactions failed 
++         */
++        res= 1;
++      } 
++
++      if (!res)
++        my_ok(thd);
++    } 
++    
++    break;
++  }
++  case SQLCOM_KILL:
++  {
++    Item *it= (Item *)lex->value_list.head();
++
++    if (lex->table_or_sp_used())
++    {
++      my_error(ER_NOT_SUPPORTED_YET, MYF(0), "Usage of subqueries or stored "
++               "function calls as part of this statement");
++      break;
++    }
++
++    if ((!it->fixed && it->fix_fields(lex->thd, &it)) || it->check_cols(1))
++    {
++      my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
++		 MYF(0));
++      goto error;
++    }
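++    /* ONLY_KILL_QUERY terminates only the running statement;
++       otherwise the whole connection with the given id is killed. */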
++    sql_kill(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY);
++    break;
++  }
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  case SQLCOM_SHOW_GRANTS:
++  {
++    LEX_USER *grant_user= get_current_user(thd, lex->grant_user);
++    if (!grant_user)
++      goto error;
++    if ((thd->security_ctx->priv_user &&
++	 !strcmp(thd->security_ctx->priv_user, grant_user->user.str)) ||
++	!check_access(thd, SELECT_ACL, "mysql",0,1,0,0))
++    {
++      res = mysql_show_grants(thd, grant_user);
++    }
++    break;
++  }
++#endif
++  case SQLCOM_HA_OPEN:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE))
++      goto error;
++    res= mysql_ha_open(thd, first_table, 0);
++    break;
++  case SQLCOM_HA_CLOSE:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    res= mysql_ha_close(thd, first_table);
++    break;
++  case SQLCOM_HA_READ:
++    DBUG_ASSERT(first_table == all_tables && first_table != 0);
++    /*
++      There is no need to check for table permissions here, because
++      if a user has no permissions to read a table, he won't be
++      able to open it (with SQLCOM_HA_OPEN) in the first place.
++    */
++    unit->set_limit(select_lex);
++    res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str,
++                       lex->insert_list, lex->ha_rkey_mode, select_lex->where,
++                       unit->select_limit_cnt, unit->offset_limit_cnt);
++    break;
++
++  case SQLCOM_BEGIN:
++    if (thd->transaction.xid_state.xa_state != XA_NOTR)
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    if (begin_trans(thd))
++      goto error;
++    if (lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT)
++    {
++      if (ha_start_consistent_snapshot(thd))
++        goto error;
++    }
++    my_ok(thd);
++    break;
++  case SQLCOM_COMMIT:
++    if (end_trans(thd, lex->tx_release ? COMMIT_RELEASE :
++                              lex->tx_chain ? COMMIT_AND_CHAIN : COMMIT))
++      goto error;
++    my_ok(thd);
++    break;
++  case SQLCOM_ROLLBACK:
++    if (end_trans(thd, lex->tx_release ? ROLLBACK_RELEASE :
++                              lex->tx_chain ? ROLLBACK_AND_CHAIN : ROLLBACK))
++      goto error;
++    my_ok(thd);
++    break;
++  case SQLCOM_RELEASE_SAVEPOINT:
++  {
++    SAVEPOINT *sv;
++    for (sv=thd->transaction.savepoints; sv; sv=sv->prev)
++    {
++      if (my_strnncoll(system_charset_info,
++                       (uchar *)lex->ident.str, lex->ident.length,
++                       (uchar *)sv->name, sv->length) == 0)
++        break;
++    }
++    if (sv)
++    {
++      if (ha_release_savepoint(thd, sv))
++        res= TRUE; // cannot happen
++      else
++        my_ok(thd);
++      thd->transaction.savepoints=sv->prev;
++    }
++    else
++      my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "SAVEPOINT", lex->ident.str);
++    break;
++  }
++  case SQLCOM_ROLLBACK_TO_SAVEPOINT:
++  {
++    SAVEPOINT *sv;
++    for (sv=thd->transaction.savepoints; sv; sv=sv->prev)
++    {
++      if (my_strnncoll(system_charset_info,
++                       (uchar *)lex->ident.str, lex->ident.length,
++                       (uchar *)sv->name, sv->length) == 0)
++        break;
++    }
++    if (sv)
++    {
++      if (ha_rollback_to_savepoint(thd, sv))
++        res= TRUE; // cannot happen
++      else
++      {
++        if (((thd->options & OPTION_KEEP_LOG) || 
++             thd->transaction.all.modified_non_trans_table) &&
++            !thd->slave_thread)
++          push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                       ER_WARNING_NOT_COMPLETE_ROLLBACK,
++                       ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
++        my_ok(thd);
++      }
++      thd->transaction.savepoints=sv;
++    }
++    else
++      my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "SAVEPOINT", lex->ident.str);
++    break;
++  }
++  case SQLCOM_SAVEPOINT:
++    if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ||
++          thd->in_sub_stmt) || !opt_using_transactions)
++      my_ok(thd);
++    else
++    {
++      SAVEPOINT **sv, *newsv;
++      for (sv=&thd->transaction.savepoints; *sv; sv=&(*sv)->prev)
++      {
++        if (my_strnncoll(system_charset_info,
++                         (uchar *)lex->ident.str, lex->ident.length,
++                         (uchar *)(*sv)->name, (*sv)->length) == 0)
++          break;
++      }
++      if (*sv) /* old savepoint of the same name exists */
++      {
++        newsv=*sv;
++        ha_release_savepoint(thd, *sv); // it cannot fail
++        *sv=(*sv)->prev;
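++        /* The unlinked entry's memory is reused for the new savepoint;
++           it is re-linked at the head of the list below. */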
++      }
++      else if ((newsv=(SAVEPOINT *) alloc_root(&thd->transaction.mem_root,
++                                               savepoint_alloc_size)) == 0)
++      {
++        my_error(ER_OUT_OF_RESOURCES, MYF(0));
++        break;
++      }
++      newsv->name=strmake_root(&thd->transaction.mem_root,
++                               lex->ident.str, lex->ident.length);
++      newsv->length=lex->ident.length;
++      /*
++        If we get an error here, don't add the new savepoint to the list.
++        We'll lose a little bit of memory in the transaction mem_root, but
++        it'll be freed when the transaction ends anyway.
++      */
++      if (ha_savepoint(thd, newsv))
++        res= TRUE;
++      else
++      {
++        newsv->prev=thd->transaction.savepoints;
++        thd->transaction.savepoints=newsv;
++        my_ok(thd);
++      }
++    }
++    break;
++  case SQLCOM_CREATE_PROCEDURE:
++  case SQLCOM_CREATE_SPFUNCTION:
++  {
++    uint namelen;
++    char *name;
++    int sp_result= SP_INTERNAL_ERROR;
++
++    DBUG_ASSERT(lex->sphead != 0);
++    DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
++    /*
++      Verify that the database name is allowed, optionally
++      lowercase it.
++    */
++    if (check_db_name(&lex->sphead->m_db))
++    {
++      my_error(ER_WRONG_DB_NAME, MYF(0), lex->sphead->m_db.str);
++      goto create_sp_error;
++    }
++
++    /*
++      Check that a database directory with this name
++      exists. Design note: This won't work on virtual databases
++      like information_schema.
++    */
++    if (check_db_dir_existence(lex->sphead->m_db.str))
++    {
++      my_error(ER_BAD_DB_ERROR, MYF(0), lex->sphead->m_db.str);
++      goto create_sp_error;
++    }
++
++    if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0,
++                     is_schema_db(lex->sphead->m_db.str,
++                                  lex->sphead->m_db.length)))
++      goto create_sp_error;
++
++    if (end_active_trans(thd))
++      goto create_sp_error;
++
++    name= lex->sphead->name(&namelen);
++#ifdef HAVE_DLOPEN
++    if (lex->sphead->m_type == TYPE_ENUM_FUNCTION)
++    {
++      udf_func *udf = find_udf(name, namelen);
++
++      if (udf)
++      {
++        my_error(ER_UDF_EXISTS, MYF(0), name);
++        goto create_sp_error;
++      }
++    }
++#endif
++
++    if (sp_process_definer(thd))
++      goto create_sp_error;
++
++    res= (sp_result= lex->sphead->create(thd));
++    switch (sp_result) {
++    case SP_OK: {
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++      /* only add privileges if really necessary */
++
++      Security_context security_context;
++      bool restore_backup_context= false;
++      Security_context *backup= NULL;
++      LEX_USER *definer= thd->lex->definer;
++      /*
++        Check if the definer exists on the slave; if so, use the definer's
++        privileges to insert the routine privileges into mysql.procs_priv.
++
++        The current user of the SQL thread has the GLOBAL_ACL privilege,
++        which bypasses routine privilege checks, so otherwise no routine
++        privilege record would be inserted into mysql.procs_priv.
++      */
++      if (thd->slave_thread && is_acl_user(definer->host.str, definer->user.str))
++      {
++        security_context.change_security_context(thd, 
++                                                 &thd->lex->definer->user,
++                                                 &thd->lex->definer->host,
++                                                 &thd->lex->sphead->m_db,
++                                                 &backup);
++        restore_backup_context= true;
++      }
++
++      if (sp_automatic_privileges && !opt_noacl &&
++          check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS,
++                               lex->sphead->m_db.str, name,
++                               lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1))
++      {
++        if (sp_grant_privileges(thd, lex->sphead->m_db.str, name,
++                                lex->sql_command == SQLCOM_CREATE_PROCEDURE))
++          push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                       ER_PROC_AUTO_GRANT_FAIL,
++                       ER(ER_PROC_AUTO_GRANT_FAIL));
++      }
++
++      /*
++        Restore the SQL thread's current user (who has the GLOBAL_ACL privilege)
++      */ 
++      if (restore_backup_context)
++      {
++        DBUG_ASSERT(thd->slave_thread == 1);
++        thd->security_ctx->restore_security_context(thd, backup);
++      }
++
++#endif
++    break;
++    }
++    case SP_WRITE_ROW_FAILED:
++      my_error(ER_SP_ALREADY_EXISTS, MYF(0), SP_TYPE_STRING(lex), name);
++    break;
++    case SP_BAD_IDENTIFIER:
++      my_error(ER_TOO_LONG_IDENT, MYF(0), name);
++    break;
++    case SP_BODY_TOO_LONG:
++      my_error(ER_TOO_LONG_BODY, MYF(0), name);
++    break;
++    case SP_FLD_STORE_FAILED:
++      my_error(ER_CANT_CREATE_SROUTINE, MYF(0), name);
++      break;
++    default:
++      my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(lex), name);
++    break;
++    } /* end switch */
++
++    /*
++      Capture all errors within this CASE and
++      clean up the environment.
++    */
++create_sp_error:
++    if (sp_result != SP_OK )
++      goto error;
++    my_ok(thd);
++    break; /* break super switch */
++  } /* end case group bracket */
++  case SQLCOM_CALL:
++    {
++      sp_head *sp;
++
++      /*
++        This will cache all SP and SF and open and lock all tables
++        required for execution.
++      */
++      if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
++	  open_and_lock_tables(thd, all_tables))
++       goto error;
++
++      /*
++        By this moment all needed SPs should be in cache so no need to look 
++        into DB. 
++      */
++      if (!(sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname,
++                                &thd->sp_proc_cache, TRUE)))
++      {
++	my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PROCEDURE",
++                 lex->spname->m_qname.str);
++	goto error;
++      }
++      else
++      {
++	ha_rows select_limit;
++        /* bits that should be cleared in thd->server_status */
++	uint bits_to_be_cleared= 0;
++        /*
++          Check that the stored procedure doesn't contain Dynamic SQL
++          and doesn't return result sets: such stored procedures can't
++          be called from a function or trigger.
++        */
++        if (thd->in_sub_stmt)
++        {
++          const char *where= (thd->in_sub_stmt & SUB_STMT_TRIGGER ?
++                              "trigger" : "function");
++          if (sp->is_not_allowed_in_function(where))
++            goto error;
++        }
++
++	if (sp->m_flags & sp_head::MULTI_RESULTS)
++	{
++	  if (! (thd->client_capabilities & CLIENT_MULTI_RESULTS))
++	  {
++            /*
++              The client does not support multiple result sets being sent
++              back
++            */
++	    my_error(ER_SP_BADSELECT, MYF(0), sp->m_qname.str);
++	    goto error;
++	  }
++          /*
++            If SERVER_MORE_RESULTS_EXISTS is not set,
++            then remember that it should be cleared
++          */
++	  bits_to_be_cleared= (~thd->server_status &
++                               SERVER_MORE_RESULTS_EXISTS);
++	  thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
++	}
++
++	if (check_routine_access(thd, EXECUTE_ACL,
++				 sp->m_db.str, sp->m_name.str, TRUE, FALSE))
++	{
++	  goto error;
++	}
++	select_limit= thd->variables.select_limit;
++	thd->variables.select_limit= HA_POS_ERROR;
++
++        /* 
++          We never write CALL statements into binlog:
++           - If the mode is non-prelocked, each statement will be logged
++             separately.
++           - If the mode is prelocked, the invoking statement will care
++             about writing into binlog.
++          So just execute the statement.
++        */
++	res= sp->execute_procedure(thd, &lex->value_list);
++	/*
++          If warnings have been cleared, we have to clear total_warn_count
++          too, otherwise the clients get confused.
++	 */
++	if (thd->warn_list.is_empty())
++	  thd->total_warn_count= 0;
++
++	thd->variables.select_limit= select_limit;
++
++        thd->server_status&= ~bits_to_be_cleared;
++
++	if (!res)
++          my_ok(thd, (ulong) (thd->row_count_func < 0 ? 0 :
++                              thd->row_count_func));
++	else
++        {
++          DBUG_ASSERT(thd->is_error() || thd->killed);
++	  goto error;		// Substatement should already have sent error
++        }
++      }
++      break;
++    }
++  case SQLCOM_ALTER_PROCEDURE:
++  case SQLCOM_ALTER_FUNCTION:
++    {
++      int sp_result;
++      sp_head *sp;
++      st_sp_chistics chistics;
++
++      memcpy(&chistics, &lex->sp_chistics, sizeof(chistics));
++      if (lex->sql_command == SQLCOM_ALTER_PROCEDURE)
++        sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname,
++                            &thd->sp_proc_cache, FALSE);
++      else
++        sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname,
++                            &thd->sp_func_cache, FALSE);
++      mysql_reset_errors(thd, 0);
++      if (! sp)
++      {
++	if (lex->spname->m_db.str)
++	  sp_result= SP_KEY_NOT_FOUND;
++	else
++	{
++	  my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
++	  goto error;
++	}
++      }
++      else
++      {
++        if (check_routine_access(thd, ALTER_PROC_ACL, sp->m_db.str, 
++				 sp->m_name.str,
++                                 lex->sql_command == SQLCOM_ALTER_PROCEDURE, 0))
++	  goto error;
++
++        if (end_active_trans(thd)) 
++          goto error;
++	memcpy(&lex->sp_chistics, &chistics, sizeof(lex->sp_chistics));
++        if ((sp->m_type == TYPE_ENUM_FUNCTION) &&
++            !trust_function_creators &&  mysql_bin_log.is_open() &&
++            !sp->m_chistics->detistic &&
++            (chistics.daccess == SP_CONTAINS_SQL ||
++             chistics.daccess == SP_MODIFIES_SQL_DATA))
++        {
++          my_message(ER_BINLOG_UNSAFE_ROUTINE,
++		     ER(ER_BINLOG_UNSAFE_ROUTINE), MYF(0));
++          sp_result= SP_INTERNAL_ERROR;
++        }
++        else
++        {
++          /*
++            Note that if you implement the capability of ALTER FUNCTION to
++            alter the body of the function, this command should be made to
++            follow the restrictions that log-bin-trust-function-creators=0
++            already puts on CREATE FUNCTION.
++          */
++          /* Conditionally writes to binlog */
++
++          int type= lex->sql_command == SQLCOM_ALTER_PROCEDURE ?
++                    TYPE_ENUM_PROCEDURE :
++                    TYPE_ENUM_FUNCTION;
++
++          sp_result= sp_update_routine(thd,
++                                       type,
++                                       lex->spname,
++                                       &lex->sp_chistics);
++        }
++      }
++      switch (sp_result)
++      {
++      case SP_OK:
++	my_ok(thd);
++	break;
++      case SP_KEY_NOT_FOUND:
++	my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_qname.str);
++	goto error;
++      default:
++	my_error(ER_SP_CANT_ALTER, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_qname.str);
++	goto error;
++      }
++      break;
++    }
++  case SQLCOM_DROP_PROCEDURE:
++  case SQLCOM_DROP_FUNCTION:
++    {
++      int sp_result;
++      int type= (lex->sql_command == SQLCOM_DROP_PROCEDURE ?
++                 TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
++
++      sp_result= sp_routine_exists_in_table(thd, type, lex->spname);
++      mysql_reset_errors(thd, 0);
++      if (sp_result == SP_OK)
++      {
++        char *db= lex->spname->m_db.str;
++	char *name= lex->spname->m_name.str;
++
++	if (check_routine_access(thd, ALTER_PROC_ACL, db, name,
++                                 lex->sql_command == SQLCOM_DROP_PROCEDURE, 0))
++          goto error;
++
++        if (end_active_trans(thd)) 
++          goto error;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++	if (sp_automatic_privileges && !opt_noacl &&
++	    sp_revoke_privileges(thd, db, name, 
++                                 lex->sql_command == SQLCOM_DROP_PROCEDURE))
++	{
++	  push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++		       ER_PROC_AUTO_REVOKE_FAIL,
++		       ER(ER_PROC_AUTO_REVOKE_FAIL));
++	}
++#endif
++        /* Conditionally writes to binlog */
++
++        int type= lex->sql_command == SQLCOM_DROP_PROCEDURE ?
++                  TYPE_ENUM_PROCEDURE :
++                  TYPE_ENUM_FUNCTION;
++
++        sp_result= sp_drop_routine(thd, type, lex->spname);
++      }
++      else
++      {
++#ifdef HAVE_DLOPEN
++	if (lex->sql_command == SQLCOM_DROP_FUNCTION)
++	{
++          udf_func *udf = find_udf(lex->spname->m_name.str,
++                                   lex->spname->m_name.length);
++          if (udf)
++          {
++	    if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0, 0))
++	      goto error;
++
++	    if (!(res = mysql_drop_function(thd, &lex->spname->m_name)))
++	    {
++	      my_ok(thd);
++	      break;
++	    }
++	  }
++	}
++#endif
++	if (lex->spname->m_db.str)
++	  sp_result= SP_KEY_NOT_FOUND;
++	else
++	{
++	  my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
++	  goto error;
++	}
++      }
++      res= sp_result;
++      switch (sp_result) {
++      case SP_OK:
++	my_ok(thd);
++	break;
++      case SP_KEY_NOT_FOUND:
++	if (lex->drop_if_exists)
++	{
++          res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++	  push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++			      ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
++			      SP_COM_STRING(lex), lex->spname->m_name.str);
++          if (!res)
++            my_ok(thd);
++	  break;
++	}
++	my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_qname.str);
++	goto error;
++      default:
++	my_error(ER_SP_DROP_FAILED, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_qname.str);
++	goto error;
++      }
++      break;
++    }
++  case SQLCOM_SHOW_CREATE_PROC:
++    {
++      if (sp_show_create_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname))
++      {
++	my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_name.str);
++	goto error;
++      }
++      break;
++    }
++  case SQLCOM_SHOW_CREATE_FUNC:
++    {
++      if (sp_show_create_routine(thd, TYPE_ENUM_FUNCTION, lex->spname))
++      {
++	my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_name.str);
++	goto error;
++      }
++      break;
++    }
++#ifndef DBUG_OFF
++  case SQLCOM_SHOW_PROC_CODE:
++  case SQLCOM_SHOW_FUNC_CODE:
++    {
++      sp_head *sp;
++
++      if (lex->sql_command == SQLCOM_SHOW_PROC_CODE)
++        sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname,
++                            &thd->sp_proc_cache, FALSE);
++      else
++        sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname,
++                            &thd->sp_func_cache, FALSE);
++      if (!sp || sp->show_routine_code(thd))
++      {
++        /* We don't distinguish between errors for now */
++        my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
++                 SP_COM_STRING(lex), lex->spname->m_name.str);
++        goto error;
++      }
++      break;
++    }
++#endif // ifndef DBUG_OFF
++  case SQLCOM_SHOW_CREATE_TRIGGER:
++    {
++      if (lex->spname->m_name.length > NAME_LEN)
++      {
++        my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str);
++        goto error;
++      }
++
++      if (show_create_trigger(thd, lex->spname))
++        goto error; /* Error has been already logged. */
++
++      break;
++    }
++  case SQLCOM_CREATE_VIEW:
++    {
++      /*
++        Note: SQLCOM_CREATE_VIEW also handles 'ALTER VIEW' commands
++        as specified through the thd->lex->create_view_mode flag.
++      */
++      if (end_active_trans(thd))
++        goto error;
++
++      res= mysql_create_view(thd, first_table, thd->lex->create_view_mode);
++      break;
++    }
++  case SQLCOM_DROP_VIEW:
++    {
++      if (check_table_access(thd, DROP_ACL, all_tables, UINT_MAX, FALSE) ||
++          end_active_trans(thd))
++        goto error;
++      /* Conditionally writes to binlog. */
++      res= mysql_drop_view(thd, first_table, thd->lex->drop_mode);
++      break;
++    }
++  case SQLCOM_CREATE_TRIGGER:
++  {
++    if (end_active_trans(thd))
++      goto error;
++
++    /* Conditionally writes to binlog. */
++    res= mysql_create_or_drop_trigger(thd, all_tables, 1);
++
++    break;
++  }
++  case SQLCOM_DROP_TRIGGER:
++  {
++    if (end_active_trans(thd))
++      goto error;
++
++    /* Conditionally writes to binlog. */
++    res= mysql_create_or_drop_trigger(thd, all_tables, 0);
++    break;
++  }
++  case SQLCOM_XA_START:
++    if (thd->transaction.xid_state.xa_state == XA_IDLE &&
++        thd->lex->xa_opt == XA_RESUME)
++    {
++      if (! thd->transaction.xid_state.xid.eq(thd->lex->xid))
++      {
++        my_error(ER_XAER_NOTA, MYF(0));
++        break;
++      }
++      thd->transaction.xid_state.xa_state= XA_ACTIVE;
++      my_ok(thd);
++      break;
++    }
++    if (thd->lex->xa_opt != XA_NONE)
++    { // JOIN is not supported yet. TODO
++      my_error(ER_XAER_INVAL, MYF(0));
++      break;
++    }
++    if (thd->transaction.xid_state.xa_state != XA_NOTR)
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    if (thd->active_transaction() || thd->locked_tables)
++    {
++      my_error(ER_XAER_OUTSIDE, MYF(0));
++      break;
++    }
++    DBUG_ASSERT(thd->transaction.xid_state.xid.is_null());
++    thd->transaction.xid_state.xa_state= XA_ACTIVE;
++    thd->transaction.xid_state.rm_error= 0;
++    thd->transaction.xid_state.xid.set(thd->lex->xid);
++    if (xid_cache_insert(&thd->transaction.xid_state))
++    {
++      thd->transaction.xid_state.xa_state= XA_NOTR;
++      thd->transaction.xid_state.xid.null();
++      break;
++    }
++    thd->transaction.all.modified_non_trans_table= FALSE;
++    thd->options= ((thd->options & ~(OPTION_KEEP_LOG)) | OPTION_BEGIN);
++    thd->server_status|= SERVER_STATUS_IN_TRANS;
++    my_ok(thd);
++    break;
++  case SQLCOM_XA_END:
++    /* fake it */
++    if (thd->lex->xa_opt != XA_NONE)
++    { // SUSPEND and FOR MIGRATE are not supported yet. TODO
++      my_error(ER_XAER_INVAL, MYF(0));
++      break;
++    }
++    if (thd->transaction.xid_state.xa_state != XA_ACTIVE)
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
++    {
++      my_error(ER_XAER_NOTA, MYF(0));
++      break;
++    }
++    if (xa_trans_rolled_back(&thd->transaction.xid_state))
++      break;
++    thd->transaction.xid_state.xa_state=XA_IDLE;
++    my_ok(thd);
++    break;
++  case SQLCOM_XA_PREPARE:
++    if (thd->transaction.xid_state.xa_state != XA_IDLE)
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
++    {
++      my_error(ER_XAER_NOTA, MYF(0));
++      break;
++    }
++    if (ha_prepare(thd))
++    {
++      my_error(ER_XA_RBROLLBACK, MYF(0));
++      xid_cache_delete(&thd->transaction.xid_state);
++      thd->transaction.xid_state.xa_state=XA_NOTR;
++      break;
++    }
++    thd->transaction.xid_state.xa_state=XA_PREPARED;
++    my_ok(thd);
++    break;
++  case SQLCOM_XA_COMMIT:
++    if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
++    {
++      /*
++        xid_state.in_thd is always true except during the XA recovery
++        procedure. Note that there is no race condition here
++        between xid_cache_search and xid_cache_delete, since we're always
++        deleting our own XID (thd->lex->xid == thd->transaction.xid_state.xid).
++        The only case when thd->lex->xid != thd->transaction.xid_state.xid
++        and xid_state->in_thd == 0 is in ha_recover() functionality,
++        which is called before starting client connections, and thus is
++        always single-threaded.
++      */
++      XID_STATE *xs=xid_cache_search(thd->lex->xid);
++      if (!xs || xs->in_thd)
++        my_error(ER_XAER_NOTA, MYF(0));
++      else if (xa_trans_rolled_back(xs))
++      {
++        ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
++        xid_cache_delete(xs);
++        break;
++      }
++      else
++      {
++        ha_commit_or_rollback_by_xid(thd->lex->xid, 1);
++        xid_cache_delete(xs);
++        my_ok(thd);
++      }
++      break;
++    }
++    if (xa_trans_rolled_back(&thd->transaction.xid_state))
++    {
++      xa_trans_rollback(thd);
++      break;
++    }
++    if (thd->transaction.xid_state.xa_state == XA_IDLE &&
++        thd->lex->xa_opt == XA_ONE_PHASE)
++    {
++      int r;
++      if ((r= ha_commit(thd)))
++        my_error(r == 1 ? ER_XA_RBROLLBACK : ER_XAER_RMERR, MYF(0));
++      else
++        my_ok(thd);
++    }
++    else if (thd->transaction.xid_state.xa_state == XA_PREPARED &&
++             thd->lex->xa_opt == XA_NONE)
++    {
++      if (wait_if_global_read_lock(thd, 0, 0))
++      {
++        ha_rollback(thd);
++        my_error(ER_XAER_RMERR, MYF(0));
++      }
++      else
++      {
++        if (ha_commit_one_phase(thd, 1))
++          my_error(ER_XAER_RMERR, MYF(0));
++        else
++          my_ok(thd);
++        start_waiting_global_read_lock(thd);
++      }
++    }
++    else
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
++    thd->transaction.all.modified_non_trans_table= FALSE;
++    thd->server_status&= ~SERVER_STATUS_IN_TRANS;
++    xid_cache_delete(&thd->transaction.xid_state);
++    thd->transaction.xid_state.xa_state=XA_NOTR;
++    break;
++  case SQLCOM_XA_ROLLBACK:
++    if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
++    {
++      XID_STATE *xs=xid_cache_search(thd->lex->xid);
++      if (!xs || xs->in_thd)
++        my_error(ER_XAER_NOTA, MYF(0));
++      else
++      {
++        bool ok= !xa_trans_rolled_back(xs);
++        ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
++        xid_cache_delete(xs);
++        if (ok)
++          my_ok(thd);
++      }
++      break;
++    }
++    if (thd->transaction.xid_state.xa_state != XA_IDLE &&
++        thd->transaction.xid_state.xa_state != XA_PREPARED &&
++        thd->transaction.xid_state.xa_state != XA_ROLLBACK_ONLY)
++    {
++      my_error(ER_XAER_RMFAIL, MYF(0),
++               xa_state_names[thd->transaction.xid_state.xa_state]);
++      break;
++    }
++    if (xa_trans_rollback(thd))
++      my_error(ER_XAER_RMERR, MYF(0));
++    else
++      my_ok(thd);
++    break;
++  case SQLCOM_XA_RECOVER:
++    res= mysql_xa_recover(thd);
++    break;
++  case SQLCOM_ALTER_TABLESPACE:
++    if (check_access(thd, ALTER_ACL, thd->db, 0, 1, 0,
++                     thd->db ? is_schema_db(thd->db, thd->db_length) : 0))
++      break;
++    if (!(res= mysql_alter_tablespace(thd, lex->alter_tablespace_info)))
++      my_ok(thd);
++    break;
++  case SQLCOM_INSTALL_PLUGIN:
++    if (! (res= mysql_install_plugin(thd, &thd->lex->comment,
++                                     &thd->lex->ident)))
++      my_ok(thd);
++    break;
++  case SQLCOM_UNINSTALL_PLUGIN:
++    if (! (res= mysql_uninstall_plugin(thd, &thd->lex->comment)))
++      my_ok(thd);
++    break;
++  case SQLCOM_BINLOG_BASE64_EVENT:
++  {
++#ifndef EMBEDDED_LIBRARY
++    mysql_client_binlog_statement(thd);
++#else /* EMBEDDED_LIBRARY */
++    my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "embedded");
++#endif /* EMBEDDED_LIBRARY */
++    break;
++  }
++  case SQLCOM_CREATE_SERVER:
++  {
++    int error;
++    LEX *lex= thd->lex;
++    DBUG_PRINT("info", ("case SQLCOM_CREATE_SERVER"));
++
++    if (check_global_access(thd, SUPER_ACL))
++      break;
++
++    if ((error= create_server(thd, &lex->server_options)))
++    {
++      DBUG_PRINT("info", ("problem creating server <%s>",
++                          lex->server_options.server_name));
++      my_error(error, MYF(0), lex->server_options.server_name);
++      break;
++    }
++    my_ok(thd, 1);
++    break;
++  }
++  case SQLCOM_ALTER_SERVER:
++  {
++    int error;
++    LEX *lex= thd->lex;
++    DBUG_PRINT("info", ("case SQLCOM_ALTER_SERVER"));
++
++    if (check_global_access(thd, SUPER_ACL))
++      break;
++
++    if ((error= alter_server(thd, &lex->server_options)))
++    {
++      DBUG_PRINT("info", ("problem altering server <%s>",
++                          lex->server_options.server_name));
++      my_error(error, MYF(0), lex->server_options.server_name);
++      break;
++    }
++    my_ok(thd, 1);
++    break;
++  }
++  case SQLCOM_DROP_SERVER:
++  {
++    int err_code;
++    LEX *lex= thd->lex;
++    DBUG_PRINT("info", ("case SQLCOM_DROP_SERVER"));
++
++    if (check_global_access(thd, SUPER_ACL))
++      break;
++
++    if ((err_code= drop_server(thd, &lex->server_options)))
++    {
++      if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST)
++      {
++        DBUG_PRINT("info", ("problem dropping server %s",
++                            lex->server_options.server_name));
++        my_error(err_code, MYF(0), lex->server_options.server_name);
++      }
++      else
++      {
++        my_ok(thd, 0);
++      }
++      break;
++    }
++    my_ok(thd, 1);
++    break;
++  }
++  default:
++#ifndef EMBEDDED_LIBRARY
++    DBUG_ASSERT(0);                             /* Impossible */
++#endif
++    my_ok(thd);
++    break;
++  }
++  thd_proc_info(thd, "query end");
++
++  /*
++    Binlog-related cleanup:
++    Reset system variables temporarily modified by SET ONE SHOT.
++
++    Exception: If this is a SET, do nothing. This is to allow
++    mysqlbinlog to print many SET commands (in this case we want the
++    charset temp setting to live until the real query). This is also
++    needed so that SET CHARACTER_SET_CLIENT... does not cancel itself
++    immediately.
++  */
++  if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION)
++    reset_one_shot_variables(thd);
++
++  /*
++    The return value for ROW_COUNT() is "implementation dependent" if the
++    statement is not DELETE, INSERT or UPDATE, but -1 is what JDBC and ODBC
++    want. We also keep the last value in case of SQLCOM_CALL or
++    SQLCOM_EXECUTE.
++  */
++  if (!(sql_command_flags[lex->sql_command] & CF_HAS_ROW_COUNT))
++    thd->row_count_func= -1;
++
++  goto finish;
++
++error:
++  res= TRUE;
++
++finish:
++  if (need_start_waiting)
++  {
++    /*
++      Release the protection against the global read lock and wake
++      everyone, who might want to set a global read lock.
++    */
++    start_waiting_global_read_lock(thd);
++  }
++  DBUG_RETURN(res || thd->is_error());
++}
++
++
++static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
++{
++  LEX	*lex= thd->lex;
++  select_result *result=lex->result;
++  bool res;
++  /* assign global limit variable if limit is not given */
++  {
++    SELECT_LEX *param= lex->unit.global_parameters;
++    if (!param->explicit_limit)
++      param->select_limit=
++        new Item_int((ulonglong) thd->variables.select_limit);
++  }
++  if (!(res= open_and_lock_tables(thd, all_tables)))
++  {
++    if (lex->describe)
++    {
++      /*
++        We always use select_send for EXPLAIN, even if it's an EXPLAIN
++        for SELECT ... INTO OUTFILE: a user application should be able
++        to prepend EXPLAIN to any query and receive output for it,
++        even if the query itself redirects the output.
++      */
++      if (!(result= new select_send()))
++        return 1;                               /* purecov: inspected */
++      thd->send_explain_fields(result);
++      res= mysql_explain_union(thd, &thd->lex->unit, result);
++      if (lex->describe & DESCRIBE_EXTENDED)
++      {
++        char buff[1024];
++        String str(buff,(uint32) sizeof(buff), system_charset_info);
++        str.length(0);
++        thd->lex->unit.print(&str, QT_ORDINARY);
++        str.append('\0');
++        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                     ER_YES, str.ptr());
++      }
++      if (res)
++        result->abort();
++      else
++        result->send_eof();
++      delete result;
++    }
++    else
++    {
++      if (!result && !(result= new select_send()))
++        return 1;                               /* purecov: inspected */
++      query_cache_store_query(thd, all_tables);
++      res= handle_select(thd, lex, result, 0);
++      if (result != lex->result)
++        delete result;
++    }
++  }
++  return res;
++}
++
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++/**
++  Check grants for commands which work only with one table.
++
++  @param thd                    Thread handler
++  @param privilege              requested privilege
++  @param all_tables             global table list of query
++  @param no_errors              FALSE/TRUE - report/don't report error to
++                            the client (using my_error() call).
++
++  @retval
++    0   OK
++  @retval
++    1   access denied, error is sent to client
++*/
++
++bool check_single_table_access(THD *thd, ulong privilege, 
++                               TABLE_LIST *all_tables, bool no_errors)
++{
++  Security_context * backup_ctx= thd->security_ctx;
++
++  /* we need to switch to the saved context (if any) */
++  if (all_tables->security_ctx)
++    thd->security_ctx= all_tables->security_ctx;
++
++  const char *db_name;
++  if ((all_tables->view || all_tables->field_translation) &&
++      !all_tables->schema_table)
++    db_name= all_tables->view_db.str;
++  else
++    db_name= all_tables->db;
++
++  if (check_access(thd, privilege, db_name,
++		   &all_tables->grant.privilege, 0, no_errors,
++                   test(all_tables->schema_table)))
++    goto deny;
++
++  /* Show only 1 table for check_grant */
++  if (!(all_tables->belong_to_view &&
++        (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) &&
++      check_grant(thd, privilege, all_tables, 0, 1, no_errors))
++    goto deny;
++
++  thd->security_ctx= backup_ctx;
++  return 0;
++
++deny:
++  thd->security_ctx= backup_ctx;
++  return 1;
++}
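++
++/*
++  Usage sketch (illustrative only, not an upstream call site): a handler
++  for a statement touching exactly one table is expected to do
++
++    if (check_single_table_access(thd, SELECT_ACL, all_tables, FALSE))
++      goto error;   // access denied, error already sent to the client
++
++  A zero return means the check passed and thd->security_ctx was restored.
++*/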
++
++/**
++  Check grants for commands which work only with one table and all other
++  tables belonging to subselects or implicitly opened tables.
++
++  @param thd			Thread handler
++  @param privilege		requested privilege
++  @param all_tables		global table list of query
++
++  @retval
++    0   OK
++  @retval
++    1   access denied, error is sent to client
++*/
++
++bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
++{
++  if (check_single_table_access (thd,privilege,all_tables, FALSE))
++    return 1;
++
++  /* Check rights on tables of subselects and implicitly opened tables */
++  TABLE_LIST *subselects_tables, *view= all_tables->view ? all_tables : 0;
++  if ((subselects_tables= all_tables->next_global))
++  {
++    /*
++      Access rights asked for the first table of a view should be the same
++      as for the view
++    */
++    if (view && subselects_tables->belong_to_view == view)
++    {
++      if (check_single_table_access (thd, privilege, subselects_tables, FALSE))
++        return 1;
++      subselects_tables= subselects_tables->next_global;
++    }
++    if (subselects_tables &&
++        (check_table_access(thd, SELECT_ACL, subselects_tables, UINT_MAX, FALSE)))
++      return 1;
++  }
++  return 0;
++}
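++
++/*
++  Illustrative sketch: a statement that operates on one base table would
++  typically call
++
++    if (check_one_table_access(thd, DELETE_ACL, all_tables))
++      goto error;
++
++  which, per the code above, checks the first table with the requested
++  privilege and any remaining subselect or implicitly opened tables with
++  SELECT_ACL. (DELETE_ACL is only an example privilege here.)
++*/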
++
++
++/**
++  Get the user (global) and database privileges for all used tables.
++
++  @param save_priv    In this we store global and db level grants for the
++                      table. Note that we don't store db level grants if the
++                      global grants is enough to satisfy the request and the
++                      global grants contains a SELECT grant.
++
++  @note
++    The idea of EXTRA_ACL is that one will be granted access to the table if
++    one has the asked privilege on any column combination of the table; For
++    example to be able to check a table one needs to have SELECT privilege on
++    any column of the table.
++
++  @retval
++    0  ok
++  @retval
++    1  If we can't get the privileges and we don't use table/column
++    grants.
++*/
++bool
++check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
++	     bool dont_check_global_grants, bool no_errors, bool schema_db)
++{
++  Security_context *sctx= thd->security_ctx;
++  ulong db_access;
++  /*
++    GRANT command:
++    In case of database level grant the database name may be a pattern,
++    in case of table|column level grant the database name can not be a pattern.
++    We use 'dont_check_global_grants' as a flag to determine
++    if it's database level grant command 
++    (see SQLCOM_GRANT case, mysql_execute_command() function) and
++    set db_is_pattern according to 'dont_check_global_grants' value.
++  */
++  bool  db_is_pattern= (test(want_access & GRANT_ACL) &&
++                        dont_check_global_grants);
++  ulong dummy;
++  DBUG_ENTER("check_access");
++  DBUG_PRINT("enter",("db: %s  want_access: %lu  master_access: %lu",
++                      db ? db : "", want_access, sctx->master_access));
++  if (save_priv)
++    *save_priv=0;
++  else
++    save_priv= &dummy;
++
++  thd_proc_info(thd, "checking permissions");
++  if ((!db || !db[0]) && !thd->db && !dont_check_global_grants)
++  {
++    DBUG_PRINT("error",("No database"));
++    if (!no_errors)
++      my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
++                 MYF(0));                       /* purecov: tested */
++    DBUG_RETURN(TRUE);				/* purecov: tested */
++  }
++
++  if (schema_db)
++  {
++    if ((!(sctx->master_access & FILE_ACL) && (want_access & FILE_ACL)) ||
++        (want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL)))
++    {
++      if (!no_errors)
++      {
++        const char *db_name= db ? db : thd->db;
++        my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
++                 sctx->priv_user, sctx->priv_host, db_name);
++      }
++      DBUG_RETURN(TRUE);
++    }
++    else
++    {
++      *save_priv= SELECT_ACL;
++      DBUG_RETURN(FALSE);
++    }
++  }
++
++  if ((sctx->master_access & want_access) == want_access)
++  {
++    /*
++      If we don't have a global SELECT privilege, we have to get the database
++      specific access rights to be able to handle queries of type
++      UPDATE t1 SET a=1 WHERE b > 0
++    */
++    db_access= sctx->db_access;
++    if (!(sctx->master_access & SELECT_ACL) &&
++	(db && (!thd->db || db_is_pattern || strcmp(db,thd->db))))
++      db_access=acl_get(sctx->host, sctx->ip, sctx->priv_user, db,
++                        db_is_pattern);
++    *save_priv=sctx->master_access | db_access;
++    DBUG_RETURN(FALSE);
++  }
++  if (((want_access & ~sctx->master_access) & ~(DB_ACLS | EXTRA_ACL)) ||
++      (! db && dont_check_global_grants))
++  {						// We can never grant this
++    DBUG_PRINT("error",("No possible access"));
++    if (!no_errors)
++      my_error(ER_ACCESS_DENIED_ERROR, MYF(0),
++               sctx->priv_user,
++               sctx->priv_host,
++               (thd->password ?
++                ER(ER_YES) :
++                ER(ER_NO)));                    /* purecov: tested */
++    DBUG_RETURN(TRUE);				/* purecov: tested */
++  }
++
++  if (db == any_db)
++    DBUG_RETURN(FALSE);				// Allow select on anything
++
++  if (db && (!thd->db || db_is_pattern || strcmp(db,thd->db)))
++    db_access= acl_get(sctx->host, sctx->ip, sctx->priv_user, db,
++                       db_is_pattern);
++  else
++    db_access= sctx->db_access;
++  DBUG_PRINT("info",("db_access: %lu", db_access));
++  /* Remove SHOW attribute and access rights we already have */
++  want_access &= ~(sctx->master_access | EXTRA_ACL);
++  DBUG_PRINT("info",("db_access: %lu  want_access: %lu",
++                     db_access, want_access));
++  db_access= ((*save_priv=(db_access | sctx->master_access)) & want_access);
++
++  if (db_access == want_access ||
++      (!dont_check_global_grants &&
++       !(want_access & ~(db_access | TABLE_ACLS | PROC_ACLS))))
++    DBUG_RETURN(FALSE);				/* Ok */
++
++  DBUG_PRINT("error",("Access denied"));
++  if (!no_errors)
++    my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
++             sctx->priv_user, sctx->priv_host,
++             (db ? db : (thd->db ?
++                         thd->db :
++                         "unknown")));          /* purecov: tested */
++  DBUG_RETURN(TRUE);				/* purecov: tested */
++}
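++
++/*
++  Minimal usage sketch, mirroring calls made earlier in this file:
++
++    ulong db_priv;
++    if (check_access(thd, ALTER_ACL, thd->db, &db_priv, 0, 0,
++                     thd->db ? is_schema_db(thd->db, thd->db_length) : 0))
++      goto error;   // error already reported (unless no_errors was passed)
++
++  On success db_priv holds the global plus database-level grants found.
++*/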
++
++
++static bool check_show_access(THD *thd, TABLE_LIST *table)
++{
++  switch (get_schema_table_idx(table->schema_table)) {
++  case SCH_SCHEMATA:
++    return (specialflag & SPECIAL_SKIP_SHOW_DB) &&
++      check_global_access(thd, SHOW_DB_ACL);
++
++  case SCH_TABLE_NAMES:
++  case SCH_TABLES:
++  case SCH_VIEWS:
++  case SCH_TRIGGERS:
++  case SCH_EVENTS:
++  {
++    const char *dst_db_name= table->schema_select_lex->db;
++
++    DBUG_ASSERT(dst_db_name);
++
++    if (check_access(thd, SELECT_ACL, dst_db_name,
++                     &thd->col_access, FALSE, FALSE,
++                     is_schema_db(dst_db_name)))
++      return TRUE;
++
++    if (!thd->col_access && check_grant_db(thd, dst_db_name))
++    {
++      my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
++               thd->security_ctx->priv_user,
++               thd->security_ctx->priv_host,
++               dst_db_name);
++      return TRUE;
++    }
++
++    return FALSE;
++  }
++
++  case SCH_COLUMNS:
++  case SCH_STATISTICS:
++  {
++    TABLE_LIST *dst_table;
++    dst_table= table->schema_select_lex->table_list.first;
++
++    DBUG_ASSERT(dst_table);
++
++    if (check_access(thd, SELECT_ACL | EXTRA_ACL,
++                     dst_table->db,
++                     &dst_table->grant.privilege,
++                     FALSE, FALSE,
++                     test(dst_table->schema_table)))
++      return FALSE;
++
++    return (check_grant(thd, SELECT_ACL, dst_table, 2, UINT_MAX, FALSE));
++  }
++  default:
++    break;
++  }
++
++  return FALSE;
++}
++
++
++/**
++  Check the privilege for all used tables.
++
++  @param    thd          Thread context
++  @param    want_access  Privileges requested
++  @param    tables       List of tables to be checked
++  @param    number       Check at most this number of tables.
++  @param    no_errors    FALSE/TRUE - report/don't report error to
++                         the client (using my_error() call).
++
++  @note
++    Table privileges are cached in the table list for GRANT checking.
++    This functions assumes that table list used and
++    thd->lex->query_tables_own_last value correspond to each other
++    (the latter should be either 0 or point to next_global member
++    of one of elements of this table list).
++
++  @retval  FALSE   OK
++  @retval  TRUE    Access denied
++*/
++
++bool
++check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
++		   uint number, bool no_errors)
++{
++  TABLE_LIST *org_tables= tables;
++  TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table();
++  uint i= 0;
++  Security_context *sctx= thd->security_ctx, *backup_ctx= thd->security_ctx;
++  /*
++    The check that first_not_own_table is not reached is for the case when
++    the given table list refers to the list for prelocking (contains tables
++    of other queries). For simple queries first_not_own_table is 0.
++  */
++  for (; i < number && tables != first_not_own_table;
++       tables= tables->next_global, i++)
++  {
++    if (tables->security_ctx)
++      sctx= tables->security_ctx;
++    else
++      sctx= backup_ctx;
++
++    if (tables->schema_table && 
++        (want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL)))
++    {
++      if (!no_errors)
++        my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
++                 sctx->priv_user, sctx->priv_host,
++                 INFORMATION_SCHEMA_NAME.str);
++      return TRUE;
++    }
++    /*
++       Register access for view underlying table.
++       Remove SHOW_VIEW_ACL, because it will be checked during making view
++     */
++    tables->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL);
++
++    if (tables->schema_table_reformed)
++    {
++      if (check_show_access(thd, tables))
++        goto deny;
++
++      continue;
++    }
++
++    if (tables->is_anonymous_derived_table() ||
++        (tables->table && (int)tables->table->s->tmp_table))
++      continue;
++    thd->security_ctx= sctx;
++    if ((sctx->master_access & want_access) ==
++        (want_access & ~EXTRA_ACL) &&
++	thd->db)
++      tables->grant.privilege= want_access;
++    else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0)
++    {
++      if (check_access(thd, want_access, tables->get_db_name(),
++                       &tables->grant.privilege, 0, no_errors, 
++                       test(tables->schema_table)))
++        goto deny;                            // Access denied
++    }
++    else if (check_access(thd, want_access, tables->get_db_name(),
++                          &tables->grant.privilege, 0, no_errors, 
++                          test(tables->schema_table)))
++      goto deny;
++  }
++  thd->security_ctx= backup_ctx;
++  return check_grant(thd,want_access & ~EXTRA_ACL,org_tables,
++		       test(want_access & EXTRA_ACL), number, no_errors);
++deny:
++  thd->security_ctx= backup_ctx;
++  return TRUE;
++}
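++
++/*
++  Illustrative sketch: the usual pattern before opening tables, as used for
++  SQLCOM_CALL above, is
++
++    if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
++        open_and_lock_tables(thd, all_tables))
++      goto error;
++
++  UINT_MAX means "check every table up to first_not_own_table".
++*/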
++
++
++bool
++check_routine_access(THD *thd, ulong want_access,char *db, char *name,
++		     bool is_proc, bool no_errors)
++{
++  TABLE_LIST tables[1];
++  
++  bzero((char *)tables, sizeof(TABLE_LIST));
++  tables->db= db;
++  tables->table_name= tables->alias= name;
++  
++  /*
++    The following test is just a shortcut for check_access() (to avoid
++    calculating db_access) under the assumption that it's common to
++    give users the global right to execute all stored SPs (but not
++    necessarily the right to create them).
++  */
++  if ((thd->security_ctx->master_access & want_access) == want_access)
++    tables->grant.privilege= want_access;
++  else if (check_access(thd,want_access,db,&tables->grant.privilege,
++			0, no_errors, 0))
++    return TRUE;
++  
++    return check_grant_routine(thd, want_access, tables, is_proc, no_errors);
++}
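++
++/*
++  Illustrative sketch, mirroring the CALL handling above: is_proc selects
++  between procedure and function grants, e.g.
++
++    if (check_routine_access(thd, EXECUTE_ACL,
++                             sp->m_db.str, sp->m_name.str, TRUE, FALSE))
++      goto error;
++*/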
++
++
++/**
++  Check if the routine has any of the routine privileges.
++
++  @param thd	       Thread handler
++  @param db           Database name
++  @param name         Routine name
++
++  @retval
++    0            ok
++  @retval
++    1            error
++*/
++
++bool check_some_routine_access(THD *thd, const char *db, const char *name,
++                               bool is_proc)
++{
++  ulong save_priv;
++  if (thd->security_ctx->master_access & SHOW_PROC_ACLS)
++    return FALSE;
++  /*
++    There are no routines in information_schema db. So we can safely
++    pass zero as the last parameter of the check_access() function
++  */
++  if (!check_access(thd, SHOW_PROC_ACLS, db, &save_priv, 0, 1, 0) ||
++      (save_priv & SHOW_PROC_ACLS))
++    return FALSE;
++  return check_routine_level_acl(thd, db, name, is_proc);
++}
++
++
++/*
++  Check if the given table has any of the asked privileges
++
++  @param thd		 Thread handler
++  @param want_access	 Bitmap of possible privileges to check for
++
++  @retval
++    0  ok
++  @retval
++    1  error
++*/
++
++bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table)
++{
++  ulong access;
++  DBUG_ENTER("check_some_access");
++
++  /* This loop will work as long as we have less than 32 privileges */
++  for (access= 1; access < want_access ; access<<= 1)
++  {
++    if (access & want_access)
++    {
++      if (!check_access(thd, access, table->db,
++                        &table->grant.privilege, 0, 1,
++                        test(table->schema_table)) &&
++           !check_grant(thd, access, table, 0, 1, 1))
++        DBUG_RETURN(0);
++    }
++  }
++  DBUG_PRINT("exit",("no matching access rights"));
++  DBUG_RETURN(1);
++}
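++
++/*
++  Sketch of how the bit loop above is meant to be driven (illustrative):
++  want_access is a bitmap of candidate privileges, e.g.
++
++    if (check_some_access(thd, SELECT_ACL | ALTER_ACL, table))
++      goto error;   // none of the requested privileges is granted
++*/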
++
++#endif /*NO_EMBEDDED_ACCESS_CHECKS*/
++
++
++/**
++  check for global access and give descriptive error message if it fails.
++
++  @param thd			Thread handler
++  @param want_access		User should have any of these global rights
++
++  @warning
++    One gets access right if one has ANY of the rights in want_access.
++    This is useful as one in most cases only need one global right,
++    but in some case we want to check if the user has SUPER or
++    REPL_CLIENT_ACL rights.
++
++  @retval
++    0	ok
++  @retval
++    1	Access denied.  In this case an error is sent to the client
++*/
++
++bool check_global_access(THD *thd, ulong want_access)
++{
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  char command[128];
++  if ((thd->security_ctx->master_access & want_access))
++    return 0;
++  get_privilege_desc(command, sizeof(command), want_access);
++  my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
++  return 1;
++#else
++  return 0;
++#endif
++}
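++
++/*
++  Illustrative sketch (mirrors the CREATE/ALTER/DROP SERVER cases above):
++  access is granted if ANY of the requested global bits is held, e.g.
++
++    if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
++      goto error;   // ER_SPECIFIC_ACCESS_DENIED_ERROR already sent
++*/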
++
++/****************************************************************************
++	Check stack size; Send error if there isn't enough stack to continue
++****************************************************************************/
++
++#ifndef EMBEDDED_LIBRARY
++
++#if STACK_DIRECTION < 0
++#define used_stack(A,B) (long) (A - B)
++#else
++#define used_stack(A,B) (long) (B - A)
++#endif
++
++#ifndef DBUG_OFF
++long max_stack_used;
++#endif
++
++/**
++  @note
++  Note: The 'buf' parameter is necessary, even if it is unused here.
++  - fix_fields functions have a "dummy" buffer large enough for the
++    corresponding exec. (Thus we only have to check in fix_fields.)
++  - Passing to check_stack_overrun() prevents the compiler from removing it.
++*/
++bool check_stack_overrun(THD *thd, long margin,
++			 uchar *buf __attribute__((unused)))
++{
++  long stack_used;
++  DBUG_ASSERT(thd == current_thd);
++  if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
++      (long) (my_thread_stack_size - margin))
++  {
++    char ebuff[MYSQL_ERRMSG_SIZE];
++    my_snprintf(ebuff, sizeof(ebuff), ER(ER_STACK_OVERRUN_NEED_MORE),
++                stack_used, my_thread_stack_size, margin);
++    my_message(ER_STACK_OVERRUN_NEED_MORE, ebuff, MYF(ME_FATALERROR));
++    thd->fatal_error();
++    return 1;
++  }
++#ifndef DBUG_OFF
++  max_stack_used= MYSQL_MAX(max_stack_used, stack_used);
++#endif
++  return 0;
++}
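++
++/*
++  Illustrative sketch only: recursive code (e.g. fix_fields implementations)
++  guards itself roughly like
++
++    uchar dummy_buf[256];                    // buffer and margin sizes here
++    if (check_stack_overrun(thd, 8 * 1024, dummy_buf))  // are assumptions
++      return TRUE;                           // fatal error already raised
++
++  real callers use the server's predefined stack margin constants.
++*/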
++#endif /* EMBEDDED_LIBRARY */
++
++#define MY_YACC_INIT 1000			// Start with big alloc
++#define MY_YACC_MAX  32000			// Because of 'short'
++
++bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize)
++{
++  Yacc_state *state= & current_thd->m_parser_state->m_yacc;
++  ulong old_info=0;
++  DBUG_ASSERT(state);
++  if ((uint) *yystacksize >= MY_YACC_MAX)
++    return 1;
++  if (!state->yacc_yyvs)
++    old_info= *yystacksize;
++  *yystacksize= set_zone((*yystacksize)*2,MY_YACC_INIT,MY_YACC_MAX);
++  if (!(state->yacc_yyvs= (uchar*)
++        my_realloc(state->yacc_yyvs,
++                   *yystacksize*sizeof(**yyvs),
++                   MYF(MY_ALLOW_ZERO_PTR | MY_FREE_ON_ERROR))) ||
++      !(state->yacc_yyss= (uchar*)
++        my_realloc(state->yacc_yyss,
++                   *yystacksize*sizeof(**yyss),
++                   MYF(MY_ALLOW_ZERO_PTR | MY_FREE_ON_ERROR))))
++    return 1;
++  if (old_info)
++  {
++    /*
++      Only copy the old stack on the first call to my_yyoverflow(),
++      when replacing a static stack (YYINITDEPTH) by a dynamic stack.
++      For subsequent calls, my_realloc already did preserve the old stack.
++    */
++    memcpy(state->yacc_yyss, *yyss, old_info*sizeof(**yyss));
++    memcpy(state->yacc_yyvs, *yyvs, old_info*sizeof(**yyvs));
++  }
++  *yyss= (short*) state->yacc_yyss;
++  *yyvs= (YYSTYPE*) state->yacc_yyvs;
++  return 0;
++}
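++
++/*
++  Worked example of the growth above: starting from bison's static stack
++  (YYINITDEPTH), each overflow call doubles the size and clamps it into
++  [MY_YACC_INIT, MY_YACC_MAX], i.e. roughly
++
++    1000 -> 2000 -> 4000 -> 8000 -> 16000 -> 32000
++
++  after which the next call returns 1 because *yystacksize >= MY_YACC_MAX.
++*/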
++
++
++/**
++ Reset THD part responsible for command processing state.
++
++   This needs to be called before execution of every statement
++   (prepared or conventional).
++   It is not called by substatements of routines.
++
++  @todo
++   Make it a method of THD and align its name with the rest of
++   reset/end/start/init methods.
++  @todo
++   Call it after we use THD for queries, not before.
++*/
++
++void mysql_reset_thd_for_next_command(THD *thd)
++{
++  DBUG_ENTER("mysql_reset_thd_for_next_command");
++  DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */
++  DBUG_ASSERT(! thd->in_sub_stmt);
++  thd->free_list= 0;
++  thd->select_number= 1;
++  /*
++    Those two lines below are theoretically unneeded as
++    THD::cleanup_after_query() should take care of this already.
++  */
++  thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
++  thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
++
++  thd->query_start_used= 0;
++  thd->is_fatal_error= thd->time_zone_used= 0;
++  /*
++    Clear the status flag that are expected to be cleared at the
++    beginning of each SQL statement.
++  */
++  thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
++  /*
++    If in autocommit mode and not in a transaction, reset
++    OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG to not get warnings
++    in ha_rollback_trans() about some tables couldn't be rolled back.
++  */
++  if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
++  {
++    thd->options&= ~OPTION_KEEP_LOG;
++    thd->transaction.all.modified_non_trans_table= FALSE;
++  }
++  DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx);
++  thd->thread_specific_used= FALSE;
++
++  if (opt_bin_log)
++  {
++    reset_dynamic(&thd->user_var_events);
++    thd->user_var_events_alloc= thd->mem_root;
++  }
++  thd->clear_error();
++  thd->main_da.reset_diagnostics_area();
++  thd->total_warn_count=0;			// Warnings for this query
++  thd->rand_used= 0;
++  thd->sent_row_count= thd->examined_row_count= 0;
++
++  /*
++    Because we come here only for start of top-statements, binlog format is
++    constant inside a complex statement (using stored functions) etc.
++  */
++  thd->reset_current_stmt_binlog_row_based();
++
++  DBUG_PRINT("debug",
++             ("current_stmt_binlog_row_based: %d",
++              thd->current_stmt_binlog_row_based));
++
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Resets the lex->current_select object.
++  @note It is assumed that lex->current_select != NULL
++
++  This function is a wrapper around select_lex->init_select() with an added
++  check for the special situation when using INTO OUTFILE and LOAD DATA.
++*/
++
++void
++mysql_init_select(LEX *lex)
++{
++  SELECT_LEX *select_lex= lex->current_select;
++  select_lex->init_select();
++  lex->wild= 0;
++  if (select_lex == &lex->select_lex)
++  {
++    DBUG_ASSERT(lex->result == 0);
++    lex->exchange= 0;
++  }
++}
++
++
++/**
++  Used to allocate a new SELECT_LEX object on the current thd mem_root and
++  link it into the relevant lists.
++
++  This function is always followed by mysql_init_select.
++
++  @see mysql_init_select
++
++  @retval TRUE An error occurred
++  @retval FALSE The new SELECT_LEX was successfully allocated.
++*/
++
++bool
++mysql_new_select(LEX *lex, bool move_down)
++{
++  SELECT_LEX *select_lex;
++  THD *thd= lex->thd;
++  DBUG_ENTER("mysql_new_select");
++
++  if (!(select_lex= new (thd->mem_root) SELECT_LEX()))
++    DBUG_RETURN(1);
++  select_lex->select_number= ++thd->select_number;
++  select_lex->parent_lex= lex; /* Used in init_query. */
++  select_lex->init_query();
++  select_lex->init_select();
++  lex->nest_level++;
++  if (lex->nest_level > (int) MAX_SELECT_NESTING)
++  {
++    my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT,MYF(0),MAX_SELECT_NESTING);
++    DBUG_RETURN(1);
++  }
++  select_lex->nest_level= lex->nest_level;
++  if (move_down)
++  {
++    SELECT_LEX_UNIT *unit;
++    lex->subqueries= TRUE;
++    /* first select_lex of subselect or derived table */
++    if (!(unit= new (thd->mem_root) SELECT_LEX_UNIT()))
++      DBUG_RETURN(1);
++
++    unit->init_query();
++    unit->init_select();
++    unit->thd= thd;
++    unit->include_down(lex->current_select);
++    unit->link_next= 0;
++    unit->link_prev= 0;
++    unit->return_to= lex->current_select;
++    select_lex->include_down(unit);
++    /*
++      By default we assume that this is a usual subselect and that we have
++      an outer name resolution context; if not, we will assign it to 0 later
++    */
++    select_lex->context.outer_context= &select_lex->outer_select()->context;
++  }
++  else
++  {
++    if (lex->current_select->order_list.first && !lex->current_select->braces)
++    {
++      my_error(ER_WRONG_USAGE, MYF(0), "UNION", "ORDER BY");
++      DBUG_RETURN(1);
++    }
++    select_lex->include_neighbour(lex->current_select);
++    SELECT_LEX_UNIT *unit= select_lex->master_unit();                              
++    if (!unit->fake_select_lex && unit->add_fake_select_lex(lex->thd))
++      DBUG_RETURN(1);
++    select_lex->context.outer_context= 
++                unit->first_select()->context.outer_context;
++  }
++
++  select_lex->master_unit()->global_parameters= select_lex;
++  select_lex->include_global((st_select_lex_node**)&lex->all_selects_list);
++  lex->current_select= select_lex;
++  /*
++    The query inside a subquery is a SELECT query, so we allow resolution
++    of names in the SELECT list
++  */
++  select_lex->context.resolve_in_select_list= TRUE;
++  DBUG_RETURN(0);
++}
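++
++/*
++  Illustrative example of the two modes above: for a subquery such as
++
++    SELECT * FROM t1 WHERE a IN (SELECT a FROM t2)
++
++  the grammar is expected to call mysql_new_select(lex, TRUE), nesting a
++  new SELECT_LEX_UNIT below the current select, whereas for
++
++    SELECT 1 UNION SELECT 2
++
++  it calls mysql_new_select(lex, FALSE) and the new select becomes a
++  neighbour that shares the unit's fake_select_lex.
++  (t1/t2 are made-up table names.)
++*/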
++
++/**
++  Create a select to return the same output as 'SELECT @@var_name'.
++
++  Used for SHOW COUNT(*) [ WARNINGS | ERROR].
++
++  This will crash with a core dump if the variable doesn't exist.
++
++  @param var_name		Variable name
++*/
++
++void create_select_for_variable(const char *var_name)
++{
++  THD *thd;
++  LEX *lex;
++  LEX_STRING tmp, null_lex_string;
++  Item *var;
++  char buff[MAX_SYS_VAR_LENGTH*2+4+8], *end;
++  DBUG_ENTER("create_select_for_variable");
++
++  thd= current_thd;
++  lex= thd->lex;
++  mysql_init_select(lex);
++  lex->sql_command= SQLCOM_SELECT;
++  tmp.str= (char*) var_name;
++  tmp.length=strlen(var_name);
++  bzero((char*) &null_lex_string.str, sizeof(null_lex_string));
++  /*
++    We set the name of Item to @@session.var_name because that then is used
++    as the column name in the output.
++  */
++  if ((var= get_system_var(thd, OPT_SESSION, tmp, null_lex_string)))
++  {
++    end= strxmov(buff, "@@session.", var_name, NullS);
++    var->set_name(buff, end-buff, system_charset_info);
++    add_item_to_list(thd, var);
++  }
++  DBUG_VOID_RETURN;
++}
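++
++/*
++  Illustrative example (variable name assumed, not taken from this file):
++  SHOW COUNT(*) WARNINGS is implemented by rewriting the statement into the
++  equivalent of
++
++    SELECT @@session.warning_count;
++
++  via create_select_for_variable("warning_count"); the buff/set_name code
++  above is what makes "@@session.warning_count" the output column name.
++*/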
++
++
++void mysql_init_multi_delete(LEX *lex)
++{
++  lex->sql_command=  SQLCOM_DELETE_MULTI;
++  mysql_init_select(lex);
++  lex->select_lex.select_limit= 0;
++  lex->unit.select_limit_cnt= HA_POS_ERROR;
++  lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
++  lex->lock_option= TL_READ_DEFAULT;
++  lex->query_tables= 0;
++  lex->query_tables_last= &lex->query_tables;
++}
++
++
++/*
++  When you modify mysql_parse(), you may need to modify
++  mysql_test_parse_for_slave() in this same file.
++*/
++
++/**
++  Parse a query.
++
++  @param       thd     Current thread
++  @param       rawbuf  Beginning of the query text
++  @param       length  Length of the query text
++  @param[out]  found_semicolon For multi queries, position of the character of
++                               the next query in the query text.
++*/
++
++void mysql_parse(THD *thd, char *rawbuf, uint length,
++                 const char ** found_semicolon)
++{
++  DBUG_ENTER("mysql_parse");
++
++  DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on(););
++
++  /*
++    Warning.
++    The purpose of query_cache_send_result_to_client() is to lookup the
++    query in the query cache first, to avoid parsing and executing it.
++    So, the natural implementation would be to:
++    - first, call query_cache_send_result_to_client,
++    - second, if caching failed, initialise the lexical and syntactic parser.
++    The problem is that the query cache depends on a clean initialization
++    of (among others) lex->safe_to_cache_query and thd->server_status,
++    which are reset respectively in
++    - lex_start()
++    - mysql_reset_thd_for_next_command()
++    So, initializing the lexical analyser *before* using the query cache
++    is required for the cache to work properly.
++    FIXME: cleanup the dependencies in the code to simplify this.
++  */
++  lex_start(thd);
++  mysql_reset_thd_for_next_command(thd);
++
++  if (query_cache_send_result_to_client(thd, rawbuf, length) <= 0)
++  {
++    LEX *lex= thd->lex;
++
++    sp_cache_flush_obsolete(&thd->sp_proc_cache);
++    sp_cache_flush_obsolete(&thd->sp_func_cache);
++
++    Parser_state parser_state;
++    bool err;
++    if (!(err= parser_state.init(thd, rawbuf, length)))
++    {
++      err= parse_sql(thd, & parser_state, NULL);
++      *found_semicolon= parser_state.m_lip.found_semicolon;
++    }
++    else
++      *found_semicolon= NULL;
++
++    if (!err)
++    {
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++      if (mqh_used && thd->user_connect &&
++	  check_mqh(thd, lex->sql_command))
++      {
++	thd->net.error = 0;
++      }
++      else
++#endif
++      {
++	if (! thd->is_error())
++	{
++          /*
++            Binlog logs a string starting from thd->query and having length
++            thd->query_length; so we set thd->query_length correctly (to not
++            log several statements in one event, when we executed only first).
++            We set it to not see the ';' (otherwise it would get into binlog
++            and Query_log_event::print() would give ';;' output).
++            This also helps display only the current query in SHOW
++            PROCESSLIST.
++            Note that we don't need LOCK_thread_count to modify query_length.
++          */
++          if (*found_semicolon && (ulong) (*found_semicolon - thd->query()))
++            thd->set_query_inner(thd->query(),
++                                 (uint32) (*found_semicolon -
++                                           thd->query() - 1));
++          /* Actually execute the query */
++          if (*found_semicolon)
++          {
++            lex->safe_to_cache_query= 0;
++            thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
++          }
++          lex->set_trg_event_type_for_tables();
++          mysql_execute_command(thd);
++	}
++      }
++    }
++    else
++    {
++      DBUG_ASSERT(thd->is_error());
++      DBUG_PRINT("info",("Command aborted. Fatal_error: %d",
++			 thd->is_fatal_error));
++
++      query_cache_abort(&thd->net);
++    }
++    if (thd->lex->sphead)
++    {
++      delete thd->lex->sphead;
++      thd->lex->sphead= 0;
++    }
++    lex->unit.cleanup();
++    thd_proc_info(thd, "freeing items");
++    thd->end_statement();
++    thd->cleanup_after_query();
++    DBUG_ASSERT(thd->change_list.is_empty());
++  }
++  else
++  {
++    /* There are no multi queries in the cache. */
++    *found_semicolon= NULL;
++  }
++
++  DBUG_VOID_RETURN;
++}
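++
++/*
++  Sketch of the multi-statement contract (illustrative; the caller-side
++  names are placeholders): for a buffer containing "SELECT 1; SELECT 2" the
++  first call executes "SELECT 1" and sets *found_semicolon to the start of
++  the next statement, so a caller loops along the lines of
++
++    const char *end_of_stmt;
++    mysql_parse(thd, query_buf, query_len, &end_of_stmt);
++    while (end_of_stmt)
++    {
++      // advance query_buf/query_len past the executed statement
++      mysql_parse(thd, query_buf, query_len, &end_of_stmt);
++    }
++*/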
++
++
++#ifdef HAVE_REPLICATION
++/*
++  Usable by the replication SQL thread only: just parse a query to know if it
++  can be ignored because of replicate-*-table rules.
++
++  @retval
++    0	cannot be ignored
++  @retval
++    1	can be ignored
++*/
++
++bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length)
++{
++  LEX *lex= thd->lex;
++  bool error= 0;
++  DBUG_ENTER("mysql_test_parse_for_slave");
++
++  Parser_state parser_state;
++  if (!(error= parser_state.init(thd, rawbuf, length)))
++  {
++    lex_start(thd);
++    mysql_reset_thd_for_next_command(thd);
++
++    if (!parse_sql(thd, & parser_state, NULL) &&
++        all_tables_not_ok(thd, lex->select_lex.table_list.first))
++      error= 1;                  /* Ignore question */
++    thd->end_statement();
++  }
++  thd->cleanup_after_query();
++  DBUG_RETURN(error);
++}
++#endif
++
++
++
++/**
++  Store field definition for create.
++
++  @return
++    Return 0 if ok
++*/
++
++bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
++		       char *length, char *decimals,
++		       uint type_modifier,
++		       Item *default_value, Item *on_update_value,
++                       LEX_STRING *comment,
++		       char *change,
++                       List<String> *interval_list, CHARSET_INFO *cs,
++		       uint uint_geom_type)
++{
++  register Create_field *new_field;
++  LEX  *lex= thd->lex;
++  DBUG_ENTER("add_field_to_list");
++
++  if (check_string_char_length(field_name, "", NAME_CHAR_LEN,
++                               system_charset_info, 1))
++  {
++    my_error(ER_TOO_LONG_IDENT, MYF(0), field_name->str); /* purecov: inspected */
++    DBUG_RETURN(1);				/* purecov: inspected */
++  }
++  if (type_modifier & PRI_KEY_FLAG)
++  {
++    Key *key;
++    lex->col_list.push_back(new Key_part_spec(field_name->str, 0));
++    key= new Key(Key::PRIMARY, NullS,
++                      &default_key_create_info,
++                      0, lex->col_list);
++    lex->alter_info.key_list.push_back(key);
++    lex->col_list.empty();
++  }
++  if (type_modifier & (UNIQUE_FLAG | UNIQUE_KEY_FLAG))
++  {
++    Key *key;
++    lex->col_list.push_back(new Key_part_spec(field_name->str, 0));
++    key= new Key(Key::UNIQUE, NullS,
++                 &default_key_create_info, 0,
++                 lex->col_list);
++    lex->alter_info.key_list.push_back(key);
++    lex->col_list.empty();
++  }
++
++  if (default_value)
++  {
++    /* 
++      Default value should be a literal => basic constants =>
++      no need to call fix_fields()
++      
++      We allow only one function as part of default value - 
++      NOW() as default for TIMESTAMP type.
++    */
++    if (default_value->type() == Item::FUNC_ITEM && 
++        !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC &&
++         type == MYSQL_TYPE_TIMESTAMP))
++    {
++      my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str);
++      DBUG_RETURN(1);
++    }
++    else if (default_value->type() == Item::NULL_ITEM)
++    {
++      default_value= 0;
++      if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) ==
++	  NOT_NULL_FLAG)
++      {
++	my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str);
++	DBUG_RETURN(1);
++      }
++    }
++    else if (type_modifier & AUTO_INCREMENT_FLAG)
++    {
++      my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str);
++      DBUG_RETURN(1);
++    }
++  }
++
++  if (on_update_value && type != MYSQL_TYPE_TIMESTAMP)
++  {
++    my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name->str);
++    DBUG_RETURN(1);
++  }
++
++  if (type == MYSQL_TYPE_TIMESTAMP && length)
++  {
++    /* Display widths are no longer supported for TIMESTAMP as of MySQL 4.1.
++       In other words, for declarations such as TIMESTAMP(2), TIMESTAMP(4),
++       and so on, the display width is ignored.
++    */
++    char buf[32];
++    my_snprintf(buf, sizeof(buf), "TIMESTAMP(%s)", length);
++    WARN_DEPRECATED(thd, "6.0", buf, "'TIMESTAMP'");
++  }
++
++  if (!(new_field= new Create_field()) ||
++      new_field->init(thd, field_name->str, type, length, decimals, type_modifier,
++                      default_value, on_update_value, comment, change,
++                      interval_list, cs, uint_geom_type))
++    DBUG_RETURN(1);
++
++  lex->alter_info.create_list.push_back(new_field);
++  lex->last_field=new_field;
++  DBUG_RETURN(0);
++}
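
The DEFAULT-value checks above are easier to follow as a small decision table. The following is a minimal standalone sketch (not from the MySQL sources) of the same rules; DefaultKind and default_is_valid are invented names used only for illustration.

    #include <cstdio>

    /* Invented stand-ins for the Item kinds and type_modifier flags tested above. */
    enum DefaultKind { DEF_NONE, DEF_LITERAL, DEF_NULL, DEF_NOW, DEF_OTHER_FUNC };

    static bool default_is_valid(DefaultKind def, bool is_timestamp,
                                 bool not_null, bool auto_increment)
    {
      if (def == DEF_NONE)
        return true;
      if (def == DEF_OTHER_FUNC)            // only NOW() is accepted as a function
        return false;                       // default, and only on TIMESTAMP
      if (def == DEF_NOW && !is_timestamp)
        return false;
      if (def == DEF_NULL)                  // DEFAULT NULL is rejected on a plain
        return !(not_null && !auto_increment); // NOT NULL, non-auto-increment column
      return !auto_increment;               // explicit default + AUTO_INCREMENT
    }

    int main()
    {
      printf("%d\n", default_is_valid(DEF_NOW, true,  false, false));   // 1
      printf("%d\n", default_is_valid(DEF_NOW, false, false, false));   // 0
      printf("%d\n", default_is_valid(DEF_LITERAL, false, false, true));// 0
      return 0;
    }
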
++
++
++/** Store position for column in ALTER TABLE .. ADD column. */
++
++void store_position_for_column(const char *name)
++{
++  current_thd->lex->last_field->after=my_const_cast(char*) (name);
++}
++
++bool
++add_proc_to_list(THD* thd, Item *item)
++{
++  ORDER *order;
++  Item	**item_ptr;
++
++  if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*))))
++    return 1;
++  item_ptr = (Item**) (order+1);
++  *item_ptr= item;
++  order->item=item_ptr;
++  order->free_me=0;
++  thd->lex->proc_list.link_in_list(order, &order->next);
++  return 0;
++}
++
++
++/**
++  save order by and tables in own lists.
++*/
++
++bool add_to_list(THD *thd, SQL_I_List<ORDER> &list, Item *item,bool asc)
++{
++  ORDER *order;
++  DBUG_ENTER("add_to_list");
++  if (!(order = (ORDER *) thd->alloc(sizeof(ORDER))))
++    DBUG_RETURN(1);
++  order->item_ptr= item;
++  order->item= &order->item_ptr;
++  order->asc = asc;
++  order->free_me=0;
++  order->used=0;
++  order->counter_used= 0;
++  list.link_in_list(order, &order->next);
++  DBUG_RETURN(0);
++}
++
++
++/**
++  Add a table to list of used tables.
++
++  @param table		Table to add
++  @param alias		alias for table (or null if no alias)
++  @param table_options	A set of the following bits:
++                         - TL_OPTION_UPDATING : Table will be updated
++                         - TL_OPTION_FORCE_INDEX : Force usage of index
++                         - TL_OPTION_ALIAS : an alias in multi table DELETE
++  @param lock_type	How table should be locked
++  @param use_index	List of indexes used in USE INDEX
++  @param ignore_index	List of indexes used in IGNORE INDEX
++
++  @retval
++      0		Error
++  @retval
++    \#	Pointer to TABLE_LIST element added to the total table list
++*/
++
++TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
++					     Table_ident *table,
++					     LEX_STRING *alias,
++					     ulong table_options,
++					     thr_lock_type lock_type,
++					     List<Index_hint> *index_hints_arg,
++                                             LEX_STRING *option)
++{
++  register TABLE_LIST *ptr;
++  TABLE_LIST *previous_table_ref; /* The table preceding the current one. */
++  char *alias_str;
++  LEX *lex= thd->lex;
++  DBUG_ENTER("add_table_to_list");
++  LINT_INIT(previous_table_ref);
++
++  if (!table)
++    DBUG_RETURN(0);				// End of memory
++  alias_str= alias ? alias->str : table->table.str;
++  if (!test(table_options & TL_OPTION_ALIAS) && 
++      check_table_name(table->table.str, table->table.length, FALSE))
++  {
++    my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str);
++    DBUG_RETURN(0);
++  }
++
++  if (table->is_derived_table() == FALSE && table->db.str &&
++      check_db_name(&table->db))
++  {
++    my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
++    DBUG_RETURN(0);
++  }
++
++  if (!alias)					/* Alias is case sensitive */
++  {
++    if (table->sel)
++    {
++      my_message(ER_DERIVED_MUST_HAVE_ALIAS,
++                 ER(ER_DERIVED_MUST_HAVE_ALIAS), MYF(0));
++      DBUG_RETURN(0);
++    }
++    if (!(alias_str= (char*) thd->memdup(alias_str,table->table.length+1)))
++      DBUG_RETURN(0);
++  }
++  if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
++    DBUG_RETURN(0);				/* purecov: inspected */
++  if (table->db.str)
++  {
++    ptr->db= table->db.str;
++    ptr->db_length= table->db.length;
++  }
++  else if (lex->copy_db_to(&ptr->db, &ptr->db_length))
++    DBUG_RETURN(0);
++
++  ptr->alias= alias_str;
++  if (lower_case_table_names && table->table.length)
++    table->table.length= my_casedn_str(files_charset_info, table->table.str);
++  ptr->table_name=table->table.str;
++  ptr->table_name_length=table->table.length;
++  ptr->lock_type=   lock_type;
++  ptr->updating=    test(table_options & TL_OPTION_UPDATING);
++  /* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
++  ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX);
++  ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES);
++  ptr->derived=	    table->sel;
++  if (!ptr->derived && is_schema_db(ptr->db, ptr->db_length))
++  {
++    ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name);
++    if (!schema_table ||
++        (schema_table->hidden && 
++         ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 || 
++          /*
++            this check is used for show columns|keys from I_S hidden table
++          */
++          lex->sql_command == SQLCOM_SHOW_FIELDS ||
++          lex->sql_command == SQLCOM_SHOW_KEYS)))
++    {
++      my_error(ER_UNKNOWN_TABLE, MYF(0),
++               ptr->table_name, INFORMATION_SCHEMA_NAME.str);
++      DBUG_RETURN(0);
++    }
++    ptr->schema_table_name= ptr->table_name;
++    ptr->schema_table= schema_table;
++  }
++  ptr->select_lex=  lex->current_select;
++  ptr->cacheable_table= 1;
++  ptr->index_hints= index_hints_arg;
++  ptr->option= option ? option->str : 0;
++  /* check that used name is unique */
++  if (lock_type != TL_IGNORE)
++  {
++    TABLE_LIST *first_table= table_list.first;
++    if (lex->sql_command == SQLCOM_CREATE_VIEW)
++      first_table= first_table ? first_table->next_local : NULL;
++    for (TABLE_LIST *tables= first_table ;
++	 tables ;
++	 tables=tables->next_local)
++    {
++      if (!my_strcasecmp(table_alias_charset, alias_str, tables->alias) &&
++	  !strcmp(ptr->db, tables->db))
++      {
++	my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str); /* purecov: tested */
++	DBUG_RETURN(0);				/* purecov: tested */
++      }
++    }
++  }
++  /* Store the table reference preceding the current one. */
++  if (table_list.elements > 0)
++  {
++    /*
++      table_list.next points to the last inserted TABLE_LIST->next_local
++      element
++      We don't use the offsetof() macro here to avoid warnings from gcc
++    */
++    previous_table_ref= (TABLE_LIST*) ((char*) table_list.next -
++                                       ((char*) &(ptr->next_local) -
++                                        (char*) ptr));
++    /*
++      Set next_name_resolution_table of the previous table reference to point
++      to the current table reference. In effect the list
++      TABLE_LIST::next_name_resolution_table coincides with
++      TABLE_LIST::next_local. Later this may be changed in
++      store_top_level_join_columns() for NATURAL/USING joins.
++    */
++    previous_table_ref->next_name_resolution_table= ptr;
++  }
++
++  /*
++    Link the current table reference in a local list (list for current select).
++    Notice that as a side effect here we set the next_local field of the
++    previous table reference to 'ptr'. Here we also add one element to the
++    list 'table_list'.
++  */
++  table_list.link_in_list(ptr, &ptr->next_local);
++  ptr->next_name_resolution_table= NULL;
++  /* Link table in global list (all used tables) */
++  lex->add_to_query_tables(ptr);
++  DBUG_RETURN(ptr);
++}
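
The previous_table_ref computation above is the trickiest part of this function: table_list.next stores the address of the last element's next_local field, and the element itself is recovered by subtracting that field's offset by hand. Below is a standalone sketch (not from the MySQL sources) of the same container_of-style arithmetic; node and owner_of_next_ptr are invented names standing in for TABLE_LIST.

    #include <cstdio>

    struct node
    {
      int   payload;
      node *next_local;
    };

    /* stored_next points at some element's next_local field; subtracting the
       distance of next_local inside the struct yields the element itself. */
    static node *owner_of_next_ptr(node **stored_next, node *sample)
    {
      return (node*) ((char*) stored_next -
                      ((char*) &sample->next_local - (char*) sample));
    }

    int main()
    {
      node a= { 1, 0 };
      node **tail= &a.next_local;          // what the list keeps in 'next'
      node *prev= owner_of_next_ptr(tail, &a);
      printf("recovered payload: %d\n", prev->payload);   // prints 1
      return 0;
    }
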
++
++
++/**
++  Initialize a new table list for a nested join.
++
++    The function initializes a structure of the TABLE_LIST type
++    for a nested join. It sets up its nested join list as empty.
++    The created structure is added to the front of the current
++    join list in the st_select_lex object. Then the function
++    changes the current nest level for joins to refer to the newly
++    created empty list after having saved the info on the old level
++    in the initialized structure.
++
++  @param thd         current thread
++
++  @retval
++    0   if success
++  @retval
++    1   otherwise
++*/
++
++bool st_select_lex::init_nested_join(THD *thd)
++{
++  TABLE_LIST *ptr;
++  NESTED_JOIN *nested_join;
++  DBUG_ENTER("init_nested_join");
++
++  if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
++                                       sizeof(NESTED_JOIN))))
++    DBUG_RETURN(1);
++  nested_join= ptr->nested_join=
++    ((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST))));
++
++  join_list->push_front(ptr);
++  ptr->embedding= embedding;
++  ptr->join_list= join_list;
++  ptr->alias= (char*) "(nested_join)";
++  embedding= ptr;
++  join_list= &nested_join->join_list;
++  join_list->empty();
++  DBUG_RETURN(0);
++}
++
++
++/**
++  End a nested join table list.
++
++    The function returns to the previous join nest level.
++    If the current level contains only one member, the function
++    moves it one level up, eliminating the nest.
++
++  @param thd         current thread
++
++  @return
++    - Pointer to TABLE_LIST element added to the total table list, if success
++    - 0, otherwise
++*/
++
++TABLE_LIST *st_select_lex::end_nested_join(THD *thd)
++{
++  TABLE_LIST *ptr;
++  NESTED_JOIN *nested_join;
++  DBUG_ENTER("end_nested_join");
++
++  DBUG_ASSERT(embedding);
++  ptr= embedding;
++  join_list= ptr->join_list;
++  embedding= ptr->embedding;
++  nested_join= ptr->nested_join;
++  if (nested_join->join_list.elements == 1)
++  {
++    TABLE_LIST *embedded= nested_join->join_list.head();
++    join_list->pop();
++    embedded->join_list= join_list;
++    embedded->embedding= embedding;
++    join_list->push_front(embedded);
++    ptr= embedded;
++  }
++  else if (nested_join->join_list.elements == 0)
++  {
++    join_list->pop();
++    ptr= 0;                                     // return value
++  }
++  DBUG_RETURN(ptr);
++}
++
++
++/**
++  Nest last join operation.
++
++    The function nest last join operation as if it was enclosed in braces.
++
++  @param thd         current thread
++
++  @retval
++    0  Error
++  @retval
++    \#  Pointer to TABLE_LIST element created for the new nested join
++*/
++
++TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
++{
++  TABLE_LIST *ptr;
++  NESTED_JOIN *nested_join;
++  List<TABLE_LIST> *embedded_list;
++  DBUG_ENTER("nest_last_join");
++
++  if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
++                                       sizeof(NESTED_JOIN))))
++    DBUG_RETURN(0);
++  nested_join= ptr->nested_join=
++    ((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST))));
++
++  ptr->embedding= embedding;
++  ptr->join_list= join_list;
++  ptr->alias= (char*) "(nest_last_join)";
++  embedded_list= &nested_join->join_list;
++  embedded_list->empty();
++
++  for (uint i=0; i < 2; i++)
++  {
++    TABLE_LIST *table= join_list->pop();
++    table->join_list= embedded_list;
++    table->embedding= ptr;
++    embedded_list->push_back(table);
++    if (table->natural_join)
++    {
++      ptr->is_natural_join= TRUE;
++      /*
++        If this is a JOIN ... USING, move the list of joined fields to the
++        table reference that describes the join.
++      */
++      if (prev_join_using)
++        ptr->join_using_fields= prev_join_using;
++    }
++  }
++  join_list->push_front(ptr);
++  nested_join->used_tables= nested_join->not_null_tables= (table_map) 0;
++  DBUG_RETURN(ptr);
++}
++
++
++/**
++  Add a table to the current join list.
++
++    The function puts a table in front of the current join list
++    of st_select_lex object.
++    Thus, joined tables are put into this list in the reverse order
++    (the most outer join operation follows first).
++
++  @param table       the table to add
++
++  @return
++    None
++*/
++
++void st_select_lex::add_joined_table(TABLE_LIST *table)
++{
++  DBUG_ENTER("add_joined_table");
++  join_list->push_front(table);
++  table->join_list= join_list;
++  table->embedding= embedding;
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Convert a right join into equivalent left join.
++
++    The function takes the current join list t[0],t[1] ... and
++    effectively converts it into the list t[1],t[0] ...
++    Although the outer_join flag for the new nested table contains
++    JOIN_TYPE_RIGHT, it will be handled as the inner table of a left join
++    operation.
++
++  EXAMPLES
++  @verbatim
++    SELECT * FROM t1 RIGHT JOIN t2 ON on_expr =>
++      SELECT * FROM t2 LEFT JOIN t1 ON on_expr
++
++    SELECT * FROM t1,t2 RIGHT JOIN t3 ON on_expr =>
++      SELECT * FROM t1,t3 LEFT JOIN t2 ON on_expr
++
++    SELECT * FROM t1,t2 RIGHT JOIN (t3,t4) ON on_expr =>
++      SELECT * FROM t1,(t3,t4) LEFT JOIN t2 ON on_expr
++
++    SELECT * FROM t1 LEFT JOIN t2 ON on_expr1 RIGHT JOIN t3  ON on_expr2 =>
++      SELECT * FROM t3 LEFT JOIN (t1 LEFT JOIN t2 ON on_expr2) ON on_expr1
++   @endverbatim
++
++  @param thd         current thread
++
++  @return
++    - Pointer to the table representing the inner table, if success
++    - 0, otherwise
++*/
++
++TABLE_LIST *st_select_lex::convert_right_join()
++{
++  TABLE_LIST *tab2= join_list->pop();
++  TABLE_LIST *tab1= join_list->pop();
++  DBUG_ENTER("convert_right_join");
++
++  join_list->push_front(tab2);
++  join_list->push_front(tab1);
++  tab1->outer_join|= JOIN_TYPE_RIGHT;
++
++  DBUG_RETURN(tab1);
++}
++
++/**
++  Set lock for all tables in current select level.
++
++  @param lock_type			Lock to set for tables
++
++  @note
++    If lock is a write lock, then tables->updating is set to 1.
++    This is to get tables_ok to know that the table is updated by the
++    query
++*/
++
++void st_select_lex::set_lock_for_tables(thr_lock_type lock_type)
++{
++  bool for_update= lock_type >= TL_READ_NO_INSERT;
++  DBUG_ENTER("set_lock_for_tables");
++  DBUG_PRINT("enter", ("lock_type: %d  for_update: %d", lock_type,
++		       for_update));
++  for (TABLE_LIST *tables= table_list.first;
++       tables;
++       tables= tables->next_local)
++  {
++    tables->lock_type= lock_type;
++    tables->updating=  for_update;
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Create a fake SELECT_LEX for a unit.
++
++    The method creates a fake SELECT_LEX object for a unit.
++    This object is created for any union construct containing a union
++    operation and also for any single select union construct of the form
++    @verbatim
++    (SELECT ... ORDER BY order_list [LIMIT n]) ORDER BY ... 
++    @endverbatim
++    or of the form
++    @verbatim
++    (SELECT ... ORDER BY LIMIT n) ORDER BY ...
++    @endverbatim
++  
++  @param thd_arg		   thread handle
++
++  @note
++    The object is used to retrieve rows from the temporary table
++    where the result on the union is obtained.
++
++  @retval
++    1     on failure to create the object
++  @retval
++    0     on success
++*/
++
++bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
++{
++  SELECT_LEX *first_sl= first_select();
++  DBUG_ENTER("add_fake_select_lex");
++  DBUG_ASSERT(!fake_select_lex);
++
++  if (!(fake_select_lex= new (thd_arg->mem_root) SELECT_LEX()))
++      DBUG_RETURN(1);
++  fake_select_lex->include_standalone(this, 
++                                      (SELECT_LEX_NODE**)&fake_select_lex);
++  fake_select_lex->select_number= INT_MAX;
++  fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. */
++  fake_select_lex->make_empty_select();
++  fake_select_lex->linkage= GLOBAL_OPTIONS_TYPE;
++  fake_select_lex->select_limit= 0;
++
++  fake_select_lex->context.outer_context=first_sl->context.outer_context;
++  /* allow item list resolving in fake select for ORDER BY */
++  fake_select_lex->context.resolve_in_select_list= TRUE;
++  fake_select_lex->context.select_lex= fake_select_lex;
++
++  if (!is_union())
++  {
++    /* 
++      This works only for 
++      (SELECT ... ORDER BY list [LIMIT n]) ORDER BY order_list [LIMIT m],
++      (SELECT ... LIMIT n) ORDER BY order_list [LIMIT m]
++      just before the parser starts processing order_list
++    */ 
++    global_parameters= fake_select_lex;
++    fake_select_lex->no_table_names_allowed= 1;
++    thd_arg->lex->current_select= fake_select_lex;
++  }
++  thd_arg->lex->pop_context();
++  DBUG_RETURN(0);
++}
++
++
++/**
++  Push a new name resolution context for a JOIN ... ON clause to the
++  context stack of a query block.
++
++    Create a new name resolution context for a JOIN ... ON clause,
++    set the first and last leaves of the list of table references
++    to be used for name resolution, and push the newly created
++    context to the stack of contexts of the query.
++
++  @param thd       pointer to current thread
++  @param left_op   left  operand of the JOIN
++  @param right_op  right operand of the JOIN
++
++  @retval
++    FALSE  if all is OK
++  @retval
++    TRUE   if a memory allocation error occurred
++*/
++
++bool
++push_new_name_resolution_context(THD *thd,
++                                 TABLE_LIST *left_op, TABLE_LIST *right_op)
++{
++  Name_resolution_context *on_context;
++  if (!(on_context= new (thd->mem_root) Name_resolution_context))
++    return TRUE;
++  on_context->init();
++  on_context->first_name_resolution_table=
++    left_op->first_leaf_for_name_resolution();
++  on_context->last_name_resolution_table=
++    right_op->last_leaf_for_name_resolution();
++  return thd->lex->push_context(on_context);
++}
++
++
++/**
++  Add an ON condition to the second operand of a JOIN ... ON.
++
++    Add an ON condition to the right operand of a JOIN ... ON clause.
++
++  @param b     the second operand of a JOIN ... ON
++  @param expr  the condition to be added to the ON clause
++
++  @return
++    None
++*/
++
++void add_join_on(TABLE_LIST *b, Item *expr)
++{
++  if (expr)
++  {
++    if (!b->on_expr)
++      b->on_expr= expr;
++    else
++    {
++      /*
++        If called from the parser, this happens if you have both a
++        right and left join. If called later, it happens if we add more
++        than one condition to the ON clause.
++      */
++      b->on_expr= new Item_cond_and(b->on_expr,expr);
++    }
++    b->on_expr->top_level_item();
++  }
++}
++
++
++/**
++  Mark that there is a NATURAL JOIN or JOIN ... USING between two
++  tables.
++
++    This function marks that table b should be joined with a either via
++    a NATURAL JOIN or via JOIN ... USING. Both join types are special
++    cases of each other, so we treat them together. The function
++    setup_conds() creates a list of equal condition between all fields
++    of the same name for NATURAL JOIN or the fields in 'using_fields'
++    for JOIN ... USING. The list of equality conditions is stored
++    either in b->on_expr, or in JOIN::conds, depending on whether there
++    was an outer join.
++
++  EXAMPLE
++  @verbatim
++    SELECT * FROM t1 NATURAL LEFT JOIN t2
++     <=>
++    SELECT * FROM t1 LEFT JOIN t2 ON (t1.i=t2.i and t1.j=t2.j ... )
++
++    SELECT * FROM t1 NATURAL JOIN t2 WHERE <some_cond>
++     <=>
++    SELECT * FROM t1, t2 WHERE (t1.i=t2.i and t1.j=t2.j and <some_cond>)
++
++    SELECT * FROM t1 JOIN t2 USING(j) WHERE <some_cond>
++     <=>
++    SELECT * FROM t1, t2 WHERE (t1.j=t2.j and <some_cond>)
++   @endverbatim
++
++  @param a		  Left join argument
++  @param b		  Right join argument
++  @param using_fields    Field names from USING clause
++*/
++
++void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields,
++                      SELECT_LEX *lex)
++{
++  b->natural_join= a;
++  lex->prev_join_using= using_fields;
++}
++
++
++/**
++  Reload/resets privileges and the different caches.
++
++  @param thd Thread handler (can be NULL!)
++  @param options What should be reset/reloaded (tables, privileges, slave...)
++  @param tables Tables to flush (if any)
++  @param write_to_binlog < 0 if there was an error while interacting with the binary log inside
++                         reload_acl_and_cache, 
++                         0 if we should not write to the binary log, 
++                         > 0 if we can write to the binlog.
++               
++  @note Depending on 'options', it may be very bad to write the
++    query to the binlog (e.g. FLUSH SLAVE); this is a
++    pointer where reload_acl_and_cache() will put 0 if
++    it thinks we really should not write to the binlog.
++    Otherwise it will put 1.
++
++  @return Error status code
++    @retval 0 Ok
++    @retval !=0  Error; thd->killed is set or thd->is_error() is true
++*/
++
++bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
++                          int *write_to_binlog)
++{
++  bool result=0;
++  select_errors=0;				/* Write if more errors */
++  int tmp_write_to_binlog= *write_to_binlog= 1;
++
++  DBUG_ASSERT(!thd || !thd->in_sub_stmt);
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (options & REFRESH_GRANT)
++  {
++    THD *tmp_thd= 0;
++    /*
++      If reload_acl_and_cache() is called from SIGHUP handler we have to
++      allocate temporary THD for execution of acl_reload()/grant_reload().
++    */
++    if (!thd && (thd= (tmp_thd= new THD)))
++    {
++      thd->thread_stack= (char*) &tmp_thd;
++      thd->store_globals();
++      lex_start(thd);
++    }
++
++    if (thd)
++    {
++      bool reload_acl_failed= acl_reload(thd);
++      bool reload_grants_failed= grant_reload(thd);
++      bool reload_servers_failed= servers_reload(thd);
++
++      if (reload_acl_failed || reload_grants_failed || reload_servers_failed)
++      {
++        result= 1;
++        /*
++          When an error is returned, my_message may have not been called and
++          the client will hang waiting for a response.
++        */
++        my_error(ER_UNKNOWN_ERROR, MYF(0), "FLUSH PRIVILEGES failed");
++      }
++    }
++
++    if (tmp_thd)
++    {
++      delete tmp_thd;
++      /* Remember that we don't have a THD */
++      my_pthread_setspecific_ptr(THR_THD,  0);
++      thd= 0;
++    }
++    reset_mqh((LEX_USER *)NULL, TRUE);
++  }
++#endif
++  if (options & REFRESH_LOG)
++  {
++    /*
++      Flush the normal query log, the update log, the binary log,
++      the slow query log, the relay log (if it exists) and the log
++      tables.
++    */
++
++    /*
++      Writing this command to the binlog may result in infinite loops
++      when doing mysqlbinlog|mysql, and anyway it does not really make
++      sense to log it automatically (would cause more trouble to users
++      than it would help them)
++    */
++    tmp_write_to_binlog= 0;
++    if( mysql_bin_log.is_open() )
++    {
++      if (mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE))
++        *write_to_binlog= -1;
++    }
++#ifdef HAVE_REPLICATION
++    int rotate_error= 0;
++    pthread_mutex_lock(&LOCK_active_mi);
++    rotate_error= rotate_relay_log(active_mi);
++    pthread_mutex_unlock(&LOCK_active_mi);
++    if (rotate_error)
++      *write_to_binlog= -1;
++#endif
++
++    /* flush slow and general logs */
++    logger.flush_logs(thd);
++
++    if (ha_flush_logs(NULL))
++      result=1;
++    if (flush_error_log())
++      result=1;
++  }
++#ifdef HAVE_QUERY_CACHE
++  if (options & REFRESH_QUERY_CACHE_FREE)
++  {
++    query_cache.pack();				// FLUSH QUERY CACHE
++    options &= ~REFRESH_QUERY_CACHE;    // Don't flush cache, just free memory
++  }
++  if (options & (REFRESH_TABLES | REFRESH_QUERY_CACHE))
++  {
++    query_cache.flush();			// RESET QUERY CACHE
++  }
++#endif /*HAVE_QUERY_CACHE*/
++  /*
++    Note that if REFRESH_READ_LOCK bit is set then REFRESH_TABLES is set too
++    (see sql_yacc.yy)
++  */
++  if (options & (REFRESH_TABLES | REFRESH_READ_LOCK)) 
++  {
++    if ((options & REFRESH_READ_LOCK) && thd)
++    {
++      /*
++        We must not try to aspire a global read lock if we have a write
++        locked table. This would lead to a deadlock when trying to
++        reopen (and re-lock) the table after the flush.
++      */
++      if (thd->locked_tables)
++      {
++        THR_LOCK_DATA **lock_p= thd->locked_tables->locks;
++        THR_LOCK_DATA **end_p= lock_p + thd->locked_tables->lock_count;
++
++        for (; lock_p < end_p; lock_p++)
++        {
++          if ((*lock_p)->type >= TL_WRITE_ALLOW_WRITE)
++          {
++            my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
++            return 1;
++          }
++        }
++      }
++      /*
++	Writing to the binlog could cause deadlocks, as we don't log
++	UNLOCK TABLES
++      */
++      tmp_write_to_binlog= 0;
++      if (lock_global_read_lock(thd))
++	return 1;                               // Killed
++      if (close_cached_tables(thd, tables, FALSE, (options & REFRESH_FAST) ?
++                              FALSE : TRUE, TRUE))
++          result= 1;
++      
++      if (make_global_read_lock_block_commit(thd)) // Killed
++      {
++        /* Don't leave things in a half-locked state */
++        unlock_global_read_lock(thd);
++        return 1;
++      }
++    }
++    else
++    {
++      if (close_cached_tables(thd, tables, FALSE, (options & REFRESH_FAST) ?
++                              FALSE : TRUE, FALSE))
++        result= 1;
++    }
++    my_dbopt_cleanup();
++  }
++  if (options & REFRESH_HOSTS)
++    hostname_cache_refresh();
++  if (thd && (options & REFRESH_STATUS))
++    refresh_status(thd);
++  if (options & REFRESH_THREADS)
++    flush_thread_cache();
++#ifdef HAVE_REPLICATION
++  if (options & REFRESH_MASTER)
++  {
++    DBUG_ASSERT(thd);
++    tmp_write_to_binlog= 0;
++    if (reset_master(thd))
++    {
++      result=1;
++    }
++  }
++#endif
++#ifdef OPENSSL
++   if (options & REFRESH_DES_KEY_FILE)
++   {
++     if (des_key_file && load_des_key_file(des_key_file))
++         result= 1;
++   }
++#endif
++#ifdef HAVE_REPLICATION
++ if (options & REFRESH_SLAVE)
++ {
++   tmp_write_to_binlog= 0;
++   pthread_mutex_lock(&LOCK_active_mi);
++   if (reset_slave(thd, active_mi))
++     result=1;
++   pthread_mutex_unlock(&LOCK_active_mi);
++ }
++#endif
++ if (options & REFRESH_USER_RESOURCES)
++   reset_mqh((LEX_USER *) NULL, 0);             /* purecov: inspected */
++ if (*write_to_binlog != -1)
++   *write_to_binlog= tmp_write_to_binlog;
++ /*
++   If the query was killed then this function must fail.
++ */
++ return result || (thd ? thd->killed : 0);
++}
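
The *write_to_binlog out-parameter documented above carries three distinct states (-1, 0, >0). The following is a short sketch, not taken from the MySQL sources, of how a caller is expected to interpret them; handle_flush_result and the enum names are invented for illustration only.

    #include <cstdio>

    enum binlog_decision { BINLOG_ERROR = -1, BINLOG_SKIP = 0, BINLOG_WRITE = 1 };

    static void handle_flush_result(bool failed, int write_to_binlog)
    {
      if (failed)
        printf("statement failed; nothing is logged\n");
      else if (write_to_binlog < 0)
        printf("flush ok, but a binlog rotation error occurred: warn the user\n");
      else if (write_to_binlog == 0)
        printf("flush ok; deliberately not written to the binary log\n");
      else
        printf("flush ok; safe to write the statement to the binary log\n");
    }

    int main()
    {
      handle_flush_result(false, BINLOG_WRITE);   // e.g. FLUSH HOSTS
      handle_flush_result(false, BINLOG_SKIP);    // e.g. FLUSH LOGS
      handle_flush_result(false, BINLOG_ERROR);   // rotation failed inside
      return 0;
    }
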
++
++
++/**
++  kill one thread.
++
++  @param thd			Thread class
++  @param id			Thread id
++  @param only_kill_query        Should it kill the query or the connection
++
++  @note
++    This is written such that we have a short lock on LOCK_thread_count
++*/
++
++uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
++{
++  THD *tmp;
++  uint error=ER_NO_SUCH_THREAD;
++  DBUG_ENTER("kill_one_thread");
++  DBUG_PRINT("enter", ("id=%lu only_kill=%d", id, only_kill_query));
++  VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list
++  I_List_iterator<THD> it(threads);
++  while ((tmp=it++))
++  {
++    if (tmp->command == COM_DAEMON)
++      continue;
++    if (tmp->thread_id == id)
++    {
++      pthread_mutex_lock(&tmp->LOCK_thd_data);	// Lock from delete
++      break;
++    }
++  }
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++  if (tmp)
++  {
++
++    /*
++      If we're SUPER, we can KILL anything, including system-threads.
++      No further checks.
++
++      KILLer: thd->security_ctx->user could in theory be NULL while
++      we're still in "unauthenticated" state. This is a theoretical
++      case (the code suggests this could happen, so we play it safe).
++
++      KILLee: tmp->security_ctx->user will be NULL for system threads.
++      We need to check so Jane Random User doesn't crash the server
++      when trying to kill a) system threads or b) unauthenticated users'
++      threads (Bug#43748).
++
++      If user of both killer and killee are non-NULL, proceed with
++      slayage if both are string-equal.
++    */
++
++    if ((thd->security_ctx->master_access & SUPER_ACL) ||
++        thd->security_ctx->user_matches(tmp->security_ctx))
++    {
++      tmp->awake(only_kill_query ? THD::KILL_QUERY : THD::KILL_CONNECTION);
++      error=0;
++    }
++    else
++      error=ER_KILL_DENIED_ERROR;
++    pthread_mutex_unlock(&tmp->LOCK_thd_data);
++  }
++  DBUG_PRINT("exit", ("%d", error));
++  DBUG_RETURN(error);
++}
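
kill_one_thread() holds LOCK_thread_count only while scanning the thread list, takes the victim's LOCK_thd_data before releasing it, and then does the real work under the per-thread lock alone. Below is a small sketch of that lock hand-off, not from the MySQL sources: Registry and Session are invented types, and std::mutex stands in for the pthread mutexes.

    #include <list>
    #include <mutex>
    #include <cstdio>

    struct Session { unsigned long id; std::mutex lock; };

    struct Registry
    {
      std::mutex          list_lock;       // plays the role of LOCK_thread_count
      std::list<Session*> sessions;

      bool kill(unsigned long id)
      {
        Session *victim= 0;
        {
          std::lock_guard<std::mutex> g(list_lock);   // short global lock
          for (Session *s : sessions)
            if (s->id == id) { s->lock.lock(); victim= s; break; }
        }                                             // global lock released here
        if (!victim)
          return false;
        printf("killing session %lu\n", victim->id);  // real work done without
        victim->lock.unlock();                        // holding the global lock
        return true;
      }
    };

    int main()
    {
      Session s1{ 7 }, s2{ 8 };
      Registry r;
      r.sessions= { &s1, &s2 };
      return r.kill(8) ? 0 : 1;
    }
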
++
++
++/*
++  kills a thread and sends response
++
++  SYNOPSIS
++    sql_kill()
++    thd			Thread class
++    id			Thread id
++    only_kill_query     Should it kill the query or the connection
++*/
++
++void sql_kill(THD *thd, ulong id, bool only_kill_query)
++{
++  uint error;
++  if (!(error= kill_one_thread(thd, id, only_kill_query)))
++    my_ok(thd);
++  else
++    my_error(error, MYF(0), id);
++}
++
++
++/** If pointer is not a null pointer, append filename to it. */
++
++bool append_file_to_dir(THD *thd, const char **filename_ptr,
++                        const char *table_name)
++{
++  char buff[FN_REFLEN],*ptr, *end;
++  if (!*filename_ptr)
++    return 0;					// nothing to do
++
++  /* Check that the filename is not too long and it's a hard path */
++  if (strlen(*filename_ptr)+strlen(table_name) >= FN_REFLEN-1 ||
++      !test_if_hard_path(*filename_ptr))
++  {
++    my_error(ER_WRONG_TABLE_NAME, MYF(0), *filename_ptr);
++    return 1;
++  }
++  /* Fix is using unix filename format on dos */
++  strmov(buff,*filename_ptr);
++  end=convert_dirname(buff, *filename_ptr, NullS);
++  if (!(ptr= (char*) thd->alloc((size_t) (end-buff) + strlen(table_name)+1)))
++    return 1;					// End of memory
++  *filename_ptr=ptr;
++  strxmov(ptr,buff,table_name,NullS);
++  return 0;
++}
++
++
++/**
++  Check if the select is a simple select (not a union).
++
++  @retval
++    0	ok
++  @retval
++    1	error	; In this case the error message is sent to the client
++*/
++
++bool check_simple_select()
++{
++  THD *thd= current_thd;
++  LEX *lex= thd->lex;
++  if (lex->current_select != &lex->select_lex)
++  {
++    char command[80];
++    Lex_input_stream *lip= & thd->m_parser_state->m_lip;
++    strmake(command, lip->yylval->symbol.str,
++	    min(lip->yylval->symbol.length, sizeof(command)-1));
++    my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command);
++    return 1;
++  }
++  return 0;
++}
++
++
++Comp_creator *comp_eq_creator(bool invert)
++{
++  return invert?(Comp_creator *)&ne_creator:(Comp_creator *)&eq_creator;
++}
++
++
++Comp_creator *comp_ge_creator(bool invert)
++{
++  return invert?(Comp_creator *)&lt_creator:(Comp_creator *)&ge_creator;
++}
++
++
++Comp_creator *comp_gt_creator(bool invert)
++{
++  return invert?(Comp_creator *)&le_creator:(Comp_creator *)&gt_creator;
++}
++
++
++Comp_creator *comp_le_creator(bool invert)
++{
++  return invert?(Comp_creator *)&gt_creator:(Comp_creator *)&le_creator;
++}
++
++
++Comp_creator *comp_lt_creator(bool invert)
++{
++  return invert?(Comp_creator *)&ge_creator:(Comp_creator *)&lt_creator;
++}
++
++
++Comp_creator *comp_ne_creator(bool invert)
++{
++  return invert?(Comp_creator *)&eq_creator:(Comp_creator *)&ne_creator;
++}
++
++
++/**
++  Construct ALL/ANY/SOME subquery Item.
++
++  @param left_expr   pointer to left expression
++  @param cmp         compare function creator
++  @param all         true if we create ALL subquery
++  @param select_lex  pointer on parsed subquery structure
++
++  @return
++    constructed Item (or 0 if out of memory)
++*/
++Item * all_any_subquery_creator(Item *left_expr,
++				chooser_compare_func_creator cmp,
++				bool all,
++				SELECT_LEX *select_lex)
++{
++  if ((cmp == &comp_eq_creator) && !all)       //  = ANY <=> IN
++    return new Item_in_subselect(left_expr, select_lex);
++
++  if ((cmp == &comp_ne_creator) && all)        // <> ALL <=> NOT IN
++    return new Item_func_not(new Item_in_subselect(left_expr, select_lex));
++
++  Item_allany_subselect *it=
++    new Item_allany_subselect(left_expr, cmp, select_lex, all);
++  if (all)
++    return it->upper_item= new Item_func_not_all(it);	/* ALL */
++
++  return it->upper_item= new Item_func_nop_all(it);      /* ANY/SOME */
++}
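
The special cases in all_any_subquery_creator() read more easily as a table: = ANY collapses to IN, <> ALL collapses to NOT IN, and everything else is wrapped in a NOT_ALL item (for ALL) or a NOP_ALL item (for ANY/SOME). Here is a toy sketch of that dispatch, not from the MySQL sources; Cmp, Shape and classify are invented names.

    #include <cstdio>

    enum Cmp   { CMP_EQ, CMP_NE, CMP_OTHER };
    enum Shape { SHAPE_IN, SHAPE_NOT_IN, SHAPE_NOT_ALL, SHAPE_NOP_ALL };

    static Shape classify(Cmp cmp, bool all)
    {
      if (cmp == CMP_EQ && !all) return SHAPE_IN;       //  = ANY  <=>  IN
      if (cmp == CMP_NE &&  all) return SHAPE_NOT_IN;   // <> ALL  <=>  NOT IN
      return all ? SHAPE_NOT_ALL                        // other ALL: wrap in NOT_ALL
                 : SHAPE_NOP_ALL;                       // other ANY/SOME: NOP_ALL
    }

    int main()
    {
      printf("= ANY   -> %d (IN)\n",      classify(CMP_EQ, false));
      printf("<> ALL  -> %d (NOT IN)\n",  classify(CMP_NE, true));
      printf("> ALL   -> %d (NOT_ALL)\n", classify(CMP_OTHER, true));
      printf("< SOME  -> %d (NOP_ALL)\n", classify(CMP_OTHER, false));
      return 0;
    }
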
++
++
++/**
++  Multi update query pre-check.
++
++  @param thd		Thread handler
++  @param tables	Global/local table list (have to be the same)
++
++  @retval
++    FALSE OK
++  @retval
++    TRUE  Error
++*/
++
++bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
++{
++  const char *msg= 0;
++  TABLE_LIST *table;
++  LEX *lex= thd->lex;
++  SELECT_LEX *select_lex= &lex->select_lex;
++  DBUG_ENTER("multi_update_precheck");
++
++  if (select_lex->item_list.elements != lex->value_list.elements)
++  {
++    my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  /*
++    Ensure that we have UPDATE or SELECT privilege for each table
++    The exact privilege is checked in mysql_multi_update()
++  */
++  for (table= tables; table; table= table->next_local)
++  {
++    if (table->derived)
++      table->grant.privilege= SELECT_ACL;
++    else if ((check_access(thd, UPDATE_ACL, table->db,
++                           &table->grant.privilege, 0, 1,
++                           test(table->schema_table)) ||
++              check_grant(thd, UPDATE_ACL, table, 0, 1, 1)) &&
++             (check_access(thd, SELECT_ACL, table->db,
++                           &table->grant.privilege, 0, 0,
++                           test(table->schema_table)) ||
++              check_grant(thd, SELECT_ACL, table, 0, 1, 0)))
++      DBUG_RETURN(TRUE);
++
++    table->table_in_first_from_clause= 1;
++  }
++  /*
++    Are there tables from subqueries?
++  */
++  if (&lex->select_lex != lex->all_selects_list)
++  {
++    DBUG_PRINT("info",("Checking sub query list"));
++    for (table= tables; table; table= table->next_global)
++    {
++      if (!table->table_in_first_from_clause)
++      {
++	if (check_access(thd, SELECT_ACL, table->db,
++			 &table->grant.privilege, 0, 0,
++                         test(table->schema_table)) ||
++	    check_grant(thd, SELECT_ACL, table, 0, 1, 0))
++	  DBUG_RETURN(TRUE);
++      }
++    }
++  }
++
++  if (select_lex->order_list.elements)
++    msg= "ORDER BY";
++  else if (select_lex->select_limit)
++    msg= "LIMIT";
++  if (msg)
++  {
++    my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", msg);
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++/**
++  Multi delete query pre-check.
++
++  @param thd			Thread handler
++  @param tables		Global/local table list
++
++  @retval
++    FALSE OK
++  @retval
++    TRUE  error
++*/
++
++bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
++{
++  SELECT_LEX *select_lex= &thd->lex->select_lex;
++  TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first;
++  TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
++  DBUG_ENTER("multi_delete_precheck");
++
++  /* sql_yacc guarantees that tables and aux_tables are not zero */
++  DBUG_ASSERT(aux_tables != 0);
++  if (check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
++    DBUG_RETURN(TRUE);
++
++  /*
++    Since aux_tables list is not part of LEX::query_tables list we
++    have to juggle with LEX::query_tables_own_last value to be able
++    call check_table_access() safely.
++  */
++  thd->lex->query_tables_own_last= 0;
++  if (check_table_access(thd, DELETE_ACL, aux_tables, UINT_MAX, FALSE))
++  {
++    thd->lex->query_tables_own_last= save_query_tables_own_last;
++    DBUG_RETURN(TRUE);
++  }
++  thd->lex->query_tables_own_last= save_query_tables_own_last;
++
++  if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where)
++  {
++    my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
++               ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  Link tables in auxiliary table list of multi-delete with corresponding
++  elements in main table list, and set proper locks for them.
++
++  @param lex   pointer to LEX representing multi-delete
++
++  @retval
++    FALSE   success
++  @retval
++    TRUE    error
++*/
++
++bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
++{
++  TABLE_LIST *tables= lex->select_lex.table_list.first;
++  TABLE_LIST *target_tbl;
++  DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables");
++
++  lex->table_count= 0;
++
++  for (target_tbl= lex->auxiliary_table_list.first;
++       target_tbl; target_tbl= target_tbl->next_local)
++  {
++    lex->table_count++;
++    /* All tables in aux_tables must be found in FROM PART */
++    TABLE_LIST *walk;
++    for (walk= tables; walk; walk= walk->next_local)
++    {
++      if (!my_strcasecmp(table_alias_charset,
++			 target_tbl->alias, walk->alias) &&
++	  !strcmp(walk->db, target_tbl->db))
++	break;
++    }
++    if (!walk)
++    {
++      my_error(ER_UNKNOWN_TABLE, MYF(0),
++               target_tbl->table_name, "MULTI DELETE");
++      DBUG_RETURN(TRUE);
++    }
++    if (!walk->derived)
++    {
++      target_tbl->table_name= walk->table_name;
++      target_tbl->table_name_length= walk->table_name_length;
++    }
++    walk->updating= target_tbl->updating;
++    walk->lock_type= target_tbl->lock_type;
++    target_tbl->correspondent_table= walk;	// Remember corresponding table
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  simple UPDATE query pre-check.
++
++  @param thd		Thread handler
++  @param tables	Global table list
++
++  @retval
++    FALSE OK
++  @retval
++    TRUE  Error
++*/
++
++bool update_precheck(THD *thd, TABLE_LIST *tables)
++{
++  DBUG_ENTER("update_precheck");
++  if (thd->lex->select_lex.item_list.elements != thd->lex->value_list.elements)
++  {
++    my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(check_one_table_access(thd, UPDATE_ACL, tables));
++}
++
++
++/**
++  simple DELETE query pre-check.
++
++  @param thd		Thread handler
++  @param tables	Global table list
++
++  @retval
++    FALSE  OK
++  @retval
++    TRUE   error
++*/
++
++bool delete_precheck(THD *thd, TABLE_LIST *tables)
++{
++  DBUG_ENTER("delete_precheck");
++  if (check_one_table_access(thd, DELETE_ACL, tables))
++    DBUG_RETURN(TRUE);
++  /* Set privilege for the WHERE clause */
++  tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  simple INSERT query pre-check.
++
++  @param thd		Thread handler
++  @param tables	Global table list
++
++  @retval
++    FALSE  OK
++  @retval
++    TRUE   error
++*/
++
++bool insert_precheck(THD *thd, TABLE_LIST *tables)
++{
++  LEX *lex= thd->lex;
++  DBUG_ENTER("insert_precheck");
++
++  /*
++    Check that we have modify privileges for the first table and
++    select privileges for the rest
++  */
++  ulong privilege= (INSERT_ACL |
++                    (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) |
++                    (lex->value_list.elements ? UPDATE_ACL : 0));
++
++  if (check_one_table_access(thd, privilege, tables))
++    DBUG_RETURN(TRUE);
++
++  if (lex->update_list.elements != lex->value_list.elements)
++  {
++    my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++    @brief  Check privileges for SHOW CREATE TABLE statement.
++
++    @param  thd    Thread context
++    @param  table  Target table
++
++    @retval TRUE  Failure
++    @retval FALSE Success
++*/
++
++static bool check_show_create_table_access(THD *thd, TABLE_LIST *table)
++{
++  return check_access(thd, SELECT_ACL | EXTRA_ACL, table->db,
++                      &table->grant.privilege, 0, 0,
++                      test(table->schema_table)) ||
++         check_grant(thd, SELECT_ACL, table, 2, UINT_MAX, 0);
++}
++
++
++/**
++  CREATE TABLE query pre-check.
++
++  @param thd			Thread handler
++  @param tables		Global table list
++  @param create_table	        Table which will be created
++
++  @retval
++    FALSE   OK
++  @retval
++    TRUE   Error
++*/
++
++bool create_table_precheck(THD *thd, TABLE_LIST *tables,
++                           TABLE_LIST *create_table)
++{
++  LEX *lex= thd->lex;
++  SELECT_LEX *select_lex= &lex->select_lex;
++  ulong want_priv;
++  bool error= TRUE;                                 // Error message is given
++  DBUG_ENTER("create_table_precheck");
++
++  /*
++    Require CREATE [TEMPORARY] privilege on new table; for
++    CREATE TABLE ... SELECT, also require INSERT.
++  */
++
++  want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ?
++              CREATE_TMP_ACL : CREATE_ACL) |
++             (select_lex->item_list.elements ? INSERT_ACL : 0);
++
++  if (check_access(thd, want_priv, create_table->db,
++		   &create_table->grant.privilege, 0, 0,
++                   test(create_table->schema_table)) ||
++      check_merge_table_access(thd, create_table->db,
++                               lex->create_info.merge_list.first))
++    goto err;
++  if (want_priv != CREATE_TMP_ACL &&
++      check_grant(thd, want_priv, create_table, 0, 1, 0))
++    goto err;
++
++  if (select_lex->item_list.elements)
++  {
++    /* Check permissions for used tables in CREATE TABLE ... SELECT */
++
++#ifdef NOT_NECESSARY_TO_CHECK_CREATE_TABLE_EXIST_WHEN_PREPARING_STATEMENT
++    /* This code throws a spurious error for CREATE TABLE t1 SELECT * FROM t1 */
++    /*
++      Only do the check for PS, because on execute we have to check that
++      against the opened tables to ensure we don't use a table that is part
++      of the view (which can only be done after the table has been opened).
++    */
++    if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
++    {
++      /*
++        For temporary tables we don't have to check if the created table exists
++      */
++      if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) &&
++          find_table_in_global_list(tables, create_table->db,
++                                    create_table->table_name))
++      {
++	error= FALSE;
++        goto err;
++      }
++    }
++#endif
++    if (tables && check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
++      goto err;
++  }
++  else if (lex->create_info.options & HA_LEX_CREATE_TABLE_LIKE)
++  {
++    if (check_show_create_table_access(thd, tables))
++      goto err;
++  }
++  error= FALSE;
++
++err:
++  DBUG_RETURN(error);
++}
++
++
++/**
++  negate given expression.
++
++  @param thd  thread handler
++  @param expr expression for negation
++
++  @return
++    negated expression
++*/
++
++Item *negate_expression(THD *thd, Item *expr)
++{
++  Item *negated;
++  if (expr->type() == Item::FUNC_ITEM &&
++      ((Item_func *) expr)->functype() == Item_func::NOT_FUNC)
++  {
++    /* it is NOT(NOT( ... )) */
++    Item *arg= ((Item_func *) expr)->arguments()[0];
++    enum_parsing_place place= thd->lex->current_select->parsing_place;
++    if (arg->is_bool_func() || place == IN_WHERE || place == IN_HAVING)
++      return arg;
++    /*
++      if it is not boolean function then we have to emulate value of
++      not(not(a)), it will be a != 0
++    */
++    return new Item_func_ne(arg, new Item_int((char*) "0", 0, 1));
++  }
++
++  if ((negated= expr->neg_transformer(thd)) != 0)
++    return negated;
++  return new Item_func_not(expr);
++}
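
negate_expression() has three outcomes: strip a double NOT when the inner item is boolean (or we are in WHERE/HAVING), otherwise rewrite NOT(NOT(a)) as a != 0 to preserve the 0/1 value, and for anything else try neg_transformer() before falling back to wrapping in Item_func_not. A toy sketch of that decision follows; Expr and negate are invented stand-ins for Item, not MySQL APIs.

    #include <cstdio>

    struct Expr
    {
      enum Kind { NOT_FUNC, BOOL_FUNC, OTHER } kind;
      Expr *arg;                     // inner argument, only used for NOT_FUNC
    };

    /* Mirrors the order of checks in negate_expression(). */
    static const char *negate(const Expr &e, bool in_where_or_having)
    {
      if (e.kind == Expr::NOT_FUNC)
      {
        if (e.arg->kind == Expr::BOOL_FUNC || in_where_or_having)
          return "NOT(NOT(a)) -> return a unchanged";
        return "NOT(NOT(a)) -> rewrite as (a != 0)";   // keep the 0/1 value
      }
      return "try neg_transformer(), else wrap in Item_func_not";
    }

    int main()
    {
      Expr a=     { Expr::OTHER, 0 };
      Expr not_a= { Expr::NOT_FUNC, &a };
      printf("%s\n", negate(not_a, true));
      printf("%s\n", negate(not_a, false));
      printf("%s\n", negate(a, false));
      return 0;
    }
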
++
++/**
++  Set the specified definer to the default value, which is the
++  current user in the thread.
++ 
++  @param[in]  thd       thread handler
++  @param[out] definer   definer
++*/
++ 
++void get_default_definer(THD *thd, LEX_USER *definer)
++{
++  const Security_context *sctx= thd->security_ctx;
++
++  definer->user.str= (char *) sctx->priv_user;
++  definer->user.length= strlen(definer->user.str);
++
++  definer->host.str= (char *) sctx->priv_host;
++  definer->host.length= strlen(definer->host.str);
++
++  definer->password.str= NULL;
++  definer->password.length= 0;
++}
++
++
++/**
++  Create default definer for the specified THD.
++
++  @param[in] thd         thread handler
++
++  @return
++    - On success, return a valid pointer to the created and initialized
++    LEX_USER, which contains definer information.
++    - On error, return 0.
++*/
++
++LEX_USER *create_default_definer(THD *thd)
++{
++  LEX_USER *definer;
++
++  if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
++    return 0;
++
++  thd->get_definer(definer);
++
++  return definer;
++}
++
++
++/**
++  Create definer with the given user and host names.
++
++  @param[in] thd          thread handler
++  @param[in] user_name    user name
++  @param[in] host_name    host name
++
++  @return
++    - On success, return a valid pointer to the created and initialized
++    LEX_USER, which contains definer information.
++    - On error, return 0.
++*/
++
++LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name)
++{
++  LEX_USER *definer;
++
++  /* Create and initialize. */
++
++  if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
++    return 0;
++
++  definer->user= *user_name;
++  definer->host= *host_name;
++  definer->password.str= NULL;
++  definer->password.length= 0;
++
++  return definer;
++}
++
++
++/**
++  Returns information about user or current user.
++
++  @param[in] thd          thread handler
++  @param[in] user         user
++
++  @return
++    - On success, return a valid pointer to initialized
++    LEX_USER, which contains user information.
++    - On error, return 0.
++*/
++
++LEX_USER *get_current_user(THD *thd, LEX_USER *user)
++{
++  if (!user->user.str)  // current_user
++    return create_default_definer(thd);
++
++  return user;
++}
++
++
++/**
++  Check that byte length of a string does not exceed some limit.
++
++  @param str         string to be checked
++  @param err_msg     error message to be displayed if the string is too long
++  @param max_byte_length  max length in bytes
++
++  @retval
++    FALSE   the passed string is not longer than max_length
++  @retval
++    TRUE    the passed string is longer than max_length
++
++  NOTE
++    The function is not used in existing code but may be useful later.
++*/
++
++bool check_string_byte_length(LEX_STRING *str, const char *err_msg,
++                              uint max_byte_length)
++{
++  if (str->length <= max_byte_length)
++    return FALSE;
++
++  my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_byte_length);
++
++  return TRUE;
++}
++
++
++/*
++  Check that char length of a string does not exceed some limit.
++
++  SYNOPSIS
++  check_string_char_length()
++      str              string to be checked
++      err_msg          error message to be displayed if the string is too long
++      max_char_length  max length in symbols
++      cs               string charset
++
++  RETURN
++    FALSE   the passed string is not longer than max_char_length
++    TRUE    the passed string is longer than max_char_length
++*/
++
++
++bool check_string_char_length(LEX_STRING *str, const char *err_msg,
++                              uint max_char_length, CHARSET_INFO *cs,
++                              bool no_error)
++{
++  int well_formed_error;
++  uint res= cs->cset->well_formed_len(cs, str->str, str->str + str->length,
++                                      max_char_length, &well_formed_error);
++
++  if (!well_formed_error &&  str->length == res)
++    return FALSE;
++
++  if (!no_error)
++    my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_char_length);
++  return TRUE;
++}
++
++
++/*
++  Check if path does not contain mysql data home directory
++  SYNOPSIS
++    test_if_data_home_dir()
++    dir                     directory
++    conv_home_dir           converted data home directory
++    home_dir_len            converted data home directory length
++
++  RETURN VALUES
++    0	ok
++    1	error  
++*/
++C_MODE_START
++
++int test_if_data_home_dir(const char *dir)
++{
++  char path[FN_REFLEN];
++  int dir_len;
++  DBUG_ENTER("test_if_data_home_dir");
++
++  if (!dir)
++    DBUG_RETURN(0);
++
++  (void) fn_format(path, dir, "", "",
++                   (MY_RETURN_REAL_PATH|MY_RESOLVE_SYMLINKS));
++  dir_len= strlen(path);
++  if (mysql_unpacked_real_data_home_len<= dir_len)
++  {
++    if (dir_len > mysql_unpacked_real_data_home_len &&
++        path[mysql_unpacked_real_data_home_len] != FN_LIBCHAR)
++      DBUG_RETURN(0);
++
++    if (lower_case_file_system)
++    {
++      if (!my_strnncoll(default_charset_info, (const uchar*) path,
++                        mysql_unpacked_real_data_home_len,
++                        (const uchar*) mysql_unpacked_real_data_home,
++                        mysql_unpacked_real_data_home_len))
++        DBUG_RETURN(1);
++    }
++    else if (!memcmp(path, mysql_unpacked_real_data_home,
++                     mysql_unpacked_real_data_home_len))
++      DBUG_RETURN(1);
++  }
++  DBUG_RETURN(0);
++}
++
++C_MODE_END
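
test_if_data_home_dir() only treats a path as being inside the data home when the data home is a prefix and the next character is a path separator (or the path equals the data home), so that /datadir2 does not wrongly match /datadir. A minimal sketch of that check, not from the MySQL sources, with an invented helper name (is_inside):

    #include <cstring>
    #include <cstdio>

    static bool is_inside(const char *path, const char *home, char sep)
    {
      size_t home_len= strlen(home);
      if (strlen(path) < home_len)
        return false;
      if (strlen(path) > home_len && path[home_len] != sep)
        return false;                      // e.g. "/data2" is not under "/data"
      return memcmp(path, home, home_len) == 0;
    }

    int main()
    {
      printf("%d\n", is_inside("/var/lib/mysql/db1", "/var/lib/mysql", '/')); // 1
      printf("%d\n", is_inside("/var/lib/mysql2",    "/var/lib/mysql", '/')); // 0
      return 0;
    }
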
++
++
++/**
++  Check that host name string is valid.
++
++  @param[in] str string to be checked
++
++  @return             Operation status
++    @retval  FALSE    host name is ok
++    @retval  TRUE     host name string is longer than max_length or
++                      has invalid symbols
++*/
++
++bool check_host_name(LEX_STRING *str)
++{
++  const char *name= str->str;
++  const char *end= str->str + str->length;
++  if (check_string_byte_length(str, ER(ER_HOSTNAME), HOSTNAME_LENGTH))
++    return TRUE;
++
++  while (name != end)
++  {
++    if (*name == '@')
++    {
++      my_printf_error(ER_UNKNOWN_ERROR, 
++                      "Malformed hostname (illegal symbol: '%c')", MYF(0),
++                      *name);
++      return TRUE;
++    }
++    name++;
++  }
++  return FALSE;
++}
++
++
++extern int MYSQLparse(void *thd); // from sql_yacc.cc
++
++
++/**
++  This is a wrapper of MYSQLparse(). All the code should call parse_sql()
++  instead of MYSQLparse().
++
++  @param thd Thread context.
++  @param parser_state Parser state.
++  @param creation_ctx Object creation context.
++
++  @return Error status.
++    @retval FALSE on success.
++    @retval TRUE on parsing error.
++*/
++
++bool parse_sql(THD *thd,
++               Parser_state *parser_state,
++               Object_creation_ctx *creation_ctx)
++{
++  DBUG_ASSERT(thd->m_parser_state == NULL);
++
++  /* Backup creation context. */
++
++  Object_creation_ctx *backup_ctx= NULL;
++
++  if (creation_ctx)
++    backup_ctx= creation_ctx->set_n_backup(thd);
++
++  /* Set parser state. */
++
++  thd->m_parser_state= parser_state;
++
++  /* Parse the query. */
++
++  bool mysql_parse_status= MYSQLparse(thd) != 0;
++
++  /* Check that if MYSQLparse() failed, thd->is_error() is set. */
++
++  DBUG_ASSERT(!mysql_parse_status ||
++              (mysql_parse_status && thd->is_error()));
++
++  /* Reset parser state. */
++
++  thd->m_parser_state= NULL;
++
++  /* Restore creation context. */
++
++  if (creation_ctx)
++    creation_ctx->restore_env(thd, backup_ctx);
++
++  /* That's it. */
++
++  return mysql_parse_status || thd->is_fatal_error;
++}
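
parse_sql() is a pure save/run/restore wrapper: back up the object creation context, install the parser state, call MYSQLparse(), then undo both regardless of the outcome. Below is a toy sketch of the same discipline, not from the MySQL sources; Ctx, with_ctx and do_parse are invented names.

    #include <cstdio>

    struct Ctx { const char *creation_ctx; const char *parser_state; };

    static bool do_parse(Ctx &c)           // stands in for MYSQLparse(thd)
    {
      printf("parsing with state=%s ctx=%s\n", c.parser_state, c.creation_ctx);
      return false;                        // false == success, as in the wrapper
    }

    static bool with_ctx(Ctx &c, const char *new_ctx, const char *state)
    {
      const char *backup= c.creation_ctx;  // 1. back up creation context
      c.creation_ctx= new_ctx;
      c.parser_state= state;               // 2. install parser state
      bool failed= do_parse(c);            // 3. parse
      c.parser_state= 0;                   // 4. reset parser state
      c.creation_ctx= backup;              // 5. restore creation context
      return failed;
    }

    int main()
    {
      Ctx c= { "default", 0 };
      return with_ctx(c, "trigger-ctx", "stmt-state") ? 1 : 0;
    }
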
++
++/**
++  @} (end of group Runtime_Environment)
++*/
+diff -urN mysql-old/sql/sql_partition.cc mysql/sql/sql_partition.cc
+--- mysql-old/sql/sql_partition.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/sql_partition.cc	2011-05-10 17:56:01.513349044 +0000
+@@ -4592,7 +4592,7 @@
+             */
+             start_part= 0;
+             end_part= new_total_partitions - (upper_2n + 1);
+-            end_part= max(lower_2n - 1, end_part);
++            end_part= MYSQL_MAX(lower_2n - 1, end_part);
+           }
+           else if (new_total_partitions <= upper_2n)
+           {
+diff -urN mysql-old/sql/sql_plugin.cc mysql/sql/sql_plugin.cc
+--- mysql-old/sql/sql_plugin.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_plugin.cc	2011-05-10 17:56:01.516682377 +0000
+@@ -508,7 +508,7 @@
+     for (i=0;
+          (old=(struct st_mysql_plugin *)(ptr+i*sizeof_st_plugin))->info;
+          i++)
+-      memcpy(cur+i, old, min(sizeof(cur[i]), sizeof_st_plugin));
++      memcpy(cur+i, old, MYSQL_MIN(sizeof(cur[i]), sizeof_st_plugin));
+ 
+     sym= cur;
+   }
+@@ -2124,7 +2124,7 @@
+                      &error, &error_len, &not_used);
+     if (error_len)
+     {
+-      strmake(buff, error, min(sizeof(buff) - 1, error_len));
++      strmake(buff, error, MYSQL_MIN(sizeof(buff) - 1, error_len));
+       strvalue= buff;
+       goto err;
+     }
+diff -urN mysql-old/sql/sql_prepare.cc mysql/sql/sql_prepare.cc
+--- mysql-old/sql/sql_prepare.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/sql_prepare.cc	2011-05-10 17:56:01.520015710 +0000
+@@ -249,7 +249,7 @@
+   int2store(buff+5, columns);
+   int2store(buff+7, stmt->param_count);
+   buff[9]= 0;                                   // Guard against a 4.1 client
+-  tmp= min(stmt->thd->total_warn_count, 65535);
++  tmp= MYSQL_MIN(stmt->thd->total_warn_count, 65535);
+   int2store(buff+10, tmp);
+ 
+   /*
+diff -urN mysql-old/sql/sql_profile.cc mysql/sql/sql_profile.cc
+--- mysql-old/sql/sql_profile.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/sql_profile.cc	2011-05-10 17:56:01.520015710 +0000
+@@ -252,7 +252,7 @@
+                                      uint query_length_arg)
+ {
+   /* Truncate to avoid DoS attacks. */
+-  uint length= min(MAX_QUERY_LENGTH, query_length_arg);
++  uint length= MYSQL_MIN(MAX_QUERY_LENGTH, query_length_arg);
+ 
+   DBUG_ASSERT(query_source == NULL); /* we don't leak memory */
+   if (query_source_arg != NULL)
+diff -urN mysql-old/sql/sql_repl.cc mysql/sql/sql_repl.cc
+--- mysql-old/sql/sql_repl.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_repl.cc	2011-05-10 17:56:01.523349043 +0000
+@@ -1297,12 +1297,12 @@
+    {
+      /*
+        Sometimes mi->rli.master_log_pos == 0 (it happens when the SQL thread is
+-       not initialized), so we use a max().
++       not initialized), so we use a MYSQL_MAX().
+        What happens to mi->rli.master_log_pos during the initialization stages
+        of replication is not 100% clear, so we guard against problems using
+        max().
+       */
+-     mi->master_log_pos = max(BIN_LOG_HEADER_SIZE,
++     mi->master_log_pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE,
+ 			      mi->rli.group_master_log_pos);
+      strmake(mi->master_log_name, mi->rli.group_master_log_name,
+              sizeof(mi->master_log_name)-1);
+@@ -1474,7 +1474,7 @@
+     LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
+     SELECT_LEX_UNIT *unit= &thd->lex->unit;
+     ha_rows event_count, limit_start, limit_end;
+-    my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
++    my_off_t pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
+     char search_file_name[FN_REFLEN], *name;
+     const char *log_file_name = lex_mi->log_file_name;
+     pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock();
+@@ -1745,14 +1745,14 @@
+     DBUG_RETURN(0);
+   
+   for (block_len= (uint) (my_b_get_bytes_in_buffer(file)); block_len > 0;
+-       buffer += min(block_len, max_event_size),
+-       block_len -= min(block_len, max_event_size))
++       buffer += MYSQL_MIN(block_len, max_event_size),
++       block_len -= MYSQL_MIN(block_len, max_event_size))
+   {
+     lf_info->last_pos_in_file= my_b_get_pos_in_file(file);
+     if (lf_info->wrote_create_file)
+     {
+       Append_block_log_event a(lf_info->thd, lf_info->thd->db, buffer,
+-                               min(block_len, max_event_size),
++                               MYSQL_MIN(block_len, max_event_size),
+                                lf_info->log_delayed);
+       if (mysql_bin_log.write(&a))
+         DBUG_RETURN(1);
+@@ -1761,7 +1761,7 @@
+     {
+       Begin_load_query_log_event b(lf_info->thd, lf_info->thd->db,
+                                    buffer,
+-                                   min(block_len, max_event_size),
++                                   MYSQL_MIN(block_len, max_event_size),
+                                    lf_info->log_delayed);
+       if (mysql_bin_log.write(&b))
+         DBUG_RETURN(1);
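
The sql_repl.cc hunk above walks a file buffer in chunks of at most max_event_size, emitting one Append_block/Begin_load_query event per chunk, with MYSQL_MIN used both to size each chunk and to advance the loop. The standalone sketch below shows the same chunking pattern; printf stands in for the binlog writes and the names and sizes are made up.

  /* Chunking pattern from the hunk, with std::min in place of MYSQL_MIN. */
  #include <algorithm>
  #include <cstddef>
  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<unsigned char> file_buffer(10000, 'x');
    const size_t max_event_size = 4096;

    const unsigned char *buffer = file_buffer.data();
    for (size_t block_len = file_buffer.size(); block_len > 0;
         buffer += std::min(block_len, max_event_size),
         block_len -= std::min(block_len, max_event_size)) {
      /* one event per iteration, never larger than max_event_size */
      std::printf("write event of %zu bytes at offset %td\n",
                  std::min(block_len, max_event_size),
                  buffer - file_buffer.data());
    }
    return 0;
  }
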
+diff -urN mysql-old/sql/sql_select.cc mysql/sql/sql_select.cc
+--- mysql-old/sql/sql_select.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/sql_select.cc	2011-05-10 17:56:01.526682376 +0000
+@@ -3002,7 +3002,7 @@
+       This is can't be to high as otherwise we are likely to use
+       table scan.
+     */
+-    s->worst_seeks= min((double) s->found_records / 10,
++    s->worst_seeks= MYSQL_MIN((double) s->found_records / 10,
+ 			(double) s->read_time*3);
+     if (s->worst_seeks < 2.0)			// Fix for small tables
+       s->worst_seeks=2.0;
+@@ -3938,7 +3938,7 @@
+   uint	and_level,i,found_eq_constant;
+   KEY_FIELD *key_fields, *end, *field;
+   uint sz;
+-  uint m= max(select_lex->max_equal_elems,1);
++  uint m= MYSQL_MAX(select_lex->max_equal_elems,1);
+   
+   /* 
+     We use the same piece of memory to store both  KEY_FIELD 
+@@ -3961,7 +3961,7 @@
+     can be not more than select_lex->max_equal_elems such 
+     substitutions.
+   */ 
+-  sz= max(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))*
++  sz= MYSQL_MAX(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))*
+       (((thd->lex->current_select->cond_count+1)*2 +
+ 	thd->lex->current_select->between_count)*m+1);
+   if (!(key_fields=(KEY_FIELD*)	thd->alloc(sz)))
+@@ -4124,7 +4124,7 @@
+       if (map == 1)			// Only one table
+       {
+ 	TABLE *tmp_table=join->all_tables[tablenr];
+-	keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100);
++	keyuse->ref_table_rows= MYSQL_MAX(tmp_table->file->stats.records, 100);
+       }
+     }
+     /*
+@@ -4444,7 +4444,7 @@
+               tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
+             }
+             else
+-              tmp= record_count*min(tmp,s->worst_seeks);
++              tmp= record_count*MYSQL_MIN(tmp,s->worst_seeks);
+           }
+         }
+         else
+@@ -4611,7 +4611,7 @@
+               tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
+             }
+             else
+-              tmp= record_count*min(tmp,s->worst_seeks);
++              tmp= record_count*MYSQL_MIN(tmp,s->worst_seeks);
+           }
+           else
+             tmp= best_time;                    // Do nothing
+@@ -5561,7 +5561,7 @@
+   {
+     uint blob_length=(uint) (join_tab->table->file->stats.mean_rec_length-
+ 			     (join_tab->table->s->reclength- rec_length));
+-    rec_length+=(uint) max(4,blob_length);
++    rec_length+=(uint) MYSQL_MAX(4,blob_length);
+   }
+   join_tab->used_fields=fields;
+   join_tab->used_fieldlength=rec_length;
+@@ -10477,7 +10477,7 @@
+     share->max_rows= ~(ha_rows) 0;
+   else
+     share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
+-                                 min(thd->variables.tmp_table_size,
++                                 MYSQL_MIN(thd->variables.tmp_table_size,
+                                      thd->variables.max_heap_table_size) :
+                                  thd->variables.tmp_table_size) /
+ 			         share->reclength);
+@@ -13649,7 +13649,7 @@
+             index entry.
+ 	  */
+           index_scan_time= select_limit/rec_per_key *
+-	                   min(rec_per_key, table->file->scan_time());
++	                   MYSQL_MIN(rec_per_key, table->file->scan_time());
+           if ((ref_key < 0 && is_covering) || 
+               (ref_key < 0 && (group || table->force_index)) ||
+               index_scan_time < read_time)
+@@ -13661,7 +13661,7 @@
+             if (table->quick_keys.is_set(nr))
+               quick_records= table->quick_rows[nr];
+             if (best_key < 0 ||
+-                (select_limit <= min(quick_records,best_records) ?
++                (select_limit <= MYSQL_MIN(quick_records,best_records) ?
+                  keyinfo->key_parts < best_key_parts :
+                  quick_records < best_records))
+             {
+@@ -14359,7 +14359,7 @@
+     count++;
+   if (!sortorder)
+     sortorder= (SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD) *
+-                                       (max(count, *length) + 1));
++                                       (MYSQL_MAX(count, *length) + 1));
+   pos= sort= sortorder;
+ 
+   if (!pos)
+@@ -14481,7 +14481,7 @@
+   cache->length=length+blobs*sizeof(char*);
+   cache->blobs=blobs;
+   *blob_ptr=0;					/* End sequentel */
+-  size=max(thd->variables.join_buff_size, cache->length);
++  size=MYSQL_MAX(thd->variables.join_buff_size, cache->length);
+   if (!(cache->buff=(uchar*) my_malloc(size,MYF(0))))
+     DBUG_RETURN(1);				/* Don't use cache */ /* purecov: inspected */
+   cache->end=cache->buff+size;
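
The sql_select.cc hunks touch cost estimates and buffer sizing rather than data copying: worst_seeks is clamped to the smaller of found_records/10 and read_time*3 with a floor of 2.0 for small tables, while the key_fields allocation and the join cache buffer use MYSQL_MAX to take the larger of two sizes. A rough, self-contained sketch of the worst_seeks clamp follows; the input numbers are invented and the surrounding optimizer state is omitted.

  /* Sketch of the worst_seeks clamp from the first sql_select.cc hunk. */
  #include <algorithm>
  #include <cstdio>

  int main() {
    double found_records = 1200.0;   /* estimated rows in the table */
    double read_time = 35.0;         /* estimated full-scan cost */

    double worst_seeks = std::min(found_records / 10.0, read_time * 3.0);
    if (worst_seeks < 2.0)           /* fix for small tables, as in the original */
      worst_seeks = 2.0;

    std::printf("worst_seeks = %.1f\n", worst_seeks);
    return 0;
  }
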
+diff -urN mysql-old/sql/sql_select.cc.orig mysql/sql/sql_select.cc.orig
+--- mysql-old/sql/sql_select.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/sql_select.cc.orig	2011-04-12 12:11:38.000000000 +0000
+@@ -0,0 +1,17352 @@
++/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
++
++/**
++  @file
++
++  @brief
++  mysql_select and join optimization
++
++
++  @defgroup Query_Optimizer  Query Optimizer
++  @{
++*/
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation				// gcc: Class implementation
++#endif
++
++#include "mysql_priv.h"
++#include "sql_select.h"
++#include "sql_cursor.h"
++
++#include <m_ctype.h>
++#include <my_bit.h>
++#include <hash.h>
++#include <ft_global.h>
++
++const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref",
++			      "MAYBE_REF","ALL","range","index","fulltext",
++			      "ref_or_null","unique_subquery","index_subquery",
++                              "index_merge"
++};
++
++struct st_sargable_param;
++
++static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array);
++static bool make_join_statistics(JOIN *join, TABLE_LIST *leaves, COND *conds,
++				 DYNAMIC_ARRAY *keyuse);
++static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
++                                JOIN_TAB *join_tab,
++                                uint tables, COND *conds,
++                                COND_EQUAL *cond_equal,
++                                table_map table_map, SELECT_LEX *select_lex,
++                                st_sargable_param **sargables);
++static int sort_keyuse(KEYUSE *a,KEYUSE *b);
++static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key);
++static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
++			       table_map used_tables);
++static bool choose_plan(JOIN *join,table_map join_tables);
++
++static void best_access_path(JOIN *join, JOIN_TAB *s, THD *thd,
++                             table_map remaining_tables, uint idx,
++                             double record_count, double read_time);
++static void optimize_straight_join(JOIN *join, table_map join_tables);
++static bool greedy_search(JOIN *join, table_map remaining_tables,
++                             uint depth, uint prune_level);
++static bool best_extension_by_limited_search(JOIN *join,
++                                             table_map remaining_tables,
++                                             uint idx, double record_count,
++                                             double read_time, uint depth,
++                                             uint prune_level);
++static uint determine_search_depth(JOIN* join);
++static int join_tab_cmp(const void* ptr1, const void* ptr2);
++static int join_tab_cmp_straight(const void* ptr1, const void* ptr2);
++/*
++  TODO: 'find_best' is here only temporarily until 'greedy_search' is
++  tested and approved.
++*/
++static bool find_best(JOIN *join,table_map rest_tables,uint index,
++		      double record_count,double read_time);
++static uint cache_record_length(JOIN *join,uint index);
++static double prev_record_reads(JOIN *join, uint idx, table_map found_ref);
++static bool get_best_combination(JOIN *join);
++static store_key *get_store_key(THD *thd,
++				KEYUSE *keyuse, table_map used_tables,
++				KEY_PART_INFO *key_part, uchar *key_buff,
++				uint maybe_null);
++static void make_outerjoin_info(JOIN *join);
++static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item);
++static void make_join_readinfo(JOIN *join, ulonglong options);
++static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables);
++static void update_depend_map(JOIN *join);
++static void update_depend_map(JOIN *join, ORDER *order);
++static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond,
++			   bool change_list, bool *simple_order);
++static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables,
++                            List<Item> &fields, bool send_row,
++                            ulonglong select_options, const char *info,
++                            Item *having);
++static COND *build_equal_items(THD *thd, COND *cond,
++                               COND_EQUAL *inherited,
++                               List<TABLE_LIST> *join_list,
++                               COND_EQUAL **cond_equal_ref);
++static COND* substitute_for_best_equal_field(COND *cond,
++                                             COND_EQUAL *cond_equal,
++                                             void *table_join_idx);
++static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list,
++                            COND *conds, bool top);
++static bool check_interleaving_with_nj(JOIN_TAB *next);
++static void restore_prev_nj_state(JOIN_TAB *last);
++static void reset_nj_counters(List<TABLE_LIST> *join_list);
++static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
++                                          uint first_unused);
++
++static COND *optimize_cond(JOIN *join, COND *conds,
++                           List<TABLE_LIST> *join_list,
++			   Item::cond_result *cond_value);
++static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
++static bool open_tmp_table(TABLE *table);
++static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
++				    ulonglong options);
++static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
++		     Procedure *proc);
++
++static enum_nested_loop_state
++evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
++                     int error);
++static enum_nested_loop_state
++evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab);
++static enum_nested_loop_state
++flush_cached_records(JOIN *join, JOIN_TAB *join_tab, bool skip_last);
++static enum_nested_loop_state
++end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++static enum_nested_loop_state
++end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++static enum_nested_loop_state
++end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++static enum_nested_loop_state
++end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++static enum_nested_loop_state
++end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++static enum_nested_loop_state
++end_write_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
++
++static int test_if_group_changed(List<Cached_item> &list);
++static int join_read_const_table(JOIN_TAB *tab, POSITION *pos);
++static int join_read_system(JOIN_TAB *tab);
++static int join_read_const(JOIN_TAB *tab);
++static int join_read_key(JOIN_TAB *tab);
++static void join_read_key_unlock_row(st_join_table *tab);
++static int join_read_always_key(JOIN_TAB *tab);
++static int join_read_last_key(JOIN_TAB *tab);
++static int join_no_more_records(READ_RECORD *info);
++static int join_read_next(READ_RECORD *info);
++static int join_init_quick_read_record(JOIN_TAB *tab);
++static int test_if_quick_select(JOIN_TAB *tab);
++static int join_init_read_record(JOIN_TAB *tab);
++static int join_read_first(JOIN_TAB *tab);
++static int join_read_next(READ_RECORD *info);
++static int join_read_next_same(READ_RECORD *info);
++static int join_read_last(JOIN_TAB *tab);
++static int join_read_prev_same(READ_RECORD *info);
++static int join_read_prev(READ_RECORD *info);
++static int join_ft_read_first(JOIN_TAB *tab);
++static int join_ft_read_next(READ_RECORD *info);
++int join_read_always_key_or_null(JOIN_TAB *tab);
++int join_read_next_same_or_null(READ_RECORD *info);
++static COND *make_cond_for_table(COND *cond,table_map table,
++				 table_map used_table);
++static Item* part_of_refkey(TABLE *form,Field *field);
++uint find_shortest_key(TABLE *table, const key_map *usable_keys);
++static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
++				    ha_rows select_limit, bool no_changes,
++                                    key_map *map);
++static bool list_contains_unique_index(TABLE *table,
++                          bool (*find_func) (Field *, void *), void *data);
++static bool find_field_in_item_list (Field *field, void *data);
++static bool find_field_in_order_list (Field *field, void *data);
++static int create_sort_index(THD *thd, JOIN *join, ORDER *order,
++			     ha_rows filesort_limit, ha_rows select_limit,
++                             bool is_order_by);
++static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
++			     Item *having);
++static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
++				   ulong offset,Item *having);
++static int remove_dup_with_hash_index(THD *thd,TABLE *table,
++				      uint field_count, Field **first_field,
++
++				      ulong key_length,Item *having);
++static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count);
++static ulong used_blob_length(CACHE_FIELD **ptr);
++static bool store_record_in_cache(JOIN_CACHE *cache);
++static void reset_cache_read(JOIN_CACHE *cache);
++static void reset_cache_write(JOIN_CACHE *cache);
++static void read_cached_record(JOIN_TAB *tab);
++static bool cmp_buffer_with_ref(JOIN_TAB *tab);
++static bool setup_new_fields(THD *thd, List<Item> &fields,
++			     List<Item> &all_fields, ORDER *new_order);
++static ORDER *create_distinct_group(THD *thd, Item **ref_pointer_array,
++                                    ORDER *order, List<Item> &fields,
++                                    List<Item> &all_fields,
++				    bool *all_order_by_fields_used);
++static bool test_if_subpart(ORDER *a,ORDER *b);
++static TABLE *get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables);
++static void calc_group_buffer(JOIN *join,ORDER *group);
++static bool make_group_fields(JOIN *main_join, JOIN *curr_join);
++static bool alloc_group_fields(JOIN *join,ORDER *group);
++// Create list for using with tempory table
++static bool change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
++				     List<Item> &new_list1,
++				     List<Item> &new_list2,
++				     uint elements, List<Item> &items);
++// Create list for using with tempory table
++static bool change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
++				      List<Item> &new_list1,
++				      List<Item> &new_list2,
++				      uint elements, List<Item> &items);
++static void init_tmptable_sum_functions(Item_sum **func);
++static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table);
++static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end);
++static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab);
++static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr);
++static bool init_sum_functions(Item_sum **func, Item_sum **end);
++static bool update_sum_func(Item_sum **func);
++static void select_describe(JOIN *join, bool need_tmp_table,bool need_order,
++			    bool distinct, const char *message=NullS);
++static Item *remove_additional_cond(Item* conds);
++static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab);
++static bool test_if_ref(Item_field *left_item,Item *right_item);
++
++
++/**
++  This handles SELECT with and without UNION.
++*/
++
++bool handle_select(THD *thd, LEX *lex, select_result *result,
++                   ulong setup_tables_done_option)
++{
++  bool res;
++  register SELECT_LEX *select_lex = &lex->select_lex;
++  DBUG_ENTER("handle_select");
++
++  if (select_lex->master_unit()->is_union() || 
++      select_lex->master_unit()->fake_select_lex)
++    res= mysql_union(thd, lex, result, &lex->unit, setup_tables_done_option);
++  else
++  {
++    SELECT_LEX_UNIT *unit= &lex->unit;
++    unit->set_limit(unit->global_parameters);
++    /*
++      'options' of mysql_select will be set in JOIN, as far as JOIN for
++      every PS/SP execution new, we will not need reset this flag if 
++      setup_tables_done_option changed for next rexecution
++    */
++    res= mysql_select(thd, &select_lex->ref_pointer_array,
++		      select_lex->table_list.first,
++		      select_lex->with_wild, select_lex->item_list,
++		      select_lex->where,
++		      select_lex->order_list.elements +
++		      select_lex->group_list.elements,
++		      select_lex->order_list.first,
++		      select_lex->group_list.first,
++		      select_lex->having,
++		      lex->proc_list.first,
++		      select_lex->options | thd->options |
++                      setup_tables_done_option,
++		      result, unit, select_lex);
++  }
++  DBUG_PRINT("info",("res: %d  report_error: %d", res,
++		     thd->is_error()));
++  res|= thd->is_error();
++  if (unlikely(res))
++    result->abort();
++
++  DBUG_RETURN(res);
++}
++
++
++/**
++  Fix fields referenced from inner selects.
++
++  @param thd               Thread handle
++  @param all_fields        List of all fields used in select
++  @param select            Current select
++  @param ref_pointer_array Array of references to Items used in current select
++  @param group_list        GROUP BY list (is NULL by default)
++
++  @details
++    The function serves 3 purposes
++
++    - adds fields referenced from inner query blocks to the current select list
++
++    - Decides which class to use to reference the items (Item_ref or
++      Item_direct_ref)
++
++    - fixes references (Item_ref objects) to these fields.
++
++    If a field isn't already on the select list and the ref_pointer_array
++    is provided then it is added to the all_fields list and the pointer to
++    it is saved in the ref_pointer_array.
++
++    The class to access the outer field is determined by the following rules:
++
++    -#. If the outer field isn't used under an aggregate function then the
++        Item_ref class should be used.
++
++    -#. If the outer field is used under an aggregate function and this
++        function is, in turn, aggregated in the query block where the outer
++        field was resolved or some query nested therein, then the
++        Item_direct_ref class should be used. Also it should be used if we are
++        grouping by a subquery containing the outer field.
++
++    The resolution is done here and not at the fix_fields() stage as
++    it can be done only after aggregate functions are fixed and pulled up to
++    selects where they are to be aggregated.
++
++    When the class is chosen it substitutes the original field in the
++    Item_outer_ref object.
++
++    After this we proceed with fixing references (Item_outer_ref objects) to
++    this field from inner subqueries.
++
++  @return Status
++  @retval true An error occured.
++  @retval false OK.
++ */
++
++bool
++fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
++                 Item **ref_pointer_array, ORDER *group_list)
++{
++  Item_outer_ref *ref;
++
++  List_iterator<Item_outer_ref> ref_it(select->inner_refs_list);
++  while ((ref= ref_it++))
++  {
++    bool direct_ref= false;
++    Item *item= ref->outer_ref;
++    Item **item_ref= ref->ref;
++    Item_ref *new_ref;
++    /*
++      TODO: this field item already might be present in the select list.
++      In this case instead of adding new field item we could use an
++      existing one. The change will lead to less operations for copying fields,
++      smaller temporary tables and less data passed through filesort.
++    */
++    if (ref_pointer_array && !ref->found_in_select_list)
++    {
++      int el= all_fields.elements;
++      ref_pointer_array[el]= item;
++      /* Add the field item to the select list of the current select. */
++      all_fields.push_front(item);
++      /*
++        If it's needed reset each Item_ref item that refers this field with
++        a new reference taken from ref_pointer_array.
++      */
++      item_ref= ref_pointer_array + el;
++    }
++
++    if (ref->in_sum_func)
++    {
++      Item_sum *sum_func;
++      if (ref->in_sum_func->nest_level > select->nest_level)
++        direct_ref= TRUE;
++      else
++      {
++        for (sum_func= ref->in_sum_func; sum_func &&
++             sum_func->aggr_level >= select->nest_level;
++             sum_func= sum_func->in_sum_func)
++        {
++          if (sum_func->aggr_level == select->nest_level)
++          {
++            direct_ref= TRUE;
++            break;
++          }
++        }
++      }
++    }
++    else
++    {
++      /*
++        Check if GROUP BY item trees contain the outer ref:
++        in this case we have to use Item_direct_ref instead of Item_ref.
++      */
++      for (ORDER *group= group_list; group; group= group->next)
++      {
++        if ((*group->item)->walk(&Item::find_item_processor, TRUE,
++                                 (uchar *) ref))
++        {
++          direct_ref= TRUE;
++          break;
++        }
++      }
++    }
++    new_ref= direct_ref ?
++              new Item_direct_ref(ref->context, item_ref, ref->table_name,
++                          ref->field_name, ref->alias_name_used) :
++              new Item_ref(ref->context, item_ref, ref->table_name,
++                          ref->field_name, ref->alias_name_used);
++    if (!new_ref)
++      return TRUE;
++    ref->outer_ref= new_ref;
++    ref->ref= &ref->outer_ref;
++
++    if (!ref->fixed && ref->fix_fields(thd, 0))
++      return TRUE;
++    thd->used_tables|= item->used_tables();
++  }
++  return false;
++}
++
++/**
++  Function to setup clauses without sum functions.
++*/
++inline int setup_without_group(THD *thd, Item **ref_pointer_array,
++			       TABLE_LIST *tables,
++			       TABLE_LIST *leaves,
++			       List<Item> &fields,
++			       List<Item> &all_fields,
++			       COND **conds,
++			       ORDER *order,
++			       ORDER *group, bool *hidden_group_fields)
++{
++  int res;
++  nesting_map save_allow_sum_func=thd->lex->allow_sum_func ;
++  /* 
++    Need to save the value, so we can turn off only the new NON_AGG_FIELD
++    additions coming from the WHERE
++  */
++  uint8 saved_flag= thd->lex->current_select->full_group_by_flag;
++  DBUG_ENTER("setup_without_group");
++
++  thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level);
++  res= setup_conds(thd, tables, leaves, conds);
++
++  /* it's not wrong to have non-aggregated columns in a WHERE */
++  if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY)
++    thd->lex->current_select->full_group_by_flag= saved_flag |
++      (thd->lex->current_select->full_group_by_flag & ~NON_AGG_FIELD_USED);
++
++  thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
++  res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields,
++                          order);
++  thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level);
++  res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields,
++                          group, hidden_group_fields);
++  thd->lex->allow_sum_func= save_allow_sum_func;
++  DBUG_RETURN(res);
++}
++
++/*****************************************************************************
++  Check fields, find best join, do the select and output fields.
++  mysql_select assumes that all tables are already opened
++*****************************************************************************/
++
++/**
++  Prepare of whole select (including sub queries in future).
++
++  @todo
++    Add check of calculation of GROUP functions and fields:
++    SELECT COUNT(*)+table.col1 from table1;
++
++  @retval
++    -1   on error
++  @retval
++    0   on success
++*/
++int
++JOIN::prepare(Item ***rref_pointer_array,
++	      TABLE_LIST *tables_init,
++	      uint wild_num, COND *conds_init, uint og_num,
++	      ORDER *order_init, ORDER *group_init,
++	      Item *having_init,
++	      ORDER *proc_param_init, SELECT_LEX *select_lex_arg,
++	      SELECT_LEX_UNIT *unit_arg)
++{
++  DBUG_ENTER("JOIN::prepare");
++
++  // to prevent double initialization on EXPLAIN
++  if (optimized)
++    DBUG_RETURN(0);
++
++  conds= conds_init;
++  order= order_init;
++  group_list= group_init;
++  having= having_init;
++  proc_param= proc_param_init;
++  tables_list= tables_init;
++  select_lex= select_lex_arg;
++  select_lex->join= this;
++  join_list= &select_lex->top_join_list;
++  union_part= unit_arg->is_union();
++
++  thd->lex->current_select->is_item_list_lookup= 1;
++  /*
++    If we have already executed SELECT, then it have not sense to prevent
++    its table from update (see unique_table())
++  */
++  if (thd->derived_tables_processing)
++    select_lex->exclude_from_table_unique_test= TRUE;
++
++  /* Check that all tables, fields, conds and order are ok */
++
++  if (!(select_options & OPTION_SETUP_TABLES_DONE) &&
++      setup_tables_and_check_access(thd, &select_lex->context, join_list,
++                                    tables_list, &select_lex->leaf_tables,
++                                    FALSE, SELECT_ACL, SELECT_ACL))
++      DBUG_RETURN(-1);
++ 
++  TABLE_LIST *table_ptr;
++  for (table_ptr= select_lex->leaf_tables;
++       table_ptr;
++       table_ptr= table_ptr->next_leaf)
++    tables++;
++
++  if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) ||
++      select_lex->setup_ref_array(thd, og_num) ||
++      setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ,
++		   &all_fields, 1) ||
++      setup_without_group(thd, (*rref_pointer_array), tables_list,
++			  select_lex->leaf_tables, fields_list,
++			  all_fields, &conds, order, group_list,
++			  &hidden_group_fields))
++    DBUG_RETURN(-1);				/* purecov: inspected */
++
++  ref_pointer_array= *rref_pointer_array;
++  
++  if (having)
++  {
++    nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
++    thd->where="having clause";
++    thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level;
++    select_lex->having_fix_field= 1;
++    bool having_fix_rc= (!having->fixed &&
++			 (having->fix_fields(thd, &having) ||
++			  having->check_cols(1)));
++    select_lex->having_fix_field= 0;
++    if (having_fix_rc || thd->is_error())
++      DBUG_RETURN(-1);				/* purecov: inspected */
++    thd->lex->allow_sum_func= save_allow_sum_func;
++  }
++
++  if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW) &&
++      !(select_options & SELECT_DESCRIBE))
++  {
++    Item_subselect *subselect;
++    /* Is it subselect? */
++    if ((subselect= select_lex->master_unit()->item))
++    {
++      Item_subselect::trans_res res;
++      if ((res= subselect->select_transformer(this)) !=
++	  Item_subselect::RES_OK)
++      {
++        select_lex->fix_prepare_information(thd, &conds, &having);
++	DBUG_RETURN((res == Item_subselect::RES_ERROR));
++      }
++    }
++  }
++
++  select_lex->fix_prepare_information(thd, &conds, &having);
++
++  if (order)
++  {
++    bool real_order= FALSE;
++    ORDER *ord;
++    for (ord= order; ord; ord= ord->next)
++    {
++      Item *item= *ord->item;
++      /*
++        Disregard sort order if there's only 
++        zero length NOT NULL fields (e.g. {VAR}CHAR(0) NOT NULL") or
++        zero length NOT NULL string functions there.
++        Such tuples don't contain any data to sort.
++      */
++      if (!real_order &&
++           /* Not a zero length NOT NULL field */
++          ((item->type() != Item::FIELD_ITEM ||
++            ((Item_field *) item)->field->maybe_null() ||
++            ((Item_field *) item)->field->sort_length()) &&
++           /* AND not a zero length NOT NULL string function. */
++           (item->type() != Item::FUNC_ITEM ||
++            item->maybe_null ||
++            item->result_type() != STRING_RESULT ||
++            item->max_length)))
++        real_order= TRUE;
++
++      if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
++        item->split_sum_func(thd, ref_pointer_array, all_fields);
++    }
++    if (!real_order)
++      order= NULL;
++  }
++
++  if (having && having->with_sum_func)
++    having->split_sum_func2(thd, ref_pointer_array, all_fields,
++                            &having, TRUE);
++  if (select_lex->inner_sum_func_list)
++  {
++    Item_sum *end=select_lex->inner_sum_func_list;
++    Item_sum *item_sum= end;  
++    do
++    { 
++      item_sum= item_sum->next;
++      item_sum->split_sum_func2(thd, ref_pointer_array,
++                                all_fields, item_sum->ref_by, FALSE);
++    } while (item_sum != end);
++  }
++
++  if (select_lex->inner_refs_list.elements &&
++      fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array,
++                     group_list))
++    DBUG_RETURN(-1);
++
++  if (group_list)
++  {
++    /*
++      Because HEAP tables can't index BIT fields we need to use an
++      additional hidden field for grouping because later it will be
++      converted to a LONG field. Original field will remain of the
++      BIT type and will be returned to a client.
++    */
++    for (ORDER *ord= group_list; ord; ord= ord->next)
++    {
++      if ((*ord->item)->type() == Item::FIELD_ITEM &&
++          (*ord->item)->field_type() == MYSQL_TYPE_BIT)
++      {
++        Item_field *field= new Item_field(thd, *(Item_field**)ord->item);
++        int el= all_fields.elements;
++        ref_pointer_array[el]= field;
++        all_fields.push_front(field);
++        ord->item= ref_pointer_array + el;
++      }
++    }
++  }
++
++  if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */
++    DBUG_RETURN(-1);
++  
++
++  /*
++    Check if there are references to un-aggregated columns when computing 
++    aggregate functions with implicit grouping (there is no GROUP BY).
++  */
++  if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && !group_list &&
++      select_lex->full_group_by_flag == (NON_AGG_FIELD_USED | SUM_FUNC_USED))
++  {
++    my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS,
++               ER(ER_MIX_OF_GROUP_FUNC_AND_FIELDS), MYF(0));
++    DBUG_RETURN(-1);
++  }
++  {
++    /* Caclulate the number of groups */
++    send_group_parts= 0;
++    for (ORDER *group_tmp= group_list ; group_tmp ; group_tmp= group_tmp->next)
++      send_group_parts++;
++  }
++  
++  procedure= setup_procedure(thd, proc_param, result, fields_list, &error);
++  if (error)
++    goto err;					/* purecov: inspected */
++  if (procedure)
++  {
++    if (setup_new_fields(thd, fields_list, all_fields,
++			 procedure->param_fields))
++	goto err;				/* purecov: inspected */
++    if (procedure->group)
++    {
++      if (!test_if_subpart(procedure->group,group_list))
++      {						/* purecov: inspected */
++	my_message(ER_DIFF_GROUPS_PROC, ER(ER_DIFF_GROUPS_PROC),
++                   MYF(0));                     /* purecov: inspected */
++	goto err;				/* purecov: inspected */
++      }
++    }
++    if (order && (procedure->flags & PROC_NO_SORT))
++    {						/* purecov: inspected */
++      my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC),
++                 MYF(0));                       /* purecov: inspected */
++      goto err;					/* purecov: inspected */
++    }
++    if (thd->lex->derived_tables)
++    {
++      my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", 
++               thd->lex->derived_tables & DERIVED_VIEW ?
++               "view" : "subquery"); 
++      goto err;
++    }
++    if (thd->lex->sql_command != SQLCOM_SELECT)
++    {
++      my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "non-SELECT");
++      goto err;
++    }
++  }
++
++  if (!procedure && result && result->prepare(fields_list, unit_arg))
++    goto err;					/* purecov: inspected */
++
++  /* Init join struct */
++  count_field_types(select_lex, &tmp_table_param, all_fields, 0);
++  ref_pointer_array_size= all_fields.elements*sizeof(Item*);
++  this->group= group_list != 0;
++  unit= unit_arg;
++
++  if (tmp_table_param.sum_func_count && !group_list)
++    implicit_grouping= TRUE;
++
++#ifdef RESTRICTED_GROUP
++  if (implicit_grouping)
++  {
++    my_message(ER_WRONG_SUM_SELECT,ER(ER_WRONG_SUM_SELECT),MYF(0));
++    goto err;
++  }
++#endif
++  if (select_lex->olap == ROLLUP_TYPE && rollup_init())
++    goto err;
++  if (alloc_func_list())
++    goto err;
++
++  DBUG_RETURN(0); // All OK
++
++err:
++  delete procedure;				/* purecov: inspected */
++  procedure= 0;
++  DBUG_RETURN(-1);				/* purecov: inspected */
++}
++
++
++/*
++  Remove the predicates pushed down into the subquery
++
++  SYNOPSIS
++    JOIN::remove_subq_pushed_predicates()
++      where   IN  Must be NULL
++              OUT The remaining WHERE condition, or NULL
++
++  DESCRIPTION
++    Given that this join will be executed using (unique|index)_subquery,
++    without "checking NULL", remove the predicates that were pushed down
++    into the subquery.
++
++    If the subquery compares scalar values, we can remove the condition that
++    was wrapped into trig_cond (it will be checked when needed by the subquery
++    engine)
++
++    If the subquery compares row values, we need to keep the wrapped
++    equalities in the WHERE clause: when the left (outer) tuple has both NULL
++    and non-NULL values, we'll do a full table scan and will rely on the
++    equalities corresponding to non-NULL parts of left tuple to filter out
++    non-matching records.
++
++    TODO: We can remove the equalities that will be guaranteed to be true by the
++    fact that subquery engine will be using index lookup. This must be done only
++    for cases where there are no conversion errors of significance, e.g. 257
++    that is searched in a byte. But this requires homogenization of the return 
++    codes of all Field*::store() methods.
++*/
++
++void JOIN::remove_subq_pushed_predicates(Item **where)
++{
++  if (conds->type() == Item::FUNC_ITEM &&
++      ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC &&
++      ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM &&
++      ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM &&
++      test_if_ref ((Item_field *)((Item_func *)conds)->arguments()[1],
++                   ((Item_func *)conds)->arguments()[0]))
++  {
++    *where= 0;
++    return;
++  }
++}
++
++
++/*
++  Index lookup-based subquery: save some flags for EXPLAIN output
++
++  SYNOPSIS
++    save_index_subquery_explain_info()
++      join_tab  Subquery's join tab (there is only one as index lookup is
++                only used for subqueries that are single-table SELECTs)
++      where     Subquery's WHERE clause
++
++  DESCRIPTION
++    For index lookup-based subquery (i.e. one executed with
++    subselect_uniquesubquery_engine or subselect_indexsubquery_engine),
++    check its EXPLAIN output row should contain 
++      "Using index" (TAB_INFO_FULL_SCAN_ON_NULL) 
++      "Using Where" (TAB_INFO_USING_WHERE)
++      "Full scan on NULL key" (TAB_INFO_FULL_SCAN_ON_NULL)
++    and set appropriate flags in join_tab->packed_info.
++*/
++
++static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where)
++{
++  join_tab->packed_info= TAB_INFO_HAVE_VALUE;
++  if (join_tab->table->covering_keys.is_set(join_tab->ref.key))
++    join_tab->packed_info |= TAB_INFO_USING_INDEX;
++  if (where)
++    join_tab->packed_info |= TAB_INFO_USING_WHERE;
++  for (uint i = 0; i < join_tab->ref.key_parts; i++)
++  {
++    if (join_tab->ref.cond_guards[i])
++    {
++      join_tab->packed_info |= TAB_INFO_FULL_SCAN_ON_NULL;
++      break;
++    }
++  }
++}
++
++
++/**
++  global select optimisation.
++
++  @note
++    error code saved in field 'error'
++
++  @retval
++    0   success
++  @retval
++    1   error
++*/
++
++int
++JOIN::optimize()
++{
++  DBUG_ENTER("JOIN::optimize");
++  // to prevent double initialization on EXPLAIN
++  if (optimized)
++    DBUG_RETURN(0);
++  optimized= 1;
++
++  thd_proc_info(thd, "optimizing");
++  row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
++	      unit->select_limit_cnt);
++  /* select_limit is used to decide if we are likely to scan the whole table */
++  select_limit= unit->select_limit_cnt;
++  if (having || (select_options & OPTION_FOUND_ROWS))
++    select_limit= HA_POS_ERROR;
++  do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
++  // Ignore errors of execution if option IGNORE present
++  if (thd->lex->ignore)
++    thd->lex->current_select->no_error= 1;
++#ifdef HAVE_REF_TO_FIELDS			// Not done yet
++  /* Add HAVING to WHERE if possible */
++  if (having && !group_list && !sum_func_count)
++  {
++    if (!conds)
++    {
++      conds= having;
++      having= 0;
++    }
++    else if ((conds=new Item_cond_and(conds,having)))
++    {
++      /*
++        Item_cond_and can't be fixed after creation, so we do not check
++        conds->fixed
++      */
++      conds->fix_fields(thd, &conds);
++      conds->change_ref_to_fields(thd, tables_list);
++      conds->top_level_item();
++      having= 0;
++    }
++  }
++#endif
++  SELECT_LEX *sel= thd->lex->current_select;
++  if (sel->first_cond_optimization)
++  {
++    /*
++      The following code will allocate the new items in a permanent
++      MEMROOT for prepared statements and stored procedures.
++    */
++
++    Query_arena *arena= thd->stmt_arena, backup;
++    if (arena->is_conventional())
++      arena= 0;                                   // For easier test
++    else
++      thd->set_n_backup_active_arena(arena, &backup);
++
++    sel->first_cond_optimization= 0;
++
++    /* Convert all outer joins to inner joins if possible */
++    conds= simplify_joins(this, join_list, conds, TRUE);
++    build_bitmap_for_nested_joins(join_list, 0);
++
++    sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0;
++
++    if (arena)
++      thd->restore_active_arena(arena, &backup);
++  }
++
++  conds= optimize_cond(this, conds, join_list, &cond_value);   
++  if (thd->is_error())
++  {
++    error= 1;
++    DBUG_PRINT("error",("Error from optimize_cond"));
++    DBUG_RETURN(1);
++  }
++
++  {
++    having= optimize_cond(this, having, join_list, &having_value);
++    if (thd->is_error())
++    {
++      error= 1;
++      DBUG_PRINT("error",("Error from optimize_cond"));
++      DBUG_RETURN(1);
++    }
++    if (select_lex->where)
++      select_lex->cond_value= cond_value;
++    if (select_lex->having)
++      select_lex->having_value= having_value;
++
++    if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || 
++        (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
++    {						/* Impossible cond */
++      DBUG_PRINT("info", (having_value == Item::COND_FALSE ? 
++                            "Impossible HAVING" : "Impossible WHERE"));
++      zero_result_cause=  having_value == Item::COND_FALSE ?
++                           "Impossible HAVING" : "Impossible WHERE";
++      tables= 0;
++      error= 0;
++      DBUG_RETURN(0);
++    }
++  }
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  {
++    TABLE_LIST *tbl;
++    for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
++    {
++      /* 
++        If tbl->embedding!=NULL that means that this table is in the inner
++        part of the nested outer join, and we can't do partition pruning
++        (TODO: check if this limitation can be lifted)
++      */
++      if (!tbl->embedding)
++      {
++        Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
++        tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
++	                                                 prune_cond);
++      }
++    }
++  }
++#endif
++
++  /* 
++     Try to optimize count(*), min() and max() to const fields if
++     there is implicit grouping (aggregate functions but no
++     group_list). In this case, the result set shall only contain one
++     row. 
++  */
++  if (tables_list && implicit_grouping)
++  {
++    int res;
++    /*
++      opt_sum_query() returns HA_ERR_KEY_NOT_FOUND if no rows match
++      to the WHERE conditions,
++      or 1 if all items were resolved (optimized away),
++      or 0, or an error number HA_ERR_...
++
++      If all items were resolved by opt_sum_query, there is no need to
++      open any tables.
++    */
++    if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds)))
++    {
++      if (res == HA_ERR_KEY_NOT_FOUND)
++      {
++        DBUG_PRINT("info",("No matching min/max row"));
++	zero_result_cause= "No matching min/max row";
++        tables= 0;
++	error=0;
++	DBUG_RETURN(0);
++      }
++      if (res > 1)
++      {
++        error= res;
++        DBUG_PRINT("error",("Error from opt_sum_query"));
++        DBUG_RETURN(1);
++      }
++      if (res < 0)
++      {
++        DBUG_PRINT("info",("No matching min/max row"));
++        zero_result_cause= "No matching min/max row";
++        tables= 0;
++        error=0;
++        DBUG_RETURN(0);
++      }
++      DBUG_PRINT("info",("Select tables optimized away"));
++      zero_result_cause= "Select tables optimized away";
++      tables_list= 0;				// All tables resolved
++      const_tables= tables;
++      /*
++        Extract all table-independent conditions and replace the WHERE
++        clause with them. All other conditions were computed by opt_sum_query
++        and the MIN/MAX/COUNT function(s) have been replaced by constants,
++        so there is no need to compute the whole WHERE clause again.
++        Notice that make_cond_for_table() will always succeed to remove all
++        computed conditions, because opt_sum_query() is applicable only to
++        conjunctions.
++        Preserve conditions for EXPLAIN.
++      */
++      if (conds && !(thd->lex->describe & DESCRIBE_EXTENDED))
++      {
++        COND *table_independent_conds=
++          make_cond_for_table(conds, PSEUDO_TABLE_BITS, 0);
++        DBUG_EXECUTE("where",
++                     print_where(table_independent_conds,
++                                 "where after opt_sum_query()",
++                                 QT_ORDINARY););
++        conds= table_independent_conds;
++      }
++    }
++  }
++  if (!tables_list)
++  {
++    DBUG_PRINT("info",("No tables"));
++    error= 0;
++    DBUG_RETURN(0);
++  }
++  error= -1;					// Error is sent to client
++  sort_by_table= get_sort_by_table(order, group_list, select_lex->leaf_tables);
++
++  /* Calculate how to do the join */
++  thd_proc_info(thd, "statistics");
++  if (make_join_statistics(this, select_lex->leaf_tables, conds, &keyuse) ||
++      thd->is_fatal_error)
++  {
++    DBUG_PRINT("error",("Error: make_join_statistics() failed"));
++    DBUG_RETURN(1);
++  }
++
++  if (rollup.state != ROLLUP::STATE_NONE)
++  {
++    if (rollup_process_const_fields())
++    {
++      DBUG_PRINT("error", ("Error: rollup_process_fields() failed"));
++      DBUG_RETURN(1);
++    }
++  }
++  else
++  {
++    /* Remove distinct if only const tables */
++    select_distinct= select_distinct && (const_tables != tables);
++  }
++
++  thd_proc_info(thd, "preparing");
++  if (result->initialize_tables(this))
++  {
++    DBUG_PRINT("error",("Error: initialize_tables() failed"));
++    DBUG_RETURN(1);				// error == -1
++  }
++  if (const_table_map != found_const_table_map &&
++      !(select_options & SELECT_DESCRIBE) &&
++      (!conds ||
++       !(conds->used_tables() & RAND_TABLE_BIT) ||
++       select_lex->master_unit() == &thd->lex->unit)) // upper level SELECT
++  {
++    zero_result_cause= "no matching row in const table";
++    DBUG_PRINT("error",("Error: %s", zero_result_cause));
++    error= 0;
++    DBUG_RETURN(0);
++  }
++  if (!(thd->options & OPTION_BIG_SELECTS) &&
++      best_read > (double) thd->variables.max_join_size &&
++      !(select_options & SELECT_DESCRIBE))
++  {						/* purecov: inspected */
++    my_message(ER_TOO_BIG_SELECT, ER(ER_TOO_BIG_SELECT), MYF(0));
++    error= -1;
++    DBUG_RETURN(1);
++  }
++  if (const_tables && !thd->locked_tables &&
++      !(select_options & SELECT_NO_UNLOCK))
++    mysql_unlock_some_tables(thd, table, const_tables);
++  if (!conds && outer_join)
++  {
++    /* Handle the case where we have an OUTER JOIN without a WHERE */
++    conds=new Item_int((longlong) 1,1);	// Always true
++  }
++  select= make_select(*table, const_table_map,
++                      const_table_map, conds, 1, &error);
++  if (error)
++  {						/* purecov: inspected */
++    error= -1;					/* purecov: inspected */
++    DBUG_PRINT("error",("Error: make_select() failed"));
++    DBUG_RETURN(1);
++  }
++  
++  reset_nj_counters(join_list);
++  make_outerjoin_info(this);
++
++  /*
++    Among the equal fields belonging to the same multiple equality
++    choose the one that is to be retrieved first and substitute
++    all references to these in where condition for a reference for
++    the selected field.
++  */
++  if (conds)
++  {
++    conds= substitute_for_best_equal_field(conds, cond_equal, map2table);
++    conds->update_used_tables();
++    DBUG_EXECUTE("where",
++                 print_where(conds,
++                             "after substitute_best_equal",
++                             QT_ORDINARY););
++  }
++
++  /*
++    Permorm the the optimization on fields evaluation mentioned above
++    for all on expressions.
++  */ 
++  for (JOIN_TAB *tab= join_tab + const_tables; tab < join_tab + tables ; tab++)
++  {
++    if (*tab->on_expr_ref)
++    {
++      *tab->on_expr_ref= substitute_for_best_equal_field(*tab->on_expr_ref,
++                                                         tab->cond_equal,
++                                                         map2table);
++      (*tab->on_expr_ref)->update_used_tables();
++    }
++  }
++
++  if (conds && const_table_map != found_const_table_map &&
++      (select_options & SELECT_DESCRIBE))
++  {
++    conds=new Item_int((longlong) 0,1);	// Always false
++  }
++
++  /*
++    It's necessary to check const part of HAVING cond as
++    there is a chance that some cond parts may become
++    const items after make_join_statisctics(for example
++    when Item is a reference to cost table field from
++    outer join).
++    This check is performed only for those conditions
++    which do not use aggregate functions. In such case
++    temporary table may not be used and const condition
++    elements may be lost during further having
++    condition transformation in JOIN::exec.
++  */
++  if (having && const_table_map && !having->with_sum_func)
++  {
++    having->update_used_tables();
++    having= remove_eq_conds(thd, having, &having_value);
++    if (having_value == Item::COND_FALSE)
++    {
++      having= new Item_int((longlong) 0,1);
++      zero_result_cause= "Impossible HAVING noticed after reading const tables";
++      DBUG_RETURN(0);
++    }
++  }
++
++  if (make_join_select(this, select, conds))
++  {
++    zero_result_cause=
++      "Impossible WHERE noticed after reading const tables";
++    DBUG_RETURN(0);				// error == 0
++  }
++
++  error= -1;					/* if goto err */
++
++  /* Optimize distinct away if possible */
++  {
++    ORDER *org_order= order;
++    order=remove_const(this, order,conds,1, &simple_order);
++    if (thd->is_error())
++    {
++      error= 1;
++      DBUG_PRINT("error",("Error from remove_const"));
++      DBUG_RETURN(1);
++    }
++
++    /*
++      If we are using ORDER BY NULL or ORDER BY const_expression,
++      return result in any order (even if we are using a GROUP BY)
++    */
++    if (!order && org_order)
++      skip_sort_order= 1;
++  }
++  /*
++     Check if we can optimize away GROUP BY/DISTINCT.
++     We can do that if there are no aggregate functions, the
++     fields in DISTINCT clause (if present) and/or columns in GROUP BY
++     (if present) contain direct references to all key parts of
++     an unique index (in whatever order) and if the key parts of the
++     unique index cannot contain NULLs.
++     Note that the unique keys for DISTINCT and GROUP BY should not
++     be the same (as long as they are unique).
++
++     The FROM clause must contain a single non-constant table.
++  */
++  if (tables - const_tables == 1 && (group_list || select_distinct) &&
++      !tmp_table_param.sum_func_count &&
++      (!join_tab[const_tables].select ||
++       !join_tab[const_tables].select->quick ||
++       join_tab[const_tables].select->quick->get_type() != 
++       QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX))
++  {
++    if (group_list && rollup.state == ROLLUP::STATE_NONE &&
++       list_contains_unique_index(join_tab[const_tables].table,
++                                 find_field_in_order_list,
++                                 (void *) group_list))
++    {
++      /*
++        We have found that grouping can be removed since groups correspond to
++        only one row anyway, but we still have to guarantee correct result
++        order. The line below effectively rewrites the query from GROUP BY
++        <fields> to ORDER BY <fields>. There are two exceptions:
++        - if skip_sort_order is set (see above), then we can simply skip
++          GROUP BY;
++        - we can only rewrite ORDER BY if the ORDER BY fields are 'compatible'
++          with the GROUP BY ones, i.e. either one is a prefix of another.
++          We only check if the ORDER BY is a prefix of GROUP BY. In this case
++          test_if_subpart() copies the ASC/DESC attributes from the original
++          ORDER BY fields.
++          If GROUP BY is a prefix of ORDER BY, then it is safe to leave
++          'order' as is.
++       */
++      if (!order || test_if_subpart(group_list, order))
++          order= skip_sort_order ? 0 : group_list;
++      /*
++        If we have an IGNORE INDEX FOR GROUP BY(fields) clause, this must be 
++        rewritten to IGNORE INDEX FOR ORDER BY(fields).
++      */
++      join_tab->table->keys_in_use_for_order_by=
++        join_tab->table->keys_in_use_for_group_by;
++      group_list= 0;
++      group= 0;
++    }
++    if (select_distinct &&
++       list_contains_unique_index(join_tab[const_tables].table,
++                                 find_field_in_item_list,
++                                 (void *) &fields_list))
++    {
++      select_distinct= 0;
++    }
++  }
++  if (group_list || tmp_table_param.sum_func_count)
++  {
++    if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE)
++      select_distinct=0;
++  }
++  else if (select_distinct && tables - const_tables == 1 &&
++           rollup.state == ROLLUP::STATE_NONE)
++  {
++    /*
++      We are only using one table. In this case we change DISTINCT to a
++      GROUP BY query if:
++      - The GROUP BY can be done through indexes (no sort) and the ORDER
++        BY only uses selected fields.
++	(In this case we can later optimize away GROUP BY and ORDER BY)
++      - We are scanning the whole table without LIMIT
++        This can happen if:
++        - We are using CALC_FOUND_ROWS
++        - We are using an ORDER BY that can't be optimized away.
++
++      We don't want to use this optimization when we are using LIMIT
++      because in this case we can just create a temporary table that
++      holds LIMIT rows and stop when this table is full.
++    */
++    JOIN_TAB *tab= &join_tab[const_tables];
++    bool all_order_fields_used;
++    if (order)
++      skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1, 
++        &tab->table->keys_in_use_for_order_by);
++    if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array,
++                                          order, fields_list, all_fields,
++				          &all_order_fields_used)))
++    {
++      bool skip_group= (skip_sort_order &&
++        test_if_skip_sort_order(tab, group_list, select_limit, 1, 
++                                &tab->table->keys_in_use_for_group_by) != 0);
++      count_field_types(select_lex, &tmp_table_param, all_fields, 0);
++      if ((skip_group && all_order_fields_used) ||
++	  select_limit == HA_POS_ERROR ||
++	  (order && !skip_sort_order))
++      {
++	/*  Change DISTINCT to GROUP BY */
++	select_distinct= 0;
++	no_order= !order;
++	if (all_order_fields_used)
++	{
++	  if (order && skip_sort_order)
++	  {
++	    /*
++	      Force MySQL to read the table in sorted order to get result in
++	      ORDER BY order.
++	    */
++	    tmp_table_param.quick_group=0;
++	  }
++	  order=0;
++        }
++	group=1;				// For end_write_group
++      }
++      else
++	group_list= 0;
++    }
++    else if (thd->is_fatal_error)			// End of memory
++      DBUG_RETURN(1);
++  }
++  simple_group= 0;
++  {
++    ORDER *old_group_list;
++    group_list= remove_const(this, (old_group_list= group_list), conds,
++                             rollup.state == ROLLUP::STATE_NONE,
++			     &simple_group);
++    if (thd->is_error())
++    {
++      error= 1;
++      DBUG_PRINT("error",("Error from remove_const"));
++      DBUG_RETURN(1);
++    }
++    if (old_group_list && !group_list)
++      select_distinct= 0;
++  }
++  if (!group_list && group)
++  {
++    order=0;					// The output has only one row
++    simple_order=1;
++    select_distinct= 0;                       // No need in distinct for 1 row
++    group_optimized_away= 1;
++  }
++
++  calc_group_buffer(this, group_list);
++  send_group_parts= tmp_table_param.group_parts; /* Save org parts */
++  if (procedure && procedure->group)
++  {
++    group_list= procedure->group= remove_const(this, procedure->group, conds,
++					       1, &simple_group);
++    if (thd->is_error())
++    {
++      error= 1;
++      DBUG_PRINT("error",("Error from remove_const"));
++      DBUG_RETURN(1);
++    }   
++    calc_group_buffer(this, group_list);
++  }
++
++  if (test_if_subpart(group_list, order) ||
++      (!group_list && tmp_table_param.sum_func_count))
++    order=0;
++
++  // Can't use sort on head table if using join buffering
++  if (full_join)
++  {
++    TABLE *stable= (sort_by_table == (TABLE *) 1 ? 
++      join_tab[const_tables].table : sort_by_table);
++    /* 
++      FORCE INDEX FOR ORDER BY can be used to prevent join buffering when
++      sorting on the first table.
++    */
++    if (!stable || !stable->force_index_order)
++    {
++      if (group_list)
++        simple_group= 0;
++      if (order)
++        simple_order= 0;
++    }
++  }
++
++  /*
++    Check if we need to create a temporary table.
++    This has to be done if all tables are not already read (const tables)
++    and one of the following conditions holds:
++    - We are using DISTINCT (simple distinct's are already optimized away)
++    - We are using an ORDER BY or GROUP BY on fields not in the first table
++    - We are using different ORDER BY and GROUP BY orders
++    - The user wants us to buffer the result.
++    When the WITH ROLLUP modifier is present, we cannot skip temporary table
++    creation for the DISTINCT clause just because there are only const tables.
++  */
++  need_tmp= ((const_tables != tables &&
++	     ((select_distinct || !simple_order || !simple_group) ||
++	      (group_list && order) ||
++	      test(select_options & OPTION_BUFFER_RESULT))) ||
++             (rollup.state != ROLLUP::STATE_NONE && select_distinct));
++
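The need_tmp test above packs several conditions into one expression. As a rough illustration only, here is a minimal standalone C++ sketch of the same decision; the struct and field names are invented stand-ins for the JOIN members, not the server's types.

#include <cstdio>

// Illustrative sketch of the "do we need a temporary table?" test above.
// All names are hypothetical stand-ins for the JOIN members used in the patch.
struct PlanFlags {
  bool all_tables_const;   // const_tables == tables
  bool select_distinct;    // DISTINCT still pending
  bool simple_order;       // ORDER BY resolvable on the first table
  bool simple_group;       // GROUP BY resolvable on the first table
  bool group_and_order;    // both GROUP BY and ORDER BY present
  bool buffer_result;      // OPTION_BUFFER_RESULT requested
  bool rollup_distinct;    // WITH ROLLUP combined with DISTINCT
};

static bool needs_tmp_table(const PlanFlags &f)
{
  if (f.rollup_distinct)
    return true;                       // ROLLUP + DISTINCT always buffers
  if (f.all_tables_const)
    return false;                      // everything already read
  return f.select_distinct || !f.simple_order || !f.simple_group ||
         f.group_and_order || f.buffer_result;
}

int main()
{
  PlanFlags f= {false, true, true, true, false, false, false};
  std::printf("need_tmp=%d\n", needs_tmp_table(f)); // DISTINCT forces a tmp table
  return 0;
}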
++  // No cache for MATCH
++  make_join_readinfo(this,
++		     (select_options & (SELECT_DESCRIBE |
++					SELECT_NO_JOIN_CACHE)) |
++		     (select_lex->ftfunc_list->elements ?
++		      SELECT_NO_JOIN_CACHE : 0));
++
++  /* Perform FULLTEXT search before all regular searches */
++  if (!(select_options & SELECT_DESCRIBE))
++    init_ftfuncs(thd, select_lex, test(order));
++
++  /*
++    Is this a simple IN subquery?
++  */
++  if (!group_list && !order &&
++      unit->item && unit->item->substype() == Item_subselect::IN_SUBS &&
++      tables == 1 && conds &&
++      !unit->is_union())
++  {
++    if (!having)
++    {
++      Item *where= conds;
++      if (join_tab[0].type == JT_EQ_REF &&
++	  join_tab[0].ref.items[0]->name == in_left_expr_name)
++      {
++        remove_subq_pushed_predicates(&where);
++        save_index_subquery_explain_info(join_tab, where);
++        join_tab[0].type= JT_UNIQUE_SUBQUERY;
++        error= 0;
++        DBUG_RETURN(unit->item->
++                    change_engine(new
++                                  subselect_uniquesubquery_engine(thd,
++                                                                  join_tab,
++                                                                  unit->item,
++                                                                  where)));
++      }
++      else if (join_tab[0].type == JT_REF &&
++	       join_tab[0].ref.items[0]->name == in_left_expr_name)
++      {
++	remove_subq_pushed_predicates(&where);
++        save_index_subquery_explain_info(join_tab, where);
++        join_tab[0].type= JT_INDEX_SUBQUERY;
++        error= 0;
++        DBUG_RETURN(unit->item->
++                    change_engine(new
++                                  subselect_indexsubquery_engine(thd,
++                                                                 join_tab,
++                                                                 unit->item,
++                                                                 where,
++                                                                 NULL,
++                                                                 0)));
++      }
++    } else if (join_tab[0].type == JT_REF_OR_NULL &&
++	       join_tab[0].ref.items[0]->name == in_left_expr_name &&
++               having->name == in_having_cond)
++    {
++      join_tab[0].type= JT_INDEX_SUBQUERY;
++      error= 0;
++      conds= remove_additional_cond(conds);
++      save_index_subquery_explain_info(join_tab, conds);
++      DBUG_RETURN(unit->item->
++		  change_engine(new subselect_indexsubquery_engine(thd,
++								   join_tab,
++								   unit->item,
++								   conds,
++                                                                   having,
++								   1)));
++    }
++
++  }
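For intuition, the two subquery engines chosen above differ mainly in how the inner index is probed. A small standalone sketch follows, with standard containers standing in for a unique and a non-unique index; everything here is illustrative, not server code.

#include <cstdio>
#include <unordered_set>
#include <unordered_map>

// Sketch of the two IN-subquery strategies: a unique-subquery engine does a
// single unique-key lookup per outer row, while an index-subquery engine
// probes a non-unique index and may have to look at several rows.
int main()
{
  std::unordered_set<int> unique_key= {1, 2, 3};              // UNIQUE index
  std::unordered_multimap<int,int> plain_key= {{1,10},{1,11},{2,20}};

  int outer_value= 1;

  // JT_UNIQUE_SUBQUERY-like: one lookup decides the IN predicate
  bool in_unique= unique_key.count(outer_value) != 0;

  // JT_INDEX_SUBQUERY-like: probe the index, possibly checking duplicates
  bool in_index= plain_key.find(outer_value) != plain_key.end();

  std::printf("unique: %d, index: %d\n", in_unique, in_index);
  return 0;
}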
++  /*
++    Need to tell handlers that, to play it safe, they should fetch all
++    columns of the primary key of the tables: this is because MySQL may
++    build row pointers for the rows, and for all columns of the primary key
++    the read set has not necessarily been set by the server code.
++  */
++  if (need_tmp || select_distinct || group_list || order)
++  {
++    for (uint i = const_tables; i < tables; i++)
++      join_tab[i].table->prepare_for_position();
++  }
++
++  DBUG_EXECUTE("info",TEST_join(this););
++
++  if (const_tables != tables)
++  {
++    /*
++      Because filesort always does a full table scan or a quick range scan
++      we must add the removed reference to the select for the table.
++      We only need to do this when we have a simple_order or simple_group
++      as in other cases the join is done before the sort.
++    */
++    if ((order || group_list) &&
++        join_tab[const_tables].type != JT_ALL &&
++        join_tab[const_tables].type != JT_FT &&
++        join_tab[const_tables].type != JT_REF_OR_NULL &&
++        ((order && simple_order) || (group_list && simple_group)))
++    {
++      if (add_ref_to_table_cond(thd,&join_tab[const_tables])) {
++        DBUG_RETURN(1);
++      }
++    }
++    
++    if (!(select_options & SELECT_BIG_RESULT) &&
++        ((group_list &&
++          (!simple_group ||
++           !test_if_skip_sort_order(&join_tab[const_tables], group_list,
++                                    unit->select_limit_cnt, 0, 
++                                    &join_tab[const_tables].table->
++                                    keys_in_use_for_group_by))) ||
++         select_distinct) &&
++        tmp_table_param.quick_group && !procedure)
++    {
++      need_tmp=1; simple_order=simple_group=0;	// Force tmp table without sort
++    }
++    if (order)
++    {
++      /*
++        Do we need a temporary table due to the ORDER BY not being equal to
++        the GROUP BY? The call to test_if_skip_sort_order above tests for the
++        GROUP BY clause only and hence is not valid in this case. So the
++        estimated number of rows to be read from the first table is not valid.
++        We clear it here so that it doesn't show up in EXPLAIN.
++       */
++      if (need_tmp && (select_options & SELECT_DESCRIBE) != 0)
++        join_tab[const_tables].limit= 0;
++      /*
++        Force using of tmp table if sorting by a SP or UDF function due to
++        their expensive and probably non-deterministic nature.
++      */
++      for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
++      {
++        Item *item= *tmp_order->item;
++        if (item->walk(&Item::is_expensive_processor, 0, (uchar*)0))
++        {
++          /* Force tmp table without sort */
++          need_tmp=1; simple_order=simple_group=0;
++          break;
++        }
++      }
++    }
++  }
++
++  tmp_having= having;
++  if (select_options & SELECT_DESCRIBE)
++  {
++    error= 0;
++    DBUG_RETURN(0);
++  }
++  having= 0;
++
++  /*
++    The loose index scan access method guarantees that all grouping or
++    duplicate row elimination (for distinct) is already performed
++    during data retrieval, and that all MIN/MAX functions are already
++    computed for each group. Thus all MIN/MAX functions should be
++    treated as regular functions, and there is no need to perform
++    grouping in the main execution loop.
++    Notice that currently loose index scan is applicable only for
++    single table queries, thus it is sufficient to test only the first
++    join_tab element of the plan for its access method.
++  */
++  if (join_tab->is_using_loose_index_scan())
++    tmp_table_param.precomputed_group_by= TRUE;
++
++  /* Create a tmp table if distinct or if the sort is too complicated */
++  if (need_tmp)
++  {
++    DBUG_PRINT("info",("Creating tmp table"));
++    thd_proc_info(thd, "Creating tmp table");
++
++    init_items_ref_array();
++
++    tmp_table_param.hidden_field_count= (all_fields.elements -
++					 fields_list.elements);
++    ORDER *tmp_group= ((!simple_group && !procedure &&
++                        !(test_flags & TEST_NO_KEY_GROUP)) ? group_list :
++                                                             (ORDER*) 0);
++    /*
++      Pushing LIMIT to the temporary table creation is not applicable
++      when there is an ORDER BY or a GROUP BY, or when there is no GROUP BY
++      but there are aggregate functions, because in all these cases we need
++      all result rows.
++    */
++    ha_rows tmp_rows_limit= ((order == 0 || skip_sort_order) &&
++                             !tmp_group &&
++                             !thd->lex->current_select->with_sum_func) ?
++                            select_limit : HA_POS_ERROR;
++
++    if (!(exec_tmp_table1=
++	  create_tmp_table(thd, &tmp_table_param, all_fields,
++                           tmp_group,
++			   group_list ? 0 : select_distinct,
++			   group_list && simple_group,
++			   select_options,
++                           tmp_rows_limit,
++			   (char *) "")))
++    {
++      DBUG_RETURN(1);
++    }
++
++    /*
++      We don't have to store rows in the temp table that don't match HAVING if:
++      - we are sorting the table and writing complete group rows to the
++        temp table.
++      - We are using DISTINCT without resolving the distinct as a GROUP BY
++        on all columns.
++      
++      If having is not handled here, it will be checked before the row
++      is sent to the client.
++    */    
++    if (tmp_having && 
++	(sort_and_group || (exec_tmp_table1->distinct && !group_list)))
++      having= tmp_having;
++
++    /* if group or order on first table, sort first */
++    if (group_list && simple_group)
++    {
++      DBUG_PRINT("info",("Sorting for group"));
++      thd_proc_info(thd, "Sorting for group");
++      if (create_sort_index(thd, this, group_list,
++			    HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
++	  alloc_group_fields(this, group_list) ||
++          make_sum_func_list(all_fields, fields_list, 1) ||
++          setup_sum_funcs(thd, sum_funcs))
++      {
++        DBUG_RETURN(1);
++      }
++      group_list=0;
++    }
++    else
++    {
++      if (make_sum_func_list(all_fields, fields_list, 0) ||
++          setup_sum_funcs(thd, sum_funcs))
++      {
++        DBUG_RETURN(1);
++      }
++
++      if (!group_list && ! exec_tmp_table1->distinct && order && simple_order)
++      {
++        thd_proc_info(thd, "Sorting for order");
++        if (create_sort_index(thd, this, order,
++                              HA_POS_ERROR, HA_POS_ERROR, TRUE))
++        {
++          DBUG_RETURN(1);
++        }
++        order=0;
++      }
++    }
++    
++    /*
++      Optimize distinct when used on some of the tables
++      SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b
++      In this case we can stop scanning t2 when we have found one t1.a
++    */
++
++    if (exec_tmp_table1->distinct)
++    {
++      table_map used_tables= thd->used_tables;
++      JOIN_TAB *last_join_tab= join_tab+tables-1;
++      do
++      {
++	if (used_tables & last_join_tab->table->map)
++	  break;
++	last_join_tab->not_used_in_distinct=1;
++      } while (last_join_tab-- != join_tab);
++      /* Optimize "select distinct b from t1 order by key_part_1 limit #" */
++      if (order && skip_sort_order)
++      {
++ 	/* Should always succeed */
++	if (test_if_skip_sort_order(&join_tab[const_tables],
++				    order, unit->select_limit_cnt, 0, 
++                                    &join_tab[const_tables].table->
++                                      keys_in_use_for_order_by))
++	  order=0;
++      }
++    }
++
++    /* If this join belongs to an uncacheable query save the original join */
++    if (select_lex->uncacheable && init_save_join_tab())
++      DBUG_RETURN(-1);                         /* purecov: inspected */
++  }
++
++  error= 0;
++  DBUG_RETURN(0);
++}
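The DISTINCT handling near the end of the function above (marking trailing tables as not_used_in_distinct) amounts to stopping the scan of an inner table after the first match. A toy standalone example of the idea, with plain vectors standing in for tables:

#include <cstdio>
#include <set>
#include <vector>

// For SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b, scanning of t2 can
// stop as soon as one matching row is found, because further matches cannot
// add new distinct t1.a values.
struct R1 { int a, b; };

int main()
{
  std::vector<R1> t1= {{1, 7}, {2, 7}, {2, 8}};
  std::vector<int> t2_b= {7, 7, 7, 8};          // many duplicate join keys

  std::set<int> distinct_a;
  for (const R1 &r : t1)
    for (int b2 : t2_b)
      if (r.b == b2)
      {
        distinct_a.insert(r.a);
        break;              // "not_used_in_distinct": one match is enough
      }

  for (int a : distinct_a)
    std::printf("%d\n", a);
  return 0;
}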
++
++
++/**
++  Restore values in temporary join.
++*/
++void JOIN::restore_tmp()
++{
++  memcpy(tmp_join, this, (size_t) sizeof(JOIN));
++}
++
++
++int
++JOIN::reinit()
++{
++  DBUG_ENTER("JOIN::reinit");
++
++  unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ?
++                                    select_lex->offset_limit->val_uint() :
++                                    ULL(0));
++
++  first_record= 0;
++
++  if (exec_tmp_table1)
++  {
++    exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
++    exec_tmp_table1->file->ha_delete_all_rows();
++    free_io_cache(exec_tmp_table1);
++    filesort_free_buffers(exec_tmp_table1,0);
++  }
++  if (exec_tmp_table2)
++  {
++    exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
++    exec_tmp_table2->file->ha_delete_all_rows();
++    free_io_cache(exec_tmp_table2);
++    filesort_free_buffers(exec_tmp_table2,0);
++  }
++  if (items0)
++    set_items_ref_array(items0);
++
++  if (join_tab_save)
++    memcpy(join_tab, join_tab_save, sizeof(JOIN_TAB) * tables);
++
++  /* need to reset ref access state (see join_read_key) */
++  if (join_tab)
++    for (uint i= 0; i < tables; i++)
++      join_tab[i].ref.key_err= TRUE;
++
++  if (tmp_join)
++    restore_tmp();
++
++  /* Reset of sum functions */
++  if (sum_funcs)
++  {
++    Item_sum *func, **func_ptr= sum_funcs;
++    while ((func= *(func_ptr++)))
++      func->clear();
++  }
++
++  if (!(select_options & SELECT_DESCRIBE))
++    init_ftfuncs(thd, select_lex, test(order));
++
++  DBUG_RETURN(0);
++}
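The "Reset of sum functions" step in JOIN::reinit() simply asks every aggregator to forget its accumulated state before the join is re-executed. A tiny standalone sketch with an invented Aggregator type in place of Item_sum:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for Item_sum: an aggregator that must be cleared
// before a subquery/join is re-executed, mirroring the sum_funcs loop above.
struct Aggregator {
  long long sum= 0;
  void add(long long v) { sum+= v; }
  void clear() { sum= 0; }
};

int main()
{
  std::vector<Aggregator> sum_funcs(3);
  for (Aggregator &a : sum_funcs) a.add(42);

  // Reset of sum functions, as JOIN::reinit() does via func->clear()
  for (Aggregator &a : sum_funcs) a.clear();

  std::printf("after reinit: %lld\n", sum_funcs[0].sum);
  return 0;
}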
++
++/**
++   @brief Save the original join layout
++      
++   @details Saves the original join layout so it can be reused in 
++   re-execution and for EXPLAIN.
++             
++   @return Operation status
++   @retval 0      success.
++   @retval 1      error occurred.
++*/
++
++bool
++JOIN::init_save_join_tab()
++{
++  if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN))))
++    return 1;                                  /* purecov: inspected */
++  error= 0;				       // Ensure that tmp_join.error= 0
++  restore_tmp();
++  return 0;
++}
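init_save_join_tab() and restore_tmp() implement a plain bytewise snapshot/restore of the optimized plan. A minimal sketch of the same pattern on a small trivially-copyable struct; PlanState and its fields are made up:

#include <cstdio>
#include <cstring>

// Keep a bytewise snapshot of a trivially-copyable plan object so that
// re-execution can start again from the optimized state.
struct PlanState {
  int  const_tables;
  int  tables;
  bool need_tmp;
};

int main()
{
  PlanState plan= {1, 3, true};
  PlanState saved;
  std::memcpy(&saved, &plan, sizeof(PlanState));   // save, as init_save_join_tab()

  plan.const_tables= 3;                            // clobbered during execution
  std::memcpy(&plan, &saved, sizeof(PlanState));   // restore, as restore_tmp()

  std::printf("const_tables=%d need_tmp=%d\n", plan.const_tables, plan.need_tmp);
  return 0;
}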
++
++
++bool
++JOIN::save_join_tab()
++{
++  if (!join_tab_save && select_lex->master_unit()->uncacheable)
++  {
++    if (!(join_tab_save= (JOIN_TAB*)thd->memdup((uchar*) join_tab,
++						sizeof(JOIN_TAB) * tables)))
++      return 1;
++  }
++  return 0;
++}
++
++
++/**
++  Exec select.
++
++  @todo
++    Note, that create_sort_index calls test_if_skip_sort_order and may
++    finally replace sorting with index scan if there is a LIMIT clause in
++    the query.  It's never shown in EXPLAIN!
++
++  @todo
++    When can we have here thd->net.report_error not zero?
++*/
++void
++JOIN::exec()
++{
++  List<Item> *columns_list= &fields_list;
++  int      tmp_error;
++  DBUG_ENTER("JOIN::exec");
++
++  thd_proc_info(thd, "executing");
++  error= 0;
++  if (procedure)
++  {
++    procedure_fields_list= fields_list;
++    if (procedure->change_columns(procedure_fields_list) ||
++	result->prepare(procedure_fields_list, unit))
++    {
++      thd->limit_found_rows= thd->examined_row_count= 0;
++      DBUG_VOID_RETURN;
++    }
++    columns_list= &procedure_fields_list;
++  }
++  (void) result->prepare2(); // Currently, this cannot fail.
++
++  if (!tables_list && (tables || !select_lex->with_sum_func))
++  {                                           // Only test of functions
++    if (select_options & SELECT_DESCRIBE)
++      select_describe(this, FALSE, FALSE, FALSE,
++		      (zero_result_cause?zero_result_cause:"No tables used"));
++    else
++    {
++      if (result->send_fields(*columns_list,
++                              Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++      {
++        DBUG_VOID_RETURN;
++      }
++      /*
++        We have to test for 'conds' here as the WHERE may not be constant
++        even if we don't have any tables for prepared statements or if
++        conds uses something like 'rand()'.
++        If the HAVING clause is either impossible or always true, then
++        JOIN::having is set to NULL by optimize_cond.
++        In this case JOIN::exec must check for JOIN::having_value, in the
++        same way it checks for JOIN::cond_value.
++      */
++      if (cond_value != Item::COND_FALSE &&
++          having_value != Item::COND_FALSE &&
++          (!conds || conds->val_int()) &&
++          (!having || having->val_int()))
++      {
++	if (do_send_rows &&
++            (procedure ? (procedure->send_row(procedure_fields_list) ||
++             procedure->end_of_records()) : result->send_data(fields_list)))
++	  error= 1;
++	else
++	{
++	  error= (int) result->send_eof();
++	  send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
++                         thd->sent_row_count);
++	}
++      }
++      else
++      {
++	error=(int) result->send_eof();
++        send_records= 0;
++      }
++    }
++    /* Single select (without union) always returns 0 or 1 row */
++    thd->limit_found_rows= send_records;
++    thd->examined_row_count= 0;
++    DBUG_VOID_RETURN;
++  }
++  /*
++    Don't reset the found rows count if there're no tables as
++    FOUND_ROWS() may be called. Never reset the examined row count here.
++    It must be accumulated from all join iterations of all join parts.
++  */
++  if (tables)
++    thd->limit_found_rows= 0;
++
++  if (zero_result_cause)
++  {
++    (void) return_zero_rows(this, result, select_lex->leaf_tables,
++                            *columns_list,
++			    send_row_on_empty_set(),
++			    select_options,
++			    zero_result_cause,
++			    having);
++    DBUG_VOID_RETURN;
++  }
++
++  if ((this->select_lex->options & OPTION_SCHEMA_TABLE) &&
++      get_schema_tables_result(this, PROCESSED_BY_JOIN_EXEC))
++    DBUG_VOID_RETURN;
++
++  if (select_options & SELECT_DESCRIBE)
++  {
++    /*
++      Check if we managed to optimize ORDER BY away and don't use temporary
++      table to resolve ORDER BY: in that case, we may only need to do
++      filesort for GROUP BY.
++    */
++    if (!order && !no_order && (!skip_sort_order || !need_tmp))
++    {
++      /*
++	Reset 'order' to 'group_list' and reinit variables describing
++	'order'
++      */
++      order= group_list;
++      simple_order= simple_group;
++      skip_sort_order= 0;
++    }
++    if (order && 
++        (order != group_list || !(select_options & SELECT_BIG_RESULT)) &&
++	(const_tables == tables ||
++ 	 ((simple_order || skip_sort_order) &&
++	  test_if_skip_sort_order(&join_tab[const_tables], order,
++				  select_limit, 0, 
++                                  &join_tab[const_tables].table->
++                                    keys_in_use_for_query))))
++      order=0;
++    having= tmp_having;
++    select_describe(this, need_tmp,
++		    order != 0 && !skip_sort_order,
++		    select_distinct,
++                    !tables ? "No tables used" : NullS);
++    DBUG_VOID_RETURN;
++  }
++
++  JOIN *curr_join= this;
++  List<Item> *curr_all_fields= &all_fields;
++  List<Item> *curr_fields_list= &fields_list;
++  TABLE *curr_tmp_table= 0;
++  /*
++    Initialize examined rows here because the values from all join parts
++    must be accumulated in examined_row_count. Hence every join
++    iteration must count from zero.
++  */
++  curr_join->examined_rows= 0;
++
++  /* Create a tmp table if distinct or if the sort is too complicated */
++  if (need_tmp)
++  {
++    if (tmp_join)
++    {
++      /*
++        We are in a non-cacheable subquery. Get the saved join structure
++        after optimization.
++        (curr_join may have been modified during the last execution and we
++        need to reset it.)
++      */
++      curr_join= tmp_join;
++    }
++    curr_tmp_table= exec_tmp_table1;
++
++    /* Copy data to the temporary table */
++    thd_proc_info(thd, "Copying to tmp table");
++    DBUG_PRINT("info", ("%s", thd->proc_info));
++    if (!curr_join->sort_and_group &&
++        curr_join->const_tables != curr_join->tables)
++      curr_join->join_tab[curr_join->const_tables].sorted= 0;
++    if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
++    {
++      error= tmp_error;
++      DBUG_VOID_RETURN;
++    }
++    curr_tmp_table->file->info(HA_STATUS_VARIABLE);
++    
++    if (curr_join->having)
++      curr_join->having= curr_join->tmp_having= 0; // Already done
++    
++    /* Change sum_fields reference to calculated fields in tmp_table */
++    if (curr_join != this)
++      curr_join->all_fields= *curr_all_fields;
++    if (!items1)
++    {
++      items1= items0 + all_fields.elements;
++      if (sort_and_group || curr_tmp_table->group ||
++          tmp_table_param.precomputed_group_by)
++      {
++	if (change_to_use_tmp_fields(thd, items1,
++				     tmp_fields_list1, tmp_all_fields1,
++				     fields_list.elements, all_fields))
++	  DBUG_VOID_RETURN;
++      }
++      else
++      {
++	if (change_refs_to_tmp_fields(thd, items1,
++				      tmp_fields_list1, tmp_all_fields1,
++				      fields_list.elements, all_fields))
++	  DBUG_VOID_RETURN;
++      }
++      if (curr_join != this)
++      {
++        curr_join->tmp_all_fields1= tmp_all_fields1;
++        curr_join->tmp_fields_list1= tmp_fields_list1;
++      }
++      curr_join->items1= items1;
++    }
++    curr_all_fields= &tmp_all_fields1;
++    curr_fields_list= &tmp_fields_list1;
++    curr_join->set_items_ref_array(items1);
++    
++    if (sort_and_group || curr_tmp_table->group)
++    {
++      curr_join->tmp_table_param.field_count+= 
++	curr_join->tmp_table_param.sum_func_count+
++	curr_join->tmp_table_param.func_count;
++      curr_join->tmp_table_param.sum_func_count= 
++	curr_join->tmp_table_param.func_count= 0;
++    }
++    else
++    {
++      curr_join->tmp_table_param.field_count+= 
++	curr_join->tmp_table_param.func_count;
++      curr_join->tmp_table_param.func_count= 0;
++    }
++    
++    // procedure can't be used inside subselect => we do nothing special for it
++    if (procedure)
++      procedure->update_refs();
++    
++    if (curr_tmp_table->group)
++    {						// Already grouped
++      if (!curr_join->order && !curr_join->no_order && !skip_sort_order)
++	curr_join->order= curr_join->group_list;  /* order by group */
++      curr_join->group_list= 0;
++    }
++    
++    /*
++      If we have different sort & group then we must sort the data by group
++      and copy it to another tmp table
++      This code is also used if we are using DISTINCT on something
++      we haven't been able to store in the temporary table yet,
++      like SEC_TO_TIME(SUM(...)).
++    */
++
++    if ((curr_join->group_list && (!test_if_subpart(curr_join->group_list,
++						   curr_join->order) || 
++				  curr_join->select_distinct)) ||
++	(curr_join->select_distinct &&
++	 curr_join->tmp_table_param.using_indirect_summary_function))
++    {					/* Must copy to another table */
++      DBUG_PRINT("info",("Creating group table"));
++      
++      /* Free first data from old join */
++      curr_join->join_free();
++      if (curr_join->make_simple_join(this, curr_tmp_table))
++	DBUG_VOID_RETURN;
++      calc_group_buffer(curr_join, group_list);
++      count_field_types(select_lex, &curr_join->tmp_table_param,
++			curr_join->tmp_all_fields1,
++			curr_join->select_distinct && !curr_join->group_list);
++      curr_join->tmp_table_param.hidden_field_count= 
++	(curr_join->tmp_all_fields1.elements-
++	 curr_join->tmp_fields_list1.elements);
++      
++      
++      if (exec_tmp_table2)
++	curr_tmp_table= exec_tmp_table2;
++      else
++      {
++	/* group data to new table */
++
++        /*
++          If the access method is loose index scan then all MIN/MAX
++          functions are precomputed, and should be treated as regular
++          functions. See the extended comment in JOIN::optimize.
++        */
++        if (curr_join->join_tab->is_using_loose_index_scan())
++          curr_join->tmp_table_param.precomputed_group_by= TRUE;
++
++	if (!(curr_tmp_table=
++	      exec_tmp_table2= create_tmp_table(thd,
++						&curr_join->tmp_table_param,
++						*curr_all_fields,
++						(ORDER*) 0,
++						curr_join->select_distinct && 
++						!curr_join->group_list,
++						1, curr_join->select_options,
++						HA_POS_ERROR,
++						(char *) "")))
++	  DBUG_VOID_RETURN;
++	curr_join->exec_tmp_table2= exec_tmp_table2;
++      }
++      if (curr_join->group_list)
++      {
++	thd_proc_info(thd, "Creating sort index");
++	if (curr_join->join_tab == join_tab && save_join_tab())
++	{
++	  DBUG_VOID_RETURN;
++	}
++	if (create_sort_index(thd, curr_join, curr_join->group_list,
++			      HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
++	    make_group_fields(this, curr_join))
++	{
++	  DBUG_VOID_RETURN;
++	}
++        sortorder= curr_join->sortorder;
++      }
++      
++      thd_proc_info(thd, "Copying to group table");
++      DBUG_PRINT("info", ("%s", thd->proc_info));
++      tmp_error= -1;
++      if (curr_join != this)
++      {
++	if (sum_funcs2)
++	{
++	  curr_join->sum_funcs= sum_funcs2;
++	  curr_join->sum_funcs_end= sum_funcs_end2; 
++	}
++	else
++	{
++	  curr_join->alloc_func_list();
++	  sum_funcs2= curr_join->sum_funcs;
++	  sum_funcs_end2= curr_join->sum_funcs_end;
++	}
++      }
++      if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
++					1, TRUE))
++        DBUG_VOID_RETURN;
++      curr_join->group_list= 0;
++      if (!curr_join->sort_and_group &&
++          curr_join->const_tables != curr_join->tables)
++        curr_join->join_tab[curr_join->const_tables].sorted= 0;
++      if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
++	  (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
++				0)))
++      {
++	error= tmp_error;
++	DBUG_VOID_RETURN;
++      }
++      end_read_record(&curr_join->join_tab->read_record);
++      curr_join->const_tables= curr_join->tables; // Mark free for cleanup()
++      curr_join->join_tab[0].table= 0;           // Table is freed
++      
++      // No sum funcs anymore
++      if (!items2)
++      {
++	items2= items1 + all_fields.elements;
++	if (change_to_use_tmp_fields(thd, items2,
++				     tmp_fields_list2, tmp_all_fields2, 
++				     fields_list.elements, tmp_all_fields1))
++	  DBUG_VOID_RETURN;
++        if (curr_join != this)
++        {
++          curr_join->tmp_fields_list2= tmp_fields_list2;
++          curr_join->tmp_all_fields2= tmp_all_fields2;
++        }
++      }
++      curr_fields_list= &curr_join->tmp_fields_list2;
++      curr_all_fields= &curr_join->tmp_all_fields2;
++      curr_join->set_items_ref_array(items2);
++      curr_join->tmp_table_param.field_count+= 
++	curr_join->tmp_table_param.sum_func_count;
++      curr_join->tmp_table_param.sum_func_count= 0;
++    }
++    if (curr_tmp_table->distinct)
++      curr_join->select_distinct=0;		/* Each row is unique */
++    
++    curr_join->join_free();			/* Free quick selects */
++    if (curr_join->select_distinct && ! curr_join->group_list)
++    {
++      thd_proc_info(thd, "Removing duplicates");
++      if (curr_join->tmp_having)
++	curr_join->tmp_having->update_used_tables();
++      if (remove_duplicates(curr_join, curr_tmp_table,
++			    *curr_fields_list, curr_join->tmp_having))
++	DBUG_VOID_RETURN;
++      curr_join->tmp_having=0;
++      curr_join->select_distinct=0;
++    }
++    curr_tmp_table->reginfo.lock_type= TL_UNLOCK;
++    if (curr_join->make_simple_join(this, curr_tmp_table))
++      DBUG_VOID_RETURN;
++    calc_group_buffer(curr_join, curr_join->group_list);
++    count_field_types(select_lex, &curr_join->tmp_table_param, 
++                      *curr_all_fields, 0);
++    
++  }
++  if (procedure)
++    count_field_types(select_lex, &curr_join->tmp_table_param, 
++                      *curr_all_fields, 0);
++  
++  if (curr_join->group || curr_join->implicit_grouping ||
++      curr_join->tmp_table_param.sum_func_count ||
++      (procedure && (procedure->flags & PROC_GROUP)))
++  {
++    if (make_group_fields(this, curr_join))
++    {
++      DBUG_VOID_RETURN;
++    }
++    if (!items3)
++    {
++      if (!items0)
++	init_items_ref_array();
++      items3= ref_pointer_array + (all_fields.elements*4);
++      setup_copy_fields(thd, &curr_join->tmp_table_param,
++			items3, tmp_fields_list3, tmp_all_fields3,
++			curr_fields_list->elements, *curr_all_fields);
++      tmp_table_param.save_copy_funcs= curr_join->tmp_table_param.copy_funcs;
++      tmp_table_param.save_copy_field= curr_join->tmp_table_param.copy_field;
++      tmp_table_param.save_copy_field_end=
++	curr_join->tmp_table_param.copy_field_end;
++      if (curr_join != this)
++      {
++        curr_join->tmp_all_fields3= tmp_all_fields3;
++        curr_join->tmp_fields_list3= tmp_fields_list3;
++      }
++    }
++    else
++    {
++      curr_join->tmp_table_param.copy_funcs= tmp_table_param.save_copy_funcs;
++      curr_join->tmp_table_param.copy_field= tmp_table_param.save_copy_field;
++      curr_join->tmp_table_param.copy_field_end=
++	tmp_table_param.save_copy_field_end;
++    }
++    curr_fields_list= &tmp_fields_list3;
++    curr_all_fields= &tmp_all_fields3;
++    curr_join->set_items_ref_array(items3);
++
++    if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
++				      1, TRUE) || 
++        setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
++        thd->is_fatal_error)
++      DBUG_VOID_RETURN;
++  }
++  if (curr_join->group_list || curr_join->order)
++  {
++    DBUG_PRINT("info",("Sorting for send_fields"));
++    thd_proc_info(thd, "Sorting result");
++    /* If we have already done the group, add HAVING to sorted table */
++    if (curr_join->tmp_having && ! curr_join->group_list && 
++	! curr_join->sort_and_group)
++    {
++      // Some tables may have been const
++      curr_join->tmp_having->update_used_tables();
++      JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables];
++      table_map used_tables= (curr_join->const_table_map |
++			      curr_table->table->map);
++
++      Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having,
++						 used_tables,
++						 used_tables);
++      if (sort_table_cond)
++      {
++	if (!curr_table->select)
++	  if (!(curr_table->select= new SQL_SELECT))
++	    DBUG_VOID_RETURN;
++	if (!curr_table->select->cond)
++	  curr_table->select->cond= sort_table_cond;
++	else
++	{
++	  if (!(curr_table->select->cond=
++		new Item_cond_and(curr_table->select->cond,
++				  sort_table_cond)))
++	    DBUG_VOID_RETURN;
++	  curr_table->select->cond->fix_fields(thd, 0);
++	}
++	curr_table->select_cond= curr_table->select->cond;
++	curr_table->select_cond->top_level_item();
++	DBUG_EXECUTE("where",print_where(curr_table->select->cond,
++					 "select and having",
++                                         QT_ORDINARY););
++	curr_join->tmp_having= make_cond_for_table(curr_join->tmp_having,
++						   ~ (table_map) 0,
++						   ~used_tables);
++	DBUG_EXECUTE("where",print_where(curr_join->tmp_having,
++                                         "having after sort",
++                                         QT_ORDINARY););
++      }
++    }
++    {
++      if (group)
++	curr_join->select_limit= HA_POS_ERROR;
++      else
++      {
++	/*
++          We can abort sorting after thd->select_limit rows if there is no
++	  WHERE clause for any tables after the sorted one.
++	*/
++	JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables+1];
++	JOIN_TAB *end_table= &curr_join->join_tab[curr_join->tables];
++	for (; curr_table < end_table ; curr_table++)
++	{
++	  /*
++	    table->keyuse is set in the case there was an original WHERE clause
++	    on the table that was optimized away.
++	  */
++	  if (curr_table->select_cond ||
++	      (curr_table->keyuse && !curr_table->first_inner))
++	  {
++	    /* We have to sort all rows */
++	    curr_join->select_limit= HA_POS_ERROR;
++	    break;
++	  }
++	}
++      }
++      if (curr_join->join_tab == join_tab && save_join_tab())
++      {
++	DBUG_VOID_RETURN;
++      }
++      /*
++	Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser
++	chose FILESORT to be faster than INDEX SCAN or there is no 
++	suitable index present.
++	Note, that create_sort_index calls test_if_skip_sort_order and may
++	finally replace sorting with index scan if there is a LIMIT clause in
++	the query. XXX: it's never shown in EXPLAIN!
++	OPTION_FOUND_ROWS supersedes LIMIT and is taken into account.
++      */
++      if (create_sort_index(thd, curr_join,
++			    curr_join->group_list ? 
++			    curr_join->group_list : curr_join->order,
++			    curr_join->select_limit,
++			    (select_options & OPTION_FOUND_ROWS ?
++			     HA_POS_ERROR : unit->select_limit_cnt),
++                            curr_join->group_list ? TRUE : FALSE))
++	DBUG_VOID_RETURN;
++      sortorder= curr_join->sortorder;
++      if (curr_join->const_tables != curr_join->tables &&
++          !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
++      {
++        /*
++          If no IO cache exists for the first table then we are using an
++          INDEX SCAN and no filesort. Thus we should not remove the sorted
++          attribute on the INDEX SCAN.
++        */
++        skip_sort_order= 1;
++      }
++    }
++  }
++  /* XXX: When can thd->is_error() be non-zero here? */
++  if (thd->is_error())
++  {
++    error= thd->is_error();
++    DBUG_VOID_RETURN;
++  }
++  curr_join->having= curr_join->tmp_having;
++  curr_join->fields= curr_fields_list;
++  curr_join->procedure= procedure;
++
++  if (is_top_level_join() && thd->cursor && tables != const_tables)
++  {
++    /*
++      We are here if this is JOIN::exec for the last select of the main unit
++      and the client requested to open a cursor.
++      We check that not all tables are constant because this case is not
++      handled by do_select() separately, and this case is not implemented
++      for cursors yet.
++    */
++    DBUG_ASSERT(error == 0);
++    /*
++      curr_join is used only for reusable joins - that is, 
++      to perform SELECT for each outer row (like in subselects).
++      This join is main, so we know for sure that curr_join == join.
++    */
++    DBUG_ASSERT(curr_join == this);
++    /* Open cursor for the last join sweep */
++    error= thd->cursor->open(this);
++  }
++  else
++  {
++    thd_proc_info(thd, "Sending data");
++    DBUG_PRINT("info", ("%s", thd->proc_info));
++    result->send_fields((procedure ? curr_join->procedure_fields_list :
++                         *curr_fields_list),
++                        Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
++    error= do_select(curr_join, curr_fields_list, NULL, procedure);
++    thd->limit_found_rows= curr_join->send_records;
++  }
++
++  /* Accumulate the counts from all join iterations of all join parts. */
++  thd->examined_row_count+= curr_join->examined_rows;
++  DBUG_PRINT("counts", ("thd->examined_row_count: %lu",
++                        (ulong) thd->examined_row_count));
++
++  /* 
++    With EXPLAIN EXTENDED we have to restore original ref_array
++    for a derived table which is always materialized.
++    We also need to do this when we have temp table(s).
++    Otherwise we would not be able to print the query correctly.
++  */ 
++  if (items0 && (thd->lex->describe & DESCRIBE_EXTENDED) &&
++      (select_lex->linkage == DERIVED_TABLE_TYPE ||
++       exec_tmp_table1 || exec_tmp_table2))
++    set_items_ref_array(items0);
++
++  DBUG_VOID_RETURN;
++}
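When GROUP BY and ORDER BY differ, JOIN::exec() above first groups into one temporary table and then sorts the grouped rows for the ORDER BY. A compact standalone sketch of that two-stage shape, with standard containers instead of temporary tables and invented data:

#include <algorithm>
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Stage 1 aggregates (GROUP BY key, SUM(val)); stage 2 sorts the grouped
// rows by the aggregate, as if ORDER BY SUM(val) differed from the GROUP BY.
struct Row { int group_key; long long sum_val; };

int main()
{
  const std::pair<int,int> src[]= {{2,10},{1,5},{2,7},{3,1}};

  // Stage 1: group into the first "tmp table"
  std::map<int,long long> tmp1;
  for (const auto &r : src) tmp1[r.first]+= r.second;

  // Stage 2: copy to a second structure and sort by the aggregate
  std::vector<Row> tmp2;
  for (const auto &g : tmp1) tmp2.push_back({g.first, g.second});
  std::sort(tmp2.begin(), tmp2.end(),
            [](const Row &a, const Row &b) { return a.sum_val < b.sum_val; });

  for (const Row &r : tmp2)
    std::printf("group=%d sum=%lld\n", r.group_key, r.sum_val);
  return 0;
}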
++
++
++/**
++  Clean up join.
++
++  @return
++    Return error that hold JOIN.
++*/
++
++int
++JOIN::destroy()
++{
++  DBUG_ENTER("JOIN::destroy");
++  select_lex->join= 0;
++
++  if (tmp_join)
++  {
++    if (join_tab != tmp_join->join_tab)
++    {
++      JOIN_TAB *tab, *end;
++      for (tab= join_tab, end= tab+tables ; tab != end ; tab++)
++	tab->cleanup();
++    }
++    tmp_join->tmp_join= 0;
++    /*
++      We need to clean up tmp_table_param for reusable JOINs (those with a
++      non-zero tmp_join different from the JOIN itself) because it's not
++      being cleaned up anywhere else (as we need to keep the join reusable).
++    */
++    tmp_table_param.cleanup();
++    tmp_table_param.copy_field= tmp_join->tmp_table_param.copy_field= 0;
++    DBUG_RETURN(tmp_join->destroy());
++  }
++  cond_equal= 0;
++
++  cleanup(1);
++ /* Cleanup items referencing temporary table columns */
++  cleanup_item_list(tmp_all_fields1);
++  cleanup_item_list(tmp_all_fields3);
++  if (exec_tmp_table1)
++    free_tmp_table(thd, exec_tmp_table1);
++  if (exec_tmp_table2)
++    free_tmp_table(thd, exec_tmp_table2);
++  delete select;
++  delete_dynamic(&keyuse);
++  delete procedure;
++  DBUG_RETURN(error);
++}
++
++
++void JOIN::cleanup_item_list(List<Item> &items) const
++{
++  if (!items.is_empty())
++  {
++    List_iterator_fast<Item> it(items);
++    Item *item;
++    while ((item= it++))
++      item->cleanup();
++  }
++}
++
++
++/**
++  An entry point to single-unit select (a select without UNION).
++
++  @param thd                  thread handler
++  @param rref_pointer_array   a reference to ref_pointer_array of
++                              the top-level select_lex for this query
++  @param tables               list of all tables used in this query.
++                              The tables have been pre-opened.
++  @param wild_num             number of wildcards used in the top level 
++                              select of this query.
++                              For example statement
++                              SELECT *, t1.*, catalog.t2.* FROM t0, t1, t2;
++                              has 3 wildcards.
++  @param fields               list of items in SELECT list of the top-level
++                              select
++                              e.g. SELECT a, b, c FROM t1 will have Item_field
++                              for a, b and c in this list.
++  @param conds                top level item of an expression representing
++                              WHERE clause of the top level select
++  @param og_num               total number of ORDER BY and GROUP BY clauses
++                              arguments
++  @param order                linked list of ORDER BY arguments
++  @param group                linked list of GROUP BY arguments
++  @param having               top level item of HAVING expression
++  @param proc_param           list of PROCEDUREs
++  @param select_options       select options (BIG_RESULT, etc)
++  @param result               an instance of result set handling class.
++                              This object is responsible for send result
++                              set rows to the client or inserting them
++                              into a table.
++  @param select_lex           the only SELECT_LEX of this query
++  @param unit                 top-level UNIT of this query
++                              UNIT is an artificial object created by the
++                              parser for every SELECT clause.
++                              e.g.
++                              SELECT * FROM t1 WHERE a1 IN (SELECT * FROM t2)
++                              has 2 unions.
++
++  @retval
++    FALSE  success
++  @retval
++    TRUE   an error
++*/
++
++bool
++mysql_select(THD *thd, Item ***rref_pointer_array,
++	     TABLE_LIST *tables, uint wild_num, List<Item> &fields,
++	     COND *conds, uint og_num,  ORDER *order, ORDER *group,
++	     Item *having, ORDER *proc_param, ulonglong select_options,
++	     select_result *result, SELECT_LEX_UNIT *unit,
++	     SELECT_LEX *select_lex)
++{
++  bool err;
++  bool free_join= 1;
++  DBUG_ENTER("mysql_select");
++
++  select_lex->context.resolve_in_select_list= TRUE;
++  JOIN *join;
++  if (select_lex->join != 0)
++  {
++    join= select_lex->join;
++    /*
++      is it single SELECT in derived table, called in derived table
++      creation
++    */
++    if (select_lex->linkage != DERIVED_TABLE_TYPE ||
++	(select_options & SELECT_DESCRIBE))
++    {
++      if (select_lex->linkage != GLOBAL_OPTIONS_TYPE)
++      {
++	//here is EXPLAIN of subselect or derived table
++	if (join->change_result(result))
++	{
++	  DBUG_RETURN(TRUE);
++	}
++        /*
++          Original join tabs might be overwritten at first
++          subselect execution. So we need to restore them.
++        */
++        Item_subselect *subselect= select_lex->master_unit()->item;
++        if (subselect && subselect->is_uncacheable() && join->reinit())
++          DBUG_RETURN(TRUE);
++      }
++      else
++      {
++        err= join->prepare(rref_pointer_array, tables, wild_num,
++                           conds, og_num, order, group, having, proc_param,
++                           select_lex, unit);
++        if (err)
++	{
++	  goto err;
++	}
++      }
++    }
++    free_join= 0;
++    join->select_options= select_options;
++  }
++  else
++  {
++    if (!(join= new JOIN(thd, fields, select_options, result)))
++	DBUG_RETURN(TRUE);
++    thd_proc_info(thd, "init");
++    thd->used_tables=0;                         // Updated by setup_fields
++    err= join->prepare(rref_pointer_array, tables, wild_num,
++                       conds, og_num, order, group, having, proc_param,
++                       select_lex, unit);
++    if (err)
++    {
++      goto err;
++    }
++  }
++
++  if ((err= join->optimize()))
++  {
++    goto err;					// 1
++  }
++
++  if (thd->lex->describe & DESCRIBE_EXTENDED)
++  {
++    join->conds_history= join->conds;
++    join->having_history= (join->having?join->having:join->tmp_having);
++  }
++
++  if (thd->is_error())
++    goto err;
++
++  join->exec();
++
++  if (thd->cursor && thd->cursor->is_open())
++  {
++    /*
++      A cursor was opened for the last sweep in exec().
++      We are here only if this is mysql_select for top-level SELECT_LEX_UNIT
++      and there were no error.
++    */
++    free_join= 0;
++  }
++
++  if (thd->lex->describe & DESCRIBE_EXTENDED)
++  {
++    select_lex->where= join->conds_history;
++    select_lex->having= join->having_history;
++  }
++
++err:
++  if (free_join)
++  {
++    thd_proc_info(thd, "end");
++    err|= select_lex->cleanup();
++    DBUG_RETURN(err || thd->is_error());
++  }
++  DBUG_RETURN(join->error);
++}
++
++/*****************************************************************************
++  Create JOIN_TABS, make a guess about the table types,
++  Approximate how many records will be used in each table
++*****************************************************************************/
++
++static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
++				      TABLE *table,
++				      const key_map *keys,ha_rows limit)
++{
++  int error;
++  DBUG_ENTER("get_quick_record_count");
++#ifndef EMBEDDED_LIBRARY                      // Avoid compiler warning
++  uchar buff[STACK_BUFF_ALLOC];
++#endif
++  if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
++    DBUG_RETURN(0);                           // Fatal error flag is set
++  if (select)
++  {
++    select->head=table;
++    if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0,
++                                          limit, 0)) == 1)
++      DBUG_RETURN(select->quick->records);
++    if (error == -1)
++    {
++      table->reginfo.impossible_range=1;
++      DBUG_RETURN(0);
++    }
++    DBUG_PRINT("warning",("Couldn't use record count on const keypart"));
++  }
++  DBUG_RETURN(HA_POS_ERROR);                    /* This shouldn't happen */
++}
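get_quick_record_count() folds the three possible outcomes of range analysis into a row estimate. A small standalone sketch of that mapping; the names and the sentinel value here are illustrative, not the server's:

#include <climits>
#include <cstdio>

// Range analysis returns 1 (usable estimate), -1 (impossible range) or
// 0 (no usable range); fold that into a row count plus an "impossible" flag.
static const unsigned long long POS_ERROR= ULLONG_MAX;   // "don't know"

static unsigned long long quick_record_count(int analysis_result,
                                             unsigned long long quick_rows,
                                             bool *impossible_range)
{
  *impossible_range= false;
  if (analysis_result == 1)
    return quick_rows;            // range scan possible: use its estimate
  if (analysis_result == -1)
  {
    *impossible_range= true;      // WHERE can never match on this table
    return 0;
  }
  return POS_ERROR;               // fall back to the handler's row count
}

int main()
{
  bool imp;
  std::printf("%llu\n", quick_record_count(1, 42, &imp));
  std::printf("%llu (impossible=%d)\n", quick_record_count(-1, 0, &imp), imp);
  return 0;
}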
++
++/*
++   This structure is used to collect info on potentially sargable
++   predicates in order to check whether they become sargable after
++   reading const tables.
++   We form a bitmap of indexes that can be used for sargable predicates.
++   Only such indexes are involved in range analysis.
++*/
++typedef struct st_sargable_param
++{
++  Field *field;              /* field against which to check sargability */
++  Item **arg_value;          /* values of potential keys for lookups     */
++  uint num_values;           /* number of values in the above array      */
++} SARGABLE_PARAM;  
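Later in make_join_statistics() these entries are revisited after the const tables have been read: a predicate becomes sargable once all of its compared values are constant, and the field's candidate indexes are then merged into const_keys. A simplified standalone sketch of that step, with invented types in place of Field/Item:

#include <cstdint>
#include <cstdio>

// Simplified stand-ins: a value that may or may not be constant yet, and a
// sargable entry carrying a bitmap of candidate indexes for its field.
struct FakeValue { bool is_const; };
struct FakeSargable {
  uint64_t  possible_keys;     // bitmap of indexes on the field
  FakeValue values[2];
  unsigned  num_values;
};

static uint64_t merge_sargable(const FakeSargable *s, uint64_t const_keys)
{
  bool all_const= true;
  for (unsigned j= 0; j < s->num_values; j++)
    all_const&= s->values[j].is_const;
  // Only a fully-constant predicate lets us add the candidate indexes.
  return all_const ? (const_keys | s->possible_keys) : const_keys;
}

int main()
{
  FakeSargable s= { 0x6, { {true}, {true} }, 2 };
  std::printf("const_keys=0x%llx\n",
              (unsigned long long) merge_sargable(&s, 0x1));
  return 0;
}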
++
++/**
++  Calculate the best possible join and initialize the join structure.
++
++  @retval
++    0	ok
++  @retval
++    1	Fatal error
++*/
++
++static bool
++make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
++		     DYNAMIC_ARRAY *keyuse_array)
++{
++  int error;
++  TABLE *table;
++  TABLE_LIST *tables= tables_arg;
++  uint i,table_count,const_count,key;
++  table_map found_const_table_map, all_table_map, found_ref, refs;
++  key_map const_ref, eq_part;
++  TABLE **table_vector;
++  JOIN_TAB *stat,*stat_end,*s,**stat_ref;
++  KEYUSE *keyuse,*start_keyuse;
++  table_map outer_join=0;
++  SARGABLE_PARAM *sargables= 0;
++  JOIN_TAB *stat_vector[MAX_TABLES+1];
++  DBUG_ENTER("make_join_statistics");
++
++  table_count=join->tables;
++  stat=(JOIN_TAB*) join->thd->calloc(sizeof(JOIN_TAB)*table_count);
++  stat_ref=(JOIN_TAB**) join->thd->alloc(sizeof(JOIN_TAB*)*MAX_TABLES);
++  table_vector=(TABLE**) join->thd->alloc(sizeof(TABLE*)*(table_count*2));
++  if (!stat || !stat_ref || !table_vector)
++    DBUG_RETURN(1);				// Eom /* purecov: inspected */
++
++  join->best_ref=stat_vector;
++
++  stat_end=stat+table_count;
++  found_const_table_map= all_table_map=0;
++  const_count=0;
++
++  for (s= stat, i= 0;
++       tables;
++       s++, tables= tables->next_leaf, i++)
++  {
++    TABLE_LIST *embedding= tables->embedding;
++    stat_vector[i]=s;
++    s->keys.init();
++    s->const_keys.init();
++    s->checked_keys.init();
++    s->needed_reg.init();
++    table_vector[i]=s->table=table=tables->table;
++    table->pos_in_table_list= tables;
++    error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
++    if (error)
++    {
++      table->file->print_error(error, MYF(0));
++      goto error;
++    }
++    table->quick_keys.clear_all();
++    table->reginfo.join_tab=s;
++    table->reginfo.not_exists_optimize=0;
++    bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->s->keys);
++    all_table_map|= table->map;
++    s->join=join;
++    s->info=0;					// For describe
++
++    s->dependent= tables->dep_tables;
++    s->key_dependent= 0;
++    if (tables->schema_table)
++      table->file->stats.records= 2;
++    table->quick_condition_rows= table->file->stats.records;
++
++    s->on_expr_ref= &tables->on_expr;
++    if (*s->on_expr_ref)
++    {
++      /* s is the only inner table of an outer join */
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++      if ((!table->file->stats.records || table->no_partitions_used) && !embedding)
++#else
++      if (!table->file->stats.records && !embedding)
++#endif
++      {						// Empty table
++        s->dependent= 0;                        // Ignore LEFT JOIN depend.
++	set_position(join,const_count++,s,(KEYUSE*) 0);
++	continue;
++      }
++      outer_join|= table->map;
++      s->embedding_map= 0;
++      for (;embedding; embedding= embedding->embedding)
++        s->embedding_map|= embedding->nested_join->nj_map;
++      continue;
++    }
++    if (embedding)
++    {
++      /* s belongs to a nested join, maybe to several embedded joins */
++      s->embedding_map= 0;
++      do
++      {
++        NESTED_JOIN *nested_join= embedding->nested_join;
++        s->embedding_map|=nested_join->nj_map;
++        s->dependent|= embedding->dep_tables;
++        embedding= embedding->embedding;
++        outer_join|= nested_join->used_tables;
++      }
++      while (embedding);
++      continue;
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    const bool no_partitions_used= table->no_partitions_used;
++#else
++    const bool no_partitions_used= FALSE;
++#endif
++    if ((table->s->system || table->file->stats.records <= 1 ||
++         no_partitions_used) &&
++	!s->dependent &&
++	(table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
++        !table->fulltext_searched && !join->no_const_tables)
++    {
++      set_position(join,const_count++,s,(KEYUSE*) 0);
++    }
++  }
++  stat_vector[i]=0;
++  join->outer_join=outer_join;
++
++  if (join->outer_join)
++  {
++    /* 
++       Build transitive closure for relation 'to be dependent on'.
++       This will speed up the plan search for many cases with outer joins,
++       as well as allow us to catch illegal cross references.
++       Warshall's algorithm is used to build the transitive closure.
++       As we may restart the outer loop up to 'table_count' times, the
++       complexity of the algorithm is O((number of tables)^3).
++       However, most of the iterations will be short-circuited when
++       there are no dependencies to propagate.
++    */
++    for (i= 0 ; i < table_count ; i++)
++    {
++      uint j;
++      table= stat[i].table;
++
++      if (!table->reginfo.join_tab->dependent)
++        continue;
++
++      /* Add my dependencies to other tables depending on me */
++      for (j= 0, s= stat ; j < table_count ; j++, s++)
++      {
++        if (s->dependent & table->map)
++        {
++          table_map was_dependent= s->dependent;
++          s->dependent |= table->reginfo.join_tab->dependent;
++          /*
++            If we change dependencies for a table we already have
++            processed: Redo dependency propagation from this table.
++          */
++          if (i > j && s->dependent != was_dependent)
++          {
++            i = j-1;
++            break;
++          }
++        }
++      }
++    }
++
++    for (i= 0, s= stat ; i < table_count ; i++, s++)
++    {
++      /* Catch illegal cross references for outer joins */
++      if (s->dependent & s->table->map)
++      {
++        join->tables=0;			// Don't use join->table
++        my_message(ER_WRONG_OUTER_JOIN, ER(ER_WRONG_OUTER_JOIN), MYF(0));
++        goto error;
++      }
++
++      if (outer_join & s->table->map)
++        s->table->maybe_null= 1;
++      s->key_dependent= s->dependent;
++    }
++  }
++
++  if (conds || outer_join)
++    if (update_ref_and_keys(join->thd, keyuse_array, stat, join->tables,
++                            conds, join->cond_equal,
++                            ~outer_join, join->select_lex, &sargables))
++      goto error;
++
++  /* Read tables with 0 or 1 rows (system tables) */
++  join->const_table_map= 0;
++
++  for (POSITION *p_pos=join->positions, *p_end=p_pos+const_count;
++       p_pos < p_end ;
++       p_pos++)
++  {
++    int tmp;
++    s= p_pos->table;
++    s->type=JT_SYSTEM;
++    join->const_table_map|=s->table->map;
++    if ((tmp=join_read_const_table(s, p_pos)))
++    {
++      if (tmp > 0)
++	goto error;		// Fatal error
++    }
++    else
++      found_const_table_map|= s->table->map;
++  }
++
++  /* loop until no more const tables are found */
++  int ref_changed;
++  do
++  {
++  more_const_tables_found:
++    ref_changed = 0;
++    found_ref=0;
++
++    /*
++      We only have to loop from stat_vector + const_count as
++      set_position() will move all const_tables first in stat_vector
++    */
++
++    for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++)
++    {
++      table=s->table;
++
++      /* 
++        If an equi-join condition on a key is null-rejecting and, after
++        substitution of a const table, the key value happens to be NULL,
++        then we can state that there are no matches for this equi-join.
++      */  
++      if ((keyuse= s->keyuse) && *s->on_expr_ref && !s->embedding_map)
++      {
++        /* 
++          When performing an outer join operation if there are no matching rows
++          for the single row of the outer table all the inner tables are to be
++          null complemented and thus considered as constant tables.
++          Here we apply this consideration to the case of outer join operations 
++          with a single inner table only because the case with nested tables
++          would require a more thorough analysis.
++          TODO. Apply single row substitution to null complemented inner tables
++          for nested outer join operations. 
++	*/              
++        while (keyuse->table == table)
++        {
++          if (!(keyuse->val->used_tables() & ~join->const_table_map) &&
++              keyuse->val->is_null() && keyuse->null_rejecting)
++          {
++            s->type= JT_CONST;
++            mark_as_null_row(table);
++            found_const_table_map|= table->map;
++	    join->const_table_map|= table->map;
++	    set_position(join,const_count++,s,(KEYUSE*) 0);
++            goto more_const_tables_found;
++           }
++	  keyuse++;
++        }
++      }
++
++      if (s->dependent)				// If dependent on some table
++      {
++	// All dep. must be constants
++	if (s->dependent & ~(found_const_table_map))
++	  continue;
++	if (table->file->stats.records <= 1L &&
++	    (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
++            !table->pos_in_table_list->embedding)
++	{					// system table
++	  int tmp= 0;
++	  s->type=JT_SYSTEM;
++	  join->const_table_map|=table->map;
++	  set_position(join,const_count++,s,(KEYUSE*) 0);
++	  if ((tmp= join_read_const_table(s, join->positions+const_count-1)))
++	  {
++	    if (tmp > 0)
++	      goto error;			// Fatal error
++	  }
++	  else
++	    found_const_table_map|= table->map;
++	  continue;
++	}
++      }
++      /* check if table can be read by key or table only uses const refs */
++      if ((keyuse=s->keyuse))
++      {
++	s->type= JT_REF;
++	while (keyuse->table == table)
++	{
++	  start_keyuse=keyuse;
++	  key=keyuse->key;
++	  s->keys.set_bit(key);               // QQ: remove this ?
++
++	  refs=0;
++          const_ref.clear_all();
++	  eq_part.clear_all();
++	  do
++	  {
++	    if (keyuse->val->type() != Item::NULL_ITEM && !keyuse->optimize)
++	    {
++	      if (!((~found_const_table_map) & keyuse->used_tables))
++		const_ref.set_bit(keyuse->keypart);
++	      else
++		refs|=keyuse->used_tables;
++	      eq_part.set_bit(keyuse->keypart);
++	    }
++	    keyuse++;
++	  } while (keyuse->table == table && keyuse->key == key);
++
++	  if (eq_part.is_prefix(table->key_info[key].key_parts) &&
++              !table->fulltext_searched && 
++              !table->pos_in_table_list->embedding)
++	  {
++            if ((table->key_info[key].flags & (HA_NOSAME | HA_END_SPACE_KEY))
++                 == HA_NOSAME)
++            {
++	      if (const_ref == eq_part)
++	      {					// Found everything for ref.
++	        int tmp;
++	        ref_changed = 1;
++	        s->type= JT_CONST;
++	        join->const_table_map|=table->map;
++	        set_position(join,const_count++,s,start_keyuse);
++	        if (create_ref_for_key(join, s, start_keyuse,
++				       found_const_table_map))
++                  goto error;
++	        if ((tmp=join_read_const_table(s,
++                                               join->positions+const_count-1)))
++	        {
++		  if (tmp > 0)
++		    goto error;			// Fatal error
++	        }
++	        else
++		  found_const_table_map|= table->map;
++	        break;
++	      }
++	      else
++	        found_ref|= refs;      // Table is const if all refs are const
++	    }
++            else if (const_ref == eq_part)
++              s->const_keys.set_bit(key);
++          }
++	}
++      }
++    }
++  } while (join->const_table_map & found_ref && ref_changed);
++
++  /* 
++    Update info on indexes that can be used for search lookups as
++    reading const tables may have added new sargable predicates.
++  */
++  if (const_count && sargables)
++  {
++    for( ; sargables->field ; sargables++)
++    {
++      Field *field= sargables->field;
++      JOIN_TAB *join_tab= field->table->reginfo.join_tab;
++      key_map possible_keys= field->key_start;
++      possible_keys.intersect(field->table->keys_in_use_for_query);
++      bool is_const= 1;
++      for (uint j=0; j < sargables->num_values; j++)
++        is_const&= sargables->arg_value[j]->const_item();
++      if (is_const)
++        join_tab[0].const_keys.merge(possible_keys);
++    }
++  }
++
++  /* Calc how many (possible) matched records in each table */
++
++  for (s=stat ; s < stat_end ; s++)
++  {
++    if (s->type == JT_SYSTEM || s->type == JT_CONST)
++    {
++      /* Only one matching row */
++      s->found_records=s->records=s->read_time=1; s->worst_seeks=1.0;
++      continue;
++    }
++    /* Approximate found rows and time to read them */
++    s->found_records=s->records=s->table->file->stats.records;
++    s->read_time=(ha_rows) s->table->file->scan_time();
++
++    /*
++      Set a max range of how many seeks we can expect when using keys.
++      This can't be too high, as otherwise we are likely to use a
++      table scan.
++    */
++    s->worst_seeks= MYSQL_MIN((double) s->found_records / 10,
++			(double) s->read_time*3);
++    if (s->worst_seeks < 2.0)			// Fix for small tables
++      s->worst_seeks=2.0;
++
++    /*
++      Add to stat->const_keys those indexes for which all group fields or
++      all select distinct fields participate in one index.
++    */
++    add_group_and_distinct_keys(join, s);
++
++    if (!s->const_keys.is_clear_all() &&
++        !s->table->pos_in_table_list->embedding)
++    {
++      ha_rows records;
++      SQL_SELECT *select;
++      select= make_select(s->table, found_const_table_map,
++			  found_const_table_map,
++			  *s->on_expr_ref ? *s->on_expr_ref : conds,
++			  1, &error);
++      if (!select)
++        goto error;
++      records= get_quick_record_count(join->thd, select, s->table,
++				      &s->const_keys, join->row_limit);
++      s->quick=select->quick;
++      s->needed_reg=select->needed_reg;
++      select->quick=0;
++      if (records == 0 && s->table->reginfo.impossible_range)
++      {
++	/*
++	  Impossible WHERE or ON expression
++	  In case of ON, we mark that the we match one empty NULL row.
++	  In case of WHERE, don't set found_const_table_map to get the
++	  caller to abort with a zero row result.
++	*/
++	join->const_table_map|= s->table->map;
++	set_position(join,const_count++,s,(KEYUSE*) 0);
++	s->type= JT_CONST;
++	if (*s->on_expr_ref)
++	{
++	  /* Generate empty row */
++	  s->info= "Impossible ON condition";
++	  found_const_table_map|= s->table->map;
++	  s->type= JT_CONST;
++	  mark_as_null_row(s->table);		// All fields are NULL
++	}
++      }
++      if (records != HA_POS_ERROR)
++      {
++	s->found_records=records;
++	s->read_time= (ha_rows) (s->quick ? s->quick->read_time : 0.0);
++      }
++      delete select;
++    }
++  }
++
++  join->join_tab=stat;
++  join->map2table=stat_ref;
++  join->table= join->all_tables=table_vector;
++  join->const_tables=const_count;
++  join->found_const_table_map=found_const_table_map;
++
++  /* Find an optimal join order of the non-constant tables. */
++  if (join->const_tables != join->tables)
++  {
++    optimize_keyuse(join, keyuse_array);
++    if (choose_plan(join, all_table_map & ~join->const_table_map))
++      goto error;
++  }
++  else
++  {
++    memcpy((uchar*) join->best_positions,(uchar*) join->positions,
++	   sizeof(POSITION)*join->const_tables);
++    join->best_read=1.0;
++  }
++  /* Generate an execution plan from the found optimal join order. */
++  DBUG_RETURN(join->thd->killed || get_best_combination(join));
++
++error:
++  /*
++    Need to clean up join_tab from TABLEs in case of error.
++    They won't get cleaned up by JOIN::cleanup() because JOIN::join_tab
++    may not be assigned yet by this function (which is building join_tab).
++    Dangling TABLE::reginfo.join_tab may cause part_of_refkey to choke. 
++  */
++  for (tables= tables_arg; tables; tables= tables->next_leaf)
++    tables->table->reginfo.join_tab= NULL;
++  DBUG_RETURN (1);
++}
++
++
++/*****************************************************************************
++  Check which keys are used and which tables reference which tables
++  Updates in stat:
++	  keys	     Bitmap of all used keys
++	  const_keys Bitmap of all keys that may be used with quick_select
++	  keyuse     Pointer to possible keys
++*****************************************************************************/
++
++/// Used when finding key fields
++typedef struct key_field_t {
++  Field		*field;
++  Item		*val;			///< May be empty if diff constant
++  uint		level;
++  uint		optimize;
++  bool		eq_func;
++  /**
++    If true, the condition this struct represents will not be satisfied
++    when val IS NULL.
++  */
++  bool          null_rejecting; 
++  bool         *cond_guard; /* See KEYUSE::cond_guard */
++} KEY_FIELD;
++
++/* Values in optimize */
++#define KEY_OPTIMIZE_EXISTS		1
++#define KEY_OPTIMIZE_REF_OR_NULL	2
++
++/**
++  Merge new key definitions to old ones, remove those not used in both.
++
++  This is called for OR between different levels.
++
++  To be able to do 'ref_or_null' we merge a comparison of a column
++  and 'column IS NULL' to one test.  This is useful for sub select queries
++  that are internally transformed to something like:
++
++  @code
++  SELECT * FROM t1 WHERE t1.key=outer_ref_field or t1.key IS NULL 
++  @endcode
++
++  KEY_FIELD::null_rejecting is processed as follows: @n
++  result has null_rejecting=true if it is set for both ORed references.
++  for example:
++  -   (t2.key = t1.field OR t2.key  =  t1.field) -> null_rejecting=true
++  -   (t2.key = t1.field OR t2.key <=> t1.field) -> null_rejecting=false
++
++  @todo
++    The result of this is that we're missing some 'ref' accesses.
++    OptimizerTeam: Fix this
++*/
++
++static KEY_FIELD *
++merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
++		 uint and_level)
++{
++  if (start == new_fields)
++    return start;				// Impossible or
++  if (new_fields == end)
++    return start;				// No new fields, skip all
++
++  KEY_FIELD *first_free=new_fields;
++
++  /* Mark all found fields in old array */
++  for (; new_fields != end ; new_fields++)
++  {
++    for (KEY_FIELD *old=start ; old != first_free ; old++)
++    {
++      if (old->field == new_fields->field)
++      {
++        /*
++          NOTE: below const_item() call really works as "!used_tables()", i.e.
++          it can return FALSE where it is feasible to make it return TRUE.
++          
++          The cause is as follows: Some of the tables are already known to be
++          const tables (the detection code is in make_join_statistics(),
++          above the update_ref_and_keys() call), but we didn't propagate 
++          information about this: TABLE::const_table is not set to TRUE, and
++          Item::update_used_tables() hasn't been called for each item.
++          The result of this is that we're missing some 'ref' accesses.
++          TODO: OptimizerTeam: Fix this
++        */
++	if (!new_fields->val->const_item())
++	{
++	  /*
++	    If the value matches, we can use the key reference.
++	    If not, we keep it until we have examined all new values
++	  */
++	  if (old->val->eq(new_fields->val, old->field->binary()))
++	  {
++	    old->level= and_level;
++	    old->optimize= ((old->optimize & new_fields->optimize &
++			     KEY_OPTIMIZE_EXISTS) |
++			    ((old->optimize | new_fields->optimize) &
++			     KEY_OPTIMIZE_REF_OR_NULL));
++            old->null_rejecting= (old->null_rejecting &&
++                                  new_fields->null_rejecting);
++	  }
++	}
++	else if (old->eq_func && new_fields->eq_func &&
++                 old->val->eq_by_collation(new_fields->val, 
++                                           old->field->binary(),
++                                           old->field->charset()))
++
++	{
++	  old->level= and_level;
++	  old->optimize= ((old->optimize & new_fields->optimize &
++			   KEY_OPTIMIZE_EXISTS) |
++			  ((old->optimize | new_fields->optimize) &
++			   KEY_OPTIMIZE_REF_OR_NULL));
++          old->null_rejecting= (old->null_rejecting &&
++                                new_fields->null_rejecting);
++	}
++	else if (old->eq_func && new_fields->eq_func &&
++		 ((old->val->const_item() && old->val->is_null()) || 
++                  new_fields->val->is_null()))
++	{
++	  /* field = expression OR field IS NULL */
++	  old->level= and_level;
++	  old->optimize= KEY_OPTIMIZE_REF_OR_NULL;
++	  /*
++            Remember the NOT NULL value unless the value does not depend
++            on other tables.
++          */
++	  if (!old->val->used_tables() && old->val->is_null())
++	    old->val= new_fields->val;
++          /* The referred expression can be NULL: */ 
++          old->null_rejecting= 0;
++	}
++	else
++	{
++	  /*
++	    We are comparing two different constants.  In this case we can't
++	    use a key-lookup on this so it's better to remove the value
++	    and let the range optimizer handle it
++	  */
++	  if (old == --first_free)		// If last item
++	    break;
++	  *old= *first_free;			// Remove old value
++	  old--;				// Retry this value
++	}
++      }
++    }
++  }
++  /* Remove all not used items */
++  for (KEY_FIELD *old=start ; old != first_free ;)
++  {
++    if (old->level != and_level)
++    {						// Not used in all levels
++      if (old == --first_free)
++	break;
++      *old= *first_free;			// Remove old value
++      continue;
++    }
++    old++;
++  }
++  return first_free;
++}
++
++
++/**
++  Add a possible key to array of possible keys if it's usable as a key
++
++    @param key_fields      Pointer to add key, if usable
++    @param and_level       And level, to be stored in KEY_FIELD
++    @param cond            Condition predicate
++    @param field           Field used in comparison
++    @param eq_func         True if we used =, <=> or IS NULL
++    @param value           Value used for comparison with field
++    @param usable_tables   Tables which can be used for key optimization
++    @param sargables       IN/OUT Array of found sargable candidates
++
++  @note
++    If we are doing a NOT NULL comparison on a NOT NULL field in an outer join
++    table, we store this to be able to do the 'not exists' optimization later.
++
++  @returns
++    *key_fields is incremented if we stored a key in the array
++*/
++
++static void
++add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond,
++              Field *field, bool eq_func, Item **value, uint num_values,
++              table_map usable_tables, SARGABLE_PARAM **sargables)
++{
++  uint exists_optimize= 0;
++  if (!(field->flags & PART_KEY_FLAG))
++  {
++    // Don't remove column IS NULL on a LEFT JOIN table
++    if (!eq_func || (*value)->type() != Item::NULL_ITEM ||
++        !field->table->maybe_null || field->null_ptr)
++      return;					// Not a key. Skip it
++    exists_optimize= KEY_OPTIMIZE_EXISTS;
++    DBUG_ASSERT(num_values == 1);
++  }
++  else
++  {
++    table_map used_tables=0;
++    bool optimizable=0;
++    for (uint i=0; i<num_values; i++)
++    {
++      used_tables|=(value[i])->used_tables();
++      if (!((value[i])->used_tables() & (field->table->map | RAND_TABLE_BIT)))
++        optimizable=1;
++    }
++    if (!optimizable)
++      return;
++    if (!(usable_tables & field->table->map))
++    {
++      if (!eq_func || (*value)->type() != Item::NULL_ITEM ||
++          !field->table->maybe_null || field->null_ptr)
++	return;					// Can't use left join optimize
++      exists_optimize= KEY_OPTIMIZE_EXISTS;
++    }
++    else
++    {
++      JOIN_TAB *stat=field->table->reginfo.join_tab;
++      key_map possible_keys=field->key_start;
++      possible_keys.intersect(field->table->keys_in_use_for_query);
++      stat[0].keys.merge(possible_keys);             // Add possible keys
++
++      /*
++	Save the following cases:
++	Field op constant
++	Field LIKE constant where constant doesn't start with a wildcard
++	Field = field2 where field2 is in a different table
++	Field op formula
++	Field IS NULL
++	Field IS NOT NULL
++         Field BETWEEN ...
++         Field IN ...
++      */
++      stat[0].key_dependent|=used_tables;
++
++      bool is_const=1;
++      for (uint i=0; i<num_values; i++)
++      {
++        if (!(is_const&= value[i]->const_item()))
++          break;
++      }
++      if (is_const)
++        stat[0].const_keys.merge(possible_keys);
++      else if (!eq_func)
++      {
++        /* 
++          Save info to be able to check whether this predicate can be
++          considered as sargable for range analysis after reading const tables.
++          We do not save info about equalities as update_const_equal_items
++          will take care of updating info on keys from sargable equalities. 
++        */
++        (*sargables)--;
++        (*sargables)->field= field;
++        (*sargables)->arg_value= value;
++        (*sargables)->num_values= num_values;
++      }
++      /*
++	We can't always use indexes when comparing a string index to a
++	number. cmp_type() is checked to allow compare of dates to numbers.
++        eq_func is NEVER true when num_values > 1
++       */
++      if (!eq_func)
++        return;
++      if (field->result_type() == STRING_RESULT)
++      {
++        if ((*value)->result_type() != STRING_RESULT)
++        {
++          if (field->cmp_type() != (*value)->result_type())
++            return;
++        }
++        else
++        {
++          /*
++            We can't use indexes if the effective collation
++            of the operation differs from the field collation.
++          */
++          if (field->cmp_type() == STRING_RESULT &&
++              ((Field_str*)field)->charset() != cond->compare_collation())
++            return;
++        }
++      }
++    }
++  }
++  /*
++    For the moment eq_func is always true. This slot is reserved for future
++    extensions where we want to remember other things than just eq comparisons
++  */
++  DBUG_ASSERT(eq_func);
++  /* Store possible eq field */
++  (*key_fields)->field=		field;
++  (*key_fields)->eq_func=	eq_func;
++  (*key_fields)->val=		*value;
++  (*key_fields)->level=		and_level;
++  (*key_fields)->optimize=	exists_optimize;
++  /*
++    If the condition has form "tbl.keypart = othertbl.field" and 
++    othertbl.field can be NULL, there will be no matches if othertbl.field 
++    has NULL value.
++    We use null_rejecting in add_not_null_conds() to add
++    'othertbl.field IS NOT NULL' to tab->select_cond.
++  */
++  (*key_fields)->null_rejecting= ((cond->functype() == Item_func::EQ_FUNC ||
++                                   cond->functype() == Item_func::MULT_EQUAL_FUNC) &&
++                                  ((*value)->type() == Item::FIELD_ITEM) &&
++                                  ((Item_field*)*value)->field->maybe_null());
++  (*key_fields)->cond_guard= NULL;
++  (*key_fields)++;
++}
++
++/**
++  Add possible keys to array of possible keys originated from a simple
++  predicate.
++
++    @param  key_fields     Pointer to add key, if usable
++    @param  and_level      And level, to be stored in KEY_FIELD
++    @param  cond           Condition predicate
++    @param  field          Field used in comparison
++    @param  eq_func        True if we used =, <=> or IS NULL
++    @param  value          Value used for comparison with field
++                           Is NULL for BETWEEN and IN    
++    @param  usable_tables  Tables which can be used for key optimization
++    @param  sargables      IN/OUT Array of found sargable candidates
++
++  @note
++    If field items f1 and f2 belong to the same multiple equality and
++    a key is added for f1, then the same key is added for f2.
++
++  @returns
++    *key_fields is incremented if we stored a key in the array
++*/
++
++static void
++add_key_equal_fields(KEY_FIELD **key_fields, uint and_level,
++                     Item_func *cond, Item_field *field_item,
++                     bool eq_func, Item **val,
++                     uint num_values, table_map usable_tables,
++                     SARGABLE_PARAM **sargables)
++{
++  Field *field= field_item->field;
++  add_key_field(key_fields, and_level, cond, field,
++                eq_func, val, num_values, usable_tables, sargables);
++  Item_equal *item_equal= field_item->item_equal;
++  if (item_equal)
++  { 
++    /*
++      Add to the set of possible key values every substitution of
++      the field for an equal field included into item_equal
++    */
++    Item_equal_iterator it(*item_equal);
++    Item_field *item;
++    while ((item= it++))
++    {
++      if (!field->eq(item->field))
++      {
++        add_key_field(key_fields, and_level, cond, item->field,
++                      eq_func, val, num_values, usable_tables,
++                      sargables);
++      }
++    }
++  }
++}
++
++
++/**
++  Check if an expression is a non-outer field.
++
++  Checks if an expression is a field and belongs to the current select.
++
++  @param   field  Item expression to check
++
++  @return boolean
++     @retval TRUE   the expression is a local field
++     @retval FALSE  it's something else
++*/
++
++static bool
++is_local_field (Item *field)
++{
++  return field->real_item()->type() == Item::FIELD_ITEM
++    && !(field->used_tables() & OUTER_REF_TABLE_BIT)
++    && !((Item_field *)field->real_item())->depended_from;
++}
++
++
++static void
++add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level,
++               COND *cond, table_map usable_tables,
++               SARGABLE_PARAM **sargables)
++{
++  if (cond->type() == Item_func::COND_ITEM)
++  {
++    List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
++    KEY_FIELD *org_key_fields= *key_fields;
++
++    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++    {
++      Item *item;
++      while ((item=li++))
++        add_key_fields(join, key_fields, and_level, item, usable_tables,
++                       sargables);
++      for (; org_key_fields != *key_fields ; org_key_fields++)
++	org_key_fields->level= *and_level;
++    }
++    else
++    {
++      (*and_level)++;
++      add_key_fields(join, key_fields, and_level, li++, usable_tables,
++                     sargables);
++      Item *item;
++      while ((item=li++))
++      {
++	KEY_FIELD *start_key_fields= *key_fields;
++	(*and_level)++;
++        add_key_fields(join, key_fields, and_level, item, usable_tables,
++                       sargables);
++	*key_fields=merge_key_fields(org_key_fields,start_key_fields,
++				     *key_fields,++(*and_level));
++      }
++    }
++    return;
++  }
++
++  /* 
++    Subquery optimization: Conditions that are pushed down into subqueries
++    are wrapped into Item_func_trig_cond. We process the wrapped condition
++    but need to set cond_guard for KEYUSE elements generated from it.
++  */
++  {
++    if (cond->type() == Item::FUNC_ITEM &&
++        ((Item_func*)cond)->functype() == Item_func::TRIG_COND_FUNC)
++    {
++      Item *cond_arg= ((Item_func*)cond)->arguments()[0];
++      if (!join->group_list && !join->order &&
++          join->unit->item && 
++          join->unit->item->substype() == Item_subselect::IN_SUBS &&
++          !join->unit->is_union())
++      {
++        KEY_FIELD *save= *key_fields;
++        add_key_fields(join, key_fields, and_level, cond_arg, usable_tables,
++                       sargables);
++        // Indicate that this ref access candidate is for subquery lookup:
++        for (; save != *key_fields; save++)
++          save->cond_guard= ((Item_func_trig_cond*)cond)->get_trig_var();
++      }
++      return;
++    }
++  }
++
++  /* If item is of type 'field op field/constant' add it to key_fields */
++  if (cond->type() != Item::FUNC_ITEM)
++    return;
++  Item_func *cond_func= (Item_func*) cond;
++  switch (cond_func->select_optimize()) {
++  case Item_func::OPTIMIZE_NONE:
++    break;
++  case Item_func::OPTIMIZE_KEY:
++  {
++    Item **values;
++    /*
++      Build list of possible keys for 'a BETWEEN low AND high'.
++      It is handled similarly to the equivalent condition
++      'a >= low AND a <= high':
++    */
++    if (cond_func->functype() == Item_func::BETWEEN)
++    {
++      Item_field *field_item;
++      bool equal_func= FALSE;
++      uint num_values= 2;
++      values= cond_func->arguments();
++
++      bool binary_cmp= (values[0]->real_item()->type() == Item::FIELD_ITEM)
++            ? ((Item_field*)values[0]->real_item())->field->binary()
++            : TRUE;
++
++      /*
++        Additional optimization: If 'low = high':
++        Handle as if the condition was "t.key = low".
++      */
++      if (!((Item_func_between*)cond_func)->negated &&
++          values[1]->eq(values[2], binary_cmp))
++      {
++        equal_func= TRUE;
++        num_values= 1;
++      }
++
++      /*
++        Append keys for 'field <cmp> value[]' if the
++        condition is of the form:
++        '<field> BETWEEN value[1] AND value[2]'
++      */
++      if (is_local_field (values[0]))
++      {
++        field_item= (Item_field *) (values[0]->real_item());
++        add_key_equal_fields(key_fields, *and_level, cond_func,
++                             field_item, equal_func, &values[1],
++                             num_values, usable_tables, sargables);
++      }
++      /*
++        Append keys for 'value[0] <cmp> field' if the
++        condition is of the form:
++        'value[0] BETWEEN field1 AND field2'
++      */
++      for (uint i= 1; i <= num_values; i++)
++      {
++        if (is_local_field (values[i]))
++        {
++          field_item= (Item_field *) (values[i]->real_item());
++          add_key_equal_fields(key_fields, *and_level, cond_func,
++                               field_item, equal_func, values,
++                               1, usable_tables, sargables);
++        }
++      }
++    } // if ( ... Item_func::BETWEEN)
++
++    // IN, NE
++    else if (is_local_field (cond_func->key_item()) &&
++            !(cond_func->used_tables() & OUTER_REF_TABLE_BIT))
++    {
++      values= cond_func->arguments()+1;
++      if (cond_func->functype() == Item_func::NE_FUNC &&
++        is_local_field (cond_func->arguments()[1]))
++        values--;
++      DBUG_ASSERT(cond_func->functype() != Item_func::IN_FUNC ||
++                  cond_func->argument_count() != 2);
++      add_key_equal_fields(key_fields, *and_level, cond_func,
++                           (Item_field*) (cond_func->key_item()->real_item()),
++                           0, values, 
++                           cond_func->argument_count()-1,
++                           usable_tables, sargables);
++    }
++    break;
++  }
++  case Item_func::OPTIMIZE_OP:
++  {
++    bool equal_func=(cond_func->functype() == Item_func::EQ_FUNC ||
++		     cond_func->functype() == Item_func::EQUAL_FUNC);
++
++    if (is_local_field (cond_func->arguments()[0]))
++    {
++      add_key_equal_fields(key_fields, *and_level, cond_func,
++	                (Item_field*) (cond_func->arguments()[0])->real_item(),
++		           equal_func,
++                           cond_func->arguments()+1, 1, usable_tables,
++                           sargables);
++    }
++    if (is_local_field (cond_func->arguments()[1]) &&
++	cond_func->functype() != Item_func::LIKE_FUNC)
++    {
++      add_key_equal_fields(key_fields, *and_level, cond_func, 
++                       (Item_field*) (cond_func->arguments()[1])->real_item(),
++		           equal_func,
++                           cond_func->arguments(),1,usable_tables,
++                           sargables);
++    }
++    break;
++  }
++  case Item_func::OPTIMIZE_NULL:
++    /* column_name IS [NOT] NULL */
++    if (is_local_field (cond_func->arguments()[0]) &&
++	!(cond_func->used_tables() & OUTER_REF_TABLE_BIT))
++    {
++      Item *tmp=new Item_null;
++      if (unlikely(!tmp))                       // Should never be true
++	return;
++      add_key_equal_fields(key_fields, *and_level, cond_func,
++		    (Item_field*) (cond_func->arguments()[0])->real_item(),
++		    cond_func->functype() == Item_func::ISNULL_FUNC,
++			   &tmp, 1, usable_tables, sargables);
++    }
++    break;
++  case Item_func::OPTIMIZE_EQUAL:
++    Item_equal *item_equal= (Item_equal *) cond;
++    Item *const_item= item_equal->get_const();
++    Item_equal_iterator it(*item_equal);
++    Item_field *item;
++    if (const_item)
++    {
++      /*
++        For each field field1 from item_equal consider the equality 
++        field1=const_item as a condition allowing an index access of the table
++        with field1 by the keys value of field1.
++      */   
++      while ((item= it++))
++      {
++        add_key_field(key_fields, *and_level, cond_func, item->field,
++                      TRUE, &const_item, 1, usable_tables, sargables);
++      }
++    }
++    else 
++    {
++      /*
++        Consider all pairs of different fields included into item_equal.
++        For each of them (field1, field2) consider the equality
++        field1=field2 as a condition allowing an index access of the table
++        with field1 by the keys value of field2.
++      */   
++      Item_equal_iterator fi(*item_equal);
++      while ((item= fi++))
++      {
++        Field *field= item->field;
++        while ((item= it++))
++        {
++          if (!field->eq(item->field))
++          {
++            add_key_field(key_fields, *and_level, cond_func, field,
++                          TRUE, (Item **) &item, 1, usable_tables,
++                          sargables);
++          }
++        }
++        it.rewind();
++      }
++    }
++    break;
++  }
++}
++
++
++static uint
++max_part_bit(key_part_map bits)
++{
++  uint found;
++  for (found=0; bits & 1 ; found++,bits>>=1) ;
++  return found;
++}
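++
++/*
++  For example (illustrative values): max_part_bit() returns the number of
++  contiguous low-order bits set in the keypart bitmap, i.e. how many leading
++  key parts are usable:
++    max_part_bit(0x7) == 3    // key parts 0,1,2 covered
++    max_part_bit(0x5) == 1    // gap at key part 1, so only key part 0 counts
++*/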
++
++/*
++  Add all keys that use 'field' for some keypart
++  If field->and_level != and_level then only mark key_part as const_part
++
++  RETURN 
++   0 - OK
++   1 - Out of memory.
++*/
++
++static bool
++add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field)
++{
++  Field *field=key_field->field;
++  TABLE *form= field->table;
++  KEYUSE keyuse;
++
++  if (key_field->eq_func && !(key_field->optimize & KEY_OPTIMIZE_EXISTS))
++  {
++    for (uint key=0 ; key < form->s->keys ; key++)
++    {
++      if (!(form->keys_in_use_for_query.is_set(key)))
++	continue;
++      if (form->key_info[key].flags & (HA_FULLTEXT | HA_SPATIAL))
++	continue;    // ToDo: ft-keys in non-ft queries.   SerG
++
++      uint key_parts= (uint) form->key_info[key].key_parts;
++      for (uint part=0 ; part <  key_parts ; part++)
++      {
++	if (field->eq(form->key_info[key].key_part[part].field))
++	{
++	  keyuse.table= field->table;
++	  keyuse.val =  key_field->val;
++	  keyuse.key =  key;
++	  keyuse.keypart=part;
++	  keyuse.keypart_map= (key_part_map) 1 << part;
++	  keyuse.used_tables=key_field->val->used_tables();
++	  keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL;
++          keyuse.null_rejecting= key_field->null_rejecting;
++          keyuse.cond_guard= key_field->cond_guard;
++	  if (insert_dynamic(keyuse_array,(uchar*) &keyuse))
++            return TRUE;
++	}
++      }
++    }
++  }
++  return FALSE;
++}
++
++
++#define FT_KEYPART   (MAX_REF_PARTS+10)
++
++static bool
++add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
++            JOIN_TAB *stat,COND *cond,table_map usable_tables)
++{
++  Item_func_match *cond_func=NULL;
++
++  if (!cond)
++    return FALSE;
++
++  if (cond->type() == Item::FUNC_ITEM)
++  {
++    Item_func *func=(Item_func *)cond;
++    Item_func::Functype functype=  func->functype();
++    if (functype == Item_func::FT_FUNC)
++      cond_func=(Item_func_match *)cond;
++    else if (func->arg_count == 2)
++    {
++      Item *arg0= func->arguments()[0],
++           *arg1= func->arguments()[1];
++      if (arg1->const_item() && arg1->cols() == 1 &&
++          ((functype == Item_func::GE_FUNC && arg1->val_real() > 0) ||
++           (functype == Item_func::GT_FUNC && arg1->val_real() >= 0)) &&
++           arg0->type() == Item::FUNC_ITEM &&
++           ((Item_func *) arg0)->functype() == Item_func::FT_FUNC)
++        cond_func= (Item_func_match *) arg0;
++      else if (arg0->const_item() && arg0->cols() == 1 &&
++               ((functype == Item_func::LE_FUNC && arg0->val_real() > 0) ||
++                (functype == Item_func::LT_FUNC && arg0->val_real() >= 0)) &&
++                arg1->type() == Item::FUNC_ITEM &&
++                ((Item_func *) arg1)->functype() == Item_func::FT_FUNC)
++        cond_func= (Item_func_match *) arg1;
++    }
++  }
++  else if (cond->type() == Item::COND_ITEM)
++  {
++    List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
++
++    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++    {
++      Item *item;
++      while ((item=li++))
++      {
++        if (add_ft_keys(keyuse_array,stat,item,usable_tables))
++          return TRUE;
++      }
++    }
++  }
++
++  if (!cond_func || cond_func->key == NO_SUCH_KEY ||
++      !(usable_tables & cond_func->table->map))
++    return FALSE;
++
++  KEYUSE keyuse;
++  keyuse.table= cond_func->table;
++  keyuse.val =  cond_func;
++  keyuse.key =  cond_func->key;
++  keyuse.keypart= FT_KEYPART;
++  keyuse.used_tables=cond_func->key_item()->used_tables();
++  keyuse.optimize= 0;
++  keyuse.keypart_map= 0;
++  return insert_dynamic(keyuse_array,(uchar*) &keyuse);
++}
++
++
++static int
++sort_keyuse(KEYUSE *a,KEYUSE *b)
++{
++  int res;
++  if (a->table->tablenr != b->table->tablenr)
++    return (int) (a->table->tablenr - b->table->tablenr);
++  if (a->key != b->key)
++    return (int) (a->key - b->key);
++  if (a->keypart != b->keypart)
++    return (int) (a->keypart - b->keypart);
++  // Place const values before other ones
++  if ((res= test((a->used_tables & ~OUTER_REF_TABLE_BIT)) -
++       test((b->used_tables & ~OUTER_REF_TABLE_BIT))))
++    return res;
++  /* Place rows that are not 'OPTIMIZE_REF_OR_NULL' first */
++  return (int) ((a->optimize & KEY_OPTIMIZE_REF_OR_NULL) -
++		(b->optimize & KEY_OPTIMIZE_REF_OR_NULL));
++}
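++
++/*
++  Illustration with made-up entries: for table t1 with KEY k1(a,b), KEYUSE
++  entries for "a = 7", "a = t2.y" and "b = t2.x" end up ordered as
++      (t1, k1, keypart 0, a = 7)      -- constant value first
++    < (t1, k1, keypart 0, a = t2.y)
++    < (t1, k1, keypart 1, b = t2.x)
++  i.e. grouped by table and key, then by keypart, with constants placed
++  before references to other tables.
++*/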
++
++
++/*
++  Add to KEY_FIELD array all 'ref' access candidates within nested join.
++
++    This function populates KEY_FIELD array with entries generated from the 
++    ON condition of the given nested join, and does the same for nested joins 
++    contained within this nested join.
++
++  @param[in]      nested_join_table   Nested join pseudo-table to process
++  @param[in,out]  end                 End of the key field array
++  @param[in,out]  and_level           And-level
++  @param[in,out]  sargables           Array of found sargable candidates
++
++
++  @note
++    We can add accesses to the tables that are direct children of this nested 
++    join (1), and are not inner tables w.r.t their neighbours (2).
++    
++    Example for #1 (outer brackets pair denotes nested join this function is 
++    invoked for):
++    @code
++     ... LEFT JOIN (t1 LEFT JOIN (t2 ... ) ) ON cond
++    @endcode
++    Example for #2:
++    @code
++     ... LEFT JOIN (t1 LEFT JOIN t2 ) ON cond
++    @endcode
++    In examples 1-2 for condition cond, we can add 'ref' access candidates to 
++    t1 only.
++    Example #3:
++    @code
++     ... LEFT JOIN (t1, t2 LEFT JOIN t3 ON inner_cond) ON cond
++    @endcode
++    Here we can add 'ref' access candidates for t1 and t2, but not for t3.
++*/
++
++static void add_key_fields_for_nj(JOIN *join, TABLE_LIST *nested_join_table,
++                                  KEY_FIELD **end, uint *and_level,
++                                  SARGABLE_PARAM **sargables)
++{
++  List_iterator<TABLE_LIST> li(nested_join_table->nested_join->join_list);
++  table_map tables= 0;
++  TABLE_LIST *table;
++  DBUG_ASSERT(nested_join_table->nested_join);
++
++  while ((table= li++))
++  {
++    if (table->nested_join)
++      add_key_fields_for_nj(join, table, end, and_level, sargables);
++    else
++      if (!table->on_expr)
++        tables |= table->table->map;
++  }
++  add_key_fields(join, end, and_level, nested_join_table->on_expr, tables,
++                 sargables);
++}
++
++
++/**
++  Update keyuse array with all possible keys we can use to fetch rows.
++  
++  @param       thd 
++  @param[out]  keyuse         Put here ordered array of KEYUSE structures
++  @param       join_tab       Array in tablenr_order
++  @param       tables         Number of tables in join
++  @param       cond           WHERE condition (note that the function analyzes
++                              join_tab[i]->on_expr too)
++  @param       normal_tables  Tables not inner w.r.t some outer join (ones
++                              for which we can make ref access based the WHERE
++                              for which we can make ref access based on the WHERE
++  @param       select_lex     current SELECT
++  @param[out]  sargables      Array of found sargable candidates
++      
++   @retval
++     0  OK
++   @retval
++     1  Out of memory.
++*/
++
++static bool
++update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
++                    uint tables, COND *cond, COND_EQUAL *cond_equal,
++                    table_map normal_tables, SELECT_LEX *select_lex,
++                    SARGABLE_PARAM **sargables)
++{
++  uint	and_level,i,found_eq_constant;
++  KEY_FIELD *key_fields, *end, *field;
++  uint sz;
++  uint m= MYSQL_MAX(select_lex->max_equal_elems,1);
++  
++  /* 
++    We use the same piece of memory to store both KEY_FIELD
++    and SARGABLE_PARAM structures.
++    KEY_FIELD values are placed at the beginning of this memory
++    while SARGABLE_PARAM values are put at the end.
++    All predicates that are used to fill arrays of KEY_FIELD
++    and SARGABLE_PARAM structures have at most 2 arguments
++    except BETWEEN predicates that have 3 arguments and
++    IN predicates.
++    Thus any predicate, if it's not BETWEEN/IN, can be used
++    directly to fill at most 2 array elements, either of KEY_FIELD
++    or SARGABLE_PARAM type. For a BETWEEN predicate 3 elements
++    can be filled as this predicate is considered as
++    sargable with respect to each of its arguments.
++    An IN predicate can require at most 1 element as currently
++    it is considered as sargable only for its first argument.
++    Multiple equalities can add elements that are filled after
++    substitution of field arguments by equal fields. There
++    can be no more than select_lex->max_equal_elems such
++    substitutions.
++  */ 
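++  /*
++    A worked example with made-up counts: for cond_count=3, between_count=1
++    and max_equal_elems=2 (so m=2) the formula below reserves
++    ((3+1)*2 + 1)*2 + 1 = 19 elements, each sized to whichever of KEY_FIELD
++    and SARGABLE_PARAM is larger.
++  */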
++  sz= MYSQL_MAX(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))*
++      (((thd->lex->current_select->cond_count+1)*2 +
++	thd->lex->current_select->between_count)*m+1);
++  if (!(key_fields=(KEY_FIELD*)	thd->alloc(sz)))
++    return TRUE; /* purecov: inspected */
++  and_level= 0;
++  field= end= key_fields;
++  *sargables= (SARGABLE_PARAM *) key_fields + 
++                (sz - sizeof((*sargables)[0].field))/sizeof(SARGABLE_PARAM);
++  /* set a barrier for the array of SARGABLE_PARAM */
++  (*sargables)[0].field= 0; 
++
++  if (my_init_dynamic_array(keyuse,sizeof(KEYUSE),20,64))
++    return TRUE;
++  if (cond)
++  {
++    add_key_fields(join_tab->join, &end, &and_level, cond, normal_tables,
++                   sargables);
++    for (; field != end ; field++)
++    {
++      if (add_key_part(keyuse,field))
++        return TRUE;
++      /* Mark that we can optimize LEFT JOIN */
++      if (field->val->type() == Item::NULL_ITEM &&
++	  !field->field->real_maybe_null())
++	field->field->table->reginfo.not_exists_optimize=1;
++    }
++  }
++  for (i=0 ; i < tables ; i++)
++  {
++    /*
++      Block the creation of keys for inner tables of outer joins.
++      Here only the outer joins that can not be converted to
++      inner joins are left and all nests that can be eliminated
++      are flattened.
++      In the future when we introduce conditional accesses
++      for inner tables in outer joins these keys will be taken
++      into account as well.
++    */ 
++    if (*join_tab[i].on_expr_ref)
++      add_key_fields(join_tab->join, &end, &and_level, 
++                     *join_tab[i].on_expr_ref,
++                     join_tab[i].table->map, sargables);
++  }
++
++  /* Process ON conditions for the nested joins */
++  {
++    List_iterator<TABLE_LIST> li(*join_tab->join->join_list);
++    TABLE_LIST *table;
++    while ((table= li++))
++    {
++      if (table->nested_join)
++        add_key_fields_for_nj(join_tab->join, table, &end, &and_level, 
++                              sargables);
++    }
++  }
++
++  /* fill keyuse with found key parts */
++  for ( ; field != end ; field++)
++  {
++    if (add_key_part(keyuse,field))
++      return TRUE;
++  }
++
++  if (select_lex->ftfunc_list->elements)
++  {
++    if (add_ft_keys(keyuse,join_tab,cond,normal_tables))
++      return TRUE;
++  }
++
++  /*
++    Sort the array of possible keys and remove the following key parts:
++    - ref if there is a keypart which is a ref and a const.
++      (e.g. if there is a key(a,b) and the clause is a=3 and b=7 and b=t2.d,
++      then we skip the key part corresponding to b=t2.d)
++    - keyparts without previous keyparts
++      (e.g. if there is a key(a,b,c) but only b < 5 (or a=2 and c < 3) is
++      used in the query, we drop the partial key parts from consideration).
++    Special treatment for ft-keys.
++  */
++  if (keyuse->elements)
++  {
++    KEYUSE key_end,*prev,*save_pos,*use;
++
++    my_qsort(keyuse->buffer,keyuse->elements,sizeof(KEYUSE),
++	  (qsort_cmp) sort_keyuse);
++
++    bzero((char*) &key_end,sizeof(key_end));    /* Add for easy testing */
++    if (insert_dynamic(keyuse,(uchar*) &key_end))
++      return TRUE;
++
++    use=save_pos=dynamic_element(keyuse,0,KEYUSE*);
++    prev= &key_end;
++    found_eq_constant=0;
++    for (i=0 ; i < keyuse->elements-1 ; i++,use++)
++    {
++      if (!use->used_tables && use->optimize != KEY_OPTIMIZE_REF_OR_NULL)
++	use->table->const_key_parts[use->key]|= use->keypart_map;
++      if (use->keypart != FT_KEYPART)
++      {
++	if (use->key == prev->key && use->table == prev->table)
++	{
++	  if (prev->keypart+1 < use->keypart ||
++	      (prev->keypart == use->keypart && found_eq_constant))
++	    continue;				/* remove */
++	}
++	else if (use->keypart != 0)		// First found must be 0
++	  continue;
++      }
++
++#if defined(__GNUC__) && !MY_GNUC_PREREQ(4,4)
++      /*
++        Old gcc used a memcpy(), which is undefined if save_pos==use:
++        http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410
++        http://gcc.gnu.org/bugzilla/show_bug.cgi?id=39480
++      */
++      if (save_pos != use)
++#endif
++        *save_pos= *use;
++      prev=use;
++      found_eq_constant= !use->used_tables;
++      /* Save ptr to first use */
++      if (!use->table->reginfo.join_tab->keyuse)
++	use->table->reginfo.join_tab->keyuse=save_pos;
++      use->table->reginfo.join_tab->checked_keys.set_bit(use->key);
++      save_pos++;
++    }
++    i=(uint) (save_pos-(KEYUSE*) keyuse->buffer);
++    VOID(set_dynamic(keyuse,(uchar*) &key_end,i));
++    keyuse->elements=i;
++  }
++  return FALSE;
++}
++
++/**
++  Update some values in keyuse for faster choose_plan() loop.
++*/
++
++static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
++{
++  KEYUSE *end,*keyuse= dynamic_element(keyuse_array, 0, KEYUSE*);
++
++  for (end= keyuse+ keyuse_array->elements ; keyuse < end ; keyuse++)
++  {
++    table_map map;
++    /*
++      If we find a ref, assume this table matches a proportional
++      part of this table.
++      For example 100 records matching a table with 5000 records
++      gives 5000/100 = 50 records per key
++      Constant tables are ignored.
++      To avoid bad matches, we don't make ref_table_rows less than 100.
++    */
++    keyuse->ref_table_rows= ~(ha_rows) 0;	// If no ref
++    if (keyuse->used_tables &
++	(map= (keyuse->used_tables & ~join->const_table_map &
++	       ~OUTER_REF_TABLE_BIT)))
++    {
++      uint tablenr;
++      for (tablenr=0 ; ! (map & 1) ; map>>=1, tablenr++) ;
++      if (map == 1)			// Only one table
++      {
++	TABLE *tmp_table=join->all_tables[tablenr];
++	keyuse->ref_table_rows= MYSQL_MAX(tmp_table->file->stats.records, 100);
++      }
++    }
++    /*
++      An outer reference (external field) is constant for a single execution
++      of the subquery
++    */
++    if (keyuse->used_tables == OUTER_REF_TABLE_BIT)
++      keyuse->ref_table_rows= 1;
++  }
++}
++
++
++/**
++  Discover the indexes that can be used for GROUP BY or DISTINCT queries.
++
++  If the query has a GROUP BY clause, find all indexes that contain all
++  GROUP BY fields, and add those indexes to join->const_keys.
++
++  If the query has a DISTINCT clause, find all indexes that contain all
++  SELECT fields, and add those indexes to join->const_keys.
++  This allows later on such queries to be processed by a
++  QUICK_GROUP_MIN_MAX_SELECT.
++
++  @param join
++  @param join_tab
++
++  @return
++    None
++*/
++
++static void
++add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
++{
++  List<Item_field> indexed_fields;
++  List_iterator<Item_field> indexed_fields_it(indexed_fields);
++  ORDER      *cur_group;
++  Item_field *cur_item;
++  key_map possible_keys(0);
++
++  if (join->group_list)
++  { /* Collect all query fields referenced in the GROUP clause. */
++    for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
++      (*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
++                               (uchar*) &indexed_fields);
++  }
++  else if (join->select_distinct)
++  { /* Collect all query fields referenced in the SELECT clause. */
++    List<Item> &select_items= join->fields_list;
++    List_iterator<Item> select_items_it(select_items);
++    Item *item;
++    while ((item= select_items_it++))
++      item->walk(&Item::collect_item_field_processor, 0,
++                 (uchar*) &indexed_fields);
++  }
++  else
++    return;
++
++  if (indexed_fields.elements == 0)
++    return;
++
++  /* Intersect the keys of all group fields. */
++  cur_item= indexed_fields_it++;
++  possible_keys.merge(cur_item->field->part_of_key);
++  while ((cur_item= indexed_fields_it++))
++  {
++    possible_keys.intersect(cur_item->field->part_of_key);
++  }
++
++  if (!possible_keys.is_clear_all())
++    join_tab->const_keys.merge(possible_keys);
++}
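++
++/*
++  For instance (made-up schema): with GROUP BY t1.a, t1.b and indexes
++  idx1(a,b,c) and idx2(b,d), only idx1 contains both group fields, so only
++  idx1 is merged into join_tab->const_keys and can later be considered for
++  QUICK_GROUP_MIN_MAX_SELECT.
++*/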
++
++
++/*****************************************************************************
++  Go through all combinations of not marked tables and find the one
++  which uses least records
++*****************************************************************************/
++
++/** Save const tables first as used tables. */
++
++static void
++set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
++{
++  join->positions[idx].table= table;
++  join->positions[idx].key=key;
++  join->positions[idx].records_read=1.0;	/* This is a const table */
++  join->positions[idx].ref_depend_map= 0;
++
++  /* Move the const table as far down as possible in best_ref */
++  JOIN_TAB **pos=join->best_ref+idx+1;
++  JOIN_TAB *next=join->best_ref[idx];
++  for (;next != table ; pos++)
++  {
++    JOIN_TAB *tmp=pos[0];
++    pos[0]=next;
++    next=tmp;
++  }
++  join->best_ref[idx]=table;
++}
++
++
++/**
++  Find the best access path for an extension of a partial execution
++  plan and add this path to the plan.
++
++  The function finds the best access path to table 's' from the passed
++  partial plan where an access path is the general term for any means to
++  access the data in 's'. An access path may use either an index or a scan,
++  whichever is cheaper. The input partial plan is passed via the array
++  'join->positions' of length 'idx'. The chosen access method for 's' and its
++  cost are stored in 'join->positions[idx]'.
++
++  @param join             pointer to the structure providing all context info
++                          for the query
++  @param s                the table to be joined by the function
++  @param thd              thread for the connection that submitted the query
++  @param remaining_tables set of tables not included into the partial plan yet
++  @param idx              the length of the partial plan
++  @param record_count     estimate for the number of records returned by the
++                          partial plan
++  @param read_time        the cost of the partial plan
++
++  @return
++    None
++*/
++
++static void
++best_access_path(JOIN      *join,
++                 JOIN_TAB  *s,
++                 THD       *thd,
++                 table_map remaining_tables,
++                 uint      idx,
++                 double    record_count,
++                 double    read_time)
++{
++  KEYUSE *best_key=         0;
++  uint best_max_key_part=   0;
++  my_bool found_constraint= 0;
++  double best=              DBL_MAX;
++  double best_time=         DBL_MAX;
++  double records=           DBL_MAX;
++  table_map best_ref_depends_map= 0;
++  double tmp;
++  ha_rows rec;
++  DBUG_ENTER("best_access_path");
++
++  if (s->keyuse)
++  {                                            /* Use key if possible */
++    TABLE *table= s->table;
++    KEYUSE *keyuse,*start_key=0;
++    double best_records= DBL_MAX;
++    uint max_key_part=0;
++
++    /* Test how we can use keys */
++    rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE;  // Assumed records/key
++    for (keyuse=s->keyuse ; keyuse->table == table ;)
++    {
++      key_part_map found_part= 0;
++      table_map found_ref= 0;
++      uint key= keyuse->key;
++      KEY *keyinfo= table->key_info+key;
++      bool ft_key=  (keyuse->keypart == FT_KEYPART);
++      /* Bitmap of keyparts where the ref access is over 'keypart=const': */
++      key_part_map const_part= 0;
++      /* The or-null keypart in ref-or-null access: */
++      key_part_map ref_or_null_part= 0;
++
++      /* Calculate how many key segments of the current key we can use */
++      start_key= keyuse;
++
++      do /* For each keypart */
++      {
++        uint keypart= keyuse->keypart;
++        table_map best_part_found_ref= 0;
++        double best_prev_record_reads= DBL_MAX;
++        
++        do /* For each way to access the keypart */
++        {
++
++          /*
++            if 1. expression doesn't refer to forward tables
++               2. we won't get two ref-or-null's
++          */
++          if (!(remaining_tables & keyuse->used_tables) &&
++              !(ref_or_null_part && (keyuse->optimize &
++                                     KEY_OPTIMIZE_REF_OR_NULL)))
++          {
++            found_part|= keyuse->keypart_map;
++            if (!(keyuse->used_tables & ~join->const_table_map))
++              const_part|= keyuse->keypart_map;
++
++            double tmp2= prev_record_reads(join, idx, (found_ref |
++                                                      keyuse->used_tables));
++            if (tmp2 < best_prev_record_reads)
++            {
++              best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
++              best_prev_record_reads= tmp2;
++            }
++            if (rec > keyuse->ref_table_rows)
++              rec= keyuse->ref_table_rows;
++	    /*
++	      If there is one 'key_column IS NULL' expression, we can
++	      use the ref_or_null optimisation on this field
++	    */
++            if (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL)
++              ref_or_null_part |= keyuse->keypart_map;
++          }
++          keyuse++;
++        } while (keyuse->table == table && keyuse->key == key &&
++                 keyuse->keypart == keypart);
++	found_ref|= best_part_found_ref;
++      } while (keyuse->table == table && keyuse->key == key);
++
++      /*
++        Assume that each key matches a proportional part of the table.
++      */
++      if (!found_part && !ft_key)
++        continue;                               // Nothing usable found
++
++      if (rec < MATCHING_ROWS_IN_OTHER_TABLE)
++        rec= MATCHING_ROWS_IN_OTHER_TABLE;      // Fix for small tables
++
++      /*
++        ft-keys require special treatment
++      */
++      if (ft_key)
++      {
++        /*
++          Really, there should be records=0.0 (yes!)
++          but 1.0 would be probably safer
++        */
++        tmp= prev_record_reads(join, idx, found_ref);
++        records= 1.0;
++      }
++      else
++      {
++        found_constraint= 1;
++        /*
++          Check if we found full key
++        */
++        if (found_part == PREV_BITS(uint,keyinfo->key_parts) &&
++            !ref_or_null_part)
++        {                                         /* use eq key */
++          max_key_part= (uint) ~0;
++          if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)
++          {
++            tmp = prev_record_reads(join, idx, found_ref);
++            records=1.0;
++          }
++          else
++          {
++            if (!found_ref)
++            {                                     /* We found a const key */
++              /*
++                ReuseRangeEstimateForRef-1:
++                We get here if we've found a ref(const) (c_i are constants):
++                  "(keypart1=c1) AND ... AND (keypartN=cN)"   [ref_const_cond]
++                
++                If range optimizer was able to construct a "range" 
++                access on this index, then its condition "quick_cond" was
++                equivalent to ref_const_cond (*), and we can re-use E(#rows)
++                from the range optimizer.
++
++                Proof of (*): By properties of range and ref optimizers
++                quick_cond will be equal or tighter than ref_const_cond.
++                ref_const_cond already covers the "smallest" possible interval -
++                a single-point interval over all keyparts. Therefore,
++                quick_cond is equivalent to ref_const_cond (if it was an
++                empty interval we wouldn't have got here).
++              */
++              if (table->quick_keys.is_set(key))
++                records= (double) table->quick_rows[key];
++              else
++              {
++                /* quick_range couldn't use key! */
++                records= (double) s->records/rec;
++              }
++            }
++            else
++            {
++              if (!(records=keyinfo->rec_per_key[keyinfo->key_parts-1]))
++              {                                   /* Prefer longer keys */
++                records=
++                  ((double) s->records / (double) rec *
++                   (1.0 +
++                    ((double) (table->s->max_key_length-keyinfo->key_length) /
++                     (double) table->s->max_key_length)));
++                if (records < 2.0)
++                  records=2.0;               /* Can't be as good as a unique */
++              }
++              /*
++                ReuseRangeEstimateForRef-2:  We get here if we could not reuse
++                E(#rows) from range optimizer. Make another try:
++                
++                If range optimizer produced E(#rows) for a prefix of the ref
++                access we're considering, and that E(#rows) is lower than our
++                current estimate, make an adjustment. The criteria of when we
++                can make an adjustment is a special case of the criteria used
++                in ReuseRangeEstimateForRef-3.
++              */
++              if (table->quick_keys.is_set(key) &&
++                  const_part & (1 << table->quick_key_parts[key]) &&
++                  table->quick_n_ranges[key] == 1 &&
++                  records > (double) table->quick_rows[key])
++              {
++                records= (double) table->quick_rows[key];
++              }
++            }
++            /* Limit the number of matched rows */
++            tmp= records;
++            set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
++            if (table->covering_keys.is_set(key))
++            {
++              /* we can use only index tree */
++              uint keys_per_block= table->file->stats.block_size/2/
++                (keyinfo->key_length+table->file->ref_length)+1;
++              tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
++            }
++            else
++              tmp= record_count*MYSQL_MIN(tmp,s->worst_seeks);
++          }
++        }
++        else
++        {
++          /*
++            Use as many key parts as possible; a unique key is better
++            than a non-unique key.
++            Set tmp to (previous record count) * (records / combination)
++          */
++          if ((found_part & 1) &&
++              (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
++               found_part == PREV_BITS(uint,keyinfo->key_parts)))
++          {
++            max_key_part= max_part_bit(found_part);
++            /*
++              ReuseRangeEstimateForRef-3:
++              We're now considering a ref[or_null] access via
++              (t.keypart1=e1 AND ... AND t.keypartK=eK) [ OR  
++              (same-as-above but with one cond replaced 
++               with "t.keypart_i IS NULL")]  (**)
++              
++              Try re-using E(#rows) from "range" optimizer:
++              We can do so if "range" optimizer used the same intervals as
++              in (**). The intervals used by range optimizer may be not 
++              available at this point (as "range" access might have chosen to
++              create quick select over another index), so we can't compare
++              them to (**). We'll make indirect judgements instead.
++              The sufficient conditions for re-use are:
++              (C1) All e_i in (**) are constants, i.e. found_ref==FALSE. (if
++                   this is not satisfied we have no way to know which ranges
++                   will be actually scanned by 'ref' until we execute the 
++                   join)
++              (C2) max #key parts in 'range' access == K == max_key_part (this
++                   is apparently a necessary requirement)
++
++              We also have a property that "range optimizer produces equal or 
++              tighter set of scan intervals than ref(const) optimizer". Each
++              of the intervals in (**) is a "tightest possible" interval when
++              one limits itself to using keyparts 1..K (which we do in #2).              
++              From here it follows that range access used either one, or
++              both of the (I1) and (I2) intervals:
++              
++               (t.keypart1=c1 AND ... AND t.keypartK=eK)  (I1) 
++               (same-as-above but with one cond replaced  
++                with "t.keypart_i IS NULL")               (I2)
++
++              The remaining part is to exclude the situation where range
++              optimizer used one interval while we're considering
++              ref-or-null and looking for an estimate for two intervals. This
++              is done by the last limitation:
++
++              (C3) "range optimizer used (have ref_or_null?2:1) intervals"
++            */
++            if (table->quick_keys.is_set(key) && !found_ref &&          //(C1)
++                table->quick_key_parts[key] == max_key_part &&          //(C2)
++                table->quick_n_ranges[key] == 1+test(ref_or_null_part)) //(C3)
++            {
++              tmp= records= (double) table->quick_rows[key];
++            }
++            else
++            {
++              /* Check if we have statistic about the distribution */
++              if ((records= keyinfo->rec_per_key[max_key_part-1]))
++              {
++                /* 
++                  Fix for the case where the index statistics is too
++                  optimistic: If 
++                  (1) We're considering ref(const) and there is quick select
++                      on the same index, 
++                  (2) and that quick select uses more keyparts (i.e. it will
++                      scan an equal/smaller interval than this ref(const))
++                  (3) and E(#rows) for quick select is higher than our
++                      estimate,
++                  Then 
++                    We'll use E(#rows) from quick select.
++
++                  Q: Why do we choose to use 'ref'? Won't quick select be
++                  cheaper in some cases?
++                  TODO: figure this out and adjust the plan choice if needed.
++                */
++                if (!found_ref && table->quick_keys.is_set(key) &&    // (1)
++                    table->quick_key_parts[key] > max_key_part &&     // (2)
++                    records < (double)table->quick_rows[key])         // (3)
++                  records= (double)table->quick_rows[key];
++
++                tmp= records;
++              }
++              else
++              {
++                /*
++                  Assume that the first key part matches 1% of the file
++                  and that the whole key matches 10 (duplicates) or 1
++                  (unique) records.
++                  Assume also that matching more key parts matches
++                  proportionally more records.
++                  This gives the formula:
++                  records = (x * (b-a) + a*c-b)/(c-1)
++
++                  b = records matched by whole key
++                  a = records matched by first key part (1% of all records?)
++                  c = number of key parts in key
++                  x = used key parts (1 <= x <= c)
++                */
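++                /*
++                  A quick check of the formula with made-up numbers:
++                  s->records=1000 gives a=10 (1%); say the whole key matches
++                  b=2 rows and the key has c=3 parts. Using x=2 key parts:
++                    records = (2*(2-10) + 10*3 - 2)/(3-1) = 6
++                  which lies between a (x=1 -> 10) and b (x=3 -> 2).
++                */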
++                double rec_per_key;
++                if (!(rec_per_key=(double)
++                      keyinfo->rec_per_key[keyinfo->key_parts-1]))
++                  rec_per_key=(double) s->records/rec+1;
++
++                if (!s->records)
++                  tmp = 0;
++                else if (rec_per_key/(double) s->records >= 0.01)
++                  tmp = rec_per_key;
++                else
++                {
++                  double a=s->records*0.01;
++                  if (keyinfo->key_parts > 1)
++                    tmp= (max_key_part * (rec_per_key - a) +
++                          a*keyinfo->key_parts - rec_per_key)/
++                         (keyinfo->key_parts-1);
++                  else
++                    tmp= a;
++                  set_if_bigger(tmp,1.0);
++                }
++                records = (ulong) tmp;
++              }
++
++              if (ref_or_null_part)
++              {
++                /* We need to do two key searches to find key */
++                tmp *= 2.0;
++                records *= 2.0;
++              }
++
++              /*
++                ReuseRangeEstimateForRef-4:  We get here if we could not reuse
++                E(#rows) from range optimizer. Make another try:
++                
++                If range optimizer produced E(#rows) for a prefix of the ref 
++                access we're considering, and that E(#rows) is lower than our
++                current estimate, make the adjustment.
++
++                The decision whether we can re-use the estimate from the range
++                optimizer is the same as in ReuseRangeEstimateForRef-3,
++                applied to first table->quick_key_parts[key] key parts.
++              */
++              if (table->quick_keys.is_set(key) &&
++                  table->quick_key_parts[key] <= max_key_part &&
++                  const_part & (1 << table->quick_key_parts[key]) &&
++                  table->quick_n_ranges[key] == 1 + test(ref_or_null_part &
++                                                         const_part) &&
++                  records > (double) table->quick_rows[key])
++              {
++                tmp= records= (double) table->quick_rows[key];
++              }
++            }
++
++            /* Limit the number of matched rows */
++            set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
++            if (table->covering_keys.is_set(key))
++            {
++              /* we can use only index tree */
++              uint keys_per_block= table->file->stats.block_size/2/
++                (keyinfo->key_length+table->file->ref_length)+1;
++              tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
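++              /*
++                A rough sketch with made-up numbers: block_size = 16384,
++                key_length = 12 and ref_length = 6 give
++                keys_per_block = 16384/2/18 + 1 = 456. With tmp = 1000
++                matching keys and record_count = 10 the estimate becomes
++                10 * (1000 + 455) / 456 ~= 31.9, i.e. roughly one read
++                per index block of matching keys.
++              */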
++            }
++            else
++              tmp= record_count*MYSQL_MIN(tmp,s->worst_seeks);
++          }
++          else
++            tmp= best_time;                    // Do nothing
++        }
++      } /* not ft_key */
++      if (tmp < best_time - records/(double) TIME_FOR_COMPARE)
++      {
++        best_time= tmp + records/(double) TIME_FOR_COMPARE;
++        best= tmp;
++        best_records= records;
++        best_key= start_key;
++        best_max_key_part= max_key_part;
++        best_ref_depends_map= found_ref;
++      }
++    }
++    records= best_records;
++  }
++
++  /*
++    Don't test table scan if it can't be better.
++    Prefer key lookup if we would use the same key for scanning.
++
++    Don't do a table scan on InnoDB tables, if we can read the used
++    parts of the row from any of the used indexes.
++    This is because a table scan on such tables goes through an index
++    anyway, so we would not win anything by using a table scan.
++
++    A word for word translation of the below if-statement in psergey's
++    understanding: we check if we should use table scan if:
++    (1) The found 'ref' access produces more records than a table scan
++        (or index scan, or quick select), or 'ref' is more expensive than
++        any of them.
++    (2) This doesn't hold: the best way to perform table scan is to perform
++        'range' access using index IDX, and the best way to perform 'ref' 
++        access is to use the same index IDX, with the same or more key parts.
++        (note: it is not clear how this rule is/should be extended to 
++        index_merge quick selects)
++    (3) See above note about InnoDB.
++    (4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access
++             path, but there is no quick select)
++        If the condition in the above brackets holds, then the only possible
++        "table scan" access method is ALL/index (there is no quick select).
++        Since we have a 'ref' access path, and FORCE INDEX instructs us to
++        choose it over ALL/index, there is no need to consider a full table
++        scan.
++  */
++  if ((records >= s->found_records || best > s->read_time) &&            // (1)
++      !(s->quick && best_key && s->quick->index == best_key->key &&      // (2)
++        best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
++      !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) &&   // (3)
++        ! s->table->covering_keys.is_clear_all() && best_key && !s->quick) &&// (3)
++      !(s->table->force_index && best_key && !s->quick))                 // (4)
++  {                                             // Check full join
++    ha_rows rnd_records= s->found_records;
++    /*
++      If there is a filtering condition on the table (i.e. ref analyzer found
++      at least one "table.keyXpartY= exprZ", where exprZ refers only to tables
++      preceding this table in the join order we're now considering), then 
++      assume that 25% of the rows will be filtered out by this condition.
++
++      This heuristic is supposed to force tables used in exprZ to be before
++      this table in join order.
++    */
++    if (found_constraint)
++      rnd_records-= rnd_records/4;
++
++    /*
++      If applicable, get a more accurate estimate. Don't use the two
++      heuristics at once.
++    */
++    if (s->table->quick_condition_rows != s->found_records)
++      rnd_records= s->table->quick_condition_rows;
++
++    /*
++      Range optimizer never proposes a RANGE if it isn't better
++      than FULL: so if RANGE is present, it's always preferred to FULL.
++      Here we estimate its cost.
++    */
++    if (s->quick)
++    {
++      /*
++        For each record we:
++        - read record range through 'quick'
++        - skip rows which do not satisfy WHERE constraints
++        TODO: 
++        We take into account possible use of join cache for ALL/index
++        access (see first else-branch below), but we don't take it into 
++        account here for range/index_merge access. Find out why this is so.
++      */
++      tmp= record_count *
++        (s->quick->read_time +
++         (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE);
++    }
++    else
++    {
++      /* Estimate cost of reading table. */
++      tmp= s->table->file->scan_time();
++      if (s->table->map & join->outer_join)     // Can't use join cache
++      {
++        /*
++          For each record we have to:
++          - read the whole table record 
++          - skip rows which do not satisfy join condition
++        */
++        tmp= record_count *
++          (tmp +
++           (s->records - rnd_records)/(double) TIME_FOR_COMPARE);
++      }
++      else
++      {
++        /* We read the table as many times as the join buffer becomes full. */
++        tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
++                           record_count /
++                           (double) thd->variables.join_buff_size));
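++        /*
++          Hypothetical illustration of the factor above: with a cached row
++          length of 100 bytes, record_count = 100000 and
++          join_buff_size = 131072, the join buffer has to be refilled
++          floor(100 * 100000 / 131072) = 76 times, so the scan cost is
++          multiplied by 1 + 76 = 77.
++        */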
++        /*
++          We don't make a full cartesian product between rows in the scanned
++          table and the existing records, because we skip all rows from the
++          scanned table that do not satisfy the join condition when
++          we read the table (see flush_cached_records for details). Here we
++          take into account the cost to read and skip these records.
++        */
++        tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
++      }
++    }
++
++    /*
++      We estimate the cost of evaluating WHERE clause for found records
++      as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus
++      tmp gives us the total cost of using TABLE SCAN.
++    */
++    if (best == DBL_MAX ||
++        (tmp  + record_count/(double) TIME_FOR_COMPARE*rnd_records <
++         best + record_count/(double) TIME_FOR_COMPARE*records))
++    {
++      /*
++        If the table has a range (s->quick is set) make_join_select()
++        will ensure that this will be used
++      */
++      best= tmp;
++      records= rows2double(rnd_records);
++      best_key= 0;
++      /* range/index_merge/ALL/index access method are "independent", so: */
++      best_ref_depends_map= 0;
++    }
++  }
++
++  /* Update the cost information for the current partial plan */
++  join->positions[idx].records_read= records;
++  join->positions[idx].read_time=    best;
++  join->positions[idx].key=          best_key;
++  join->positions[idx].table=        s;
++  join->positions[idx].ref_depend_map= best_ref_depends_map;
++
++  if (!best_key &&
++      idx == join->const_tables &&
++      s->table == join->sort_by_table &&
++      join->unit->select_limit_cnt >= records)
++    join->sort_by_table= (TABLE*) 1;  // Must use temporary table
++
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Selects and invokes a search strategy for an optimal query plan.
++
++  The function checks user-configurable parameters that control the search
++  strategy for an optimal plan, selects the search method and then invokes
++  it. Each specific optimization procedure stores the final optimal plan in
++  the array 'join->best_positions', and the cost of the plan in
++  'join->best_read'.
++
++  @param join         pointer to the structure providing all context info for
++                      the query
++  @param join_tables  set of the tables in the query
++
++  @todo
++    'MAX_TABLES+2' denotes the old implementation of find_best before
++    the greedy version. Will be removed when greedy_search is approved.
++
++  @retval
++    FALSE       ok
++  @retval
++    TRUE        Fatal error
++*/
++
++static bool
++choose_plan(JOIN *join, table_map join_tables)
++{
++  uint search_depth= join->thd->variables.optimizer_search_depth;
++  uint prune_level=  join->thd->variables.optimizer_prune_level;
++  bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
++  DBUG_ENTER("choose_plan");
++
++  join->cur_embedding_map= 0;
++  reset_nj_counters(join->join_list);
++  /*
++    if (SELECT_STRAIGHT_JOIN option is set)
++      reorder tables so dependent tables come after tables they depend 
++      on, otherwise keep tables in the order they were specified in the query 
++    else
++      Apply heuristic: pre-sort all access plans with respect to the number of
++      records accessed.
++  */
++  my_qsort(join->best_ref + join->const_tables,
++           join->tables - join->const_tables, sizeof(JOIN_TAB*),
++           straight_join ? join_tab_cmp_straight : join_tab_cmp);
++  
++  if (straight_join)
++  {
++    optimize_straight_join(join, join_tables);
++  }
++  else
++  {
++    if (search_depth == MAX_TABLES+2)
++    { /*
++        TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before
++        the greedy version. Will be removed when greedy_search is approved.
++      */
++      join->best_read= DBL_MAX;
++      if (find_best(join, join_tables, join->const_tables, 1.0, 0.0))
++        DBUG_RETURN(TRUE);
++    } 
++    else
++    {
++      if (search_depth == 0)
++        /* Automatically determine a reasonable value for 'search_depth' */
++        search_depth= determine_search_depth(join);
++      if (greedy_search(join, join_tables, search_depth, prune_level))
++        DBUG_RETURN(TRUE);
++    }
++  }
++
++  /* 
++    Store the cost of this query into a user variable
++    Don't update last_query_cost for statements that are not "flat joins" :
++    i.e. they have subqueries, unions or call stored procedures.
++    TODO: calculate a correct cost for a query with subqueries and UNIONs.
++  */
++  if (join->thd->lex->is_single_level_stmt())
++    join->thd->status_var.last_query_cost= join->best_read;
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  Compare two JOIN_TAB objects based on the number of accessed records.
++
++  @param ptr1 pointer to first JOIN_TAB object
++  @param ptr2 pointer to second JOIN_TAB object
++
++  NOTES
++    The order relation implemented by join_tab_cmp() is not transitive,
++    i.e. it is possible to choose a, b and c such that (a < b) && (b < c)
++    but (c < a). This implies that the result of a sort using the relation
++    implemented by join_tab_cmp() depends on the order in which
++    elements are compared, i.e. the result is implementation-specific.
++    Example:
++      a: dependent = 0x0 table->map = 0x1 found_records = 3 ptr = 0x907e6b0
++      b: dependent = 0x0 table->map = 0x2 found_records = 3 ptr = 0x907e838
++      c: dependent = 0x6 table->map = 0x10 found_records = 2 ptr = 0x907ecd0
++    Here a < b (equal found_records, so pointers decide), b < c (c depends
++    on b's table), but c < a (fewer found_records, no dependency on a),
++    which forms a cycle.
++
++  @retval
++    1  if first is bigger
++  @retval
++    -1  if second is bigger
++  @retval
++    0  if equal
++*/
++
++static int
++join_tab_cmp(const void* ptr1, const void* ptr2)
++{
++  JOIN_TAB *jt1= *(JOIN_TAB**) ptr1;
++  JOIN_TAB *jt2= *(JOIN_TAB**) ptr2;
++
++  if (jt1->dependent & jt2->table->map)
++    return 1;
++  if (jt2->dependent & jt1->table->map)
++    return -1;  
++  if (jt1->found_records > jt2->found_records)
++    return 1;
++  if (jt1->found_records < jt2->found_records)
++    return -1; 
++  return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0);
++}
++
++
++/**
++  Same as join_tab_cmp, but for use with SELECT_STRAIGHT_JOIN.
++*/
++
++static int
++join_tab_cmp_straight(const void* ptr1, const void* ptr2)
++{
++  JOIN_TAB *jt1= *(JOIN_TAB**) ptr1;
++  JOIN_TAB *jt2= *(JOIN_TAB**) ptr2;
++
++  if (jt1->dependent & jt2->table->map)
++    return 1;
++  if (jt2->dependent & jt1->table->map)
++    return -1;
++  return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0);
++}
++
++/**
++  Heuristic procedure to automatically guess a reasonable degree of
++  exhaustiveness for the greedy search procedure.
++
++  The procedure estimates the optimization time and selects a search depth
++  big enough to result in a near-optimal QEP that doesn't take too long to
++  find. If the number of tables in the query exceeds some constant, then
++  search_depth is set to this constant.
++
++  @param join   pointer to the structure providing all context info for
++                the query
++
++  @note
++    This is an extremely simplistic implementation that serves as a stub for a
++    more advanced analysis of the join. Ideally the search depth should be
++    determined by learning from previous query optimizations, because it will
++    depend on the CPU power (and other factors).
++
++  @todo
++    this value should be determined dynamically, based on statistics:
++    uint max_tables_for_exhaustive_opt= 7;
++
++  @todo
++    this value could be determined by some mapping of the form:
++    depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE]
++
++  @return
++    A positive integer that specifies the search depth (and thus the
++    exhaustiveness) of the depth-first search algorithm used by
++    'greedy_search'.
++*/
++
++static uint
++determine_search_depth(JOIN *join)
++{
++  uint table_count=  join->tables - join->const_tables;
++  uint search_depth;
++  /* TODO: this value should be determined dynamically, based on statistics: */
++  uint max_tables_for_exhaustive_opt= 7;
++
++  if (table_count <= max_tables_for_exhaustive_opt)
++    search_depth= table_count+1; // use exhaustive for small number of tables
++  else
++    /*
++      TODO: this value could be determined by some mapping of the form:
++      depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE]
++    */
++    search_depth= max_tables_for_exhaustive_opt; // use greedy search
++
++  return search_depth;
++}
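++/*
++  For illustration (table counts picked arbitrarily): a join with 5
++  non-const tables gets search_depth = 5 + 1 = 6, i.e. an effectively
++  exhaustive search, while a join with 20 non-const tables is capped at
++  search_depth = 7 and is optimized greedily beyond that depth.
++*/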
++
++
++/**
++  Select the best ways to access the tables in a query without reordering them.
++
++    Find the best access paths for each query table and compute their costs
++    according to their order in the array 'join->best_ref' (thus without
++    reordering the join tables). The function calls sequentially
++    'best_access_path' for each table in the query to select the best table
++    access method. The final optimal plan is stored in the array
++    'join->best_positions', and the corresponding cost in 'join->best_read'.
++
++  @param join          pointer to the structure providing all context info for
++                       the query
++  @param join_tables   set of the tables in the query
++
++  @note
++    This function can be applied to:
++    - queries with STRAIGHT_JOIN
++    - internally to compute the cost of an arbitrary QEP
++  @par
++    Thus 'optimize_straight_join' can be used at any stage of the query
++    optimization process to finalize a QEP as it is.
++*/
++
++static void
++optimize_straight_join(JOIN *join, table_map join_tables)
++{
++  JOIN_TAB *s;
++  uint idx= join->const_tables;
++  double    record_count= 1.0;
++  double    read_time=    0.0;
++ 
++  for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
++  {
++    /* Find the best access method from 's' to the current partial plan */
++    best_access_path(join, s, join->thd, join_tables, idx,
++                     record_count, read_time);
++    /* compute the cost of the new plan extended with 's' */
++    record_count*= join->positions[idx].records_read;
++    read_time+=    join->positions[idx].read_time;
++    join_tables&= ~(s->table->map);
++    ++idx;
++  }
++
++  read_time+= record_count / (double) TIME_FOR_COMPARE;
++  if (join->sort_by_table &&
++      join->sort_by_table != join->positions[join->const_tables].table->table)
++    read_time+= record_count;  // We have to make a temp table
++  memcpy((uchar*) join->best_positions, (uchar*) join->positions,
++         sizeof(POSITION)*idx);
++  join->best_read= read_time;
++}
++
++
++/**
++  Find a good, possibly optimal, query execution plan (QEP) by a greedy search.
++
++    The search procedure uses a hybrid greedy/exhaustive search with controlled
++    exhaustiveness. The search is performed in N = card(remaining_tables)
++    steps. Each step evaluates how promising each of the unoptimized tables is,
++    selects the most promising table, and extends the current partial QEP with
++    that table.  Currently the most 'promising' table is the one with the least
++    expensive extension.
++
++    There are two extreme cases:
++    -# When (card(remaining_tables) < search_depth), the estimate finds the
++    best complete continuation of the partial QEP. This continuation can be
++    used directly as a result of the search.
++    -# When (search_depth == 1) the 'best_extension_by_limited_search'
++    considers the extension of the current QEP with each of the remaining
++    unoptimized tables.
++
++    All other cases are in-between these two extremes. Thus the parameter
++    'search_depth' controls the exhaustiveness of the search. The higher the
++    value, the longer the optimization time and possibly the better the
++    resulting plan. The lower the value, the fewer alternative plans are
++    estimated, but the more likely it is to get a bad QEP.
++
++    All intermediate and final results of the procedure are stored in 'join':
++    - join->positions     : modified for every partial QEP that is explored
++    - join->best_positions: modified for the current best complete QEP
++    - join->best_read     : modified for the current best complete QEP
++    - join->best_ref      : might be partially reordered
++
++    The final optimal plan is stored in 'join->best_positions', and its
++    corresponding cost in 'join->best_read'.
++
++  @note
++    The following pseudocode describes the algorithm of 'greedy_search':
++
++    @code
++    procedure greedy_search
++    input: remaining_tables
++    output: pplan;
++    {
++      pplan = <>;
++      do {
++        (t, a) = best_extension(pplan, remaining_tables);
++        pplan = concat(pplan, (t, a));
++        remaining_tables = remaining_tables - t;
++      } while (remaining_tables != {})
++      return pplan;
++    }
++
++  @endcode
++    where 'best_extension' is a placeholder for a procedure that selects the
++    most "promising" of all tables in 'remaining_tables'.
++    Currently this estimate is performed by calling
++    'best_extension_by_limited_search' to evaluate all extensions of the
++    current QEP of size 'search_depth', thus the complexity of 'greedy_search'
++    mainly depends on that of 'best_extension_by_limited_search'.
++
++  @par
++    If 'best_extension()' == 'best_extension_by_limited_search()', then the
++    worst-case complexity of this algorithm is <=
++    O(N*N^search_depth/search_depth). When search_depth >= N, the
++    complexity of greedy_search is O(N!).
++
++  @par
++    In the future, 'greedy_search' might be extended to support other
++    implementations of 'best_extension', e.g. some simpler quadratic procedure.
++
++  @param join             pointer to the structure providing all context info
++                          for the query
++  @param remaining_tables set of tables not included into the partial plan yet
++  @param search_depth     controls the exhaustiveness of the search
++  @param prune_level      the pruning heuristics that should be applied during
++                          search
++
++  @retval
++    FALSE       ok
++  @retval
++    TRUE        Fatal error
++*/
++
++static bool
++greedy_search(JOIN      *join,
++              table_map remaining_tables,
++              uint      search_depth,
++              uint      prune_level)
++{
++  double    record_count= 1.0;
++  double    read_time=    0.0;
++  uint      idx= join->const_tables; // index into 'join->best_ref'
++  uint      best_idx;
++  uint      size_remain;    // cardinality of remaining_tables
++  POSITION  best_pos;
++  JOIN_TAB  *best_table; // the next plan node to be added to the curr QEP
++
++  DBUG_ENTER("greedy_search");
++
++  /* number of tables that remain to be optimized */
++  size_remain= my_count_bits(remaining_tables);
++
++  do {
++    /* Find the extension of the current QEP with the lowest cost */
++    join->best_read= DBL_MAX;
++    if (best_extension_by_limited_search(join, remaining_tables, idx, record_count,
++                                         read_time, search_depth, prune_level))
++      DBUG_RETURN(TRUE);
++    /*
++      'best_read < DBL_MAX' means that optimizer managed to find
++      some plan and updated 'best_positions' array accordingly.
++    */
++    DBUG_ASSERT(join->best_read < DBL_MAX); 
++
++    if (size_remain <= search_depth)
++    {
++      /*
++        'join->best_positions' contains a complete optimal extension of the
++        current partial QEP.
++      */
++      DBUG_EXECUTE("opt", print_plan(join, join->tables,
++                                     record_count, read_time, read_time,
++                                     "optimal"););
++      DBUG_RETURN(FALSE);
++    }
++
++    /* select the first table in the optimal extension as most promising */
++    best_pos= join->best_positions[idx];
++    best_table= best_pos.table;
++    /*
++      Each subsequent loop of 'best_extension_by_limited_search' uses
++      'join->positions' for cost estimates, therefore we have to update its
++      value.
++    */
++    join->positions[idx]= best_pos;
++
++    /*
++      Update the interleaving state after extending the current partial plan
++      with a new table.
++      We are doing this here because best_extension_by_limited_search reverts
++      the interleaving state to the one of the non-extended partial plan 
++      on exit.
++    */
++    IF_DBUG(bool is_interleave_error= )
++    check_interleaving_with_nj (best_table);
++    /* This has been already checked by best_extension_by_limited_search */
++    DBUG_ASSERT(!is_interleave_error);
++
++    /* find the position of 'best_table' in 'join->best_ref' */
++    best_idx= idx;
++    JOIN_TAB *pos= join->best_ref[best_idx];
++    while (pos && best_table != pos)
++      pos= join->best_ref[++best_idx];
++    DBUG_ASSERT((pos != NULL)); // should always find 'best_table'
++    /* move 'best_table' at the first free position in the array of joins */
++    swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);
++
++    /* compute the cost of the new plan extended with 'best_table' */
++    record_count*= join->positions[idx].records_read;
++    read_time+=    join->positions[idx].read_time;
++
++    remaining_tables&= ~(best_table->table->map);
++    --size_remain;
++    ++idx;
++
++    DBUG_EXECUTE("opt", print_plan(join, idx,
++                                   record_count, read_time, read_time,
++                                   "extended"););
++  } while (TRUE);
++}
++
++
++/**
++  Find a good, possibly optimal, query execution plan (QEP) by a possibly
++  exhaustive search.
++
++    The procedure searches for the optimal ordering of the query tables in set
++    'remaining_tables' of size N, and the corresponding optimal access paths to
++    each table. The choice of a table order and an access path for each table
++    constitutes a query execution plan (QEP) that fully specifies how to
++    execute the query.
++   
++    The maximal size of the found plan is controlled by the parameter
++    'search_depth'. When search_depth == N, the resulting plan is complete and
++    can be used directly as a QEP. If search_depth < N, the found plan consists
++    of only some of the query tables. Such "partial" optimal plans are useful
++    only as input to query optimization procedures, and cannot be used directly
++    to execute a query.
++
++    The algorithm begins with an empty partial plan stored in 'join->positions'
++    and a set of N tables - 'remaining_tables'. Each step of the algorithm
++    evaluates the cost of the partial plan extended by all access plans for
++    each of the relations in 'remaining_tables', expands the current partial
++    plan with the access plan that results in lowest cost of the expanded
++    partial plan, and removes the corresponding relation from
++    'remaining_tables'. The algorithm continues until it either constructs a
++    complete optimal plan, or constructs an optimal partial plan with size =
++    search_depth.
++
++    The final optimal plan is stored in 'join->best_positions'. The
++    corresponding cost of the optimal plan is in 'join->best_read'.
++
++  @note
++    The procedure uses a recursive depth-first search where the depth of the
++    recursion (and thus the exhaustiveness of the search) is controlled by the
++    parameter 'search_depth'.
++
++  @note
++    The pseudocode below describes the algorithm of
++    'best_extension_by_limited_search'. The worst-case complexity of this
++    algorithm is O(N*N^search_depth/search_depth). When search_depth >= N,
++    the complexity of greedy_search is O(N!).
++
++    @code
++    procedure best_extension_by_limited_search(
++      pplan in,             // in, partial plan of tables-joined-so-far
++      pplan_cost,           // in, cost of pplan
++      remaining_tables,     // in, set of tables not referenced in pplan
++      best_plan_so_far,     // in/out, best plan found so far
++      best_plan_so_far_cost,// in/out, cost of best_plan_so_far
++      search_depth)         // in, maximum size of the plans being considered
++    {
++      for each table T from remaining_tables
++      {
++        // Calculate the cost of using table T as above
++        cost = complex-series-of-calculations;
++
++        // Add the cost to the cost so far.
++        pplan_cost+= cost;
++
++        if (pplan_cost >= best_plan_so_far_cost)
++          // pplan_cost already too great, stop search
++          continue;
++
++        pplan= expand pplan by best_access_method;
++        remaining_tables= remaining_tables - table T;
++        if (remaining_tables is not an empty set
++            and
++            search_depth > 1)
++        {
++          best_extension_by_limited_search(pplan, pplan_cost,
++                                           remaining_tables,
++                                           best_plan_so_far,
++                                           best_plan_so_far_cost,
++                                           search_depth - 1);
++        }
++        else
++        {
++          best_plan_so_far_cost= pplan_cost;
++          best_plan_so_far= pplan;
++        }
++      }
++    }
++    @endcode
++
++  @note
++    When 'best_extension_by_limited_search' is called for the first time,
++    'join->best_read' must be set to the largest possible value (e.g. DBL_MAX).
++    The actual implementation provides a way to optionally use pruning
++    heuristic (controlled by the parameter 'prune_level') to reduce the search
++    space by skipping some partial plans.
++
++  @note
++    The parameter 'search_depth' provides control over the recursion
++    depth, and thus the size of the resulting optimal plan.
++
++  @param join             pointer to the structure providing all context info
++                          for the query
++  @param remaining_tables set of tables not included into the partial plan yet
++  @param idx              length of the partial QEP in 'join->positions';
++                          since a depth-first search is used, also corresponds
++                          to the current depth of the search tree;
++                          also an index in the array 'join->best_ref';
++  @param record_count     estimate for the number of records returned by the
++                          best partial plan
++  @param read_time        the cost of the best partial plan
++  @param search_depth     maximum depth of the recursion and thus size of the
++                          found optimal plan
++                          (0 < search_depth <= join->tables+1).
++  @param prune_level      pruning heuristics that should be applied during
++                          optimization
++                          (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS)
++
++  @retval
++    FALSE       ok
++  @retval
++    TRUE        Fatal error
++*/
++
++static bool
++best_extension_by_limited_search(JOIN      *join,
++                                 table_map remaining_tables,
++                                 uint      idx,
++                                 double    record_count,
++                                 double    read_time,
++                                 uint      search_depth,
++                                 uint      prune_level)
++{
++  DBUG_ENTER("best_extension_by_limited_search");
++
++  THD *thd= join->thd;
++  if (thd->killed)  // Abort
++    DBUG_RETURN(TRUE);
++
++  DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
++                                 "SOFAR:"););
++
++  /* 
++     'join' is a partial plan with lower cost than the best plan so far,
++     so continue expanding it further with the tables in 'remaining_tables'.
++  */
++  JOIN_TAB *s;
++  double best_record_count= DBL_MAX;
++  double best_read_time=    DBL_MAX;
++
++  DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time,
++                                "part_plan"););
++
++  for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
++  {
++    table_map real_table_bit= s->table->map;
++    if ((remaining_tables & real_table_bit) && 
++        !(remaining_tables & s->dependent) && 
++        (!idx || !check_interleaving_with_nj(s)))
++    {
++      double current_record_count, current_read_time;
++
++      /* Find the best access method from 's' to the current partial plan */
++      best_access_path(join, s, thd, remaining_tables, idx,
++                       record_count, read_time);
++      /* Compute the cost of extending the plan with 's' */
++      current_record_count= record_count * join->positions[idx].records_read;
++      current_read_time=    read_time + join->positions[idx].read_time;
++
++      /* Expand only partial plans with lower cost than the best QEP so far */
++      if ((current_read_time +
++           current_record_count / (double) TIME_FOR_COMPARE) >= join->best_read)
++      {
++        DBUG_EXECUTE("opt", print_plan(join, idx+1,
++                                       current_record_count,
++                                       read_time,
++                                       (current_read_time +
++                                        current_record_count / 
++                                        (double) TIME_FOR_COMPARE),
++                                       "prune_by_cost"););
++        restore_prev_nj_state(s);
++        continue;
++      }
++
++      /*
++        Prune some less promising partial plans. This heuristic may miss
++        the optimal QEPs, thus it results in a non-exhaustive search.
++      */
++      if (prune_level == 1)
++      {
++        if (best_record_count > current_record_count ||
++            best_read_time > current_read_time ||
++            (idx == join->const_tables &&  // 's' is the first table in the QEP
++            s->table == join->sort_by_table))
++        {
++          if (best_record_count >= current_record_count &&
++              best_read_time >= current_read_time &&
++              /* TODO: What is the reasoning behind this condition? */
++              (!(s->key_dependent & remaining_tables) ||
++               join->positions[idx].records_read < 2.0))
++          {
++            best_record_count= current_record_count;
++            best_read_time=    current_read_time;
++          }
++        }
++        else
++        {
++          DBUG_EXECUTE("opt", print_plan(join, idx+1,
++                                         current_record_count,
++                                         read_time,
++                                         current_read_time,
++                                         "pruned_by_heuristic"););
++          restore_prev_nj_state(s);
++          continue;
++        }
++      }
++
++      if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) )
++      { /* Recursively expand the current partial plan */
++        swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
++        if (best_extension_by_limited_search(join,
++                                             remaining_tables & ~real_table_bit,
++                                             idx + 1,
++                                             current_record_count,
++                                             current_read_time,
++                                             search_depth - 1,
++                                             prune_level))
++          DBUG_RETURN(TRUE);
++        swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
++      }
++      else
++      { /*
++          'join' is either the best partial QEP with 'search_depth' relations,
++          or the best complete QEP so far, whichever is smaller.
++        */
++        current_read_time+= current_record_count / (double) TIME_FOR_COMPARE;
++        if (join->sort_by_table &&
++            join->sort_by_table !=
++            join->positions[join->const_tables].table->table)
++          /* We have to make a temp table */
++          current_read_time+= current_record_count;
++        if ((search_depth == 1) || (current_read_time < join->best_read))
++        {
++          memcpy((uchar*) join->best_positions, (uchar*) join->positions,
++                 sizeof(POSITION) * (idx + 1));
++          join->best_read= current_read_time - 0.001;
++        }
++        DBUG_EXECUTE("opt", print_plan(join, idx+1,
++                                       current_record_count,
++                                       read_time,
++                                       current_read_time,
++                                       "full_plan"););
++      }
++      restore_prev_nj_state(s);
++    }
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  @todo
++  - TODO: this function is here only temporarily until 'greedy_search' is
++  tested and accepted.
++
++  RETURN VALUES
++    FALSE       ok
++    TRUE        Fatal error
++*/
++static bool
++find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
++	  double read_time)
++{
++  DBUG_ENTER("find_best");
++  THD *thd= join->thd;
++  if (thd->killed)
++    DBUG_RETURN(TRUE);
++  if (!rest_tables)
++  {
++    DBUG_PRINT("best",("read_time: %g  record_count: %g",read_time,
++		       record_count));
++
++    read_time+=record_count/(double) TIME_FOR_COMPARE;
++    if (join->sort_by_table &&
++	join->sort_by_table !=
++	join->positions[join->const_tables].table->table)
++      read_time+=record_count;			// We have to make a temp table
++    if (read_time < join->best_read)
++    {
++      memcpy((uchar*) join->best_positions,(uchar*) join->positions,
++	     sizeof(POSITION)*idx);
++      join->best_read= read_time - 0.001;
++    }
++    DBUG_RETURN(FALSE);
++  }
++  if (read_time+record_count/(double) TIME_FOR_COMPARE >= join->best_read)
++    DBUG_RETURN(FALSE);					/* Found better before */
++
++  JOIN_TAB *s;
++  double best_record_count=DBL_MAX,best_read_time=DBL_MAX;
++  for (JOIN_TAB **pos=join->best_ref+idx ; (s=*pos) ; pos++)
++  {
++    table_map real_table_bit=s->table->map;
++    if ((rest_tables & real_table_bit) && !(rest_tables & s->dependent) &&
++        (!idx|| !check_interleaving_with_nj(s)))
++    {
++      double records, best;
++      best_access_path(join, s, thd, rest_tables, idx, record_count, 
++                       read_time);
++      records= join->positions[idx].records_read;
++      best= join->positions[idx].read_time;
++      /*
++	Go to the next level only if there hasn't been a better key on
++	this level! This will cut down the search for a lot of simple cases!
++      */
++      double current_record_count=record_count*records;
++      double current_read_time=read_time+best;
++      if (best_record_count > current_record_count ||
++	  best_read_time > current_read_time ||
++	  (idx == join->const_tables && s->table == join->sort_by_table))
++      {
++	if (best_record_count >= current_record_count &&
++	    best_read_time >= current_read_time &&
++	    (!(s->key_dependent & rest_tables) || records < 2.0))
++	{
++	  best_record_count=current_record_count;
++	  best_read_time=current_read_time;
++	}
++	swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
++	if (find_best(join,rest_tables & ~real_table_bit,idx+1,
++                      current_record_count,current_read_time))
++          DBUG_RETURN(TRUE);
++	swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
++      }
++      restore_prev_nj_state(s);
++      if (join->select_options & SELECT_STRAIGHT_JOIN)
++	break;				// Don't test all combinations
++    }
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  Find how much space the previously read non-const tables take in the cache.
++*/
++
++static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
++{
++  uint null_fields,blobs,fields,rec_length;
++  Field **f_ptr,*field;
++  MY_BITMAP *read_set= join_tab->table->read_set;
++
++  null_fields= blobs= fields= rec_length=0;
++  for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
++  {
++    if (bitmap_is_set(read_set, field->field_index))
++    {
++      uint flags=field->flags;
++      fields++;
++      rec_length+=field->pack_length();
++      if (flags & BLOB_FLAG)
++	blobs++;
++      if (!(flags & NOT_NULL_FLAG))
++	null_fields++;
++    }
++  }
++  if (null_fields)
++    rec_length+=(join_tab->table->s->null_fields+7)/8;
++  if (join_tab->table->maybe_null)
++    rec_length+=sizeof(my_bool);
++  if (blobs)
++  {
++    uint blob_length=(uint) (join_tab->table->file->stats.mean_rec_length-
++			     (join_tab->table->s->reclength- rec_length));
++    rec_length+=(uint) MYSQL_MAX(4,blob_length);
++  }
++  join_tab->used_fields=fields;
++  join_tab->used_fieldlength=rec_length;
++  join_tab->used_blobs=blobs;
++}
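++/*
++  A small worked example for the function above (all numbers hypothetical):
++  three fields in the read set with pack lengths 4 + 8 + 2 = 14 bytes, one
++  of them nullable while the table has 5 nullable fields in total, no
++  outer-join NULL row flag and no blobs, gives
++  used_fieldlength = 14 + (5+7)/8 = 15 bytes per cached row.
++*/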
++
++
++static uint
++cache_record_length(JOIN *join,uint idx)
++{
++  uint length=0;
++  JOIN_TAB **pos,**end;
++  THD *thd=join->thd;
++
++  for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
++       pos != end ;
++       pos++)
++  {
++    JOIN_TAB *join_tab= *pos;
++    if (!join_tab->used_fieldlength)		/* Not calced yet */
++      calc_used_field_length(thd, join_tab);
++    length+=join_tab->used_fieldlength;
++  }
++  return length;
++}
++
++
++/*
++  Get the number of different row combinations for subset of partial join
++
++  SYNOPSIS
++    prev_record_reads()
++      join       The join structure
++      idx        Number of tables in the partial join order (i.e. the
++                 partial join order is in join->positions[0..idx-1])
++      found_ref  Bitmap of tables for which we need to find # of distinct
++                 row combinations.
++
++  DESCRIPTION
++    Given a partial join order (in join->positions[0..idx-1]) and a subset of
++    tables within that join order (specified in found_ref), find out how many
++    distinct row combinations of subset tables will be in the result of the
++    partial join order.
++     
++    This is used as follows: Suppose we have a table accessed with a ref-based
++    method. The ref access depends on current rows of tables in found_ref.
++    We want to count # of different ref accesses. We assume two ref accesses
++    will be different if at least one of the access parameters is different.
++    Example: consider a query
++
++    SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field
++
++    and a join order:
++      t1,  ref access on t1.key=c1
++      t2,  ref access on t2.key=c2       
++      t3,  ref access on t3.key=t1.field 
++    
++    For t1: n_ref_scans = 1, n_distinct_ref_scans = 1
++    For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1
++    For t3: n_ref_scans = records_read(t1)*records_read(t2)
++            n_distinct_ref_scans = #records_read(t1)
++    
++    The reason for having this function (at least the latest version of it)
++    is that we need to account for buffering in join execution. 
++    
++    An edge-case example: if we have a non-first table in join accessed via
++    ref(const) or ref(param) where there is a small number of different
++    values of param, then the access will likely hit the disk cache and will
++    not require any disk seeks.
++    
++    The proper solution would be to assume an LRU disk cache of some size,
++    calculate probability of cache hits, etc. For now we just count
++    identical ref accesses as one.
++
++  RETURN 
++    Expected number of row combinations
++*/
++
++static double
++prev_record_reads(JOIN *join, uint idx, table_map found_ref)
++{
++  double found=1.0;
++  POSITION *pos_end= join->positions - 1;
++  for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--)
++  {
++    if (pos->table->table->map & found_ref)
++    {
++      found_ref|= pos->ref_depend_map;
++      /* 
++        For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table 
++        with no matching row we will get position[t2].records_read==0. 
++        Actually the size of the output is one null-complemented row, therefore
++        we will use a value of 1 whenever we get records_read==0.
++
++        Note
++        - the above case can't occur if inner part of outer join has more 
++          than one table: table with no matches will not be marked as const.
++
++        - Ideally we should add 1 to records_read for every possible null-
++          complemented row. We're not doing it because: 1. it will require
++          non-trivial code and add overhead. 2. The value of records_read
++          is an imprecise estimate and adding 1 (or, in the worst case,
++          #max_nested_outer_joins=64-1) will not make it any more precise.
++      */
++      if (pos->records_read)
++        found*= pos->records_read;
++    }
++  }
++  return found;
++}
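++/*
++  Illustrative trace of the example in the comment above (record counts
++  are made up): with records_read(t1) = 20 and records_read(t2) = 30, the
++  call for t3 (found_ref = {t1}) skips t2, multiplies in only t1 and
++  returns 20 distinct ref accesses, whereas the raw number of ref scans
++  would be 20 * 30 = 600.
++*/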
++
++
++/**
++  Set up join struct according to best position.
++*/
++
++static bool
++get_best_combination(JOIN *join)
++{
++  uint i,tablenr;
++  table_map used_tables;
++  JOIN_TAB *join_tab,*j;
++  KEYUSE *keyuse;
++  uint table_count;
++  THD *thd=join->thd;
++  DBUG_ENTER("get_best_combination");
++
++  table_count=join->tables;
++  if (!(join->join_tab=join_tab=
++	(JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*table_count)))
++    DBUG_RETURN(TRUE);
++
++  join->full_join=0;
++
++  used_tables= OUTER_REF_TABLE_BIT;		// Outer row is already read
++  for (j=join_tab, tablenr=0 ; tablenr < table_count ; tablenr++,j++)
++  {
++    TABLE *form;
++    *j= *join->best_positions[tablenr].table;
++    form=join->table[tablenr]=j->table;
++    used_tables|= form->map;
++    form->reginfo.join_tab=j;
++    if (!*j->on_expr_ref)
++      form->reginfo.not_exists_optimize=0;	// Only with LEFT JOIN
++    DBUG_PRINT("info",("type: %d", j->type));
++    if (j->type == JT_CONST)
++      continue;					// Handled in make_join_stat..
++
++    j->ref.key = -1;
++    j->ref.key_parts=0;
++
++    if (j->type == JT_SYSTEM)
++      continue;
++    if (j->keys.is_clear_all() || !(keyuse= join->best_positions[tablenr].key))
++    {
++      j->type=JT_ALL;
++      if (tablenr != join->const_tables)
++	join->full_join=1;
++    }
++    else if (create_ref_for_key(join, j, keyuse, used_tables))
++      DBUG_RETURN(TRUE);                        // Something went wrong
++  }
++
++  for (i=0 ; i < table_count ; i++)
++    join->map2table[join->join_tab[i].table->tablenr]=join->join_tab+i;
++  update_depend_map(join);
++  DBUG_RETURN(0);
++}
++
++
++static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
++			       table_map used_tables)
++{
++  KEYUSE *keyuse=org_keyuse;
++  bool ftkey=(keyuse->keypart == FT_KEYPART);
++  THD  *thd= join->thd;
++  uint keyparts,length,key;
++  TABLE *table;
++  KEY *keyinfo;
++  DBUG_ENTER("create_ref_for_key");
++
++  /*  Use best key from find_best */
++  table=j->table;
++  key=keyuse->key;
++  keyinfo=table->key_info+key;
++
++  if (ftkey)
++  {
++    Item_func_match *ifm=(Item_func_match *)keyuse->val;
++
++    length=0;
++    keyparts=1;
++    ifm->join_key=1;
++  }
++  else
++  {
++    keyparts=length=0;
++    uint found_part_ref_or_null= 0;
++    /*
++      Calculate length for the used key
++      Stop if there is a missing key part or when we find second key_part
++      with KEY_OPTIMIZE_REF_OR_NULL
++    */
++    do
++    {
++      if (!(~used_tables & keyuse->used_tables))
++      {
++	if (keyparts == keyuse->keypart &&
++	    !(found_part_ref_or_null & keyuse->optimize))
++	{
++	  keyparts++;
++	  length+= keyinfo->key_part[keyuse->keypart].store_length;
++	  found_part_ref_or_null|= keyuse->optimize;
++	}
++      }
++      keyuse++;
++    } while (keyuse->table == table && keyuse->key == key);
++  } /* not ftkey */
++
++  /* set up fieldref */
++  keyinfo=table->key_info+key;
++  j->ref.key_parts=keyparts;
++  j->ref.key_length=length;
++  j->ref.key=(int) key;
++  if (!(j->ref.key_buff= (uchar*) thd->calloc(ALIGN_SIZE(length)*2)) ||
++      !(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) *
++						   (keyparts+1)))) ||
++      !(j->ref.items=    (Item**) thd->alloc(sizeof(Item*)*keyparts)) ||
++      !(j->ref.cond_guards= (bool**) thd->alloc(sizeof(uint*)*keyparts)))
++  {
++    DBUG_RETURN(TRUE);
++  }
++  j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length);
++  j->ref.key_err=1;
++  j->ref.has_record= FALSE;
++  j->ref.null_rejecting= 0;
++  j->ref.use_count= 0;
++  keyuse=org_keyuse;
++
++  store_key **ref_key= j->ref.key_copy;
++  uchar *key_buff=j->ref.key_buff, *null_ref_key= 0;
++  bool keyuse_uses_no_tables= TRUE;
++  if (ftkey)
++  {
++    j->ref.items[0]=((Item_func*)(keyuse->val))->key_item();
++    /* Predicates pushed down into subquery can't be used for FT access */
++    j->ref.cond_guards[0]= NULL;
++    if (keyuse->used_tables)
++      DBUG_RETURN(TRUE);                        // not supported yet. SerG
++
++    j->type=JT_FT;
++  }
++  else
++  {
++    uint i;
++    for (i=0 ; i < keyparts ; keyuse++,i++)
++    {
++      while (keyuse->keypart != i ||
++	     ((~used_tables) & keyuse->used_tables))
++	keyuse++;				/* Skip other parts */
++
++      uint maybe_null= test(keyinfo->key_part[i].null_bit);
++      j->ref.items[i]=keyuse->val;		// Save for cond removal
++      j->ref.cond_guards[i]= keyuse->cond_guard;
++      if (keyuse->null_rejecting) 
++        j->ref.null_rejecting |= 1 << i;
++      keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
++      if (!keyuse->used_tables &&
++	  !(join->select_options & SELECT_DESCRIBE))
++      {					// Compare against constant
++	store_key_item tmp(thd, keyinfo->key_part[i].field,
++                           key_buff + maybe_null,
++                           maybe_null ?  key_buff : 0,
++                           keyinfo->key_part[i].length, keyuse->val);
++	if (thd->is_fatal_error)
++	  DBUG_RETURN(TRUE);
++	tmp.copy();
++      }
++      else
++	*ref_key++= get_store_key(thd,
++				  keyuse,join->const_table_map,
++				  &keyinfo->key_part[i],
++				  key_buff, maybe_null);
++      /*
++	Remember if we are going to use REF_OR_NULL
++	But only if the field _really_ can be null, i.e. we force JT_REF
++	instead of JT_REF_OR_NULL if the field can't be null
++      */
++      if ((keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) && maybe_null)
++	null_ref_key= key_buff;
++      key_buff+=keyinfo->key_part[i].store_length;
++    }
++  } /* not ftkey */
++  *ref_key=0;				// end_marker
++  if (j->type == JT_FT)
++    DBUG_RETURN(0);
++  if (j->type == JT_CONST)
++    j->table->const_table= 1;
++  else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY |
++			       HA_END_SPACE_KEY)) != HA_NOSAME) ||
++	   keyparts != keyinfo->key_parts || null_ref_key)
++  {
++    /* Must read with repeat */
++    j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
++    j->ref.null_ref_key= null_ref_key;
++  }
++  else if (keyuse_uses_no_tables)
++  {
++    /*
++      This happens if we are using a constant expression in the ON part
++      of a LEFT JOIN, e.g.:
++      SELECT * FROM a LEFT JOIN b ON b.key=30
++      Here we should not mark the table as a 'const' as a field may
++      have a 'normal' value or a NULL value.
++    */
++    j->type=JT_CONST;
++  }
++  else
++    j->type=JT_EQ_REF;
++  DBUG_RETURN(0);
++}
++
++
++
++static store_key *
++get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
++	      KEY_PART_INFO *key_part, uchar *key_buff, uint maybe_null)
++{
++  if (!((~used_tables) & keyuse->used_tables))		// if const item
++  {
++    return new store_key_const_item(thd,
++				    key_part->field,
++				    key_buff + maybe_null,
++				    maybe_null ? key_buff : 0,
++				    key_part->length,
++				    keyuse->val);
++  }
++  else if (keyuse->val->type() == Item::FIELD_ITEM ||
++           (keyuse->val->type() == Item::REF_ITEM &&
++            ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF &&
++            (*(Item_ref**)((Item_ref*)keyuse->val)->ref)->ref_type() ==
++             Item_ref::DIRECT_REF && 
++            keyuse->val->real_item()->type() == Item::FIELD_ITEM))
++    return new store_key_field(thd,
++			       key_part->field,
++			       key_buff + maybe_null,
++			       maybe_null ? key_buff : 0,
++			       key_part->length,
++			       ((Item_field*) keyuse->val->real_item())->field,
++			       keyuse->val->full_name());
++  return new store_key_item(thd,
++			    key_part->field,
++			    key_buff + maybe_null,
++			    maybe_null ? key_buff : 0,
++			    key_part->length,
++			    keyuse->val);
++}
++
++/**
++  This function is only called for const items on fields which are keys.
++
++  @return
++    returns 1 if there was some conversion made when the field was stored.
++*/
++
++bool
++store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
++{
++  bool error;
++  TABLE *table= field->table;
++  THD *thd= table->in_use;
++  ha_rows cuted_fields=thd->cuted_fields;
++  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
++                                                   table->write_set);
++
++  /*
++    we should restore the old value of count_cuted_fields because
++    store_val_in_field can be called from mysql_insert
++    with select_insert, which makes count_cuted_fields= 1
++   */
++  enum_check_fields old_count_cuted_fields= thd->count_cuted_fields;
++  thd->count_cuted_fields= check_flag;
++  error= item->save_in_field(field, 1);
++  thd->count_cuted_fields= old_count_cuted_fields;
++  dbug_tmp_restore_column_map(table->write_set, old_map);
++  return error || cuted_fields != thd->cuted_fields;
++}
++
++
++/**
++  @details Initialize a JOIN as a query execution plan
++  that accesses a single table via a table scan.
++
++  @param  parent      contains JOIN_TAB and TABLE object buffers for this join
++  @param  tmp_table   temporary table
++
++  @retval FALSE       success
++  @retval TRUE        error occurred
++*/
++bool
++JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
++{
++  DBUG_ENTER("JOIN::make_simple_join");
++
++  /*
++    Reuse TABLE * and JOIN_TAB if already allocated by a previous call
++    to this function through JOIN::exec (may happen for sub-queries).
++  */
++  if (!parent->join_tab_reexec &&
++      !(parent->join_tab_reexec= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
++    DBUG_RETURN(TRUE);                        /* purecov: inspected */
++
++  join_tab= parent->join_tab_reexec;
++  table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
++  tables= 1;
++  const_tables= 0;
++  const_table_map= 0;
++  tmp_table_param.field_count= tmp_table_param.sum_func_count=
++    tmp_table_param.func_count= 0;
++  /*
++    We need to destruct the copy_field (allocated in create_tmp_table())
++    before setting it to 0 if the join is not "reusable".
++  */
++  if (!tmp_join || tmp_join != this) 
++    tmp_table_param.cleanup(); 
++  tmp_table_param.copy_field= tmp_table_param.copy_field_end=0;
++  first_record= sort_and_group=0;
++  send_records= (ha_rows) 0;
++  group= 0;
++  row_limit= unit->select_limit_cnt;
++  do_send_rows= row_limit ? 1 : 0;
++
++  join_tab->cache.buff=0;			/* No caching */
++  join_tab->table=temp_table;
++  join_tab->select=0;
++  join_tab->select_cond=0;
++  join_tab->quick=0;
++  join_tab->type= JT_ALL;			/* Map through all records */
++  join_tab->keys.init();
++  join_tab->keys.set_all();                     /* test everything in quick */
++  join_tab->info=0;
++  join_tab->on_expr_ref=0;
++  join_tab->last_inner= 0;
++  join_tab->first_unmatched= 0;
++  join_tab->ref.key = -1;
++  join_tab->not_used_in_distinct=0;
++  join_tab->read_first_record= join_init_read_record;
++  join_tab->join= this;
++  join_tab->ref.key_parts= 0;
++  bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
++  temp_table->status=0;
++  temp_table->null_row=0;
++  DBUG_RETURN(FALSE);
++}
++
++
++inline void add_cond_and_fix(Item **e1, Item *e2)
++{
++  if (*e1)
++  {
++    Item *res;
++    if ((res= new Item_cond_and(*e1, e2)))
++    {
++      *e1= res;
++      res->quick_fix_field();
++      res->update_used_tables();
++    }
++  }
++  else
++    *e1= e2;
++}
++
++
++/**
++  Add to join_tab->select_cond[i] "table.field IS NOT NULL" conditions
++  we've inferred from ref/eq_ref access performed.
++
++    This function is a part of "Early NULL-values filtering for ref access"
++    optimization.
++
++    Example of this optimization:
++    For query SELECT * FROM t1,t2 WHERE t2.key=t1.field @n
++    and plan " any-access(t1), ref(t2.key=t1.field) " @n
++    add "t1.field IS NOT NULL" to t1's table condition. @n
++
++    Description of the optimization:
++    
++      We look through the equalities chosen to perform ref/eq_ref access,
++      pick equalities that have the form "tbl.part_of_key = othertbl.field"
++      (where othertbl is a non-const table and othertbl.field may be NULL)
++      and add them to conditions on the corresponding tables (othertbl in this
++      example).
++
++      An exception to this is the case when referred_tab->join != join.
++      I.e. don't add NOT NULL constraints from any embedded subquery.
++      Consider this query:
++      @code
++      SELECT A.f2 FROM t1 LEFT JOIN t2 A ON A.f2 = f1
++      WHERE A.f3=(SELECT MIN(f3) FROM  t2 C WHERE A.f4 = C.f4) OR A.f3 IS NULL;
++      @endcode
++      Here condition A.f3 IS NOT NULL is going to be added to the WHERE
++      condition of the embedding query.
++      Another example:
++      SELECT * FROM t10, t11 WHERE (t10.a < 10 OR t10.a IS NULL)
++      AND t11.b <=> t10.b AND (t11.a = (SELECT MAX(a) FROM t12
++      WHERE t12.b = t10.a ));
++      Here condition t10.a IS NOT NULL is going to be added.
++      In both cases the addition of the NOT NULL condition would erroneously
++      reject some rows of the result set.
++      The referred_tab->join != join constraint disallows such additions.
++
++      This optimization doesn't affect the choices that ref, range, or join
++      optimizer make. This was intentional because this was added after 4.1
++      was GA.
++      
++    Implementation overview
++      1. update_ref_and_keys() accumulates info about null-rejecting
++         predicates in KEY_FIELD::null_rejecting
++      1.1 add_key_part saves these to KEYUSE.
++      2. create_ref_for_key copies them to TABLE_REF.
++      3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of
++         appropriate JOIN_TAB members.
++*/
++
++static void add_not_null_conds(JOIN *join)
++{
++  DBUG_ENTER("add_not_null_conds");
++  for (uint i=join->const_tables ; i < join->tables ; i++)
++  {
++    JOIN_TAB *tab=join->join_tab+i;
++    if ((tab->type == JT_REF || tab->type == JT_EQ_REF || 
++         tab->type == JT_REF_OR_NULL) &&
++        !tab->table->maybe_null)
++    {
++      for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++)
++      {
++        if (tab->ref.null_rejecting & (1 << keypart))
++        {
++          Item *item= tab->ref.items[keypart];
++          Item *notnull;
++          DBUG_ASSERT(item->type() == Item::FIELD_ITEM);
++          Item_field *not_null_item= (Item_field*)item;
++          JOIN_TAB *referred_tab= not_null_item->field->table->reginfo.join_tab;
++          /*
++            For UPDATE queries such as:
++            UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
++            not_null_item is t1.f1, but its referred_tab is 0.
++          */
++          if (!referred_tab || referred_tab->join != join)
++            continue;
++          if (!(notnull= new Item_func_isnotnull(not_null_item)))
++            DBUG_VOID_RETURN;
++          /*
++            We need to do full fix_fields() call here in order to have correct
++            notnull->const_item(). This is needed e.g. by test_quick_select 
++            when it is called from make_join_select after this function is 
++            called.
++          */
++          if (notnull->fix_fields(join->thd, &notnull))
++            DBUG_VOID_RETURN;
++          DBUG_EXECUTE("where",print_where(notnull,
++                                           referred_tab->table->alias,
++                                           QT_ORDINARY););
++          add_cond_and_fix(&referred_tab->select_cond, notnull);
++        }
++      }
++    }
++  }
++  DBUG_VOID_RETURN;
++}
++
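++/*
++  A minimal standalone sketch (not server code; RefAccess and the names below
++  are invented) of the null-rejecting walk done by add_not_null_conds() above:
++  for every key part whose bit is set in null_rejecting, an "IS NOT NULL"
++  predicate is emitted for the referring expression.
++*/
++#include <cstdint>
++#include <iostream>
++#include <string>
++#include <vector>
++
++struct RefAccess {                  // stand-in for TABLE_REF
++  std::vector<std::string> items;   // "othertbl.field" used for each key part
++  std::uint32_t null_rejecting;     // bit i set => key part i rejects NULL
++};
++
++int main() {
++  RefAccess ref{{"t1.field", "t1.other"}, 0x1};   // only key part 0 is null-rejecting
++  for (std::size_t keypart = 0; keypart < ref.items.size(); ++keypart)
++    if (ref.null_rejecting & (1u << keypart))
++      std::cout << ref.items[keypart] << " IS NOT NULL\n";  // pushed to the referred table
++}
++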
++/**
++  Build a predicate guarded by match variables for embedding outer joins.
++  The function recursively adds guards for predicate cond
++  ascending from tab to the first inner table of the next embedding
++  nested outer join, and so on, until it reaches root_tab
++  (root_tab can be 0).
++
++  @param tab       the first inner table for most nested outer join
++  @param cond      the predicate to be guarded (must be set)
++  @param root_tab  the first inner table to stop
++
++  @return
++    -  pointer to the guarded predicate, if success
++    -  0, otherwise
++*/
++
++static COND*
++add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
++{
++  COND *tmp;
++  DBUG_ASSERT(cond != 0);
++  if (tab == root_tab)
++    return cond;
++  if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab)))
++    tmp= new Item_func_trig_cond(tmp, &tab->found);
++  if (tmp)
++  {
++    tmp->quick_fix_field();
++    tmp->update_used_tables();
++  }
++  return tmp;
++}
++
++
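++/*
++  A standalone sketch (invented Tab type, string output) of the recursive guard
++  wrapping performed by add_found_match_trig_cond() above: the predicate is
++  wrapped in one "found match" trigger per embedding outer join level,
++  stopping at root_tab.
++*/
++#include <iostream>
++#include <string>
++
++struct Tab {                // stand-in for JOIN_TAB
++  std::string name;
++  Tab *first_upper;         // first inner table of the embedding outer join
++};
++
++std::string add_guards(Tab *tab, const std::string &cond, Tab *root) {
++  if (tab == root)
++    return cond;                                     // reached the stop table
++  std::string inner = add_guards(tab->first_upper, cond, root);
++  return "trig<" + tab->name + ".found>(" + inner + ")";
++}
++
++int main() {
++  Tab t1{"t1", nullptr}, t2{"t2", &t1};
++  // Guard a predicate attached to t2, ascending to the outermost level (root = 0):
++  std::cout << add_guards(&t2, "t3.a = t4.a", nullptr) << "\n";
++  // prints: trig<t2.found>(trig<t1.found>(t3.a = t4.a))
++}
++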
++/**
++  Fill in outer join related info for the execution plan structure.
++
++    For each outer join operation left after simplification of the
++    original query, the function sets up the following pointers in the linear
++    structure join->join_tab representing the selected execution plan.
++    The first inner table t0 for the operation is set to refer to the last
++    inner table tk through the field t0->last_inner.
++    Every inner table ti for the operation is set to refer to the first
++    inner table through the field ti->first_inner.
++    The first inner table t0 for the operation is set to refer to the
++    first inner table of the embedding outer join operation, if there is any,
++    through the field t0->first_upper.
++    The on expression for the outer join operation is attached to the
++    corresponding first inner table through the field t0->on_expr_ref.
++    Here ti are structures of the JOIN_TAB type.
++
++  EXAMPLE. For the query: 
++  @code
++        SELECT * FROM t1
++                      LEFT JOIN
++                      (t2, t3 LEFT JOIN t4 ON t3.a=t4.a)
++                      ON (t1.a=t2.a AND t1.b=t3.b)
++          WHERE t1.c > 5,
++  @endcode
++
++    given that the execution plan with the table order t1,t2,t3,t4
++    is selected, the following references will be set:
++    t4->last_inner=[t4], t4->first_inner=[t4], t4->first_upper=[t2]
++    t2->last_inner=[t4], t2->first_inner=t3->first_inner=[t2],
++    on expression (t1.a=t2.a AND t1.b=t3.b) will be attached to 
++    *t2->on_expr_ref, while t3.a=t4.a will be attached to *t4->on_expr_ref.
++
++  @param join   reference to the info fully describing the query
++
++  @note
++    The function assumes that the simplification procedure has been
++    already applied to the join query (see simplify_joins).
++    This function can be called only after the execution plan
++    has been chosen.
++*/
++
++static void
++make_outerjoin_info(JOIN *join)
++{
++  DBUG_ENTER("make_outerjoin_info");
++  for (uint i=join->const_tables ; i < join->tables ; i++)
++  {
++    JOIN_TAB *tab=join->join_tab+i;
++    TABLE *table=tab->table;
++    TABLE_LIST *tbl= table->pos_in_table_list;
++    TABLE_LIST *embedding= tbl->embedding;
++
++    if (tbl->outer_join)
++    {
++      /* 
++        Table tab is the only inner table for this outer join.
++        (Like table t4 for the table reference t3 LEFT JOIN t4 ON t3.a=t4.a
++        in the query above.)
++      */
++      tab->last_inner= tab->first_inner= tab;
++      tab->on_expr_ref= &tbl->on_expr;
++      tab->cond_equal= tbl->cond_equal;
++      if (embedding)
++        tab->first_upper= embedding->nested_join->first_nested;
++    }    
++    for ( ; embedding ; embedding= embedding->embedding)
++    {
++      NESTED_JOIN *nested_join= embedding->nested_join;
++      if (!nested_join->counter)
++      {
++        /* 
++          Table tab is the first inner table for nested_join.
++          Save reference to it in the nested join structure.
++        */ 
++        nested_join->first_nested= tab;
++        tab->on_expr_ref= &embedding->on_expr;
++        tab->cond_equal= tbl->cond_equal;
++        if (embedding->embedding)
++          tab->first_upper= embedding->embedding->nested_join->first_nested;
++      }
++      if (!tab->first_inner)  
++        tab->first_inner= nested_join->first_nested;
++      if (++nested_join->counter < nested_join->join_list.elements)
++        break;
++      /* Table tab is the last inner table for nested join. */
++      nested_join->first_nested->last_inner= tab;
++    }
++  }
++  DBUG_VOID_RETURN;
++}
++
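++/*
++  A standalone sketch (Nest/Table are invented stand-ins for
++  NESTED_JOIN/JOIN_TAB) of the counter trick used by make_outerjoin_info()
++  above: walking the plan order, the table that takes a nest's counter off
++  zero is the nest's first inner table, and the table that makes the counter
++  reach the nest's size is its last inner table.
++*/
++#include <iostream>
++#include <string>
++#include <vector>
++
++struct Nest {                       // stand-in for NESTED_JOIN
++  std::string name;
++  unsigned size;                    // join_list.elements
++  unsigned counter = 0;
++  std::string first_inner, last_inner;
++};
++
++struct Table {
++  std::string name;
++  std::vector<Nest*> embeddings;    // innermost nest first
++};
++
++int main() {
++  // t1 LEFT JOIN (t2, t3 LEFT JOIN t4 ON ...) ON ..., plan order t1,t2,t3,t4
++  Nest outer{"(t2,t3,t4)", 3}, inner{"(t4)", 1};
++  std::vector<Table> plan = {{"t1", {}}, {"t2", {&outer}},
++                             {"t3", {&outer}}, {"t4", {&inner, &outer}}};
++  for (Table &tab : plan)
++    for (Nest *n : tab.embeddings) {
++      if (n->counter == 0) n->first_inner = tab.name;
++      if (++n->counter < n->size) break;   // nest not finished yet: stop ascending
++      n->last_inner = tab.name;            // nest completed at this table
++    }
++  for (Nest *n : {&outer, &inner})
++    std::cout << n->name << ": first=" << n->first_inner
++              << " last=" << n->last_inner << "\n";
++}
++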
++
++static bool
++make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
++{
++  THD *thd= join->thd;
++  DBUG_ENTER("make_join_select");
++  if (select)
++  {
++    add_not_null_conds(join);
++    table_map used_tables;
++    if (cond)                /* Because of QUICK_GROUP_MIN_MAX_SELECT */
++    {                        /* there may be a select without a cond. */    
++      if (join->tables > 1)
++        cond->update_used_tables();		// Tablenr may have changed
++      if (join->const_tables == join->tables &&
++	  thd->lex->current_select->master_unit() ==
++	  &thd->lex->unit)		// not upper level SELECT
++        join->const_table_map|=RAND_TABLE_BIT;
++      {						// Check const tables
++        COND *const_cond=
++	  make_cond_for_table(cond,
++                              join->const_table_map,
++                              (table_map) 0);
++        DBUG_EXECUTE("where",print_where(const_cond,"constants", QT_ORDINARY););
++        for (JOIN_TAB *tab= join->join_tab+join->const_tables;
++             tab < join->join_tab+join->tables ; tab++)
++        {
++          if (*tab->on_expr_ref)
++          {
++            JOIN_TAB *cond_tab= tab->first_inner;
++            COND *tmp= make_cond_for_table(*tab->on_expr_ref,
++                                           join->const_table_map,
++                                           (table_map) 0);
++            if (!tmp)
++              continue;
++            tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl);
++            if (!tmp)
++              DBUG_RETURN(1);
++            tmp->quick_fix_field();
++            cond_tab->select_cond= !cond_tab->select_cond ? tmp :
++	                            new Item_cond_and(cond_tab->select_cond,
++                                                      tmp);
++            if (!cond_tab->select_cond)
++	      DBUG_RETURN(1);
++            cond_tab->select_cond->quick_fix_field();
++          }       
++        }
++        if (const_cond && !const_cond->val_int())
++        {
++	  DBUG_PRINT("info",("Found impossible WHERE condition"));
++	  DBUG_RETURN(1);	 // Impossible const condition
++        }
++      }
++    }
++    used_tables=((select->const_tables=join->const_table_map) |
++		 OUTER_REF_TABLE_BIT | RAND_TABLE_BIT);
++    for (uint i=join->const_tables ; i < join->tables ; i++)
++    {
++      JOIN_TAB *tab=join->join_tab+i;
++      /*
++        first_inner is the X in queries like:
++        SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
++      */
++      JOIN_TAB *first_inner_tab= tab->first_inner; 
++      table_map current_map= tab->table->map;
++      bool use_quick_range=0;
++      COND *tmp;
++
++      /*
++	The following forces the inclusion of random expressions in the last table's condition.
++	It solves the problem with selects like SELECT * FROM t1 WHERE rand() > 0.5
++      */
++      if (i == join->tables-1)
++	current_map|= OUTER_REF_TABLE_BIT | RAND_TABLE_BIT;
++      used_tables|=current_map;
++
++      if (tab->type == JT_REF && tab->quick &&
++	  (uint) tab->ref.key == tab->quick->index &&
++	  tab->ref.key_length < tab->quick->max_used_key_length)
++      {
++	/* Range uses longer key;  Use this instead of ref on key */
++	tab->type=JT_ALL;
++	use_quick_range=1;
++	tab->use_quick=1;
++        tab->ref.key= -1;
++	tab->ref.key_parts=0;		// Don't use ref key.
++	join->best_positions[i].records_read= rows2double(tab->quick->records);
++        /* 
++          We will use join cache here : prevent sorting of the first
++          table only and sort at the end.
++        */
++        if (i != join->const_tables && join->tables > join->const_tables + 1)
++          join->full_join= 1;
++      }
++
++      tmp= NULL;
++      if (cond)
++        tmp= make_cond_for_table(cond,used_tables,current_map);
++      if (cond && !tmp && tab->quick)
++      {						// Outer join
++        if (tab->type != JT_ALL)
++        {
++          /*
++            Don't use the quick method
++            We come here in the case where we have 'key=constant' and
++            the test is removed by make_cond_for_table()
++          */
++          delete tab->quick;
++          tab->quick= 0;
++        }
++        else
++        {
++          /*
++            Hack to handle the case where we only refer to a table
++            in the ON part of an OUTER JOIN. In this case we want the code
++            below to check if we should use 'quick' instead.
++          */
++          DBUG_PRINT("info", ("Item_int"));
++          tmp= new Item_int((longlong) 1,1);	// Always true
++        }
++
++      }
++      if (tmp || !cond || tab->type == JT_REF)
++      {
++        DBUG_EXECUTE("where",print_where(tmp,tab->table->alias, QT_ORDINARY););
++	SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
++                                       thd->memdup((uchar*) select,
++                                                   sizeof(*select)));
++	if (!sel)
++	  DBUG_RETURN(1);			// End of memory
++        /*
++          If tab is an inner table of an outer join operation,
++          add a match guard to the pushed down predicate.
++          The guard will turn the predicate on only after
++          the first match for outer tables is encountered.
++	*/        
++        if (cond && tmp)
++        {
++          /*
++            Because of QUICK_GROUP_MIN_MAX_SELECT there may be a select without
++            a cond, so neutralize the hack above.
++          */
++          if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0)))
++            DBUG_RETURN(1);
++          tab->select_cond=sel->cond=tmp;
++          /* Push condition to storage engine if this is enabled
++             and the condition is not guarded */
++	  if (thd->variables.engine_condition_pushdown)
++          {
++            COND *push_cond= 
++              make_cond_for_table(tmp, current_map, current_map);
++            if (push_cond)
++            {
++              /* Push condition to handler */
++              if (!tab->table->file->cond_push(push_cond))
++                tab->table->file->pushed_cond= push_cond;
++            }
++          }
++        }
++        else
++          tab->select_cond= sel->cond= NULL;
++
++	sel->head=tab->table;
++        DBUG_EXECUTE("where",print_where(tmp,tab->table->alias, QT_ORDINARY););
++	if (tab->quick)
++	{
++	  /* Use quick key read if it's a constant and it's not used
++	     with key reading */
++	  if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF
++	      && tab->type != JT_FT && (tab->type != JT_REF ||
++               (uint) tab->ref.key == tab->quick->index))
++	  {
++	    sel->quick=tab->quick;		// Use value from get_quick_...
++	    sel->quick_keys.clear_all();
++	    sel->needed_reg.clear_all();
++	  }
++	  else
++	  {
++	    delete tab->quick;
++	  }
++	  tab->quick=0;
++	}
++	uint ref_key=(uint) sel->head->reginfo.join_tab->ref.key+1;
++	if (i == join->const_tables && ref_key)
++	{
++	  if (!tab->const_keys.is_clear_all() &&
++              tab->table->reginfo.impossible_range)
++	    DBUG_RETURN(1);
++	}
++	else if (tab->type == JT_ALL && ! use_quick_range)
++	{
++	  if (!tab->const_keys.is_clear_all() &&
++	      tab->table->reginfo.impossible_range)
++	    DBUG_RETURN(1);				// Impossible range
++	  /*
++	    We plan to scan all rows.
++	    Check again if we should use an index.
++	    We could have used a column from a previous table in
++	    the index if we are using limit and this is the first table
++	  */
++
++	  if ((cond &&
++              !tab->keys.is_subset(tab->const_keys) && i > 0) ||
++	      (!tab->const_keys.is_clear_all() && i == join->const_tables &&
++	       join->unit->select_limit_cnt <
++	       join->best_positions[i].records_read &&
++	       !(join->select_options & OPTION_FOUND_ROWS)))
++	  {
++	    /* Join with outer join condition */
++	    COND *orig_cond=sel->cond;
++	    sel->cond= and_conds(sel->cond, *tab->on_expr_ref);
++
++	    /*
++              We can't call sel->cond->fix_fields,
++              as it will break tab->on_expr if it's AND condition
++              (fix_fields currently removes extra AND/OR levels).
++              Yet attributes of the just built condition are not needed.
++              Thus we call sel->cond->quick_fix_field for safety.
++	    */
++	    if (sel->cond && !sel->cond->fixed)
++	      sel->cond->quick_fix_field();
++
++	    if (sel->test_quick_select(thd, tab->keys,
++				       used_tables & ~ current_map,
++				       (join->select_options &
++					OPTION_FOUND_ROWS ?
++					HA_POS_ERROR :
++					join->unit->select_limit_cnt), 0) < 0)
++            {
++	      /*
++		Before reporting "Impossible WHERE" for the whole query
++		we have to check whether it is only an "impossible ON" instead
++	      */
++              sel->cond=orig_cond;
++              if (!*tab->on_expr_ref ||
++                  sel->test_quick_select(thd, tab->keys,
++                                         used_tables & ~ current_map,
++                                         (join->select_options &
++                                          OPTION_FOUND_ROWS ?
++                                          HA_POS_ERROR :
++                                          join->unit->select_limit_cnt),0) < 0)
++		DBUG_RETURN(1);			// Impossible WHERE
++            }
++            else
++	      sel->cond=orig_cond;
++
++	    /* Fix for EXPLAIN */
++	    if (sel->quick)
++	      join->best_positions[i].records_read= (double)sel->quick->records;
++	  }
++	  else
++	  {
++	    sel->needed_reg=tab->needed_reg;
++	    sel->quick_keys.clear_all();
++	  }
++	  if (!sel->quick_keys.is_subset(tab->checked_keys) ||
++              !sel->needed_reg.is_subset(tab->checked_keys))
++	  {
++	    tab->keys=sel->quick_keys;
++            tab->keys.merge(sel->needed_reg);
++	    tab->use_quick= (!sel->needed_reg.is_clear_all() &&
++			     (select->quick_keys.is_clear_all() ||
++			      (select->quick &&
++			       (select->quick->records >= 100L)))) ?
++	      2 : 1;
++	    sel->read_tables= used_tables & ~current_map;
++	  }
++	  if (i != join->const_tables && tab->use_quick != 2)
++	  {					/* Read with cache */
++	    if (cond &&
++                (tmp=make_cond_for_table(cond,
++					 join->const_table_map |
++					 current_map,
++					 current_map)))
++	    {
++              DBUG_EXECUTE("where",print_where(tmp,"cache", QT_ORDINARY););
++	      tab->cache.select=(SQL_SELECT*)
++		thd->memdup((uchar*) sel, sizeof(SQL_SELECT));
++	      tab->cache.select->cond=tmp;
++	      tab->cache.select->read_tables=join->const_table_map;
++	    }
++	  }
++	}
++      }
++      
++      /* 
++        Push down conditions from all on expressions.
++        Each of these conditions is guarded by a variable
++        that turns it off just before the null complemented row for
++        outer joins is formed. Thus, the conditions from an
++        'on expression' are guaranteed not to be checked for
++        the null complemented row.
++      */ 
++
++      /* First push down constant conditions from on expressions */
++      for (JOIN_TAB *join_tab= join->join_tab+join->const_tables;
++           join_tab < join->join_tab+join->tables ; join_tab++)
++      {
++        if (*join_tab->on_expr_ref)
++        {
++          JOIN_TAB *cond_tab= join_tab->first_inner;
++          COND *tmp= make_cond_for_table(*join_tab->on_expr_ref,
++                                         join->const_table_map,
++                                         (table_map) 0);
++          if (!tmp)
++            continue;
++          tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl);
++          if (!tmp)
++            DBUG_RETURN(1);
++          tmp->quick_fix_field();
++          cond_tab->select_cond= !cond_tab->select_cond ? tmp :
++	                            new Item_cond_and(cond_tab->select_cond,tmp);
++          if (!cond_tab->select_cond)
++	    DBUG_RETURN(1);
++          cond_tab->select_cond->quick_fix_field();
++        }       
++      }
++
++      /* Push down non-constant conditions from on expressions */
++      JOIN_TAB *last_tab= tab;
++      while (first_inner_tab && first_inner_tab->last_inner == last_tab)
++      {  
++        /* 
++          Table tab is the last inner table of an outer join.
++          An on expression is always attached to it.
++	*/     
++        COND *on_expr= *first_inner_tab->on_expr_ref;
++
++        table_map used_tables2= (join->const_table_map |
++                                 OUTER_REF_TABLE_BIT | RAND_TABLE_BIT);
++	for (tab= join->join_tab+join->const_tables; tab <= last_tab ; tab++)
++        {
++          current_map= tab->table->map;
++          used_tables2|= current_map;
++          COND *tmp_cond= make_cond_for_table(on_expr, used_tables2,
++                                             current_map);
++          if (tmp_cond)
++          {
++            JOIN_TAB *cond_tab= tab < first_inner_tab ? first_inner_tab : tab;
++            /*
++              First add the guards for match variables of
++              all embedding outer join operations.
++	    */
++            if (!(tmp_cond= add_found_match_trig_cond(cond_tab->first_inner,
++                                                     tmp_cond,
++                                                     first_inner_tab)))
++              DBUG_RETURN(1);
++            /* 
++              Now add the guard turning the predicate off for 
++              the null complemented row.
++	    */ 
++            DBUG_PRINT("info", ("Item_func_trig_cond"));
++            tmp_cond= new Item_func_trig_cond(tmp_cond,
++                                              &first_inner_tab->
++                                              not_null_compl);
++            DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx",
++                                (ulong) tmp_cond));
++            if (tmp_cond)
++              tmp_cond->quick_fix_field();
++	    /* Add the predicate to other pushed down predicates */
++            DBUG_PRINT("info", ("Item_cond_and"));
++            cond_tab->select_cond= !cond_tab->select_cond ? tmp_cond :
++	                          new Item_cond_and(cond_tab->select_cond,
++                                                    tmp_cond);
++            DBUG_PRINT("info", ("Item_cond_and 0x%lx",
++                                (ulong)cond_tab->select_cond));
++            if (!cond_tab->select_cond)
++	      DBUG_RETURN(1);
++            cond_tab->select_cond->quick_fix_field();
++          }              
++        }
++        first_inner_tab= first_inner_tab->first_upper;       
++      }
++    }
++  }
++  DBUG_RETURN(0);
++}
++
++
++/**
++  The default implementation of unlock-row method of READ_RECORD,
++  used in all access methods.
++*/
++
++void rr_unlock_row(st_join_table *tab)
++{
++  READ_RECORD *info= &tab->read_record;
++  info->file->unlock_row();
++}
++
++
++
++/**
++  Pick the appropriate access method functions
++
++  Sets the functions for the selected table access method
++
++  @param      tab               Table reference to put access method
++*/
++
++static void
++pick_table_access_method(JOIN_TAB *tab)
++{
++  switch (tab->type) 
++  {
++  case JT_REF:
++    tab->read_first_record= join_read_always_key;
++    tab->read_record.read_record= join_read_next_same;
++    break;
++
++  case JT_REF_OR_NULL:
++    tab->read_first_record= join_read_always_key_or_null;
++    tab->read_record.read_record= join_read_next_same_or_null;
++    break;
++
++  case JT_CONST:
++    tab->read_first_record= join_read_const;
++    tab->read_record.read_record= join_no_more_records;
++    break;
++
++  case JT_EQ_REF:
++    tab->read_first_record= join_read_key;
++    tab->read_record.read_record= join_no_more_records;
++    break;
++
++  case JT_FT:
++    tab->read_first_record= join_ft_read_first;
++    tab->read_record.read_record= join_ft_read_next;
++    break;
++
++  case JT_SYSTEM:
++    tab->read_first_record= join_read_system;
++    tab->read_record.read_record= join_no_more_records;
++    break;
++
++  /* keep gcc happy */  
++  default:
++    break;  
++  }
++}
++
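++/*
++  A standalone sketch of the dispatch idea in pick_table_access_method() above:
++  an access method is just a pair of function pointers ("read first row" /
++  "read next row") installed on the table. The enum, the Reader struct and the
++  functions are invented for the illustration.
++*/
++#include <iostream>
++
++enum AccessType { REF_ACCESS, EQ_REF_ACCESS, CONST_ACCESS };
++
++static int read_always_key() { std::cout << "index lookup\n";        return 0;  }
++static int read_next_same()  { std::cout << "next row, same key\n";  return 0;  }
++static int read_key_once()   { std::cout << "single index lookup\n"; return 0;  }
++static int no_more_records() { std::cout << "done\n";                return -1; }
++
++struct Reader { int (*read_first)(); int (*read_next)(); };
++
++static Reader pick(AccessType t) {
++  switch (t) {
++  case REF_ACCESS:    return {read_always_key, read_next_same};
++  case EQ_REF_ACCESS: return {read_key_once,   no_more_records};
++  case CONST_ACCESS:  return {read_key_once,   no_more_records};
++  }
++  return {nullptr, nullptr};        // keep the compiler happy
++}
++
++int main() {
++  Reader r = pick(EQ_REF_ACCESS);
++  r.read_first();                   // "single index lookup"
++  r.read_next();                    // "done"
++}
++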
++
++static void
++make_join_readinfo(JOIN *join, ulonglong options)
++{
++  uint i;
++  bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
++  bool ordered_set= 0;
++  bool sorted= 1;
++  DBUG_ENTER("make_join_readinfo");
++
++  for (i=join->const_tables ; i < join->tables ; i++)
++  {
++    JOIN_TAB *tab=join->join_tab+i;
++    TABLE *table=tab->table;
++    tab->read_record.table= table;
++    tab->read_record.file=table->file;
++    tab->read_record.unlock_row= rr_unlock_row;
++    tab->next_select=sub_select;		/* normal select */
++
++    /*
++      Determine if the set is already ordered for ORDER BY, so we can
++      disable the join cache, because the cache would change the ordering
++      of the results. The code handles a sort table at any location (not only
++      first after the const tables), despite the fact that this is currently prohibited.
++      We must disable join cache if the first non-const table alone is
++      ordered. If there is a temp table the ordering is done as a last
++      operation and doesn't prevent join cache usage.
++    */
++    if (!ordered_set && !join->need_tmp && 
++        (table == join->sort_by_table ||
++         (join->sort_by_table == (TABLE *) 1 && i != join->const_tables)))
++      ordered_set= 1;
++
++    tab->sorted= sorted;
++    sorted= 0;                                  // only first must be sorted
++    table->status=STATUS_NO_RECORD;
++    pick_table_access_method (tab);
++
++    switch (tab->type) {
++    case JT_EQ_REF:
++      tab->read_record.unlock_row= join_read_key_unlock_row;
++      /* fall through */
++    case JT_REF_OR_NULL:
++    case JT_REF:
++      if (tab->select)
++      {
++	delete tab->select->quick;
++	tab->select->quick=0;
++      }
++      delete tab->quick;
++      tab->quick=0;
++      /* fall through */
++    case JT_CONST:				// Only happens with left join
++      if (table->covering_keys.is_set(tab->ref.key) &&
++	  !table->no_keyread)
++        table->set_keyread(TRUE);
++      break;
++    case JT_ALL:
++      /*
++	Use a join cache (set on the previous table) if possible.
++        If the incoming data set is already sorted, don't use the cache.
++      */
++      if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) &&
++          tab->use_quick != 2 && !tab->first_inner && !ordered_set)
++      {
++	if ((options & SELECT_DESCRIBE) ||
++	    !join_init_cache(join->thd,join->join_tab+join->const_tables,
++			     i-join->const_tables))
++	{
++	  tab[-1].next_select=sub_select_cache; /* Patch previous */
++	}
++      }
++      /* These init changes read_record */
++      if (tab->use_quick == 2)
++      {
++	join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED;
++	tab->read_first_record= join_init_quick_read_record;
++	if (statistics)
++	  status_var_increment(join->thd->status_var.select_range_check_count);
++      }
++      else
++      {
++	tab->read_first_record= join_init_read_record;
++	if (i == join->const_tables)
++	{
++	  if (tab->select && tab->select->quick)
++	  {
++	    if (statistics)
++	      status_var_increment(join->thd->status_var.select_range_count);
++	  }
++	  else
++	  {
++	    join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
++	    if (statistics)
++	      status_var_increment(join->thd->status_var.select_scan_count);
++	  }
++	}
++	else
++	{
++	  if (tab->select && tab->select->quick)
++	  {
++	    if (statistics)
++	      status_var_increment(join->thd->status_var.select_full_range_join_count);
++	  }
++	  else
++	  {
++	    join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
++	    if (statistics)
++	      status_var_increment(join->thd->status_var.select_full_join_count);
++	  }
++	}
++	if (!table->no_keyread)
++	{
++	  if (tab->select && tab->select->quick &&
++              tab->select->quick->index != MAX_KEY && //not index_merge
++	      table->covering_keys.is_set(tab->select->quick->index))
++            table->set_keyread(TRUE);
++	  else if (!table->covering_keys.is_clear_all() &&
++		   !(tab->select && tab->select->quick))
++	  {					// Only read index tree
++	    /*
++            It has turned out that the below change, while speeding things
++            up for disk-bound loads, slows them down for cases when the data
++            is in disk cache (see BUG#35850):
++	    //  See bug #26447: "Using the clustered index for a table scan
++	    //  is always faster than using a secondary index".
++            if (table->s->primary_key != MAX_KEY &&
++                table->file->primary_key_is_clustered())
++              tab->index= table->s->primary_key;
++            else
++	    */
++              tab->index=find_shortest_key(table, & table->covering_keys);
++	    tab->read_first_record= join_read_first;
++	    tab->type=JT_NEXT;		// Read with index_first / index_next
++	  }
++	}
++      }
++      break;
++    case JT_FT:
++    case JT_SYSTEM: 
++      break;
++    default:
++      DBUG_PRINT("error",("Table type %d found",tab->type)); /* purecov: deadcode */
++      break;					/* purecov: deadcode */
++    case JT_UNKNOWN:
++    case JT_MAYBE_REF:
++      abort();					/* purecov: deadcode */
++    }
++  }
++  join->join_tab[join->tables-1].next_select=0; /* Set by do_select */
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Give an error if some tables are done with a full join.
++
++  This is used by multi_table_update and multi_table_delete when running
++  in safe mode.
++
++  @param join		Join condition
++
++  @retval
++    0	ok
++  @retval
++    1	Error (full join used)
++*/
++
++bool error_if_full_join(JOIN *join)
++{
++  for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
++       tab < end;
++       tab++)
++  {
++    if (tab->type == JT_ALL && (!tab->select || !tab->select->quick))
++    {
++      /* This error should not be ignored. */
++      join->select_lex->no_error= FALSE;
++      my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
++                 ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
++      return(1);
++    }
++  }
++  return(0);
++}
++
++
++/**
++  cleanup JOIN_TAB.
++*/
++
++void JOIN_TAB::cleanup()
++{
++  delete select;
++  select= 0;
++  delete quick;
++  quick= 0;
++  x_free(cache.buff);
++  cache.buff= 0;
++  limit= 0;
++  if (table)
++  {
++    table->set_keyread(FALSE);
++    table->file->ha_index_or_rnd_end();
++    /*
++      We need to reset this for next select
++      (Tested in part_of_refkey)
++    */
++    table->reginfo.join_tab= 0;
++  }
++  end_read_record(&read_record);
++}
++
++
++/**
++  Partially cleanup JOIN after it has executed: close index or rnd read
++  (table cursors), free quick selects.
++
++    This function is called in the end of execution of a JOIN, before the used
++    tables are unlocked and closed.
++
++    For a join that is resolved using a temporary table, the first sweep is
++    performed against actual tables and an intermediate result is inserted
++    into the temporary table.
++    The last sweep is performed against the temporary table. Therefore,
++    the base tables and associated buffers used to fill the temporary table
++    are no longer needed, and this function is called to free them.
++
++    For a join that is performed without a temporary table, this function
++    is called after all rows are sent, but before EOF packet is sent.
++
++    For a simple SELECT with no subqueries this function performs a full
++    cleanup of the JOIN and calls mysql_unlock_read_tables to free used base
++    tables.
++
++    If a JOIN is executed for a subquery or if it has a subquery, we can't
++    do the full cleanup and need to do a partial cleanup only.
++    - If a JOIN is not the top level join, we must not unlock the tables
++    because the outer select may not have been evaluated yet, and we
++    can't unlock only selected tables of a query.
++    - Additionally, if this JOIN corresponds to a correlated subquery, we
++    should not free quick selects and join buffers because they will be
++    needed for the next execution of the correlated subquery.
++    - However, if this is a JOIN for a [sub]select, which is not
++    a correlated subquery itself, but has subqueries, we can free it
++    fully and also free JOINs of all its subqueries. The exception
++    is a subquery in the SELECT list, e.g.: @n
++    SELECT a, (select max(b) from t1) group by c @n
++    This subquery will not be evaluated at first sweep and its value will
++    not be inserted into the temporary table. Instead, it's evaluated
++    when selecting from the temporary table. Therefore, it can't be freed
++    here even though it's not correlated.
++
++  @todo
++    Unlock tables even if the join isn't top level select in the tree
++*/
++
++void JOIN::join_free()
++{
++  SELECT_LEX_UNIT *tmp_unit;
++  SELECT_LEX *sl;
++  /*
++    Optimization: if not EXPLAIN and we are done with the JOIN,
++    free all tables.
++  */
++  bool full= (!select_lex->uncacheable && !thd->lex->describe);
++  bool can_unlock= full;
++  DBUG_ENTER("JOIN::join_free");
++
++  cleanup(full);
++
++  for (tmp_unit= select_lex->first_inner_unit();
++       tmp_unit;
++       tmp_unit= tmp_unit->next_unit())
++    for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
++    {
++      Item_subselect *subselect= sl->master_unit()->item;
++      bool full_local= full && (!subselect || subselect->is_evaluated());
++      /*
++        If this join is evaluated, we can fully clean it up and clean up all
++        its underlying joins even if they are correlated -- they will not be
++        used any more anyway.
++        If this join is not yet evaluated, we still must clean it up to
++        close its table cursors -- it may never get evaluated, as in case of
++        ... HAVING FALSE OR a IN (SELECT ...))
++        but all table cursors must be closed before the unlock.
++      */
++      sl->cleanup_all_joins(full_local);
++      /* Can't unlock if at least one JOIN is still needed */
++      can_unlock= can_unlock && full_local;
++    }
++
++  /*
++    We are not using tables anymore
++    Unlock all tables. We may be in an INSERT .... SELECT statement.
++  */
++  if (can_unlock && lock && thd->lock &&
++      !(select_options & SELECT_NO_UNLOCK) &&
++      !select_lex->subquery_in_having &&
++      (select_lex == (thd->lex->unit.fake_select_lex ?
++                      thd->lex->unit.fake_select_lex : &thd->lex->select_lex)))
++  {
++    /*
++      TODO: unlock tables even if the join isn't top level select in the
++      tree.
++    */
++    mysql_unlock_read_tables(thd, lock);           // Don't free join->lock
++    lock= 0;
++  }
++
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Free resources of given join.
++
++  @param full   true if we should free all resources; the call with full==1
++                should be the last one; before it this function can be
++                called with full==0
++
++  @note
++    With subquery this function definitely will be called several times,
++    but even for simple query it can be called several times.
++*/
++
++void JOIN::cleanup(bool full)
++{
++  DBUG_ENTER("JOIN::cleanup");
++
++  if (table)
++  {
++    JOIN_TAB *tab,*end;
++    /*
++      Only a sorted table may be cached.  This sorted table is always the
++      first non-const table in join->table
++    */
++    if (tables > const_tables) // Test for not-const tables
++    {
++      free_io_cache(table[const_tables]);
++      filesort_free_buffers(table[const_tables],full);
++    }
++
++    if (full)
++    {
++      for (tab= join_tab, end= tab+tables; tab != end; tab++)
++	tab->cleanup();
++      table= 0;
++    }
++    else
++    {
++      for (tab= join_tab, end= tab+tables; tab != end; tab++)
++      {
++	if (tab->table)
++          tab->table->file->ha_index_or_rnd_end();
++      }
++    }
++  }
++  /*
++    We are not using tables anymore
++    Unlock all tables. We may be in an INSERT .... SELECT statement.
++  */
++  if (full)
++  {
++    if (tmp_join)
++      tmp_table_param.copy_field= 0;
++    group_fields.delete_elements();
++    /* 
++      Ensure that the above delete_elements() would not be called
++      twice for the same list.
++    */
++    if (tmp_join && tmp_join != this)
++      tmp_join->group_fields= group_fields;
++    /*
++      We can't call delete_elements() on copy_funcs as this will cause
++      problems in free_elements() as some of the elements are then deleted.
++    */
++    tmp_table_param.copy_funcs.empty();
++    /*
++      If we have tmp_join and 'this' JOIN is not tmp_join and
++      their tmp_table_param.copy_field pointers are equal, then we have to remove
++      the pointer to tmp_table_param.copy_field from tmp_join, because it will
++      be removed in tmp_table_param.cleanup().
++    */
++    if (tmp_join &&
++        tmp_join != this &&
++        tmp_join->tmp_table_param.copy_field ==
++        tmp_table_param.copy_field)
++    {
++      tmp_join->tmp_table_param.copy_field=
++        tmp_join->tmp_table_param.save_copy_field= 0;
++    }
++    tmp_table_param.cleanup();
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Remove the following expressions from ORDER BY and GROUP BY:
++  Constant expressions @n
++  Expressions that only use tables of type EQ_REF where the reference
++  is in the ORDER list, or where all referred tables are of the above type.
++
++  In the following, the X field can be removed:
++  @code
++  SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t1.a,t2.X
++  SELECT * FROM t1,t2,t3 WHERE t1.a=t2.a AND t2.b=t3.b ORDER BY t1.a,t3.X
++  @endcode
++
++  These can't be optimized:
++  @code
++  SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t2.X,t1.a
++  SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=t2.b ORDER BY t1.a,t2.c
++  SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t2.b,t1.a
++  @endcode
++*/
++
++static bool
++eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab)
++{
++  if (tab->cached_eq_ref_table)			// If cached
++    return tab->eq_ref_table;
++  tab->cached_eq_ref_table=1;
++  /* We can skip const tables only if not an outer table */
++  if (tab->type == JT_CONST && !tab->first_inner)
++    return (tab->eq_ref_table=1);		/* purecov: inspected */
++  if (tab->type != JT_EQ_REF || tab->table->maybe_null)
++    return (tab->eq_ref_table=0);		// We must use this
++  Item **ref_item=tab->ref.items;
++  Item **end=ref_item+tab->ref.key_parts;
++  uint found=0;
++  table_map map=tab->table->map;
++
++  for (; ref_item != end ; ref_item++)
++  {
++    if (! (*ref_item)->const_item())
++    {						// Not a const ref
++      ORDER *order;
++      for (order=start_order ; order ; order=order->next)
++      {
++	if ((*ref_item)->eq(order->item[0],0))
++	  break;
++      }
++      if (order)
++      {
++        if (!(order->used & map))
++        {
++          found++;
++          order->used|= map;
++        }
++	continue;				// Used in ORDER BY
++      }
++      if (!only_eq_ref_tables(join,start_order, (*ref_item)->used_tables()))
++	return (tab->eq_ref_table=0);
++    }
++  }
++  /* Check that there was no reference to table before sort order */
++  for (; found && start_order ; start_order=start_order->next)
++  {
++    if (start_order->used & map)
++    {
++      found--;
++      continue;
++    }
++    if (start_order->depend_map & map)
++      return (tab->eq_ref_table=0);
++  }
++  return tab->eq_ref_table=1;
++}
++
++
++static bool
++only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables)
++{
++  if (specialflag &  SPECIAL_SAFE_MODE)
++    return 0;			// skip this optimize /* purecov: inspected */
++  tables&= ~PSEUDO_TABLE_BITS;
++  for (JOIN_TAB **tab=join->map2table ; tables ; tab++, tables>>=1)
++  {
++    if (tables & 1 && !eq_ref_table(join, order, *tab))
++      return 0;
++  }
++  return 1;
++}
++
++
++/** Update the dependency map for the tables. */
++
++static void update_depend_map(JOIN *join)
++{
++  JOIN_TAB *join_tab=join->join_tab, *end=join_tab+join->tables;
++
++  for (; join_tab != end ; join_tab++)
++  {
++    TABLE_REF *ref= &join_tab->ref;
++    table_map depend_map=0;
++    Item **item=ref->items;
++    uint i;
++    for (i=0 ; i < ref->key_parts ; i++,item++)
++      depend_map|=(*item)->used_tables();
++    ref->depend_map=depend_map & ~OUTER_REF_TABLE_BIT;
++    depend_map&= ~OUTER_REF_TABLE_BIT;
++    for (JOIN_TAB **tab=join->map2table;
++	 depend_map ;
++	 tab++,depend_map>>=1 )
++    {
++      if (depend_map & 1)
++	ref->depend_map|=(*tab)->ref.depend_map;
++    }
++  }
++}
++
++
++/** Update the dependency map for the sort order. */
++
++static void update_depend_map(JOIN *join, ORDER *order)
++{
++  for (; order ; order=order->next)
++  {
++    table_map depend_map;
++    order->item[0]->update_used_tables();
++    order->depend_map=depend_map=order->item[0]->used_tables();
++    order->used= 0;
++    // Not item_sum(), RAND() and no reference to table outside of sub select
++    if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))
++        && !order->item[0]->with_sum_func)
++    {
++      for (JOIN_TAB **tab=join->map2table;
++	   depend_map ;
++	   tab++, depend_map>>=1)
++      {
++	if (depend_map & 1)
++	  order->depend_map|=(*tab)->ref.depend_map;
++      }
++    }
++  }
++}
++
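++/*
++  A standalone sketch of the bitmap bookkeeping in update_depend_map() above:
++  each table is one bit in a 64-bit table_map, the dependency map of a ref is
++  the OR of the tables its key items use, and the dependency maps of those
++  tables are then folded in. The concrete maps below are invented.
++*/
++#include <cstdint>
++#include <iostream>
++#include <vector>
++
++using table_map = std::uint64_t;
++
++int main() {
++  // depend[i] = tables that table i's own ref access depends on
++  std::vector<table_map> depend = {0, 1ULL << 0, 1ULL << 1};  // t1: -, t2: {t1}, t3: {t2}
++  table_map ref_map = 1ULL << 2;          // a ref built from a t3 column
++  table_map m = ref_map;
++  for (std::size_t i = 0; m; ++i, m >>= 1)
++    if (m & 1)
++      ref_map |= depend[i];               // fold in that table's dependencies
++  std::cout << std::hex << "0x" << ref_map << "\n";  // 0x6: depends on t3 and t2
++}
++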
++
++/**
++  Remove all constants and check if ORDER only contains simple
++  expressions.
++
++  simple_order is set to 1 if sort_order only uses fields from head table
++  and the head table is not a LEFT JOIN table.
++
++  @param join			Join handler
++  @param first_order		List of SORT or GROUP order
++  @param cond			WHERE statement
++  @param change_list		Set to 1 if we should remove things from list.
++                               If this is not set, then only simple_order is
++                               calculated.
++  @param simple_order		Set to 1 if we are only using simple expressions
++
++  @return
++    Returns new sort order
++*/
++
++static ORDER *
++remove_const(JOIN *join,ORDER *first_order, COND *cond,
++             bool change_list, bool *simple_order)
++{
++  if (join->tables == join->const_tables)
++    return change_list ? 0 : first_order;		// No need to sort
++
++  ORDER *order,**prev_ptr;
++  table_map first_table= join->join_tab[join->const_tables].table->map;
++  table_map not_const_tables= ~join->const_table_map;
++  table_map ref;
++  DBUG_ENTER("remove_const");
++
++  prev_ptr= &first_order;
++  *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 0 : 1;
++
++  /* NOTE: A variable of not_const_tables ^ first_table; breaks gcc 2.7 */
++
++  update_depend_map(join, first_order);
++  for (order=first_order; order ; order=order->next)
++  {
++    table_map order_tables=order->item[0]->used_tables();
++    if (order->item[0]->with_sum_func ||
++        /*
++          If the outer table of an outer join is const (either by itself or
++          after applying WHERE condition), grouping on a field from such a
++          table will be optimized away and filesort without temporary table
++          will be used unless we prevent that now. Filesort is not fit to
++          handle joins and the join condition is not applied. We can't detect
++          the case without an expensive test, however, so we force temporary
++          table for all queries containing more than one table, ROLLUP, and an
++          outer join.
++         */
++        (join->tables > 1 && join->rollup.state == ROLLUP::STATE_INITED &&
++        join->outer_join))
++      *simple_order=0;				// Must do a temp table to sort
++    else if (!(order_tables & not_const_tables))
++    {
++      if (order->item[0]->with_subselect && 
++          !(join->select_lex->options & SELECT_DESCRIBE))
++        order->item[0]->val_str(&order->item[0]->str_value);
++      DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
++      continue;					// skip const item
++    }
++    else
++    {
++      if (order_tables & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT))
++	*simple_order=0;
++      else
++      {
++	Item *comp_item=0;
++	if (cond && const_expression_in_where(cond,order->item[0], &comp_item))
++	{
++	  DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
++	  continue;
++	}
++	if ((ref=order_tables & (not_const_tables ^ first_table)))
++	{
++	  if (!(order_tables & first_table) &&
++              only_eq_ref_tables(join,first_order, ref))
++	  {
++	    DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
++	    continue;
++	  }
++	  *simple_order=0;			// Must do a temp table to sort
++	}
++      }
++    }
++    if (change_list)
++      *prev_ptr= order;				// use this entry
++    prev_ptr= &order->next;
++  }
++  if (change_list)
++    *prev_ptr=0;
++  if (prev_ptr == &first_order)			// Nothing to sort/group
++    *simple_order=1;
++  DBUG_PRINT("exit",("simple_order: %d",(int) *simple_order));
++  DBUG_RETURN(first_order);
++}
++
++
++static int
++return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
++		 List<Item> &fields, bool send_row, ulonglong select_options,
++		 const char *info, Item *having)
++{
++  DBUG_ENTER("return_zero_rows");
++
++  if (select_options & SELECT_DESCRIBE)
++  {
++    select_describe(join, FALSE, FALSE, FALSE, info);
++    DBUG_RETURN(0);
++  }
++
++  join->join_free();
++
++  if (send_row)
++  {
++    for (TABLE_LIST *table= tables; table; table= table->next_leaf)
++      mark_as_null_row(table->table);		// All fields are NULL
++    if (having && having->val_int() == 0)
++      send_row=0;
++  }
++  if (!(result->send_fields(fields,
++                              Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)))
++  {
++    bool send_error= FALSE;
++    if (send_row)
++    {
++      List_iterator_fast<Item> it(fields);
++      Item *item;
++      while ((item= it++))
++	item->no_rows_in_result();
++      send_error= result->send_data(fields);
++    }
++    if (!send_error)
++      result->send_eof();				// Should be safe
++  }
++  /* Update results for FOUND_ROWS */
++  join->thd->limit_found_rows= join->thd->examined_row_count= 0;
++  DBUG_RETURN(0);
++}
++
++/*
++  used only in JOIN::clear
++*/
++static void clear_tables(JOIN *join)
++{
++  /* 
++    must clear only the non-const tables, as const tables
++    are not re-calculated.
++  */
++  for (uint i=join->const_tables ; i < join->tables ; i++)
++    mark_as_null_row(join->table[i]);		// All fields are NULL
++}
++
++/*****************************************************************************
++  Make some simple condition optimizations:
++  If there is a test 'field = const', change all refs to 'field' to 'const'.
++  Remove all dummy tests 'item = item', 'const op const'.
++  Remove all 'item is NULL', when item can never be null!
++  item->marker should be 0 for all items on entry
++  Return in cond_value FALSE if condition is impossible (1 = 2)
++*****************************************************************************/
++
++class COND_CMP :public ilink {
++public:
++  static void *operator new(size_t size)
++  {
++    return (void*) sql_alloc((uint) size);
++  }
++  static void operator delete(void *ptr __attribute__((unused)),
++                              size_t size __attribute__((unused)))
++  { TRASH(ptr, size); }
++
++  Item *and_level;
++  Item_func *cmp_func;
++  COND_CMP(Item *a,Item_func *b) :and_level(a),cmp_func(b) {}
++};
++
++#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
++template class I_List<COND_CMP>;
++template class I_List_iterator<COND_CMP>;
++template class List<Item_func_match>;
++template class List_iterator<Item_func_match>;
++#endif
++
++
++/**
++  Find the multiple equality predicate containing a field.
++
++  The function retrieves the multiple equalities accessed through
++  the con_equal structure from current level and up looking for
++  an equality containing field. It stops retrieval as soon as the equality
++  is found and set up inherited_fl to TRUE if it's found on upper levels.
++
++  @param cond_equal          multiple equalities to search in
++  @param field               field to look for
++  @param[out] inherited_fl   set up to TRUE if multiple equality is found
++                             on upper levels (not on current level of
++                             cond_equal)
++
++  @return
++    - Item_equal for the found multiple equality predicate if a success;
++    - NULL otherwise.
++*/
++
++Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field,
++                            bool *inherited_fl)
++{
++  Item_equal *item= 0;
++  bool in_upper_level= FALSE;
++  while (cond_equal)
++  {
++    List_iterator_fast<Item_equal> li(cond_equal->current_level);
++    while ((item= li++))
++    {
++      if (item->contains(field))
++        goto finish;
++    }
++    in_upper_level= TRUE;
++    cond_equal= cond_equal->upper_levels;
++  }
++  in_upper_level= FALSE;
++finish:
++  *inherited_fl= in_upper_level;
++  return item;
++}
++
++  
++/**
++  Check whether an equality can be used to build multiple equalities.
++
++    This function first checks whether the equality (left_item=right_item)
++    is a simple equality i.e. the one that equates a field with another field
++    or a constant (field=field_item or field=const_item).
++    If this is the case the function looks for a multiple equality
++    in the lists referenced directly or indirectly by cond_equal inferring
++    the given simple equality. If it doesn't find any, it builds a multiple
++    equality that covers the predicate, i.e. the predicate can be inferred
++    from this multiple equality.
++    The built multiple equality could be obtained in such a way:
++    create a binary multiple equality equivalent to the predicate, then
++    merge it, if possible, with one of the old multiple equalities.
++    This guarantees that the set of multiple equalities covering equality
++    predicates will be minimal.
++
++  EXAMPLE:
++    For the where condition
++    @code
++      WHERE a=b AND b=c AND
++            (b=2 OR f=e)
++    @endcode
++    the check_equality will be called for the following equality
++    predicates a=b, b=c, b=2 and f=e.
++    - For a=b it will be called with *cond_equal=(0,[]) and will transform
++      *cond_equal into (0,[Item_equal(a,b)]). 
++    - For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)])
++      and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]).
++    - For b=2 it will be called with *cond_equal=(ptr(CE),[])
++      and will transform *cond_equal into (ptr(CE),[Item_equal(2,a,b,c)]).
++    - For f=e it will be called with *cond_equal=(ptr(CE), [])
++      and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]).
++
++  @note
++    Now only fields that have the same type definitions (verified by
++    the Field::eq_def method) are placed to the same multiple equalities.
++    Because of this some equality predicates are not eliminated and
++    can be used in the constant propagation procedure.
++    We could weaken the equality test as soon as at least one of the
++    equal fields is to be equal to a constant. It would require a 
++    more complicated implementation: we would have to store, in
++    general case, its own constant for each fields from the multiple
++    equality. But at the same time it would allow us to get rid
++    of constant propagation completely: it would be done by the call
++    to build_equal_items_for_cond.
++
++
++    The implementation does not follow exactly the above rules to
++    build a new multiple equality for the equality predicate.
++    If it processes the equality of the form field1=field2, it
++    looks for multiple equalities me1 containing field1 and me2 containing
++    field2. If only one of them is found, the function expands it with
++    the lacking field. If multiple equalities for both fields are
++    found they are merged. If both searches fail a new multiple equality
++    containing just field1 and field2 is added to the existing
++    multiple equalities.
++    If the function processes the predicate of the form field1=const,
++    it looks for a multiple equality containing field1. If found, the 
++    function checks the constant of the multiple equality. If the value
++    is unknown, it is set to const. Otherwise the value is compared with
++    const and the evaluation of the equality predicate is performed.
++    When expanding/merging equality predicates from the upper levels
++    the function first copies them for the current level. It looks
++    acceptable, as this happens rarely. The implementation without
++    copying would be much more complicated.
++
++  @param left_item   left term of the equality to be checked
++  @param right_item  right term of the equality to be checked
++  @param item        equality item if the equality originates from a condition
++                     predicate, 0 if the equality is the result of row
++                     elimination
++  @param cond_equal  multiple equalities that must hold together with the
++                     equality
++
++  @retval
++    TRUE    if the predicate is a simple equality predicate to be used
++    for building multiple equalities
++  @retval
++    FALSE   otherwise
++*/
++
++static bool check_simple_equality(Item *left_item, Item *right_item,
++                                  Item *item, COND_EQUAL *cond_equal)
++{
++  if (left_item->type() == Item::REF_ITEM &&
++      ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF)
++  {
++    if (((Item_ref*)left_item)->depended_from)
++      return FALSE;
++    left_item= left_item->real_item();
++  }
++  if (right_item->type() == Item::REF_ITEM &&
++      ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF)
++  {
++    if (((Item_ref*)right_item)->depended_from)
++      return FALSE;
++    right_item= right_item->real_item();
++  }
++  if (left_item->type() == Item::FIELD_ITEM &&
++      right_item->type() == Item::FIELD_ITEM &&
++      !((Item_field*)left_item)->depended_from &&
++      !((Item_field*)right_item)->depended_from)
++  {
++    /* The predicate of the form field1=field2 is processed */
++
++    Field *left_field= ((Item_field*) left_item)->field;
++    Field *right_field= ((Item_field*) right_item)->field;
++
++    if (!left_field->eq_def(right_field))
++      return FALSE;
++
++    /* Search for multiple equalities containing field1 and/or field2 */
++    bool left_copyfl, right_copyfl;
++    Item_equal *left_item_equal=
++               find_item_equal(cond_equal, left_field, &left_copyfl);
++    Item_equal *right_item_equal= 
++               find_item_equal(cond_equal, right_field, &right_copyfl);
++
++    /* As (NULL=NULL) != TRUE we can't just remove the predicate f=f */
++    if (left_field->eq(right_field)) /* f = f */
++      return (!(left_field->maybe_null() && !left_item_equal)); 
++
++    if (left_item_equal && left_item_equal == right_item_equal)
++    {
++      /* 
++        The equality predicate is inference of one of the existing
++        multiple equalities, i.e the condition is already covered
++        by upper level equalities
++      */
++       return TRUE;
++    }
++      
++    /* Copy the found multiple equalities at the current level if needed */
++    if (left_copyfl)
++    {
++      /* left_item_equal of an upper level contains left_item */
++      left_item_equal= new Item_equal(left_item_equal);
++      cond_equal->current_level.push_back(left_item_equal);
++    }
++    if (right_copyfl)
++    {
++      /* right_item_equal of an upper level contains right_item */
++      right_item_equal= new Item_equal(right_item_equal);
++      cond_equal->current_level.push_back(right_item_equal);
++    }
++
++    if (left_item_equal)
++    { 
++      /* left item was found in the current or one of the upper levels */
++      if (! right_item_equal)
++        left_item_equal->add((Item_field *) right_item);
++      else
++      {
++        /* Merge two multiple equalities forming a new one */
++        left_item_equal->merge(right_item_equal);
++        /* Remove the merged multiple equality from the list */
++        List_iterator<Item_equal> li(cond_equal->current_level);
++        while ((li++) != right_item_equal) ;
++        li.remove();
++      }
++    }
++    else
++    { 
++      /* left item was found neither in the current nor in the upper levels */
++      if (right_item_equal)
++        right_item_equal->add((Item_field *) left_item);
++      else 
++      {
++        /* None of the fields was found in multiple equalities */
++        Item_equal *item_equal= new Item_equal((Item_field *) left_item,
++                                               (Item_field *) right_item);
++        cond_equal->current_level.push_back(item_equal);
++      }
++    }
++    return TRUE;
++  }
++
++  {
++    /* The predicate of the form field=const/const=field is processed */
++    Item *const_item= 0;
++    Item_field *field_item= 0;
++    if (left_item->type() == Item::FIELD_ITEM &&
++        !((Item_field*)left_item)->depended_from &&
++        right_item->const_item())
++    {
++      field_item= (Item_field*) left_item;
++      const_item= right_item;
++    }
++    else if (right_item->type() == Item::FIELD_ITEM &&
++             !((Item_field*)right_item)->depended_from &&
++             left_item->const_item())
++    {
++      field_item= (Item_field*) right_item;
++      const_item= left_item;
++    }
++
++    if (const_item &&
++        field_item->result_type() == const_item->result_type())
++    {
++      bool copyfl;
++
++      if (field_item->result_type() == STRING_RESULT)
++      {
++        CHARSET_INFO *cs= ((Field_str*) field_item->field)->charset();
++        if (!item)
++        {
++          Item_func_eq *eq_item;
++          if (!(eq_item= new Item_func_eq(left_item, right_item)))
++            return FALSE;
++          eq_item->set_cmp_func();
++          eq_item->quick_fix_field();
++          item= eq_item;
++        }  
++        if ((cs != ((Item_func *) item)->compare_collation()) ||
++            !cs->coll->propagate(cs, 0, 0))
++          return FALSE;
++      }
++
++      Item_equal *item_equal = find_item_equal(cond_equal,
++                                               field_item->field, &copyfl);
++      if (copyfl)
++      {
++        item_equal= new Item_equal(item_equal);
++        cond_equal->current_level.push_back(item_equal);
++      }
++      if (item_equal)
++      {
++        /* 
++          The flag cond_false will be set to 1 after this, if item_equal
++          already contains a constant and its value is  not equal to
++          the value of const_item.
++        */
++        item_equal->add(const_item, field_item);
++      }
++      else
++      {
++        item_equal= new Item_equal(const_item, field_item);
++        cond_equal->current_level.push_back(item_equal);
++      }
++      return TRUE;
++    }
++  }
++  return FALSE;
++}
++
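++/*
++  A standalone sketch (much simpler than Item_equal, all names invented) of how
++  the simple equalities from the comment above (a=b, b=c, b=2, f=e) grow and
++  merge "multiple equality" classes in check_simple_equality().
++*/
++#include <iostream>
++#include <optional>
++#include <set>
++#include <string>
++#include <vector>
++
++struct MultEqual { std::set<std::string> fields; std::optional<int> constant; };
++
++static std::vector<MultEqual> classes;
++
++static MultEqual *find_class(const std::string &f) {
++  for (MultEqual &c : classes)
++    if (c.fields.count(f)) return &c;
++  return nullptr;
++}
++
++static void add_field_eq(const std::string &a, const std::string &b) {
++  MultEqual *ca = find_class(a), *cb = find_class(b);
++  if (ca && cb && ca != cb) {                        // merge two classes
++    ca->fields.insert(cb->fields.begin(), cb->fields.end());
++    if (!ca->constant) ca->constant = cb->constant;
++    cb->fields.clear();                              // leave the merged-away class empty
++  } else if (ca)      ca->fields.insert(b);          // expand with the missing field
++  else if (cb)        cb->fields.insert(a);
++  else                classes.push_back({{a, b}, std::nullopt});
++}
++
++static void add_const_eq(const std::string &a, int v) {
++  if (MultEqual *c = find_class(a)) c->constant = v;
++  else classes.push_back({{a}, v});
++}
++
++int main() {
++  add_field_eq("a", "b");                            // {a,b}
++  add_field_eq("b", "c");                            // {a,b,c}
++  add_const_eq("b", 2);                              // {a,b,c} = 2
++  add_field_eq("f", "e");                            // {e,f}
++  for (const MultEqual &c : classes) {
++    if (c.fields.empty()) continue;
++    for (const std::string &f : c.fields) std::cout << f << ' ';
++    if (c.constant) std::cout << "= " << *c.constant;
++    std::cout << "\n";
++  }
++}
++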
++
++/**
++  Convert row equalities into a conjunction of regular equalities.
++
++    The function converts a row equality of the form (E1,...,En)=(E'1,...,E'n)
++    into a list of equalities E1=E'1,...,En=E'n. For each of these equalities
++    Ei=E'i the function checks whether it is a simple equality or a row
++    equality. If it is a simple equality it is used to expand multiple
++    equalities of cond_equal. If it is a row equality it converted to a
++    sequence of equalities between row elements. If Ei=E'i is neither a
++    simple equality nor a row equality the item for this predicate is added
++    to eq_list.
++
++  @param thd        thread handle
++  @param left_row   left term of the row equality to be processed
++  @param right_row  right term of the row equality to be processed
++  @param cond_equal multiple equalities that must hold together with the
++                    predicate
++  @param eq_list    results of conversions of row equalities that are not
++                    simple enough to form multiple equalities
++
++  @retval
++    TRUE    if conversion has succeeded (no fatal error)
++  @retval
++    FALSE   otherwise
++*/
++ 
++static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row,
++                               COND_EQUAL *cond_equal, List<Item>* eq_list)
++{ 
++  uint n= left_row->cols();
++  for (uint i= 0 ; i < n; i++)
++  {
++    bool is_converted;
++    Item *left_item= left_row->element_index(i);
++    Item *right_item= right_row->element_index(i);
++    if (left_item->type() == Item::ROW_ITEM &&
++        right_item->type() == Item::ROW_ITEM)
++    {
++      is_converted= check_row_equality(thd, 
++                                       (Item_row *) left_item,
++                                       (Item_row *) right_item,
++			               cond_equal, eq_list);
++      if (!is_converted)
++        thd->lex->current_select->cond_count++;      
++    }
++    else
++    { 
++      is_converted= check_simple_equality(left_item, right_item, 0, cond_equal);
++      thd->lex->current_select->cond_count++;
++    }  
++ 
++    if (!is_converted)
++    {
++      Item_func_eq *eq_item;
++      if (!(eq_item= new Item_func_eq(left_item, right_item)))
++        return FALSE;
++      eq_item->set_cmp_func();
++      eq_item->quick_fix_field();
++      eq_list->push_back(eq_item);
++    }
++  }
++  return TRUE;
++}
++
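++/*
++  A small sketch of the row-equality conversion above, with hypothetical
++  columns (an assumption for illustration only):
++
++    WHERE (t1.a, t1.b) = (t2.a, 5)
++
++  is processed element-wise into t1.a=t2.a and t1.b=5, and each of these
++  is then handled as a simple equality.
++*/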
++
++/**
++  Eliminate row equalities and form multiple equalities predicates.
++
++    This function checks whether the item is a simple equality
++    i.e. the one that equates a field with another field or a constant
++    (field=field_item or field=constant_item), or, a row equality.
++    For a simple equality the function looks for a multiple equality
++    in the lists referenced directly or indirectly by cond_equal inferring
++    the given simple equality. If it doesn't find any, it builds/expands
++    multiple equality that covers the predicate.
++    Row equalities are eliminated, being substituted by conjunctions of
++    regular equalities which are treated in the same way as the original
++    equality predicates.
++
++  @param thd        thread handle
++  @param item       predicate to process
++  @param cond_equal multiple equalities that must hold together with the
++                    predicate
++  @param eq_list    results of conversions of row equalities that are not
++                    simple enough to form multiple equalities
++
++  @retval
++    TRUE   if re-writing rules have been applied
++  @retval
++    FALSE  otherwise, i.e.
++           if the predicate is not an equality,
++           or, if the equality is neither a simple one nor a row equality,
++           or, if the procedure fails by a fatal error.
++*/
++
++static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
++                           List<Item> *eq_list)
++{
++  if (item->type() == Item::FUNC_ITEM &&
++         ((Item_func*) item)->functype() == Item_func::EQ_FUNC)
++  {
++    Item *left_item= ((Item_func*) item)->arguments()[0];
++    Item *right_item= ((Item_func*) item)->arguments()[1];
++
++    if (left_item->type() == Item::ROW_ITEM &&
++        right_item->type() == Item::ROW_ITEM)
++    {
++      thd->lex->current_select->cond_count--;
++      return check_row_equality(thd,
++                                (Item_row *) left_item,
++                                (Item_row *) right_item,
++                                cond_equal, eq_list);
++    }
++    else 
++      return check_simple_equality(left_item, right_item, item, cond_equal);
++  } 
++  return FALSE;
++}
++
++                          
++/**
++  Replace all equality predicates in a condition by multiple equality items.
++
++    At each 'and' level the function detects items for equality predicates
++    and replaces them by a set of multiple equality items of class Item_equal,
++    taking into account inherited equalities from upper levels. 
++    If an equality predicate is not used in a conjunction it's just
++    replaced by a multiple equality predicate.
++    For each 'and' level the function sets a pointer to the inherited
++    multiple equalities in the cond_equal field of the associated
++    object of the type Item_cond_and.   
++    The function also traverses the cond tree and for each field reference
++    sets a pointer to the multiple equality item containing the field, if there
++    is any. If this multiple equality equates fields to a constant the
++    function replaces the field reference by the constant in the cases 
++    when the field is not of a string type or when the field reference is
++    just an argument of a comparison predicate.
++    The function also determines the maximum number of members in 
++    equality lists of each Item_cond_and object assigning it to
++    thd->lex->current_select->max_equal_elems.
++
++  @note
++    Multiple equality predicate =(f1,..fn) is equivalent to the conjunction of
++    f1=f2, .., fn-1=fn. It substitutes any inference from these
++    equality predicates that is equivalent to the conjunction.
++    Thus, =(a1,a2,a3) can substitute for ((a1=a3) AND (a2=a3) AND (a2=a1)) as
++    it is equivalent to ((a1=a2) AND (a2=a3)).
++    The function always substitutes all equality predicates occurring
++    in a conjunction with a minimal set of multiple equality predicates.
++    This set can be considered as a canonical representation of the
++    sub-conjunction of the equality predicates.
++    E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by 
++    (=(t1.a,t2.b,t3.c) AND t2.b>5), not by
++    (=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5);
++    while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by
++    (=(t1.a,t2.b) AND =(t3.c=t4.d) AND t2.b>5),
++    but if additionally =(t4.d,t2.b) is inherited, it
++    will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5)
++
++    The function performs the substitution in a recursive descent of
++    the condition tree, passing to the next AND level a chain of multiple
++    equality predicates which have been built at the upper levels.
++    The Item_equal items built at the level are attached to other 
++    non-equality conjuncts as a sublist. The pointer to the inherited
++    multiple equalities is saved in the and condition object (Item_cond_and).
++    This chain allows us, for any field reference occurrence, to easily find a 
++    multiple equality that must hold for this occurrence.
++    For each AND level we do the following:
++    - scan it for all equality predicate (=) items
++    - join them into disjoint Item_equal() groups
++    - process the included OR conditions recursively to do the same for 
++      lower AND levels. 
++
++    We need to do things in this order as lower AND levels need to know about
++    all possible Item_equal objects in upper levels.
++
++  @param thd        thread handle
++  @param cond       condition(expression) where to make replacement
++  @param inherited  path to all inherited multiple equality items
++
++  @return
++    pointer to the transformed condition
++*/
++
++static COND *build_equal_items_for_cond(THD *thd, COND *cond,
++                                        COND_EQUAL *inherited)
++{
++  Item_equal *item_equal;
++  COND_EQUAL cond_equal;
++  cond_equal.upper_levels= inherited;
++
++  if (cond->type() == Item::COND_ITEM)
++  {
++    List<Item> eq_list;
++    bool and_level= ((Item_cond*) cond)->functype() ==
++      Item_func::COND_AND_FUNC;
++    List<Item> *args= ((Item_cond*) cond)->argument_list();
++    
++    List_iterator<Item> li(*args);
++    Item *item;
++
++    if (and_level)
++    {
++      /*
++         Retrieve all conjuncts of this level, detecting the equalities
++         that are subject to substitution by multiple equality items and
++         removing each such predicate from the conjunction after having 
++         found/created a multiple equality from which the predicate can be inferred.
++     */      
++      while ((item= li++))
++      {
++        /*
++          PS/SP note: we can safely remove a node from AND-OR
++          structure here because it's restored before each
++          re-execution of any prepared statement/stored procedure.
++        */
++        if (check_equality(thd, item, &cond_equal, &eq_list))
++          li.remove();
++      }
++
++      /*
++        Check if we eliminated all the predicates of the level, e.g.
++        (a=a AND b=b AND a=a).
++      */
++      if (!args->elements && 
++          !cond_equal.current_level.elements && 
++          !eq_list.elements)
++        return new Item_int((longlong) 1, 1);
++
++      List_iterator_fast<Item_equal> it(cond_equal.current_level);
++      while ((item_equal= it++))
++      {
++        item_equal->fix_length_and_dec();
++        item_equal->update_used_tables();
++        set_if_bigger(thd->lex->current_select->max_equal_elems,
++                      item_equal->members());  
++      }
++
++      ((Item_cond_and*)cond)->cond_equal= cond_equal;
++      inherited= &(((Item_cond_and*)cond)->cond_equal);
++    }
++    /*
++       Make replacement of equality predicates for lower levels
++       of the condition expression.
++    */
++    li.rewind();
++    while ((item= li++))
++    { 
++      Item *new_item;
++      if ((new_item= build_equal_items_for_cond(thd, item, inherited)) != item)
++      {
++        /* This replacement happens only for standalone equalities */
++        /*
++          This is ok with PS/SP as the replacement is done for
++          arguments of an AND/OR item, which are restored for each
++          execution of PS/SP.
++        */
++        li.replace(new_item);
++      }
++    }
++    if (and_level)
++    {
++      args->concat(&eq_list);
++      args->concat((List<Item> *)&cond_equal.current_level);
++    }
++  }
++  else if (cond->type() == Item::FUNC_ITEM)
++  {
++    List<Item> eq_list;
++    /*
++      If an equality predicate forms the whole and level,
++      we call it a standalone equality and it's processed here.
++      E.g. in the following where condition
++      WHERE a=5 AND (b=5 or a=c)
++      (b=5) and (a=c) are standalone equalities.
++      In general we can't leave standalone equalities alone:
++      for WHERE a=b AND c=d AND (b=c OR d=5)
++      b=c is replaced by =(a,b,c,d).  
++     */
++    if (check_equality(thd, cond, &cond_equal, &eq_list))
++    {
++      int n= cond_equal.current_level.elements + eq_list.elements;
++      if (n == 0)
++        return new Item_int((longlong) 1,1);
++      else if (n == 1)
++      {
++        if ((item_equal= cond_equal.current_level.pop()))
++        {
++          item_equal->fix_length_and_dec();
++          item_equal->update_used_tables();
++          set_if_bigger(thd->lex->current_select->max_equal_elems,
++                        item_equal->members());  
++          return item_equal;
++	}
++
++        return eq_list.pop();
++      }
++      else
++      {
++        /* 
++          Here a new AND level must be created. It can happen only
++          when a row equality is processed as a standalone predicate.
++	*/
++        Item_cond_and *and_cond= new Item_cond_and(eq_list);
++        and_cond->quick_fix_field();
++        List<Item> *args= and_cond->argument_list();
++        List_iterator_fast<Item_equal> it(cond_equal.current_level);
++        while ((item_equal= it++))
++        {
++          item_equal->fix_length_and_dec();
++          item_equal->update_used_tables();
++          set_if_bigger(thd->lex->current_select->max_equal_elems,
++                        item_equal->members());  
++        }
++        and_cond->cond_equal= cond_equal;
++        args->concat((List<Item> *)&cond_equal.current_level);
++        
++        return and_cond;
++      }
++    }
++    /* 
++      For each field reference in cond, not from equal item predicates,
++      set a pointer to the multiple equality it belongs to (if there is any)
++      as long as the field is not of a string type or the field reference is
++      an argument of a comparison predicate. 
++    */ 
++    uchar *is_subst_valid= (uchar *) 1;
++    cond= cond->compile(&Item::subst_argument_checker,
++                        &is_subst_valid, 
++                        &Item::equal_fields_propagator,
++                        (uchar *) inherited);
++    cond->update_used_tables();
++  }
++  return cond;
++}
++
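++/*
++  A hedged sketch of the standalone row-equality branch above (hypothetical
++  column names a..f, chosen only for illustration):
++
++    WHERE a=b AND ((c,d) = (e,1) OR f > 0)
++
++  The row equality inside the OR is standalone; it is split into c=e and
++  d=1, and since more than one predicate results, a new Item_cond_and level
++  holding =(c,e) and =(1,d) is returned in its place.
++*/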
++
++/**
++  Build multiple equalities for a condition and all on expressions that
++  inherit these multiple equalities.
++
++    The function first applies the build_equal_items_for_cond function
++    to build all multiple equalities for condition cond utilizing equalities
++    referred through the parameter inherited. The extended set of
++    equalities is returned in the structure referred by the cond_equal_ref
++    parameter. After this the function calls itself recursively for
++    all on expressions whose direct references can be found in join_list
++    and which directly inherit the multiple equalities just built.
++
++  @note
++    The on expression used in an outer join operation inherits all equalities
++    from the on expression of the embedding join, if there is any, or
++    otherwise - from the where condition.
++    This fact is not obvious, but presumably can be proved.
++    Consider the following query:
++    @code
++      SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t2.a=t4.a
++        WHERE t1.a=t2.a;
++    @endcode
++    If the on expression in the query inherits =(t1.a,t2.a), then we
++    can build the multiple equality =(t1.a,t2.a,t3.a,t4.a) that infers
++    the equality t3.a=t4.a. Although the on expression
++    t1.a=t3.a AND t2.a=t4.a AND t3.a=t4.a is not equivalent to the one
++    in the query the latter can be replaced by the former: the new query
++    will return the same result set as the original one.
++
++    It is interesting that the multiple equality =(t1.a,t2.a,t3.a,t4.a) allows us
++    to use t1.a=t3.a AND t3.a=t4.a under the on condition:
++    @code
++      SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a
++        WHERE t1.a=t2.a
++    @endcode
++    This query is equivalent to:
++    @code
++      SELECT * FROM (t1 LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a),t2
++        WHERE t1.a=t2.a
++    @endcode
++    Similarly the original query can be rewritten to the query:
++    @code
++      SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t2.a=t4.a AND t3.a=t4.a
++        WHERE t1.a=t2.a
++    @endcode
++    that is equivalent to:   
++    @code
++      SELECT * FROM (t2 LEFT JOIN (t3,t4)ON t2.a=t4.a AND t3.a=t4.a), t1
++        WHERE t1.a=t2.a
++    @endcode
++    Thus, applying equalities from the where condition we basically
++    can get more freedom in performing join operations.
++    Although we don't use this property now, it probably makes sense to use 
++    it in the future.    
++  @param thd		      Thread handler
++  @param cond                condition to build the multiple equalities for
++  @param inherited           path to all inherited multiple equality items
++  @param join_list           list of join tables to which the condition
++                             refers to
++  @param[out] cond_equal_ref pointer to the structure to place built
++                             equalities in
++
++  @return
++    pointer to the transformed condition containing multiple equalities
++*/
++   
++static COND *build_equal_items(THD *thd, COND *cond,
++                               COND_EQUAL *inherited,
++                               List<TABLE_LIST> *join_list,
++                               COND_EQUAL **cond_equal_ref)
++{
++  COND_EQUAL *cond_equal= 0;
++
++  if (cond) 
++  {
++    cond= build_equal_items_for_cond(thd, cond, inherited);
++    cond->update_used_tables();
++    if (cond->type() == Item::COND_ITEM &&
++        ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++      cond_equal= &((Item_cond_and*) cond)->cond_equal;
++    else if (cond->type() == Item::FUNC_ITEM &&
++             ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
++    {
++      cond_equal= new COND_EQUAL;
++      cond_equal->current_level.push_back((Item_equal *) cond);
++    }
++  }
++  if (cond_equal)
++  {
++    cond_equal->upper_levels= inherited;
++    inherited= cond_equal;
++  }
++  *cond_equal_ref= cond_equal;
++
++  if (join_list)
++  {
++    TABLE_LIST *table;
++    List_iterator<TABLE_LIST> li(*join_list);
++
++    while ((table= li++))
++    {
++      if (table->on_expr)
++      {
++        List<TABLE_LIST> *nested_join_list= table->nested_join ?
++          &table->nested_join->join_list : NULL;
++        /*
++          We can modify table->on_expr because its old value will
++          be restored before re-execution of PS/SP.
++        */
++        table->on_expr= build_equal_items(thd, table->on_expr, inherited,
++                                          nested_join_list,
++                                          &table->cond_equal);
++      }
++    }
++  }
++
++  return cond;
++}    
++
++
++/**
++  Compare field items by table order in the execution plan.
++
++    field1 is considered better than field2 if the table containing
++    field1 is accessed earlier than the table containing field2.   
++    The function finds out which of the two fields is better according
++    to this criterion.
++
++  @param field1          first field item to compare
++  @param field2          second field item to compare
++  @param table_join_idx  index to tables determining table order
++
++  @retval
++    1  if field1 is better than field2
++  @retval
++    -1  if field2 is better than field1
++  @retval
++    0  otherwise
++*/
++
++static int compare_fields_by_table_order(Item_field *field1,
++                                  Item_field *field2,
++                                  void *table_join_idx)
++{
++  int cmp= 0;
++  bool outer_ref= 0;
++  if (field1->used_tables() & OUTER_REF_TABLE_BIT)
++  {  
++    outer_ref= 1;
++    cmp= -1;
++  }
++  if (field2->used_tables() & OUTER_REF_TABLE_BIT)
++  {
++    outer_ref= 1;
++    cmp++;
++  }
++  if (outer_ref)
++    return cmp;
++  JOIN_TAB **idx= (JOIN_TAB **) table_join_idx;
++  cmp= idx[field2->field->table->tablenr]-idx[field1->field->table->tablenr];
++  return cmp < 0 ? -1 : (cmp ? 1 : 0);
++}
++
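++/*
++  An illustrative sketch of the comparison above, assuming a hypothetical
++  plan that accesses t2 before t1 before t3: sorting the fields of
++  =(t1.a,t2.b,t3.c) with this comparator orders them as t2.b, t1.a, t3.c,
++  i.e. fields of earlier tables come first.
++*/
++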
++
++/**
++  Generate minimal set of simple equalities equivalent to a multiple equality.
++
++    The function retrieves the fields of the multiple equality item
++    item_equal and for each field f:
++    - if item_equal contains const it generates the equality f=const_item;
++    - otherwise, if f is not the first field, generates the equality
++      f=item_equal->get_first().
++    All generated equalities are added to the cond conjunction.
++
++  @param cond            condition to add the generated equality to
++  @param upper_levels    structure to access multiple equality of upper levels
++  @param item_equal      multiple equality to generate simple equality from
++
++  @note
++    Before generating an equality the function checks that it has not
++    been generated for multiple equalities of the upper levels.
++    E.g. for the following where condition
++    WHERE a=5 AND ((a=b AND b=c) OR  c>4)
++    the upper level AND condition will contain =(5,a),
++    while the lower level AND condition will contain =(5,a,b,c).
++    When splitting =(5,a,b,c) into a separate equality predicates
++    we should omit 5=a, as we have it already in the upper level.
++    The following where condition gives us a more complicated case:
++    WHERE t1.a=t2.b AND t3.c=t4.d AND (t2.b=t3.c OR t4.e>5 ...) AND ...
++    Given the tables are accessed in the order t1->t2->t3->t4 for
++    the selected query execution plan the lower level multiple
++    equality =(t1.a,t2.b,t3.c,t4.d) formally should be converted to
++    t1.a=t2.b AND t1.a=t3.c AND t1.a=t4.d. But t1.a=t2.b will be
++    generated for the upper level. Also t3.c=t4.d will be generated there.
++    So only t1.a=t3.c should be left in the lower level.
++    If cond is equal to 0, then not more than one equality is generated
++    and a pointer to it is returned as the result of the function.
++
++  @return
++    - The condition with generated simple equalities or
++    a pointer to the simple generated equality, if success.
++    - 0, otherwise.
++*/
++
++static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
++                                  Item_equal *item_equal)
++{
++  List<Item> eq_list;
++  Item_func_eq *eq_item= 0;
++  if (((Item *) item_equal)->const_item() && !item_equal->val_int())
++    return new Item_int((longlong) 0,1); 
++  Item *item_const= item_equal->get_const();
++  Item_equal_iterator it(*item_equal);
++  Item *head;
++  if (item_const)
++    head= item_const;
++  else
++  {
++    head= item_equal->get_first();
++    it++;
++  }
++  Item_field *item_field;
++  while ((item_field= it++))
++  {
++    Item_equal *upper= item_field->find_item_equal(upper_levels);
++    Item_field *item= item_field;
++    if (upper)
++    { 
++      if (item_const && upper->get_const())
++        item= 0;
++      else
++      {
++        Item_equal_iterator li(*item_equal);
++        while ((item= li++) != item_field)
++        {
++          if (item->find_item_equal(upper_levels) == upper)
++            break;
++        }
++      }
++    }
++    if (item == item_field)
++    {
++      if (eq_item)
++        eq_list.push_back(eq_item);
++      eq_item= new Item_func_eq(item_field, head);
++      if (!eq_item)
++        return 0;
++      eq_item->set_cmp_func();
++      eq_item->quick_fix_field();
++   }
++  }
++
++  if (!cond && !eq_list.head())
++  {
++    if (!eq_item)
++      return new Item_int((longlong) 1,1);
++    return eq_item;
++  }
++
++  if (eq_item)
++    eq_list.push_back(eq_item);
++  if (!cond)
++    cond= new Item_cond_and(eq_list);
++  else
++  {
++    DBUG_ASSERT(cond->type() == Item::COND_ITEM);
++    if (eq_list.elements)
++      ((Item_cond *) cond)->add_at_head(&eq_list);
++  }
++
++  cond->quick_fix_field();
++  cond->update_used_tables();
++   
++  return cond;
++}
++
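++/*
++  A minimal sketch of the expansion above (hypothetical columns): the
++  multiple equality =(5,t1.a,t2.b,t3.c) is expanded into
++  t1.a=5 AND t2.b=5 AND t3.c=5, while a constant-free =(t1.a,t2.b,t3.c)
++  whose first field is t1.a becomes t2.b=t1.a AND t3.c=t1.a.
++*/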
++
++/**
++  Substitute every field reference in a condition by the best equal field
++  and eliminate all multiple equality predicates.
++
++    The function retrieves the cond condition and for each encountered
++    multiple equality predicate it sorts the field references in it
++    according to the order of tables specified by the table_join_idx
++    parameter. Then it eliminates the multiple equality predicate,
++    replacing it by the conjunction of simple equality predicates 
++    equating every field from the multiple equality to the first
++    field in it, or to the constant, if there is any.
++    After this the function retrieves all other conjuncted
++    predicates and substitutes every field reference by the field reference
++    to the first equal field or equal constant if there are any.
++  @param cond            condition to process
++  @param cond_equal      multiple equalities to take into consideration
++  @param table_join_idx  index to tables determining field preference
++
++  @note
++    At first glance a full sort of fields in a multiple equality
++    seems to be overkill. Yet it's not the case due to possible
++    new fields in multiple equality items of lower levels. We want
++    the order in them to comply with the order of upper levels.
++
++  @return
++    The transformed condition
++*/
++
++static COND* substitute_for_best_equal_field(COND *cond,
++                                             COND_EQUAL *cond_equal,
++                                             void *table_join_idx)
++{
++  Item_equal *item_equal;
++
++  if (cond->type() == Item::COND_ITEM)
++  {
++    List<Item> *cond_list= ((Item_cond*) cond)->argument_list();
++
++    bool and_level= ((Item_cond*) cond)->functype() ==
++                      Item_func::COND_AND_FUNC;
++    if (and_level)
++    {
++      cond_equal= &((Item_cond_and *) cond)->cond_equal;
++      cond_list->disjoin((List<Item> *) &cond_equal->current_level);
++
++      List_iterator_fast<Item_equal> it(cond_equal->current_level);      
++      while ((item_equal= it++))
++      {
++        item_equal->sort(&compare_fields_by_table_order, table_join_idx);
++      }
++    }
++    
++    List_iterator<Item> li(*cond_list);
++    Item *item;
++    while ((item= li++))
++    {
++      Item *new_item =substitute_for_best_equal_field(item, cond_equal,
++                                                      table_join_idx);
++      /*
++        This works OK with PS/SP re-execution as changes are made to
++        the arguments of AND/OR items only
++      */
++      if (new_item != item)
++        li.replace(new_item);
++    }
++
++    if (and_level)
++    {
++      List_iterator_fast<Item_equal> it(cond_equal->current_level);
++      while ((item_equal= it++))
++      {
++        cond= eliminate_item_equal(cond, cond_equal->upper_levels, item_equal);
++        // This occurs when eliminate_item_equal() finds that cond is
++        // always false and substitutes it with Item_int 0.
++        // Due to this, value of item_equal will be 0, so just return it.
++        if (cond->type() != Item::COND_ITEM)
++          break;
++      }
++    }
++    if (cond->type() == Item::COND_ITEM &&
++        !((Item_cond*)cond)->argument_list()->elements)
++      cond= new Item_int((int32)cond->val_bool());
++
++  }
++  else if (cond->type() == Item::FUNC_ITEM && 
++           ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
++  {
++    item_equal= (Item_equal *) cond;
++    item_equal->sort(&compare_fields_by_table_order, table_join_idx);
++    if (cond_equal && cond_equal->current_level.head() == item_equal)
++      cond_equal= 0;
++    return eliminate_item_equal(0, cond_equal, item_equal);
++  }
++  else
++    cond->transform(&Item::replace_equal_field, 0);
++  return cond;
++}
++
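++/*
++  A hedged sketch of the substitution above, assuming hypothetical tables
++  accessed in the order t1, t2, t3 and the multiple equality
++  =(t1.a,t2.b,t3.c): a remaining predicate such as t3.c > 10 is rewritten
++  as t1.a > 10, and the multiple equality itself is expanded by
++  eliminate_item_equal() into t2.b=t1.a AND t3.c=t1.a.
++*/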
++
++/**
++  Check appearance of new constant items in multiple equalities
++  of a condition after reading a constant table.
++
++    The function retrieves the cond condition and for each encountered
++    multiple equality checks whether new constants have appeared after
++    reading the constant (single row) table tab. If so it adjusts
++    the multiple equality appropriately.
++
++  @param cond       condition whose multiple equalities are to be checked
++  @param tab        join_tab of the constant table that has been read
++*/
++
++static void update_const_equal_items(COND *cond, JOIN_TAB *tab)
++{
++  if (!(cond->used_tables() & tab->table->map))
++    return;
++
++  if (cond->type() == Item::COND_ITEM)
++  {
++    List<Item> *cond_list= ((Item_cond*) cond)->argument_list(); 
++    List_iterator_fast<Item> li(*cond_list);
++    Item *item;
++    while ((item= li++))
++      update_const_equal_items(item, tab);
++  }
++  else if (cond->type() == Item::FUNC_ITEM && 
++           ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
++  {
++    Item_equal *item_equal= (Item_equal *) cond;
++    bool contained_const= item_equal->get_const() != NULL;
++    item_equal->update_const();
++    if (!contained_const && item_equal->get_const())
++    {
++      /* Update keys for range analysis */
++      Item_equal_iterator it(*item_equal);
++      Item_field *item_field;
++      while ((item_field= it++))
++      {
++        Field *field= item_field->field;
++        JOIN_TAB *stat= field->table->reginfo.join_tab;
++        key_map possible_keys= field->key_start;
++        possible_keys.intersect(field->table->keys_in_use_for_query);
++        stat[0].const_keys.merge(possible_keys);
++
++        /*
++          For each field in the multiple equality (for which we know that it 
++          is a constant) we have to find its corresponding key part, and set 
++          that key part in const_key_parts.
++        */  
++        if (!possible_keys.is_clear_all())
++        {
++          TABLE *tab= field->table;
++          KEYUSE *use;
++          for (use= stat->keyuse; use && use->table == tab; use++)
++            if (possible_keys.is_set(use->key) && 
++                tab->key_info[use->key].key_part[use->keypart].field ==
++                field)
++              tab->const_key_parts[use->key]|= use->keypart_map;
++        }
++      }
++    }
++  }
++}
++
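++/*
++  A small sketch of the adjustment above (hypothetical tables): once the
++  constant (single row) table t1 has been read, the multiple equality
++  =(t1.a,t2.b) gains a known constant, so the indexes on t2.b can be added
++  to const_keys and the matching key parts to const_key_parts for t2.
++*/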
++
++/*
++  change field = field to field = const for each found field = const in the
++  and_level
++*/
++
++static void
++change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
++                         Item *and_father, Item *cond,
++                         Item *field, Item *value)
++{
++  if (cond->type() == Item::COND_ITEM)
++  {
++    bool and_level= ((Item_cond*) cond)->functype() ==
++      Item_func::COND_AND_FUNC;
++    List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++    Item *item;
++    while ((item=li++))
++      change_cond_ref_to_const(thd, save_list,and_level ? cond : item, item,
++			       field, value);
++    return;
++  }
++  if (cond->eq_cmp_result() == Item::COND_OK)
++    return;					// Not a boolean function
++
++  Item_bool_func2 *func=  (Item_bool_func2*) cond;
++  Item **args= func->arguments();
++  Item *left_item=  args[0];
++  Item *right_item= args[1];
++  Item_func::Functype functype=  func->functype();
++
++  if (right_item->eq(field,0) && left_item != value &&
++      right_item->cmp_context == field->cmp_context &&
++      (left_item->result_type() != STRING_RESULT ||
++       value->result_type() != STRING_RESULT ||
++       left_item->collation.collation == value->collation.collation))
++  {
++    Item *tmp=value->clone_item();
++    tmp->collation.set(right_item->collation);
++    
++    if (tmp)
++    {
++      thd->change_item_tree(args + 1, tmp);
++      func->update_used_tables();
++      if ((functype == Item_func::EQ_FUNC || functype == Item_func::EQUAL_FUNC)
++	  && and_father != cond && !left_item->const_item())
++      {
++	cond->marker=1;
++	COND_CMP *tmp2;
++	if ((tmp2=new COND_CMP(and_father,func)))
++	  save_list->push_back(tmp2);
++      }
++      func->set_cmp_func();
++    }
++  }
++  else if (left_item->eq(field,0) && right_item != value &&
++           left_item->cmp_context == field->cmp_context &&
++           (right_item->result_type() != STRING_RESULT ||
++            value->result_type() != STRING_RESULT ||
++            right_item->collation.collation == value->collation.collation))
++  {
++    Item *tmp= value->clone_item();
++    tmp->collation.set(left_item->collation);
++    
++    if (tmp)
++    {
++      thd->change_item_tree(args, tmp);
++      value= tmp;
++      func->update_used_tables();
++      if ((functype == Item_func::EQ_FUNC || functype == Item_func::EQUAL_FUNC)
++	  && and_father != cond && !right_item->const_item())
++      {
++        args[0]= args[1];                       // For easy check
++        thd->change_item_tree(args + 1, value);
++	cond->marker=1;
++	COND_CMP *tmp2;
++	if ((tmp2=new COND_CMP(and_father,func)))
++	  save_list->push_back(tmp2);
++      }
++      func->set_cmp_func();
++    }
++  }
++}
++
++/**
++  Remove additional condition inserted by IN/ALL/ANY transformation.
++
++  @param conds   condition for processing
++
++  @return
++    new conditions
++*/
++
++static Item *remove_additional_cond(Item* conds)
++{
++  if (conds->name == in_additional_cond)
++    return 0;
++  if (conds->type() == Item::COND_ITEM)
++  {
++    Item_cond *cnd= (Item_cond*) conds;
++    List_iterator<Item> li(*(cnd->argument_list()));
++    Item *item;
++    while ((item= li++))
++    {
++      if (item->name == in_additional_cond)
++      {
++	li.remove();
++	if (cnd->argument_list()->elements == 1)
++	  return cnd->argument_list()->head();
++	return conds;
++      }
++    }
++  }
++  return conds;
++}
++
++static void
++propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
++                         COND *and_father, COND *cond)
++{
++  if (cond->type() == Item::COND_ITEM)
++  {
++    bool and_level= ((Item_cond*) cond)->functype() ==
++      Item_func::COND_AND_FUNC;
++    List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
++    Item *item;
++    I_List<COND_CMP> save;
++    while ((item=li++))
++    {
++      propagate_cond_constants(thd, &save,and_level ? cond : item, item);
++    }
++    if (and_level)
++    {						// Handle other found items
++      I_List_iterator<COND_CMP> cond_itr(save);
++      COND_CMP *cond_cmp;
++      while ((cond_cmp=cond_itr++))
++      {
++        Item **args= cond_cmp->cmp_func->arguments();
++        if (!args[0]->const_item())
++          change_cond_ref_to_const(thd, &save,cond_cmp->and_level,
++                                   cond_cmp->and_level, args[0], args[1]);
++      }
++    }
++  }
++  else if (and_father != cond && !cond->marker)		// In a AND group
++  {
++    if (cond->type() == Item::FUNC_ITEM &&
++	(((Item_func*) cond)->functype() == Item_func::EQ_FUNC ||
++	 ((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC))
++    {
++      Item_func_eq *func=(Item_func_eq*) cond;
++      Item **args= func->arguments();
++      bool left_const= args[0]->const_item();
++      bool right_const= args[1]->const_item();
++      if (!(left_const && right_const) &&
++          args[0]->result_type() == args[1]->result_type())
++      {
++	if (right_const)
++	{
++          resolve_const_item(thd, &args[1], args[0]);
++	  func->update_used_tables();
++          change_cond_ref_to_const(thd, save_list, and_father, and_father,
++                                   args[0], args[1]);
++	}
++	else if (left_const)
++	{
++          resolve_const_item(thd, &args[0], args[1]);
++	  func->update_used_tables();
++          change_cond_ref_to_const(thd, save_list, and_father, and_father,
++                                   args[1], args[0]);
++	}
++      }
++    }
++  }
++}
++
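++/*
++  A minimal sketch of constant propagation above, with hypothetical
++  columns:
++
++    WHERE t1.a = t2.b AND t2.b = 10
++
++  The constant found in t2.b=10 is propagated into the other conjunct of
++  the same AND level, which becomes t1.a=10, while t2.b=10 is kept.
++*/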
++
++/**
++  Simplify joins replacing outer joins by inner joins whenever it's
++  possible.
++
++    The function, during a retrieval of join_list, eliminates those
++    outer joins that can be converted into inner joins, possibly nested.
++    It also moves the on expressions for the converted outer joins
++    and from inner joins to conds.
++    The function also calculates some attributes for nested joins:
++    - used_tables    
++    - not_null_tables
++    - dep_tables
++    - on_expr_dep_tables
++    The first two attributes are used to test whether an outer join can
++    be substituted for an inner join. The third attribute represents the
++    relation 'to be dependent on' for tables. If table t2 is dependent
++    on table t1, then in any evaluated execution plan table access to
++    table t1 must precede access to table t2. This relation is used also
++    to check whether the query contains invalid cross-references.
++    The fourth attribute is an auxiliary one and is used to calculate
++    dep_tables.
++    As the attribute dep_tables qualifies possible orders of tables in the
++    execution plan, the dependencies required by the straight join
++    modifiers are reflected in this attribute as well.
++    The function also removes all braces that can be removed from the join
++    expression without changing its meaning.
++
++  @note
++    An outer join can be replaced by an inner join if the where condition
++    or the on expression for an embedding nested join contains a conjunctive
++    predicate rejecting null values for some attribute of the inner tables.
++
++    E.g. in the query:    
++    @code
++      SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5
++    @endcode
++    the predicate t2.b < 5 rejects nulls.
++    The query is converted first to:
++    @code
++      SELECT * FROM t1 INNER JOIN t2 ON t2.a=t1.a WHERE t2.b < 5
++    @endcode
++    then to the equivalent form:
++    @code
++      SELECT * FROM t1, t2 WHERE t2.b < 5 AND t2.a=t1.a
++    @endcode
++
++
++    Similarly the following query:
++    @code
++      SELECT * from t1 LEFT JOIN (t2, t3) ON t2.a=t1.a AND t3.b=t1.b
++        WHERE t2.c < 5  
++    @endcode
++    is converted to:
++    @code
++      SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b 
++
++    @endcode
++
++    One conversion might trigger another:
++    @code
++      SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a
++                       LEFT JOIN t3 ON t3.b=t2.b
++        WHERE t3 IS NOT NULL =>
++      SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a, t3
++        WHERE t3 IS NOT NULL AND t3.b=t2.b => 
++      SELECT * FROM t1, t2, t3
++        WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a
++  @endcode
++
++    The function removes all unnecessary braces from the expression
++    produced by the conversions.
++    E.g.
++    @code
++      SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
++    @endcode
++    finally is converted to: 
++    @code
++      SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
++
++    @endcode
++
++
++    It also will remove braces from the following queries:
++    @code
++      SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b
++      SELECT * from (t1, (t2,t3)) WHERE t1.a=t2.a AND t2.b=t3.b.
++    @endcode
++
++    The benefit of this simplification procedure is that it might return 
++    a query for which the optimizer can evaluate execution plans with more
++    join orders. With a left join operation the optimizer does not
++    consider any plan where one of the inner tables comes before some of the
++    outer tables.
++
++
++    The function is implemented by a recursive procedure.  On the recursive
++    ascent all attributes are calculated, all outer joins that can be
++    converted are replaced and then all unnecessary braces are removed.
++    As join list contains join tables in the reverse order sequential
++    elimination of outer joins does not require extra recursive calls.
++
++    Here is an example of a join query with invalid cross references:
++    @code
++      SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t3.a LEFT JOIN t3 ON t3.b=t1.b 
++    @endcode
++
++  @param join        reference to the query info
++  @param join_list   list representation of the join to be converted
++  @param conds       conditions to add on expressions for converted joins
++  @param top         true <=> conds is the where condition
++
++  @return
++    - The new condition, if success
++    - 0, otherwise
++*/
++
++static COND *
++simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
++{
++  TABLE_LIST *table;
++  NESTED_JOIN *nested_join;
++  TABLE_LIST *prev_table= 0;
++  List_iterator<TABLE_LIST> li(*join_list);
++  bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
++  DBUG_ENTER("simplify_joins");
++
++  /* 
++    Try to simplify join operations from join_list.
++    The most outer join operation is checked for conversion first. 
++  */
++  while ((table= li++))
++  {
++    table_map used_tables;
++    table_map not_null_tables= (table_map) 0;
++
++    if ((nested_join= table->nested_join))
++    {
++      /* 
++         If the element of join_list is a nested join apply
++         the procedure to its nested join list first.
++      */
++      if (table->on_expr)
++      {
++        Item *expr= table->on_expr;
++        /* 
++           If an on expression E is attached to the table, 
++           check all null rejected predicates in this expression.
++           If such a predicate over an attribute belonging to
++           an inner table of an embedded outer join is found,
++           the outer join is converted to an inner join and
++           the corresponding on expression is added to E. 
++	*/ 
++        expr= simplify_joins(join, &nested_join->join_list,
++                             expr, FALSE);
++
++        if (!table->prep_on_expr || expr != table->on_expr)
++        {
++          DBUG_ASSERT(expr);
++
++          table->on_expr= expr;
++          table->prep_on_expr= expr->copy_andor_structure(join->thd);
++        }
++      }
++      nested_join->used_tables= (table_map) 0;
++      nested_join->not_null_tables=(table_map) 0;
++      conds= simplify_joins(join, &nested_join->join_list, conds, top);
++      used_tables= nested_join->used_tables;
++      not_null_tables= nested_join->not_null_tables;  
++    }
++    else
++    {
++      if (!table->prep_on_expr)
++        table->prep_on_expr= table->on_expr;
++      used_tables= table->table->map;
++      if (conds)
++        not_null_tables= conds->not_null_tables();
++    }
++      
++    if (table->embedding)
++    {
++      table->embedding->nested_join->used_tables|= used_tables;
++      table->embedding->nested_join->not_null_tables|= not_null_tables;
++    }
++
++    if (!table->outer_join || (used_tables & not_null_tables))
++    {
++      /* 
++        For some of the inner tables there are conjunctive predicates
++        that reject nulls => the outer join can be replaced by an inner join.
++      */
++      table->outer_join= 0;
++      if (table->on_expr)
++      {
++        /* Add on expression to the where condition. */
++        if (conds)
++        {
++          conds= and_conds(conds, table->on_expr);
++          conds->top_level_item();
++          /* conds is always a new item as both cond and on_expr existed */
++          DBUG_ASSERT(!conds->fixed);
++          conds->fix_fields(join->thd, &conds);
++        }
++        else
++          conds= table->on_expr; 
++        table->prep_on_expr= table->on_expr= 0;
++      }
++    }
++    
++    if (!top)
++      continue;
++
++    /* 
++      Only inner tables of non-convertible outer joins
++      remain with on_expr.
++    */ 
++    if (table->on_expr)
++    {
++      table->dep_tables|= table->on_expr->used_tables(); 
++      if (table->embedding)
++      {
++        table->dep_tables&= ~table->embedding->nested_join->used_tables;   
++        /*
++           Embedding table depends on tables used
++           in embedded on expressions. 
++        */
++        table->embedding->on_expr_dep_tables|= table->on_expr->used_tables();
++      }
++      else
++        table->dep_tables&= ~table->table->map;
++    }
++
++    if (prev_table)
++    {
++      /* The order of tables is reverse: prev_table follows table */
++      if (prev_table->straight || straight_join)
++        prev_table->dep_tables|= used_tables;
++      if (prev_table->on_expr)
++      {
++        prev_table->dep_tables|= table->on_expr_dep_tables;
++        table_map prev_used_tables= prev_table->nested_join ?
++	                            prev_table->nested_join->used_tables :
++	                            prev_table->table->map;
++        /* 
++          If on expression contains only references to inner tables
++          we still make the inner tables dependent on the outer tables.
++          It would be enough to set dependency only on one outer table
++          for them. Yet this is really a rare case.
++          Note:
++          RAND_TABLE_BIT mask should not be counted as it
++          prevents update of inner table dependences.
++          For example it might happen if RAND() function
++          is used in JOIN ON clause.
++	*/  
++        if (!((prev_table->on_expr->used_tables() & ~RAND_TABLE_BIT) &
++              ~prev_used_tables))
++          prev_table->dep_tables|= used_tables;
++      }
++    }
++    prev_table= table;
++  }
++    
++  /* Flatten nested joins that can be flattened. */
++  TABLE_LIST *right_neighbor= NULL;
++  li.rewind();
++  while ((table= li++))
++  {
++    bool fix_name_res= FALSE;
++    nested_join= table->nested_join;
++    if (nested_join && !table->on_expr)
++    {
++      TABLE_LIST *tbl;
++      List_iterator<TABLE_LIST> it(nested_join->join_list);
++      while ((tbl= it++))
++      {
++        tbl->embedding= table->embedding;
++        tbl->join_list= table->join_list;
++      }
++      li.replace(nested_join->join_list);
++      /* Need to update the name resolution table chain when flattening joins */
++      fix_name_res= TRUE;
++      table= *li.ref();
++    }
++    if (fix_name_res)
++      table->next_name_resolution_table= right_neighbor ?
++        right_neighbor->first_leaf_for_name_resolution() :
++        NULL;
++    right_neighbor= table;
++  }
++  DBUG_RETURN(conds); 
++}
++
++
++/**
++  Assign each nested join structure a bit in nested_join_map.
++
++    Assign each nested join structure (except "confluent" ones - those that
++    embed only one element) a bit in nested_join_map.
++
++  @param join          Join being processed
++  @param join_list     List of tables
++  @param first_unused  Number of first unused bit in nested_join_map before the
++                       call
++
++  @note
++    This function is called after simplify_joins(), when there are no
++    redundant nested joins, #non_confluent_nested_joins <= #tables_in_join so
++    we will not run out of bits in nested_join_map.
++
++  @return
++    First unused bit in nested_join_map after the call.
++*/
++
++static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list, 
++                                          uint first_unused)
++{
++  List_iterator<TABLE_LIST> li(*join_list);
++  TABLE_LIST *table;
++  DBUG_ENTER("build_bitmap_for_nested_joins");
++  while ((table= li++))
++  {
++    NESTED_JOIN *nested_join;
++    if ((nested_join= table->nested_join))
++    {
++      /*
++        It is guaranteed by simplify_joins() function that a nested join
++        that has only one child represents a single table VIEW (and the child
++        is an underlying table). We don't assign bits to such nested join
++        structures because 
++        1. it is redundant (a "sequence" of one table cannot be interleaved 
++            with anything)
++        2. we could run out of bits in nested_join_map otherwise.
++      */
++      if (nested_join->join_list.elements != 1)
++      {
++        nested_join->nj_map= (nested_join_map) 1 << first_unused++;
++        first_unused= build_bitmap_for_nested_joins(&nested_join->join_list,
++                                                    first_unused);
++      }
++    }
++  }
++  DBUG_RETURN(first_unused);
++}
++
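++/*
++  A hedged sketch (hypothetical query, exact bit values are an assumption):
++  for  t1 LEFT JOIN (t2 JOIN t3) ON ... LEFT JOIN (t4 JOIN t5) ON ...
++  the two non-confluent nested joins (t2,t3) and (t4,t5) receive distinct
++  nj_map bits, e.g. 1<<0 and 1<<1, so cur_embedding_map can record which
++  of them the current partial join order is currently "inside".
++*/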
++
++/**
++  Set NESTED_JOIN::counter=0 in all nested joins in passed list.
++
++    Recursively set NESTED_JOIN::counter=0 for all nested joins contained in
++    the passed join_list.
++
++  @param join_list  List of nested joins to process. It may also contain base
++                    tables which will be ignored.
++*/
++
++static void reset_nj_counters(List<TABLE_LIST> *join_list)
++{
++  List_iterator<TABLE_LIST> li(*join_list);
++  TABLE_LIST *table;
++  DBUG_ENTER("reset_nj_counters");
++  while ((table= li++))
++  {
++    NESTED_JOIN *nested_join;
++    if ((nested_join= table->nested_join))
++    {
++      nested_join->counter= 0;
++      reset_nj_counters(&nested_join->join_list);
++    }
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/**
++  Check interleaving with an inner tables of an outer join for
++  extension table.
++
++    Check if table next_tab can be added to current partial join order, and 
++    if yes, record that it has been added.
++
++    The function assumes that both current partial join order and its
++    extension with next_tab are valid wrt table dependencies.
++
++  @verbatim
++     IMPLEMENTATION 
++       LIMITATIONS ON JOIN ORDER
++         The nested [outer] joins execution algorithm imposes these limitations
++         on join order:
++         1. "Outer tables first" -  any "outer" table must be before any 
++             corresponding "inner" table.
++         2. "No interleaving" - tables inside a nested join must form a continuous
++            sequence in join order (i.e. the sequence must not be interrupted by 
++            tables that are outside of this nested join).
++
++         #1 is checked elsewhere, this function checks #2 provided that #1 has
++         been already checked.
++
++       WHY NEED NON-INTERLEAVING
++         Consider an example: 
++
++           select * from t0 join t1 left join (t2 join t3) on cond1
++
++         The join order "t1 t2 t0 t3" is invalid:
++
++         table t0 is outside of the nested join, so WHERE condition for t0 is
++         attached directly to t0 (without triggers, and it may be used to access
++         t0). Applying WHERE(t0) to (t2,t0,t3) record is invalid as we may miss
++         combinations of (t1, t2, t3) that satisfy condition cond1, and produce a
++         null-complemented (t1, t2.NULLs, t3.NULLs) row, which should not have
++         been produced.
++
++         If table t0 is not between t2 and t3, the problem doesn't exist:
++          If t0 is located after (t2,t3), WHERE(t0) is applied after nested join
++           processing has finished.
++          If t0 is located before (t2,t3), predicates like WHERE_cond(t0, t2) are
++           wrapped into condition triggers, which takes care of correct nested
++           join processing.
++
++       HOW IT IS IMPLEMENTED
++         The limitations on join order can be rephrased as follows: for valid
++         join order one must be able to:
++           1. write down the used tables in the join order on one line.
++           2. for each nested join, put one '(' and one ')' on the said line        
++           3. write "LEFT JOIN" and "ON (...)" where appropriate
++           4. get a query equivalent to the query we're trying to execute.
++
++         Calls to check_interleaving_with_nj() are equivalent to writing the
++         above described line from left to right. 
++         A single check_interleaving_with_nj(A,B) call is equivalent to writing 
++         table B and appropriate brackets on condition that table A and
++         appropriate brackets are the last thing that was written. Graphically the
++         transition is as follows:
++
++                              +---- current position
++                              |
++             ... last_tab ))) | ( next_tab )  )..) | ...
++                                X          Y   Z   |
++                                                   +- need to move to this
++                                                      position.
++
++         Notes about the position:
++           The caller guarantees that there is no more than one X-bracket by 
++           checking "!(remaining_tables & s->dependent)" before calling this 
++           function. X-bracket may have a pair in Y-bracket.
++
++         When "writing" we store/update this auxilary info about the current
++         position:
++          1. join->cur_embedding_map - bitmap of pairs of brackets (aka nested
++             joins) we've opened but didn't close.
++          2. {each NESTED_JOIN structure not simplified away}->counter - number
++             of this nested join's children that have already been added to
++             the partial join order.
++  @endverbatim
++
++  @param next_tab   Table we're going to extend the current partial join with
++
++  @retval
++    FALSE  Join order extended, nested joins info about current join
++    order (see NOTE section) updated.
++  @retval
++    TRUE   Requested join order extension not allowed.
++*/
++
++static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
++{
++  TABLE_LIST *next_emb= next_tab->table->pos_in_table_list->embedding;
++  JOIN *join= next_tab->join;
++
++  if (join->cur_embedding_map & ~next_tab->embedding_map)
++  {
++    /* 
++      next_tab is outside of the "pair of brackets" we're currently in.
++      Cannot add it.
++    */
++    return TRUE;
++  }
++   
++  /*
++    Do update counters for "pairs of brackets" that we've left (marked as
++    X,Y,Z in the above picture)
++  */
++  for (;next_emb; next_emb= next_emb->embedding)
++  {
++    next_emb->nested_join->counter++;
++    if (next_emb->nested_join->counter == 1)
++    {
++      /* 
++        next_emb is the first table inside a nested join we've "entered". In
++        the picture above, we're looking at the 'X' bracket. Don't exit yet as
++        X bracket might have Y pair bracket.
++      */
++      join->cur_embedding_map |= next_emb->nested_join->nj_map;
++    }
++    
++    if (next_emb->nested_join->join_list.elements !=
++        next_emb->nested_join->counter)
++      break;
++
++    /*
++      We're currently at Y or Z-bracket as depicted in the above picture.
++      Mark that we've left it and continue walking up the brackets hierarchy.
++    */
++    join->cur_embedding_map &= ~next_emb->nested_join->nj_map;
++  }
++  return FALSE;
++}
++
++
++/**
++  Nested joins perspective: Remove the last table from the join order.
++
++  The algorithm is the reciprocal of check_interleaving_with_nj(), hence
++  parent join nest nodes are updated only when the last table in its child
++  node is removed. The ASCII graphic below will clarify.
++
++  %A table nesting such as <tt> t1 x [ ( t2 x t3 ) x ( t4 x t5 ) ] </tt> is
++  represented by the below join nest tree.
++
++  @verbatim
++                     NJ1
++                  _/ /  \
++                _/  /    NJ2
++              _/   /     / \ 
++             /    /     /   \
++   t1 x [ (t2 x t3) x (t4 x t5) ]
++  @endverbatim
++
++  At the point in time when check_interleaving_with_nj() adds the table t5 to
++  the query execution plan, QEP, it also directs the node named NJ2 to mark
++  the table as covered. NJ2 does so by incrementing its @c counter
++  member. Since all of NJ2's tables are now covered by the QEP, the algorithm
++  proceeds up the tree to NJ1, incrementing its counter as well. All join
++  nests are now completely covered by the QEP.
++
++  restore_prev_nj_state() does the above in reverse. As seen above, the node
++  NJ1 contains the nodes t2, t3, and NJ2. Its counter being equal to 3 means
++  that the plan covers t2, t3, and NJ2, @e and that the sub-plan (t4 x t5)
++  completely covers NJ2. The removal of t5 from the partial plan will first
++  decrement NJ2's counter to 1. It will then detect that NJ2 went from being
++  completely to partially covered, and hence the algorithm must continue
++  upwards to NJ1 and decrement its counter to 2. %A subsequent removal of t4
++  will however not influence NJ1 since it did not un-cover the last table in
++  NJ2.
++
++  SYNOPSIS
++    restore_prev_nj_state()
++      last  join table to remove, it is assumed to be the last in current 
++            partial join order.
++     
++  DESCRIPTION
++
++    Remove the last table from the partial join order and update the nested
++    joins counters and join->cur_embedding_map. It is ok to call this 
++    function for the first table in join order (for which 
++    check_interleaving_with_nj has not been called)
++
++  @param last  join table to remove, it is assumed to be the last in current
++               partial join order.
++*/
++
++static void restore_prev_nj_state(JOIN_TAB *last)
++{
++  TABLE_LIST *last_emb= last->table->pos_in_table_list->embedding;
++  JOIN *join= last->join;
++  for (;last_emb != NULL; last_emb= last_emb->embedding)
++  {
++    NESTED_JOIN *nest= last_emb->nested_join;
++    DBUG_ASSERT(nest->counter > 0);
++    
++    bool was_fully_covered= nest->is_fully_covered();
++    
++    if (--nest->counter == 0)
++      join->cur_embedding_map&= ~nest->nj_map;
++    
++    if (!was_fully_covered)
++      break;
++    
++    join->cur_embedding_map|= nest->nj_map;
++  }
++}
++
++
++static COND *
++optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
++              Item::cond_result *cond_value)
++{
++  THD *thd= join->thd;
++  DBUG_ENTER("optimize_cond");
++
++  if (!conds)
++    *cond_value= Item::COND_TRUE;
++  else
++  {
++    /* 
++      Build all multiple equality predicates and eliminate equality
++      predicates that can be inferred from these multiple equalities.
++      For each reference of a field included into a multiple equality
++      that occurs in a function set a pointer to the multiple equality
++      predicate. Substitute a constant instead of this field if the
++      multiple equality contains a constant.
++    */ 
++    DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
++    conds= build_equal_items(join->thd, conds, NULL, join_list,
++                             &join->cond_equal);
++    DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
++
++    /* change field = field to field = const for each found field = const */
++    propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
++    /*
++      Remove all instances of item == item
++      Remove all and-levels where CONST item != CONST item
++    */
++    DBUG_EXECUTE("where",print_where(conds,"after const change", QT_ORDINARY););
++    conds= remove_eq_conds(thd, conds, cond_value) ;
++    DBUG_EXECUTE("info",print_where(conds,"after remove", QT_ORDINARY););
++  }
++  DBUG_RETURN(conds);
++}
++
++
++/**
++  Remove const and eq items.
++
++  @return
++    Return new item, or NULL if no condition @n
++    cond_value is set as follows:
++    - COND_OK     : query is possible (field = constant)
++    - COND_TRUE   : always true	( 1 = 1 )
++    - COND_FALSE  : always false	( 1 = 2 )
++*/
++
++COND *
++remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
++{
++  if (cond->type() == Item::COND_ITEM)
++  {
++    bool and_level= ((Item_cond*) cond)->functype()
++      == Item_func::COND_AND_FUNC;
++    List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++    Item::cond_result tmp_cond_value;
++    bool should_fix_fields=0;
++
++    *cond_value=Item::COND_UNDEF;
++    Item *item;
++    while ((item=li++))
++    {
++      Item *new_item=remove_eq_conds(thd, item, &tmp_cond_value);
++      if (!new_item)
++	li.remove();
++      else if (item != new_item)
++      {
++	VOID(li.replace(new_item));
++	should_fix_fields=1;
++      }
++      if (*cond_value == Item::COND_UNDEF)
++	*cond_value=tmp_cond_value;
++      switch (tmp_cond_value) {
++      case Item::COND_OK:			// Not TRUE or FALSE
++	if (and_level || *cond_value == Item::COND_FALSE)
++	  *cond_value=tmp_cond_value;
++	break;
++      case Item::COND_FALSE:
++	if (and_level)
++	{
++	  *cond_value=tmp_cond_value;
++	  return (COND*) 0;			// Always false
++	}
++	break;
++      case Item::COND_TRUE:
++	if (!and_level)
++	{
++	  *cond_value= tmp_cond_value;
++	  return (COND*) 0;			// Always true
++	}
++	break;
++      case Item::COND_UNDEF:			// Impossible
++	break; /* purecov: deadcode */
++      }
++    }
++    if (should_fix_fields)
++      cond->update_used_tables();
++
++    if (!((Item_cond*) cond)->argument_list()->elements ||
++	*cond_value != Item::COND_OK)
++      return (COND*) 0;
++    if (((Item_cond*) cond)->argument_list()->elements == 1)
++    {						// Remove list
++      item= ((Item_cond*) cond)->argument_list()->head();
++      ((Item_cond*) cond)->argument_list()->empty();
++      return item;
++    }
++  }
++  else if (cond->type() == Item::FUNC_ITEM &&
++	   ((Item_func*) cond)->functype() == Item_func::ISNULL_FUNC)
++  {
++    /*
++      Handles this special case for some ODBC applications:
++      They are requesting the row that was just updated with an
++      auto_increment value with this construct:
++
++      SELECT * from table_name where auto_increment_column IS NULL
++      This will be changed to:
++      SELECT * from table_name where auto_increment_column = LAST_INSERT_ID
++    */
++
++    Item_func_isnull *func=(Item_func_isnull*) cond;
++    Item **args= func->arguments();
++    if (args[0]->type() == Item::FIELD_ITEM)
++    {
++      Field *field=((Item_field*) args[0])->field;
++      if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
++	  (thd->options & OPTION_AUTO_IS_NULL) &&
++	  (thd->first_successful_insert_id_in_prev_stmt > 0 &&
++           thd->substitute_null_with_insert_id))
++      {
++#ifdef HAVE_QUERY_CACHE
++	query_cache_abort(&thd->net);
++#endif
++	COND *new_cond;
++	if ((new_cond= new Item_func_eq(args[0],
++					new Item_int("last_insert_id()",
++                                                     thd->read_first_successful_insert_id_in_prev_stmt(),
++                                                     MY_INT64_NUM_DECIMAL_DIGITS))))
++	{
++	  cond=new_cond;
++          /*
++            Item_func_eq can't be fixed after creation so we do not check
++            cond->fixed; also it does not need tables, so we use 0 as the second
++            argument.
++          */
++	  cond->fix_fields(thd, &cond);
++	}
++        /*
++          IS NULL should be mapped to LAST_INSERT_ID only for first row, so
++          clear for next row
++        */
++        thd->substitute_null_with_insert_id= FALSE;
++      }
++      /* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
++      else if (((field->type() == MYSQL_TYPE_DATE) ||
++		(field->type() == MYSQL_TYPE_DATETIME)) &&
++		(field->flags & NOT_NULL_FLAG) &&
++	       !field->table->maybe_null)
++      {
++	COND *new_cond;
++	if ((new_cond= new Item_func_eq(args[0],new Item_int("0", 0, 2))))
++	{
++	  cond=new_cond;
++          /*
++            Item_func_eq can't be fixed after creation so we do not check
++            cond->fixed; also it does not need tables, so we use 0 as the second
++            argument.
++          */
++	  cond->fix_fields(thd, &cond);
++	}
++      }
++    }
++    if (cond->const_item())
++    {
++      *cond_value= eval_const_cond(cond) ? Item::COND_TRUE : Item::COND_FALSE;
++      return (COND*) 0;
++    }
++  }
++  else if (cond->const_item())
++  {
++    *cond_value= eval_const_cond(cond) ? Item::COND_TRUE : Item::COND_FALSE;
++    return (COND*) 0;
++  }
++  else if ((*cond_value= cond->eq_cmp_result()) != Item::COND_OK)
++  {						// boolean compare function
++    Item *left_item=	((Item_func*) cond)->arguments()[0];
++    Item *right_item= ((Item_func*) cond)->arguments()[1];
++    if (left_item->eq(right_item,1))
++    {
++      if (!left_item->maybe_null ||
++	  ((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC)
++	return (COND*) 0;			// Compare of identical items
++    }
++  }
++  *cond_value=Item::COND_OK;
++  return cond;					// Point at next and level
++}
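
[Editor's note: standalone sketch, not part of the patch. It reduces the AND/OR folding rules applied by remove_eq_conds() above to a toy condition tree: a FALSE term collapses an AND level, a TRUE term collapses an OR level, constant terms drop out, and a single surviving argument replaces its list. The Cond structure is an assumption made for illustration, not the server's Item hierarchy.]

#include <cstdio>
#include <memory>
#include <vector>

enum CondResult { COND_OK, COND_TRUE, COND_FALSE };

struct Cond {
  bool is_and_or = false;      // interior AND/OR node?
  bool and_level = false;      // true: AND, false: OR
  CondResult leaf = COND_OK;   // for leaves: constant TRUE/FALSE, or a real predicate
  std::vector<std::shared_ptr<Cond>> args;
};

// Returns the simplified condition (nullptr if it folded away) and sets *value.
std::shared_ptr<Cond> fold(const std::shared_ptr<Cond> &c, CondResult *value) {
  if (!c->is_and_or) {                       // leaf: constants fold away, OK stays
    *value = c->leaf;
    return c->leaf == COND_OK ? c : nullptr;
  }
  std::vector<std::shared_ptr<Cond>> kept;
  *value = COND_OK;
  for (auto &arg : c->args) {
    CondResult v;
    auto folded = fold(arg, &v);
    if (v == COND_FALSE && c->and_level) { *value = COND_FALSE; return nullptr; } // always false
    if (v == COND_TRUE && !c->and_level) { *value = COND_TRUE;  return nullptr; } // always true
    if (folded) kept.push_back(folded);   // TRUE under AND / FALSE under OR just drops
  }
  if (kept.empty()) { *value = c->and_level ? COND_TRUE : COND_FALSE; return nullptr; }
  if (kept.size() == 1) return kept.front();   // "Remove list", as in the code above
  c->args = std::move(kept);
  return c;
}

int main() {
  // (x = 1) AND (1 = 1) AND ((1 = 2) OR (y = 3))  folds to  (x = 1) AND (y = 3)
  auto leaf = [](CondResult r) { auto p = std::make_shared<Cond>(); p->leaf = r; return p; };
  auto node = [](bool and_level, std::vector<std::shared_ptr<Cond>> a) {
    auto p = std::make_shared<Cond>();
    p->is_and_or = true; p->and_level = and_level; p->args = std::move(a);
    return p;
  };
  auto cond = node(true, { leaf(COND_OK), leaf(COND_TRUE),
                           node(false, { leaf(COND_FALSE), leaf(COND_OK) }) });
  CondResult v;
  auto out = fold(cond, &v);
  std::printf("result=%d kept_args=%zu\n", (int) v, out ? out->args.size() : (size_t) 0);
  return 0;
}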
++
++/* 
++  Check if equality can be used in removing components of GROUP BY/DISTINCT
++  
++  SYNOPSIS
++    test_if_equality_guarantees_uniqueness()
++      l          the left comparison argument (a field if any)
++      r          the right comparison argument (a const, if any)
++  
++  DESCRIPTION    
++    Checks if an equality predicate can be used to take away 
++    DISTINCT/GROUP BY because it is known to be true for exactly one 
++    distinct value (e.g. <expr> == <const>).
++    Arguments must be of the same type because e.g. 
++    <string_field> = <int_const> may match more than 1 distinct value from 
++    the column. 
++    We must also take into consideration the optimization done for various
++    string constants when compared to dates etc. (see Item_int_with_ref),
++    as well as the collation of the arguments.
++  
++  RETURN VALUE  
++    TRUE    can be used
++    FALSE   cannot be used
++*/
++static bool
++test_if_equality_guarantees_uniqueness(Item *l, Item *r)
++{
++  return r->const_item() &&
++    /* elements must be compared as dates */
++     (Arg_comparator::can_compare_as_dates(l, r, 0) ||
++      /* or of the same result type */
++      (r->result_type() == l->result_type() &&
++       /* and must have the same collation if compared as strings */
++       (l->result_type() != STRING_RESULT ||
++        l->collation.collation == r->collation.collation)));
++}
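
[Editor's note: standalone sketch, not part of the patch. It illustrates why test_if_equality_guarantees_uniqueness() above requires both sides to have the same result type: a string column compared with an integer constant is compared numerically, so several distinct string values satisfy the same equality and DISTINCT/GROUP BY cannot be removed.]

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
  const char *values[] = {"1", " 1", "01", "1.0"};        // distinct as strings
  for (const char *v : values) {
    bool eq_as_number = std::strtod(v, nullptr) == 1.0;   // <string_col> = 1
    bool eq_as_string = std::strcmp(v, "1") == 0;         // <string_col> = '1'
    std::printf("%-4s numeric:%d string:%d\n", v, eq_as_number, eq_as_string);
  }
  // All four distinct strings satisfy "= 1", so that equality cannot be used
  // to drop DISTINCT; only a same-type, same-collation comparison can.
  return 0;
}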
++
++/**
++  Return TRUE if the item is a const value in all of the WHERE clause.
++*/
++
++static bool
++const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
++{
++  if (cond->type() == Item::COND_ITEM)
++  {
++    bool and_level= (((Item_cond*) cond)->functype()
++		     == Item_func::COND_AND_FUNC);
++    List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
++    Item *item;
++    while ((item=li++))
++    {
++      bool res=const_expression_in_where(item, comp_item, const_item);
++      if (res)					// Is a const value
++      {
++	if (and_level)
++	  return 1;
++      }
++      else if (!and_level)
++	return 0;
++    }
++    return and_level ? 0 : 1;
++  }
++  else if (cond->eq_cmp_result() != Item::COND_OK)
++  {						// boolean compare function
++    Item_func* func= (Item_func*) cond;
++    if (func->functype() != Item_func::EQUAL_FUNC &&
++	func->functype() != Item_func::EQ_FUNC)
++      return 0;
++    Item *left_item=	((Item_func*) cond)->arguments()[0];
++    Item *right_item= ((Item_func*) cond)->arguments()[1];
++    if (left_item->eq(comp_item,1))
++    {
++      if (test_if_equality_guarantees_uniqueness (left_item, right_item))
++      {
++	if (*const_item)
++	  return right_item->eq(*const_item, 1);
++	*const_item=right_item;
++	return 1;
++      }
++    }
++    else if (right_item->eq(comp_item,1))
++    {
++      if (test_if_equality_guarantees_uniqueness (right_item, left_item))
++      {
++	if (*const_item)
++	  return left_item->eq(*const_item, 1);
++	*const_item=left_item;
++	return 1;
++      }
++    }
++  }
++  return 0;
++}
++
++/****************************************************************************
++  Create internal temporary table
++****************************************************************************/
++
++/**
++  Create field for temporary table from given field.
++
++  @param thd	       Thread handler
++  @param org_field    field from which new field will be created
++  @param name         New field name
++  @param table	       Temporary table
++  @param item	       !=NULL if item->result_field should point to new field.
++                      This is relevant for how fill_record() is going to work:
++                      If item != NULL then fill_record() will update
++                      the record in the original table.
++                      If item == NULL then fill_record() will update
++                      the temporary table
++  @param convert_blob_length   If >0 create a varstring(convert_blob_length)
++                               field instead of blob.
++
++  @retval
++    NULL		on error
++  @retval
++    new_created field
++*/
++
++Field *create_tmp_field_from_field(THD *thd, Field *org_field,
++                                   const char *name, TABLE *table,
++                                   Item_field *item, uint convert_blob_length)
++{
++  Field *new_field;
++
++  /* 
++    Make sure that the blob fits into a Field_varstring which has 
++    2-byte length.
++  */
++  if (convert_blob_length && convert_blob_length <= Field_varstring::MAX_SIZE &&
++      (org_field->flags & BLOB_FLAG))
++    new_field= new Field_varstring(convert_blob_length,
++                                   org_field->maybe_null(),
++                                   org_field->field_name, table->s,
++                                   org_field->charset());
++  else
++    new_field= org_field->new_field(thd->mem_root, table,
++                                    table == org_field->table);
++  if (new_field)
++  {
++    new_field->init(table);
++    new_field->orig_table= org_field->orig_table;
++    if (item)
++      item->result_field= new_field;
++    else
++      new_field->field_name= name;
++    new_field->flags|= (org_field->flags & NO_DEFAULT_VALUE_FLAG);
++    if (org_field->maybe_null() || (item && item->maybe_null))
++      new_field->flags&= ~NOT_NULL_FLAG;	// Because of outer join
++    if (org_field->type() == MYSQL_TYPE_VAR_STRING ||
++        org_field->type() == MYSQL_TYPE_VARCHAR)
++      table->s->db_create_options|= HA_OPTION_PACK_RECORD;
++    else if (org_field->type() == FIELD_TYPE_DOUBLE)
++      ((Field_double *) new_field)->not_fixed= TRUE;
++  }
++  return new_field;
++}
++
++/**
++  Create field for temporary table using type of given item.
++
++  @param thd                   Thread handler
++  @param item                  Item to create a field for
++  @param table                 Temporary table
++  @param copy_func             If set and item is a function, store copy of
++                               item in this array
++  @param modify_item           1 if item->result_field should point to new
++                               item. This is relevant for how fill_record()
++                               is going to work:
++                               If modify_item is 1 then fill_record() will
++                               update the record in the original table.
++                               If modify_item is 0 then fill_record() will
++                               update the temporary table
++  @param convert_blob_length   If >0 create a varstring(convert_blob_length)
++                               field instead of blob.
++
++  @retval
++    0  on error
++  @retval
++    new_created field
++*/
++
++static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
++                                         Item ***copy_func, bool modify_item,
++                                         uint convert_blob_length)
++{
++  bool maybe_null= item->maybe_null;
++  Field *new_field;
++  LINT_INIT(new_field);
++
++  switch (item->result_type()) {
++  case REAL_RESULT:
++    new_field= new Field_double(item->max_length, maybe_null,
++                                item->name, item->decimals, TRUE);
++    break;
++  case INT_RESULT:
++    /* 
++      Select an integer type with the minimal fit precision.
++      MY_INT32_NUM_DECIMAL_DIGITS is sign inclusive, so don't count the sign.
++      Values with MY_INT32_NUM_DECIMAL_DIGITS digits may or may not fit into 
++      Field_long : make them Field_longlong.  
++    */
++    if (item->max_length >= (MY_INT32_NUM_DECIMAL_DIGITS - 1))
++      new_field=new Field_longlong(item->max_length, maybe_null,
++                                   item->name, item->unsigned_flag);
++    else
++      new_field=new Field_long(item->max_length, maybe_null,
++                               item->name, item->unsigned_flag);
++    break;
++  case STRING_RESULT:
++    DBUG_ASSERT(item->collation.collation);
++  
++    enum enum_field_types type;
++    /*
++      DATE/TIME and GEOMETRY fields have STRING_RESULT result type. 
++      To preserve the type they need to be handled separately.
++    */
++    if ((type= item->field_type()) == MYSQL_TYPE_DATETIME ||
++        type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE ||
++        type == MYSQL_TYPE_NEWDATE ||
++        type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY)
++      new_field= item->tmp_table_field_from_field_type(table, 1);
++    /* 
++      Make sure that the blob fits into a Field_varstring which has 
++      2-byte length.
++    */
++    else if (item->max_length/item->collation.collation->mbmaxlen > 255 &&
++             convert_blob_length <= Field_varstring::MAX_SIZE && 
++             convert_blob_length)
++      new_field= new Field_varstring(convert_blob_length, maybe_null,
++                                     item->name, table->s,
++                                     item->collation.collation);
++    else
++      new_field= item->make_string_field(table);
++    new_field->set_derivation(item->collation.derivation);
++    break;
++  case DECIMAL_RESULT:
++    new_field= Field_new_decimal::create_from_item(item);
++    break;
++  case ROW_RESULT:
++  default:
++    // This case should never be chosen
++    DBUG_ASSERT(0);
++    new_field= 0;
++    break;
++  }
++  if (new_field)
++    new_field->init(table);
++    
++  if (copy_func && item->is_result_field())
++    *((*copy_func)++) = item;			// Save for copy_funcs
++  if (modify_item)
++    item->set_result_field(new_field);
++  if (item->type() == Item::NULL_ITEM)
++    new_field->is_created_from_null_item= TRUE;
++  return new_field;
++}
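
[Editor's note: standalone sketch, not part of the patch. It restates the integer-width rule from the INT_RESULT branch of create_tmp_field_from_item() above. The value 11 used for the sign-inclusive digit count and the helper name pick_int_field() are assumptions for illustration only.]

#include <cstdio>

enum TmpIntField { FIELD_LONG /* 32-bit */, FIELD_LONGLONG /* 64-bit */ };

// Assumed placeholder for MY_INT32_NUM_DECIMAL_DIGITS: "-2147483648" incl. sign.
constexpr unsigned kInt32NumDecimalDigits = 11;

TmpIntField pick_int_field(unsigned max_length) {
  // Same test as in create_tmp_field_from_item(): a display width of
  // digits-1 may or may not fit a 32-bit field, so promote to 64 bits.
  return max_length >= kInt32NumDecimalDigits - 1 ? FIELD_LONGLONG : FIELD_LONG;
}

int main() {
  std::printf("max_length 9  -> %s\n",
              pick_int_field(9) == FIELD_LONG ? "Field_long" : "Field_longlong");
  std::printf("max_length 10 -> %s\n",
              pick_int_field(10) == FIELD_LONG ? "Field_long" : "Field_longlong");
  return 0;
}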
++
++
++/**
++  Create field for information schema table.
++
++  @param thd		Thread handler
++  @param table		Temporary table
++  @param item		Item to create a field for
++
++  @retval
++    0			on error
++  @retval
++    new_created field
++*/
++
++Field *create_tmp_field_for_schema(THD *thd, Item *item, TABLE *table)
++{
++  if (item->field_type() == MYSQL_TYPE_VARCHAR)
++  {
++    Field *field;
++    if (item->max_length > MAX_FIELD_VARCHARLENGTH)
++      field= new Field_blob(item->max_length, item->maybe_null,
++                            item->name, item->collation.collation);
++    else
++      field= new Field_varstring(item->max_length, item->maybe_null,
++                                 item->name,
++                                 table->s, item->collation.collation);
++    if (field)
++      field->init(table);
++    return field;
++  }
++  return item->tmp_table_field_from_field_type(table, 0);
++}
++
++
++/**
++  Create field for temporary table.
++
++  @param thd		Thread handler
++  @param table		Temporary table
++  @param item		Item to create a field for
++  @param type		Type of item (normally item->type)
++  @param copy_func	If set and item is a function, store copy of item
++                       in this array
++  @param from_field    if field will be created using other field as example,
++                       pointer example field will be written here
++  @param default_field	If field has a default value field, store it here
++  @param group		1 if we are going to do a relative group by on result
++  @param modify_item	1 if item->result_field should point to new item.
++                       This is relevant for how fill_record() is going to
++                       work:
++                       If modify_item is 1 then fill_record() will update
++                       the record in the original table.
++                       If modify_item is 0 then fill_record() will update
++                       the temporary table
++  @param convert_blob_length If >0 create a varstring(convert_blob_length)
++                             field instead of blob.
++
++  @retval
++    0			on error
++  @retval
++    new_created field
++*/
++
++Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
++                        Item ***copy_func, Field **from_field,
++                        Field **default_field,
++                        bool group, bool modify_item,
++                        bool table_cant_handle_bit_fields,
++                        bool make_copy_field,
++                        uint convert_blob_length)
++{
++  Field *result;
++  Item::Type orig_type= type;
++  Item *orig_item= 0;
++
++  if (type != Item::FIELD_ITEM &&
++      item->real_item()->type() == Item::FIELD_ITEM)
++  {
++    orig_item= item;
++    item= item->real_item();
++    type= Item::FIELD_ITEM;
++  }
++
++  switch (type) {
++  case Item::SUM_FUNC_ITEM:
++  {
++    Item_sum *item_sum=(Item_sum*) item;
++    result= item_sum->create_tmp_field(group, table, convert_blob_length);
++    if (!result)
++      thd->fatal_error();
++    return result;
++  }
++  case Item::FIELD_ITEM:
++  case Item::DEFAULT_VALUE_ITEM:
++  {
++    Item_field *field= (Item_field*) item;
++    bool orig_modify= modify_item;
++    if (orig_type == Item::REF_ITEM)
++      modify_item= 0;
++    /*
++      If the item must be able to store NULLs but the underlying field can't,
++      create_tmp_field_from_field() can't be used for tmp field creation.
++    */
++    if (field->maybe_null && !field->field->maybe_null())
++    {
++      result= create_tmp_field_from_item(thd, item, table, NULL,
++                                         modify_item, convert_blob_length);
++      *from_field= field->field;
++      if (result && modify_item)
++        field->result_field= result;
++    } 
++    else if (table_cant_handle_bit_fields && field->field->type() ==
++             MYSQL_TYPE_BIT)
++    {
++      *from_field= field->field;
++      result= create_tmp_field_from_item(thd, item, table, copy_func,
++                                        modify_item, convert_blob_length);
++      if (result && modify_item)
++        field->result_field= result;
++    }
++    else
++      result= create_tmp_field_from_field(thd, (*from_field= field->field),
++                                          orig_item ? orig_item->name :
++                                          item->name,
++                                          table,
++                                          modify_item ? field :
++                                          NULL,
++                                          convert_blob_length);
++    if (orig_type == Item::REF_ITEM && orig_modify)
++      ((Item_ref*)orig_item)->set_result_field(result);
++    /*
++      Fields that are used as arguments to the DEFAULT() function already have
++      their data pointers set to the default value during name resolution. See
++      Item_default_value::fix_fields.
++    */
++    if (orig_type != Item::DEFAULT_VALUE_ITEM && field->field->eq_def(result))
++      *default_field= field->field;
++    return result;
++  }
++  /* Fall through */
++  case Item::FUNC_ITEM:
++    if (((Item_func *) item)->functype() == Item_func::FUNC_SP)
++    {
++      Item_func_sp *item_func_sp= (Item_func_sp *) item;
++      Field *sp_result_field= item_func_sp->get_sp_result_field();
++
++      if (make_copy_field)
++      {
++        DBUG_ASSERT(item_func_sp->result_field);
++        *from_field= item_func_sp->result_field;
++      }
++      else
++      {
++        *((*copy_func)++)= item;
++      }
++
++      Field *result_field=
++        create_tmp_field_from_field(thd,
++                                    sp_result_field,
++                                    item_func_sp->name,
++                                    table,
++                                    NULL,
++                                    convert_blob_length);
++
++      if (modify_item)
++        item->set_result_field(result_field);
++
++      return result_field;
++    }
++
++    /* Fall through */
++  case Item::COND_ITEM:
++  case Item::FIELD_AVG_ITEM:
++  case Item::FIELD_STD_ITEM:
++  case Item::SUBSELECT_ITEM:
++    /* The following can only happen with 'CREATE TABLE ... SELECT' */
++  case Item::PROC_ITEM:
++  case Item::INT_ITEM:
++  case Item::REAL_ITEM:
++  case Item::DECIMAL_ITEM:
++  case Item::STRING_ITEM:
++  case Item::REF_ITEM:
++  case Item::NULL_ITEM:
++  case Item::VARBIN_ITEM:
++    if (make_copy_field)
++    {
++      DBUG_ASSERT(((Item_result_field*)item)->result_field);
++      *from_field= ((Item_result_field*)item)->result_field;
++    }
++    return create_tmp_field_from_item(thd, item, table,
++                                      (make_copy_field ? 0 : copy_func),
++                                       modify_item, convert_blob_length);
++  case Item::TYPE_HOLDER:  
++    result= ((Item_type_holder *)item)->make_field_by_type(table);
++    result->set_derivation(item->collation.derivation);
++    return result;
++  default:					// Doesn't have to be stored
++    return 0;
++  }
++}
++
++/*
++  Set up column usage bitmaps for a temporary table
++
++  IMPLEMENTATION
++    For temporary tables, we need one bitmap with all columns set and
++    a tmp_set bitmap to be used by things like filesort.
++*/
++
++void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
++{
++  uint field_count= table->s->fields;
++  bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
++              FALSE);
++  bitmap_init(&table->tmp_set,
++              (my_bitmap_map*) (bitmaps+ bitmap_buffer_size(field_count)),
++              field_count, FALSE);
++  /* write_set and all_set are copies of read_set */
++  table->def_write_set= table->def_read_set;
++  table->s->all_set= table->def_read_set;
++  bitmap_set_all(&table->s->all_set);
++  table->default_column_bitmaps();
++}
++
++
++/**
++  Create a temp table according to a field list.
++
++  Given field pointers are changed to point at tmp_table for
++  send_fields. The table object is self-contained: it is
++  allocated in its own memory root, as are the Field objects
++  created for the table columns.
++  This function will replace Item_sum items in 'fields' list with
++  corresponding Item_field items, pointing at the fields in the
++  temporary table, unless this was prohibited by TRUE
++  value of argument save_sum_fields. The Item_field objects
++  are created in THD memory root.
++
++  @param thd                  thread handle
++  @param param                a description used as input to create the table
++  @param fields               list of items that will be used to define
++                              column types of the table (also see NOTES)
++  @param group                TODO document
++  @param distinct             should table rows be distinct
++  @param save_sum_fields      see NOTES
++  @param select_options
++  @param rows_limit
++  @param table_alias          possible name of the temporary table that can
++                              be used for name resolving; can be "".
++*/
++
++#define STRING_TOTAL_LENGTH_TO_PACK_ROWS 128
++#define AVG_STRING_LENGTH_TO_PACK_ROWS   64
++#define RATIO_TO_PACK_ROWS	       2
++#define MIN_STRING_LENGTH_TO_PACK_ROWS   10
++
++TABLE *
++create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
++		 ORDER *group, bool distinct, bool save_sum_fields,
++		 ulonglong select_options, ha_rows rows_limit,
++		 char *table_alias)
++{
++  MEM_ROOT *mem_root_save, own_root;
++  TABLE *table;
++  TABLE_SHARE *share;
++  uint	i,field_count,null_count,null_pack_length;
++  uint  copy_func_count= param->func_count;
++  uint  hidden_null_count, hidden_null_pack_length, hidden_field_count;
++  uint  blob_count,group_null_items, string_count;
++  uint  temp_pool_slot=MY_BIT_NONE;
++  uint fieldnr= 0;
++  ulong reclength, string_total_length;
++  bool  using_unique_constraint= 0;
++  bool  use_packed_rows= 0;
++  bool  not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
++  char  *tmpname,path[FN_REFLEN];
++  uchar	*pos, *group_buff, *bitmaps;
++  uchar *null_flags;
++  Field **reg_field, **from_field, **default_field;
++  uint *blob_field;
++  Copy_field *copy=0;
++  KEY *keyinfo;
++  KEY_PART_INFO *key_part_info;
++  Item **copy_func;
++  MI_COLUMNDEF *recinfo;
++  /*
++    total_uneven_bit_length is uneven bit length for visible fields
++    hidden_uneven_bit_length is uneven bit length for hidden fields
++  */
++  uint total_uneven_bit_length= 0, hidden_uneven_bit_length= 0;
++  bool force_copy_fields= param->force_copy_fields;
++  /* Treat sum functions as normal ones when loose index scan is used. */
++  save_sum_fields|= param->precomputed_group_by;
++  DBUG_ENTER("create_tmp_table");
++  DBUG_PRINT("enter",
++             ("distinct: %d  save_sum_fields: %d  rows_limit: %lu  group: %d",
++              (int) distinct, (int) save_sum_fields,
++              (ulong) rows_limit,test(group)));
++
++  status_var_increment(thd->status_var.created_tmp_tables);
++
++  if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
++    temp_pool_slot = bitmap_lock_set_next(&temp_pool);
++
++  if (temp_pool_slot != MY_BIT_NONE) // we got a slot
++    sprintf(path, "%s_%lx_%i", tmp_file_prefix,
++            current_pid, temp_pool_slot);
++  else
++  {
++    /* if we run out of slots or we are not using the temp pool */
++    sprintf(path,"%s%lx_%lx_%x", tmp_file_prefix,current_pid,
++            thd->thread_id, thd->tmp_table++);
++  }
++
++  /*
++    No need to change table name to lower case as we are only creating
++    MyISAM or HEAP tables here
++  */
++  fn_format(path, path, mysql_tmpdir, "", MY_REPLACE_EXT|MY_UNPACK_FILENAME);
++
++
++  if (group)
++  {
++    if (!param->quick_group)
++      group=0;					// Can't use group key
++    else for (ORDER *tmp=group ; tmp ; tmp=tmp->next)
++    {
++      (*tmp->item)->marker=4;			// Store null in key
++      if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB)
++	using_unique_constraint=1;
++    }
++    if (param->group_length >= MAX_BLOB_WIDTH)
++      using_unique_constraint=1;
++    if (group)
++      distinct=0;				// Can't use distinct
++  }
++
++  field_count=param->field_count+param->func_count+param->sum_func_count;
++  hidden_field_count=param->hidden_field_count;
++
++  /*
++    When loose index scan is employed as access method, it already
++    computes all groups and the result of all aggregate functions. We
++    make space for the items of the aggregate function in the list of
++    functions TMP_TABLE_PARAM::items_to_copy, so that the values of
++    these items are stored in the temporary table.
++  */
++  if (param->precomputed_group_by)
++    copy_func_count+= param->sum_func_count;
++  
++  init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);
++
++  if (!multi_alloc_root(&own_root,
++                        &table, sizeof(*table),
++                        &share, sizeof(*share),
++                        &reg_field, sizeof(Field*) * (field_count+1),
++                        &default_field, sizeof(Field*) * (field_count),
++                        &blob_field, sizeof(uint)*(field_count+1),
++                        &from_field, sizeof(Field*)*field_count,
++                        &copy_func, sizeof(*copy_func)*(copy_func_count+1),
++                        &param->keyinfo, sizeof(*param->keyinfo),
++                        &key_part_info,
++                        sizeof(*key_part_info)*(param->group_parts+1),
++                        &param->start_recinfo,
++                        sizeof(*param->recinfo)*(field_count*2+4),
++                        &tmpname, (uint) strlen(path)+1,
++                        &group_buff, (group && ! using_unique_constraint ?
++                                      param->group_length : 0),
++                        &bitmaps, bitmap_buffer_size(field_count)*2,
++                        NullS))
++  {
++    if (temp_pool_slot != MY_BIT_NONE)
++      bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
++    DBUG_RETURN(NULL);				/* purecov: inspected */
++  }
++  /* Copy_field belongs to TMP_TABLE_PARAM, allocate it in THD mem_root */
++  if (!(param->copy_field= copy= new (thd->mem_root) Copy_field[field_count]))
++  {
++    if (temp_pool_slot != MY_BIT_NONE)
++      bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
++    free_root(&own_root, MYF(0));               /* purecov: inspected */
++    DBUG_RETURN(NULL);				/* purecov: inspected */
++  }
++  param->items_to_copy= copy_func;
++  strmov(tmpname,path);
++  /* make table according to fields */
++
++  bzero((char*) table,sizeof(*table));
++  bzero((char*) reg_field,sizeof(Field*)*(field_count+1));
++  bzero((char*) default_field, sizeof(Field*) * (field_count));
++  bzero((char*) from_field,sizeof(Field*)*field_count);
++
++  table->mem_root= own_root;
++  mem_root_save= thd->mem_root;
++  thd->mem_root= &table->mem_root;
++
++  table->field=reg_field;
++  table->alias= table_alias;
++  table->reginfo.lock_type=TL_WRITE;	/* Will be updated */
++  table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE;
++  table->map=1;
++  table->temp_pool_slot = temp_pool_slot;
++  table->copy_blobs= 1;
++  table->in_use= thd;
++  table->quick_keys.init();
++  table->covering_keys.init();
++  table->merge_keys.init();
++  table->keys_in_use_for_query.init();
++
++  table->s= share;
++  init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
++  share->blob_field= blob_field;
++  share->blob_ptr_size= portable_sizeof_char_ptr;
++  share->db_low_byte_first=1;                // True for HEAP and MyISAM
++  share->table_charset= param->table_charset;
++  share->primary_key= MAX_KEY;               // Indicate no primary key
++  share->keys_for_keyread.init();
++  share->keys_in_use.init();
++
++  /* Calculate which type of fields we will store in the temporary table */
++
++  reclength= string_total_length= 0;
++  blob_count= string_count= null_count= hidden_null_count= group_null_items= 0;
++  param->using_indirect_summary_function=0;
++
++  List_iterator_fast<Item> li(fields);
++  Item *item;
++  Field **tmp_from_field=from_field;
++  while ((item=li++))
++  {
++    Item::Type type=item->type();
++    if (not_all_columns)
++    {
++      if (item->with_sum_func && type != Item::SUM_FUNC_ITEM)
++      {
++        if (item->used_tables() & OUTER_REF_TABLE_BIT)
++          item->update_used_tables();
++        if (type == Item::SUBSELECT_ITEM ||
++            (item->used_tables() & ~OUTER_REF_TABLE_BIT))
++        {
++	  /*
++	    Mark that we have ignored an item that refers to a summary
++	    function. We need to know this if someone is going to use
++	    DISTINCT on the result.
++	  */
++	  param->using_indirect_summary_function=1;
++	  continue;
++        }
++      }
++      if (item->const_item() && (int) hidden_field_count <= 0)
++        continue; // We don't have to store this
++    }
++    if (type == Item::SUM_FUNC_ITEM && !group && !save_sum_fields)
++    {						/* Can't calc group yet */
++      Item_sum *sum_item= (Item_sum *) item;
++      sum_item->result_field=0;
++      for (i=0 ; i < sum_item->get_arg_count() ; i++)
++      {
++	Item *arg= sum_item->get_arg(i);
++	if (!arg->const_item())
++	{
++	  Field *new_field=
++            create_tmp_field(thd, table, arg, arg->type(), &copy_func,
++                             tmp_from_field, &default_field[fieldnr],
++                             group != 0,not_all_columns,
++                             distinct, 0,
++                             param->convert_blob_length);
++	  if (!new_field)
++	    goto err;					// Should be OOM
++	  tmp_from_field++;
++	  reclength+=new_field->pack_length();
++	  if (new_field->flags & BLOB_FLAG)
++	  {
++	    *blob_field++= fieldnr;
++	    blob_count++;
++	  }
++          if (new_field->type() == MYSQL_TYPE_BIT)
++            total_uneven_bit_length+= new_field->field_length & 7;
++	  *(reg_field++)= new_field;
++          if (new_field->real_type() == MYSQL_TYPE_STRING ||
++              new_field->real_type() == MYSQL_TYPE_VARCHAR)
++          {
++            string_count++;
++            string_total_length+= new_field->pack_length();
++          }
++          thd->mem_root= mem_root_save;
++          arg= sum_item->set_arg(i, thd, new Item_field(new_field));
++          thd->mem_root= &table->mem_root;
++	  if (!(new_field->flags & NOT_NULL_FLAG))
++          {
++	    null_count++;
++            /*
++              new_field->maybe_null() is still false, it will be
++              changed below. But we have to set up Item_field correctly
++            */
++            arg->maybe_null=1;
++          }
++          new_field->field_index= fieldnr++;
++	}
++      }
++    }
++    else
++    {
++      /*
++	The last parameter to create_tmp_field() is a bit tricky:
++
++	We need to set it to 0 in union, to get fill_record() to modify the
++	temporary table.
++	We need to set it to 1 on multi-table-update and in select to
++	write rows to the temporary table.
++	We here distinguish between UNION and multi-table-updates by the fact
++	that in the latter case group is set to the row pointer.
++
++        The test for item->marker == 4 is to ensure we don't create a group-by
++        key over a bit field as heap tables can't handle that.
++      */
++      Field *new_field= (param->schema_table) ?
++        create_tmp_field_for_schema(thd, item, table) :
++        create_tmp_field(thd, table, item, type, &copy_func,
++                         tmp_from_field, &default_field[fieldnr],
++                         group != 0,
++                         !force_copy_fields &&
++                           (not_all_columns || group !=0),
++                         item->marker == 4, force_copy_fields,
++                         param->convert_blob_length);
++
++      if (!new_field)
++      {
++	if (thd->is_fatal_error)
++	  goto err;				// Got OOM
++	continue;				// Some kind of const item
++      }
++      if (type == Item::SUM_FUNC_ITEM)
++	((Item_sum *) item)->result_field= new_field;
++      tmp_from_field++;
++      reclength+=new_field->pack_length();
++      if (!(new_field->flags & NOT_NULL_FLAG))
++	null_count++;
++      if (new_field->type() == MYSQL_TYPE_BIT)
++        total_uneven_bit_length+= new_field->field_length & 7;
++      if (new_field->flags & BLOB_FLAG)
++      {
++        *blob_field++= fieldnr;
++	blob_count++;
++      }
++      if (item->marker == 4 && item->maybe_null)
++      {
++	group_null_items++;
++	new_field->flags|= GROUP_FLAG;
++      }
++      new_field->field_index= fieldnr++;
++      *(reg_field++)= new_field;
++    }
++    if (!--hidden_field_count)
++    {
++      /*
++        This was the last hidden field; remember how many hidden fields
++        could be NULL
++      */
++      hidden_null_count=null_count;
++      /*
++	We need to update hidden_field_count as we may have stored group
++	functions with constant arguments
++      */
++      param->hidden_field_count= fieldnr;
++      null_count= 0;
++      /*
++        On last hidden field we store uneven bit length in
++        hidden_uneven_bit_length and proceed calculation of
++        uneven bits for visible fields into
++        total_uneven_bit_length variable.
++      */
++      hidden_uneven_bit_length= total_uneven_bit_length;
++      total_uneven_bit_length= 0;
++    }
++  }
++  DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
++  DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
++  field_count= fieldnr;
++  *reg_field= 0;
++  *blob_field= 0;				// End marker
++  share->fields= field_count;
++
++  /* If the result table is small, use a heap table */
++  /* future: storage engine selection can be made dynamic? */
++  if (blob_count || using_unique_constraint ||
++      (select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
++      OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM))
++  {
++    share->db_plugin= ha_lock_engine(0, myisam_hton);
++    table->file= get_new_handler(share, &table->mem_root,
++                                 share->db_type());
++    if (group &&
++	(param->group_parts > table->file->max_key_parts() ||
++	 param->group_length > table->file->max_key_length()))
++      using_unique_constraint=1;
++  }
++  else
++  {
++    share->db_plugin= ha_lock_engine(0, heap_hton);
++    table->file= get_new_handler(share, &table->mem_root,
++                                 share->db_type());
++  }
++  if (!table->file)
++    goto err;
++
++
++  if (!using_unique_constraint)
++    reclength+= group_null_items;	// null flag is stored separately
++
++  share->blob_fields= blob_count;
++  if (blob_count == 0)
++  {
++    /* We need to ensure that first byte is not 0 for the delete link */
++    if (param->hidden_field_count)
++      hidden_null_count++;
++    else
++      null_count++;
++  }
++  hidden_null_pack_length= (hidden_null_count + 7 +
++                            hidden_uneven_bit_length) / 8;
++  null_pack_length= (hidden_null_pack_length +
++                     (null_count + total_uneven_bit_length + 7) / 8);
++  reclength+=null_pack_length;
++  if (!reclength)
++    reclength=1;				// Dummy select
++  /* Use packed rows if there are blobs or a lot of space to gain */
++  if (blob_count ||
++      (string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS &&
++      (reclength / string_total_length <= RATIO_TO_PACK_ROWS ||
++       string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)))
++    use_packed_rows= 1;
++
++  share->reclength= reclength;
++  {
++    uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
++    share->rec_buff_length= alloc_length;
++    if (!(table->record[0]= (uchar*)
++                            alloc_root(&table->mem_root, alloc_length*3)))
++      goto err;
++    table->record[1]= table->record[0]+alloc_length;
++    share->default_values= table->record[1]+alloc_length;
++  }
++  copy_func[0]=0;				// End marker
++  param->func_count= copy_func - param->items_to_copy; 
++
++  setup_tmp_table_column_bitmaps(table, bitmaps);
++
++  recinfo=param->start_recinfo;
++  null_flags=(uchar*) table->record[0];
++  pos=table->record[0]+ null_pack_length;
++  if (null_pack_length)
++  {
++    bzero((uchar*) recinfo,sizeof(*recinfo));
++    recinfo->type=FIELD_NORMAL;
++    recinfo->length=null_pack_length;
++    recinfo++;
++    bfill(null_flags,null_pack_length,255);	// Set null fields
++
++    table->null_flags= (uchar*) table->record[0];
++    share->null_fields= null_count+ hidden_null_count;
++    share->null_bytes= null_pack_length;
++  }
++  null_count= (blob_count == 0) ? 1 : 0;
++  hidden_field_count=param->hidden_field_count;
++  for (i=0,reg_field=table->field; i < field_count; i++,reg_field++,recinfo++)
++  {
++    Field *field= *reg_field;
++    uint length;
++    bzero((uchar*) recinfo,sizeof(*recinfo));
++
++    if (!(field->flags & NOT_NULL_FLAG))
++    {
++      if (field->flags & GROUP_FLAG && !using_unique_constraint)
++      {
++	/*
++	  We have to reserve one byte here for NULL bits,
++	  as this is updated by 'end_update()'
++	*/
++	*pos++=0;				// Null is stored here
++	recinfo->length=1;
++	recinfo->type=FIELD_NORMAL;
++	recinfo++;
++	bzero((uchar*) recinfo,sizeof(*recinfo));
++      }
++      else
++      {
++	recinfo->null_bit= 1 << (null_count & 7);
++	recinfo->null_pos= null_count/8;
++      }
++      field->move_field(pos,null_flags+null_count/8,
++			1 << (null_count & 7));
++      null_count++;
++    }
++    else
++      field->move_field(pos,(uchar*) 0,0);
++    if (field->type() == MYSQL_TYPE_BIT)
++    {
++      /* We have to reserve place for extra bits among null bits */
++      ((Field_bit*) field)->set_bit_ptr(null_flags + null_count / 8,
++                                        null_count & 7);
++      null_count+= (field->field_length & 7);
++    }
++    field->reset();
++
++    /*
++      Test if there is a default field value. The test for ->ptr is to skip
++      'offset' fields generated by initalize_tables
++    */
++    if (default_field[i] && default_field[i]->ptr)
++    {
++      /* 
++         default_field[i] is set only in the cases  when 'field' can
++         inherit the default value that is defined for the field referred
++         by the Item_field object from which 'field' has been created.
++      */
++      my_ptrdiff_t diff;
++      Field *orig_field= default_field[i];
++      /* Get the value from default_values */
++      diff= (my_ptrdiff_t) (orig_field->table->s->default_values-
++                            orig_field->table->record[0]);
++      orig_field->move_field_offset(diff);      // Points now at default_values
++      if (orig_field->is_real_null())
++        field->set_null();
++      else
++      {
++        field->set_notnull();
++        memcpy(field->ptr, orig_field->ptr, field->pack_length());
++      }
++      orig_field->move_field_offset(-diff);     // Back to record[0]
++    } 
++
++    if (from_field[i])
++    {						/* Not a table Item */
++      copy->set(field,from_field[i],save_sum_fields);
++      copy++;
++    }
++    length=field->pack_length();
++    pos+= length;
++
++    /* Make entry for create table */
++    recinfo->length=length;
++    if (field->flags & BLOB_FLAG)
++      recinfo->type= (int) FIELD_BLOB;
++    else if (use_packed_rows &&
++             field->real_type() == MYSQL_TYPE_STRING &&
++	     length >= MIN_STRING_LENGTH_TO_PACK_ROWS)
++      recinfo->type=FIELD_SKIP_ENDSPACE;
++    else
++      recinfo->type=FIELD_NORMAL;
++    if (!--hidden_field_count)
++      null_count=(null_count+7) & ~7;		// move to next byte
++
++    // fix table name in field entry
++    field->table_name= &table->alias;
++  }
++
++  param->copy_field_end=copy;
++  param->recinfo=recinfo;
++  store_record(table,s->default_values);        // Make empty default record
++
++  if (thd->variables.tmp_table_size == ~ (ulonglong) 0)		// No limit
++    share->max_rows= ~(ha_rows) 0;
++  else
++    share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
++                                 MYSQL_MIN(thd->variables.tmp_table_size,
++                                           thd->variables.max_heap_table_size) :
++                                 thd->variables.tmp_table_size) /
++			         share->reclength);
++  set_if_bigger(share->max_rows,1);		// For dummy start options
++  /*
++    Push the LIMIT clause to the temporary table creation, so that we
++    materialize only up to 'rows_limit' records instead of all result records.
++  */
++  set_if_smaller(share->max_rows, rows_limit);
++  param->end_write_records= rows_limit;
++
++  keyinfo= param->keyinfo;
++
++  if (group)
++  {
++    DBUG_PRINT("info",("Creating group key in temporary table"));
++    table->group=group;				/* Table is grouped by key */
++    param->group_buff=group_buff;
++    share->keys=1;
++    share->uniques= test(using_unique_constraint);
++    table->key_info=keyinfo;
++    keyinfo->key_part=key_part_info;
++    keyinfo->flags=HA_NOSAME;
++    keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts;
++    keyinfo->key_length=0;
++    keyinfo->rec_per_key=0;
++    keyinfo->algorithm= HA_KEY_ALG_UNDEF;
++    keyinfo->name= (char*) "group_key";
++    ORDER *cur_group= group;
++    for (; cur_group ; cur_group= cur_group->next, key_part_info++)
++    {
++      Field *field=(*cur_group->item)->get_tmp_table_field();
++      DBUG_ASSERT(field->table == table);
++      bool maybe_null=(*cur_group->item)->maybe_null;
++      key_part_info->null_bit=0;
++      key_part_info->field=  field;
++      key_part_info->offset= field->offset(table->record[0]);
++      key_part_info->length= (uint16) field->key_length();
++      key_part_info->type=   (uint8) field->key_type();
++      key_part_info->key_type =
++	((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
++	 (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
++	 (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
++	0 : FIELDFLAG_BINARY;
++      if (!using_unique_constraint)
++      {
++	cur_group->buff=(char*) group_buff;
++	if (!(cur_group->field= field->new_key_field(thd->mem_root,table,
++                                                     group_buff +
++                                                     test(maybe_null),
++                                                     field->null_ptr,
++                                                     field->null_bit)))
++	  goto err; /* purecov: inspected */
++	if (maybe_null)
++	{
++	  /*
++	    To be able to group on NULL, we reserved place in group_buff
++	    for the NULL flag just before the column. (see above).
++	    The field data is after this flag.
++	    The NULL flag is updated in 'end_update()' and 'end_write()'
++	  */
++	  keyinfo->flags|= HA_NULL_ARE_EQUAL;	// def. that NULL == NULL
++	  key_part_info->null_bit=field->null_bit;
++	  key_part_info->null_offset= (uint) (field->null_ptr -
++					      (uchar*) table->record[0]);
++          cur_group->buff++;                        // Pointer to field data
++	  group_buff++;                         // Skip null flag
++	}
++        /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */
++        key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL;
++	group_buff+= cur_group->field->pack_length();
++      }
++      keyinfo->key_length+=  key_part_info->length;
++    }
++  }
++
++  if (distinct && field_count != param->hidden_field_count)
++  {
++    /*
++      Create a unique key or a unique constraint over all columns
++      that should be in the result.  In the temporary table, there are
++      'param->hidden_field_count' extra columns, whose null bits are stored
++      in the first 'hidden_null_pack_length' bytes of the row.
++    */
++    DBUG_PRINT("info",("hidden_field_count: %d", param->hidden_field_count));
++
++    null_pack_length-=hidden_null_pack_length;
++    keyinfo->key_parts= ((field_count-param->hidden_field_count)+
++			 test(null_pack_length));
++    table->distinct= 1;
++    share->keys= 1;
++    if (blob_count)
++    {
++      using_unique_constraint=1;
++      share->uniques= 1;
++    }
++    if (!(key_part_info= (KEY_PART_INFO*)
++          alloc_root(&table->mem_root,
++                     keyinfo->key_parts * sizeof(KEY_PART_INFO))))
++      goto err;
++    bzero((void*) key_part_info, keyinfo->key_parts * sizeof(KEY_PART_INFO));
++    table->key_info=keyinfo;
++    keyinfo->key_part=key_part_info;
++    keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL;
++    keyinfo->key_length=(uint16) reclength;
++    keyinfo->name= (char*) "distinct_key";
++    keyinfo->algorithm= HA_KEY_ALG_UNDEF;
++    keyinfo->rec_per_key=0;
++    if (null_pack_length)
++    {
++      key_part_info->null_bit=0;
++      key_part_info->offset=hidden_null_pack_length;
++      key_part_info->length=null_pack_length;
++      key_part_info->field= new Field_string(table->record[0],
++                                             (uint32) key_part_info->length,
++                                             (uchar*) 0,
++                                             (uint) 0,
++                                             Field::NONE,
++                                             NullS, &my_charset_bin);
++      if (!key_part_info->field)
++        goto err;
++      key_part_info->field->init(table);
++      key_part_info->key_type=FIELDFLAG_BINARY;
++      key_part_info->type=    HA_KEYTYPE_BINARY;
++      key_part_info++;
++    }
++    /* Create a distinct key over the columns we are going to return */
++    for (i=param->hidden_field_count, reg_field=table->field + i ;
++	 i < field_count;
++	 i++, reg_field++, key_part_info++)
++    {
++      key_part_info->null_bit=0;
++      key_part_info->field=    *reg_field;
++      key_part_info->offset=   (*reg_field)->offset(table->record[0]);
++      key_part_info->length=   (uint16) (*reg_field)->pack_length();
++      key_part_info->type=     (uint8) (*reg_field)->key_type();
++      key_part_info->key_type =
++	((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
++	 (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
++	 (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
++	0 : FIELDFLAG_BINARY;
++    }
++  }
++
++  if (thd->is_fatal_error)				// If end of memory
++    goto err;					 /* purecov: inspected */
++  share->db_record_offset= 1;
++  if (share->db_type() == myisam_hton)
++  {
++    if (create_myisam_tmp_table(table,param,select_options))
++      goto err;
++  }
++  if (open_tmp_table(table))
++    goto err;
++
++  thd->mem_root= mem_root_save;
++
++  DBUG_RETURN(table);
++
++err:
++  thd->mem_root= mem_root_save;
++  free_tmp_table(thd,table);                    /* purecov: inspected */
++  if (temp_pool_slot != MY_BIT_NONE)
++    bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
++  DBUG_RETURN(NULL);				/* purecov: inspected */
++}
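
[Editor's note: standalone sketch, not part of the patch. It condenses two sizing decisions made near the end of create_tmp_table() above: whether the table must be backed by MyISAM rather than HEAP, and how share->max_rows is derived from tmp_table_size, max_heap_table_size, the record length and the pushed-down LIMIT. The OPTION_BIG_TABLES/SELECT_SMALL_RESULT handling is folded into a single force_disk flag; all names are illustrative.]

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct TmpTableChoice { bool use_myisam; uint64_t max_rows; };

TmpTableChoice plan_tmp_table(bool has_blobs, bool needs_unique_constraint,
                              bool force_disk, uint64_t tmp_table_size,
                              uint64_t max_heap_table_size, uint64_t reclength,
                              uint64_t rows_limit) {
  TmpTableChoice c;
  // Blobs and over-long keys cannot live in the HEAP engine; neither can a
  // table the caller forces to disk.
  c.use_myisam = has_blobs || needs_unique_constraint || force_disk;
  // HEAP tables are additionally capped by max_heap_table_size.
  uint64_t byte_cap = c.use_myisam ? tmp_table_size
                                   : std::min(tmp_table_size, max_heap_table_size);
  c.max_rows = std::max<uint64_t>(byte_cap / reclength, 1);  // dummy start needs >= 1
  c.max_rows = std::min(c.max_rows, rows_limit);             // push the LIMIT down
  return c;
}

int main() {
  TmpTableChoice c = plan_tmp_table(false, false, false,
                                    16 * 1024 * 1024, 8 * 1024 * 1024,
                                    128, 1000000);
  std::printf("engine=%s max_rows=%llu\n", c.use_myisam ? "MyISAM" : "HEAP",
              (unsigned long long) c.max_rows);
  return 0;
}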
++
++
++/****************************************************************************/
++
++/**
++  Create a reduced TABLE object with properly set up Field list from a
++  list of field definitions.
++
++    The created table doesn't have a table handler associated with
++    it, has no keys, no group/distinct, no copy_funcs array.
++    The sole purpose of this TABLE object is to use the power of Field
++    class to read/write data to/from table->record[0]. Then one can store
++    the record in any container (RB tree, hash, etc).
++    The table is created in the THD mem_root, as are the table's fields.
++    Consequently, if you don't have BLOB fields, you don't need to free it.
++
++  @param thd         connection handle
++  @param field_list  list of column definitions
++
++  @return
++    0 if out of memory, TABLE object in case of success
++*/
++
++TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
++{
++  uint field_count= field_list.elements;
++  uint blob_count= 0;
++  Field **field;
++  Create_field *cdef;                           /* column definition */
++  uint record_length= 0;
++  uint null_count= 0;                 /* number of columns which may be null */
++  uint null_pack_length;              /* NULL representation array length */
++  uint *blob_field;
++  uchar *bitmaps;
++  TABLE *table;
++  TABLE_SHARE *share;
++
++  if (!multi_alloc_root(thd->mem_root,
++                        &table, sizeof(*table),
++                        &share, sizeof(*share),
++                        &field, (field_count + 1) * sizeof(Field*),
++                        &blob_field, (field_count+1) *sizeof(uint),
++                        &bitmaps, bitmap_buffer_size(field_count)*2,
++                        NullS))
++    return 0;
++
++  bzero(table, sizeof(*table));
++  bzero(share, sizeof(*share));
++  table->field= field;
++  table->s= share;
++  share->blob_field= blob_field;
++  share->fields= field_count;
++  share->blob_ptr_size= portable_sizeof_char_ptr;
++  setup_tmp_table_column_bitmaps(table, bitmaps);
++
++  /* Create all fields and calculate the total length of record */
++  List_iterator_fast<Create_field> it(field_list);
++  while ((cdef= it++))
++  {
++    *field= make_field(share, 0, cdef->length,
++                       (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
++                       f_maybe_null(cdef->pack_flag) ? 1 : 0,
++                       cdef->pack_flag, cdef->sql_type, cdef->charset,
++                       cdef->geom_type, cdef->unireg_check,
++                       cdef->interval, cdef->field_name);
++    if (!*field)
++      goto error;
++    (*field)->init(table);
++    record_length+= (*field)->pack_length();
++    if (! ((*field)->flags & NOT_NULL_FLAG))
++      null_count++;
++
++    if ((*field)->flags & BLOB_FLAG)
++      share->blob_field[blob_count++]= (uint) (field - table->field);
++
++    field++;
++  }
++  *field= NULL;                             /* mark the end of the list */
++  share->blob_field[blob_count]= 0;            /* mark the end of the list */
++  share->blob_fields= blob_count;
++
++  null_pack_length= (null_count + 7)/8;
++  share->reclength= record_length + null_pack_length;
++  share->rec_buff_length= ALIGN_SIZE(share->reclength + 1);
++  table->record[0]= (uchar*) thd->alloc(share->rec_buff_length);
++  if (!table->record[0])
++    goto error;
++
++  if (null_pack_length)
++  {
++    table->null_flags= (uchar*) table->record[0];
++    share->null_fields= null_count;
++    share->null_bytes= null_pack_length;
++  }
++
++  table->in_use= thd;           /* field->reset() may access table->in_use */
++  {
++    /* Set up field pointers */
++    uchar *null_pos= table->record[0];
++    uchar *field_pos= null_pos + share->null_bytes;
++    uint null_bit= 1;
++
++    for (field= table->field; *field; ++field)
++    {
++      Field *cur_field= *field;
++      if ((cur_field->flags & NOT_NULL_FLAG))
++        cur_field->move_field(field_pos);
++      else
++      {
++        cur_field->move_field(field_pos, (uchar*) null_pos, null_bit);
++        null_bit<<= 1;
++        if (null_bit == (1 << 8))
++        {
++          ++null_pos;
++          null_bit= 1;
++        }
++      }
++      cur_field->reset();
++
++      field_pos+= cur_field->pack_length();
++    }
++  }
++  return table;
++error:
++  for (field= table->field; *field; ++field)
++    delete *field;                         /* just invokes field destructor */
++  return 0;
++}
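
[Editor's note: standalone sketch, not part of the patch. It lays out a record the way create_virtual_tmp_table() above does: a NULL-bitmap prefix followed by the packed field data, one bit per nullable column, moving to the next bitmap byte after eight bits. ColumnDef is an illustrative structure, not a server type.]

#include <cstdio>
#include <vector>

struct ColumnDef { const char *name; unsigned pack_length; bool nullable; };

int main() {
  std::vector<ColumnDef> cols = {
    {"a", 4, true}, {"b", 8, false}, {"c", 2, true}, {"d", 1, true},
  };
  unsigned null_count = 0, data_length = 0;
  for (const ColumnDef &c : cols) { null_count += c.nullable; data_length += c.pack_length; }
  unsigned null_bytes = (null_count + 7) / 8;          // same rounding as above

  unsigned offset = null_bytes, null_bit = 1, null_pos = 0;
  for (const ColumnDef &c : cols) {
    if (c.nullable) {
      std::printf("%s: data at offset %u, NULL bit 0x%02x in byte %u\n",
                  c.name, offset, null_bit, null_pos);
      if ((null_bit <<= 1) == 1 << 8) { null_bit = 1; ++null_pos; }  // next byte
    } else {
      std::printf("%s: data at offset %u, NOT NULL\n", c.name, offset);
    }
    offset += c.pack_length;
  }
  std::printf("record length = %u (%u null byte(s) + %u data bytes)\n",
              null_bytes + data_length, null_bytes, data_length);
  return 0;
}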
++
++
++static bool open_tmp_table(TABLE *table)
++{
++  int error;
++  if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
++                                  HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
++  {
++    table->file->print_error(error,MYF(0)); /* purecov: inspected */
++    table->db_stat=0;
++    return(1);
++  }
++  (void) table->file->extra(HA_EXTRA_QUICK);		/* Faster */
++  return(0);
++}
++
++
++static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
++				    ulonglong options)
++{
++  int error;
++  MI_KEYDEF keydef;
++  MI_UNIQUEDEF uniquedef;
++  KEY *keyinfo=param->keyinfo;
++  TABLE_SHARE *share= table->s;
++  DBUG_ENTER("create_myisam_tmp_table");
++
++  if (share->keys)
++  {						// Get keys for mi_create
++    bool using_unique_constraint=0;
++    HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
++                                            sizeof(*seg) * keyinfo->key_parts);
++    if (!seg)
++      goto err;
++
++    bzero(seg, sizeof(*seg) * keyinfo->key_parts);
++    if (keyinfo->key_length >= table->file->max_key_length() ||
++	keyinfo->key_parts > table->file->max_key_parts() ||
++	share->uniques)
++    {
++      /* Can't create a key; Make a unique constraint instead of a key */
++      share->keys=    0;
++      share->uniques= 1;
++      using_unique_constraint=1;
++      bzero((char*) &uniquedef,sizeof(uniquedef));
++      uniquedef.keysegs=keyinfo->key_parts;
++      uniquedef.seg=seg;
++      uniquedef.null_are_equal=1;
++
++      /* Create extra column for hash value */
++      bzero((uchar*) param->recinfo,sizeof(*param->recinfo));
++      param->recinfo->type= FIELD_CHECK;
++      param->recinfo->length=MI_UNIQUE_HASH_LENGTH;
++      param->recinfo++;
++      share->reclength+=MI_UNIQUE_HASH_LENGTH;
++    }
++    else
++    {
++      /* Create a unique key */
++      bzero((char*) &keydef,sizeof(keydef));
++      keydef.flag=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
++      keydef.keysegs=  keyinfo->key_parts;
++      keydef.seg= seg;
++    }
++    for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
++    {
++      Field *field=keyinfo->key_part[i].field;
++      seg->flag=     0;
++      seg->language= field->charset()->number;
++      seg->length=   keyinfo->key_part[i].length;
++      seg->start=    keyinfo->key_part[i].offset;
++      if (field->flags & BLOB_FLAG)
++      {
++	seg->type=
++	((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
++	 HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
++	seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
++	seg->flag= HA_BLOB_PART;
++	seg->length=0;			// Whole blob in unique constraint
++      }
++      else
++      {
++	seg->type= keyinfo->key_part[i].type;
++        /* Tell handler if it can do suffix space compression */
++	if (field->real_type() == MYSQL_TYPE_STRING &&
++	    keyinfo->key_part[i].length > 4)
++	  seg->flag|= HA_SPACE_PACK;
++      }
++      if (!(field->flags & NOT_NULL_FLAG))
++      {
++	seg->null_bit= field->null_bit;
++	seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
++	/*
++	  We are using a GROUP BY on something that contains NULL.
++	  In this case we have to tell MyISAM that two NULLs should
++	  be regarded as the same value on INSERT.
++	*/
++	if (!using_unique_constraint)
++	  keydef.flag|= HA_NULL_ARE_EQUAL;
++      }
++    }
++  }
++  MI_CREATE_INFO create_info;
++  bzero((char*) &create_info,sizeof(create_info));
++
++  if ((options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
++      OPTION_BIG_TABLES)
++    create_info.data_file_length= ~(ulonglong) 0;
++
++  if ((error=mi_create(share->table_name.str, share->keys, &keydef,
++		       (uint) (param->recinfo-param->start_recinfo),
++		       param->start_recinfo,
++		       share->uniques, &uniquedef,
++		       &create_info,
++		       HA_CREATE_TMP_TABLE)))
++  {
++    table->file->print_error(error,MYF(0));	/* purecov: inspected */
++    table->db_stat=0;
++    goto err;
++  }
++  status_var_increment(table->in_use->status_var.created_tmp_disk_tables);
++  share->db_record_offset= 1;
++  DBUG_RETURN(0);
++ err:
++  DBUG_RETURN(1);
++}
++
++
++void
++free_tmp_table(THD *thd, TABLE *entry)
++{
++  MEM_ROOT own_root= entry->mem_root;
++  const char *save_proc_info;
++  DBUG_ENTER("free_tmp_table");
++  DBUG_PRINT("enter",("table: %s",entry->alias));
++
++  save_proc_info=thd->proc_info;
++  thd_proc_info(thd, "removing tmp table");
++
++  // Release latches since this can take a long time
++  ha_release_temporary_latches(thd);
++
++  if (entry->file)
++  {
++    if (entry->db_stat)
++      entry->file->ha_drop_table(entry->s->table_name.str);
++    else
++      entry->file->ha_delete_table(entry->s->table_name.str);
++    delete entry->file;
++  }
++
++  /* free blobs */
++  for (Field **ptr=entry->field ; *ptr ; ptr++)
++    (*ptr)->free();
++  free_io_cache(entry);
++
++  if (entry->temp_pool_slot != MY_BIT_NONE)
++    bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
++
++  plugin_unlock(0, entry->s->db_plugin);
++
++  free_root(&own_root, MYF(0)); /* the table is allocated in its own root */
++  thd_proc_info(thd, save_proc_info);
++
++  DBUG_VOID_RETURN;
++}
++
++/**
++  If a HEAP table gets full, create a MyISAM table and copy all rows
++  to it.
++*/
++
++bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
++			     int error, bool ignore_last_dupp_key_error)
++{
++  TABLE new_table;
++  TABLE_SHARE share;
++  const char *save_proc_info;
++  int write_err;
++  DBUG_ENTER("create_myisam_from_heap");
++
++  if (table->s->db_type() != heap_hton || 
++      error != HA_ERR_RECORD_FILE_FULL)
++  {
++    /*
++      We don't want this error to be converted to a warning, e.g. in case of
++      INSERT IGNORE ... SELECT.
++    */
++    thd->fatal_error();
++    table->file->print_error(error,MYF(0));
++    DBUG_RETURN(1);
++  }
++
++  // Release latches since this can take a long time
++  ha_release_temporary_latches(thd);
++
++  new_table= *table;
++  share= *table->s;
++  new_table.s= &share;
++  new_table.s->db_plugin= ha_lock_engine(thd, myisam_hton);
++  if (!(new_table.file= get_new_handler(&share, &new_table.mem_root,
++                                        new_table.s->db_type())))
++    DBUG_RETURN(1);				// End of memory
++
++  save_proc_info=thd->proc_info;
++  thd_proc_info(thd, "converting HEAP to MyISAM");
++
++  if (create_myisam_tmp_table(&new_table, param,
++			      thd->lex->select_lex.options | thd->options))
++    goto err2;
++  if (open_tmp_table(&new_table))
++    goto err1;
++  if (table->file->indexes_are_disabled())
++    new_table.file->ha_disable_indexes(HA_KEY_SWITCH_ALL);
++  table->file->ha_index_or_rnd_end();
++  table->file->ha_rnd_init(1);
++  if (table->no_rows)
++  {
++    new_table.file->extra(HA_EXTRA_NO_ROWS);
++    new_table.no_rows=1;
++  }
++
++#ifdef TO_BE_DONE_LATER_IN_4_1
++  /*
++    To use start_bulk_insert() (which is new in 4.1) we need to find
++    all places where a corresponding end_bulk_insert() should be put.
++  */
++  table->file->info(HA_STATUS_VARIABLE); /* update table->file->stats.records */
++  new_table.file->ha_start_bulk_insert(table->file->stats.records);
++#else
++  /* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
++  new_table.file->extra(HA_EXTRA_WRITE_CACHE);
++#endif
++
++  /*
++    Copy all old rows from the heap table to the MyISAM table.
++    This is the only code that uses record[1] to read/write, but this
++    is safe as this is a temporary MyISAM table without timestamp/autoincrement
++    or partitioning.
++  */
++  while (!table->file->rnd_next(new_table.record[1]))
++  {
++    write_err= new_table.file->ha_write_row(new_table.record[1]);
++    DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
++    if (write_err)
++      goto err;
++  }
++  /* copy row that filled HEAP table */
++  if ((write_err=new_table.file->ha_write_row(table->record[0])))
++  {
++    if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
++	!ignore_last_dupp_key_error)
++      goto err;
++  }
++
++  /* remove heap table and change to use myisam table */
++  (void) table->file->ha_rnd_end();
++  (void) table->file->close();                  // This deletes the table !
++  delete table->file;
++  table->file=0;
++  plugin_unlock(0, table->s->db_plugin);
++  share.db_plugin= my_plugin_lock(0, &share.db_plugin);
++  new_table.s= table->s;                       // Keep old share
++  *table= new_table;
++  *table->s= share;
++  
++  table->file->change_table_ptr(table, table->s);
++  table->use_all_columns();
++  if (save_proc_info)
++    thd_proc_info(thd, (!strcmp(save_proc_info,"Copying to tmp table") ?
++                  "Copying to tmp table on disk" : save_proc_info));
++  DBUG_RETURN(0);
++
++ err:
++  DBUG_PRINT("error",("Got error: %d",write_err));
++  table->file->print_error(write_err, MYF(0));
++  (void) table->file->ha_rnd_end();
++  (void) new_table.file->close();
++ err1:
++  new_table.file->ha_delete_table(new_table.s->table_name.str);
++ err2:
++  delete new_table.file;
++  thd_proc_info(thd, save_proc_info);
++  table->mem_root= new_table.mem_root;
++  DBUG_RETURN(1);
++}
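++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources): the
++  function above handles the "memory table is full" error by creating an
++  on-disk table, replaying every row written so far plus the row that
++  caused the overflow, and then switching to the new table.  The same
++  spill pattern with hypothetical MemStore/DiskStore stand-ins:
++
++    #include <cstdio>
++    #include <string>
++    #include <vector>
++
++    struct MemStore                     // bounded in-memory table
++    {
++      std::vector<std::string> rows;
++      size_t capacity;
++      bool write(const std::string &row)
++      {
++        if (rows.size() >= capacity) return false;   // "table is full"
++        rows.push_back(row);
++        return true;
++      }
++    };
++
++    struct DiskStore                    // unbounded on-disk stand-in
++    {
++      std::vector<std::string> rows;
++      void write(const std::string &row) { rows.push_back(row); }
++    };
++
++    int main()
++    {
++      MemStore mem{{}, 3};
++      DiskStore disk;
++      bool spilled= false;
++      const char *input[]= {"r1", "r2", "r3", "r4", "r5"};
++      for (const char *row : input)
++      {
++        if (!spilled && mem.write(row))
++          continue;
++        if (!spilled)
++        {
++          for (const std::string &r : mem.rows)   // replay old rows
++            disk.write(r);
++          mem.rows.clear();
++          spilled= true;
++        }
++        disk.write(row);                // the row that did not fit, and the rest
++      }
++      std::printf("spilled=%d, rows on disk: %u\n",
++                  (int) spilled, (unsigned) disk.rows.size());
++      return 0;
++    }
++*/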
++
++
++/**
++  @details
++  Rows produced by a join sweep may end up in a temporary table or be sent
++  to a client. Set up the function of the nested loop join algorithm which
++  handles final fully constructed and matched records.
++
++  @param join   join to setup the function for.
++
++  @return
++    end_select function to use. This function can't fail.
++*/
++
++Next_select_func setup_end_select_func(JOIN *join)
++{
++  TABLE *table= join->tmp_table;
++  TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param;
++  Next_select_func end_select;
++
++  /* Set up select_end */
++  if (table)
++  {
++    if (table->group && tmp_tbl->sum_func_count && 
++        !tmp_tbl->precomputed_group_by)
++    {
++      if (table->s->keys)
++      {
++	DBUG_PRINT("info",("Using end_update"));
++	end_select=end_update;
++      }
++      else
++      {
++	DBUG_PRINT("info",("Using end_unique_update"));
++	end_select=end_unique_update;
++      }
++    }
++    else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
++    {
++      DBUG_PRINT("info",("Using end_write_group"));
++      end_select=end_write_group;
++    }
++    else
++    {
++      DBUG_PRINT("info",("Using end_write"));
++      end_select=end_write;
++      if (tmp_tbl->precomputed_group_by)
++      {
++        /*
++          A preceding call to create_tmp_table in the case when loose
++          index scan is used guarantees that
++          TMP_TABLE_PARAM::items_to_copy has enough space for the group
++          by functions. It is OK here to use memcpy since we copy
++          Item_sum pointers into an array of Item pointers.
++        */
++        memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
++               join->sum_funcs,
++               sizeof(Item*)*tmp_tbl->sum_func_count);
++        tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
++      }
++    }
++  }
++  else
++  {
++    /* 
++       Choose method for presenting result to user. Use end_send_group
++       if the query requires grouping (has a GROUP BY clause and/or one or
++       more aggregate functions). Use end_send if the query should not
++       be grouped.
++     */
++    if ((join->sort_and_group ||
++         (join->procedure && join->procedure->flags & PROC_GROUP)) &&
++        !tmp_tbl->precomputed_group_by)
++      end_select= end_send_group;
++    else
++      end_select= end_send;
++  }
++  return end_select;
++}
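++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources):
++  setup_end_select_func() picks the per-row end handler once, before the
++  join loop runs, so the inner loop only pays for an indirect call rather
++  than re-testing the grouping/tmp-table flags on every row.  A minimal
++  standalone version of that dispatch, with hypothetical handlers:
++
++    #include <cstdio>
++
++    struct Ctx { bool to_tmp_table; bool grouped; long rows; };
++    typedef int (*row_handler)(Ctx &);
++
++    static int send_row(Ctx &c)       { c.rows++; return 0; }
++    static int send_group_row(Ctx &c) { c.rows++; return 0; }
++    static int write_tmp_row(Ctx &c)  { c.rows++; return 0; }
++
++    static row_handler pick_handler(const Ctx &c)
++    {
++      if (c.to_tmp_table)
++        return write_tmp_row;
++      return c.grouped ? send_group_row : send_row;
++    }
++
++    int main()
++    {
++      Ctx c= {false, true, 0};
++      row_handler handler= pick_handler(c);   // chosen once
++      for (int i= 0; i < 4; i++)
++        handler(c);                           // same call site per row
++      std::printf("handled %ld rows\n", c.rows);
++      return 0;
++    }
++*/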
++
++
++/**
++  Make a join of all tables and write it on socket or to table.
++
++  @retval
++    0  if ok
++  @retval
++    1  if error is sent
++  @retval
++    -1  if error should be sent
++*/
++
++static int
++do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
++{
++  int rc= 0;
++  enum_nested_loop_state error= NESTED_LOOP_OK;
++  JOIN_TAB *join_tab= NULL;
++  DBUG_ENTER("do_select");
++  
++  join->procedure=procedure;
++  join->tmp_table= table;			/* Save for easy recursion */
++  join->fields= fields;
++
++  if (table)
++  {
++    VOID(table->file->extra(HA_EXTRA_WRITE_CACHE));
++    empty_record(table);
++    if (table->group && join->tmp_table_param.sum_func_count &&
++        table->s->keys && !table->file->inited)
++      table->file->ha_index_init(0, 0);
++  }
++  /* Set up select_end */
++  Next_select_func end_select= setup_end_select_func(join);
++  if (join->tables)
++  {
++    join->join_tab[join->tables-1].next_select= end_select;
++
++    join_tab=join->join_tab+join->const_tables;
++  }
++  join->send_records=0;
++  if (join->tables == join->const_tables)
++  {
++    /*
++      HAVING will be checked after processing aggregate functions,
++      but WHERE should be checked here (we have already read the tables)
++    */
++    if (!join->conds || join->conds->val_int())
++    {
++      error= (*end_select)(join, 0, 0);
++      if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT)
++	error= (*end_select)(join, 0, 1);
++
++      /*
++        If we don't go through evaluate_join_record(), do the counting
++        here.  join->send_records is increased on success in end_send(),
++        so we don't touch it here.
++      */
++      join->examined_rows++;
++      join->thd->row_count++;
++      DBUG_ASSERT(join->examined_rows <= 1);
++    }
++    else if (join->send_row_on_empty_set())
++    {
++      List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
++                                 fields);
++      rc= join->result->send_data(*columns_list);
++    }
++  }
++  else
++  {
++    DBUG_ASSERT(join->tables);
++    error= sub_select(join,join_tab,0);
++    if (error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS)
++      error= sub_select(join,join_tab,1);
++    if (error == NESTED_LOOP_QUERY_LIMIT)
++      error= NESTED_LOOP_OK;                    /* select_limit used */
++  }
++  if (error == NESTED_LOOP_NO_MORE_ROWS)
++    error= NESTED_LOOP_OK;
++
++  if (table == NULL)					// If sending data to client
++    /*
++      The following will unlock all cursors if the command wasn't an
++      update command
++    */
++    join->join_free();			// Unlock all cursors
++  if (error == NESTED_LOOP_OK)
++  {
++    /*
++      Sic: this branch works even if rc != 0, e.g. when
++      send_data above returns an error.
++    */
++    if (table == NULL && join->result->send_eof()) // If sending data to client
++      rc= 1;                                  // Don't send error 
++    DBUG_PRINT("info",("%ld records output", (long) join->send_records));
++  }
++  else
++    rc= -1;
++  if (table)
++  {
++    int tmp, new_errno= 0;
++    if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE)))
++    {
++      DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed"));
++      new_errno= tmp;
++    }
++    if ((tmp=table->file->ha_index_or_rnd_end()))
++    {
++      DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
++      new_errno= tmp;
++    }
++    if (new_errno)
++      table->file->print_error(new_errno,MYF(0));
++  }
++#ifndef DBUG_OFF
++  if (rc)
++  {
++    DBUG_PRINT("error",("Error: do_select() failed"));
++  }
++#endif
++  DBUG_RETURN(join->thd->is_error() ? -1 : rc);
++}
++
++
++enum_nested_loop_state
++sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
++{
++  enum_nested_loop_state rc;
++
++  if (end_of_records)
++  {
++    rc= flush_cached_records(join,join_tab,FALSE);
++    if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS)
++      rc= sub_select(join,join_tab,end_of_records);
++    return rc;
++  }
++  if (join->thd->killed)		// If aborted by user
++  {
++    join->thd->send_kill_message();
++    return NESTED_LOOP_KILLED;                   /* purecov: inspected */
++  }
++  if (join_tab->use_quick != 2 || test_if_quick_select(join_tab) <= 0)
++  {
++    if (!store_record_in_cache(&join_tab->cache))
++      return NESTED_LOOP_OK;                     // There is more room in cache
++    return flush_cached_records(join,join_tab,FALSE);
++  }
++  rc= flush_cached_records(join, join_tab, TRUE);
++  if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS)
++    rc= sub_select(join, join_tab, end_of_records);
++  return rc;
++}
++
++/**
++  Retrieve records that begin with a given partial join record from the result of a join.
++
++    For a given partial join record consisting of records from the tables 
++    preceding the table join_tab in the execution plan, the function
++    retrieves all matching full records from the result set and
++    sends them to the result set stream.
++
++  @note
++    The function effectively implements the  final (n-k) nested loops
++    of nested loops join algorithm, where k is the ordinal number of
++    the join_tab table and n is the total number of tables in the join query.
++    It performs nested loops joins with all conjunctive predicates from
++    the where condition pushed as low to the tables as possible.
++    E.g. for the query
++    @code
++      SELECT * FROM t1,t2,t3
++      WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9
++    @endcode
++    the predicate (t1.a BETWEEN 5 AND 9) will be pushed to table t1,
++    given the selected plan prescribes to nest retrievals of the
++    joined tables in the following order: t1,t2,t3.
++    A pushed-down predicate is attached to the table it is pushed to,
++    at the field join_tab->select_cond.
++    When executing a nested loop of level k the function runs through
++    the rows of 'join_tab' and for each row checks the pushed condition
++    attached to the table.
++    If it is false the function moves to the next row of the
++    table. If the condition is true the function recursively executes (n-k-1)
++    remaining embedded nested loops.
++    The situation becomes more complicated if outer joins are involved in
++    the execution plan. In this case the pushed down predicates can be
++    checked only under certain conditions.
++    Suppose for the query
++    @code
++      SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a
++      WHERE t1>2 AND (t2.b>5 OR t2.b IS NULL)
++    @endcode
++    the optimizer has chosen a plan with the table order t1,t2,t3.
++    The predicate P1=t1>2 will be pushed down to the table t1, while the
++    predicate P2=(t2.b>5 OR t2.b IS NULL) will be attached to the table
++    t2. But the second predicate can not be unconditionally tested right
++    after a row from t2 has been read. This can be done only after the
++    first row with t3.a=t1.a has been encountered.
++    Thus, the second predicate P2 is supplied with a guard value that is
++    stored in the field 'found' of the first inner table for the outer join
++    (table t2). When the first row with t3.a=t1.a for the current row
++    of table t1 appears, the value becomes true. From then on the predicate
++    is evaluated immediately after the row of table t2 has been read.
++    When the first row with t3.a=t1.a has been encountered all
++    conditions attached to the inner tables t2,t3 must be evaluated.
++    Only when all of them are true is the row sent to the output stream.
++    If not, the function returns to the lowest nest level that has a false
++    attached condition.
++    The predicates from on expressions are also pushed down. If in
++    the above example the on expression were (t3.a=t1.a AND t2.a=t1.a),
++    then t1.a=t2.a would be pushed down to table t2, and without any
++    guard.
++    If after the run through all rows of table t2, the first inner table
++    for the outer join operation, it turns out that no matches are
++    found for the current row of t1, then the current row from table t1
++    is complemented by nulls for t2 and t3. Then the pushed down predicates
++    are checked for the composed row almost in the same way as it had
++    been done for the first row with a match. The only difference is
++    the predicates from on expressions are not checked. 
++
++  @par
++  @b IMPLEMENTATION
++  @par
++    The function forms output rows for a current partial join of k
++    tables recursively.
++    For each partial join record ending with a certain row from
++    join_tab it calls sub_select that builds all possible matching
++    tails from the result set.
++    To be able to check predicates conditionally, items of the class
++    Item_func_trig_cond are employed.
++    An object of  this class is constructed from an item of class COND
++    and a pointer to a guarding boolean variable.
++    When the value of the guard variable is true the value of the object
++    is the same as the value of the predicate, otherwise it just returns
++    true.
++    To carry out a return to a nested loop level of join table t the pointer 
++    to t is remembered in the field 'return_tab' of the join structure.
++    Consider the following query:
++    @code
++        SELECT * FROM t1,
++                      LEFT JOIN
++                      (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a)
++                      ON t4.a=t2.a
++           WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL)
++    @endcode
++    Suppose the chosen execution plan dictates the order t1,t2,t3,t4,t5
++    and suppose that for a given joined row from tables t1,t2,t3 there are
++    no rows in the result set yet.
++    When the first row from t5 that satisfies the on condition
++    t5.a=t3.a is found, the pushed down predicate t4.b=2 OR t4.b IS NULL
++    becomes 'activated', as well as the predicate t4.a=t2.a. But
++    the predicate (t2.b=5 OR t2.b IS NULL) can not be checked until
++    t4.a=t2.a becomes true. 
++    In order not to re-evaluate the predicates that were already evaluated
++    as attached pushed down predicates, a pointer to the first
++    innermost unmatched table is maintained in join_tab->first_unmatched.
++    Thus, when the first row from t5 with t5.a=t3.a is found
++    this pointer for t5 is changed from t4 to t2.             
++
++    @par
++    @b STRUCTURE @b NOTES
++    @par
++    join_tab->first_unmatched always points backwards to the first inner
++    table of the embedding nested join, if any.
++
++  @param join      pointer to the structure providing all context info for
++                   the query
++  @param join_tab  the first next table of the execution plan to be retrieved
++  @param end_of_records  true when we need to perform the final steps of retrieval
++
++  @return
++    return one of enum_nested_loop_state, except NESTED_LOOP_NO_MORE_ROWS.
++*/
++
++enum_nested_loop_state
++sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
++{
++  join_tab->table->null_row=0;
++  if (end_of_records)
++    return (*join_tab->next_select)(join,join_tab+1,end_of_records);
++
++  int error;
++  enum_nested_loop_state rc;
++  READ_RECORD *info= &join_tab->read_record;
++
++  if (join->resume_nested_loop)
++  {
++    /* If not the last table, plunge down the nested loop */
++    if (join_tab < join->join_tab + join->tables - 1)
++      rc= (*join_tab->next_select)(join, join_tab + 1, 0);
++    else
++    {
++      join->resume_nested_loop= FALSE;
++      rc= NESTED_LOOP_OK;
++    }
++  }
++  else
++  {
++    join->return_tab= join_tab;
++
++    if (join_tab->last_inner)
++    {
++      /* join_tab is the first inner table for an outer join operation. */
++
++      /* Set initial state of guard variables for this table.*/
++      join_tab->found=0;
++      join_tab->not_null_compl= 1;
++
++      /* Set first_unmatched for the last inner table of this group */
++      join_tab->last_inner->first_unmatched= join_tab;
++    }
++    join->thd->row_count= 0;
++
++    error= (*join_tab->read_first_record)(join_tab);
++    rc= evaluate_join_record(join, join_tab, error);
++  }
++
++  while (rc == NESTED_LOOP_OK)
++  {
++    error= info->read_record(info);
++    rc= evaluate_join_record(join, join_tab, error);
++  }
++
++  if (rc == NESTED_LOOP_NO_MORE_ROWS &&
++      join_tab->last_inner && !join_tab->found)
++    rc= evaluate_null_complemented_join_record(join, join_tab);
++
++  if (rc == NESTED_LOOP_NO_MORE_ROWS)
++    rc= NESTED_LOOP_OK;
++  return rc;
++}
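++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources): a
++  stripped-down version of the nested-loop strategy documented above for
++  t1 LEFT JOIN t2 ON t2.a=t1.a, with one predicate pushed down to t1, an
++  ON condition attached to t2, a 'found' guard on the inner table and a
++  null-complemented row when no match exists.  All names are hypothetical.
++
++    #include <cstdio>
++    #include <vector>
++
++    struct Row { int a; int b; };
++
++    int main()
++    {
++      std::vector<Row> t1= { {1, 10}, {2, 20}, {3, 30} };
++      std::vector<Row> t2= { {2, 200}, {2, 201} };
++
++      for (const Row &r1 : t1)
++      {
++        if (!(r1.a >= 2))               // pushed-down condition on t1
++          continue;
++        bool found= false;              // guard of the first inner table
++        for (const Row &r2 : t2)
++        {
++          if (r2.a != r1.a)             // ON condition attached to t2
++            continue;
++          found= true;
++          std::printf("t1(%d,%d) t2(%d,%d)\n", r1.a, r1.b, r2.a, r2.b);
++        }
++        if (!found)                     // null-complemented inner row
++          std::printf("t1(%d,%d) t2(NULL,NULL)\n", r1.a, r1.b);
++      }
++      return 0;
++    }
++*/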
++
++
++/**
++  Process one record of the nested loop join.
++
++    This function will evaluate parts of WHERE/ON clauses that are
++    applicable to the partial record on hand and in case of success
++    submit this record to the next level of the nested loop.
++*/
++
++static enum_nested_loop_state
++evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
++                     int error)
++{
++  bool not_used_in_distinct=join_tab->not_used_in_distinct;
++  ha_rows found_records=join->found_records;
++  COND *select_cond= join_tab->select_cond;
++  bool select_cond_result= TRUE;
++
++  if (error > 0 || (join->thd->is_error()))     // Fatal error
++    return NESTED_LOOP_ERROR;
++  if (error < 0)
++    return NESTED_LOOP_NO_MORE_ROWS;
++  if (join->thd->killed)			// Aborted by user
++  {
++    join->thd->send_kill_message();
++    return NESTED_LOOP_KILLED;               /* purecov: inspected */
++  }
++  DBUG_PRINT("info", ("select cond 0x%lx", (ulong)select_cond));
++
++  if (select_cond)
++  {
++    select_cond_result= test(select_cond->val_int());
++
++    /* check for errors evaluating the condition */
++    if (join->thd->is_error())
++      return NESTED_LOOP_ERROR;
++  }
++
++  if (!select_cond || select_cond_result)
++  {
++    /*
++      There is no select condition or the attached pushed down
++      condition is true => a match is found.
++    */
++    bool found= 1;
++    while (join_tab->first_unmatched && found)
++    {
++      /*
++        The while condition is always false if join_tab is not
++        the last inner join table of an outer join operation.
++      */
++      JOIN_TAB *first_unmatched= join_tab->first_unmatched;
++      /*
++        Mark that a match for the current outer table is found.
++        This activates the pushed-down conditional predicates attached
++        to all inner tables of the outer join.
++      */
++      first_unmatched->found= 1;
++      for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++)
++      {
++        if (tab->table->reginfo.not_exists_optimize)
++          return NESTED_LOOP_NO_MORE_ROWS;
++        /* Check all predicates that have just been activated. */
++        /*
++          Actually all predicates not guarded by first_unmatched->found
++          will be re-evaluated. It could be fixed, but, probably,
++          it's not worth doing now.
++        */
++        if (tab->select_cond && !tab->select_cond->val_int())
++        {
++          /* The condition attached to table tab is false */
++          if (tab == join_tab)
++            found= 0;
++          else
++          {
++            /*
++              Set a return point if rejected predicate is attached
++              not to the last table of the current nest level.
++            */
++            join->return_tab= tab;
++            return NESTED_LOOP_OK;
++          }
++        }
++      }
++      /*
++        Check whether join_tab is not the last inner table
++        for another embedding outer join.
++      */
++      if ((first_unmatched= first_unmatched->first_upper) &&
++          first_unmatched->last_inner != join_tab)
++        first_unmatched= 0;
++      join_tab->first_unmatched= first_unmatched;
++    }
++
++    /*
++      It was not just a return to lower loop level when one
++      of the newly activated predicates is evaluated as false
++      (See above join->return_tab= tab).
++    */
++    join->examined_rows++;
++    join->thd->row_count++;
++    DBUG_PRINT("counts", ("join->examined_rows++: %lu",
++                          (ulong) join->examined_rows));
++
++    if (found)
++    {
++      enum enum_nested_loop_state rc;
++      /* A match from join_tab is found for the current partial join. */
++      rc= (*join_tab->next_select)(join, join_tab+1, 0);
++      if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
++        return rc;
++      if (join->return_tab < join_tab)
++        return NESTED_LOOP_OK;
++      /*
++        Test if this was a SELECT DISTINCT query on a table that
++        was not in the field list;  In this case we can abort if
++        we found a row, as no new rows can be added to the result.
++      */
++      if (not_used_in_distinct && found_records != join->found_records)
++        return NESTED_LOOP_NO_MORE_ROWS;
++    }
++    else
++      join_tab->read_record.unlock_row(join_tab);
++  }
++  else
++  {
++    /*
++      The condition pushed down to the table join_tab rejects all rows
++      with the beginning coinciding with the current partial join.
++    */
++    join->examined_rows++;
++    join->thd->row_count++;
++    join_tab->read_record.unlock_row(join_tab);
++  }
++  return NESTED_LOOP_OK;
++}
++
++
++/**
++
++  @details
++    Construct a NULL-complemented partial join record and feed it to the next
++    level of the nested loop. This function is used in case we have
++    an OUTER join and no matching record was found.
++*/
++
++static enum_nested_loop_state
++evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab)
++{
++  /*
++    The table join_tab is the first inner table of an outer join operation
++    and no matches have been found for the current outer row.
++  */
++  JOIN_TAB *last_inner_tab= join_tab->last_inner;
++  /* Cache variables for faster loop */
++  COND *select_cond;
++  for ( ; join_tab <= last_inner_tab ; join_tab++)
++  {
++    /* Change the values of the guard predicate variables. */
++    join_tab->found= 1;
++    join_tab->not_null_compl= 0;
++    /* The outer row is complemented by nulls for each inner table */
++    restore_record(join_tab->table,s->default_values);  // Make empty record
++    mark_as_null_row(join_tab->table);       // For group by without error
++    select_cond= join_tab->select_cond;
++    /* Check all attached conditions for inner table rows. */
++    if (select_cond && !select_cond->val_int())
++      return NESTED_LOOP_OK;
++  }
++  join_tab--;
++  /*
++    The row complemented by nulls might be the first row
++    of embedding outer joins.
++    If so, perform the same actions as in the code
++    for the first regular outer join row above.
++  */
++  for ( ; ; )
++  {
++    JOIN_TAB *first_unmatched= join_tab->first_unmatched;
++    if ((first_unmatched= first_unmatched->first_upper) &&
++        first_unmatched->last_inner != join_tab)
++      first_unmatched= 0;
++    join_tab->first_unmatched= first_unmatched;
++    if (!first_unmatched)
++      break;
++    first_unmatched->found= 1;
++    for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++)
++    {
++      if (tab->select_cond && !tab->select_cond->val_int())
++      {
++        join->return_tab= tab;
++        return NESTED_LOOP_OK;
++      }
++    }
++  }
++  /*
++    The row complemented by nulls satisfies all conditions
++    attached to inner tables.
++    Send the row complemented by nulls to be joined with the
++    remaining tables.
++  */
++  return (*join_tab->next_select)(join, join_tab+1, 0);
++}
++
++
++static enum_nested_loop_state
++flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
++{
++  enum_nested_loop_state rc= NESTED_LOOP_OK;
++  int error;
++  READ_RECORD *info;
++
++  join_tab->table->null_row= 0;
++  if (!join_tab->cache.records)
++    return NESTED_LOOP_OK;                      /* Nothing to do */
++  if (skip_last)
++    (void) store_record_in_cache(&join_tab->cache); // Must save this for later
++  if (join_tab->use_quick == 2)
++  {
++    if (join_tab->select->quick)
++    {					/* Used quick select last. reset it */
++      delete join_tab->select->quick;
++      join_tab->select->quick=0;
++    }
++  }
++ /* read through all records */
++  if ((error=join_init_read_record(join_tab)))
++  {
++    reset_cache_write(&join_tab->cache);
++    return error < 0 ? NESTED_LOOP_NO_MORE_ROWS: NESTED_LOOP_ERROR;
++  }
++
++  for (JOIN_TAB *tmp=join->join_tab; tmp != join_tab ; tmp++)
++  {
++    tmp->status=tmp->table->status;
++    tmp->table->status=0;
++  }
++
++  info= &join_tab->read_record;
++  do
++  {
++    if (join->thd->killed)
++    {
++      join->thd->send_kill_message();
++      return NESTED_LOOP_KILLED; // Aborted by user /* purecov: inspected */
++    }
++    SQL_SELECT *select=join_tab->select;
++    if (rc == NESTED_LOOP_OK)
++    {
++      bool skip_record= FALSE;
++      if (join_tab->cache.select &&
++          join_tab->cache.select->skip_record(join->thd, &skip_record))
++      {
++        reset_cache_write(&join_tab->cache);
++        return NESTED_LOOP_ERROR;
++      }
++
++      if (!skip_record)
++      {
++        uint i;
++        reset_cache_read(&join_tab->cache);
++        for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
++        {
++          read_cached_record(join_tab);
++          skip_record= FALSE;
++          if (select && select->skip_record(join->thd, &skip_record))
++          {
++            reset_cache_write(&join_tab->cache);
++            return NESTED_LOOP_ERROR;
++          }
++          if (!skip_record)
++          {
++            rc= (join_tab->next_select)(join,join_tab+1,0);
++            if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
++            {
++              reset_cache_write(&join_tab->cache);
++              return rc;
++            }
++          }
++        }
++      }
++    }
++  } while (!(error=info->read_record(info)));
++
++  if (skip_last)
++    read_cached_record(join_tab);		// Restore current record
++  reset_cache_write(&join_tab->cache);
++  if (error > 0)				// Fatal error
++    return NESTED_LOOP_ERROR;                   /* purecov: inspected */
++  for (JOIN_TAB *tmp2=join->join_tab; tmp2 != join_tab ; tmp2++)
++    tmp2->table->status=tmp2->status;
++  return NESTED_LOOP_OK;
++}
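++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources):
++  sub_select_cache()/flush_cached_records() implement join buffering --
++  outer rows are collected in a cache and the inner table is scanned once
++  per full cache instead of once per outer row.  A standalone version of
++  that idea over plain integer "tables":
++
++    #include <cstdio>
++    #include <vector>
++
++    int main()
++    {
++      const size_t cache_size= 2;
++      std::vector<int> outer= {1, 2, 3, 4, 5};
++      std::vector<int> inner= {2, 4, 5};
++      std::vector<int> cache;
++      unsigned inner_scans= 0;
++
++      auto flush= [&]()
++      {
++        if (cache.empty()) return;
++        inner_scans++;
++        for (int i : inner)             // one pass over the inner table
++          for (int o : cache)           // check every buffered outer row
++            if (o == i)
++              std::printf("joined %d\n", o);
++        cache.clear();
++      };
++
++      for (int o : outer)
++      {
++        cache.push_back(o);
++        if (cache.size() == cache_size) // cache full: flush it
++          flush();
++      }
++      flush();                          // end of records: flush the rest
++      std::printf("%u inner scans for %u outer rows\n",
++                  inner_scans, (unsigned) outer.size());
++      return 0;
++    }
++*/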
++
++
++/*****************************************************************************
++  The different ways to read a record
++  Returns -1 if row was not found, 0 if row was found and 1 on errors
++*****************************************************************************/
++
++/** Helper function for when we get an error from the table handler. */
++
++int report_error(TABLE *table, int error)
++{
++  if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
++  {
++    table->status= STATUS_GARBAGE;
++    return -1;					// key not found; ok
++  }
++  /*
++    Locking reads can legally return also these errors, do not
++    print them to the .err log
++  */
++  if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
++    sql_print_error("Got error %d when reading table '%s'",
++		    error, table->s->path.str);
++  table->file->print_error(error,MYF(0));
++  return 1;
++}
++
++
++int safe_index_read(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++  if ((error=table->file->index_read_map(table->record[0],
++                                         tab->ref.key_buff,
++                                         make_prev_keypart_map(tab->ref.key_parts),
++                                         HA_READ_KEY_EXACT)))
++    return report_error(table, error);
++  return 0;
++}
++
++
++static int
++join_read_const_table(JOIN_TAB *tab, POSITION *pos)
++{
++  int error;
++  DBUG_ENTER("join_read_const_table");
++  TABLE *table=tab->table;
++  table->const_table=1;
++  table->null_row=0;
++  table->status=STATUS_NO_RECORD;
++  
++  if (tab->type == JT_SYSTEM)
++  {
++    if ((error=join_read_system(tab)))
++    {						// Info for DESCRIBE
++      tab->info="const row not found";
++      /* Mark for EXPLAIN that the row was not found */
++      pos->records_read=0.0;
++      pos->ref_depend_map= 0;
++      if (!table->maybe_null || error > 0)
++	DBUG_RETURN(error);
++    }
++  }
++  else
++  {
++    if (!table->key_read && table->covering_keys.is_set(tab->ref.key) &&
++	!table->no_keyread &&
++        (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
++    {
++      table->set_keyread(TRUE);
++      tab->index= tab->ref.key;
++    }
++    error=join_read_const(tab);
++    table->set_keyread(FALSE);
++    if (error)
++    {
++      tab->info="unique row not found";
++      /* Mark for EXPLAIN that the row was not found */
++      pos->records_read=0.0;
++      pos->ref_depend_map= 0;
++      if (!table->maybe_null || error > 0)
++	DBUG_RETURN(error);
++    }
++  }
++  if (*tab->on_expr_ref && !table->null_row)
++  {
++    if ((table->null_row= test((*tab->on_expr_ref)->val_int() == 0)))
++      mark_as_null_row(table);  
++  }
++  if (!table->null_row)
++    table->maybe_null=0;
++
++  /* Check appearance of new constant items in Item_equal objects */
++  JOIN *join= tab->join;
++  if (join->conds)
++    update_const_equal_items(join->conds, tab);
++  TABLE_LIST *tbl;
++  for (tbl= join->select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
++  {
++    TABLE_LIST *embedded;
++    TABLE_LIST *embedding= tbl;
++    do
++    {
++      embedded= embedding;
++      if (embedded->on_expr)
++         update_const_equal_items(embedded->on_expr, tab);
++      embedding= embedded->embedding;
++    }
++    while (embedding &&
++           embedding->nested_join->join_list.head() == embedded);
++  }
++
++  DBUG_RETURN(0);
++}
++
++
++static int
++join_read_system(JOIN_TAB *tab)
++{
++  TABLE *table= tab->table;
++  int error;
++  if (table->status & STATUS_GARBAGE)		// If first read
++  {
++    if ((error=table->file->read_first_row(table->record[0],
++					   table->s->primary_key)))
++    {
++      if (error != HA_ERR_END_OF_FILE)
++	return report_error(table, error);
++      mark_as_null_row(tab->table);
++      empty_record(table);			// Make empty record
++      return -1;
++    }
++    store_record(table,record[1]);
++  }
++  else if (!table->status)			// Only happens with left join
++    restore_record(table,record[1]);			// restore old record
++  table->null_row=0;
++  return table->status ? -1 : 0;
++}
++
++
++/**
++  Read a table when there is at most one matching row.
++
++  @param tab			Table to read
++
++  @retval
++    0	Row was found
++  @retval
++    -1   Row was not found
++  @retval
++    1   Got an error (other than row not found) during read
++*/
++
++static int
++join_read_const(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++  if (table->status & STATUS_GARBAGE)		// If first read
++  {
++    table->status= 0;
++    if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
++      error=HA_ERR_KEY_NOT_FOUND;
++    else
++    {
++      error=table->file->index_read_idx_map(table->record[0],tab->ref.key,
++                                            (uchar*) tab->ref.key_buff,
++                                            make_prev_keypart_map(tab->ref.key_parts),
++                                            HA_READ_KEY_EXACT);
++    }
++    if (error)
++    {
++      table->status= STATUS_NOT_FOUND;
++      mark_as_null_row(tab->table);
++      empty_record(table);
++      if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
++	return report_error(table, error);
++      return -1;
++    }
++    store_record(table,record[1]);
++  }
++  else if (!(table->status & ~STATUS_NULL_ROW))	// Only happens with left join
++  {
++    table->status=0;
++    restore_record(table,record[1]);			// restore old record
++  }
++  table->null_row=0;
++  return table->status ? -1 : 0;
++}
++
++
++static int
++join_read_key(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++
++  if (!table->file->inited)
++  {
++    table->file->ha_index_init(tab->ref.key, tab->sorted);
++  }
++  if (cmp_buffer_with_ref(tab) ||
++      (table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
++  {
++    if (tab->ref.key_err)
++    {
++      table->status=STATUS_NOT_FOUND;
++      return -1;
++    }
++    /*
++      Moving away from the current record. Unlock the row
++      in the handler if it did not match the partial WHERE.
++    */
++    if (tab->ref.has_record && tab->ref.use_count == 0)
++    {
++      tab->read_record.file->unlock_row();
++      tab->ref.has_record= FALSE;
++    }
++    error=table->file->index_read_map(table->record[0],
++                                      tab->ref.key_buff,
++                                      make_prev_keypart_map(tab->ref.key_parts),
++                                      HA_READ_KEY_EXACT);
++    if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
++      return report_error(table, error);
++
++    if (! error)
++    {
++      tab->ref.has_record= TRUE;
++      tab->ref.use_count= 1;
++    }
++  }
++  else if (table->status == 0)
++  {
++    DBUG_ASSERT(tab->ref.has_record);
++    tab->ref.use_count++;
++  }
++  table->null_row=0;
++  return table->status ? -1 : 0;
++}
++
++
++/**
++  Since join_read_key may buffer a record, do not unlock
++  it if it was not used in this invocation of join_read_key().
++  Only count locks, thus remembering whether the record was left unused,
++  and unlock it as soon as the current value of the
++  TABLE_REF buffer is about to be replaced.
++  @sa join_read_key()
++*/
++
++static void
++join_read_key_unlock_row(st_join_table *tab)
++{
++  DBUG_ASSERT(tab->ref.use_count);
++  if (tab->ref.use_count)
++    tab->ref.use_count--;
++}
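++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources):
++  join_read_key() keeps the last looked-up row buffered and only probes
++  the index again when the key value changes; the use counter above
++  records whether the buffered row was actually consumed.  A minimal
++  standalone illustration with a std::map as the "index":
++
++    #include <cstdio>
++    #include <map>
++    #include <string>
++
++    int main()
++    {
++      std::map<int, std::string> index= { {1, "one"}, {2, "two"} };
++      int cached_key= 0;
++      bool has_record= false;
++      std::string cached_row;
++      unsigned lookups= 0, reuses= 0;
++
++      const int probes[]= {1, 1, 1, 2, 2};
++      for (int key : probes)
++      {
++        if (!has_record || key != cached_key)
++        {
++          lookups++;                            // real index probe
++          std::map<int, std::string>::iterator it= index.find(key);
++          has_record= (it != index.end());
++          cached_row= has_record ? it->second : std::string();
++          cached_key= key;
++        }
++        else
++          reuses++;                             // buffered row reused
++        if (has_record)
++          std::printf("key %d -> %s\n", key, cached_row.c_str());
++      }
++      std::printf("%u lookups, %u reuses\n", lookups, reuses);
++      return 0;
++    }
++*/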
++
++/*
++  ref access method implementation: "read_first" function
++
++  SYNOPSIS
++    join_read_always_key()
++      tab  JOIN_TAB of the accessed table
++
++  DESCRIPTION
++    This is the "read_first" function for the "ref" access method.
++
++    The function must leave the index initialized when it returns.
++    ref_or_null access implementation depends on that.
++
++  RETURN
++    0  - Ok
++   -1  - Row not found 
++    1  - Error
++*/
++
++static int
++join_read_always_key(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++
++  /* Initialize the index first */
++  if (!table->file->inited)
++    table->file->ha_index_init(tab->ref.key, tab->sorted);
++ 
++  /* Perform "Late NULLs Filtering" (see internals manual for explanations) */
++  for (uint i= 0 ; i < tab->ref.key_parts ; i++)
++  {
++    if ((tab->ref.null_rejecting & 1 << i) && tab->ref.items[i]->is_null())
++        return -1;
++  }
++
++  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
++    return -1;
++  if ((error=table->file->index_read_map(table->record[0],
++                                         tab->ref.key_buff,
++                                         make_prev_keypart_map(tab->ref.key_parts),
++                                         HA_READ_KEY_EXACT)))
++  {
++    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
++      return report_error(table, error);
++    return -1; /* purecov: inspected */
++  }
++  return 0;
++}
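++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources): the
++  "Late NULLs Filtering" loop above skips the index probe entirely when a
++  null-rejecting key part is NULL, because an equality like t2.k = t1.x
++  can never match a NULL value.  The same check in isolation, with a
++  hypothetical KeyValue type:
++
++    #include <cstdio>
++    #include <vector>
++
++    struct KeyValue { bool is_null; int value; };
++
++    int main()
++    {
++      std::vector<KeyValue> outer_values= { {false, 7}, {true, 0}, {false, 3} };
++      for (const KeyValue &v : outer_values)
++      {
++        if (v.is_null)
++        {
++          std::printf("NULL key part -> skip the probe, no row can match\n");
++          continue;                       // the early "return -1" above
++        }
++        std::printf("would probe the index with key %d\n", v.value);
++      }
++      return 0;
++    }
++*/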
++
++
++/**
++  This function is used when optimizing away ORDER BY in 
++  SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC.
++*/
++  
++static int
++join_read_last_key(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++
++  if (!table->file->inited)
++    table->file->ha_index_init(tab->ref.key, tab->sorted);
++  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
++    return -1;
++  if ((error=table->file->index_read_last_map(table->record[0],
++                                              tab->ref.key_buff,
++                                              make_prev_keypart_map(tab->ref.key_parts))))
++  {
++    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
++      return report_error(table, error);
++    return -1; /* purecov: inspected */
++  }
++  return 0;
++}
++
++
++	/* ARGSUSED */
++static int
++join_no_more_records(READ_RECORD *info __attribute__((unused)))
++{
++  return -1;
++}
++
++
++static int
++join_read_next_same(READ_RECORD *info)
++{
++  int error;
++  TABLE *table= info->table;
++  JOIN_TAB *tab=table->reginfo.join_tab;
++
++  if ((error=table->file->index_next_same(table->record[0],
++					  tab->ref.key_buff,
++					  tab->ref.key_length)))
++  {
++    if (error != HA_ERR_END_OF_FILE)
++      return report_error(table, error);
++    table->status= STATUS_GARBAGE;
++    return -1;
++  }
++  return 0;
++}
++
++
++static int
++join_read_prev_same(READ_RECORD *info)
++{
++  int error;
++  TABLE *table= info->table;
++  JOIN_TAB *tab=table->reginfo.join_tab;
++
++  if ((error=table->file->index_prev(table->record[0])))
++    return report_error(table, error);
++  if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key,
++                      tab->ref.key_length))
++  {
++    table->status=STATUS_NOT_FOUND;
++    error= -1;
++  }
++  return error;
++}
++
++
++static int
++join_init_quick_read_record(JOIN_TAB *tab)
++{
++  if (test_if_quick_select(tab) == -1)
++    return -1;					/* No possible records */
++  return join_init_read_record(tab);
++}
++
++
++int rr_sequential(READ_RECORD *info);
++int init_read_record_seq(JOIN_TAB *tab)
++{
++  tab->read_record.read_record= rr_sequential;
++  if (tab->read_record.file->ha_rnd_init(1))
++    return 1;
++  return (*tab->read_record.read_record)(&tab->read_record);
++}
++
++static int
++test_if_quick_select(JOIN_TAB *tab)
++{
++  delete tab->select->quick;
++  tab->select->quick=0;
++  return tab->select->test_quick_select(tab->join->thd, tab->keys,
++					(table_map) 0, HA_POS_ERROR, 0);
++}
++
++
++static int
++join_init_read_record(JOIN_TAB *tab)
++{
++  if (tab->select && tab->select->quick && tab->select->quick->reset())
++    return 1;
++  init_read_record(&tab->read_record, tab->join->thd, tab->table,
++		   tab->select,1,1, FALSE);
++  return (*tab->read_record.read_record)(&tab->read_record);
++}
++
++
++static int
++join_read_first(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table=tab->table;
++  if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
++    table->set_keyread(TRUE);
++  tab->table->status=0;
++  tab->read_record.read_record=join_read_next;
++  tab->read_record.table=table;
++  tab->read_record.file=table->file;
++  tab->read_record.index=tab->index;
++  tab->read_record.record=table->record[0];
++  if (!table->file->inited)
++    table->file->ha_index_init(tab->index, tab->sorted);
++  if ((error=tab->table->file->index_first(tab->table->record[0])))
++  {
++    if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
++      report_error(table, error);
++    return -1;
++  }
++  return 0;
++}
++
++
++static int
++join_read_next(READ_RECORD *info)
++{
++  int error;
++  if ((error=info->file->index_next(info->record)))
++    return report_error(info->table, error);
++  return 0;
++}
++
++
++static int
++join_read_last(JOIN_TAB *tab)
++{
++  TABLE *table=tab->table;
++  int error;
++  if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
++    table->set_keyread(TRUE);
++  tab->table->status=0;
++  tab->read_record.read_record=join_read_prev;
++  tab->read_record.table=table;
++  tab->read_record.file=table->file;
++  tab->read_record.index=tab->index;
++  tab->read_record.record=table->record[0];
++  if (!table->file->inited)
++    table->file->ha_index_init(tab->index, 1);
++  if ((error= tab->table->file->index_last(tab->table->record[0])))
++    return report_error(table, error);
++  return 0;
++}
++
++
++static int
++join_read_prev(READ_RECORD *info)
++{
++  int error;
++  if ((error= info->file->index_prev(info->record)))
++    return report_error(info->table, error);
++  return 0;
++}
++
++
++static int
++join_ft_read_first(JOIN_TAB *tab)
++{
++  int error;
++  TABLE *table= tab->table;
++
++  if (!table->file->inited)
++    table->file->ha_index_init(tab->ref.key, 1);
++#if NOT_USED_YET
++  /* as ft-key doesn't use store_key's, see also FT_SELECT::init() */
++  if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
++    return -1;                             
++#endif
++  table->file->ft_init();
++
++  if ((error= table->file->ft_read(table->record[0])))
++    return report_error(table, error);
++  return 0;
++}
++
++static int
++join_ft_read_next(READ_RECORD *info)
++{
++  int error;
++  if ((error= info->file->ft_read(info->table->record[0])))
++    return report_error(info->table, error);
++  return 0;
++}
++
++
++/**
++  Reading of key with key reference and one part that may be NULL.
++*/
++
++int
++join_read_always_key_or_null(JOIN_TAB *tab)
++{
++  int res;
++
++  /* First read according to key which is NOT NULL */
++  *tab->ref.null_ref_key= 0;			// Clear null byte
++  if ((res= join_read_always_key(tab)) >= 0)
++    return res;
++
++  /* Then read key with null value */
++  *tab->ref.null_ref_key= 1;			// Set null byte
++  return safe_index_read(tab);
++}
++
++
++int
++join_read_next_same_or_null(READ_RECORD *info)
++{
++  int error;
++  if ((error= join_read_next_same(info)) >= 0)
++    return error;
++  JOIN_TAB *tab= info->table->reginfo.join_tab;
++
++  /* Test if we have already done a read after null key */
++  if (*tab->ref.null_ref_key)
++    return -1;					// All keys read
++  *tab->ref.null_ref_key= 1;			// Set null byte
++  return safe_index_read(tab);			// then read null keys
++}
++
++
++/*****************************************************************************
++  DESCRIPTION
++    Functions that end one nested loop iteration. Different functions
++    are used to support GROUP BY clause and to redirect records
++    to a table (e.g. in case of SELECT into a temporary table) or to the
++    network client.
++
++  RETURN VALUES
++    NESTED_LOOP_OK           - the record has been successfully handled
++    NESTED_LOOP_ERROR        - a fatal error (like table corruption)
++                               was detected
++    NESTED_LOOP_KILLED       - thread shutdown was requested while processing
++                               the record
++    NESTED_LOOP_QUERY_LIMIT  - the record has been successfully handled;
++                               additionally, the nested loop produced the
++                               number of rows specified in the LIMIT clause
++                               for the query
++    NESTED_LOOP_CURSOR_LIMIT - the record has been successfully handled;
++                               additionally, there is a cursor and the nested
++                               loop algorithm produced the number of rows
++                               that is specified for current cursor fetch
++                               operation.
++   All return values except NESTED_LOOP_OK abort the nested loop.
++*****************************************************************************/
++
++/* ARGSUSED */
++static enum_nested_loop_state
++end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++	 bool end_of_records)
++{
++  DBUG_ENTER("end_send");
++  if (!end_of_records)
++  {
++    int error;
++    if (join->having && join->having->val_int() == 0)
++      DBUG_RETURN(NESTED_LOOP_OK);               // Didn't match having
++    error=0;
++    if (join->procedure)
++      error=join->procedure->send_row(join->procedure_fields_list);
++    else if (join->do_send_rows)
++      error=join->result->send_data(*join->fields);
++    if (error)
++      DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
++    if (++join->send_records >= join->unit->select_limit_cnt &&
++	join->do_send_rows)
++    {
++      if (join->select_options & OPTION_FOUND_ROWS)
++      {
++	JOIN_TAB *jt=join->join_tab;
++	if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
++	    && !join->send_group_parts && !join->having && !jt->select_cond &&
++	    !(jt->select && jt->select->quick) &&
++	    (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
++            (jt->ref.key < 0))
++	{
++	  /* Join over all rows in table;  Return number of found rows */
++	  TABLE *table=jt->table;
++
++	  join->select_options ^= OPTION_FOUND_ROWS;
++	  if (table->sort.record_pointers ||
++	      (table->sort.io_cache && my_b_inited(table->sort.io_cache)))
++	  {
++	    /* Using filesort */
++	    join->send_records= table->sort.found_records;
++	  }
++	  else
++	  {
++	    table->file->info(HA_STATUS_VARIABLE);
++	    join->send_records= table->file->stats.records;
++	  }
++	}
++	else 
++	{
++	  join->do_send_rows= 0;
++	  if (join->unit->fake_select_lex)
++	    join->unit->fake_select_lex->select_limit= 0;
++	  DBUG_RETURN(NESTED_LOOP_OK);
++	}
++      }
++      DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);      // Abort nicely
++    }
++    else if (join->send_records >= join->fetch_limit)
++    {
++      /*
++        There is a server side cursor and all rows for
++        this fetch request are sent.
++      */
++      DBUG_RETURN(NESTED_LOOP_CURSOR_LIMIT);
++    }
++  }
++  else
++  {
++    if (join->procedure && join->procedure->end_of_records())
++      DBUG_RETURN(NESTED_LOOP_ERROR);
++  }
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
++
++
++	/* ARGSUSED */
++static enum_nested_loop_state
++end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++	       bool end_of_records)
++{
++  int idx= -1;
++  enum_nested_loop_state ok_code= NESTED_LOOP_OK;
++  DBUG_ENTER("end_send_group");
++
++  if (!join->first_record || end_of_records ||
++      (idx=test_if_group_changed(join->group_fields)) >= 0)
++  {
++    if (join->first_record || 
++        (end_of_records && !join->group && !join->group_optimized_away))
++    {
++      if (join->procedure)
++	join->procedure->end_group();
++      if (idx < (int) join->send_group_parts)
++      {
++	int error=0;
++	if (join->procedure)
++	{
++	  if (join->having && join->having->val_int() == 0)
++	    error= -1;				// Didn't satisfy having
++ 	  else
++	  {
++	    if (join->do_send_rows)
++	      error=join->procedure->send_row(*join->fields) ? 1 : 0;
++	    join->send_records++;
++	  }
++	  if (end_of_records && join->procedure->end_of_records())
++	    error= 1;				// Fatal error
++	}
++	else
++	{
++	  if (!join->first_record)
++	  {
++            List_iterator_fast<Item> it(*join->fields);
++            Item *item;
++	    /* No matching rows for group function */
++	    join->clear();
++
++            while ((item= it++))
++              item->no_rows_in_result();
++	  }
++	  if (join->having && join->having->val_int() == 0)
++	    error= -1;				// Didn't satisfy having
++	  else
++	  {
++	    if (join->do_send_rows)
++	      error=join->result->send_data(*join->fields) ? 1 : 0;
++	    join->send_records++;
++	  }
++	  if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
++	  {
++	    if (join->rollup_send_data((uint) (idx+1)))
++	      error= 1;
++	  }
++	}
++	if (error > 0)
++          DBUG_RETURN(NESTED_LOOP_ERROR);        /* purecov: inspected */
++	if (end_of_records)
++	  DBUG_RETURN(NESTED_LOOP_OK);
++	if (join->send_records >= join->unit->select_limit_cnt &&
++	    join->do_send_rows)
++	{
++	  if (!(join->select_options & OPTION_FOUND_ROWS))
++	    DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
++	  join->do_send_rows=0;
++	  join->unit->select_limit_cnt = HA_POS_ERROR;
++        }
++        else if (join->send_records >= join->fetch_limit)
++        {
++          /*
++            There is a server side cursor and all rows
++            for this fetch request are sent.
++          */
++          /*
++            Preventing code duplication. When finished with the group, reset
++            the group functions and copy_fields. We fall through. Bug #11904.
++          */
++          ok_code= NESTED_LOOP_CURSOR_LIMIT;
++        }
++      }
++    }
++    else
++    {
++      if (end_of_records)
++	DBUG_RETURN(NESTED_LOOP_OK);
++      join->first_record=1;
++      VOID(test_if_group_changed(join->group_fields));
++    }
++    if (idx < (int) join->send_group_parts)
++    {
++      /*
++        This branch is executed also for cursors which have finished their
++        fetch limit - the reason for ok_code.
++      */
++      copy_fields(&join->tmp_table_param);
++      if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
++	DBUG_RETURN(NESTED_LOOP_ERROR);
++      if (join->procedure)
++	join->procedure->add();
++      DBUG_RETURN(ok_code);
++    }
++  }
++  if (update_sum_func(join->sum_funcs))
++    DBUG_RETURN(NESTED_LOOP_ERROR);
++  if (join->procedure)
++    join->procedure->add();
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
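++
++/*
++  Editor's sketch (not part of this patch or of the MySQL sources):
++  end_send_group() relies on the input arriving ordered by the grouping
++  columns -- whenever the group value changes, the finished group is
++  emitted and the aggregates are reset, and the end-of-records call
++  flushes the last group.  A standalone SUM() per group over sorted rows:
++
++    #include <cstdio>
++    #include <vector>
++
++    struct Row { int grp; int val; };
++
++    int main()
++    {
++      std::vector<Row> rows= { {1, 10}, {1, 15}, {2, 7}, {3, 1}, {3, 2} };
++      bool first_record= true;
++      int  cur_grp= 0;
++      long sum= 0;
++
++      for (const Row &r : rows)
++      {
++        if (!first_record && r.grp != cur_grp)   // group changed: emit it
++        {
++          std::printf("group %d: sum=%ld\n", cur_grp, sum);
++          sum= 0;
++        }
++        first_record= false;
++        cur_grp= r.grp;
++        sum+= r.val;
++      }
++      if (!first_record)                         // end of records: last group
++        std::printf("group %d: sum=%ld\n", cur_grp, sum);
++      return 0;
++    }
++*/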
++
++
++	/* ARGSUSED */
++static enum_nested_loop_state
++end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++	  bool end_of_records)
++{
++  TABLE *table=join->tmp_table;
++  DBUG_ENTER("end_write");
++
++  if (join->thd->killed)			// Aborted by user
++  {
++    join->thd->send_kill_message();
++    DBUG_RETURN(NESTED_LOOP_KILLED);             /* purecov: inspected */
++  }
++  if (!end_of_records)
++  {
++    copy_fields(&join->tmp_table_param);
++    if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
++      DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
++
++#ifdef TO_BE_DELETED
++    if (!table->uniques)			// If not unique handling
++    {
++      /* Copy null values from group to row */
++      ORDER   *group;
++      for (group=table->group ; group ; group=group->next)
++      {
++	Item *item= *group->item;
++	if (item->maybe_null)
++	{
++	  Field *field=item->get_tmp_table_field();
++	  field->ptr[-1]= (uchar) (field->is_null() ? 1 : 0);
++	}
++      }
++    }
++#endif
++    if (!join->having || join->having->val_int())
++    {
++      int error;
++      join->found_records++;
++      if ((error=table->file->ha_write_row(table->record[0])))
++      {
++        if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
++	  goto end;
++	if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
++				    error,1))
++	  DBUG_RETURN(NESTED_LOOP_ERROR);        // Not a table_is_full error
++	table->s->uniques=0;			// To ensure rows are the same
++      }
++      if (++join->send_records >= join->tmp_table_param.end_write_records &&
++	  join->do_send_rows)
++      {
++	if (!(join->select_options & OPTION_FOUND_ROWS))
++	  DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
++	join->do_send_rows=0;
++	join->unit->select_limit_cnt = HA_POS_ERROR;
++	DBUG_RETURN(NESTED_LOOP_OK);
++      }
++    }
++  }
++end:
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
++
++/* ARGSUSED */
++/** Group by searching for the group record and updating it if possible. */
++
++static enum_nested_loop_state
++end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++	   bool end_of_records)
++{
++  TABLE *table=join->tmp_table;
++  ORDER   *group;
++  int	  error;
++  DBUG_ENTER("end_update");
++
++  if (end_of_records)
++    DBUG_RETURN(NESTED_LOOP_OK);
++  if (join->thd->killed)			// Aborted by user
++  {
++    join->thd->send_kill_message();
++    DBUG_RETURN(NESTED_LOOP_KILLED);             /* purecov: inspected */
++  }
++
++  join->found_records++;
++  copy_fields(&join->tmp_table_param);		// Groups are copied twice.
++  /* Make a key of group index */
++  for (group=table->group ; group ; group=group->next)
++  {
++    Item *item= *group->item;
++    item->save_org_in_field(group->field);
++    /* Store in the used key if the field was 0 */
++    if (item->maybe_null)
++      group->buff[-1]= (char) group->field->is_null();
++  }
++  if (!table->file->index_read_map(table->record[1],
++                                   join->tmp_table_param.group_buff,
++                                   HA_WHOLE_KEY,
++                                   HA_READ_KEY_EXACT))
++  {						/* Update old record */
++    restore_record(table,record[1]);
++    update_tmptable_sum_func(join->sum_funcs,table);
++    if ((error=table->file->ha_update_row(table->record[1],
++                                          table->record[0])))
++    {
++      table->file->print_error(error,MYF(0));	/* purecov: inspected */
++      DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
++    }
++    DBUG_RETURN(NESTED_LOOP_OK);
++  }
++
++  /*
++    Copy null bits from group key to table
++    We can't copy all data as the key may have different format
++    as the row data (for example as with VARCHAR keys)
++  */
++  KEY_PART_INFO *key_part;
++  for (group=table->group,key_part=table->key_info[0].key_part;
++       group ;
++       group=group->next,key_part++)
++  {
++    if (key_part->null_bit)
++      memcpy(table->record[0]+key_part->offset, group->buff, 1);
++  }
++  init_tmptable_sum_functions(join->sum_funcs);
++  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
++    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
++  if ((error=table->file->ha_write_row(table->record[0])))
++  {
++    if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
++				error, 0))
++      DBUG_RETURN(NESTED_LOOP_ERROR);            // Not a table_is_full error
++    /* Change method to update rows */
++    table->file->ha_index_init(0, 0);
++    join->join_tab[join->tables-1].next_select=end_unique_update;
++  }
++  join->send_records++;
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
++
++
++/** Like end_update, but this is done with unique constraints instead of keys.  */
++
++static enum_nested_loop_state
++end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++		  bool end_of_records)
++{
++  TABLE *table=join->tmp_table;
++  int	  error;
++  DBUG_ENTER("end_unique_update");
++
++  if (end_of_records)
++    DBUG_RETURN(NESTED_LOOP_OK);
++  if (join->thd->killed)			// Aborted by user
++  {
++    join->thd->send_kill_message();
++    DBUG_RETURN(NESTED_LOOP_KILLED);             /* purecov: inspected */
++  }
++
++  init_tmptable_sum_functions(join->sum_funcs);
++  copy_fields(&join->tmp_table_param);		// Groups are copied twice.
++  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
++    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
++
++  if (!(error=table->file->ha_write_row(table->record[0])))
++    join->send_records++;			// New group
++  else
++  {
++    if ((int) table->file->get_dup_key(error) < 0)
++    {
++      table->file->print_error(error,MYF(0));	/* purecov: inspected */
++      DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
++    }
++    if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
++    {
++      table->file->print_error(error,MYF(0));	/* purecov: inspected */
++      DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
++    }
++    restore_record(table,record[1]);
++    update_tmptable_sum_func(join->sum_funcs,table);
++    if ((error=table->file->ha_update_row(table->record[1],
++                                          table->record[0])))
++    {
++      table->file->print_error(error,MYF(0));	/* purecov: inspected */
++      DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
++    }
++  }
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
++
++
++	/* ARGSUSED */
++static enum_nested_loop_state
++end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
++		bool end_of_records)
++{
++  TABLE *table=join->tmp_table;
++  int	  idx= -1;
++  DBUG_ENTER("end_write_group");
++
++  if (join->thd->killed)
++  {						// Aborted by user
++    join->thd->send_kill_message();
++    DBUG_RETURN(NESTED_LOOP_KILLED);             /* purecov: inspected */
++  }
++  if (!join->first_record || end_of_records ||
++      (idx=test_if_group_changed(join->group_fields)) >= 0)
++  {
++    if (join->first_record || (end_of_records && !join->group))
++    {
++      if (join->procedure)
++	join->procedure->end_group();
++      int send_group_parts= join->send_group_parts;
++      if (idx < send_group_parts)
++      {
++	if (!join->first_record)
++	{
++	  /* No matching rows for group function */
++	  join->clear();
++	}
++        copy_sum_funcs(join->sum_funcs,
++                       join->sum_funcs_end[send_group_parts]);
++	if (!join->having || join->having->val_int())
++	{
++          int error= table->file->ha_write_row(table->record[0]);
++          if (error && create_myisam_from_heap(join->thd, table,
++                                               &join->tmp_table_param,
++                                               error, 0))
++	    DBUG_RETURN(NESTED_LOOP_ERROR);
++        }
++        if (join->rollup.state != ROLLUP::STATE_NONE)
++	{
++	  if (join->rollup_write_data((uint) (idx+1), table))
++	    DBUG_RETURN(NESTED_LOOP_ERROR);
++	}
++	if (end_of_records)
++	  DBUG_RETURN(NESTED_LOOP_OK);
++      }
++    }
++    else
++    {
++      if (end_of_records)
++	DBUG_RETURN(NESTED_LOOP_OK);
++      join->first_record=1;
++      VOID(test_if_group_changed(join->group_fields));
++    }
++    if (idx < (int) join->send_group_parts)
++    {
++      copy_fields(&join->tmp_table_param);
++      if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
++	DBUG_RETURN(NESTED_LOOP_ERROR);
++      if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
++	DBUG_RETURN(NESTED_LOOP_ERROR);
++      if (join->procedure)
++	join->procedure->add();
++      DBUG_RETURN(NESTED_LOOP_OK);
++    }
++  }
++  if (update_sum_func(join->sum_funcs))
++    DBUG_RETURN(NESTED_LOOP_ERROR);
++  if (join->procedure)
++    join->procedure->add();
++  DBUG_RETURN(NESTED_LOOP_OK);
++}
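++
++/*
++  A minimal sketch of the kind of query that reaches end_write_group():
++  rows arrive ordered on the GROUP BY columns and a result row is flushed
++  each time the group changes. Table and column names are illustrative
++  only, not taken from the server sources.
++
++    CREATE TABLE t1 (a INT, b INT);
++    SELECT a, COUNT(*), SUM(b) FROM t1 GROUP BY a;
++
++  On every group change the previous group's aggregates are written to the
++  temporary table via ha_write_row(), exactly as coded above.
++*/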
++
++
++/*****************************************************************************
++  Remove calculations that use tables that aren't read yet. Also remove
++  tests against fields that are read through a key where the table is not
++  an outer join table.
++  We can't remove tests that are made against columns which are stored
++  in sorted order.
++*****************************************************************************/
++
++/**
++  @return
++    1 if right_item is used removable reference key on left_item
++*/
++
++static bool test_if_ref(Item_field *left_item,Item *right_item)
++{
++  Field *field=left_item->field;
++  // No need to change const test. We also have to keep tests on LEFT JOIN
++  if (!field->table->const_table && !field->table->maybe_null)
++  {
++    Item *ref_item=part_of_refkey(field->table,field);
++    if (ref_item && ref_item->eq(right_item,1))
++    {
++      right_item= right_item->real_item();
++      if (right_item->type() == Item::FIELD_ITEM)
++	return (field->eq_def(((Item_field *) right_item)->field));
++      /* remove equalities injected by IN->EXISTS transformation */
++      else if (right_item->type() == Item::CACHE_ITEM)
++        return ((Item_cache *)right_item)->eq_def (field);
++      if (right_item->const_item() && !(right_item->is_null()))
++      {
++	/*
++	  We can remove binary fields and numerical fields except float,
++	  as float comparison isn't 100 % secure
++	  We have to keep normal strings to be able to check for end spaces
++	*/
++	if (field->binary() &&
++	    field->real_type() != MYSQL_TYPE_STRING &&
++	    field->real_type() != MYSQL_TYPE_VARCHAR &&
++	    (field->type() != MYSQL_TYPE_FLOAT || field->decimals() == 0))
++	{
++	  return !store_val_in_field(field, right_item, CHECK_FIELD_WARN);
++	}
++      }
++    }
++  }
++  return 0;					// keep test
++}
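++
++/*
++  Illustration of the predicate removal above (hypothetical table and
++  index names, not taken from the server sources):
++
++    CREATE TABLE t1 (a INT NOT NULL, b INT, KEY k_a (a));
++    SELECT * FROM t1 WHERE a = 5 AND b > 10;
++
++  If the optimizer reads t1 through ref access on k_a with a = 5, every
++  fetched row already satisfies "a = 5", so test_if_ref() lets
++  make_cond_for_table() drop that equality and keep only "b > 10" as the
++  row condition. String and float columns are kept, as noted above.
++*/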
++
++
++static COND *
++make_cond_for_table(COND *cond, table_map tables, table_map used_table)
++{
++  if (used_table && !(cond->used_tables() & used_table))
++    return (COND*) 0;				// Already checked
++  if (cond->type() == Item::COND_ITEM)
++  {
++    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++    {
++      /* Create new top level AND item */
++      Item_cond_and *new_cond=new Item_cond_and;
++      if (!new_cond)
++	return (COND*) 0;			// OOM /* purecov: inspected */
++      List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++      Item *item;
++      while ((item=li++))
++      {
++	Item *fix=make_cond_for_table(item,tables,used_table);
++	if (fix)
++	  new_cond->argument_list()->push_back(fix);
++      }
++      switch (new_cond->argument_list()->elements) {
++      case 0:
++	return (COND*) 0;			// Always true
++      case 1:
++	return new_cond->argument_list()->head();
++      default:
++	/*
++	  Item_cond_and do not need fix_fields for execution, its parameters
++	  are fixed or do not need fix_fields, too
++	*/
++	new_cond->quick_fix_field();
++	new_cond->used_tables_cache=
++	  ((Item_cond_and*) cond)->used_tables_cache &
++	  tables;
++	return new_cond;
++      }
++    }
++    else
++    {						// Or list
++      Item_cond_or *new_cond=new Item_cond_or;
++      if (!new_cond)
++	return (COND*) 0;			// OOM /* purecov: inspected */
++      List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++      Item *item;
++      while ((item=li++))
++      {
++	Item *fix=make_cond_for_table(item,tables,0L);
++	if (!fix)
++	  return (COND*) 0;			// Always true
++	new_cond->argument_list()->push_back(fix);
++      }
++      /*
++	Item_cond_and do not need fix_fields for execution, its parameters
++	are fixed or do not need fix_fields, too
++      */
++      new_cond->quick_fix_field();
++      new_cond->used_tables_cache= ((Item_cond_or*) cond)->used_tables_cache;
++      new_cond->top_level_item();
++      return new_cond;
++    }
++  }
++
++  /*
++    Because the following test takes a while and it can be done
++    table_count times, we mark each item that we have examined with the result
++    of the test
++  */
++
++  if (cond->marker == 3 || (cond->used_tables() & ~tables))
++    return (COND*) 0;				// Can't check this yet
++  if (cond->marker == 2 || cond->eq_cmp_result() == Item::COND_OK)
++    return cond;				// Not boolean op
++
++  if (((Item_func*) cond)->functype() == Item_func::EQ_FUNC)
++  {
++    Item *left_item=	((Item_func*) cond)->arguments()[0];
++    Item *right_item= ((Item_func*) cond)->arguments()[1];
++    if (left_item->type() == Item::FIELD_ITEM &&
++	test_if_ref((Item_field*) left_item,right_item))
++    {
++      cond->marker=3;			// Checked when read
++      return (COND*) 0;
++    }
++    if (right_item->type() == Item::FIELD_ITEM &&
++	test_if_ref((Item_field*) right_item,left_item))
++    {
++      cond->marker=3;			// Checked when read
++      return (COND*) 0;
++    }
++  }
++  cond->marker=2;
++  return cond;
++}
++
++static Item *
++part_of_refkey(TABLE *table,Field *field)
++{
++  if (!table->reginfo.join_tab)
++    return (Item*) 0;             // field from outer non-select (UPDATE,...)
++
++  uint ref_parts=table->reginfo.join_tab->ref.key_parts;
++  if (ref_parts)
++  {
++    KEY_PART_INFO *key_part=
++      table->key_info[table->reginfo.join_tab->ref.key].key_part;
++
++    for (uint part=0 ; part < ref_parts ; part++,key_part++)
++      if (field->eq(key_part->field) &&
++	  !(key_part->key_part_flag & (HA_PART_KEY_SEG | HA_NULL_PART)))
++	return table->reginfo.join_tab->ref.items[part];
++  }
++  return (Item*) 0;
++}
++
++
++/**
++  Test if one can use the key to resolve ORDER BY.
++
++  @param order                 Sort order
++  @param table                 Table to sort
++  @param idx                   Index to check
++  @param used_key_parts        Return value for used key parts.
++
++
++  @note
++    used_key_parts is set to correct key parts used if return value != 0
++    (On other cases, used_key_part may be changed)
++    Note that the value may actually be greater than the number of index 
++    key parts. This can happen for storage engines that have the primary 
++    key parts as a suffix for every secondary key.
++
++  @retval
++    1   key is ok.
++  @retval
++    0   Key can't be used
++  @retval
++    -1   Reverse key can be used
++*/
++
++static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
++				uint *used_key_parts)
++{
++  KEY_PART_INFO *key_part,*key_part_end;
++  key_part=table->key_info[idx].key_part;
++  key_part_end=key_part+table->key_info[idx].key_parts;
++  key_part_map const_key_parts=table->const_key_parts[idx];
++  int reverse=0;
++  my_bool on_pk_suffix= FALSE;
++  DBUG_ENTER("test_if_order_by_key");
++
++  for (; order ; order=order->next, const_key_parts>>=1)
++  {
++    Field *field=((Item_field*) (*order->item)->real_item())->field;
++    int flag;
++
++    /*
++      Skip key parts that are constants in the WHERE clause.
++      These are already skipped in the ORDER BY by const_expression_in_where()
++    */
++    for (; const_key_parts & 1 ; const_key_parts>>= 1)
++      key_part++; 
++
++    if (key_part == key_part_end)
++    {
++      /* 
++        We are at the end of the key. Check if the engine has the primary
++        key as a suffix to the secondary keys. If it has continue to check
++        the primary key as a suffix.
++      */
++      if (!on_pk_suffix &&
++          (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
++          table->s->primary_key != MAX_KEY &&
++          table->s->primary_key != idx)
++      {
++        on_pk_suffix= TRUE;
++        key_part= table->key_info[table->s->primary_key].key_part;
++        key_part_end=key_part+table->key_info[table->s->primary_key].key_parts;
++        const_key_parts=table->const_key_parts[table->s->primary_key];
++
++        for (; const_key_parts & 1 ; const_key_parts>>= 1)
++          key_part++; 
++        /*
++         The primary and secondary key parts were all const (i.e. there's
++         one row).  The sorting doesn't matter.
++        */
++        if (key_part == key_part_end && reverse == 0)
++        {
++          *used_key_parts= 0;
++          DBUG_RETURN(1);
++        }
++      }
++      else
++        DBUG_RETURN(0);
++    }
++
++    if (key_part->field != field)
++      DBUG_RETURN(0);
++
++    /* set flag to 1 if we can use read-next on key, else to -1 */
++    flag= ((order->asc == !(key_part->key_part_flag & HA_REVERSE_SORT)) ?
++           1 : -1);
++    if (reverse && flag != reverse)
++      DBUG_RETURN(0);
++    reverse=flag;				// Remember if reverse
++    key_part++;
++  }
++  if (on_pk_suffix)
++  {
++    uint used_key_parts_secondary= table->key_info[idx].key_parts;
++    uint used_key_parts_pk=
++      (uint) (key_part - table->key_info[table->s->primary_key].key_part);
++    *used_key_parts= used_key_parts_pk + used_key_parts_secondary;
++
++    if (reverse == -1 &&
++        (!(table->file->index_flags(idx, used_key_parts_secondary - 1, 1) &
++           HA_READ_PREV) ||
++         !(table->file->index_flags(table->s->primary_key,
++                                    used_key_parts_pk - 1, 1) & HA_READ_PREV)))
++      reverse= 0;                               // Index can't be used
++  }
++  else
++  {
++    *used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
++    if (reverse == -1 && 
++        !(table->file->index_flags(idx, *used_key_parts-1, 1) & HA_READ_PREV))
++      reverse= 0;                               // Index can't be used
++  }
++  DBUG_RETURN(reverse);
++}
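++
++/*
++  A few hypothetical outcomes of test_if_order_by_key() for an index
++  KEY k_ab (a, b), assuming neither column is bound to a constant in the
++  WHERE clause (names are illustrative only):
++
++    ORDER BY a, b            ->  1  (forward index scan gives the order)
++    ORDER BY a DESC, b DESC  -> -1  (reverse index scan gives the order)
++    ORDER BY a, b DESC       ->  0  (mixed directions, key can't be used)
++    ORDER BY b               ->  0  (doesn't start at the first key part)
++*/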
++
++
++/**
++  Find shortest key suitable for full table scan.
++
++  @param table                 Table to scan
++  @param usable_keys           Allowed keys
++
++  @note
++     Given that
++     1) clustered primary key entry data set is a set of all record
++        fields (key fields and not key fields) and
++     2) secondary index entry data is a union of its key fields and
++        primary key fields (at least InnoDB and its derivatives don't
++        duplicate primary key fields there, even if the primary and
++        the secondary keys have a common subset of key fields),
++     then secondary index entry data is always a subset of primary key entry.
++     Unfortunately, key_info[nr].key_length doesn't show the length
++     of key/pointer pair but a sum of key field lengths only, thus
++     we can't estimate index IO volume comparing only this key_length
++     value of secondary keys and clustered PK.
++     So, try secondary keys first, and choose PK only if there are no
++     usable secondary covering keys, or the best secondary key found includes
++     all table fields (i.e. is the same as the PK):
++
++  @return
++    MAX_KEY     no suitable key found
++    key index   otherwise
++*/
++
++uint find_shortest_key(TABLE *table, const key_map *usable_keys)
++{
++  uint best= MAX_KEY;
++  uint usable_clustered_pk= (table->file->primary_key_is_clustered() &&
++                             table->s->primary_key != MAX_KEY &&
++                             usable_keys->is_set(table->s->primary_key)) ?
++                            table->s->primary_key : MAX_KEY;
++  if (!usable_keys->is_clear_all())
++  {
++    uint min_length= (uint) ~0;
++    for (uint nr=0; nr < table->s->keys ; nr++)
++    {
++      if (nr == usable_clustered_pk)
++        continue;
++      if (usable_keys->is_set(nr))
++      {
++        if (table->key_info[nr].key_length < min_length)
++        {
++          min_length=table->key_info[nr].key_length;
++          best=nr;
++        }
++      }
++    }
++  }
++  if (usable_clustered_pk != MAX_KEY)
++  {
++    /*
++     If the primary key is clustered and the shortest key found covers all
++     table fields, then a primary key scan would normally be faster because
++     the amount of data to scan is the same but the PK is clustered.
++     It's safe to compare key parts with table fields since duplicate key
++     parts aren't allowed.
++     */
++    if (best == MAX_KEY ||
++        table->key_info[best].key_parts >= table->s->fields)
++      best= usable_clustered_pk;
++  }
++  return best;
++}
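++
++/*
++  Hypothetical example for find_shortest_key(): on an InnoDB table with a
++  clustered PRIMARY KEY (id) and a secondary KEY k_a (a), a query such as
++
++    SELECT COUNT(*) FROM t1;
++
++  prefers scanning the shorter secondary index k_a, since each secondary
++  entry is smaller than a clustered PK entry. The clustered PK is chosen
++  only when no usable secondary key exists or when the best secondary key
++  already includes all table fields, as explained above.
++*/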
++
++/**
++  Test if a second key is the subkey of the first one.
++
++  @param key_part              First key parts
++  @param ref_key_part          Second key parts
++  @param ref_key_part_end      Last+1 part of the second key
++
++  @note
++    Second key MUST be shorter than the first one.
++
++  @retval
++    1	is a subkey
++  @retval
++    0	no sub key
++*/
++
++inline bool 
++is_subkey(KEY_PART_INFO *key_part, KEY_PART_INFO *ref_key_part,
++	  KEY_PART_INFO *ref_key_part_end)
++{
++  for (; ref_key_part < ref_key_part_end; key_part++, ref_key_part++)
++    if (!key_part->field->eq(ref_key_part->field))
++      return 0;
++  return 1;
++}
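++
++/*
++  Example (hypothetical key definitions): with
++
++    KEY k_ab  (a, b),
++    KEY k_abc (a, b, c)
++
++  the parts of k_ab are a field-wise prefix of k_abc, so
++  is_subkey(k_abc.key_part, k_ab.key_part, k_ab.key_part_end) returns 1.
++  test_if_subkey() below uses this to replace a WHERE ref key with a
++  longer key that can also deliver the requested ORDER BY.
++*/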
++
++/**
++  Test if we can use one of the 'usable_keys' instead of 'ref' key
++  for sorting.
++
++  @param ref			Number of key, used for WHERE clause
++  @param usable_keys		Keys for testing
++
++  @return
++    - MAX_KEY			If we can't use other key
++    - the number of found key	Otherwise
++*/
++
++static uint
++test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
++	       const key_map *usable_keys)
++{
++  uint nr;
++  uint min_length= (uint) ~0;
++  uint best= MAX_KEY;
++  uint not_used;
++  KEY_PART_INFO *ref_key_part= table->key_info[ref].key_part;
++  KEY_PART_INFO *ref_key_part_end= ref_key_part + ref_key_parts;
++
++  for (nr= 0 ; nr < table->s->keys ; nr++)
++  {
++    if (usable_keys->is_set(nr) &&
++	table->key_info[nr].key_length < min_length &&
++	table->key_info[nr].key_parts >= ref_key_parts &&
++	is_subkey(table->key_info[nr].key_part, ref_key_part,
++		  ref_key_part_end) &&
++	test_if_order_by_key(order, table, nr, &not_used))
++    {
++      min_length= table->key_info[nr].key_length;
++      best= nr;
++    }
++  }
++  return best;
++}
++
++
++/**
++  Check if GROUP BY/DISTINCT can be optimized away because the set is
++  already known to be distinct.
++
++  Used in removing the GROUP BY/DISTINCT of the following types of
++  statements:
++  @code
++    SELECT [DISTINCT] <unique_key_cols>... FROM <single_table_ref>
++      [GROUP BY <unique_key_cols>,...]
++  @endcode
++
++    If (a,b,c is distinct)
++    then <any combination of a,b,c>,{whatever} is also distinct
++
++    This function checks if all the key parts of any of the unique keys
++    of the table are referenced by a list : either the select list
++    through find_field_in_item_list or GROUP BY list through
++    find_field_in_order_list.
++    If the above holds and the key parts cannot contain NULLs then we 
++    can safely remove the GROUP BY/DISTINCT,
++    as no result set can be more distinct than a unique key.
++
++  @param table                The table to operate on.
++  @param find_func            function to iterate over the list and search
++                              for a field
++
++  @retval
++    1                    found
++  @retval
++    0                    not found.
++*/
++
++static bool
++list_contains_unique_index(TABLE *table,
++                          bool (*find_func) (Field *, void *), void *data)
++{
++  if (table->pos_in_table_list->outer_join)
++    return 0;
++  for (uint keynr= 0; keynr < table->s->keys; keynr++)
++  {
++    if (keynr == table->s->primary_key ||
++         (table->key_info[keynr].flags & HA_NOSAME))
++    {
++      KEY *keyinfo= table->key_info + keynr;
++      KEY_PART_INFO *key_part, *key_part_end;
++
++      for (key_part=keyinfo->key_part,
++           key_part_end=key_part+ keyinfo->key_parts;
++           key_part < key_part_end;
++           key_part++)
++      {
++        if (key_part->field->real_maybe_null() || 
++            !find_func(key_part->field, data))
++          break;
++      }
++      if (key_part == key_part_end)
++        return 1;
++    }
++  }
++  return 0;
++}
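++
++/*
++  Concrete (hypothetical) case for the optimization above:
++
++    CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL, c INT,
++                     UNIQUE KEY u_ab (a, b));
++    SELECT DISTINCT a, b, c FROM t1;
++
++  All parts of the NOT NULL unique key u_ab appear in the select list, so
++  the result is already distinct and the DISTINCT (or an equivalent
++  GROUP BY a, b) can be removed, provided t1 is not on the inner side of
++  an outer join.
++*/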
++
++
++/**
++  Helper function for list_contains_unique_index.
++  Find a field reference in a list of ORDER structures.
++  Finds a direct reference of the Field in the list.
++
++  @param field                The field to search for.
++  @param data                 ORDER *.The list to search in
++
++  @retval
++    1                    found
++  @retval
++    0                    not found.
++*/
++
++static bool
++find_field_in_order_list (Field *field, void *data)
++{
++  ORDER *group= (ORDER *) data;
++  bool part_found= 0;
++  for (ORDER *tmp_group= group; tmp_group; tmp_group=tmp_group->next)
++  {
++    Item *item= (*tmp_group->item)->real_item();
++    if (item->type() == Item::FIELD_ITEM &&
++        ((Item_field*) item)->field->eq(field))
++    {
++      part_found= 1;
++      break;
++    }
++  }
++  return part_found;
++}
++
++
++/**
++  Helper function for list_contains_unique_index.
++  Find a field reference in a dynamic list of Items.
++  Finds a direct reference of the Field in the list.
++
++  @param[in] field             The field to search for.
++  @param[in] data              List<Item> *.The list to search in
++
++  @retval
++    1                    found
++  @retval
++    0                    not found.
++*/
++
++static bool
++find_field_in_item_list (Field *field, void *data)
++{
++  List<Item> *fields= (List<Item> *) data;
++  bool part_found= 0;
++  List_iterator<Item> li(*fields);
++  Item *item;
++
++  while ((item= li++))
++  {
++    if (item->type() == Item::FIELD_ITEM &&
++        ((Item_field*) item)->field->eq(field))
++    {
++      part_found= 1;
++      break;
++    }
++  }
++  return part_found;
++}
++
++
++/**
++  Test if we can skip the ORDER BY by using an index.
++
++  If we can use an index, the JOIN_TAB / tab->select struct
++  is changed to use the index.
++
++  The index must cover all fields in <order>, or it will not be considered.
++
++  @param no_changes No changes will be made to the query plan.
++
++  @todo
++    - sergeyp: Results of all index merge selects actually are ordered 
++    by clustered PK values.
++
++  @retval
++    0    We have to use filesort to do the sorting
++  @retval
++    1    We can use an index.
++*/
++
++static bool
++test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
++			bool no_changes, key_map *map)
++{
++  int ref_key;
++  uint ref_key_parts;
++  int order_direction= 0;
++  uint used_key_parts;
++  TABLE *table=tab->table;
++  SQL_SELECT *select=tab->select;
++  key_map usable_keys;
++  QUICK_SELECT_I *save_quick= 0;
++  int best_key= -1;
++
++  DBUG_ENTER("test_if_skip_sort_order");
++  LINT_INIT(ref_key_parts);
++
++  /*
++    Keys disabled by ALTER TABLE ... DISABLE KEYS should have already
++    been taken into account.
++  */
++  usable_keys= *map;
++
++  for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
++  {
++    Item *item= (*tmp_order->item)->real_item();
++    if (item->type() != Item::FIELD_ITEM)
++    {
++      usable_keys.clear_all();
++      DBUG_RETURN(0);
++    }
++    usable_keys.intersect(((Item_field*) item)->field->part_of_sortkey);
++    if (usable_keys.is_clear_all())
++      DBUG_RETURN(0);					// No usable keys
++  }
++
++  ref_key= -1;
++  /* Test if constant range in WHERE */
++  if (tab->ref.key >= 0 && tab->ref.key_parts)
++  {
++    ref_key=	   tab->ref.key;
++    ref_key_parts= tab->ref.key_parts;
++    if (tab->type == JT_REF_OR_NULL || tab->type == JT_FT)
++      DBUG_RETURN(0);
++  }
++  else if (select && select->quick)		// Range found by opt_range
++  {
++    int quick_type= select->quick->get_type();
++    save_quick= select->quick;
++    /* 
++      assume results are not ordered when index merge is used 
++      TODO: sergeyp: Results of all index merge selects actually are ordered 
++      by clustered PK values.
++    */
++  
++    if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || 
++        quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || 
++        quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT)
++      DBUG_RETURN(0);
++    ref_key=	   select->quick->index;
++    ref_key_parts= select->quick->used_key_parts;
++  }
++
++  if (ref_key >= 0)
++  {
++    /*
++      We come here when there is a REF key.
++    */
++    if (!usable_keys.is_set(ref_key))
++    {
++      /*
++	We come here when ref_key is not among usable_keys
++      */
++      uint new_ref_key;
++      /*
++	If using index only read, only consider other possible index only
++	keys
++      */
++      if (table->covering_keys.is_set(ref_key))
++	usable_keys.intersect(table->covering_keys);
++      if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts,
++				       &usable_keys)) < MAX_KEY)
++      {
++	/* Found key that can be used to retrieve data in sorted order */
++	if (tab->ref.key >= 0)
++	{
++          /*
++            We'll use ref access method on key new_ref_key. In general case 
++            the index search tuple for new_ref_key will be different (e.g.
++            when one index is defined as (part1, part2, ...) and another as
++            (part1, part2(N), ...) and the WHERE clause contains 
++            "part1 = const1 AND part2=const2". 
++            So we build tab->ref from scratch here.
++          */
++          KEYUSE *keyuse= tab->keyuse;
++          while (keyuse->key != new_ref_key && keyuse->table == tab->table)
++            keyuse++;
++          if (create_ref_for_key(tab->join, tab, keyuse, 
++                                 tab->join->const_table_map))
++            DBUG_RETURN(0);
++
++          pick_table_access_method(tab);
++	}
++	else
++	{
++          /*
++            The range optimizer constructed QUICK_RANGE for ref_key, and
++            we want to use instead new_ref_key as the index. We can't
++            just change the index of the quick select, because this may
++            result in an inconsistent QUICK_SELECT object. Below we
++            create a new QUICK_SELECT from scratch so that all its
++            parameters are set correctly by the range optimizer.
++           */
++          key_map new_ref_key_map;
++          new_ref_key_map.clear_all();  // Force the creation of quick select
++          new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key.
++
++          select->quick= 0;
++          if (select->test_quick_select(tab->join->thd, new_ref_key_map, 0,
++                                        (tab->join->select_options &
++                                         OPTION_FOUND_ROWS) ?
++                                        HA_POS_ERROR :
++                                        tab->join->unit->select_limit_cnt,0) <=
++              0)
++            goto use_filesort;
++	}
++        ref_key= new_ref_key;
++      }
++    }
++    /* Check if we get the rows in requested sorted order by using the key */
++    if (usable_keys.is_set(ref_key) &&
++        (order_direction= test_if_order_by_key(order,table,ref_key,
++					       &used_key_parts)))
++      goto check_reverse_order;
++  }
++  {
++    /*
++      Check whether there is an index compatible with the given order
++      usage of which is cheaper than usage of the ref_key index (ref_key>=0)
++      or a table scan.
++      It may be the case if ORDER/GROUP BY is used with LIMIT.
++    */
++    uint nr;
++    key_map keys;
++    uint best_key_parts= 0;
++    uint saved_best_key_parts= 0;
++    int best_key_direction= 0;
++    ha_rows best_records= 0;
++    double read_time;
++    bool is_best_covering= FALSE;
++    double fanout= 1;
++    JOIN *join= tab->join;
++    uint tablenr= tab - join->join_tab;
++    ha_rows table_records= table->file->stats.records;
++    bool group= join->group && order == join->group_list;
++    ha_rows ref_key_quick_rows= HA_POS_ERROR;
++
++    /*
++      If not used with LIMIT, only use keys if the whole query can be
++      resolved with a key;  This is because filesort() is usually faster than
++      retrieving all rows through an index.
++    */
++    if (select_limit >= table_records)
++    {
++      keys= *table->file->keys_to_use_for_scanning();
++      keys.merge(table->covering_keys);
++
++      /*
++	We are adding here also the index specified in FORCE INDEX clause, 
++	if any.
++        This is to allow users to use index in ORDER BY.
++      */
++      if (table->force_index) 
++	keys.merge(group ? table->keys_in_use_for_group_by :
++                           table->keys_in_use_for_order_by);
++      keys.intersect(usable_keys);
++    }
++    else
++      keys= usable_keys;
++
++    if (ref_key >= 0 && table->covering_keys.is_set(ref_key))
++      ref_key_quick_rows= table->quick_rows[ref_key];
++
++    read_time= join->best_positions[tablenr].read_time;
++    for (uint i= tablenr+1; i < join->tables; i++)
++      fanout*= join->best_positions[i].records_read; // fanout is always >= 1
++
++    for (nr=0; nr < table->s->keys ; nr++)
++    {
++      int direction;
++
++      if (keys.is_set(nr) &&
++          (direction= test_if_order_by_key(order, table, nr, &used_key_parts)))
++      {
++        /*
++          At this point we are sure that ref_key is a non-ordering
++          key (where "ordering key" is a key that will return rows
++          in the order required by ORDER BY).
++        */
++        DBUG_ASSERT (ref_key != (int) nr);
++
++        bool is_covering= table->covering_keys.is_set(nr) ||
++                          (nr == table->s->primary_key &&
++                          table->file->primary_key_is_clustered());
++	
++        /* 
++          Don't use an index scan with ORDER BY without limit.
++          For GROUP BY without limit always use index scan
++          if there is a suitable index. 
++          It is hard to explain this asymmetry rationally.
++          It's easy to demonstrate that using a
++          temporary table + filesort could be cheaper for grouping
++          queries too.
++	*/ 
++        if (is_covering ||
++            select_limit != HA_POS_ERROR || 
++            (ref_key < 0 && (group || table->force_index)))
++        { 
++          double rec_per_key;
++          double index_scan_time;
++          KEY *keyinfo= tab->table->key_info+nr;
++          if (select_limit == HA_POS_ERROR)
++            select_limit= table_records;
++          if (group)
++          {
++            /* 
++              Used_key_parts can be larger than keyinfo->key_parts
++              when using a secondary index clustered with a primary 
++              key (e.g. as in Innodb). 
++              See Bug #28591 for details.
++            */  
++            rec_per_key= used_key_parts &&
++                         used_key_parts <= keyinfo->key_parts ?
++                         keyinfo->rec_per_key[used_key_parts-1] : 1;
++            set_if_bigger(rec_per_key, 1);
++            /*
++              With a grouping query each group containing on average
++              rec_per_key records produces only one row that will
++              be included into the result set.
++	    */  
++            if (select_limit > table_records/rec_per_key)
++                select_limit= table_records;
++            else
++              select_limit= (ha_rows) (select_limit*rec_per_key);
++          }
++          /* 
++            If tab=tk is not the last joined table tn then to get first
++            L records from the result set we can expect to retrieve
++            only L/fanout(tk,tn) where fanout(tk,tn) says how many
++            rows in the record set on average will match each row tk.
++            Usually our estimates for fanouts are too pessimistic.
++            So the estimate for L/fanout(tk,tn) will be too optimistic
++            and as result we'll choose an index scan when using ref/range
++            access + filesort will be cheaper.
++	  */
++          select_limit= (ha_rows) (select_limit < fanout ?
++                                   1 : select_limit/fanout);
++          /*
++            We assume that each of the tested indexes is not correlated
++            with ref_key. Thus, to select first N records we have to scan
++            N/selectivity(ref_key) index entries. 
++            selectivity(ref_key) = #scanned_records/#table_records =
++            table->quick_condition_rows/table_records.
++            In any case we can't select more than #table_records.
++            N/(table->quick_condition_rows/table_records) > table_records 
++            <=> N > table->quick_condition_rows.
++          */ 
++          if (select_limit > table->quick_condition_rows)
++            select_limit= table_records;
++          else
++            select_limit= (ha_rows) (select_limit *
++                                     (double) table_records /
++                                      table->quick_condition_rows);
++          rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1];
++          set_if_bigger(rec_per_key, 1);
++          /*
++            Here we take into account the fact that rows are
++            accessed in sequences of rec_per_key records each.
++            Rows in such a sequence are supposed to be ordered
++            by rowid/primary key. When reading the data
++            in a sequence we'll touch not more pages than the
++            table file contains.
++            TODO. Use the formula for a disk sweep sequential access
++            to calculate the cost of accessing data rows for one 
++            index entry.
++	  */
++          index_scan_time= select_limit/rec_per_key *
++	                   MYSQL_MIN(rec_per_key, table->file->scan_time());
++          if ((ref_key < 0 && is_covering) || 
++              (ref_key < 0 && (group || table->force_index)) ||
++              index_scan_time < read_time)
++          {
++            ha_rows quick_records= table_records;
++            if ((is_best_covering && !is_covering) ||
++                (is_covering && ref_key_quick_rows < select_limit))
++              continue;
++            if (table->quick_keys.is_set(nr))
++              quick_records= table->quick_rows[nr];
++            if (best_key < 0 ||
++                (select_limit <= MYSQL_MIN(quick_records,best_records) ?
++                 keyinfo->key_parts < best_key_parts :
++                 quick_records < best_records))
++            {
++              best_key= nr;
++              best_key_parts= keyinfo->key_parts;
++              saved_best_key_parts= used_key_parts;
++              best_records= quick_records;
++              is_best_covering= is_covering;
++              best_key_direction= direction; 
++            }
++          }   
++	}      
++      }
++    }
++
++    /*
++      filesort() and join cache are usually faster than reading in 
++      index order and not using join cache, except when the chosen index is
++      the clustered primary key.
++    */
++    if ((select_limit >= table_records) &&
++        (tab->type == JT_ALL &&
++         tab->join->tables > tab->join->const_tables + 1) &&
++         ((unsigned) best_key != table->s->primary_key ||
++          !table->file->primary_key_is_clustered()))
++      goto use_filesort;
++
++    if (best_key >= 0)
++    {
++      if (table->quick_keys.is_set(best_key) && best_key != ref_key)
++      {
++        key_map map;
++        map.clear_all();       // Force the creation of quick select
++        map.set_bit(best_key); // only best_key.
++        select->quick= 0;
++        select->test_quick_select(join->thd, map, 0,
++                                  join->select_options & OPTION_FOUND_ROWS ?
++                                  HA_POS_ERROR :
++                                  join->unit->select_limit_cnt,
++                                  0);
++      }
++      order_direction= best_key_direction;
++      /*
++        saved_best_key_parts is actual number of used keyparts found by the
++        test_if_order_by_key function. It could differ from keyinfo->key_parts,
++        thus we have to restore it in case of desc order as it affects
++        QUICK_SELECT_DESC behaviour.
++      */
++      used_key_parts= (order_direction == -1) ?
++        saved_best_key_parts :  best_key_parts;
++    }
++    else
++      goto use_filesort;
++  } 
++
++check_reverse_order:                  
++  DBUG_ASSERT(order_direction != 0);
++
++  if (order_direction == -1)		// If ORDER BY ... DESC
++  {
++    if (select && select->quick)
++    {
++      /*
++	Don't reverse the sort order if it's already done.
++        (In some cases test_if_order_by_key() can be called multiple times.)
++      */
++      if (select->quick->reverse_sorted())
++        goto skipped_filesort;
++      else
++      {
++        int quick_type= select->quick->get_type();
++        if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
++            quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
++            quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
++            quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
++        {
++          tab->limit= 0;
++          goto use_filesort;               // Use filesort
++        }
++      }
++    }
++  }
++
++  /*
++    Update query plan with access pattern for doing 
++    ordered access according to what we have decided
++    above.
++  */
++  if (!no_changes) // We are allowed to update QEP
++  {
++    if (best_key >= 0)
++    {
++      bool quick_created= 
++        (select && select->quick && select->quick!=save_quick);
++
++      /* 
++         If ref_key used index tree reading only ('Using index' in EXPLAIN),
++         and best_key doesn't, then revert the decision.
++      */
++      if (!table->covering_keys.is_set(best_key))
++        table->set_keyread(FALSE);
++      if (!quick_created)
++      {
++        if (select)                  // Throw any existing quick select
++          select->quick= 0;          // Cleanup either reset to save_quick,
++                                     // or 'delete save_quick'
++        tab->index= best_key;
++        tab->read_first_record= order_direction > 0 ?
++                                join_read_first:join_read_last;
++        tab->type=JT_NEXT;           // Read with index_first(), index_next()
++
++        if (table->covering_keys.is_set(best_key))
++          table->set_keyread(TRUE);
++        table->file->ha_index_or_rnd_end();
++        if (tab->join->select_options & SELECT_DESCRIBE)
++        {
++          tab->ref.key= -1;
++          tab->ref.key_parts= 0;
++          if (select_limit < table->file->stats.records) 
++            tab->limit= select_limit;
++        }
++      }
++      else if (tab->type != JT_ALL)
++      {
++        /*
++          We're about to use a quick access to the table.
++          We need to change the access method so as the quick access
++          method is actually used.
++        */
++        DBUG_ASSERT(tab->select->quick);
++        tab->type=JT_ALL;
++        tab->use_quick=1;
++        tab->ref.key= -1;
++        tab->ref.key_parts=0;		// Don't use ref key.
++        tab->read_first_record= join_init_read_record;
++        if (tab->is_using_loose_index_scan())
++          tab->join->tmp_table_param.precomputed_group_by= TRUE;
++        /*
++          TODO: update the number of records in join->best_positions[tablenr]
++        */
++      }
++    } // best_key >= 0
++
++    if (order_direction == -1)		// If ORDER BY ... DESC
++    {
++      if (select && select->quick)
++      {
++        QUICK_SELECT_DESC *tmp;
++        /* ORDER BY range_key DESC */
++        tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
++                                    used_key_parts);
++        if (tmp && select->quick == save_quick)
++          save_quick= 0;    // ::QUICK_SELECT_DESC consumed it
++
++        if (!tmp || tmp->error)
++        {
++          delete tmp;
++          tab->limit= 0;
++          goto use_filesort;           // Reverse sort failed -> filesort
++        }
++        select->quick= tmp;
++      }
++      else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL &&
++               tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts)
++      {
++        /*
++          SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
++
++          Use a traversal function that starts by reading the last row
++          with key part (A) and then traverse the index backwards.
++        */
++        tab->read_first_record= join_read_last_key;
++        tab->read_record.read_record= join_read_prev_same;
++      }
++    }
++    else if (select && select->quick)
++      select->quick->sorted= 1;
++
++  } // QEP has been modified
++
++  /*
++    Cleanup:
++    We may have both a 'select->quick' and 'save_quick' (original)
++    at this point. Delete the one that we wan't use.
++    at this point. Delete the one that we won't use.
++
++skipped_filesort:
++  // Keep current (ordered) select->quick 
++  if (select && save_quick != select->quick)
++  {
++    delete save_quick;
++    save_quick= NULL;
++  }
++  DBUG_RETURN(1);
++
++use_filesort:
++  // Restore original save_quick
++  if (select && select->quick != save_quick)
++  {
++    delete select->quick;
++    select->quick= save_quick;
++  }
++  DBUG_RETURN(0);
++}
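++
++/*
++  Typical case where test_if_skip_sort_order() returns 1 (hypothetical
++  names, assuming the optimizer picks ref access on k_ab):
++
++    CREATE TABLE t1 (a INT, b INT, c INT, KEY k_ab (a, b));
++    SELECT * FROM t1 WHERE a = 1 ORDER BY b LIMIT 10;
++
++  Rows read through k_ab with a = 1 already come out ordered by b, so the
++  filesort can be skipped. For ORDER BY b DESC the check_reverse_order
++  path applies instead, provided the handler supports HA_READ_PREV.
++*/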
++
++
++/*
++  If not selecting by given key, create an index how records should be read
++
++  SYNOPSIS
++   create_sort_index()
++     thd		Thread handler
++     tab		Table to sort (in join structure)
++     order		How table should be sorted
++     filesort_limit	Max number of rows that needs to be sorted
++     select_limit	Max number of rows in final output
++		        Used to decide if we should use index or not
++     is_order_by        true if we are sorting on ORDER BY, false if GROUP BY
++                        Used to decide if we should use index or not     
++
++
++  IMPLEMENTATION
++   - If there is an index that can be used, 'tab' is modified to use
++     this index.
++   - If no index, create with filesort() an index file that can be used to
++     retrieve rows in order (should be done with 'read_record').
++     The sorted data is stored in tab->table and will be freed when calling
++     free_io_cache(tab->table).
++
++  RETURN VALUES
++    0		ok
++    -1		Some fatal error
++    1		No records
++*/
++
++static int
++create_sort_index(THD *thd, JOIN *join, ORDER *order,
++		  ha_rows filesort_limit, ha_rows select_limit,
++                  bool is_order_by)
++{
++  uint length= 0;
++  ha_rows examined_rows;
++  TABLE *table;
++  SQL_SELECT *select;
++  JOIN_TAB *tab;
++  DBUG_ENTER("create_sort_index");
++
++  if (join->tables == join->const_tables)
++    DBUG_RETURN(0);				// One row, no need to sort
++  tab=    join->join_tab + join->const_tables;
++  table=  tab->table;
++  select= tab->select;
++
++  /*
++    When there is SQL_BIG_RESULT do not sort using index for GROUP BY,
++    and thus force sorting on disk unless a group min-max optimization
++    is going to be used as it is applied now only for one table queries
++    with covering indexes.
++  */
++  if ((order != join->group_list || 
++       !(join->select_options & SELECT_BIG_RESULT) ||
++       (select && select->quick &&
++        select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
++      test_if_skip_sort_order(tab,order,select_limit,0, 
++                              is_order_by ?  &table->keys_in_use_for_order_by :
++                              &table->keys_in_use_for_group_by))
++    DBUG_RETURN(0);
++  for (ORDER *ord= join->order; ord; ord= ord->next)
++    length++;
++  if (!(join->sortorder= 
++        make_unireg_sortorder(order, &length, join->sortorder)))
++    goto err;				/* purecov: inspected */
++
++  table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
++                                             MYF(MY_WME | MY_ZEROFILL));
++  table->status=0;				// May be wrong if quick_select
++
++  // If table has a range, move it to select
++  if (select && !select->quick && tab->ref.key >= 0)
++  {
++    if (tab->quick)
++    {
++      select->quick=tab->quick;
++      tab->quick=0;
++      /* 
++        We can only use 'Only index' if quick key is same as ref_key
++        and in index_merge 'Only index' cannot be used
++      */
++      if (((uint) tab->ref.key != select->quick->index))
++        table->set_keyread(FALSE);
++    }
++    else
++    {
++      /*
++	We have a ref on a const;  Change this to a range that filesort
++	can use.
++	For impossible ranges (like when doing a lookup on NULL on a NOT NULL
++	field), quick will contain an empty record set.
++      */
++      if (!(select->quick= (tab->type == JT_FT ?
++			    new FT_SELECT(thd, table, tab->ref.key) :
++			    get_quick_select_for_ref(thd, table, &tab->ref, 
++                                                     tab->found_records))))
++	goto err;
++    }
++  }
++
++  /* Fill schema tables with data before filesort if it's necessary */
++  if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
++      get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
++    goto err;
++
++  if (table->s->tmp_table)
++    table->file->info(HA_STATUS_VARIABLE);	// Get record count
++  table->sort.found_records=filesort(thd, table,join->sortorder, length,
++                                     select, filesort_limit, 0,
++                                     &examined_rows);
++  tab->records= table->sort.found_records;	// For SQL_CALC_ROWS
++  if (select)
++  {
++    /*
++      We need to preserve tablesort's output resultset here, because
++      QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT (called by
++      SQL_SELECT::cleanup()) may free it assuming it's the result of the quick
++      select operation that we no longer need. Note that all the other parts of
++      this data structure are cleaned up when
++      QUICK_INDEX_MERGE_SELECT::get_next encounters end of data, so the next
++      SQL_SELECT::cleanup() call changes sort.io_cache alone.
++    */
++    IO_CACHE *tablesort_result_cache;
++
++    tablesort_result_cache= table->sort.io_cache;
++    table->sort.io_cache= NULL;
++
++    select->cleanup();				// filesort did select
++    tab->select= 0;
++    table->quick_keys.clear_all();  // as far as we cleanup select->quick
++    table->sort.io_cache= tablesort_result_cache;
++  }
++  tab->select_cond=0;
++  tab->last_inner= 0;
++  tab->first_unmatched= 0;
++  tab->type=JT_ALL;				// Read with normal read_record
++  tab->read_first_record= join_init_read_record;
++  tab->join->examined_rows+=examined_rows;
++  table->set_keyread(FALSE); // Restore if we used indexes
++  DBUG_RETURN(table->sort.found_records == HA_POS_ERROR);
++err:
++  DBUG_RETURN(-1);
++}
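++
++/*
++  Counterpart example for create_sort_index() (same hypothetical table as
++  above): for
++
++    SELECT * FROM t1 WHERE a = 1 ORDER BY c;
++
++  no index delivers rows ordered by c, so test_if_skip_sort_order() fails
++  and create_sort_index() sets up filesort() on t1, after which the rows
++  are read back in sorted order through read_record.
++*/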
++
++#ifdef NOT_YET
++/**
++  Add the HAVING criteria to table->select.
++*/
++
++static bool fix_having(JOIN *join, Item **having)
++{
++  (*having)->update_used_tables();	// Some tables may have been const
++  JOIN_TAB *table=&join->join_tab[join->const_tables];
++  table_map used_tables= join->const_table_map | table->table->map;
++
++  DBUG_EXECUTE("where",print_where(*having,"having", QT_ORDINARY););
++  Item* sort_table_cond=make_cond_for_table(*having,used_tables,used_tables);
++  if (sort_table_cond)
++  {
++    if (!table->select)
++      if (!(table->select=new SQL_SELECT))
++	return 1;
++    if (!table->select->cond)
++      table->select->cond=sort_table_cond;
++    else					// This should never happen
++      if (!(table->select->cond= new Item_cond_and(table->select->cond,
++						   sort_table_cond)) ||
++	  table->select->cond->fix_fields(join->thd, &table->select->cond))
++	return 1;
++    table->select_cond=table->select->cond;
++    table->select_cond->top_level_item();
++    DBUG_EXECUTE("where",print_where(table->select_cond,
++				     "select and having",
++                                     QT_ORDINARY););
++    *having=make_cond_for_table(*having,~ (table_map) 0,~used_tables);
++    DBUG_EXECUTE("where",
++                 print_where(*having,"having after make_cond", QT_ORDINARY););
++  }
++  return 0;
++}
++#endif
++
++
++/*****************************************************************************
++  Remove duplicates from tmp table
++  This should be recoded to add a unique index to the table and remove
++  duplicates
++  Table is a locked single thread table
++  fields is the number of fields to check (from the end)
++*****************************************************************************/
++
++static bool compare_record(TABLE *table, Field **ptr)
++{
++  for (; *ptr ; ptr++)
++  {
++    if ((*ptr)->cmp_offset(table->s->rec_buff_length))
++      return 1;
++  }
++  return 0;
++}
++
++static bool copy_blobs(Field **ptr)
++{
++  for (; *ptr ; ptr++)
++  {
++    if ((*ptr)->flags & BLOB_FLAG)
++      if (((Field_blob *) (*ptr))->copy())
++	return 1;				// Error
++  }
++  return 0;
++}
++
++static void free_blobs(Field **ptr)
++{
++  for (; *ptr ; ptr++)
++  {
++    if ((*ptr)->flags & BLOB_FLAG)
++      ((Field_blob *) (*ptr))->free();
++  }
++}
++
++
++static int
++remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
++{
++  int error;
++  ulong reclength,offset;
++  uint field_count;
++  THD *thd= join->thd;
++  DBUG_ENTER("remove_duplicates");
++
++  entry->reginfo.lock_type=TL_WRITE;
++
++  /* Calculate how many saved fields there is in list */
++  field_count=0;
++  List_iterator<Item> it(fields);
++  Item *item;
++  while ((item=it++))
++  {
++    if (item->get_tmp_table_field() && ! item->const_item())
++      field_count++;
++  }
++
++  if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) 
++  {                    // only const items with no OPTION_FOUND_ROWS
++    join->unit->select_limit_cnt= 1;		// Only send first row
++    DBUG_RETURN(0);
++  }
++  Field **first_field=entry->field+entry->s->fields - field_count;
++  offset= (field_count ? 
++           entry->field[entry->s->fields - field_count]->
++           offset(entry->record[0]) : 0);
++  reclength=entry->s->reclength-offset;
++
++  free_io_cache(entry);				// Safety
++  entry->file->info(HA_STATUS_VARIABLE);
++  if (entry->s->db_type() == heap_hton ||
++      (!entry->s->blob_fields &&
++       ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
++	thd->variables.sortbuff_size)))
++    error=remove_dup_with_hash_index(join->thd, entry,
++				     field_count, first_field,
++				     reclength, having);
++  else
++    error=remove_dup_with_compare(join->thd, entry, first_field, offset,
++				  having);
++
++  free_blobs(first_field);
++  DBUG_RETURN(error);
++}
++
++
++static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
++				   ulong offset, Item *having)
++{
++  handler *file=table->file;
++  char *org_record,*new_record;
++  uchar *record;
++  int error;
++  ulong reclength= table->s->reclength-offset;
++  DBUG_ENTER("remove_dup_with_compare");
++
++  org_record=(char*) (record=table->record[0])+offset;
++  new_record=(char*) table->record[1]+offset;
++
++  file->ha_rnd_init(1);
++  error=file->rnd_next(record);
++  for (;;)
++  {
++    if (thd->killed)
++    {
++      thd->send_kill_message();
++      error=0;
++      goto err;
++    }
++    if (error)
++    {
++      if (error == HA_ERR_RECORD_DELETED)
++      {
++        error= file->rnd_next(record);
++        continue;
++      }
++      if (error == HA_ERR_END_OF_FILE)
++	break;
++      goto err;
++    }
++    if (having && !having->val_int())
++    {
++      if ((error=file->ha_delete_row(record)))
++	goto err;
++      error=file->rnd_next(record);
++      continue;
++    }
++    if (copy_blobs(first_field))
++    {
++      my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0));
++      error=0;
++      goto err;
++    }
++    memcpy(new_record,org_record,reclength);
++
++    /* Read through rest of file and mark duplicated rows deleted */
++    bool found=0;
++    for (;;)
++    {
++      if ((error=file->rnd_next(record)))
++      {
++	if (error == HA_ERR_RECORD_DELETED)
++	  continue;
++	if (error == HA_ERR_END_OF_FILE)
++	  break;
++	goto err;
++      }
++      if (compare_record(table, first_field) == 0)
++      {
++	if ((error=file->ha_delete_row(record)))
++	  goto err;
++      }
++      else if (!found)
++      {
++	found=1;
++	file->position(record);	// Remember position
++      }
++    }
++    if (!found)
++      break;					// End of file
++    /* Restart search on next row */
++    error=file->restart_rnd_next(record,file->ref);
++  }
++
++  file->extra(HA_EXTRA_NO_CACHE);
++  DBUG_RETURN(0);
++err:
++  file->extra(HA_EXTRA_NO_CACHE);
++  if (error)
++    file->print_error(error,MYF(0));
++  DBUG_RETURN(1);
++}
++
++
++/**
++  Generate a hash index for each row to quickly find duplicate rows.
++
++  @note
++    Note that this will not work on tables with blobs!
++*/
++
++static int remove_dup_with_hash_index(THD *thd, TABLE *table,
++				      uint field_count,
++				      Field **first_field,
++				      ulong key_length,
++				      Item *having)
++{
++  uchar *key_buffer, *key_pos, *record=table->record[0];
++  int error;
++  handler *file= table->file;
++  ulong extra_length= ALIGN_SIZE(key_length)-key_length;
++  uint *field_lengths,*field_length;
++  HASH hash;
++  DBUG_ENTER("remove_dup_with_hash_index");
++
++  if (!my_multi_malloc(MYF(MY_WME),
++		       &key_buffer,
++		       (uint) ((key_length + extra_length) *
++			       (long) file->stats.records),
++		       &field_lengths,
++		       (uint) (field_count*sizeof(*field_lengths)),
++		       NullS))
++    DBUG_RETURN(1);
++
++  {
++    Field **ptr;
++    ulong total_length= 0;
++    for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
++    {
++      uint length= (*ptr)->sort_length();
++      (*field_length++)= length;
++      total_length+= length;
++    }
++    DBUG_PRINT("info",("field_count: %u  key_length: %lu  total_length: %lu",
++                       field_count, key_length, total_length));
++    DBUG_ASSERT(total_length <= key_length);
++    key_length= total_length;
++    extra_length= ALIGN_SIZE(key_length)-key_length;
++  }
++
++  if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0, 
++		key_length, (hash_get_key) 0, 0, 0))
++  {
++    my_free((char*) key_buffer,MYF(0));
++    DBUG_RETURN(1);
++  }
++
++  file->ha_rnd_init(1);
++  key_pos=key_buffer;
++  for (;;)
++  {
++    uchar *org_key_pos;
++    if (thd->killed)
++    {
++      thd->send_kill_message();
++      error=0;
++      goto err;
++    }
++    if ((error=file->rnd_next(record)))
++    {
++      if (error == HA_ERR_RECORD_DELETED)
++	continue;
++      if (error == HA_ERR_END_OF_FILE)
++	break;
++      goto err;
++    }
++    if (having && !having->val_int())
++    {
++      if ((error=file->ha_delete_row(record)))
++	goto err;
++      continue;
++    }
++
++    /* copy fields to key buffer */
++    org_key_pos= key_pos;
++    field_length=field_lengths;
++    for (Field **ptr= first_field ; *ptr ; ptr++)
++    {
++      (*ptr)->sort_string(key_pos,*field_length);
++      key_pos+= *field_length++;
++    }
++    /* Check if it exists before */
++    if (hash_search(&hash, org_key_pos, key_length))
++    {
++      /* Duplicate found; remove the row */
++      if ((error=file->ha_delete_row(record)))
++	goto err;
++    }
++    else
++    {
++      if (my_hash_insert(&hash, org_key_pos))
++        goto err;
++    }
++    key_pos+=extra_length;
++  }
++  my_free((char*) key_buffer,MYF(0));
++  hash_free(&hash);
++  file->extra(HA_EXTRA_NO_CACHE);
++  (void) file->ha_rnd_end();
++  DBUG_RETURN(0);
++
++err:
++  my_free((char*) key_buffer,MYF(0));
++  hash_free(&hash);
++  file->extra(HA_EXTRA_NO_CACHE);
++  (void) file->ha_rnd_end();
++  if (error)
++    file->print_error(error,MYF(0));
++  DBUG_RETURN(1);
++}
++
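++/*
++  The same idea as remove_dup_with_hash_index() above, as a minimal
++  standalone sketch: assuming each row can be reduced to a
++  fixed-length sort key (modelled here as a std::string), insert the
++  key of every row into a hash set and drop the row whenever the key
++  was already present.
++
++  @code
++    #include <string>
++    #include <unordered_set>
++    #include <vector>
++
++    void remove_dup_by_hash(std::vector<std::string> &rows)
++    {
++      std::unordered_set<std::string> seen;
++      std::vector<std::string> kept;
++      for (const std::string &key : rows)
++      {
++        if (seen.insert(key).second)   // first occurrence of this key
++          kept.push_back(key);         // keep the row
++        // otherwise the row is a duplicate and is dropped
++      }
++      rows.swap(kept);
++    }
++  @endcode
++*/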
++
++SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
++                                  SORT_FIELD *sortorder)
++{
++  uint count;
++  SORT_FIELD *sort,*pos;
++  DBUG_ENTER("make_unireg_sortorder");
++
++  count=0;
++  for (ORDER *tmp = order; tmp; tmp=tmp->next)
++    count++;
++  if (!sortorder)
++    sortorder= (SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD) *
++                                       (MYSQL_MAX(count, *length) + 1));
++  pos= sort= sortorder;
++
++  if (!pos)
++    return 0;
++
++  for (;order;order=order->next,pos++)
++  {
++    Item *item= order->item[0]->real_item();
++    pos->field= 0; pos->item= 0;
++    if (item->type() == Item::FIELD_ITEM)
++      pos->field= ((Item_field*) item)->field;
++    else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item())
++      pos->field= ((Item_sum*) item)->get_tmp_table_field();
++    else if (item->type() == Item::COPY_STR_ITEM)
++    {						// Blob patch
++      pos->item= ((Item_copy*) item)->get_item();
++    }
++    else
++      pos->item= *order->item;
++    pos->reverse=! order->asc;
++  }
++  *length=count;
++  DBUG_RETURN(sort);
++}
++
++
++/*****************************************************************************
++  Fill join cache with packed records
++  Records are stored in tab->cache.buffer; the last record is stored
++  with pointers to blobs to support very big records.
++******************************************************************************/
++
++static int
++join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
++{
++  reg1 uint i;
++  uint length, blobs;
++  size_t size;
++  CACHE_FIELD *copy,**blob_ptr;
++  JOIN_CACHE  *cache;
++  JOIN_TAB *join_tab;
++  DBUG_ENTER("join_init_cache");
++
++  cache= &tables[table_count].cache;
++  cache->fields=blobs=0;
++
++  join_tab=tables;
++  for (i=0 ; i < table_count ; i++,join_tab++)
++  {
++    if (!join_tab->used_fieldlength)		/* Not calced yet */
++      calc_used_field_length(thd, join_tab);
++    cache->fields+=join_tab->used_fields;
++    blobs+=join_tab->used_blobs;
++  }
++  if (!(cache->field=(CACHE_FIELD*)
++	sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)*
++		  sizeof(CACHE_FIELD*))))
++  {
++    my_free((uchar*) cache->buff,MYF(0));		/* purecov: inspected */
++    cache->buff=0;				/* purecov: inspected */
++    DBUG_RETURN(1);				/* purecov: inspected */
++  }
++  copy=cache->field;
++  blob_ptr=cache->blob_ptr=(CACHE_FIELD**)
++    (cache->field+cache->fields+table_count*2);
++
++  length=0;
++  for (i=0 ; i < table_count ; i++)
++  {
++    bool have_bit_fields= FALSE;
++    uint null_fields=0,used_fields;
++    Field **f_ptr,*field;
++    MY_BITMAP *read_set= tables[i].table->read_set;
++    for (f_ptr=tables[i].table->field,used_fields=tables[i].used_fields ;
++	 used_fields ;
++	 f_ptr++)
++    {
++      field= *f_ptr;
++      if (bitmap_is_set(read_set, field->field_index))
++      {
++	used_fields--;
++	length+=field->fill_cache_field(copy);
++	if (copy->type == CACHE_BLOB)
++	  (*blob_ptr++)=copy;
++	if (field->real_maybe_null())
++	  null_fields++;
++        if (field->type() == MYSQL_TYPE_BIT &&
++            ((Field_bit*)field)->bit_len)
++          have_bit_fields= TRUE;    
++	copy++;
++      }
++    }
++    /* Copy null bits from table */
++    if (null_fields || have_bit_fields)
++    {						/* must copy null bits */
++      copy->str= tables[i].table->null_flags;
++      copy->length= tables[i].table->s->null_bytes;
++      copy->type=0;
++      copy->field=0;
++      length+=copy->length;
++      copy++;
++      cache->fields++;
++    }
++    /* If outer join table, copy null_row flag */
++    if (tables[i].table->maybe_null)
++    {
++      copy->str= (uchar*) &tables[i].table->null_row;
++      copy->length=sizeof(tables[i].table->null_row);
++      copy->type=0;
++      copy->field=0;
++      length+=copy->length;
++      copy++;
++      cache->fields++;
++    }
++  }
++
++  cache->length=length+blobs*sizeof(char*);
++  cache->blobs=blobs;
++  *blob_ptr=0;					/* End sentinel */
++  size=MYSQL_MAX(thd->variables.join_buff_size, cache->length);
++  if (!(cache->buff=(uchar*) my_malloc(size,MYF(0))))
++    DBUG_RETURN(1);				/* Don't use cache */ /* purecov: inspected */
++  cache->end=cache->buff+size;
++  reset_cache_write(cache);
++  DBUG_RETURN(0);
++}
++
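++/*
++  A simplified standalone picture of the buffer that join_init_cache()
++  prepares (the PackedCache type below is illustrative, not the server
++  structure): the used columns of each inner row, plus a copy of the
++  null flags, are appended to one flat byte buffer, and the buffer is
++  considered full when the next record would no longer fit.
++
++  @code
++    #include <cstring>
++    #include <vector>
++
++    struct PackedCache
++    {
++      std::vector<unsigned char> buff;
++      size_t record_length;                 // packed size of one record
++      size_t used;
++
++      // Assumes buff_size >= rec_len.
++      PackedCache(size_t buff_size, size_t rec_len)
++        : buff(buff_size), record_length(rec_len), used(0) {}
++
++      // Returns true when the cache is full after storing this record.
++      bool store(const unsigned char *record)
++      {
++        std::memcpy(buff.data() + used, record, record_length);
++        used+= record_length;
++        return buff.size() - used < record_length;
++      }
++
++      void reset() { used= 0; }
++    };
++  @endcode
++*/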
++
++static ulong
++used_blob_length(CACHE_FIELD **ptr)
++{
++  uint length,blob_length;
++  for (length=0 ; *ptr ; ptr++)
++  {
++    Field_blob *field_blob= (Field_blob *) (*ptr)->field;
++    (*ptr)->blob_length=blob_length= field_blob->get_length();
++    length+=blob_length;
++    field_blob->get_ptr(&(*ptr)->str);
++  }
++  return length;
++}
++
++
++static bool
++store_record_in_cache(JOIN_CACHE *cache)
++{
++  uint length;
++  uchar *pos;
++  CACHE_FIELD *copy,*end_field;
++  bool last_record;
++
++  pos=cache->pos;
++  end_field=cache->field+cache->fields;
++
++  length=cache->length;
++  if (cache->blobs)
++    length+=used_blob_length(cache->blob_ptr);
++  if ((last_record= (length + cache->length > (size_t) (cache->end - pos))))
++    cache->ptr_record=cache->records;
++
++  /*
++    There is room in cache. Put record there
++  */
++  cache->records++;
++  for (copy=cache->field ; copy < end_field; copy++)
++  {
++    if (copy->type == CACHE_BLOB)
++    {
++      Field_blob *blob_field= (Field_blob *) copy->field;
++      if (last_record)
++      {
++	blob_field->get_image(pos, copy->length+sizeof(char*), 
++                              blob_field->charset());
++	pos+=copy->length+sizeof(char*);
++      }
++      else
++      {
++	blob_field->get_image(pos, copy->length, // blob length
++                              blob_field->charset());
++	memcpy(pos+copy->length,copy->str,copy->blob_length);  // Blob data
++	pos+=copy->length+copy->blob_length;
++      }
++    }
++    else
++    {
++      if (copy->type == CACHE_STRIPPED)
++      {
++	uchar *str,*end;
++        Field *field= copy->field;
++        if (field && field->maybe_null() && field->is_null())
++          end= str= copy->str;
++        else
++          for (str=copy->str,end= str+copy->length;
++               end > str && end[-1] == ' ' ;
++               end--) ;
++	length=(uint) (end-str);
++	memcpy(pos+2, str, length);
++        int2store(pos, length);
++	pos+= length+2;
++      }
++      else
++      {
++	memcpy(pos,copy->str,copy->length);
++	pos+=copy->length;
++      }
++    }
++  }
++  cache->pos=pos;
++  return last_record || (size_t) (cache->end - pos) < cache->length;
++}
++
++
++static void
++reset_cache_read(JOIN_CACHE *cache)
++{
++  cache->record_nr=0;
++  cache->pos=cache->buff;
++}
++
++
++static void reset_cache_write(JOIN_CACHE *cache)
++{
++  reset_cache_read(cache);
++  cache->records= 0;
++  cache->ptr_record= (uint) ~0;
++}
++
++
++static void
++read_cached_record(JOIN_TAB *tab)
++{
++  uchar *pos;
++  uint length;
++  bool last_record;
++  CACHE_FIELD *copy,*end_field;
++
++  last_record=tab->cache.record_nr++ == tab->cache.ptr_record;
++  pos=tab->cache.pos;
++
++  for (copy=tab->cache.field,end_field=copy+tab->cache.fields ;
++       copy < end_field;
++       copy++)
++  {
++    if (copy->type == CACHE_BLOB)
++    {
++      Field_blob *blob_field= (Field_blob *) copy->field;
++      if (last_record)
++      {
++	blob_field->set_image(pos, copy->length+sizeof(char*),
++                              blob_field->charset());
++	pos+=copy->length+sizeof(char*);
++      }
++      else
++      {
++	blob_field->set_ptr(pos, pos+copy->length);
++	pos+=copy->length + blob_field->get_length();
++      }
++    }
++    else
++    {
++      if (copy->type == CACHE_STRIPPED)
++      {
++        length= uint2korr(pos);
++	memcpy(copy->str, pos+2, length);
++	memset(copy->str+length, ' ', copy->length-length);
++	pos+= 2 + length;
++      }
++      else
++      {
++	memcpy(copy->str,pos,copy->length);
++	pos+=copy->length;
++      }
++    }
++  }
++  tab->cache.pos=pos;
++  return;
++}
++
++
++static bool
++cmp_buffer_with_ref(JOIN_TAB *tab)
++{
++  bool diff;
++  if (!(diff=tab->ref.key_err))
++  {
++    memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length);
++  }
++  if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, tab->table,
++                                            &tab->ref)) ||
++      diff)
++    return 1;
++  return memcmp(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length)
++    != 0;
++}
++
++
++bool
++cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
++{
++  enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
++  thd->count_cuted_fields= CHECK_FIELD_IGNORE;
++  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
++  bool result= 0;
++
++  for (store_key **copy=ref->key_copy ; *copy ; copy++)
++  {
++    if ((*copy)->copy() & 1)
++    {
++      result= 1;
++      break;
++    }
++  }
++  thd->count_cuted_fields= save_count_cuted_fields;
++  dbug_tmp_restore_column_map(table->write_set, old_map);
++  return result;
++}
++
++
++/*****************************************************************************
++  Group and order functions
++*****************************************************************************/
++
++/**
++  Resolve an ORDER BY or GROUP BY column reference.
++
++  Given a column reference (represented by 'order') from a GROUP BY or ORDER
++  BY clause, find the actual column it represents. If the column being
++  resolved is from the GROUP BY clause, the procedure searches the SELECT
++  list 'fields' and the columns in the FROM list 'tables'. If 'order' is from
++  the ORDER BY clause, only the SELECT list is being searched.
++
++  If 'order' is resolved to an Item, then order->item is set to the found
++  Item. If there is no item for the found column (that is, it was resolved
++  into a table field), order->item is 'fixed' and is added to all_fields and
++  ref_pointer_array.
++
++  ref_pointer_array and all_fields are updated.
++
++  @param[in] thd		     Pointer to current thread structure
++  @param[in,out] ref_pointer_array  All select, group and order by fields
++  @param[in] tables                 List of tables to search in (usually
++    FROM clause)
++  @param[in] order                  Column reference to be resolved
++  @param[in] fields                 List of fields to search in (usually
++    SELECT list)
++  @param[in,out] all_fields         All select, group and order by fields
++  @param[in] is_group_field         True if order is a GROUP field, false if
++    ORDER by field
++
++  @retval
++    FALSE if OK
++  @retval
++    TRUE  if error occurred
++*/
++
++static bool
++find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
++                   ORDER *order, List<Item> &fields, List<Item> &all_fields,
++                   bool is_group_field)
++{
++  Item *order_item= *order->item; /* The item from the GROUP/ORDER clause. */
++  Item::Type order_item_type;
++  Item **select_item; /* The corresponding item from the SELECT clause. */
++  Field *from_field;  /* The corresponding field from the FROM clause. */
++  uint counter;
++  enum_resolution_type resolution;
++
++  /*
++    Local SP variables may be int but are expressions, not positions.
++    (And they can't be used before fix_fields is called for them).
++  */
++  if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item())
++  {						/* Order by position */
++    uint count= (uint) order_item->val_int();
++    if (!count || count > fields.elements)
++    {
++      my_error(ER_BAD_FIELD_ERROR, MYF(0),
++               order_item->full_name(), thd->where);
++      return TRUE;
++    }
++    order->item= ref_pointer_array + count - 1;
++    order->in_field_list= 1;
++    order->counter= count;
++    order->counter_used= 1;
++    return FALSE;
++  }
++  /* Lookup the current GROUP/ORDER field in the SELECT clause. */
++  select_item= find_item_in_list(order_item, fields, &counter,
++                                 REPORT_EXCEPT_NOT_FOUND, &resolution);
++  if (!select_item)
++    return TRUE; /* The item is not unique, or some other error occurred. */
++
++
++  /* Check whether the resolved field is not ambiguous. */
++  if (select_item != not_found_item)
++  {
++    Item *view_ref= NULL;
++    /*
++      If we found the field not by its alias in the select list but by its
++      original field name, we should additionally check whether we have a
++      conflict for this name (in case we would perform a lookup in all tables).
++    */
++    if (resolution == RESOLVED_BEHIND_ALIAS && !order_item->fixed &&
++        order_item->fix_fields(thd, order->item))
++      return TRUE;
++
++    /* Lookup the current GROUP field in the FROM clause. */
++    order_item_type= order_item->type();
++    from_field= (Field*) not_found_field;
++    if ((is_group_field &&
++        order_item_type == Item::FIELD_ITEM) ||
++        order_item_type == Item::REF_ITEM)
++    {
++      from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables,
++                                       NULL, &view_ref, IGNORE_ERRORS, TRUE,
++                                       FALSE);
++      if (!from_field)
++        from_field= (Field*) not_found_field;
++    }
++
++    if (from_field == not_found_field ||
++        (from_field != view_ref_found ?
++         /* it is field of base table => check that fields are same */
++         ((*select_item)->type() == Item::FIELD_ITEM &&
++          ((Item_field*) (*select_item))->field->eq(from_field)) :
++         /*
++           it is a field of a view table => check that the references to the
++           translation table are the same
++         */
++         ((*select_item)->type() == Item::REF_ITEM &&
++          view_ref->type() == Item::REF_ITEM &&
++          ((Item_ref *) (*select_item))->ref ==
++          ((Item_ref *) view_ref)->ref)))
++    {
++      /*
++        If there is no such field in the FROM clause, or it is the same field
++        as the one found in the SELECT clause, then use the Item created for
++        the SELECT field. As a result if there was a derived field that
++        'shadowed' a table field with the same name, the table field will be
++        chosen over the derived field.
++      */
++      order->item= ref_pointer_array + counter;
++      order->in_field_list=1;
++      return FALSE;
++    }
++    else
++    {
++      /*
++        There is a field with the same name in the FROM clause. This
++        is the field that will be chosen. In this case we issue a
++        warning so the user knows that the field from the FROM clause
++        overshadows the column reference from the SELECT list.
++      */
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
++                          ER(ER_NON_UNIQ_ERROR),
++                          ((Item_ident*) order_item)->field_name,
++                          current_thd->where);
++    }
++  }
++
++  order->in_field_list=0;
++  /*
++    The call to order_item->fix_fields() means that here we resolve
++    'order_item' to a column from a table in the list 'tables', or to
++    a column in some outer query. Exactly because of the second case
++    we come to this point even if (select_item == not_found_item),
++    in spite of the fact that fix_fields() calls find_item_in_list() one more
++    time.
++
++    We check order_item->fixed because Item_func_group_concat can put
++    arguments for which fix_fields already was called.
++    
++    group_fix_field= TRUE is to resolve aliases from the SELECT list
++    without creating of Item_ref-s: JOIN::exec() wraps aliased items
++    in SELECT list with Item_copy items. To re-evaluate such a tree
++    that includes Item_copy items we have to refresh Item_copy caches,
++    but:
++      - filesort() never refresh Item_copy items,
++      - end_send_group() checks every record for group boundary by the
++        test_if_group_changed function that obtains data from these
++        Item_copy items, but the copy_fields function that
++        refreshes Item_copy items is called only after group boundaries -
++        that is a vicious circle.
++    So we prevent inclusion of Item_copy items.
++  */
++  bool save_group_fix_field= thd->lex->current_select->group_fix_field;
++  if (is_group_field)
++    thd->lex->current_select->group_fix_field= TRUE;
++  bool ret= (!order_item->fixed &&
++      (order_item->fix_fields(thd, order->item) ||
++       (order_item= *order->item)->check_cols(1) ||
++       thd->is_fatal_error));
++  thd->lex->current_select->group_fix_field= save_group_fix_field;
++  if (ret)
++    return TRUE; /* Wrong field. */
++
++  uint el= all_fields.elements;
++  all_fields.push_front(order_item); /* Add new field to field list. */
++  ref_pointer_array[el]= order_item;
++  order->item= ref_pointer_array + el;
++  return FALSE;
++}
++
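++/*
++  The resolution order implemented by find_order_in_list() above can be
++  summarised by this standalone sketch, a simplification that treats a
++  column reference as a bare name and glosses over the shadowing
++  warning issued when both the select list and a table match: a
++  positional reference picks the N-th select-list item, otherwise the
++  name is looked up in the select list and, failing that, among the
++  table columns.
++
++  @code
++    #include <cstdlib>
++    #include <string>
++    #include <vector>
++
++    // Returns the resolved expression, or an empty string when the
++    // reference cannot be resolved (an error in the real code).
++    std::string resolve_order_ref(const std::string &ref,
++                                  const std::vector<std::string> &select_list,
++                                  const std::vector<std::string> &columns)
++    {
++      char *end= NULL;
++      long pos= std::strtol(ref.c_str(), &end, 10);
++      if (end && *end == '\0' && pos >= 1 &&
++          (size_t) pos <= select_list.size())
++        return select_list[pos - 1];         // ORDER BY <position>
++      for (const std::string &item : select_list)
++        if (item == ref)
++          return item;                       // matched the SELECT list
++      for (const std::string &col : columns)
++        if (col == ref)
++          return col;                        // matched a table column
++      return std::string();                  // unknown column
++    }
++  @endcode
++*/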
++
++/**
++  Change order to point at item in select list.
++
++  If the item isn't a number and doesn't exist in the select list, add it to
++  the field list.
++*/
++
++int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
++		List<Item> &fields, List<Item> &all_fields, ORDER *order)
++{
++  thd->where="order clause";
++  for (; order; order=order->next)
++  {
++    if (find_order_in_list(thd, ref_pointer_array, tables, order, fields,
++			   all_fields, FALSE))
++      return 1;
++  }
++  return 0;
++}
++
++
++/**
++  Initialize the GROUP BY list.
++
++  @param thd			Thread handler
++  @param ref_pointer_array	We store references to all fields that were
++                               not in 'fields' here.
++  @param fields		All fields in the select part. Any item in
++                               'order' that is part of this list is replaced
++                               by a pointer to these fields.
++  @param all_fields		Total list of all unique fields used by the
++                               select. All items in 'order' that were not part
++                               of 'fields' will be added first to this list.
++  @param order			The fields we should do GROUP BY on.
++  @param hidden_group_fields	Pointer to flag that is set to 1 if we added
++                               any fields to all_fields.
++
++  @todo
++    change ER_WRONG_FIELD_WITH_GROUP to more detailed
++    ER_NON_GROUPING_FIELD_USED
++
++  @retval
++    0  ok
++  @retval
++    1  error (probably out of memory)
++*/
++
++int
++setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
++	    List<Item> &fields, List<Item> &all_fields, ORDER *order,
++	    bool *hidden_group_fields)
++{
++  *hidden_group_fields=0;
++  ORDER *ord;
++
++  if (!order)
++    return 0;				/* Everything is ok */
++
++  uint org_fields=all_fields.elements;
++
++  thd->where="group statement";
++  for (ord= order; ord; ord= ord->next)
++  {
++    if (find_order_in_list(thd, ref_pointer_array, tables, ord, fields,
++			   all_fields, TRUE))
++      return 1;
++    (*ord->item)->marker= UNDEF_POS;		/* Mark found */
++    if ((*ord->item)->with_sum_func)
++    {
++      my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name());
++      return 1;
++    }
++  }
++  if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY)
++  {
++    /*
++      Don't allow one to use fields that are not used in GROUP BY
++      For each select a list of field references that aren't under an
++      aggregate function is created. Each field in this list keeps the
++      position of the select list expression which it belongs to.
++
++      First we check an expression from the select list against the GROUP BY
++      list. If it's found there then it's ok. It's also ok if this expression
++      is a constant or an aggregate function. Otherwise we scan the list
++      of non-aggregated fields and if we find at least one field reference
++      that belongs to this expression and doesn't occur in the GROUP BY list
++      we throw an error. If there are no fields in the created list for a
++      select list expression this means that all fields in it are used under
++      aggregate functions.
++    */
++    Item *item;
++    Item_field *field;
++    int cur_pos_in_select_list= 0;
++    List_iterator<Item> li(fields);
++    List_iterator<Item_field> naf_it(thd->lex->current_select->non_agg_fields);
++
++    field= naf_it++;
++    while (field && (item=li++))
++    {
++      if (item->type() != Item::SUM_FUNC_ITEM && item->marker >= 0 &&
++          !item->const_item() &&
++          !(item->real_item()->type() == Item::FIELD_ITEM &&
++            item->used_tables() & OUTER_REF_TABLE_BIT))
++      {
++        while (field)
++        {
++          /* Skip fields from previous expressions. */
++          if (field->marker < cur_pos_in_select_list)
++            goto next_field;
++          /* Found a field from the next expression. */
++          if (field->marker > cur_pos_in_select_list)
++            break;
++          /*
++            Check whether the field occur in the GROUP BY list.
++            Throw the error later if the field isn't found.
++          */
++          for (ord= order; ord; ord= ord->next)
++            if ((*ord->item)->eq((Item*)field, 0))
++              goto next_field;
++          /*
++            TODO: change ER_WRONG_FIELD_WITH_GROUP to more detailed
++            ER_NON_GROUPING_FIELD_USED
++          */
++          my_error(ER_WRONG_FIELD_WITH_GROUP, MYF(0), field->full_name());
++          return 1;
++next_field:
++          field= naf_it++;
++        }
++      }
++      cur_pos_in_select_list++;
++    }
++  }
++  if (org_fields != all_fields.elements)
++    *hidden_group_fields=1;			// group fields is not used
++  return 0;
++}
++
++/**
++  Add fields which aren't used at the start of the field list.
++
++  @return
++    FALSE if ok
++*/
++
++static bool
++setup_new_fields(THD *thd, List<Item> &fields,
++		 List<Item> &all_fields, ORDER *new_field)
++{
++  Item	  **item;
++  uint counter;
++  enum_resolution_type not_used;
++  DBUG_ENTER("setup_new_fields");
++
++  thd->mark_used_columns= MARK_COLUMNS_READ;       // Not really needed, but...
++  for (; new_field ; new_field= new_field->next)
++  {
++    if ((item= find_item_in_list(*new_field->item, fields, &counter,
++				 IGNORE_ERRORS, &not_used)))
++      new_field->item=item;			/* Change to shared Item */
++    else
++    {
++      thd->where="procedure list";
++      if ((*new_field->item)->fix_fields(thd, new_field->item))
++	DBUG_RETURN(1); /* purecov: inspected */
++      all_fields.push_front(*new_field->item);
++      new_field->item=all_fields.head_ref();
++    }
++  }
++  DBUG_RETURN(0);
++}
++
++/**
++  Create a group by that consists of all non-const fields.
++
++  Try to use the fields in the order given by 'order' to allow one to
++  optimize away 'order by'.
++*/
++
++static ORDER *
++create_distinct_group(THD *thd, Item **ref_pointer_array,
++                      ORDER *order_list, List<Item> &fields,
++                      List<Item> &all_fields,
++		      bool *all_order_by_fields_used)
++{
++  List_iterator<Item> li(fields);
++  Item *item, **orig_ref_pointer_array= ref_pointer_array;
++  ORDER *order,*group,**prev;
++
++  *all_order_by_fields_used= 1;
++  while ((item=li++))
++    item->marker=0;			/* Marker that field is not used */
++
++  prev= &group;  group=0;
++  for (order=order_list ; order; order=order->next)
++  {
++    if (order->in_field_list)
++    {
++      ORDER *ord=(ORDER*) thd->memdup((char*) order,sizeof(ORDER));
++      if (!ord)
++	return 0;
++      *prev=ord;
++      prev= &ord->next;
++      (*ord->item)->marker=1;
++    }
++    else
++      *all_order_by_fields_used= 0;
++  }
++
++  li.rewind();
++  while ((item=li++))
++  {
++    if (!item->const_item() && !item->with_sum_func && !item->marker)
++    {
++      /* 
++        Don't put duplicate columns from the SELECT list into the 
++        GROUP BY list.
++      */
++      ORDER *ord_iter;
++      for (ord_iter= group; ord_iter; ord_iter= ord_iter->next)
++        if ((*ord_iter->item)->eq(item, 1))
++          goto next_item;
++      
++      ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER));
++      if (!ord)
++	return 0;
++
++      if (item->type() == Item::FIELD_ITEM &&
++          item->field_type() == MYSQL_TYPE_BIT)
++      {
++        /*
++          Because HEAP tables can't index BIT fields we need to use an
++          additional hidden field for grouping because later it will be
++          converted to a LONG field. Original field will remain of the
++          BIT type and will be returned to a client.
++        */
++        Item_field *new_item= new Item_field(thd, (Item_field*)item);
++        int el= all_fields.elements;
++        orig_ref_pointer_array[el]= new_item;
++        all_fields.push_front(new_item);
++        ord->item= orig_ref_pointer_array + el;
++      }
++      else
++      {
++        /*
++          We have here only field_list (not all_field_list), so we can use
++          simple indexing of ref_pointer_array (order in the array and in the
++          list are same)
++        */
++        ord->item= ref_pointer_array;
++      }
++      ord->asc=1;
++      *prev=ord;
++      prev= &ord->next;
++    }
++next_item:
++    ref_pointer_array++;
++  }
++  *prev=0;
++  return group;
++}
++
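++/*
++  The DISTINCT-to-GROUP-BY rewrite performed above, as a standalone
++  sketch with expressions modelled as plain strings (constant items
++  and the BIT special case are left out of this model): take the
++  ORDER BY columns that also appear in the select list, in ORDER BY
++  order, then append every remaining select-list column that is not
++  yet in the group.
++
++  @code
++    #include <algorithm>
++    #include <string>
++    #include <vector>
++
++    std::vector<std::string>
++    distinct_to_group(const std::vector<std::string> &select_list,
++                      const std::vector<std::string> &order_list)
++    {
++      std::vector<std::string> group;
++      for (const std::string &o : order_list)
++        if (std::find(select_list.begin(), select_list.end(), o) !=
++            select_list.end())
++          group.push_back(o);
++      for (const std::string &s : select_list)
++        if (std::find(group.begin(), group.end(), s) == group.end())
++          group.push_back(s);
++      return group;
++    }
++  @endcode
++*/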
++
++/**
++  Update join with count of the different type of fields.
++*/
++
++void
++count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param, 
++                  List<Item> &fields, bool reset_with_sum_func)
++{
++  List_iterator<Item> li(fields);
++  Item *field;
++
++  param->field_count=param->sum_func_count=param->func_count=
++    param->hidden_field_count=0;
++  param->quick_group=1;
++  while ((field=li++))
++  {
++    Item::Type real_type= field->real_item()->type();
++    if (real_type == Item::FIELD_ITEM)
++      param->field_count++;
++    else if (real_type == Item::SUM_FUNC_ITEM)
++    {
++      if (! field->const_item())
++      {
++	Item_sum *sum_item=(Item_sum*) field->real_item();
++        if (!sum_item->depended_from() ||
++            sum_item->depended_from() == select_lex)
++        {
++          if (!sum_item->quick_group)
++            param->quick_group=0;			// UDF SUM function
++          param->sum_func_count++;
++
++          for (uint i=0 ; i < sum_item->get_arg_count() ; i++)
++          {
++            if (sum_item->get_arg(i)->real_item()->type() == Item::FIELD_ITEM)
++              param->field_count++;
++            else
++              param->func_count++;
++          }
++        }
++        param->func_count++;
++      }
++    }
++    else
++    {
++      param->func_count++;
++      if (reset_with_sum_func)
++	field->with_sum_func=0;
++    }
++  }
++}
++
++
++/**
++  Return 1 if the second argument is a subpart of the first argument.
++
++  If parts of the first argument have a different direction, change them to
++  match the second argument (group is sorted like order).
++*/
++
++static bool
++test_if_subpart(ORDER *a,ORDER *b)
++{
++  for (; a && b; a=a->next,b=b->next)
++  {
++    if ((*a->item)->eq(*b->item,1))
++      a->asc=b->asc;
++    else
++      return 0;
++  }
++  return test(!b);
++}
++
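++/*
++  test_if_subpart() above is essentially a prefix test: it answers
++  whether its second list is a leading part of its first one (and it
++  also copies the sort direction over).  A standalone equivalent over
++  strings:
++
++  @code
++    #include <string>
++    #include <vector>
++
++    bool is_leading_part(const std::vector<std::string> &a,
++                         const std::vector<std::string> &b)
++    {
++      if (b.size() > a.size())
++        return false;
++      for (size_t i= 0; i < b.size(); i++)
++        if (a[i] != b[i])
++          return false;
++      return true;
++    }
++  @endcode
++*/
++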
++/**
++  Return the table if there is only one table in the sort order
++  and group and order are compatible, else return 0.
++*/
++
++static TABLE *
++get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables)
++{
++  table_map map= (table_map) 0;
++  DBUG_ENTER("get_sort_by_table");
++
++  if (!a)
++    a=b;					// Only one need to be given
++  else if (!b)
++    b=a;
++
++  for (; a && b; a=a->next,b=b->next)
++  {
++    if (!(*a->item)->eq(*b->item,1))
++      DBUG_RETURN(0);
++    map|=a->item[0]->used_tables();
++  }
++  if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT)))
++    DBUG_RETURN(0);
++
++  for (; !(map & tables->table->map); tables= tables->next_leaf) ;
++  if (map != tables->table->map)
++    DBUG_RETURN(0);				// More than one table
++  DBUG_PRINT("exit",("sort by table: %d",tables->table->tablenr));
++  DBUG_RETURN(tables->table);
++}
++
++
++/**
++  Calculate how big a buffer we need for comparing group entries.
++*/
++
++static void
++calc_group_buffer(JOIN *join,ORDER *group)
++{
++  uint key_length=0, parts=0, null_parts=0;
++
++  if (group)
++    join->group= 1;
++  for (; group ; group=group->next)
++  {
++    Item *group_item= *group->item;
++    Field *field= group_item->get_tmp_table_field();
++    if (field)
++    {
++      enum_field_types type;
++      if ((type= field->type()) == MYSQL_TYPE_BLOB)
++	key_length+=MAX_BLOB_WIDTH;		// Can't be used as a key
++      else if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_VAR_STRING)
++        key_length+= field->field_length + HA_KEY_BLOB_LENGTH;
++      else if (type == MYSQL_TYPE_BIT)
++      {
++        /* Bit is usually stored as a longlong key for group fields */
++        key_length+= 8;                         // Big enough
++      }
++      else
++	key_length+= field->pack_length();
++    }
++    else
++    { 
++      switch (group_item->result_type()) {
++      case REAL_RESULT:
++        key_length+= sizeof(double);
++        break;
++      case INT_RESULT:
++        key_length+= sizeof(longlong);
++        break;
++      case DECIMAL_RESULT:
++        key_length+= my_decimal_get_binary_size(group_item->max_length - 
++                                                (group_item->decimals ? 1 : 0),
++                                                group_item->decimals);
++        break;
++      case STRING_RESULT:
++      {
++        enum enum_field_types type= group_item->field_type();
++        /*
++          As items represented as DATE/TIME fields in the group buffer
++          have STRING_RESULT result type, we increase the length 
++          by 8 as maximum pack length of such fields.
++        */
++        if (type == MYSQL_TYPE_TIME ||
++            type == MYSQL_TYPE_DATE ||
++            type == MYSQL_TYPE_DATETIME ||
++            type == MYSQL_TYPE_TIMESTAMP)
++        {
++          key_length+= 8;
++        }
++        else if (type == MYSQL_TYPE_BLOB)
++          key_length+= MAX_BLOB_WIDTH;		// Can't be used as a key
++        else
++        {
++          /*
++            Group strings are taken as varstrings and require a length field.
++            A field is not yet created by create_tmp_field()
++            and the sizes should match up.
++          */
++          key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
++        }
++        break;
++      }
++      default:
++        /* This case should never be chosen */
++        DBUG_ASSERT(0);
++        my_error(ER_OUT_OF_RESOURCES, MYF(0));
++        join->thd->fatal_error();
++      }
++    }
++    parts++;
++    if (group_item->maybe_null)
++      null_parts++;
++  }
++  join->tmp_table_param.group_length=key_length+null_parts;
++  join->tmp_table_param.group_parts=parts;
++  join->tmp_table_param.group_null_parts=null_parts;
++}
++
++
++/**
++  Allocate group fields or take prepared (cached) ones.
++
++  @param main_join   join of current select
++  @param curr_join   current join (join of current select or temporary copy
++                     of it)
++
++  @retval
++    0   ok
++  @retval
++    1   failed
++*/
++
++static bool
++make_group_fields(JOIN *main_join, JOIN *curr_join)
++{
++  if (main_join->group_fields_cache.elements)
++  {
++    curr_join->group_fields= main_join->group_fields_cache;
++    curr_join->sort_and_group= 1;
++  }
++  else
++  {
++    if (alloc_group_fields(curr_join, curr_join->group_list))
++      return (1);
++    main_join->group_fields_cache= curr_join->group_fields;
++  }
++  return (0);
++}
++
++
++/**
++  Get a list of buffers for saving the last group.
++
++  Groups are saved in reverse order for an easier check loop.
++*/
++
++static bool
++alloc_group_fields(JOIN *join,ORDER *group)
++{
++  if (group)
++  {
++    for (; group ; group=group->next)
++    {
++      Cached_item *tmp=new_Cached_item(join->thd, *group->item);
++      if (!tmp || join->group_fields.push_front(tmp))
++	return TRUE;
++    }
++  }
++  join->sort_and_group=1;			/* Mark for do_select */
++  return FALSE;
++}
++
++
++static int
++test_if_group_changed(List<Cached_item> &list)
++{
++  DBUG_ENTER("test_if_group_changed");
++  List_iterator<Cached_item> li(list);
++  int idx= -1,i;
++  Cached_item *buff;
++
++  for (i=(int) list.elements-1 ; (buff=li++) ; i--)
++  {
++    if (buff->cmp())
++      idx=i;
++  }
++  DBUG_PRINT("info", ("idx: %d", idx));
++  DBUG_RETURN(idx);
++}
++
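++/*
++  What the cached-item loop above computes, as a standalone sketch
++  with group values modelled as strings: compare the current row
++  against the remembered values of the group expressions, update the
++  cache, and report the index of the first group column whose value
++  changed (or -1 when the group is unchanged).
++
++  @code
++    #include <string>
++    #include <vector>
++
++    // Updates 'cache' in place, much like Cached_item::cmp() does.
++    int group_changed(std::vector<std::string> &cache,
++                      const std::vector<std::string> &row)
++    {
++      int idx= -1;
++      for (size_t i= 0; i < cache.size() && i < row.size(); i++)
++      {
++        if (cache[i] != row[i])
++        {
++          cache[i]= row[i];
++          if (idx < 0)
++            idx= (int) i;
++        }
++      }
++      return idx;
++    }
++  @endcode
++*/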
++
++/**
++  Setup copy_fields to save fields at start of new group.
++
++  Setup copy_fields to save fields at start of new group
++
++  Only FIELD_ITEM:s and FUNC_ITEM:s need to be saved between groups.
++  Change the old item_field to use a new field that points at the saved field value.
++  This function is only called before use of send_fields.
++
++  @param thd                   THD pointer
++  @param param                 temporary table parameters
++  @param ref_pointer_array     array of pointers to top elements of field list
++  @param res_selected_fields   new list of items of select item list
++  @param res_all_fields        new list of all items
++  @param elements              number of elements in select item list
++  @param all_fields            all fields list
++
++  @todo
++    In most cases this result will be sent to the user.
++    This should be changed to use copy_int or copy_real depending
++    on how the value is to be used: In some cases this may be an
++    argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
++
++  @retval
++    0     ok
++  @retval
++    !=0   error
++*/
++
++bool
++setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
++		  Item **ref_pointer_array,
++		  List<Item> &res_selected_fields, List<Item> &res_all_fields,
++		  uint elements, List<Item> &all_fields)
++{
++  Item *pos;
++  List_iterator_fast<Item> li(all_fields);
++  Copy_field *copy= NULL;
++  IF_DBUG(Copy_field *copy_start);
++  res_selected_fields.empty();
++  res_all_fields.empty();
++  List_iterator_fast<Item> itr(res_all_fields);
++  List<Item> extra_funcs;
++  uint i, border= all_fields.elements - elements;
++  DBUG_ENTER("setup_copy_fields");
++
++  if (param->field_count && 
++      !(copy=param->copy_field= new Copy_field[param->field_count]))
++    goto err2;
++
++  param->copy_funcs.empty();
++  IF_DBUG(copy_start= copy);
++  for (i= 0; (pos= li++); i++)
++  {
++    Field *field;
++    uchar *tmp;
++    Item *real_pos= pos->real_item();
++    /*
++      Aggregate functions can be substituted for fields (by e.g. temp tables).
++      We need to filter those substituted fields out.
++    */
++    if (real_pos->type() == Item::FIELD_ITEM &&
++        !(real_pos != pos &&
++          ((Item_ref *)pos)->ref_type() == Item_ref::AGGREGATE_REF))
++    {
++      Item_field *item;
++      if (!(item= new Item_field(thd, ((Item_field*) real_pos))))
++	goto err;
++      if (pos->type() == Item::REF_ITEM)
++      {
++        /* preserve the names of the ref when dereferencing */
++        Item_ref *ref= (Item_ref *) pos;
++        item->db_name= ref->db_name;
++        item->table_name= ref->table_name;
++        item->name= ref->name;
++      }
++      pos= item;
++      if (item->field->flags & BLOB_FLAG)
++      {
++	if (!(pos= Item_copy::create(pos)))
++	  goto err;
++       /*
++         Item_copy_string::copy for function can call 
++         Item_copy_string::val_int for blob via Item_ref.
++         But if Item_copy_string::copy for blob isn't called before,
++         its value will be wrong,
++         so let's insert Item_copy_string for blobs in the beginning of 
++         copy_funcs
++         (to see full test case look at having.test, BUG #4358) 
++       */
++	if (param->copy_funcs.push_front(pos))
++	  goto err;
++      }
++      else
++      {
++	/* 
++	   set up save buffer and change result_field to point at 
++	   saved value
++	*/
++	field= item->field;
++	item->result_field=field->new_field(thd->mem_root,field->table, 1);
++        /*
++          We need to allocate one extra byte for null handling and
++          another extra byte to not get warnings from purify in
++          Field_string::val_int
++        */
++	if (!(tmp= (uchar*) sql_alloc(field->pack_length()+2)))
++	  goto err;
++        if (copy)
++        {
++          DBUG_ASSERT (param->field_count > (uint) (copy - copy_start));
++          copy->set(tmp, item->result_field);
++          item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1);
++#ifdef HAVE_purify
++          copy->to_ptr[copy->from_length]= 0;
++#endif
++          copy++;
++        }
++      }
++    }
++    else if ((real_pos->type() == Item::FUNC_ITEM ||
++	      real_pos->type() == Item::SUBSELECT_ITEM ||
++	      real_pos->type() == Item::CACHE_ITEM ||
++	      real_pos->type() == Item::COND_ITEM) &&
++	     !real_pos->with_sum_func)
++    {						// Save for send fields
++      pos= real_pos;
++      /* TODO:
++	 In most cases this result will be sent to the user.
++	 This should be changed to use copy_int or copy_real depending
++	 on how the value is to be used: In some cases this may be an
++	 argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
++      */
++      if (!(pos= Item_copy::create(pos)))
++	goto err;
++      if (i < border)                           // HAVING, ORDER and GROUP BY
++      {
++        if (extra_funcs.push_back(pos))
++          goto err;
++      }
++      else if (param->copy_funcs.push_back(pos))
++	goto err;
++    }
++    res_all_fields.push_back(pos);
++    ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
++      pos;
++  }
++  param->copy_field_end= copy;
++
++  for (i= 0; i < border; i++)
++    itr++;
++  itr.sublist(res_selected_fields, elements);
++  /*
++    Put elements from HAVING, ORDER BY and GROUP BY last to ensure that any
++    reference used in these will resolve to a item that is already calculated
++  */
++  param->copy_funcs.concat(&extra_funcs);
++
++  DBUG_RETURN(0);
++
++ err:
++  if (copy)
++    delete [] param->copy_field;			// This is never 0
++  param->copy_field=0;
++err2:
++  DBUG_RETURN(TRUE);
++}
++
++
++/**
++  Make a copy of all simple SELECT'ed items.
++
++  This is done at the start of a new group so that we can retrieve
++  these later when the group changes.
++*/
++
++void
++copy_fields(TMP_TABLE_PARAM *param)
++{
++  Copy_field *ptr=param->copy_field;
++  Copy_field *end=param->copy_field_end;
++
++  for (; ptr != end; ptr++)
++    (*ptr->do_copy)(ptr);
++
++  List_iterator_fast<Item> it(param->copy_funcs);
++  Item_copy *item;
++  while ((item = (Item_copy*) it++))
++    item->copy();
++}
++
++
++/**
++  Make an array of pointers to sum_functions to speed up
++  sum_func calculation.
++
++  @retval
++    0	ok
++  @retval
++    1	Error
++*/
++
++bool JOIN::alloc_func_list()
++{
++  uint func_count, group_parts;
++  DBUG_ENTER("alloc_func_list");
++
++  func_count= tmp_table_param.sum_func_count;
++  /*
++    If we are using rollup, we need a copy of the summary functions for
++    each level
++  */
++  if (rollup.state != ROLLUP::STATE_NONE)
++    func_count*= (send_group_parts+1);
++
++  group_parts= send_group_parts;
++  /*
++    If distinct, reserve memory for possible
++    distinct->group_by optimization
++  */
++  if (select_distinct)
++  {
++    group_parts+= fields_list.elements;
++    /*
++      If the ORDER clause is specified then it's possible that
++      it also will be optimized, so reserve space for it too
++    */
++    if (order)
++    {
++      ORDER *ord;
++      for (ord= order; ord; ord= ord->next)
++        group_parts++;
++    }
++  }
++
++  /* This must use calloc() as rollup_make_fields depends on this */
++  sum_funcs= (Item_sum**) thd->calloc(sizeof(Item_sum**) * (func_count+1) +
++				      sizeof(Item_sum***) * (group_parts+1));
++  sum_funcs_end= (Item_sum***) (sum_funcs+func_count+1);
++  DBUG_RETURN(sum_funcs == 0);
++}
++
++
++/**
++  Initialize 'sum_funcs' array with all Item_sum objects.
++
++  @param field_list        All items
++  @param send_fields       Items in select list
++  @param before_group_by   Set to 1 if this is called before GROUP BY handling
++  @param recompute         Set to TRUE if sum_funcs must be recomputed
++
++  @retval
++    0  ok
++  @retval
++    1  error
++*/
++
++bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
++			      bool before_group_by, bool recompute)
++{
++  List_iterator_fast<Item> it(field_list);
++  Item_sum **func;
++  Item *item;
++  DBUG_ENTER("make_sum_func_list");
++
++  if (*sum_funcs && !recompute)
++    DBUG_RETURN(FALSE); /* We have already initialized sum_funcs. */
++
++  func= sum_funcs;
++  while ((item=it++))
++  {
++    if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item() &&
++        (!((Item_sum*) item)->depended_from() ||
++         ((Item_sum *)item)->depended_from() == select_lex))
++      *func++= (Item_sum*) item;
++  }
++  if (before_group_by && rollup.state == ROLLUP::STATE_INITED)
++  {
++    rollup.state= ROLLUP::STATE_READY;
++    if (rollup_make_fields(field_list, send_fields, &func))
++      DBUG_RETURN(TRUE);			// Should never happen
++  }
++  else if (rollup.state == ROLLUP::STATE_NONE)
++  {
++    for (uint i=0 ; i <= send_group_parts ;i++)
++      sum_funcs_end[i]= func;
++  }
++  else if (rollup.state == ROLLUP::STATE_READY)
++    DBUG_RETURN(FALSE);                         // Don't put end marker
++  *func=0;					// End marker
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  Change all funcs and sum_funcs to fields in tmp table, and create
++  new list of all items.
++
++  @param thd                   THD pointer
++  @param ref_pointer_array     array of pointers to top elements of field list
++  @param res_selected_fields   new list of items of select item list
++  @param res_all_fields        new list of all items
++  @param elements              number of elements in select item list
++  @param all_fields            all fields list
++
++  @retval
++    0     ok
++  @retval
++    !=0   error
++*/
++
++static bool
++change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
++			 List<Item> &res_selected_fields,
++			 List<Item> &res_all_fields,
++			 uint elements, List<Item> &all_fields)
++{
++  List_iterator_fast<Item> it(all_fields);
++  Item *item_field,*item;
++  DBUG_ENTER("change_to_use_tmp_fields");
++
++  res_selected_fields.empty();
++  res_all_fields.empty();
++
++  uint i, border= all_fields.elements - elements;
++  for (i= 0; (item= it++); i++)
++  {
++    Field *field;
++
++    if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
++        (item->type() == Item::FUNC_ITEM &&
++         ((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC))
++      item_field= item;
++    else
++    {
++      if (item->type() == Item::FIELD_ITEM)
++      {
++	item_field= item->get_tmp_table_item(thd);
++      }
++      else if ((field= item->get_tmp_table_field()))
++      {
++	if (item->type() == Item::SUM_FUNC_ITEM && field->table->group)
++	  item_field= ((Item_sum*) item)->result_item(field);
++	else
++	  item_field= (Item*) new Item_field(field);
++	if (!item_field)
++	  DBUG_RETURN(TRUE);                    // Fatal error
++
++        if (item->real_item()->type() != Item::FIELD_ITEM)
++          field->orig_table= 0;
++	item_field->name= item->name;
++        if (item->type() == Item::REF_ITEM)
++        {
++          Item_field *ifield= (Item_field *) item_field;
++          Item_ref *iref= (Item_ref *) item;
++          ifield->table_name= iref->table_name;
++          ifield->db_name= iref->db_name;
++        }
++#ifndef DBUG_OFF
++	if (!item_field->name)
++	{
++	  char buff[256];
++	  String str(buff,sizeof(buff),&my_charset_bin);
++	  str.length(0);
++	  item->print(&str, QT_ORDINARY);
++	  item_field->name= sql_strmake(str.ptr(),str.length());
++	}
++#endif
++      }
++      else
++	item_field= item;
++    }
++    res_all_fields.push_back(item_field);
++    ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
++      item_field;
++  }
++
++  List_iterator_fast<Item> itr(res_all_fields);
++  for (i= 0; i < border; i++)
++    itr++;
++  itr.sublist(res_selected_fields, elements);
++  DBUG_RETURN(FALSE);
++}
++
++
++/**
++  Change all sum_func refs to fields to point at fields in tmp table.
++  Change all funcs to be fields in tmp table.
++
++  @param thd                   THD pointer
++  @param ref_pointer_array     array of pointers to top elements of field list
++  @param res_selected_fields   new list of items of select item list
++  @param res_all_fields        new list of all items
++  @param elements              number of elements in select item list
++  @param all_fields            all fields list
++
++  @retval
++    0	ok
++  @retval
++    1	error
++*/
++
++static bool
++change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
++			  List<Item> &res_selected_fields,
++			  List<Item> &res_all_fields, uint elements,
++			  List<Item> &all_fields)
++{
++  List_iterator_fast<Item> it(all_fields);
++  Item *item, *new_item;
++  res_selected_fields.empty();
++  res_all_fields.empty();
++
++  uint i, border= all_fields.elements - elements;
++  for (i= 0; (item= it++); i++)
++  {
++    res_all_fields.push_back(new_item= item->get_tmp_table_item(thd));
++    ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
++      new_item;
++  }
++
++  List_iterator_fast<Item> itr(res_all_fields);
++  for (i= 0; i < border; i++)
++    itr++;
++  itr.sublist(res_selected_fields, elements);
++
++  return thd->is_fatal_error;
++}
++
++
++
++/******************************************************************************
++  Code for calculating functions
++******************************************************************************/
++
++
++/**
++  Call ::setup for all sum functions.
++
++  @param thd           thread handler
++  @param func_ptr      sum function list
++
++  @retval
++    FALSE  ok
++  @retval
++    TRUE   error
++*/
++
++static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr)
++{
++  Item_sum *func;
++  DBUG_ENTER("setup_sum_funcs");
++  while ((func= *(func_ptr++)))
++  {
++    if (func->setup(thd))
++      DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++static void
++init_tmptable_sum_functions(Item_sum **func_ptr)
++{
++  Item_sum *func;
++  while ((func= *(func_ptr++)))
++    func->reset_field();
++}
++
++
++/** Update record 0 in tmp_table from record 1. */
++
++static void
++update_tmptable_sum_func(Item_sum **func_ptr,
++			 TABLE *tmp_table __attribute__((unused)))
++{
++  Item_sum *func;
++  while ((func= *(func_ptr++)))
++    func->update_field();
++}
++
++
++/** Copy result of sum functions to record in tmp_table. */
++
++static void
++copy_sum_funcs(Item_sum **func_ptr, Item_sum **end_ptr)
++{
++  for (; func_ptr != end_ptr ; func_ptr++)
++    (void) (*func_ptr)->save_in_result_field(1);
++  return;
++}
++
++
++static bool
++init_sum_functions(Item_sum **func_ptr, Item_sum **end_ptr)
++{
++  for (; func_ptr != end_ptr ;func_ptr++)
++  {
++    if ((*func_ptr)->reset())
++      return 1;
++  }
++  /* If rollup, calculate the upper sum levels */
++  for ( ; *func_ptr ; func_ptr++)
++  {
++    if ((*func_ptr)->add())
++      return 1;
++  }
++  return 0;
++}
++
++
++static bool
++update_sum_func(Item_sum **func_ptr)
++{
++  Item_sum *func;
++  for (; (func= (Item_sum*) *func_ptr) ; func_ptr++)
++    if (func->add())
++      return 1;
++  return 0;
++}
++
++/** 
++  Copy result of functions to record in tmp_table. 
++
++  Uses the thread pointer to check for errors in 
++  some of the val_xxx() methods called by the 
++  save_in_result_field() function.
++  TODO: make the Item::val_xxx() return error code
++
++  @param func_ptr  array of the function Items to copy to the tmp table
++  @param thd       pointer to the current thread for error checking
++  @retval
++    FALSE if OK
++  @retval
++    TRUE on error  
++*/
++
++bool
++copy_funcs(Item **func_ptr, const THD *thd)
++{
++  Item *func;
++  for (; (func = *func_ptr) ; func_ptr++)
++  {
++    func->save_in_result_field(1);
++    /*
++      Need to check the THD error state because Item::val_xxx() don't
++      return error code, but can generate errors
++      TODO: change it for a real status check when Item::val_xxx()
++      are extended to return status code.
++    */  
++    if (thd->is_error())
++      return TRUE;
++  }
++  return FALSE;
++}
++
++
++/**
++  Create a condition for a const reference and add this to the
++  current select for the table.
++*/
++
++static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
++{
++  DBUG_ENTER("add_ref_to_table_cond");
++  if (!join_tab->ref.key_parts)
++    DBUG_RETURN(FALSE);
++
++  Item_cond_and *cond=new Item_cond_and();
++  TABLE *table=join_tab->table;
++  int error= 0;
++  if (!cond)
++    DBUG_RETURN(TRUE);
++
++  for (uint i=0 ; i < join_tab->ref.key_parts ; i++)
++  {
++    Field *field=table->field[table->key_info[join_tab->ref.key].key_part[i].
++			      fieldnr-1];
++    Item *value=join_tab->ref.items[i];
++    cond->add(new Item_func_equal(new Item_field(field), value));
++  }
++  if (thd->is_fatal_error)
++    DBUG_RETURN(TRUE);
++
++  if (!cond->fixed)
++    cond->fix_fields(thd, (Item**)&cond);
++  if (join_tab->select)
++  {
++    if (join_tab->select->cond)
++      error=(int) cond->add(join_tab->select->cond);
++    join_tab->select_cond=join_tab->select->cond=cond;
++  }
++  else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
++                                          &error)))
++    join_tab->select_cond=cond;
++
++  DBUG_RETURN(error ? TRUE : FALSE);
++}
++
++
++/**
++  Free joins of subselect of this select.
++
++  @param thd      THD pointer
++  @param select   pointer to the st_select_lex whose subselects' joins we will free
++*/
++
++void free_underlaid_joins(THD *thd, SELECT_LEX *select)
++{
++  for (SELECT_LEX_UNIT *unit= select->first_inner_unit();
++       unit;
++       unit= unit->next_unit())
++    unit->cleanup();
++}
++
++/****************************************************************************
++  ROLLUP handling
++****************************************************************************/
++
++/**
++  Replace occurrences of group by fields in an expression by ref items.
++
++  The function replaces occurrences of group by fields in expr
++  by ref objects for these fields unless they are under aggregate
++  functions.
++  The function also corrects the value of the maybe_null attribute
++  for the items of all subexpressions containing group by fields.
++
++  @b EXAMPLES
++    @code
++      SELECT a+1 FROM t1 GROUP BY a WITH ROLLUP
++      SELECT SUM(a)+a FROM t1 GROUP BY a WITH ROLLUP 
++  @endcode
++
++  @b IMPLEMENTATION
++
++    The function recursively traverses the tree of the expr expression,
++    looks for occurrences of the group by fields that are not under
++    aggregate functions and replaces them for the corresponding ref items.
++
++  @note
++    This substitution is needed for GROUP BY queries with ROLLUP if the
++    SELECT list contains expressions over group by attributes.
++
++  @param thd                  reference to the context
++  @param expr                 expression to make replacement
++  @param group_list           list of references to group by items
++  @param changed        out:  returns 1 if item contains a replaced field item
++
++  @todo
++    - TODO: Some functions are not null-preserving. For those functions
++    updating of the maybe_null attribute is an overkill. 
++
++  @retval
++    0	if ok
++  @retval
++    1   on error
++*/
++
++static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list,
++                             bool *changed)
++{
++  if (expr->arg_count)
++  {
++    Name_resolution_context *context= &thd->lex->current_select->context;
++    Item **arg,**arg_end;
++    bool arg_changed= FALSE;
++    for (arg= expr->arguments(),
++         arg_end= expr->arguments()+expr->arg_count;
++         arg != arg_end; arg++)
++    {
++      Item *item= *arg;
++      if (item->type() == Item::FIELD_ITEM || item->type() == Item::REF_ITEM)
++      {
++        ORDER *group_tmp;
++        for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next)
++        {
++          if (item->eq(*group_tmp->item,0))
++          {
++            Item *new_item;
++            if (!(new_item= new Item_ref(context, group_tmp->item, 0,
++                                        item->name)))
++              return 1;                                 // fatal_error is set
++            thd->change_item_tree(arg, new_item);
++            arg_changed= TRUE;
++          }
++        }
++      }
++      else if (item->type() == Item::FUNC_ITEM)
++      {
++        if (change_group_ref(thd, (Item_func *) item, group_list, &arg_changed))
++          return 1;
++      }
++    }
++    if (arg_changed)
++    {
++      expr->maybe_null= 1;
++      *changed= TRUE;
++    }
++  }
++  return 0;
++}
++
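++/*
++  Stripped of the Item machinery, change_group_ref() above is a
++  recursive walk over an expression tree that replaces matching
++  leaves.  A standalone sketch over a toy tree (the Expr type and the
++  "ref(...)" stand-in are illustrative only):
++
++  @code
++    #include <memory>
++    #include <string>
++    #include <vector>
++
++    struct Expr
++    {
++      std::string name;                         // leaf: a column name
++      std::vector<std::unique_ptr<Expr> > args; // non-leaf: arguments
++    };
++
++    // Replace every leaf named in group_cols by a "ref" node.
++    // Returns true when at least one argument was replaced.
++    bool replace_group_refs(Expr &e,
++                            const std::vector<std::string> &group_cols)
++    {
++      bool changed= false;
++      for (std::unique_ptr<Expr> &arg : e.args)
++      {
++        if (arg->args.empty())                 // a leaf
++        {
++          for (const std::string &g : group_cols)
++            if (arg->name == g)
++            {
++              arg->name= "ref(" + g + ")";     // stand-in for Item_ref
++              changed= true;
++            }
++        }
++        else if (replace_group_refs(*arg, group_cols))
++          changed= true;
++      }
++      return changed;
++    }
++  @endcode
++*/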
++
++/** Allocate memory needed for other rollup functions. */
++
++bool JOIN::rollup_init()
++{
++  uint i,j;
++  Item **ref_array;
++
++  tmp_table_param.quick_group= 0;	// Can't create groups in tmp table
++  rollup.state= ROLLUP::STATE_INITED;
++
++  /*
++    Create pointers to the different sum function groups
++    These are updated by rollup_make_fields()
++  */
++  tmp_table_param.group_parts= send_group_parts;
++
++  if (!(rollup.null_items= (Item_null_result**) thd->alloc((sizeof(Item*) +
++                                                sizeof(Item**) +
++                                                sizeof(List<Item>) +
++				                ref_pointer_array_size)
++				                * send_group_parts )))
++    return 1;
++  
++  rollup.fields= (List<Item>*) (rollup.null_items + send_group_parts);
++  rollup.ref_pointer_arrays= (Item***) (rollup.fields + send_group_parts);
++  ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts);
++
++  /*
++    Prepare space for field list for the different levels
++    These will be filled up in rollup_make_fields()
++  */
++  for (i= 0 ; i < send_group_parts ; i++)
++  {
++    rollup.null_items[i]= new (thd->mem_root) Item_null_result();
++    List<Item> *rollup_fields= &rollup.fields[i];
++    rollup_fields->empty();
++    rollup.ref_pointer_arrays[i]= ref_array;
++    ref_array+= all_fields.elements;
++  }
++  for (i= 0 ; i < send_group_parts; i++)
++  {
++    for (j=0 ; j < fields_list.elements ; j++)
++      rollup.fields[i].push_back(rollup.null_items[i]);
++  }
++  List_iterator<Item> it(all_fields);
++  Item *item;
++  while ((item= it++))
++  {
++    ORDER *group_tmp;
++    bool found_in_group= 0;
++
++    for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next)
++    {
++      if (*group_tmp->item == item)
++      {
++        item->maybe_null= 1;
++        found_in_group= 1;
++        break;
++      }
++    }
++    if (item->type() == Item::FUNC_ITEM && !found_in_group)
++    {
++      bool changed= FALSE;
++      if (change_group_ref(thd, (Item_func *) item, group_list, &changed))
++        return 1;
++      /*
++        We have to prevent creation of a field in a temporary table for
++        an expression that contains GROUP BY attributes.
++        Marking the expression item as 'with_sum_func' will ensure this.
++      */ 
++      if (changed)
++        item->with_sum_func= 1;
++    }
++  }
++  return 0;
++}
++
++/**
++   Wrap all constant Items in GROUP BY list.
++
++   For ROLLUP queries each constant item referenced in GROUP BY list
++   is wrapped up into an Item_func object yielding the same value
++   as the constant item. The objects of the wrapper class are never
++   considered as constant items and besides they inherit all
++   properties of the Item_result_field class.
++   This wrapping allows us to ensure writing constant items
++   into temporary tables whenever the result of the ROLLUP
++   operation has to be written into a temporary table, e.g. when
++   ROLLUP is used together with DISTINCT in the SELECT list.
++   Usually when creating temporary tables for an intermediate
++   result we do not include fields for constant expressions.
++
++   @retval
++     0  if ok
++   @retval
++     1  on error
++*/
++
++bool JOIN::rollup_process_const_fields()
++{
++  ORDER *group_tmp;
++  Item *item;
++  List_iterator<Item> it(all_fields);
++
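++  /*
++    For each constant GROUP BY element, substitute an Item_func_rollup_const
++    wrapper both in all_fields and in any following GROUP BY entries that
++    refer to the same item, so the constant still gets a tmp-table column.
++  */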
++  for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next)
++  {
++    if (!(*group_tmp->item)->const_item())
++      continue;
++    while ((item= it++))
++    {
++      if (*group_tmp->item == item)
++      {
++        Item* new_item= new Item_func_rollup_const(item);
++        if (!new_item)
++          return 1;
++        new_item->fix_fields(thd, (Item **) 0);
++        thd->change_item_tree(it.ref(), new_item);
++        for (ORDER *tmp= group_tmp; tmp; tmp= tmp->next)
++        {
++          if (*tmp->item == item)
++            thd->change_item_tree(tmp->item, new_item);
++        }
++        break;
++      }
++    }
++    it.rewind();
++  }
++  return 0;
++}
++  
++
++/**
++  Fill up rollup structures with pointers to fields to use.
++
++  Creates copies of item_sum items for each sum level.
++
++  @param fields_arg		List of all fields (hidden and real ones)
++  @param sel_fields		Pointer to selected fields
++  @param func			Store here a pointer to all fields
++
++  @retval
++    0	if ok;
++    In this case func points to the next unused element.
++  @retval
++    1    on error
++*/
++
++bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
++			      Item_sum ***func)
++{
++  List_iterator_fast<Item> it(fields_arg);
++  Item *first_field= sel_fields.head();
++  uint level;
++
++  /*
++    Create field lists for the different levels
++
++    The idea here is to have a separate field list for each rollup level to
++    avoid all runtime checks of which columns should be NULL.
++
++    The list is stored in reverse order so that the sum functions end up in
++    func in an order that makes it easy to reset them with
++    init_sum_functions()
++
++    Assuming:  SELECT a, b, c, SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP
++
++    rollup.fields[0] will contain the list where a,b,c are NULL
++    rollup.fields[1] will contain the list where b,c are NULL
++    ...
++    rollup.ref_pointer_array[#] points to fields for rollup.fields[#]
++    ...
++    sum_funcs_end[0] points to all sum functions
++    sum_funcs_end[1] points to all sum functions, except grand totals
++    ...
++  */
++
++  for (level=0 ; level < send_group_parts ; level++)
++  {
++    uint i;
++    uint pos= send_group_parts - level -1;
++    bool real_fields= 0;
++    Item *item;
++    List_iterator<Item> new_it(rollup.fields[pos]);
++    Item **ref_array_start= rollup.ref_pointer_arrays[pos];
++    ORDER *start_group;
++
++    /* Point to first hidden field */
++    Item **ref_array= ref_array_start + fields_arg.elements-1;
++
++    /* Remember where the sum functions end for the previous level */
++    sum_funcs_end[pos+1]= *func;
++
++    /* Find the start of the group for this level */
++    for (i= 0, start_group= group_list ;
++	 i++ < pos ;
++	 start_group= start_group->next)
++      ;
++
++    it.rewind();
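++    /*
++      Hidden fields (those before first_field in fields_arg) are placed
++      backwards from the end of this level's ref array; the real select
++      list fields are placed forwards from its start.
++    */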
++    while ((item= it++))
++    {
++      if (item == first_field)
++      {
++	real_fields= 1;				// End of hidden fields
++	ref_array= ref_array_start;
++      }
++
++      if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item() &&
++          (!((Item_sum*) item)->depended_from() ||
++           ((Item_sum *)item)->depended_from() == select_lex))
++          
++      {
++	/*
++	  This is a top level summary function that must be replaced with
++	  a sum function that is reset for this level.
++
++	  NOTE: This code creates an object which is not that nice in a
++	  sub select.  Fortunately it's not common to have rollup in
++	  sub selects.
++	*/
++	item= item->copy_or_same(thd);
++	((Item_sum*) item)->make_unique();
++	*(*func)= (Item_sum*) item;
++	(*func)++;
++      }
++      else 
++      {
++	/* Check if this is something that is part of this group by */
++	ORDER *group_tmp;
++	for (group_tmp= start_group, i= pos ;
++             group_tmp ; group_tmp= group_tmp->next, i++)
++	{
++          if (*group_tmp->item == item)
++	  {
++	    /*
++	      This is an element that is used by the GROUP BY and should be
++	      set to NULL in this level
++	    */
++            Item_null_result *null_item= new (thd->mem_root) Item_null_result();
++            if (!null_item)
++              return 1;
++	    item->maybe_null= 1;		// Value will be null sometimes
++            null_item->result_field= item->get_tmp_table_field();
++            item= null_item;
++	    break;
++	  }
++	}
++      }
++      *ref_array= item;
++      if (real_fields)
++      {
++	(void) new_it++;			// Point to next item
++	new_it.replace(item);			// Replace previous
++	ref_array++;
++      }
++      else
++	ref_array--;
++    }
++  }
++  sum_funcs_end[0]= *func;			// Point to last function
++  return 0;
++}
++
++/**
++  Send all rollup levels higher than the current one to the client.
++
++  @b SAMPLE
++    @code
++      SELECT a, b, c, SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP
++  @endcode
++
++  @param idx		Level we are on:
++                        - 0 = Total sum level
++                        - 1 = First group changed  (a)
++                        - 2 = Second group changed (a,b)
++
++  @retval
++    0   ok
++  @retval
++    1   If send_data_failed()
++*/
++
++int JOIN::rollup_send_data(uint idx)
++{
++  uint i;
++  for (i= send_group_parts ; i-- > idx ; )
++  {
++    /* Get reference pointers to sum functions in place */
++    memcpy((char*) ref_pointer_array,
++	   (char*) rollup.ref_pointer_arrays[i],
++	   ref_pointer_array_size);
++    if ((!having || having->val_int()))
++    {
++      if (send_records < unit->select_limit_cnt && do_send_rows &&
++	  result->send_data(rollup.fields[i]))
++	return 1;
++      send_records++;
++    }
++  }
++  /* Restore ref_pointer_array */
++  set_items_ref_array(current_ref_pointer_array);
++  return 0;
++}
++
++/**
++  Write all rollup levels higher than the current one to a temp table.
++
++  @b SAMPLE
++    @code
++      SELECT a, b, SUM(c) FROM t1 GROUP BY a,b WITH ROLLUP
++  @endcode
++
++  @param idx                 Level we are on:
++                               - 0 = Total sum level
++                               - 1 = First group changed  (a)
++                               - 2 = Second group changed (a,b)
++  @param table               reference to temp table
++
++  @retval
++    0   ok
++  @retval
++    1   if write_data_failed()
++*/
++
++int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
++{
++  uint i;
++  for (i= send_group_parts ; i-- > idx ; )
++  {
++    /* Get reference pointers to sum functions in place */
++    memcpy((char*) ref_pointer_array,
++	   (char*) rollup.ref_pointer_arrays[i],
++	   ref_pointer_array_size);
++    if ((!having || having->val_int()))
++    {
++      int write_error;
++      Item *item;
++      List_iterator_fast<Item> it(rollup.fields[i]);
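++      /* Store this level's NULL placeholders into the tmp table record. */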
++      while ((item= it++))
++      {
++        if (item->type() == Item::NULL_ITEM && item->is_result_field())
++          item->save_in_result_field(1);
++      }
++      copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
++      if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
++      {
++	if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
++                                    write_error, 0))
++	  return 1;		     
++      }
++    }
++  }
++  /* Restore ref_pointer_array */
++  set_items_ref_array(current_ref_pointer_array);
++  return 0;
++}
++
++/**
++  Clear results if no rows are found for the group
++  (end_send_group/end_write_group)
++*/
++
++void JOIN::clear()
++{
++  clear_tables(this);
++  copy_fields(&tmp_table_param);
++
++  if (sum_funcs)
++  {
++    Item_sum *func, **func_ptr= sum_funcs;
++    while ((func= *(func_ptr++)))
++      func->clear();
++  }
++}
++
++/**
++  EXPLAIN handling.
++
++  Send a description of how the select will be executed to the client.
++*/
++
++static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
++			    bool distinct,const char *message)
++{
++  List<Item> field_list;
++  List<Item> item_list;
++  THD *thd=join->thd;
++  select_result *result=join->result;
++  Item *item_null= new Item_null();
++  CHARSET_INFO *cs= system_charset_info;
++  int quick_type;
++  DBUG_ENTER("select_describe");
++  DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s",
++		      (ulong)join->select_lex, join->select_lex->type,
++		      message ? message : "NULL"));
++  /* Don't log this into the slow query log */
++  thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED);
++  join->unit->offset_limit_cnt= 0;
++
++  /* 
++    NOTE: the number/types of items pushed into item_list must be in sync with
++    EXPLAIN column types as they're "defined" in THD::send_explain_fields()
++  */
++  if (message)
++  {
++    item_list.push_back(new Item_int((int32)
++				     join->select_lex->select_number));
++    item_list.push_back(new Item_string(join->select_lex->type,
++					strlen(join->select_lex->type), cs));
++    for (uint i=0 ; i < 7; i++)
++      item_list.push_back(item_null);
++    if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
++      item_list.push_back(item_null);
++    if (join->thd->lex->describe & DESCRIBE_EXTENDED)
++      item_list.push_back(item_null);
++  
++    item_list.push_back(new Item_string(message,strlen(message),cs));
++    if (result->send_data(item_list))
++      join->error= 1;
++  }
++  else if (join->select_lex == join->unit->fake_select_lex)
++  {
++    /* 
++      here we assume that the query will return at least two rows, so we
++      show "filesort" in EXPLAIN. Of course, sometimes we'll be wrong
++      and no filesort will be actually done, but executing all selects in
++      the UNION to provide precise EXPLAIN information will hardly be
++      appreciated :)
++    */
++    char table_name_buffer[NAME_LEN];
++    item_list.empty();
++    /* id */
++    item_list.push_back(new Item_null);
++    /* select_type */
++    item_list.push_back(new Item_string(join->select_lex->type,
++					strlen(join->select_lex->type),
++					cs));
++    /* table */
++    {
++      SELECT_LEX *sl= join->unit->first_select();
++      uint len= 6, lastop= 0;
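++      /*
++        Build a "<unionN,M,...>" name: len is the committed length of the
++        buffer ("<union" = 6 chars), lastop the length of the last "N,"
++        fragment; if the next fragment would not fit, the name is closed
++        with "...>" instead.
++      */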
++      memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
++      for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select())
++      {
++        len+= lastop;
++        lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
++                            "%u,", sl->select_number);
++      }
++      if (sl || len + lastop >= NAME_LEN)
++      {
++        memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
++        len+= 4;
++      }
++      else
++      {
++        len+= lastop;
++        table_name_buffer[len - 1]= '>';  // change ',' to '>'
++      }
++      item_list.push_back(new Item_string(table_name_buffer, len, cs));
++    }
++    /* partitions */
++    if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
++      item_list.push_back(item_null);
++    /* type */
++    item_list.push_back(new Item_string(join_type_str[JT_ALL],
++					  strlen(join_type_str[JT_ALL]),
++					  cs));
++    /* possible_keys */
++    item_list.push_back(item_null);
++    /* key*/
++    item_list.push_back(item_null);
++    /* key_len */
++    item_list.push_back(item_null);
++    /* ref */
++    item_list.push_back(item_null);
++    /* in_rows */
++    if (join->thd->lex->describe & DESCRIBE_EXTENDED)
++      item_list.push_back(item_null);
++    /* rows */
++    item_list.push_back(item_null);
++    /* extra */
++    if (join->unit->global_parameters->order_list.first)
++      item_list.push_back(new Item_string("Using filesort",
++					  14, cs));
++    else
++      item_list.push_back(new Item_string("", 0, cs));
++
++    if (result->send_data(item_list))
++      join->error= 1;
++  }
++  else
++  {
++    table_map used_tables=0;
++    for (uint i=0 ; i < join->tables ; i++)
++    {
++      JOIN_TAB *tab=join->join_tab+i;
++      TABLE *table=tab->table;
++      TABLE_LIST *table_list= tab->table->pos_in_table_list;
++      char buff[512]; 
++      char buff1[512], buff2[512], buff3[512];
++      char keylen_str_buf[64];
++      String extra(buff, sizeof(buff),cs);
++      char table_name_buffer[NAME_LEN];
++      String tmp1(buff1,sizeof(buff1),cs);
++      String tmp2(buff2,sizeof(buff2),cs);
++      String tmp3(buff3,sizeof(buff3),cs);
++      extra.length(0);
++      tmp1.length(0);
++      tmp2.length(0);
++      tmp3.length(0);
++
++      quick_type= -1;
++      item_list.empty();
++      /* id */
++      item_list.push_back(new Item_uint((uint32)
++				       join->select_lex->select_number));
++      /* select_type */
++      item_list.push_back(new Item_string(join->select_lex->type,
++					  strlen(join->select_lex->type),
++					  cs));
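++      /* Report quick selects as "range" / "index_merge" rather than ALL. */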
++      if (tab->type == JT_ALL && tab->select && tab->select->quick)
++      {
++        quick_type= tab->select->quick->get_type();
++        if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) ||
++            (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
++            (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION))
++          tab->type = JT_INDEX_MERGE;
++        else
++	  tab->type = JT_RANGE;
++      }
++      /* table */
++      if (table->derived_select_number)
++      {
++	/* Derived table name generation */
++	int len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
++			     "<derived%u>",
++			     table->derived_select_number);
++	item_list.push_back(new Item_string(table_name_buffer, len, cs));
++      }
++      else
++      {
++        TABLE_LIST *real_table= table->pos_in_table_list; 
++	item_list.push_back(new Item_string(real_table->alias,
++					    strlen(real_table->alias),
++					    cs));
++      }
++      /* "partitions" column */
++      if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
++      {
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++        partition_info *part_info;
++        if (!table->derived_select_number && 
++            (part_info= table->part_info))
++        {          
++          Item_string *item_str= new Item_string(cs);
++          make_used_partitions_str(part_info, &item_str->str_value);
++          item_list.push_back(item_str);
++        }
++        else
++          item_list.push_back(item_null);
++#else
++        /* just produce empty column if partitioning is not compiled in */
++        item_list.push_back(item_null); 
++#endif
++      }
++      /* "type" column */
++      item_list.push_back(new Item_string(join_type_str[tab->type],
++					  strlen(join_type_str[tab->type]),
++					  cs));
++      /* Build "possible_keys" value and add it to item_list */
++      if (!tab->keys.is_clear_all())
++      {
++        uint j;
++        for (j=0 ; j < table->s->keys ; j++)
++        {
++          if (tab->keys.is_set(j))
++          {
++            if (tmp1.length())
++              tmp1.append(',');
++            tmp1.append(table->key_info[j].name, 
++			strlen(table->key_info[j].name),
++			system_charset_info);
++          }
++        }
++      }
++      if (tmp1.length())
++	item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length(),cs));
++      else
++	item_list.push_back(item_null);
++
++      /* Build "key", "key_len", and "ref" values and add them to item_list */
++      if (tab->ref.key_parts)
++      {
++	KEY *key_info=table->key_info+ tab->ref.key;
++        register uint length;
++	item_list.push_back(new Item_string(key_info->name,
++					    strlen(key_info->name),
++					    system_charset_info));
++        length= longlong2str(tab->ref.key_length, keylen_str_buf, 10) - 
++                keylen_str_buf;
++        item_list.push_back(new Item_string(keylen_str_buf, length,
++                                            system_charset_info));
++	for (store_key **ref=tab->ref.key_copy ; *ref ; ref++)
++	{
++	  if (tmp2.length())
++	    tmp2.append(',');
++	  tmp2.append((*ref)->name(), strlen((*ref)->name()),
++		      system_charset_info);
++	}
++	item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
++      }
++      else if (tab->type == JT_NEXT)
++      {
++	KEY *key_info=table->key_info+ tab->index;
++        register uint length;
++	item_list.push_back(new Item_string(key_info->name,
++					    strlen(key_info->name),cs));
++        length= longlong2str(key_info->key_length, keylen_str_buf, 10) - 
++                keylen_str_buf;
++        item_list.push_back(new Item_string(keylen_str_buf, 
++                                            length,
++                                            system_charset_info));
++	item_list.push_back(item_null);
++      }
++      else if (tab->select && tab->select->quick)
++      {
++        tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3);
++	item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
++	item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs));
++	item_list.push_back(item_null);
++      }
++      else
++      {
++        if (table_list->schema_table &&
++            table_list->schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
++        {
++          const char *tmp_buff;
++          int f_idx;
++          if (table_list->has_db_lookup_value)
++          {
++            f_idx= table_list->schema_table->idx_field1;
++            tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
++            tmp2.append(tmp_buff, strlen(tmp_buff), cs);
++          }          
++          if (table_list->has_table_lookup_value)
++          {
++            if (table_list->has_db_lookup_value)
++              tmp2.append(',');
++            f_idx= table_list->schema_table->idx_field2;
++            tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
++            tmp2.append(tmp_buff, strlen(tmp_buff), cs);
++          }
++          if (tmp2.length())
++            item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
++          else
++            item_list.push_back(item_null);
++        }
++        else
++          item_list.push_back(item_null);
++	item_list.push_back(item_null);
++	item_list.push_back(item_null);
++      }
++      
++      /* Add "rows" field to item_list. */
++      if (table_list->schema_table)
++      {
++        /* in_rows */
++        if (join->thd->lex->describe & DESCRIBE_EXTENDED)
++          item_list.push_back(item_null);
++        /* rows */
++        item_list.push_back(item_null);
++      }
++      else
++      {
++        ha_rows examined_rows;
++        if (tab->select && tab->select->quick)
++          examined_rows= tab->select->quick->records;
++        else if (tab->type == JT_NEXT || tab->type == JT_ALL)
++        {
++          if (tab->limit)
++            examined_rows= tab->limit;
++          else
++          {
++            tab->table->file->info(HA_STATUS_VARIABLE);
++            examined_rows= tab->table->file->stats.records;
++          }
++        }
++        else
++          examined_rows=(ha_rows)join->best_positions[i].records_read; 
++ 
++        item_list.push_back(new Item_int((longlong) (ulonglong) examined_rows, 
++                                         MY_INT64_NUM_DECIMAL_DIGITS));
++
++        /* Add "filtered" field to item_list. */
++        if (join->thd->lex->describe & DESCRIBE_EXTENDED)
++        {
++          float f= 0.0; 
++          if (examined_rows)
++            f= (float) (100.0 * join->best_positions[i].records_read /
++                        examined_rows);
++          item_list.push_back(new Item_float(f, 2));
++        }
++      }
++
++      /* Build "Extra" field and add it to item_list. */
++      my_bool key_read=table->key_read;
++      if ((tab->type == JT_NEXT || tab->type == JT_CONST) &&
++          table->covering_keys.is_set(tab->index))
++	key_read=1;
++      if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT &&
++          !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row)
++        key_read=1;
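++      /* key_read: the scan can be resolved from the index alone (covering index). */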
++        
++      if (tab->info)
++	item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs));
++      else if (tab->packed_info & TAB_INFO_HAVE_VALUE)
++      {
++        if (tab->packed_info & TAB_INFO_USING_INDEX)
++          extra.append(STRING_WITH_LEN("; Using index"));
++        if (tab->packed_info & TAB_INFO_USING_WHERE)
++          extra.append(STRING_WITH_LEN("; Using where"));
++        if (tab->packed_info & TAB_INFO_FULL_SCAN_ON_NULL)
++          extra.append(STRING_WITH_LEN("; Full scan on NULL key"));
++        /* Skip initial "; "*/
++        const char *str= extra.ptr();
++        uint32 len= extra.length();
++        if (len)
++        {
++          str += 2;
++          len -= 2;
++        }
++	item_list.push_back(new Item_string(str, len, cs));
++      }
++      else
++      {
++        if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || 
++            quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
++            quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE)
++        {
++          extra.append(STRING_WITH_LEN("; Using "));
++          tab->select->quick->add_info_string(&extra);
++        }
++	if (tab->select)
++	{
++	  if (tab->use_quick == 2)
++	  {
++            /* 4 bits per 1 hex digit + terminating '\0' */
++            char buf[MAX_KEY / 4 + 1];
++            extra.append(STRING_WITH_LEN("; Range checked for each "
++                                         "record (index map: 0x"));
++            extra.append(tab->keys.print(buf));
++            extra.append(')');
++	  }
++	  else if (tab->select->cond)
++          {
++            const COND *pushed_cond= tab->table->file->pushed_cond;
++
++            if (thd->variables.engine_condition_pushdown && pushed_cond)
++            {
++              extra.append(STRING_WITH_LEN("; Using where with pushed "
++                                           "condition"));
++              if (thd->lex->describe & DESCRIBE_EXTENDED)
++              {
++                extra.append(STRING_WITH_LEN(": "));
++                ((COND *)pushed_cond)->print(&extra, QT_ORDINARY);
++              }
++            }
++            else
++              extra.append(STRING_WITH_LEN("; Using where"));
++          }
++	}
++        if (table_list->schema_table &&
++            table_list->schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
++        {
++          if (!table_list->table_open_method)
++            extra.append(STRING_WITH_LEN("; Skip_open_table"));
++          else if (table_list->table_open_method == OPEN_FRM_ONLY)
++            extra.append(STRING_WITH_LEN("; Open_frm_only"));
++          else
++            extra.append(STRING_WITH_LEN("; Open_full_table"));
++          if (table_list->has_db_lookup_value &&
++              table_list->has_table_lookup_value)
++            extra.append(STRING_WITH_LEN("; Scanned 0 databases"));
++          else if (table_list->has_db_lookup_value ||
++                   table_list->has_table_lookup_value)
++            extra.append(STRING_WITH_LEN("; Scanned 1 database"));
++          else
++            extra.append(STRING_WITH_LEN("; Scanned all databases"));
++        }
++	if (key_read)
++        {
++          if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
++            extra.append(STRING_WITH_LEN("; Using index for group-by"));
++          else
++            extra.append(STRING_WITH_LEN("; Using index"));
++        }
++	if (table->reginfo.not_exists_optimize)
++	  extra.append(STRING_WITH_LEN("; Not exists"));
++	if (need_tmp_table)
++	{
++	  need_tmp_table=0;
++	  extra.append(STRING_WITH_LEN("; Using temporary"));
++	}
++	if (need_order)
++	{
++	  need_order=0;
++	  extra.append(STRING_WITH_LEN("; Using filesort"));
++	}
++	if (distinct & test_all_bits(used_tables,thd->used_tables))
++	  extra.append(STRING_WITH_LEN("; Distinct"));
++
++        for (uint part= 0; part < tab->ref.key_parts; part++)
++        {
++          if (tab->ref.cond_guards[part])
++          {
++            extra.append(STRING_WITH_LEN("; Full scan on NULL key"));
++            break;
++          }
++        }
++        if (i > 0 && tab[-1].next_select == sub_select_cache)
++          extra.append(STRING_WITH_LEN("; Using join buffer"));
++        
++        /* Skip initial "; "*/
++        const char *str= extra.ptr();
++        uint32 len= extra.length();
++        if (len)
++        {
++          str += 2;
++          len -= 2;
++        }
++	item_list.push_back(new Item_string(str, len, cs));
++      }
++      // For next iteration
++      used_tables|=table->map;
++      if (result->send_data(item_list))
++	join->error= 1;
++    }
++  }
++  for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
++       unit;
++       unit= unit->next_unit())
++  {
++    if (mysql_explain_union(thd, unit, result))
++      DBUG_VOID_RETURN;
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
++{
++  DBUG_ENTER("mysql_explain_union");
++  bool res= 0;
++  SELECT_LEX *first= unit->first_select();
++
++  for (SELECT_LEX *sl= first;
++       sl;
++       sl= sl->next_select())
++  {
++    // drop UNCACHEABLE_EXPLAIN, because it is for internal usage only
++    uint8 uncacheable= (sl->uncacheable & ~UNCACHEABLE_EXPLAIN);
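++    /*
++      Pick the EXPLAIN select_type: the top-level select is PRIMARY (or
++      SIMPLE when it has no subqueries or unions); the first select of an
++      inner unit is DERIVED or a [DEPENDENT|UNCACHEABLE] SUBQUERY; any
++      following select of a union is a [DEPENDENT|UNCACHEABLE] UNION.
++    */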
++    sl->type= (((&thd->lex->select_lex)==sl)?
++	       (sl->first_inner_unit() || sl->next_select() ? 
++		"PRIMARY" : "SIMPLE"):
++	       ((sl == first)?
++		((sl->linkage == DERIVED_TABLE_TYPE) ?
++		 "DERIVED":
++		 ((uncacheable & UNCACHEABLE_DEPENDENT) ?
++		  "DEPENDENT SUBQUERY":
++		  (uncacheable?"UNCACHEABLE SUBQUERY":
++		   "SUBQUERY"))):
++		((uncacheable & UNCACHEABLE_DEPENDENT) ?
++		 "DEPENDENT UNION":
++		 uncacheable?"UNCACHEABLE UNION":
++		 "UNION")));
++    sl->options|= SELECT_DESCRIBE;
++  }
++  if (unit->is_union())
++  {
++    unit->fake_select_lex->select_number= UINT_MAX; // just for initialization
++    unit->fake_select_lex->type= "UNION RESULT";
++    unit->fake_select_lex->options|= SELECT_DESCRIBE;
++    if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE)))
++      res= unit->exec();
++    res|= unit->cleanup();
++  }
++  else
++  {
++    thd->lex->current_select= first;
++    unit->set_limit(unit->global_parameters);
++    res= mysql_select(thd, &first->ref_pointer_array,
++			first->table_list.first,
++			first->with_wild, first->item_list,
++			first->where,
++			first->order_list.elements +
++			first->group_list.elements,
++			first->order_list.first,
++			first->group_list.first,
++			first->having,
++			thd->lex->proc_list.first,
++			first->options | thd->options | SELECT_DESCRIBE,
++			result, unit, first);
++  }
++  DBUG_RETURN(res || thd->is_error());
++}
++
++
++/**
++  Print joins from the FROM clause.
++
++  @param thd     thread handler
++  @param str     string where table should be printed
++  @param tables  list of tables in join
++  @param query_type  type of the query being generated
++*/
++
++static void print_join(THD *thd,
++                       String *str,
++                       List<TABLE_LIST> *tables,
++                       enum_query_type query_type)
++{
++  /* List is reversed => we should reverse it before using */
++  List_iterator_fast<TABLE_LIST> ti(*tables);
++  TABLE_LIST **table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) *
++                                                tables->elements);
++  if (table == 0)
++    return;  // out of memory
++
++  for (TABLE_LIST **t= table + (tables->elements - 1); t >= table; t--)
++    *t= ti++;
++
++  DBUG_ASSERT(tables->elements >= 1);
++  (*table)->print(thd, str, query_type);
++
++  TABLE_LIST **end= table + tables->elements;
++  for (TABLE_LIST **tbl= table + 1; tbl < end; tbl++)
++  {
++    TABLE_LIST *curr= *tbl;
++    if (curr->outer_join)
++    {
++      /* MySQL converts right to left joins */
++      str->append(STRING_WITH_LEN(" left join "));
++    }
++    else if (curr->straight)
++      str->append(STRING_WITH_LEN(" straight_join "));
++    else
++      str->append(STRING_WITH_LEN(" join "));
++    curr->print(thd, str, query_type);
++    if (curr->on_expr)
++    {
++      str->append(STRING_WITH_LEN(" on("));
++      curr->on_expr->print(str, query_type);
++      str->append(')');
++    }
++  }
++}
++
++
++/**
++  @brief Print an index hint
++
++  @details Prints out the USE|FORCE|IGNORE index hint.
++
++  @param      thd         the current thread
++  @param[out] str         appends the index hint here
++
++  The hint kind (USE, FORCE or IGNORE INDEX) and the index name are taken
++  from this object's type and key_name members.
++*/
++
++void 
++Index_hint::print(THD *thd, String *str)
++{
++  switch (type)
++  {
++    case INDEX_HINT_IGNORE: str->append(STRING_WITH_LEN("IGNORE INDEX")); break;
++    case INDEX_HINT_USE:    str->append(STRING_WITH_LEN("USE INDEX")); break;
++    case INDEX_HINT_FORCE:  str->append(STRING_WITH_LEN("FORCE INDEX")); break;
++  }
++  str->append (STRING_WITH_LEN(" ("));
++  if (key_name.length)
++  {
++    if (thd && !my_strnncoll(system_charset_info,
++                             (const uchar *)key_name.str, key_name.length, 
++                             (const uchar *)primary_key_name, 
++                             strlen(primary_key_name)))
++      str->append(primary_key_name);
++    else
++      append_identifier(thd, str, key_name.str, key_name.length);
++  }
++  str->append(')');
++}
++
++
++/**
++  Print table as it should be in join list.
++
++  @param str   string where table should be printed
++*/
++
++void TABLE_LIST::print(THD *thd, String *str, enum_query_type query_type)
++{
++  if (nested_join)
++  {
++    str->append('(');
++    print_join(thd, str, &nested_join->join_list, query_type);
++    str->append(')');
++  }
++  else
++  {
++    const char *cmp_name;                         // Name to compare with alias
++    if (view_name.str)
++    {
++      // A view
++
++      if (!(belong_to_view &&
++            belong_to_view->compact_view_format))
++      {
++        append_identifier(thd, str, view_db.str, view_db.length);
++        str->append('.');
++      }
++      append_identifier(thd, str, view_name.str, view_name.length);
++      cmp_name= view_name.str;
++    }
++    else if (derived)
++    {
++      // A derived table
++      str->append('(');
++      derived->print(str, query_type);
++      str->append(')');
++      cmp_name= "";                               // Force printing of alias
++    }
++    else
++    {
++      // A normal table
++
++      if (!(belong_to_view &&
++            belong_to_view->compact_view_format))
++      {
++        append_identifier(thd, str, db, db_length);
++        str->append('.');
++      }
++      if (schema_table)
++      {
++        append_identifier(thd, str, schema_table_name,
++                          strlen(schema_table_name));
++        cmp_name= schema_table_name;
++      }
++      else
++      {
++        append_identifier(thd, str, table_name, table_name_length);
++        cmp_name= table_name;
++      }
++    }
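++    /* Print an alias only when it differs from the name printed above. */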
++    if (my_strcasecmp(table_alias_charset, cmp_name, alias))
++    {
++      char t_alias_buff[MAX_ALIAS_NAME];
++      const char *t_alias= alias;
++
++      str->append(' ');
++      if (lower_case_table_names== 1)
++      {
++        if (alias && alias[0])
++        {
++          strmov(t_alias_buff, alias);
++          my_casedn_str(files_charset_info, t_alias_buff);
++          t_alias= t_alias_buff;
++        }
++      }
++
++      append_identifier(thd, str, t_alias, strlen(t_alias));
++    }
++
++    if (index_hints)
++    {
++      List_iterator<Index_hint> it(*index_hints);
++      Index_hint *hint;
++
++      while ((hint= it++))
++      {
++        str->append (STRING_WITH_LEN(" "));
++        hint->print (thd, str);
++      }
++    }
++  }
++}
++
++
++void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
++{
++  /* QQ: thd may not be set for sub queries, but this should be fixed */
++  if (!thd)
++    thd= current_thd;
++
++  str->append(STRING_WITH_LEN("select "));
++
++  /* First add options */
++  if (options & SELECT_STRAIGHT_JOIN)
++    str->append(STRING_WITH_LEN("straight_join "));
++  if ((thd->lex->lock_option == TL_READ_HIGH_PRIORITY) &&
++      (this == &thd->lex->select_lex))
++    str->append(STRING_WITH_LEN("high_priority "));
++  if (options & SELECT_DISTINCT)
++    str->append(STRING_WITH_LEN("distinct "));
++  if (options & SELECT_SMALL_RESULT)
++    str->append(STRING_WITH_LEN("sql_small_result "));
++  if (options & SELECT_BIG_RESULT)
++    str->append(STRING_WITH_LEN("sql_big_result "));
++  if (options & OPTION_BUFFER_RESULT)
++    str->append(STRING_WITH_LEN("sql_buffer_result "));
++  if (options & OPTION_FOUND_ROWS)
++    str->append(STRING_WITH_LEN("sql_calc_found_rows "));
++  switch (sql_cache)
++  {
++    case SQL_NO_CACHE:
++      str->append(STRING_WITH_LEN("sql_no_cache "));
++      break;
++    case SQL_CACHE:
++      str->append(STRING_WITH_LEN("sql_cache "));
++      break;
++    case SQL_CACHE_UNSPECIFIED:
++      break;
++    default:
++      DBUG_ASSERT(0);
++  }
++
++  //Item List
++  bool first= 1;
++  List_iterator_fast<Item> it(item_list);
++  Item *item;
++  while ((item= it++))
++  {
++    if (first)
++      first= 0;
++    else
++      str->append(',');
++
++    if (master_unit()->item && item->is_autogenerated_name)
++    {
++      /*
++        Do not print auto-generated aliases in subqueries. They have no
++        purpose in a view definition or other contexts where the query is
++        printed.
++      */
++      item->print(str, query_type);
++    }
++    else
++      item->print_item_w_name(str, query_type);
++  }
++
++  /*
++    from clause
++    TODO: support USING/FORCE/IGNORE index
++  */
++  if (table_list.elements)
++  {
++    str->append(STRING_WITH_LEN(" from "));
++    /* go through join tree */
++    print_join(thd, str, &top_join_list, query_type);
++  }
++  else if (where)
++  {
++    /*
++      "SELECT 1 FROM DUAL WHERE 2" should not be printed as 
++      "SELECT 1 WHERE 2": the 1st syntax is valid, but the 2nd is not.
++    */
++    str->append(STRING_WITH_LEN(" from DUAL "));
++  }
++
++  // Where
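++  /* Use the JOIN's (possibly optimized) condition when a JOIN exists. */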
++  Item *cur_where= where;
++  if (join)
++    cur_where= join->conds;
++  if (cur_where || cond_value != Item::COND_UNDEF)
++  {
++    str->append(STRING_WITH_LEN(" where "));
++    if (cur_where)
++      cur_where->print(str, query_type);
++    else
++      str->append(cond_value != Item::COND_FALSE ? "1" : "0");
++  }
++
++  // group by & olap
++  if (group_list.elements)
++  {
++    str->append(STRING_WITH_LEN(" group by "));
++    print_order(str, group_list.first, query_type);
++    switch (olap)
++    {
++      case CUBE_TYPE:
++	str->append(STRING_WITH_LEN(" with cube"));
++	break;
++      case ROLLUP_TYPE:
++	str->append(STRING_WITH_LEN(" with rollup"));
++	break;
++      default:
++	;  //satisfy compiler
++    }
++  }
++
++  // having
++  Item *cur_having= having;
++  if (join)
++    cur_having= join->having;
++
++  if (cur_having || having_value != Item::COND_UNDEF)
++  {
++    str->append(STRING_WITH_LEN(" having "));
++    if (cur_having)
++      cur_having->print(str, query_type);
++    else
++      str->append(having_value != Item::COND_FALSE ? "1" : "0");
++  }
++
++  if (order_list.elements)
++  {
++    str->append(STRING_WITH_LEN(" order by "));
++    print_order(str, order_list.first, query_type);
++  }
++
++  // limit
++  print_limit(thd, str, query_type);
++
++  // PROCEDURE unsupported here
++}
++
++
++/**
++  change select_result object of JOIN.
++
++  @param res		new select_result object
++
++  @retval
++    FALSE   OK
++  @retval
++    TRUE    error
++*/
++
++bool JOIN::change_result(select_result *res)
++{
++  DBUG_ENTER("JOIN::change_result");
++  result= res;
++  if (!procedure && (result->prepare(fields_list, select_lex->master_unit()) ||
++                     result->prepare2()))
++  {
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++/**
++  @} (end of group Query_Optimizer)
++*/
+diff -urN mysql-old/sql/sql_show.cc mysql/sql/sql_show.cc
+--- mysql-old/sql/sql_show.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_show.cc	2011-05-10 17:56:01.596682375 +0000
+@@ -753,7 +753,7 @@
+   {
+     field_list.push_back(new Item_empty_string("View",NAME_CHAR_LEN));
+     field_list.push_back(new Item_empty_string("Create View",
+-                                               max(buffer.length(),1024)));
++                                               MYSQL_MAX(buffer.length(),1024)));
+     field_list.push_back(new Item_empty_string("character_set_client",
+                                                MY_CS_NAME_SIZE));
+     field_list.push_back(new Item_empty_string("collation_connection",
+@@ -764,7 +764,7 @@
+     field_list.push_back(new Item_empty_string("Table",NAME_CHAR_LEN));
+     // 1024 is for not to confuse old clients
+     field_list.push_back(new Item_empty_string("Create Table",
+-                                               max(buffer.length(),1024)));
++                                               MYSQL_MAX(buffer.length(),1024)));
+   }
+ 
+   if (protocol->send_fields(&field_list,
+@@ -1871,7 +1871,7 @@
+         pthread_mutex_lock(&tmp->LOCK_thd_data);
+         if (tmp->query())
+         {
+-          uint length= min(max_query_length, tmp->query_length());
++          uint length= MYSQL_MIN(max_query_length, tmp->query_length());
+           thd_info->query= (char*) thd->strmake(tmp->query(),length);
+         }
+         pthread_mutex_unlock(&tmp->LOCK_thd_data);
+@@ -2002,7 +2002,7 @@
+       if (tmp->query())
+       {
+         table->field[7]->store(tmp->query(),
+-                               min(PROCESS_LIST_INFO_WIDTH,
++                               MYSQL_MIN(PROCESS_LIST_INFO_WIDTH,
+                                    tmp->query_length()), cs);
+         table->field[7]->set_notnull();
+       }
+@@ -3168,7 +3168,7 @@
+     for (ptr=tables->table->field; (field= *ptr) ; ptr++)
+     {
+       star_table_open_method=
+-        min(star_table_open_method,
++        MYSQL_MIN(star_table_open_method,
+             schema_table->fields_info[field_indx].open_method);
+       if (bitmap_is_set(tables->table->read_set, field->field_index))
+       {
+@@ -7044,7 +7044,7 @@
+ 
+     Item_empty_string *stmt_fld=
+       new Item_empty_string("SQL Original Statement",
+-                            max(trg_sql_original_stmt.length, 1024));
++                            MYSQL_MAX(trg_sql_original_stmt.length, 1024));
+ 
+     stmt_fld->maybe_null= TRUE;
+ 
+diff -urN mysql-old/sql/sql_show.cc.orig mysql/sql/sql_show.cc.orig
+--- mysql-old/sql/sql_show.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/sql_show.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,7275 @@
++/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License along
++   with this program; if not, write to the Free Software Foundation, Inc.,
++   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA */
++
++
++/* Functions for listing databases, tables and fields */
++
++#include "mysql_priv.h"
++#include "sql_select.h"                         // For select_describe
++#include "sql_show.h"
++#include "repl_failsafe.h"
++#include "sp.h"
++#include "sp_head.h"
++#include "sql_trigger.h"
++#include "authors.h"
++#include "contributors.h"
++#ifdef HAVE_EVENT_SCHEDULER
++#include "events.h"
++#include "event_data_objects.h"
++#endif
++#include <my_dir.h>
++#include "debug_sync.h"
++
++#define STR_OR_NIL(S) ((S) ? (S) : "<nil>")
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++#include "ha_partition.h"
++#endif
++enum enum_i_s_events_fields
++{
++  ISE_EVENT_CATALOG= 0,
++  ISE_EVENT_SCHEMA,
++  ISE_EVENT_NAME,
++  ISE_DEFINER,
++  ISE_TIME_ZONE,
++  ISE_EVENT_BODY,
++  ISE_EVENT_DEFINITION,
++  ISE_EVENT_TYPE,
++  ISE_EXECUTE_AT,
++  ISE_INTERVAL_VALUE,
++  ISE_INTERVAL_FIELD,
++  ISE_SQL_MODE,
++  ISE_STARTS,
++  ISE_ENDS,
++  ISE_STATUS,
++  ISE_ON_COMPLETION,
++  ISE_CREATED,
++  ISE_LAST_ALTERED,
++  ISE_LAST_EXECUTED,
++  ISE_EVENT_COMMENT,
++  ISE_ORIGINATOR,
++  ISE_CLIENT_CS,
++  ISE_CONNECTION_CL,
++  ISE_DB_CL
++};
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++static const char *grant_names[]={
++  "select","insert","update","delete","create","drop","reload","shutdown",
++  "process","file","grant","references","index","alter"};
++
++static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
++                               "grant_types",
++                               grant_names, NULL};
++#endif
++
++static void store_key_options(THD *thd, String *packet, TABLE *table,
++                              KEY *key_info);
++
++static void
++append_algorithm(TABLE_LIST *table, String *buff);
++
++static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table);
++
++/***************************************************************************
++** List all table types supported
++***************************************************************************/
++
++static int make_version_string(char *buf, int buf_length, uint version)
++{
++  return my_snprintf(buf, buf_length, "%d.%d", version>>8,version&0xff);
++}
++
++static my_bool show_plugins(THD *thd, plugin_ref plugin,
++                            void *arg)
++{
++  TABLE *table= (TABLE*) arg;
++  struct st_mysql_plugin *plug= plugin_decl(plugin);
++  struct st_plugin_dl *plugin_dl= plugin_dlib(plugin);
++  CHARSET_INFO *cs= system_charset_info;
++  char version_buf[20];
++
++  restore_record(table, s->default_values);
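++  /* Field indexes used below: 0 name, 1 version, 2 status, 3 type,
++     4 type version, 5 library, 6 library version, 7 author,
++     8 description, 9 license. */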
++
++  table->field[0]->store(plugin_name(plugin)->str,
++                         plugin_name(plugin)->length, cs);
++
++  table->field[1]->store(version_buf,
++        make_version_string(version_buf, sizeof(version_buf), plug->version),
++        cs);
++
++
++  switch (plugin_state(plugin)) {
++  /* case PLUGIN_IS_FREED: does not happen */
++  case PLUGIN_IS_DELETED:
++    table->field[2]->store(STRING_WITH_LEN("DELETED"), cs);
++    break;
++  case PLUGIN_IS_UNINITIALIZED:
++    table->field[2]->store(STRING_WITH_LEN("INACTIVE"), cs);
++    break;
++  case PLUGIN_IS_READY:
++    table->field[2]->store(STRING_WITH_LEN("ACTIVE"), cs);
++    break;
++  case PLUGIN_IS_DISABLED:
++    table->field[2]->store(STRING_WITH_LEN("DISABLED"), cs);
++    break;
++  default:
++    DBUG_ASSERT(0);
++  }
++
++  table->field[3]->store(plugin_type_names[plug->type].str,
++                         plugin_type_names[plug->type].length,
++                         cs);
++  table->field[4]->store(version_buf,
++        make_version_string(version_buf, sizeof(version_buf),
++                            *(uint *)plug->info), cs);
++
++  if (plugin_dl)
++  {
++    table->field[5]->store(plugin_dl->dl.str, plugin_dl->dl.length, cs);
++    table->field[5]->set_notnull();
++    table->field[6]->store(version_buf,
++          make_version_string(version_buf, sizeof(version_buf),
++                              plugin_dl->version),
++          cs);
++    table->field[6]->set_notnull();
++  }
++  else
++  {
++    table->field[5]->set_null();
++    table->field[6]->set_null();
++  }
++
++
++  if (plug->author)
++  {
++    table->field[7]->store(plug->author, strlen(plug->author), cs);
++    table->field[7]->set_notnull();
++  }
++  else
++    table->field[7]->set_null();
++
++  if (plug->descr)
++  {
++    table->field[8]->store(plug->descr, strlen(plug->descr), cs);
++    table->field[8]->set_notnull();
++  }
++  else
++    table->field[8]->set_null();
++
++  switch (plug->license) {
++  case PLUGIN_LICENSE_GPL:
++    table->field[9]->store(PLUGIN_LICENSE_GPL_STRING, 
++                           strlen(PLUGIN_LICENSE_GPL_STRING), cs);
++    break;
++  case PLUGIN_LICENSE_BSD:
++    table->field[9]->store(PLUGIN_LICENSE_BSD_STRING, 
++                           strlen(PLUGIN_LICENSE_BSD_STRING), cs);
++    break;
++  default:
++    table->field[9]->store(PLUGIN_LICENSE_PROPRIETARY_STRING, 
++                           strlen(PLUGIN_LICENSE_PROPRIETARY_STRING), cs);
++    break;
++  }
++  table->field[9]->set_notnull();
++
++  return schema_table_store_record(thd, table);
++}
++
++
++int fill_plugins(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_plugins");
++  TABLE *table= tables->table;
++
++  if (plugin_foreach_with_mask(thd, show_plugins, MYSQL_ANY_PLUGIN,
++                               ~PLUGIN_IS_FREED, table))
++    DBUG_RETURN(1);
++
++  DBUG_RETURN(0);
++}
++
++
++/***************************************************************************
++** List all Authors.
++** If you can update it, you get to be in it :)
++***************************************************************************/
++
++bool mysqld_show_authors(THD *thd)
++{
++  List<Item> field_list;
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysqld_show_authors");
++
++  field_list.push_back(new Item_empty_string("Name",40));
++  field_list.push_back(new Item_empty_string("Location",40));
++  field_list.push_back(new Item_empty_string("Comment",80));
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  show_table_authors_st *authors;
++  for (authors= show_table_authors; authors->name; authors++)
++  {
++    protocol->prepare_for_resend();
++    protocol->store(authors->name, system_charset_info);
++    protocol->store(authors->location, system_charset_info);
++    protocol->store(authors->comment, system_charset_info);
++    if (protocol->write())
++      DBUG_RETURN(TRUE);
++  }
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
++
++/***************************************************************************
++** List all Contributors.
++** Please get permission before updating
++***************************************************************************/
++
++bool mysqld_show_contributors(THD *thd)
++{
++  List<Item> field_list;
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysqld_show_contributors");
++
++  field_list.push_back(new Item_empty_string("Name",40));
++  field_list.push_back(new Item_empty_string("Location",40));
++  field_list.push_back(new Item_empty_string("Comment",80));
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  show_table_contributors_st *contributors;
++  for (contributors= show_table_contributors; contributors->name; contributors++)
++  {
++    protocol->prepare_for_resend();
++    protocol->store(contributors->name, system_charset_info);
++    protocol->store(contributors->location, system_charset_info);
++    protocol->store(contributors->comment, system_charset_info);
++    if (protocol->write())
++      DBUG_RETURN(TRUE);
++  }
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
++
++/***************************************************************************
++ List all privileges supported
++***************************************************************************/
++
++struct show_privileges_st {
++  const char *privilege;
++  const char *context;
++  const char *comment;
++};
++
++static struct show_privileges_st sys_privileges[]=
++{
++  {"Alter", "Tables",  "To alter the table"},
++  {"Alter routine", "Functions,Procedures",  "To alter or drop stored functions/procedures"},
++  {"Create", "Databases,Tables,Indexes",  "To create new databases and tables"},
++  {"Create routine","Databases","To use CREATE FUNCTION/PROCEDURE"},
++  {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"},
++  {"Create view", "Tables",  "To create new views"},
++  {"Create user", "Server Admin",  "To create new users"},
++  {"Delete", "Tables",  "To delete existing rows"},
++  {"Drop", "Databases,Tables", "To drop databases, tables, and views"},
++#ifdef HAVE_EVENT_SCHEDULER
++  {"Event","Server Admin","To create, alter, drop and execute events"},
++#endif
++  {"Execute", "Functions,Procedures", "To execute stored routines"},
++  {"File", "File access on server",   "To read and write files on the server"},
++  {"Grant option",  "Databases,Tables,Functions,Procedures", "To give to other users those privileges you possess"},
++  {"Index", "Tables",  "To create or drop indexes"},
++  {"Insert", "Tables",  "To insert data into tables"},
++  {"Lock tables","Databases","To use LOCK TABLES (together with SELECT privilege)"},
++  {"Process", "Server Admin", "To view the plain text of currently executing queries"},
++  {"References", "Databases,Tables", "To have references on tables"},
++  {"Reload", "Server Admin", "To reload or refresh tables, logs and privileges"},
++  {"Replication client","Server Admin","To ask where the slave or master servers are"},
++  {"Replication slave","Server Admin","To read binary log events from the master"},
++  {"Select", "Tables",  "To retrieve rows from table"},
++  {"Show databases","Server Admin","To see all databases with SHOW DATABASES"},
++  {"Show view","Tables","To see views with SHOW CREATE VIEW"},
++  {"Shutdown","Server Admin", "To shut down the server"},
++  {"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."},
++  {"Trigger","Tables", "To use triggers"},
++  {"Update", "Tables",  "To update existing rows"},
++  {"Usage","Server Admin","No privileges - allow connect only"},
++  {NullS, NullS, NullS}
++};
++
++bool mysqld_show_privileges(THD *thd)
++{
++  List<Item> field_list;
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysqld_show_privileges");
++
++  field_list.push_back(new Item_empty_string("Privilege",10));
++  field_list.push_back(new Item_empty_string("Context",15));
++  field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN));
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  show_privileges_st *privilege= sys_privileges;
++  for (privilege= sys_privileges; privilege->privilege ; privilege++)
++  {
++    protocol->prepare_for_resend();
++    protocol->store(privilege->privilege, system_charset_info);
++    protocol->store(privilege->context, system_charset_info);
++    protocol->store(privilege->comment, system_charset_info);
++    if (protocol->write())
++      DBUG_RETURN(TRUE);
++  }
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
++
++/***************************************************************************
++  List all column types
++***************************************************************************/
++
++struct show_column_type_st
++{
++  const char *type;
++  uint size;
++  const char *min_value;
++  const char *max_value;
++  uint precision;
++  uint scale;
++  const char *nullable;
++  const char *auto_increment;
++  const char *unsigned_attr;
++  const char *zerofill;
++  const char *searchable;
++  const char *case_sensitivity;
++  const char *default_value;
++  const char *comment;
++};
++
++/* TODO: Add remaining types */
++
++static struct show_column_type_st sys_column_types[]=
++{
++  {"tinyint",
++    1,  "-128",  "127",  0,  0,  "YES",  "YES",
++    "NO",   "YES", "YES",  "NO",  "NULL,0",
++    "A very small integer"},
++  {"tinyint unsigned",
++    1,  "0"   ,  "255",  0,  0,  "YES",  "YES",
++    "YES",  "YES",  "YES",  "NO",  "NULL,0",
++    "A very small integer"},
++};
++
++bool mysqld_show_column_types(THD *thd)
++{
++  List<Item> field_list;
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysqld_show_column_types");
++
++  field_list.push_back(new Item_empty_string("Type",30));
++  field_list.push_back(new Item_int("Size",(longlong) 1,
++                                    MY_INT64_NUM_DECIMAL_DIGITS));
++  field_list.push_back(new Item_empty_string("Min_Value",20));
++  field_list.push_back(new Item_empty_string("Max_Value",20));
++  field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT));
++  field_list.push_back(new Item_return_int("Scale", 4, MYSQL_TYPE_SHORT));
++  field_list.push_back(new Item_empty_string("Nullable",4));
++  field_list.push_back(new Item_empty_string("Auto_Increment",4));
++  field_list.push_back(new Item_empty_string("Unsigned",4));
++  field_list.push_back(new Item_empty_string("Zerofill",4));
++  field_list.push_back(new Item_empty_string("Searchable",4));
++  field_list.push_back(new Item_empty_string("Case_Sensitive",4));
++  field_list.push_back(new Item_empty_string("Default",NAME_CHAR_LEN));
++  field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN));
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  /* TODO: Change the loop to not use 'i' */
++  for (uint i=0; i < sizeof(sys_column_types)/sizeof(sys_column_types[0]); i++)
++  {
++    protocol->prepare_for_resend();
++    protocol->store(sys_column_types[i].type, system_charset_info);
++    protocol->store((ulonglong) sys_column_types[i].size);
++    protocol->store(sys_column_types[i].min_value, system_charset_info);
++    protocol->store(sys_column_types[i].max_value, system_charset_info);
++    protocol->store_short((longlong) sys_column_types[i].precision);
++    protocol->store_short((longlong) sys_column_types[i].scale);
++    protocol->store(sys_column_types[i].nullable, system_charset_info);
++    protocol->store(sys_column_types[i].auto_increment, system_charset_info);
++    protocol->store(sys_column_types[i].unsigned_attr, system_charset_info);
++    protocol->store(sys_column_types[i].zerofill, system_charset_info);
++    protocol->store(sys_column_types[i].searchable, system_charset_info);
++    protocol->store(sys_column_types[i].case_sensitivity, system_charset_info);
++    protocol->store(sys_column_types[i].default_value, system_charset_info);
++    protocol->store(sys_column_types[i].comment, system_charset_info);
++    if (protocol->write())
++      DBUG_RETURN(TRUE);
++  }
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  find_files() - find files in a given directory.
++
++  SYNOPSIS
++    find_files()
++    thd                 thread handler
++    files               put found files in this list
++    db                  database name to set in TABLE_LIST structure
++    path                path to database
++    wild                filter for found files
++    dir                 read databases in path if TRUE, read .frm files in
++                        database otherwise
++
++  RETURN
++    FIND_FILES_OK       success
++    FIND_FILES_OOM      out of memory error
++    FIND_FILES_DIR      no such directory, or directory can't be read
++*/
++
++
++find_files_result
++find_files(THD *thd, List<LEX_STRING> *files, const char *db,
++           const char *path, const char *wild, bool dir)
++{
++  uint i;
++  char *ext;
++  MY_DIR *dirp;
++  FILEINFO *file;
++  LEX_STRING *file_name= 0;
++  uint file_name_len;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  uint col_access=thd->col_access;
++#endif
++  uint wild_length= 0;
++  TABLE_LIST table_list;
++  DBUG_ENTER("find_files");
++
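++  /* An empty wildcard pattern means "no filtering at all". */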
++  if (wild)
++  {
++    if (!wild[0])
++      wild= 0;
++    else
++      wild_length= strlen(wild);
++  }
++
++
++
++  bzero((char*) &table_list,sizeof(table_list));
++
++  if (!(dirp = my_dir(path,MYF(dir ? MY_WANT_STAT : 0))))
++  {
++    if (my_errno == ENOENT)
++      my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db);
++    else
++      my_error(ER_CANT_READ_DIR, MYF(ME_BELL+ME_WAITTANG), path, my_errno);
++    DBUG_RETURN(FIND_FILES_DIR);
++  }
++
++  for (i=0 ; i < (uint) dirp->number_off_files  ; i++)
++  {
++    char uname[NAME_LEN + 1];                   /* Unencoded name */
++    file=dirp->dir_entry+i;
++    if (dir)
++    {                                           /* Return databases */
++      if ((file->name[0] == '.' && 
++          ((file->name[1] == '.' && file->name[2] == '\0') ||
++            file->name[1] == '\0')))
++        continue;                               /* . or .. */
++#ifdef USE_SYMDIR
++      char *ext;
++      char buff[FN_REFLEN];
++      if (my_use_symdir && !strcmp(ext=fn_ext(file->name), ".sym"))
++      {
++	/* Only show the sym file if it points to a directory */
++	char *end;
++        *ext=0;                                 /* Remove extension */
++	unpack_dirname(buff, file->name);
++	end= strend(buff);
++	if (end != buff && end[-1] == FN_LIBCHAR)
++	  end[-1]= 0;				// Remove end FN_LIBCHAR
++        if (!my_stat(buff, file->mystat, MYF(0)))
++               continue;
++       }
++#endif
++      if (!MY_S_ISDIR(file->mystat->st_mode))
++        continue;
++
++      file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
++      if (wild)
++      {
++	if (lower_case_table_names)
++	{
++          if (my_wildcmp(files_charset_info,
++                         uname, uname + file_name_len,
++                         wild, wild + wild_length,
++                         wild_prefix, wild_one,wild_many))
++            continue;
++	}
++	else if (wild_compare(uname, wild, 0))
++	  continue;
++      }
++    }
++    else
++    {
++        // Return only .frm files which aren't temp files.
++      if (my_strcasecmp(system_charset_info, ext=fn_rext(file->name),reg_ext) ||
++          is_prefix(file->name, tmp_file_prefix))
++        continue;
++      *ext=0;
++      file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
++      if (wild)
++      {
++	if (lower_case_table_names)
++	{
++          if (my_wildcmp(files_charset_info,
++                         uname, uname + file_name_len,
++                         wild, wild + wild_length,
++                         wild_prefix, wild_one,wild_many))
++            continue;
++	}
++	else if (wild_compare(uname, wild, 0))
++	  continue;
++      }
++    }
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++    /* Don't show tables where we don't have any privileges */
++    if (db && !(col_access & TABLE_ACLS))
++    {
++      table_list.db= (char*) db;
++      table_list.db_length= strlen(db);
++      table_list.table_name= uname;
++      table_list.table_name_length= file_name_len;
++      table_list.grant.privilege=col_access;
++      if (check_grant(thd, TABLE_ACLS, &table_list, 1, 1, 1))
++        continue;
++    }
++#endif
++    if (!(file_name= 
++          thd->make_lex_string(file_name, uname, file_name_len, TRUE)) ||
++        files->push_back(file_name))
++    {
++      my_dirend(dirp);
++      DBUG_RETURN(FIND_FILES_OOM);
++    }
++  }
++  DBUG_PRINT("info",("found: %d files", files->elements));
++  my_dirend(dirp);
++
++  VOID(ha_find_files(thd, db, path, wild, dir, files));
++
++  DBUG_RETURN(FIND_FILES_OK);
++}
++
++
++/**
++   An Internal_error_handler that suppresses errors regarding views'
++   underlying tables that occur during privilege checking within SHOW CREATE
++   VIEW commands. This happens in the cases when
++
++   - A view's underlying table (e.g. referenced in its SELECT list) does not
++     exist. There should not be an error as no attempt was made to access it
++     per se.
++
++   - Access is denied for some table, column, function or stored procedure
++     such as mentioned above. This error gets raised automatically, since we
++     can't untangle its access checking from that of the view itself.
++ */
++class Show_create_error_handler : public Internal_error_handler {
++  
++  TABLE_LIST *m_top_view;
++  bool m_handling;
++  Security_context *m_sctx;
++
++  char m_view_access_denied_message[MYSQL_ERRMSG_SIZE];
++  char *m_view_access_denied_message_ptr;
++
++public:
++
++  /**
++     Creates a new Show_create_error_handler for the particular security
++     context and view. 
++
++     @param thd       Thread context, used for security context information
++                      if needed.
++     @param top_view  The view. We do not verify at this point that top_view
++                      is in fact a view since, alas, these things do not stay
++                      constant.
++  */
++  explicit Show_create_error_handler(THD *thd, TABLE_LIST *top_view) : 
++    m_top_view(top_view), m_handling(FALSE),
++    m_view_access_denied_message_ptr(NULL) 
++  {
++    
++    m_sctx = test(m_top_view->security_ctx) ?
++      m_top_view->security_ctx : thd->security_ctx;
++  }
++
++  /**
++     Lazy instantiation of 'view access denied' message. The purpose of the
++     Show_create_error_handler is to hide details of underlying tables for
++     which we have no privileges behind ER_VIEW_INVALID messages. But this
++     obviously does not apply if we lack privileges on the view itself.
++     Unfortunately, the information about which table privilege checking
++     failed for is not available at this point. The only way for us to check
++     is to reconstruct the actual error message and see whether it is the
++     same.
++  */
++  char* get_view_access_denied_message() 
++  {
++    if (!m_view_access_denied_message_ptr)
++    {
++      m_view_access_denied_message_ptr= m_view_access_denied_message;
++      my_snprintf(m_view_access_denied_message, MYSQL_ERRMSG_SIZE,
++                  ER(ER_TABLEACCESS_DENIED_ERROR), "SHOW VIEW",
++                  m_sctx->priv_user,
++                  m_sctx->host_or_ip, m_top_view->get_table_name());
++    }
++    return m_view_access_denied_message_ptr;
++  }
++
++  bool handle_error(uint sql_errno, const char *message, 
++                    MYSQL_ERROR::enum_warning_level level, THD *thd) {
++    /* 
++       The handler does not handle the errors raised by itself.
++       At this point we know if top_view is really a view.
++    */
++    if (m_handling || !m_top_view->view)
++      return FALSE;
++
++    m_handling= TRUE;
++
++    bool is_handled;
++    
++    switch (sql_errno)
++    {
++    case ER_TABLEACCESS_DENIED_ERROR:
++      if (!strcmp(get_view_access_denied_message(), message))
++      {
++        /* Access to top view is not granted, don't interfere. */
++        is_handled= FALSE;
++        break;
++      }
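++      /* Fall through: treated like the other access-denied errors below. */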
++    case ER_COLUMNACCESS_DENIED_ERROR:
++    case ER_VIEW_NO_EXPLAIN: /* Error was anonymized, ignore all the same. */
++    case ER_PROCACCESS_DENIED_ERROR:
++      is_handled= TRUE;
++      break;
++
++    case ER_NO_SUCH_TABLE:
++      /* Established behavior: warn if underlying tables are missing. */
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++                          ER_VIEW_INVALID,
++                          ER(ER_VIEW_INVALID),
++                          m_top_view->get_db_name(),
++                          m_top_view->get_table_name());
++      is_handled= TRUE;
++      break;
++
++    case ER_SP_DOES_NOT_EXIST:
++      /* Established behavior: warn if underlying functions are missing. */
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++                          ER_VIEW_INVALID,
++                          ER(ER_VIEW_INVALID),
++                          m_top_view->get_db_name(),
++                          m_top_view->get_table_name());
++      is_handled= TRUE;
++      break;
++    default:
++      is_handled= FALSE;
++    }
++
++    m_handling= FALSE;
++    return is_handled;
++  }
++};
++
++
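++/*
++  Handle SHOW CREATE TABLE and SHOW CREATE VIEW for the object given in
++  table_list: open it, build the CREATE statement via store_create_info()
++  or view_store_create_info(), and send a single result row to the client.
++  Returns TRUE on error, FALSE on success.
++*/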
++bool
++mysqld_show_create(THD *thd, TABLE_LIST *table_list)
++{
++  Protocol *protocol= thd->protocol;
++  char buff[2048];
++  String buffer(buff, sizeof(buff), system_charset_info);
++  DBUG_ENTER("mysqld_show_create");
++  DBUG_PRINT("enter",("db: %s  table: %s",table_list->db,
++                      table_list->table_name));
++
++  /* We want to preserve the tree for views. */
++  thd->lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
++
++  {
++    Show_create_error_handler view_error_suppressor(thd, table_list);
++    thd->push_internal_handler(&view_error_suppressor);
++    bool error= open_normal_and_derived_tables(thd, table_list, 0);
++    thd->pop_internal_handler();
++    if (error && (thd->killed || thd->main_da.is_error()))
++      DBUG_RETURN(TRUE);
++  }
++
++  /* TODO: add environment variables show when it become possible */
++  if (thd->lex->only_view && !table_list->view)
++  {
++    my_error(ER_WRONG_OBJECT, MYF(0),
++             table_list->db, table_list->table_name, "VIEW");
++    DBUG_RETURN(TRUE);
++  }
++
++  buffer.length(0);
++
++  if (table_list->view)
++    buffer.set_charset(table_list->view_creation_ctx->get_client_cs());
++
++  if ((table_list->view ?
++       view_store_create_info(thd, table_list, &buffer) :
++       store_create_info(thd, table_list, &buffer, NULL,
++                         FALSE /* show_database */)))
++    DBUG_RETURN(TRUE);
++
++  List<Item> field_list;
++  if (table_list->view)
++  {
++    field_list.push_back(new Item_empty_string("View",NAME_CHAR_LEN));
++    field_list.push_back(new Item_empty_string("Create View",
++                                               MYSQL_MAX(buffer.length(),1024)));
++    field_list.push_back(new Item_empty_string("character_set_client",
++                                               MY_CS_NAME_SIZE));
++    field_list.push_back(new Item_empty_string("collation_connection",
++                                               MY_CS_NAME_SIZE));
++  }
++  else
++  {
++    field_list.push_back(new Item_empty_string("Table",NAME_CHAR_LEN));
++    // 1024 is used so as not to confuse old clients
++    field_list.push_back(new Item_empty_string("Create Table",
++                                               MYSQL_MAX(buffer.length(),1024)));
++  }
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++  protocol->prepare_for_resend();
++  if (table_list->view)
++    protocol->store(table_list->view_name.str, system_charset_info);
++  else
++  {
++    if (table_list->schema_table)
++      protocol->store(table_list->schema_table->table_name,
++                      system_charset_info);
++    else
++      protocol->store(table_list->table->alias, system_charset_info);
++  }
++
++  if (table_list->view)
++  {
++    protocol->store(buffer.ptr(), buffer.length(),
++                    table_list->view_creation_ctx->get_client_cs());
++
++    protocol->store(table_list->view_creation_ctx->get_client_cs()->csname,
++                    system_charset_info);
++
++    protocol->store(table_list->view_creation_ctx->get_connection_cl()->name,
++                    system_charset_info);
++  }
++  else
++    protocol->store(buffer.ptr(), buffer.length(), buffer.charset());
++
++  if (protocol->write())
++    DBUG_RETURN(TRUE);
++
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
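++/*
++  Handle SHOW CREATE DATABASE: check access rights, load the database
++  options and send one row containing the database name and the
++  reconstructed CREATE DATABASE statement.
++*/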
++bool mysqld_show_create_db(THD *thd, char *dbname,
++                           HA_CREATE_INFO *create_info)
++{
++  char buff[2048];
++  String buffer(buff, sizeof(buff), system_charset_info);
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  Security_context *sctx= thd->security_ctx;
++  uint db_access;
++#endif
++  HA_CREATE_INFO create;
++  uint create_options = create_info ? create_info->options : 0;
++  Protocol *protocol=thd->protocol;
++  DBUG_ENTER("mysql_show_create_db");
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  if (test_all_bits(sctx->master_access, DB_ACLS))
++    db_access=DB_ACLS;
++  else
++    db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) |
++		sctx->master_access);
++  if (!(db_access & DB_ACLS) && check_grant_db(thd,dbname))
++  {
++    my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
++             sctx->priv_user, sctx->host_or_ip, dbname);
++    general_log_print(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
++                      sctx->priv_user, sctx->host_or_ip, dbname);
++    DBUG_RETURN(TRUE);
++  }
++#endif
++  if (is_schema_db(dbname))
++  {
++    dbname= INFORMATION_SCHEMA_NAME.str;
++    create.default_table_charset= system_charset_info;
++  }
++  else
++  {
++    if (check_db_dir_existence(dbname))
++    {
++      my_error(ER_BAD_DB_ERROR, MYF(0), dbname);
++      DBUG_RETURN(TRUE);
++    }
++
++    load_db_opt_by_name(thd, dbname, &create);
++  }
++  List<Item> field_list;
++  field_list.push_back(new Item_empty_string("Database",NAME_CHAR_LEN));
++  field_list.push_back(new Item_empty_string("Create Database",1024));
++
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  protocol->prepare_for_resend();
++  protocol->store(dbname, strlen(dbname), system_charset_info);
++  buffer.length(0);
++  buffer.append(STRING_WITH_LEN("CREATE DATABASE "));
++  if (create_options & HA_LEX_CREATE_IF_NOT_EXISTS)
++    buffer.append(STRING_WITH_LEN("/*!32312 IF NOT EXISTS*/ "));
++  append_identifier(thd, &buffer, dbname, strlen(dbname));
++
++  if (create.default_table_charset)
++  {
++    buffer.append(STRING_WITH_LEN(" /*!40100"));
++    buffer.append(STRING_WITH_LEN(" DEFAULT CHARACTER SET "));
++    buffer.append(create.default_table_charset->csname);
++    if (!(create.default_table_charset->state & MY_CS_PRIMARY))
++    {
++      buffer.append(STRING_WITH_LEN(" COLLATE "));
++      buffer.append(create.default_table_charset->name);
++    }
++    buffer.append(STRING_WITH_LEN(" */"));
++  }
++  protocol->store(buffer.ptr(), buffer.length(), buffer.charset());
++
++  if (protocol->write())
++    DBUG_RETURN(TRUE);
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++}
++
++
++
++/****************************************************************************
++  Return only fields for API mysql_list_fields
++  Use "show table wildcard" in mysql instead of this
++****************************************************************************/
++
++void
++mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
++{
++  TABLE *table;
++  DBUG_ENTER("mysqld_list_fields");
++  DBUG_PRINT("enter",("table: %s",table_list->table_name));
++
++  if (open_normal_and_derived_tables(thd, table_list, 0))
++    DBUG_VOID_RETURN;
++  table= table_list->table;
++
++  List<Item> field_list;
++
++  Field **ptr,*field;
++  for (ptr=table->field ; (field= *ptr); ptr++)
++  {
++    if (!wild || !wild[0] || 
++        !wild_case_compare(system_charset_info, field->field_name,wild))
++    {
++      if (table_list->view)
++        field_list.push_back(new Item_ident_for_show(field,
++                                                     table_list->view_db.str,
++                                                     table_list->view_name.str));
++      else
++        field_list.push_back(new Item_field(field));
++    }
++  }
++  restore_record(table, s->default_values);              // Get empty record
++  table->use_all_columns();
++  if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS))
++    DBUG_VOID_RETURN;
++  my_eof(thd);
++  DBUG_VOID_RETURN;
++}
++
++
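++/*
++  Write the CREATE TABLE statement for a table either to the client
++  (when fd < 0) or to the given file descriptor.
++  Returns 0 on success and -1 on failure.
++*/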
++int
++mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd)
++{
++  Protocol *protocol= thd->protocol;
++  String *packet= protocol->storage_packet();
++  DBUG_ENTER("mysqld_dump_create_info");
++  DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name.str));
++
++  protocol->prepare_for_resend();
++  if (store_create_info(thd, table_list, packet, NULL,
++                        FALSE /* show_database */))
++    DBUG_RETURN(-1);
++
++  if (fd < 0)
++  {
++    if (protocol->write())
++      DBUG_RETURN(-1);
++    protocol->flush();
++  }
++  else
++  {
++    if (my_write(fd, (const uchar*) packet->ptr(), packet->length(),
++		 MYF(MY_WME)))
++      DBUG_RETURN(-1);
++  }
++  DBUG_RETURN(0);
++}
++
++/*
++  Scan the characters of a name and decide whether it must be quoted so
++  that sql_lex.cc can parse it as an identifier.
++
++  SYNOPSIS
++  require_quotes()
++  name			attribute name
++  name_length		length of name
++
++  RETURN
++    #	Pointer to conflicting character
++    0	No conflicting character
++*/
++
++static const char *require_quotes(const char *name, uint name_length)
++{
++  uint length;
++  bool pure_digit= TRUE;
++  const char *end= name + name_length;
++
++  for (; name < end ; name++)
++  {
++    uchar chr= (uchar) *name;
++    length= my_mbcharlen(system_charset_info, chr);
++    if (length == 1 && !system_charset_info->ident_map[chr])
++      return name;
++    if (length == 1 && (chr < '0' || chr > '9'))
++      pure_digit= FALSE;
++  }
++  if (pure_digit)
++    return name;
++  return 0;
++}
++
++
++/*
++  Quote the given identifier if needed and append it to the target string.
++  If the given identifier is empty, it will be quoted.
++
++  SYNOPSIS
++  append_identifier()
++  thd                   thread handler
++  packet                target string
++  name                  the identifier to be appended
++  length                length of the identifier to append
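++
++  EXAMPLE (illustrative)
++    append_identifier(thd, packet, "my`table", 8) appends `my``table`
++    (the embedded quote character is doubled), or "my`table" when the
++    ANSI_QUOTES sql_mode is in effect.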
++*/
++
++void
++append_identifier(THD *thd, String *packet, const char *name, uint length)
++{
++  const char *name_end;
++  char quote_char;
++  int q= get_quote_char_for_identifier(thd, name, length);
++
++  if (q == EOF)
++  {
++    packet->append(name, length, packet->charset());
++    return;
++  }
++
++  /*
++    The identifier must be quoted as it includes a quote character or
++   it's a keyword
++  */
++
++  VOID(packet->reserve(length*2 + 2));
++  quote_char= (char) q;
++  packet->append(&quote_char, 1, system_charset_info);
++
++  for (name_end= name+length ; name < name_end ; name+= length)
++  {
++    uchar chr= (uchar) *name;
++    length= my_mbcharlen(system_charset_info, chr);
++    /*
++      my_mbcharlen can return 0 on a wrong multibyte
++      sequence. This can happen when upgrading from 4.0 if the
++      identifier contains accented characters. The manual says such
++      identifiers do not work, so we simply set length to 1 to avoid
++      hanging in an endless loop.
++    */
++    if (!length)
++      length= 1;
++    if (length == 1 && chr == (uchar) quote_char)
++      packet->append(&quote_char, 1, system_charset_info);
++    packet->append(name, length, system_charset_info);
++  }
++  packet->append(&quote_char, 1, system_charset_info);
++}
++
++
++/*
++  Get the quote character for displaying an identifier.
++
++  SYNOPSIS
++    get_quote_char_for_identifier()
++    thd		Thread handler
++    name	name to quote
++    length	length of name
++
++  IMPLEMENTATION
++    Force quoting in the following cases:
++      - name is empty (for example, this happens when this function is used
++        to quote user and host names for the DEFINER clause);
++      - name is a keyword;
++      - name includes a special character;
++    Otherwise identifier is quoted only if the option OPTION_QUOTE_SHOW_CREATE
++    is set.
++
++  RETURN
++    EOF	  No quote character is needed
++    #	  Quote character
++*/
++
++int get_quote_char_for_identifier(THD *thd, const char *name, uint length)
++{
++  if (length &&
++      !is_keyword(name,length) &&
++      !require_quotes(name, length) &&
++      !(thd->options & OPTION_QUOTE_SHOW_CREATE))
++    return EOF;
++  if (thd->variables.sql_mode & MODE_ANSI_QUOTES)
++    return '"';
++  return '`';
++}
++
++
++/* Append directory name (if exists) to CREATE INFO */
++
++static void append_directory(THD *thd, String *packet, const char *dir_type,
++			     const char *filename)
++{
++  if (filename && !(thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
++  {
++    uint length= dirname_length(filename);
++    packet->append(' ');
++    packet->append(dir_type);
++    packet->append(STRING_WITH_LEN(" DIRECTORY='"));
++#ifdef __WIN__
++    /* Convert \ to / to be able to create table on unix */
++    char *winfilename= (char*) thd->memdup(filename, length);
++    char *pos, *end;
++    for (pos= winfilename, end= pos+length ; pos < end ; pos++)
++    {
++      if (*pos == '\\')
++        *pos = '/';
++    }
++    filename= winfilename;
++#endif
++    packet->append(filename, length);
++    packet->append('\'');
++  }
++}
++
++
++#define LIST_PROCESS_HOST_LEN 64
++
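++/*
++  Produce the DEFAULT value for a field as it should appear in
++  SHOW CREATE TABLE output. The (optionally quoted) value is stored in
++  def_value; the return value tells whether the field has a printable
++  default at all.
++*/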
++static bool get_field_default_value(THD *thd, TABLE *table,
++                                    Field *field, String *def_value,
++                                    bool quoted)
++{
++  bool has_default;
++  bool has_now_default;
++  enum enum_field_types field_type= field->type();
++  /* 
++     We are using CURRENT_TIMESTAMP instead of NOW because it is
++     more standard
++  */
++  has_now_default= table->timestamp_field == field && 
++    field->unireg_check != Field::TIMESTAMP_UN_FIELD;
++    
++  has_default= (field_type != FIELD_TYPE_BLOB &&
++                !(field->flags & NO_DEFAULT_VALUE_FLAG) &&
++                field->unireg_check != Field::NEXT_NUMBER &&
++                !((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
++                  && has_now_default));
++
++  def_value->length(0);
++  if (has_default)
++  {
++    if (has_now_default)
++      def_value->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
++    else if (!field->is_null())
++    {                                             // Not null by default
++      char tmp[MAX_FIELD_WIDTH];
++      String type(tmp, sizeof(tmp), field->charset());
++      if (field_type == MYSQL_TYPE_BIT)
++      {
++        longlong dec= field->val_int();
++        char *ptr= longlong2str(dec, tmp + 2, 2);
++        uint32 length= (uint32) (ptr - tmp);
++        tmp[0]= 'b';
++        tmp[1]= '\'';        
++        tmp[length]= '\'';
++        type.length(length + 1);
++        quoted= 0;
++      }
++      else
++        field->val_str(&type);
++      if (type.length())
++      {
++        String def_val;
++        uint dummy_errors;
++        /* convert to system_charset_info == utf8 */
++        def_val.copy(type.ptr(), type.length(), field->charset(),
++                     system_charset_info, &dummy_errors);
++        if (quoted)
++          append_unescaped(def_value, def_val.ptr(), def_val.length());
++        else
++          def_value->append(def_val.ptr(), def_val.length());
++      }
++      else if (quoted)
++        def_value->append(STRING_WITH_LEN("''"));
++    }
++    else if (field->maybe_null() && quoted)
++      def_value->append(STRING_WITH_LEN("NULL"));    // Null as default
++    else
++      return 0;
++
++  }
++  return has_default;
++}
++
++/*
++  Build a CREATE TABLE statement for a table.
++
++  SYNOPSIS
++    store_create_info()
++    thd               The thread
++    table_list        A list containing one table to write statement
++                      for.
++    packet            Pointer to a string where statement will be
++                      written.
++    create_info_arg   Pointer to create information that can be used
++                      to tailor the format of the statement.  Can be
++                      NULL, in which case only SQL_MODE is considered
++                      when building the statement.
++  
++  NOTE
++    Currently always returns 0, but might return an error code in the
++    future.
++    
++  RETURN
++    0       OK
++ */
++
++int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
++                      HA_CREATE_INFO *create_info_arg, bool show_database)
++{
++  List<Item> field_list;
++  char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH];
++  const char *alias;
++  String type(tmp, sizeof(tmp), system_charset_info);
++  String def_value(def_value_buf, sizeof(def_value_buf), system_charset_info);
++  Field **ptr,*field;
++  uint primary_key;
++  KEY *key_info;
++  TABLE *table= table_list->table;
++  handler *file= table->file;
++  TABLE_SHARE *share= table->s;
++  HA_CREATE_INFO create_info;
++  bool show_table_options= FALSE;
++  bool foreign_db_mode=  (thd->variables.sql_mode & (MODE_POSTGRESQL |
++                                                     MODE_ORACLE |
++                                                     MODE_MSSQL |
++                                                     MODE_DB2 |
++                                                     MODE_MAXDB |
++                                                     MODE_ANSI)) != 0;
++  bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS |
++                                                       MODE_MYSQL323 |
++                                                       MODE_MYSQL40)) != 0;
++  my_bitmap_map *old_map;
++  DBUG_ENTER("store_create_info");
++  DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
++
++  restore_record(table, s->default_values); // Get empty record
++
++  if (share->tmp_table)
++    packet->append(STRING_WITH_LEN("CREATE TEMPORARY TABLE "));
++  else
++    packet->append(STRING_WITH_LEN("CREATE TABLE "));
++  if (create_info_arg &&
++      (create_info_arg->options & HA_LEX_CREATE_IF_NOT_EXISTS))
++    packet->append(STRING_WITH_LEN("IF NOT EXISTS "));
++  if (table_list->schema_table)
++    alias= table_list->schema_table->table_name;
++  else
++  {
++    if (lower_case_table_names == 2)
++      alias= table->alias;
++    else
++    {
++      alias= share->table_name.str;
++    }
++  }
++
++  /*
++    Print the database before the table name if told to do that. The
++    database name is only printed in the event that it is different
++    from the current database.  The main reason for doing this is to
++    avoid having to update gazillions of tests and result files, but
++    it also saves a few bytes of the binary log.
++   */
++  if (show_database)
++  {
++    const LEX_STRING *const db=
++      table_list->schema_table ? &INFORMATION_SCHEMA_NAME : &table->s->db;
++    if (!thd->db || strcmp(db->str, thd->db))
++    {
++      append_identifier(thd, packet, db->str, db->length);
++      packet->append(STRING_WITH_LEN("."));
++    }
++  }
++
++  append_identifier(thd, packet, alias, strlen(alias));
++  packet->append(STRING_WITH_LEN(" (\n"));
++  /*
++    We need this to get default values from the table
++    We have to restore the read_set if we are called from insert in case
++    of row based replication.
++  */
++  old_map= tmp_use_all_columns(table, table->read_set);
++
++  for (ptr=table->field ; (field= *ptr); ptr++)
++  {
++    uint flags = field->flags;
++
++    if (ptr != table->field)
++      packet->append(STRING_WITH_LEN(",\n"));
++
++    packet->append(STRING_WITH_LEN("  "));
++    append_identifier(thd,packet,field->field_name, strlen(field->field_name));
++    packet->append(' ');
++    // check for surprises from the previous call to Field::sql_type()
++    if (type.ptr() != tmp)
++      type.set(tmp, sizeof(tmp), system_charset_info);
++    else
++      type.set_charset(system_charset_info);
++
++    field->sql_type(type);
++    packet->append(type.ptr(), type.length(), system_charset_info);
++
++    if (field->has_charset() && 
++        !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
++    {
++      if (field->charset() != share->table_charset)
++      {
++	packet->append(STRING_WITH_LEN(" CHARACTER SET "));
++	packet->append(field->charset()->csname);
++      }
++      /* 
++	For string types dump collation name only if 
++	collation is not primary for the given charset
++      */
++      if (!(field->charset()->state & MY_CS_PRIMARY))
++      {
++	packet->append(STRING_WITH_LEN(" COLLATE "));
++	packet->append(field->charset()->name);
++      }
++    }
++
++    if (flags & NOT_NULL_FLAG)
++      packet->append(STRING_WITH_LEN(" NOT NULL"));
++    else if (field->type() == MYSQL_TYPE_TIMESTAMP)
++    {
++      /*
++        TIMESTAMP fields require an explicit NULL flag, because unlike
++        all other fields they are treated as NOT NULL by default.
++      */
++      packet->append(STRING_WITH_LEN(" NULL"));
++    }
++
++    if (get_field_default_value(thd, table, field, &def_value, 1))
++    {
++      packet->append(STRING_WITH_LEN(" DEFAULT "));
++      packet->append(def_value.ptr(), def_value.length(), system_charset_info);
++    }
++
++    if (!limited_mysql_mode && table->timestamp_field == field && 
++        field->unireg_check != Field::TIMESTAMP_DN_FIELD)
++      packet->append(STRING_WITH_LEN(" ON UPDATE CURRENT_TIMESTAMP"));
++
++    if (field->unireg_check == Field::NEXT_NUMBER && 
++        !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS))
++      packet->append(STRING_WITH_LEN(" AUTO_INCREMENT"));
++
++    if (field->comment.length)
++    {
++      packet->append(STRING_WITH_LEN(" COMMENT "));
++      append_unescaped(packet, field->comment.str, field->comment.length);
++    }
++  }
++
++  key_info= table->key_info;
++  bzero((char*) &create_info, sizeof(create_info));
++  /* Allow update_create_info to update row type */
++  create_info.row_type= share->row_type;
++  file->update_create_info(&create_info);
++  primary_key= share->primary_key;
++
++  for (uint i=0 ; i < share->keys ; i++,key_info++)
++  {
++    KEY_PART_INFO *key_part= key_info->key_part;
++    bool found_primary=0;
++    packet->append(STRING_WITH_LEN(",\n  "));
++
++    if (i == primary_key && !strcmp(key_info->name, primary_key_name))
++    {
++      found_primary=1;
++      /*
++        No space at end, because a space will be added after where the
++        identifier would go, but that is not added for primary key.
++      */
++      packet->append(STRING_WITH_LEN("PRIMARY KEY"));
++    }
++    else if (key_info->flags & HA_NOSAME)
++      packet->append(STRING_WITH_LEN("UNIQUE KEY "));
++    else if (key_info->flags & HA_FULLTEXT)
++      packet->append(STRING_WITH_LEN("FULLTEXT KEY "));
++    else if (key_info->flags & HA_SPATIAL)
++      packet->append(STRING_WITH_LEN("SPATIAL KEY "));
++    else
++      packet->append(STRING_WITH_LEN("KEY "));
++
++    if (!found_primary)
++     append_identifier(thd, packet, key_info->name, strlen(key_info->name));
++
++    packet->append(STRING_WITH_LEN(" ("));
++
++    for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
++    {
++      if (j)
++        packet->append(',');
++
++      if (key_part->field)
++        append_identifier(thd,packet,key_part->field->field_name,
++			  strlen(key_part->field->field_name));
++      if (key_part->field &&
++          (key_part->length !=
++           table->field[key_part->fieldnr-1]->key_length() &&
++           !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
++      {
++        char *end;
++        buff[0] = '(';
++        end= int10_to_str((long) key_part->length /
++                          key_part->field->charset()->mbmaxlen,
++                          buff + 1,10);
++        *end++ = ')';
++        packet->append(buff,(uint) (end-buff));
++      }
++    }
++    packet->append(')');
++    store_key_options(thd, packet, table, key_info);
++    if (key_info->parser)
++    {
++      LEX_STRING *parser_name= plugin_name(key_info->parser);
++      packet->append(STRING_WITH_LEN(" /*!50100 WITH PARSER "));
++      append_identifier(thd, packet, parser_name->str, parser_name->length);
++      packet->append(STRING_WITH_LEN(" */ "));
++    }
++  }
++
++  /*
++    Get possible foreign key definitions stored in InnoDB and append them
++    to the CREATE TABLE statement
++  */
++
++  if ((for_str= file->get_foreign_key_create_info()))
++  {
++    packet->append(for_str, strlen(for_str));
++    file->free_foreign_key_create_info(for_str);
++  }
++
++  packet->append(STRING_WITH_LEN("\n)"));
++  if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode)
++  {
++    show_table_options= TRUE;
++    /*
++      Get possible table space definitions and append them
++      to the CREATE TABLE statement
++    */
++
++    if ((for_str= file->get_tablespace_name(thd,0,0)))
++    {
++      packet->append(STRING_WITH_LEN(" /*!50100 TABLESPACE "));
++      packet->append(for_str, strlen(for_str));
++      packet->append(STRING_WITH_LEN(" STORAGE DISK */"));
++      my_free(for_str, MYF(0));
++    }
++
++    /*
++      IF   check_create_info
++      THEN add ENGINE only if it was used when creating the table
++    */
++    if (!create_info_arg ||
++        (create_info_arg->used_fields & HA_CREATE_USED_ENGINE))
++    {
++      if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
++        packet->append(STRING_WITH_LEN(" TYPE="));
++      else
++        packet->append(STRING_WITH_LEN(" ENGINE="));
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    if (table->part_info)
++      packet->append(ha_resolve_storage_engine_name(
++                        table->part_info->default_engine_type));
++    else
++      packet->append(file->table_type());
++#else
++      packet->append(file->table_type());
++#endif
++    }
++
++    /*
++      Add AUTO_INCREMENT=... if there is an AUTO_INCREMENT column,
++      and NEXT_ID > 1 (the default).  We must not print the clause
++      for engines that do not support this as it would break the
++      import of dumps, but as of this writing, the test for whether
++      AUTO_INCREMENT columns are allowed and whether AUTO_INCREMENT=...
++      is supported is identical: !(file->table_flags() & HA_NO_AUTO_INCREMENT).
++      Because of that, we do not explicitly test for the feature,
++      but may extrapolate its existence from that of an AUTO_INCREMENT column.
++    */
++
++    if (create_info.auto_increment_value > 1)
++    {
++      char *end;
++      packet->append(STRING_WITH_LEN(" AUTO_INCREMENT="));
++      end= longlong10_to_str(create_info.auto_increment_value, buff,10);
++      packet->append(buff, (uint) (end - buff));
++    }
++
++    
++    if (share->table_charset &&
++	!(thd->variables.sql_mode & MODE_MYSQL323) &&
++	!(thd->variables.sql_mode & MODE_MYSQL40))
++    {
++      /*
++        IF   check_create_info
++        THEN add DEFAULT CHARSET only if it was used when creating the table
++      */
++      if (!create_info_arg ||
++          (create_info_arg->used_fields & HA_CREATE_USED_DEFAULT_CHARSET))
++      {
++        packet->append(STRING_WITH_LEN(" DEFAULT CHARSET="));
++        packet->append(share->table_charset->csname);
++        if (!(share->table_charset->state & MY_CS_PRIMARY))
++        {
++          packet->append(STRING_WITH_LEN(" COLLATE="));
++          packet->append(table->s->table_charset->name);
++        }
++      }
++    }
++
++    if (share->min_rows)
++    {
++      char *end;
++      packet->append(STRING_WITH_LEN(" MIN_ROWS="));
++      end= longlong10_to_str(share->min_rows, buff, 10);
++      packet->append(buff, (uint) (end- buff));
++    }
++
++    if (share->max_rows && !table_list->schema_table)
++    {
++      char *end;
++      packet->append(STRING_WITH_LEN(" MAX_ROWS="));
++      end= longlong10_to_str(share->max_rows, buff, 10);
++      packet->append(buff, (uint) (end - buff));
++    }
++
++    if (share->avg_row_length)
++    {
++      char *end;
++      packet->append(STRING_WITH_LEN(" AVG_ROW_LENGTH="));
++      end= longlong10_to_str(share->avg_row_length, buff,10);
++      packet->append(buff, (uint) (end - buff));
++    }
++
++    if (share->db_create_options & HA_OPTION_PACK_KEYS)
++      packet->append(STRING_WITH_LEN(" PACK_KEYS=1"));
++    if (share->db_create_options & HA_OPTION_NO_PACK_KEYS)
++      packet->append(STRING_WITH_LEN(" PACK_KEYS=0"));
++    /* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compatibility */
++    if (share->db_create_options & HA_OPTION_CHECKSUM)
++      packet->append(STRING_WITH_LEN(" CHECKSUM=1"));
++    if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE)
++      packet->append(STRING_WITH_LEN(" DELAY_KEY_WRITE=1"));
++    if (create_info.row_type != ROW_TYPE_DEFAULT)
++    {
++      packet->append(STRING_WITH_LEN(" ROW_FORMAT="));
++      packet->append(ha_row_type[(uint) create_info.row_type]);
++    }
++    if (table->s->key_block_size)
++    {
++      char *end;
++      packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
++      end= longlong10_to_str(table->s->key_block_size, buff, 10);
++      packet->append(buff, (uint) (end - buff));
++    }
++    table->file->append_create_info(packet);
++    if (share->comment.length)
++    {
++      packet->append(STRING_WITH_LEN(" COMMENT="));
++      append_unescaped(packet, share->comment.str, share->comment.length);
++    }
++    if (share->connect_string.length)
++    {
++      packet->append(STRING_WITH_LEN(" CONNECTION="));
++      append_unescaped(packet, share->connect_string.str, share->connect_string.length);
++    }
++    append_directory(thd, packet, "DATA",  create_info.data_file_name);
++    append_directory(thd, packet, "INDEX", create_info.index_file_name);
++  }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  {
++    /*
++      The partitioning clause comes at the end of the CREATE TABLE syntax.
++    */
++    uint part_syntax_len;
++    char *part_syntax;
++    if (table->part_info &&
++        (!table->part_info->is_auto_partitioned) &&
++        ((part_syntax= generate_partition_syntax(table->part_info,
++                                                  &part_syntax_len,
++                                                  FALSE,
++                                                  show_table_options))))
++    {
++       packet->append(STRING_WITH_LEN("\n/*!50100"));
++       packet->append(part_syntax, part_syntax_len);
++       packet->append(STRING_WITH_LEN(" */"));
++       my_free(part_syntax, MYF(0));
++    }
++  }
++#endif
++  tmp_restore_column_map(table->read_set, old_map);
++  DBUG_RETURN(0);
++}
++
++
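++/*
++  Append index options (USING BTREE/HASH/RTREE and KEY_BLOCK_SIZE) to the
++  key definition in 'packet', unless a restricted or foreign-database
++  SQL mode suppresses them.
++*/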
++static void store_key_options(THD *thd, String *packet, TABLE *table,
++                              KEY *key_info)
++{
++  bool limited_mysql_mode= (thd->variables.sql_mode &
++                            (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 |
++                             MODE_MYSQL40)) != 0;
++  bool foreign_db_mode=  (thd->variables.sql_mode & (MODE_POSTGRESQL |
++                                                     MODE_ORACLE |
++                                                     MODE_MSSQL |
++                                                     MODE_DB2 |
++                                                     MODE_MAXDB |
++                                                     MODE_ANSI)) != 0;
++  char *end, buff[32];
++
++  if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) &&
++      !limited_mysql_mode && !foreign_db_mode)
++  {
++
++    if (key_info->algorithm == HA_KEY_ALG_BTREE)
++      packet->append(STRING_WITH_LEN(" USING BTREE"));
++
++    if (key_info->algorithm == HA_KEY_ALG_HASH)
++      packet->append(STRING_WITH_LEN(" USING HASH"));
++
++    /* send USING only in non-default case: non-spatial rtree */
++    if ((key_info->algorithm == HA_KEY_ALG_RTREE) &&
++        !(key_info->flags & HA_SPATIAL))
++      packet->append(STRING_WITH_LEN(" USING RTREE"));
++
++    if ((key_info->flags & HA_USES_BLOCK_SIZE) &&
++        table->s->key_block_size != key_info->block_size)
++    {
++      packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
++      end= longlong10_to_str(key_info->block_size, buff, 10);
++      packet->append(buff, (uint) (end - buff));
++    }
++  }
++}
++
++
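++/*
++  Append the ALGORITHM, DEFINER and SQL SECURITY clauses of a view
++  definition to 'buff'.
++*/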
++void
++view_store_options(THD *thd, TABLE_LIST *table, String *buff)
++{
++  append_algorithm(table, buff);
++  append_definer(thd, buff, &table->definer.user, &table->definer.host);
++  if (table->view_suid)
++    buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER "));
++  else
++    buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER "));
++}
++
++
++/*
++  Append the ALGORITHM clause of a view to the given buffer.
++
++  SYNOPSIS
++    append_algorithm()
++    table         [in] TABLE_LIST describing the view
++    buff          [inout] buffer to hold the ALGORITHM clause
++*/
++
++static void append_algorithm(TABLE_LIST *table, String *buff)
++{
++  buff->append(STRING_WITH_LEN("ALGORITHM="));
++  switch ((int8)table->algorithm) {
++  case VIEW_ALGORITHM_UNDEFINED:
++    buff->append(STRING_WITH_LEN("UNDEFINED "));
++    break;
++  case VIEW_ALGORITHM_TMPTABLE:
++    buff->append(STRING_WITH_LEN("TEMPTABLE "));
++    break;
++  case VIEW_ALGORITHM_MERGE:
++    buff->append(STRING_WITH_LEN("MERGE "));
++    break;
++  default:
++    DBUG_ASSERT(0); // never should happen
++  }
++}
++
++/*
++  Append DEFINER clause to the given buffer.
++  
++  SYNOPSIS
++    append_definer()
++    thd           [in] thread handle
++    buffer        [inout] buffer to hold DEFINER clause
++    definer_user  [in] user name part of definer
++    definer_host  [in] host name part of definer
++*/
++
++void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user,
++                    const LEX_STRING *definer_host)
++{
++  buffer->append(STRING_WITH_LEN("DEFINER="));
++  append_identifier(thd, buffer, definer_user->str, definer_user->length);
++  buffer->append('@');
++  append_identifier(thd, buffer, definer_host->str, definer_host->length);
++  buffer->append(' ');
++}
++
++
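++/*
++  Build the CREATE VIEW statement for 'table' in 'buff'. The view name is
++  printed without a database prefix when the view belongs to the current
++  database; the view body uses compact (unqualified) names only when all
++  referenced tables live in that same database.
++*/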
++int
++view_store_create_info(THD *thd, TABLE_LIST *table, String *buff)
++{
++  my_bool compact_view_name= TRUE;
++  my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
++                                                       MODE_ORACLE |
++                                                       MODE_MSSQL |
++                                                       MODE_DB2 |
++                                                       MODE_MAXDB |
++                                                       MODE_ANSI)) != 0;
++
++  if (!thd->db || strcmp(thd->db, table->view_db.str))
++    /*
++      Print the fully qualified view name: the compact (unqualified) form
++      is only used when the view belongs to the current database, which is
++      not the case here.
++    */
++    compact_view_name= table->compact_view_format= FALSE;
++  else
++  {
++    /*
++      Compact output format for view body can be used
++      if this view only references tables inside its own db
++    */
++    TABLE_LIST *tbl;
++    table->compact_view_format= TRUE;
++    for (tbl= thd->lex->query_tables;
++         tbl;
++         tbl= tbl->next_global)
++    {
++      if (strcmp(table->view_db.str, tbl->view ? tbl->view_db.str :tbl->db)!= 0)
++      {
++        table->compact_view_format= FALSE;
++        break;
++      }
++    }
++  }
++
++  buff->append(STRING_WITH_LEN("CREATE "));
++  if (!foreign_db_mode)
++  {
++    view_store_options(thd, table, buff);
++  }
++  buff->append(STRING_WITH_LEN("VIEW "));
++  if (!compact_view_name)
++  {
++    append_identifier(thd, buff, table->view_db.str, table->view_db.length);
++    buff->append('.');
++  }
++  append_identifier(thd, buff, table->view_name.str, table->view_name.length);
++  buff->append(STRING_WITH_LEN(" AS "));
++
++  /*
++    We can't just use table->query, because our SQL_MODE may trigger
++    a different syntax, like when ANSI_QUOTES is defined.
++  */
++  table->view->unit.print(buff, QT_ORDINARY);
++
++  if (table->with_check != VIEW_CHECK_NONE)
++  {
++    if (table->with_check == VIEW_CHECK_LOCAL)
++      buff->append(STRING_WITH_LEN(" WITH LOCAL CHECK OPTION"));
++    else
++      buff->append(STRING_WITH_LEN(" WITH CASCADED CHECK OPTION"));
++  }
++  return 0;
++}
++
++
++/****************************************************************************
++  Return info about all processes
++  returns for each thread: thread id, user, host, db, command, info
++****************************************************************************/
++
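++/* Per-thread snapshot of the data shown by SHOW PROCESSLIST. */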
++class thread_info :public ilink {
++public:
++  static void *operator new(size_t size)
++  {
++    return (void*) sql_alloc((uint) size);
++  }
++  static void operator delete(void *ptr __attribute__((unused)),
++                              size_t size __attribute__((unused)))
++  { TRASH(ptr, size); }
++
++  ulong thread_id;
++  time_t start_time;
++  uint   command;
++  const char *user,*host,*db,*proc_info,*state_info;
++  char *query;
++};
++
++#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
++template class I_List<thread_info>;
++#endif
++
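++/*
++  Implement SHOW [FULL] PROCESSLIST: take a snapshot of all threads under
++  LOCK_thread_count, then send one row per thread to the client.
++*/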
++void mysqld_list_processes(THD *thd,const char *user, bool verbose)
++{
++  Item *field;
++  List<Item> field_list;
++  I_List<thread_info> thread_infos;
++  ulong max_query_length= (verbose ? thd->variables.max_allowed_packet :
++			   PROCESS_LIST_WIDTH);
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysqld_list_processes");
++
++  field_list.push_back(new Item_int("Id", 0, MY_INT32_NUM_DECIMAL_DIGITS));
++  field_list.push_back(new Item_empty_string("User",16));
++  field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN));
++  field_list.push_back(field=new Item_empty_string("db",NAME_CHAR_LEN));
++  field->maybe_null=1;
++  field_list.push_back(new Item_empty_string("Command",16));
++  field_list.push_back(field= new Item_return_int("Time",7, MYSQL_TYPE_LONG));
++  field->unsigned_flag= 0;
++  field_list.push_back(field=new Item_empty_string("State",30));
++  field->maybe_null=1;
++  field_list.push_back(field=new Item_empty_string("Info",max_query_length));
++  field->maybe_null=1;
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_VOID_RETURN;
++
++  VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list
++  if (!thd->killed)
++  {
++    I_List_iterator<THD> it(threads);
++    THD *tmp;
++    while ((tmp=it++))
++    {
++      Security_context *tmp_sctx= tmp->security_ctx;
++      struct st_my_thread_var *mysys_var;
++      if ((tmp->vio_ok() || tmp->system_thread) &&
++          (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user))))
++      {
++        thread_info *thd_info= new thread_info;
++
++        thd_info->thread_id=tmp->thread_id;
++        thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user :
++                                    (tmp->system_thread ?
++                                     "system user" : "unauthenticated user"));
++	if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
++            thd->security_ctx->host_or_ip[0])
++	{
++	  if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1)))
++	    my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
++			"%s:%u", tmp_sctx->host_or_ip, tmp->peer_port);
++	}
++	else
++	  thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ? 
++                                      tmp_sctx->host_or_ip : 
++                                      tmp_sctx->host ? tmp_sctx->host : "");
++        if ((thd_info->db=tmp->db))             // Safe test
++          thd_info->db=thd->strdup(thd_info->db);
++        thd_info->command=(int) tmp->command;
++        if ((mysys_var= tmp->mysys_var))
++          pthread_mutex_lock(&mysys_var->mutex);
++        thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0);
++#ifndef EMBEDDED_LIBRARY
++        thd_info->state_info= (char*) (tmp->locked ? "Locked" :
++                                       tmp->net.reading_or_writing ?
++                                       (tmp->net.reading_or_writing == 2 ?
++                                        "Writing to net" :
++                                        thd_info->command == COM_SLEEP ? "" :
++                                        "Reading from net") :
++                                       tmp->proc_info ? tmp->proc_info :
++                                       tmp->mysys_var &&
++                                       tmp->mysys_var->current_cond ?
++                                       "Waiting on cond" : NullS);
++#else
++        thd_info->state_info= (char*)"Writing to net";
++#endif
++        if (mysys_var)
++          pthread_mutex_unlock(&mysys_var->mutex);
++
++        thd_info->start_time= tmp->start_time;
++        thd_info->query=0;
++        /* Lock THD mutex that protects its data when looking at it. */
++        pthread_mutex_lock(&tmp->LOCK_thd_data);
++        if (tmp->query())
++        {
++          uint length= MYSQL_MIN(max_query_length, tmp->query_length());
++          thd_info->query= (char*) thd->strmake(tmp->query(),length);
++        }
++        pthread_mutex_unlock(&tmp->LOCK_thd_data);
++        thread_infos.append(thd_info);
++      }
++    }
++  }
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++
++  thread_info *thd_info;
++  time_t now= my_time(0);
++  while ((thd_info=thread_infos.get()))
++  {
++    protocol->prepare_for_resend();
++    protocol->store((ulonglong) thd_info->thread_id);
++    protocol->store(thd_info->user, system_charset_info);
++    protocol->store(thd_info->host, system_charset_info);
++    protocol->store(thd_info->db, system_charset_info);
++    if (thd_info->proc_info)
++      protocol->store(thd_info->proc_info, system_charset_info);
++    else
++      protocol->store(command_name[thd_info->command].str, system_charset_info);
++    if (thd_info->start_time)
++      protocol->store_long ((longlong) (now - thd_info->start_time));
++    else
++      protocol->store_null();
++    protocol->store(thd_info->state_info, system_charset_info);
++    protocol->store(thd_info->query, system_charset_info);
++    if (protocol->write())
++      break; /* purecov: inspected */
++  }
++  my_eof(thd);
++  DBUG_VOID_RETURN;
++}
++
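++/*
++  Fill the INFORMATION_SCHEMA.PROCESSLIST table with the same data as
++  mysqld_list_processes(), stored through the schema table interface
++  instead of being sent directly to the client.
++*/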
++int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
++{
++  TABLE *table= tables->table;
++  CHARSET_INFO *cs= system_charset_info;
++  char *user;
++  time_t now= my_time(0);
++  DBUG_ENTER("fill_process_list");
++
++  user= thd->security_ctx->master_access & PROCESS_ACL ?
++        NullS : thd->security_ctx->priv_user;
++
++  VOID(pthread_mutex_lock(&LOCK_thread_count));
++
++  if (!thd->killed)
++  {
++    I_List_iterator<THD> it(threads);
++    THD* tmp;
++
++    while ((tmp= it++))
++    {
++      Security_context *tmp_sctx= tmp->security_ctx;
++      struct st_my_thread_var *mysys_var;
++      const char *val;
++
++      if ((!tmp->vio_ok() && !tmp->system_thread) ||
++          (user && (!tmp_sctx->user || strcmp(tmp_sctx->user, user))))
++        continue;
++
++      restore_record(table, s->default_values);
++      /* ID */
++      table->field[0]->store((longlong) tmp->thread_id, TRUE);
++      /* USER */
++      val= tmp_sctx->user ? tmp_sctx->user :
++            (tmp->system_thread ? "system user" : "unauthenticated user");
++      table->field[1]->store(val, strlen(val), cs);
++      /* HOST */
++      if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
++          thd->security_ctx->host_or_ip[0])
++      {
++        char host[LIST_PROCESS_HOST_LEN + 1];
++        my_snprintf(host, LIST_PROCESS_HOST_LEN, "%s:%u",
++                    tmp_sctx->host_or_ip, tmp->peer_port);
++        table->field[2]->store(host, strlen(host), cs);
++      }
++      else
++        table->field[2]->store(tmp_sctx->host_or_ip,
++                               strlen(tmp_sctx->host_or_ip), cs);
++      /* DB */
++      if (tmp->db)
++      {
++        table->field[3]->store(tmp->db, strlen(tmp->db), cs);
++        table->field[3]->set_notnull();
++      }
++
++      if ((mysys_var= tmp->mysys_var))
++        pthread_mutex_lock(&mysys_var->mutex);
++      /* COMMAND */
++      if ((val= (char *) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0)))
++        table->field[4]->store(val, strlen(val), cs);
++      else
++        table->field[4]->store(command_name[tmp->command].str,
++                               command_name[tmp->command].length, cs);
++      /* MYSQL_TIME */
++      table->field[5]->store((longlong)(tmp->start_time ?
++                                      now - tmp->start_time : 0), FALSE);
++      /* STATE */
++#ifndef EMBEDDED_LIBRARY
++      val= (char*) (tmp->locked ? "Locked" :
++                    tmp->net.reading_or_writing ?
++                    (tmp->net.reading_or_writing == 2 ?
++                     "Writing to net" :
++                     tmp->command == COM_SLEEP ? "" :
++                     "Reading from net") :
++                    tmp->proc_info ? tmp->proc_info :
++                    tmp->mysys_var &&
++                    tmp->mysys_var->current_cond ?
++                    "Waiting on cond" : NullS);
++#else
++      val= (char *) (tmp->proc_info ? tmp->proc_info : NullS);
++#endif
++      if (val)
++      {
++        table->field[6]->store(val, strlen(val), cs);
++        table->field[6]->set_notnull();
++      }
++
++      if (mysys_var)
++        pthread_mutex_unlock(&mysys_var->mutex);
++
++      /* INFO */
++      /* Lock THD mutex that protects its data when looking at it. */
++      pthread_mutex_lock(&tmp->LOCK_thd_data);
++      if (tmp->query())
++      {
++        table->field[7]->store(tmp->query(),
++                               MYSQL_MIN(PROCESS_LIST_INFO_WIDTH,
++                                         tmp->query_length()), cs);
++        table->field[7]->set_notnull();
++      }
++      pthread_mutex_unlock(&tmp->LOCK_thd_data);
++
++      if (schema_table_store_record(thd, table))
++      {
++        VOID(pthread_mutex_unlock(&LOCK_thread_count));
++        DBUG_RETURN(1);
++      }
++    }
++  }
++
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++  DBUG_RETURN(0);
++}
++
++/*****************************************************************************
++  Status functions
++*****************************************************************************/
++
++static DYNAMIC_ARRAY all_status_vars;
++static bool status_vars_inited= 0;
++static int show_var_cmp(const void *var1, const void *var2)
++{
++  return strcmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
++}
++
++/*
++  deletes all the SHOW_UNDEF elements from the array and calls
++  delete_dynamic() if it's completely empty.
++*/
++static void shrink_var_array(DYNAMIC_ARRAY *array)
++{
++  uint a,b;
++  SHOW_VAR *all= dynamic_element(array, 0, SHOW_VAR *);
++
++  for (a= b= 0; b < array->elements; b++)
++    if (all[b].type != SHOW_UNDEF)
++      all[a++]= all[b];
++  if (a)
++  {
++    bzero(all+a, sizeof(SHOW_VAR)); // writing NULL-element to the end
++    array->elements= a;
++  }
++  else // array is completely empty - delete it
++    delete_dynamic(array);
++}
++
++/*
++  Adds an array of SHOW_VAR entries to the output of SHOW STATUS
++
++  SYNOPSIS
++    add_status_vars(SHOW_VAR *list)
++    list - an array of SHOW_VAR entries to add to all_status_vars
++           the last entry must be {0,0,SHOW_UNDEF}
++
++  NOTE
++    The handling of all_status_vars[] is completely internal, it's allocated
++    automatically when something is added to it, and deleted completely when
++    the last entry is removed.
++
++    As a special optimization, if add_status_vars() is called before
++    init_status_vars(), it assumes "startup mode" - neither concurrent access
++    to the array nor SHOW STATUS are possible (thus it skips locks and qsort)
++
++    The last entry of the all_status_vars[] should always be {0,0,SHOW_UNDEF}
++*/
++int add_status_vars(SHOW_VAR *list)
++{
++  int res= 0;
++  if (status_vars_inited)
++    pthread_mutex_lock(&LOCK_status);
++  if (!all_status_vars.buffer && // array is not allocated yet - do it now
++      my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20))
++  {
++    res= 1;
++    goto err;
++  }
++  while (list->name)
++    res|= insert_dynamic(&all_status_vars, (uchar*)list++);
++  res|= insert_dynamic(&all_status_vars, (uchar*)list); // appending NULL-element
++  all_status_vars.elements--; // but the next insert_dynamic should overwrite it
++  if (status_vars_inited)
++    sort_dynamic(&all_status_vars, show_var_cmp);
++err:
++  if (status_vars_inited)
++    pthread_mutex_unlock(&LOCK_status);
++  return res;
++}
++
++/*
++  Make all_status_vars[] usable for SHOW STATUS
++
++  NOTE
++    See add_status_vars(). Before init_status_vars() call, add_status_vars()
++    works in a special fast "startup" mode. Thus init_status_vars()
++    should be called as late as possible but before enabling multi-threading.
++*/
++void init_status_vars()
++{
++  status_vars_inited=1;
++  sort_dynamic(&all_status_vars, show_var_cmp);
++}
++
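++/* Reset all registered SHOW_LONG status variables to zero. */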
++void reset_status_vars()
++{
++  SHOW_VAR *ptr= (SHOW_VAR*) all_status_vars.buffer;
++  SHOW_VAR *last= ptr + all_status_vars.elements;
++  for (; ptr < last; ptr++)
++  {
++    /* Note that SHOW_LONG_NOFLUSH variables are not reset */
++    if (ptr->type == SHOW_LONG)
++      *(ulong*) ptr->value= 0;
++  }  
++}
++
++/*
++  catch-all cleanup function, cleans up everything no matter what
++
++  DESCRIPTION
++    This function is not strictly required if all add_to_status/
++    remove_status_vars are properly paired, but it's a safety measure that
++    deletes everything from the all_status_vars[] even if some
++    remove_status_vars were forgotten
++*/
++void free_status_vars()
++{
++  delete_dynamic(&all_status_vars);
++}
++
++/*
++  Removes an array of SHOW_VAR entries from the output of SHOW STATUS
++
++  SYNOPSIS
++    remove_status_vars(SHOW_VAR *list)
++    list - an array of SHOW_VAR entries to remove from all_status_vars
++           the last entry must be {0,0,SHOW_UNDEF}
++
++  NOTE
++    there's lots of room for optimizing this, especially in non-sorted mode,
++    but nobody cares - it may be called only in case of failed plugin
++    initialization during mysqld startup.
++*/
++
++void remove_status_vars(SHOW_VAR *list)
++{
++  if (status_vars_inited)
++  {
++    pthread_mutex_lock(&LOCK_status);
++    SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
++    int a= 0, b= all_status_vars.elements, c= (a+b)/2;
++
++    for (; list->name; list++)
++    {
++      int res= 0;
++      for (a= 0, b= all_status_vars.elements; b-a > 1; c= (a+b)/2)
++      {
++        res= show_var_cmp(list, all+c);
++        if (res < 0)
++          b= c;
++        else if (res > 0)
++          a= c;
++        else
++          break;
++      }
++      if (res == 0)
++        all[c].type= SHOW_UNDEF;
++    }
++    shrink_var_array(&all_status_vars);
++    pthread_mutex_unlock(&LOCK_status);
++  }
++  else
++  {
++    SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
++    uint i;
++    for (; list->name; list++)
++    {
++      for (i= 0; i < all_status_vars.elements; i++)
++      {
++        if (show_var_cmp(list, all+i))
++          continue;
++        all[i].type= SHOW_UNDEF;
++        break;
++      }
++    }
++    shrink_var_array(&all_status_vars);
++  }
++}
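++
++/*
++  Usage sketch (illustrative only, the names below are hypothetical and not
++  part of the server sources): a plugin or storage engine registers its
++  counters with a SHOW_VAR array whose last entry is {0, 0, SHOW_UNDEF}, e.g.
++
++    static ulong example_calls;
++    static SHOW_VAR example_status[]=
++    {
++      {"Example_calls", (char*) &example_calls, SHOW_LONG},
++      {0, 0, SHOW_UNDEF}
++    };
++
++  It then calls add_status_vars(example_status) during initialization and
++  remove_status_vars(example_status) (or relies on free_status_vars()) on
++  shutdown or on failed initialization.
++*/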
++
++inline void make_upper(char *buf)
++{
++  for (; *buf; buf++)
++    *buf= my_toupper(system_charset_info, *buf);
++}
++
++static bool show_status_array(THD *thd, const char *wild,
++                              SHOW_VAR *variables,
++                              enum enum_var_type value_type,
++                              struct system_status_var *status_var,
++                              const char *prefix, TABLE *table,
++                              bool ucase_names,
++                              COND *cond)
++{
++  my_aligned_storage<SHOW_VAR_FUNC_BUFF_SIZE, MY_ALIGNOF(long)> buffer;
++  char * const buff= buffer.data;
++  char *prefix_end;
++  /* the variable name should not be longer than 64 characters */
++  char name_buffer[64];
++  int len;
++  LEX_STRING null_lex_str;
++  SHOW_VAR tmp, *var;
++  COND *partial_cond= 0;
++  enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
++  bool res= FALSE;
++  CHARSET_INFO *charset= system_charset_info;
++  DBUG_ENTER("show_status_array");
++
++  thd->count_cuted_fields= CHECK_FIELD_WARN;  
++  null_lex_str.str= 0;				// For sys_var->value_ptr()
++  null_lex_str.length= 0;
++
++  prefix_end=strnmov(name_buffer, prefix, sizeof(name_buffer)-1);
++  if (*prefix)
++    *prefix_end++= '_';
++  len=name_buffer + sizeof(name_buffer) - prefix_end;
++  partial_cond= make_cond_for_info_schema(cond, table->pos_in_table_list);
++
++  for (; variables->name; variables++)
++  {
++    strnmov(prefix_end, variables->name, len);
++    name_buffer[sizeof(name_buffer)-1]=0;       /* Safety */
++    if (ucase_names)
++      make_upper(name_buffer);
++
++    restore_record(table, s->default_values);
++    table->field[0]->store(name_buffer, strlen(name_buffer),
++                           system_charset_info);
++    /*
++      if var->type is SHOW_FUNC, call the function.
++      Repeat as necessary, if new var is again SHOW_FUNC
++    */
++    for (var=variables; var->type == SHOW_FUNC; var= &tmp)
++      ((mysql_show_var_func)(var->value))(thd, &tmp, buff);
++
++    SHOW_TYPE show_type=var->type;
++    if (show_type == SHOW_ARRAY)
++    {
++      show_status_array(thd, wild, (SHOW_VAR *) var->value, value_type,
++                        status_var, name_buffer, table, ucase_names, partial_cond);
++    }
++    else
++    {
++      if (!(wild && wild[0] && wild_case_compare(system_charset_info,
++                                                 name_buffer, wild)) &&
++          (!partial_cond || partial_cond->val_int()))
++      {
++        char *value=var->value;
++        const char *pos, *end;                  // We assign a lot of const's
++
++        pthread_mutex_lock(&LOCK_global_system_variables);
++
++        if (show_type == SHOW_SYS)
++        {
++          sys_var *var= ((sys_var *) value);
++          show_type= var->show_type();
++          value= (char*) var->value_ptr(thd, value_type, &null_lex_str);
++          charset= var->charset(thd);
++        }
++
++        pos= end= buff;
++        /*
++          note that value may be == buff. All SHOW_xxx code below
++          should still work in this case
++        */
++        switch (show_type) {
++        case SHOW_DOUBLE_STATUS:
++          value= ((char *) status_var + (ulong) value);
++          /* fall through */
++        case SHOW_DOUBLE:
++          end= buff + sprintf(buff, "%f", *(double*) value);
++          break;
++        case SHOW_LONG_STATUS:
++          value= ((char *) status_var + (ulong) value);
++          /* fall through */
++        case SHOW_LONG:
++        case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status()
++          end= int10_to_str(*(long*) value, buff, 10);
++          break;
++        case SHOW_LONGLONG_STATUS:
++          value= ((char *) status_var + (ulonglong) value);
++          /* fall through */
++        case SHOW_LONGLONG:
++          end= longlong10_to_str(*(longlong*) value, buff, 10);
++          break;
++        case SHOW_HA_ROWS:
++          end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10);
++          break;
++        case SHOW_BOOL:
++          end= strmov(buff, *(bool*) value ? "ON" : "OFF");
++          break;
++        case SHOW_MY_BOOL:
++          end= strmov(buff, *(my_bool*) value ? "ON" : "OFF");
++          break;
++        case SHOW_INT:
++          end= int10_to_str((long) *(uint32*) value, buff, 10);
++          break;
++        case SHOW_HAVE:
++        {
++          SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value;
++          pos= show_comp_option_name[(int) tmp];
++          end= strend(pos);
++          break;
++        }
++        case SHOW_CHAR:
++        {
++          if (!(pos= value))
++            pos= "";
++          end= strend(pos);
++          break;
++        }
++       case SHOW_CHAR_PTR:
++        {
++          if (!(pos= *(char**) value))
++            pos= "";
++          end= strend(pos);
++          break;
++        }
++        case SHOW_KEY_CACHE_LONG:
++          value= (char*) dflt_key_cache + (ulong)value;
++          end= int10_to_str(*(long*) value, buff, 10);
++          break;
++        case SHOW_KEY_CACHE_LONGLONG:
++          value= (char*) dflt_key_cache + (ulong)value;
++	  end= longlong10_to_str(*(longlong*) value, buff, 10);
++	  break;
++        case SHOW_UNDEF:
++          break;                                        // Return empty string
++        case SHOW_SYS:                                  // Cannot happen
++        default:
++          DBUG_ASSERT(0);
++          break;
++        }
++        table->field[1]->store(pos, (uint32) (end - pos), charset);
++        thd->count_cuted_fields= CHECK_FIELD_IGNORE;
++        table->field[1]->set_notnull();
++
++        pthread_mutex_unlock(&LOCK_global_system_variables);
++
++        if (schema_table_store_record(thd, table))
++        {
++          res= TRUE;
++          goto end;
++        }
++      }
++    }
++  }
++end:
++  thd->count_cuted_fields= save_count_cuted_fields;
++  DBUG_RETURN(res);
++}
++
++
++/* collect status for all running threads */
++
++void calc_sum_of_all_status(STATUS_VAR *to)
++{
++  DBUG_ENTER("calc_sum_of_all_status");
++
++  /* Ensure that thread id not killed during loop */
++  VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list
++
++  I_List_iterator<THD> it(threads);
++  THD *tmp;
++  
++  /* Get global values as base */
++  *to= global_status_var;
++  
++  /* Add to this status from existing threads */
++  while ((tmp= it++))
++    add_to_status(to, &tmp->status_var);
++  
++  VOID(pthread_mutex_unlock(&LOCK_thread_count));
++  DBUG_VOID_RETURN;
++}
++
++
++/* This is only used internally, but we need it here as a forward reference */
++extern ST_SCHEMA_TABLE schema_tables[];
++
++typedef struct st_lookup_field_values
++{
++  LEX_STRING db_value, table_value;
++  bool wild_db_value, wild_table_value;
++} LOOKUP_FIELD_VALUES;
++
++
++/*
++  Store record to I_S table, convert HEAP table
++  to MyISAM if necessary
++
++  SYNOPSIS
++    schema_table_store_record()
++    thd                   thread handler
++    table                 Information schema table to be updated
++
++  RETURN
++    0	                  success
++    1	                  error
++*/
++
++bool schema_table_store_record(THD *thd, TABLE *table)
++{
++  int error;
++  if ((error= table->file->ha_write_row(table->record[0])))
++  {
++    if (create_myisam_from_heap(thd, table, 
++                                table->pos_in_table_list->schema_table_param,
++                                error, 0))
++      return 1;
++  }
++  return 0;
++}
++
++
++int make_table_list(THD *thd, SELECT_LEX *sel,
++                    LEX_STRING *db_name, LEX_STRING *table_name)
++{
++  Table_ident *table_ident;
++  table_ident= new Table_ident(thd, *db_name, *table_name, 1);
++  sel->init_query();
++  if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ))
++    return 1;
++  return 0;
++}
++
++
++/**
++  @brief    Get lookup value from the part of 'WHERE' condition 
++
++  @details This function gets the lookup value from
++           a part of the 'WHERE' condition if possible and
++           fills the appropriate lookup_field_vals struct field
++           with this value.
++
++  @param[in]      thd                   thread handler
++  @param[in]      item_func             part of WHERE condition
++  @param[in]      table                 I_S table
++  @param[in, out] lookup_field_vals     Struct which holds lookup values 
++
++  @return
++    0             success
++    1             error, there can be no matching records for the condition
++*/
++
++bool get_lookup_value(THD *thd, Item_func *item_func,
++                      TABLE_LIST *table, 
++                      LOOKUP_FIELD_VALUES *lookup_field_vals)
++{
++  ST_SCHEMA_TABLE *schema_table= table->schema_table;
++  ST_FIELD_INFO *field_info= schema_table->fields_info;
++  const char *field_name1= schema_table->idx_field1 >= 0 ?
++    field_info[schema_table->idx_field1].field_name : "";
++  const char *field_name2= schema_table->idx_field2 >= 0 ?
++    field_info[schema_table->idx_field2].field_name : "";
++
++  if (item_func->functype() == Item_func::EQ_FUNC ||
++      item_func->functype() == Item_func::EQUAL_FUNC)
++  {
++    int idx_field, idx_val;
++    char tmp[MAX_FIELD_WIDTH];
++    String *tmp_str, str_buff(tmp, sizeof(tmp), system_charset_info);
++    Item_field *item_field;
++    CHARSET_INFO *cs= system_charset_info;
++
++    if (item_func->arguments()[0]->type() == Item::FIELD_ITEM &&
++        item_func->arguments()[1]->const_item())
++    {
++      idx_field= 0;
++      idx_val= 1;
++    }
++    else if (item_func->arguments()[1]->type() == Item::FIELD_ITEM &&
++             item_func->arguments()[0]->const_item())
++    {
++      idx_field= 1;
++      idx_val= 0;
++    }
++    else
++      return 0;
++
++    item_field= (Item_field*) item_func->arguments()[idx_field];
++    if (table->table != item_field->field->table)
++      return 0;
++    tmp_str= item_func->arguments()[idx_val]->val_str(&str_buff);
++
++    /* impossible value */
++    if (!tmp_str)
++      return 1;
++
++    /* Lookup value is database name */
++    if (!cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
++                               (uchar *) item_field->field_name,
++                               strlen(item_field->field_name), 0))
++    {
++      thd->make_lex_string(&lookup_field_vals->db_value, tmp_str->ptr(),
++                           tmp_str->length(), FALSE);
++    }
++    /* Lookup value is table name */
++    else if (!cs->coll->strnncollsp(cs, (uchar *) field_name2,
++                                    strlen(field_name2),
++                                    (uchar *) item_field->field_name,
++                                    strlen(item_field->field_name), 0))
++    {
++      thd->make_lex_string(&lookup_field_vals->table_value, tmp_str->ptr(),
++                           tmp_str->length(), FALSE);
++    }
++  }
++  return 0;
++}
++
++
++/**
++  @brief    Calculates lookup values from 'WHERE' condition 
++
++  @details This function calculates the lookup values (database name, table name)
++           from the 'WHERE' condition if possible and
++           fills the lookup_field_vals struct fields with these values.
++
++  @param[in]      thd                   thread handler
++  @param[in]      cond                  WHERE condition
++  @param[in]      table                 I_S table
++  @param[in, out] lookup_field_vals     Struct which holds lookup values 
++
++  @return
++    0             success
++    1             error, there can be no matching records for the condition
++*/
++
++bool calc_lookup_values_from_cond(THD *thd, COND *cond, TABLE_LIST *table,
++                                  LOOKUP_FIELD_VALUES *lookup_field_vals)
++{
++  if (!cond)
++    return 0;
++
++  if (cond->type() == Item::COND_ITEM)
++  {
++    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++    {
++      List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++      Item *item;
++      while ((item= li++))
++      {
++        if (item->type() == Item::FUNC_ITEM)
++        {
++          if (get_lookup_value(thd, (Item_func*)item, table, lookup_field_vals))
++            return 1;
++        }
++        else
++        {
++          if (calc_lookup_values_from_cond(thd, item, table, lookup_field_vals))
++            return 1;
++        }
++      }
++    }
++    return 0;
++  }
++  else if (cond->type() == Item::FUNC_ITEM &&
++           get_lookup_value(thd, (Item_func*) cond, table, lookup_field_vals))
++    return 1;
++  return 0;
++}
++
++
++bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
++{
++  if (item->type() == Item::FUNC_ITEM)
++  {
++    Item_func *item_func= (Item_func*)item;
++    for (uint i=0; i<item_func->argument_count(); i++)
++    {
++      if (!uses_only_table_name_fields(item_func->arguments()[i], table))
++        return 0;
++    }
++  }
++  else if (item->type() == Item::FIELD_ITEM)
++  {
++    Item_field *item_field= (Item_field*)item;
++    CHARSET_INFO *cs= system_charset_info;
++    ST_SCHEMA_TABLE *schema_table= table->schema_table;
++    ST_FIELD_INFO *field_info= schema_table->fields_info;
++    const char *field_name1= schema_table->idx_field1 >= 0 ?
++      field_info[schema_table->idx_field1].field_name : "";
++    const char *field_name2= schema_table->idx_field2 >= 0 ?
++      field_info[schema_table->idx_field2].field_name : "";
++    if (table->table != item_field->field->table ||
++        (cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
++                               (uchar *) item_field->field_name,
++                               strlen(item_field->field_name), 0) &&
++         cs->coll->strnncollsp(cs, (uchar *) field_name2, strlen(field_name2),
++                               (uchar *) item_field->field_name,
++                               strlen(item_field->field_name), 0)))
++      return 0;
++  }
++  else if (item->type() == Item::REF_ITEM)
++    return uses_only_table_name_fields(item->real_item(), table);
++
++  if (item->type() == Item::SUBSELECT_ITEM && !item->const_item())
++    return 0;
++
++  return 1;
++}
++
++
++static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table)
++{
++  if (!cond)
++    return (COND*) 0;
++  if (cond->type() == Item::COND_ITEM)
++  {
++    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
++    {
++      /* Create new top level AND item */
++      Item_cond_and *new_cond=new Item_cond_and;
++      if (!new_cond)
++	return (COND*) 0;
++      List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++      Item *item;
++      while ((item=li++))
++      {
++	Item *fix= make_cond_for_info_schema(item, table);
++	if (fix)
++	  new_cond->argument_list()->push_back(fix);
++      }
++      switch (new_cond->argument_list()->elements) {
++      case 0:
++	return (COND*) 0;
++      case 1:
++	return new_cond->argument_list()->head();
++      default:
++	new_cond->quick_fix_field();
++	return new_cond;
++      }
++    }
++    else
++    {						// Or list
++      Item_cond_or *new_cond=new Item_cond_or;
++      if (!new_cond)
++	return (COND*) 0;
++      List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
++      Item *item;
++      while ((item=li++))
++      {
++	Item *fix=make_cond_for_info_schema(item, table);
++	if (!fix)
++	  return (COND*) 0;
++	new_cond->argument_list()->push_back(fix);
++      }
++      new_cond->quick_fix_field();
++      new_cond->top_level_item();
++      return new_cond;
++    }
++  }
++
++  if (!uses_only_table_name_fields(cond, table))
++    return (COND*) 0;
++  return cond;
++}
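++
++/*
++  Illustrative example (not part of the original sources): for a query like
++    SELECT * FROM information_schema.tables
++    WHERE table_schema = 'test' AND engine = 'MyISAM';
++  only the table_schema comparison uses the name fields of I_S.TABLES, so
++  make_cond_for_info_schema() keeps just that conjunct as the pushed-down
++  condition; the engine comparison is evaluated later over the filled table.
++*/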
++
++
++/**
++  @brief   Calculate lookup values (database name, table name)
++
++  @details This function calculates the lookup values (database name, table name)
++           from the 'WHERE' condition, or from wild values (for 'SHOW' commands
++           only) taken from the LEX struct, and fills the lookup_field_vals
++           struct fields with these values.
++
++  @param[in]      thd                   thread handler
++  @param[in]      cond                  WHERE condition
++  @param[in]      tables                I_S table
++  @param[in, out] lookup_field_values   Struct which holds lookup values 
++
++  @return
++    0             success
++    1             error, there can be no matching records for the condition
++*/
++
++bool get_lookup_field_values(THD *thd, COND *cond, TABLE_LIST *tables,
++                             LOOKUP_FIELD_VALUES *lookup_field_values)
++{
++  LEX *lex= thd->lex;
++  const char *wild= lex->wild ? lex->wild->ptr() : NullS;
++  bool rc= 0;
++
++  bzero((char*) lookup_field_values, sizeof(LOOKUP_FIELD_VALUES));
++  switch (lex->sql_command) {
++  case SQLCOM_SHOW_DATABASES:
++    if (wild)
++    {
++      thd->make_lex_string(&lookup_field_values->db_value, 
++                           wild, strlen(wild), 0);
++      lookup_field_values->wild_db_value= 1;
++    }
++    break;
++  case SQLCOM_SHOW_TABLES:
++  case SQLCOM_SHOW_TABLE_STATUS:
++  case SQLCOM_SHOW_TRIGGERS:
++  case SQLCOM_SHOW_EVENTS:
++    thd->make_lex_string(&lookup_field_values->db_value, 
++                         lex->select_lex.db, strlen(lex->select_lex.db), 0);
++    if (wild)
++    {
++      thd->make_lex_string(&lookup_field_values->table_value, 
++                           wild, strlen(wild), 0);
++      lookup_field_values->wild_table_value= 1;
++    }
++    break;
++  default:
++    /*
++      The "default" is for queries over I_S.
++      All previous cases handle SHOW commands.
++    */
++    rc= calc_lookup_values_from_cond(thd, cond, tables, lookup_field_values);
++    break;
++  }
++
++  if (lower_case_table_names && !rc)
++  {
++    /* 
++      We can safely do in-place upgrades here since all of the above cases
++      are allocating a new memory buffer for these strings.
++    */  
++    if (lookup_field_values->db_value.str && lookup_field_values->db_value.str[0])
++      my_casedn_str(system_charset_info, lookup_field_values->db_value.str);
++    if (lookup_field_values->table_value.str && 
++        lookup_field_values->table_value.str[0])
++      my_casedn_str(system_charset_info, lookup_field_values->table_value.str);
++  }
++
++  return rc;
++}
++
++
++enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table)
++{
++  return (enum enum_schema_tables) (schema_table - &schema_tables[0]);
++}
++
++
++/*
++  Create db names list. The information schema name is always first in the list
++
++  SYNOPSIS
++    make_db_list()
++    thd                   thread handler
++    files                 list of db names
++    wild                  wild string
++    idx_field_vals        idx_field_vals->db_name contains db name or
++                          wild string
++    with_i_schema         returns 1 if we added 'IS' name to list
++                          otherwise returns 0 
++
++  RETURN
++    zero                  success
++    non-zero              error
++*/
++
++int make_db_list(THD *thd, List<LEX_STRING> *files,
++                 LOOKUP_FIELD_VALUES *lookup_field_vals,
++                 bool *with_i_schema)
++{
++  LEX_STRING *i_s_name_copy= 0;
++  i_s_name_copy= thd->make_lex_string(i_s_name_copy,
++                                      INFORMATION_SCHEMA_NAME.str,
++                                      INFORMATION_SCHEMA_NAME.length, TRUE);
++  *with_i_schema= 0;
++  if (lookup_field_vals->wild_db_value)
++  {
++    /*
++      This part of code is only for SHOW DATABASES command.
++      idx_field_vals->db_value can be 0 when we don't use
++      LIKE clause (see also get_index_field_values() function)
++    */
++    if (!lookup_field_vals->db_value.str ||
++        !wild_case_compare(system_charset_info, 
++                           INFORMATION_SCHEMA_NAME.str,
++                           lookup_field_vals->db_value.str))
++    {
++      *with_i_schema= 1;
++      if (files->push_back(i_s_name_copy))
++        return 1;
++    }
++    return (find_files(thd, files, NullS, mysql_data_home,
++                       lookup_field_vals->db_value.str, 1) != FIND_FILES_OK);
++  }
++
++
++  /*
++    If we have a db lookup value we just add it to the list and
++    exit from the function
++  */
++  if (lookup_field_vals->db_value.str)
++  {
++    if (is_schema_db(lookup_field_vals->db_value.str, 
++                     lookup_field_vals->db_value.length))
++    {
++      *with_i_schema= 1;
++      if (files->push_back(i_s_name_copy))
++        return 1;
++      return 0;
++    }
++    if (files->push_back(&lookup_field_vals->db_value))
++      return 1;
++    return 0;
++  }
++
++  /*
++    Create the list of existing databases. It is used in case
++    of a select from an information schema table
++  */
++  if (files->push_back(i_s_name_copy))
++    return 1;
++  *with_i_schema= 1;
++  return (find_files(thd, files, NullS,
++                     mysql_data_home, NullS, 1) != FIND_FILES_OK);
++}
++
++
++struct st_add_schema_table 
++{
++  List<LEX_STRING> *files;
++  const char *wild;
++};
++
++
++static my_bool add_schema_table(THD *thd, plugin_ref plugin,
++                                void* p_data)
++{
++  LEX_STRING *file_name= 0;
++  st_add_schema_table *data= (st_add_schema_table *)p_data;
++  List<LEX_STRING> *file_list= data->files;
++  const char *wild= data->wild;
++  ST_SCHEMA_TABLE *schema_table= plugin_data(plugin, ST_SCHEMA_TABLE *);
++  DBUG_ENTER("add_schema_table");
++
++  if (schema_table->hidden)
++      DBUG_RETURN(0);
++  if (wild)
++  {
++    if (lower_case_table_names)
++    {
++      if (wild_case_compare(files_charset_info,
++                            schema_table->table_name,
++                            wild))
++        DBUG_RETURN(0);
++    }
++    else if (wild_compare(schema_table->table_name, wild, 0))
++      DBUG_RETURN(0);
++  }
++
++  if ((file_name= thd->make_lex_string(file_name, schema_table->table_name,
++                                       strlen(schema_table->table_name),
++                                       TRUE)) &&
++      !file_list->push_back(file_name))
++    DBUG_RETURN(0);
++  DBUG_RETURN(1);
++}
++
++
++int schema_tables_add(THD *thd, List<LEX_STRING> *files, const char *wild)
++{
++  LEX_STRING *file_name= 0;
++  ST_SCHEMA_TABLE *tmp_schema_table= schema_tables;
++  st_add_schema_table add_data;
++  DBUG_ENTER("schema_tables_add");
++
++  for (; tmp_schema_table->table_name; tmp_schema_table++)
++  {
++    if (tmp_schema_table->hidden)
++      continue;
++    if (wild)
++    {
++      if (lower_case_table_names)
++      {
++        if (wild_case_compare(files_charset_info,
++                              tmp_schema_table->table_name,
++                              wild))
++          continue;
++      }
++      else if (wild_compare(tmp_schema_table->table_name, wild, 0))
++        continue;
++    }
++    if ((file_name= 
++         thd->make_lex_string(file_name, tmp_schema_table->table_name,
++                              strlen(tmp_schema_table->table_name), TRUE)) &&
++        !files->push_back(file_name))
++      continue;
++    DBUG_RETURN(1);
++  }
++
++  add_data.files= files;
++  add_data.wild= wild;
++  if (plugin_foreach(thd, add_schema_table,
++                     MYSQL_INFORMATION_SCHEMA_PLUGIN, &add_data))
++      DBUG_RETURN(1);
++
++  DBUG_RETURN(0);
++}
++
++
++/**
++  @brief          Create table names list
++
++  @details        The function creates the list of table names in the
++                  given database
++
++  @param[in]      thd                   thread handler
++  @param[in]      table_names           List of table names in database
++  @param[in]      lex                   pointer to LEX struct
++  @param[in]      lookup_field_vals     pointer to LOOKUP_FIELD_VALUE struct
++  @param[in]      with_i_schema         TRUE means that we add I_S tables to list
++  @param[in]      db_name               database name
++
++  @return         Operation status
++    @retval       0           ok
++    @retval       1           fatal error
++    @retval       2           Not fatal error; Safe to ignore this file list
++*/
++
++static int
++make_table_name_list(THD *thd, List<LEX_STRING> *table_names, LEX *lex,
++                     LOOKUP_FIELD_VALUES *lookup_field_vals,
++                     bool with_i_schema, LEX_STRING *db_name)
++{
++  char path[FN_REFLEN + 1];
++  build_table_filename(path, sizeof(path) - 1, db_name->str, "", "", 0);
++  if (!lookup_field_vals->wild_table_value &&
++      lookup_field_vals->table_value.str)
++  {
++    if (with_i_schema)
++    {
++      LEX_STRING *name;
++      ST_SCHEMA_TABLE *schema_table=
++        find_schema_table(thd, lookup_field_vals->table_value.str);
++      if (schema_table && !schema_table->hidden)
++      {
++        if (!(name= 
++              thd->make_lex_string(NULL, schema_table->table_name,
++                                   strlen(schema_table->table_name), TRUE)) ||
++            table_names->push_back(name))
++          return 1;
++      }
++    }
++    else
++    {    
++      if (table_names->push_back(&lookup_field_vals->table_value))
++        return 1;
++      /*
++        Check that table is relevant in current transaction.
++        (used for ndb engine, see ndbcluster_find_files(), ha_ndbcluster.cc)
++      */
++      VOID(ha_find_files(thd, db_name->str, path,
++                         lookup_field_vals->table_value.str, 0,
++                         table_names));
++    }
++    return 0;
++  }
++
++  /*
++    This call will add all I_S tables matching the wildcard (if specified)
++    to the list
++  */
++  if (with_i_schema)
++    return (schema_tables_add(thd, table_names,
++                              lookup_field_vals->table_value.str));
++
++  find_files_result res= find_files(thd, table_names, db_name->str, path,
++                                    lookup_field_vals->table_value.str, 0);
++  if (res != FIND_FILES_OK)
++  {
++    /*
++      Downgrade errors about problems with database directory to
++      warnings if this is not a 'SHOW' command.  Another thread
++      may have dropped the database, and we may still have a name
++      for that directory.
++    */
++    if (res == FIND_FILES_DIR)
++    {
++      if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND)
++        return 1;
++      thd->clear_error();
++      return 2;
++    }
++    return 1;
++  }
++  return 0;
++}
++
++
++/**
++  @brief          Fill I_S table for SHOW COLUMNS|INDEX commands
++
++  @param[in]      thd                      thread handler
++  @param[in]      tables                   TABLE_LIST for I_S table
++  @param[in]      schema_table             pointer to I_S structure
++  @param[in]      open_tables_state_backup pointer to Open_tables_state object
++                                           which is used to save|restore original
++                                           status of variables related to
++                                           open tables state
++
++  @return         Operation status
++    @retval       0           success
++    @retval       1           error
++*/
++
++static int 
++fill_schema_show_cols_or_idxs(THD *thd, TABLE_LIST *tables,
++                              ST_SCHEMA_TABLE *schema_table,
++                              Open_tables_state *open_tables_state_backup)
++{
++  LEX *lex= thd->lex;
++  bool res;
++  LEX_STRING tmp_lex_string, tmp_lex_string1, *db_name, *table_name;
++  enum_sql_command save_sql_command= lex->sql_command;
++  TABLE_LIST *show_table_list= tables->schema_select_lex->table_list.first;
++  TABLE *table= tables->table;
++  int error= 1;
++  DBUG_ENTER("fill_schema_show");
++
++  lex->all_selects_list= tables->schema_select_lex;
++  /*
++    Restore thd->temporary_tables to be able to process
++    temporary tables (only for 'show index' & 'show columns').
++    This should be changed when processing of temporary tables for
++    I_S tables is implemented.
++  */
++  thd->temporary_tables= open_tables_state_backup->temporary_tables;
++  /*
++    Let us set fake sql_command so views won't try to merge
++    themselves into main statement. If we don't do this,
++    SELECT * from information_schema.xxxx will cause problems.
++    SQLCOM_SHOW_FIELDS is used because it satisfies 'only_view_structure()' 
++  */
++  lex->sql_command= SQLCOM_SHOW_FIELDS;
++  res= open_normal_and_derived_tables(thd, show_table_list,
++                                      MYSQL_LOCK_IGNORE_FLUSH);
++  lex->sql_command= save_sql_command;
++  /*
++    get_all_tables() returns 1 on failure and 0 on success, thus we
++    return only these values and not the result code of ::process_table()
++
++    We should use show_table_list->alias instead of 
++    show_table_list->table_name because table_name
++    could be changed during opening of I_S tables. It's safe
++    to use alias because alias contains the original table name
++    in this case (this part of the code is used only for 
++    'show columns' & 'show statistics' commands).
++  */
++   table_name= thd->make_lex_string(&tmp_lex_string1, show_table_list->alias,
++                                    strlen(show_table_list->alias), FALSE);
++   if (!show_table_list->view)
++     db_name= thd->make_lex_string(&tmp_lex_string, show_table_list->db,
++                                   show_table_list->db_length, FALSE);
++   else
++     db_name= &show_table_list->view_db;
++      
++
++   error= test(schema_table->process_table(thd, show_table_list,
++                                           table, res, db_name,
++                                           table_name));
++   thd->temporary_tables= 0;
++   close_tables_for_reopen(thd, &show_table_list);
++   DBUG_RETURN(error);
++}
++
++
++/**
++  @brief          Fill I_S table for SHOW TABLE NAMES commands
++
++  @param[in]      thd                      thread handler
++  @param[in]      table                    TABLE struct for I_S table
++  @param[in]      db_name                  database name
++  @param[in]      table_name               table name
++  @param[in]      with_i_schema            I_S table if TRUE
++
++  @return         Operation status
++    @retval       0           success
++    @retval       1           error
++*/
++
++static int fill_schema_table_names(THD *thd, TABLE *table,
++                                   LEX_STRING *db_name, LEX_STRING *table_name,
++                                   bool with_i_schema)
++{
++  if (with_i_schema)
++  {
++    table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"),
++                           system_charset_info);
++  }
++  else
++  {
++    enum legacy_db_type not_used;
++    char path[FN_REFLEN + 1];
++    (void) build_table_filename(path, sizeof(path) - 1, db_name->str, 
++                                table_name->str, reg_ext, 0);
++    switch (mysql_frm_type(thd, path, &not_used)) {
++    case FRMTYPE_ERROR:
++      table->field[3]->store(STRING_WITH_LEN("ERROR"),
++                             system_charset_info);
++      break;
++    case FRMTYPE_TABLE:
++      table->field[3]->store(STRING_WITH_LEN("BASE TABLE"),
++                             system_charset_info);
++      break;
++    case FRMTYPE_VIEW:
++      table->field[3]->store(STRING_WITH_LEN("VIEW"),
++                             system_charset_info);
++      break;
++    default:
++      DBUG_ASSERT(0);
++    }
++    if (thd->is_error() && thd->main_da.sql_errno() == ER_NO_SUCH_TABLE)
++    {
++      thd->clear_error();
++      return 0;
++    }
++  }
++  if (schema_table_store_record(thd, table))
++    return 1;
++  return 0;
++}
++
++
++/**
++  @brief          Get open table method
++
++  @details        The function calculates the method which will be used
++                  for table opening:
++                  SKIP_OPEN_TABLE - do not open table
++                  OPEN_FRM_ONLY   - open FRM file only
++                  OPEN_FULL_TABLE - open FRM, data, index files
++  @param[in]      tables               I_S table table_list
++  @param[in]      schema_table         I_S table struct
++  @param[in]      schema_table_idx     I_S table index
++
++  @return         return a set of flags
++    @retval       SKIP_OPEN_TABLE | OPEN_FRM_ONLY | OPEN_FULL_TABLE
++*/
++
++uint get_table_open_method(TABLE_LIST *tables,
++                                  ST_SCHEMA_TABLE *schema_table,
++                                  enum enum_schema_tables schema_table_idx)
++{
++  /*
++    determine which method will be used for table opening
++  */
++  if (schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
++  {
++    Field **ptr, *field;
++    int table_open_method= 0, field_indx= 0;
++    uint star_table_open_method= OPEN_FULL_TABLE;
++    bool used_star= true;                  // true if '*' is used in select
++    for (ptr=tables->table->field; (field= *ptr) ; ptr++)
++    {
++      star_table_open_method=
++        min(star_table_open_method,
++            schema_table->fields_info[field_indx].open_method);
++      if (bitmap_is_set(tables->table->read_set, field->field_index))
++      {
++        used_star= false;
++        table_open_method|= schema_table->fields_info[field_indx].open_method;
++      }
++      field_indx++;
++    }
++    if (used_star)
++      return star_table_open_method;
++    return table_open_method;
++  }
++  /* I_S tables which use get_all_tables but cannot be optimized */
++  return (uint) OPEN_FULL_TABLE;
++}
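++
++/*
++  Illustrative note (not part of the original sources): for I_S.TABLES,
++  a query that only references the name columns (TABLE_SCHEMA, TABLE_NAME)
++  can be served with SKIP_OPEN_TABLE, i.e. without opening the .frm file or
++  the storage engine, while columns backed by engine statistics require
++  OPEN_FULL_TABLE.
++*/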
++
++
++/**
++  @brief          Fill I_S table with data from FRM file only
++
++  @param[in]      thd                      thread handler
++  @param[in]      table                    TABLE struct for I_S table
++  @param[in]      schema_table             I_S table struct
++  @param[in]      db_name                  database name
++  @param[in]      table_name               table name
++  @param[in]      schema_table_idx         I_S table index
++
++  @return         Operation status
++    @retval       0           Table is processed and we can continue
++                              with new table
++    @retval       1           It's view and we have to use
++                              open_tables function for this table
++*/
++
++static int fill_schema_table_from_frm(THD *thd,TABLE *table,
++                                      ST_SCHEMA_TABLE *schema_table, 
++                                      LEX_STRING *db_name,
++                                      LEX_STRING *table_name,
++                                      enum enum_schema_tables schema_table_idx)
++{
++  TABLE_SHARE *share;
++  TABLE tbl;
++  TABLE_LIST table_list;
++  uint res= 0;
++  int error;
++  char key[MAX_DBKEY_LENGTH];
++  uint key_length;
++
++  bzero((char*) &table_list, sizeof(TABLE_LIST));
++  bzero((char*) &tbl, sizeof(TABLE));
++
++  table_list.table_name= table_name->str;
++  table_list.db= db_name->str;
++  key_length= create_table_def_key(thd, key, &table_list, 0);
++  pthread_mutex_lock(&LOCK_open);
++  share= get_table_share(thd, &table_list, key,
++                         key_length, OPEN_VIEW, &error);
++  if (!share)
++  {
++    res= 0;
++    goto err;
++  }
++ 
++  if (share->is_view)
++  {
++    if (schema_table->i_s_requested_object & OPEN_TABLE_ONLY)
++    {
++      /* skip view processing */
++      res= 0;
++      goto err1;
++    }
++    else if (schema_table->i_s_requested_object & OPEN_VIEW_FULL)
++    {
++      /*
++        tell get_all_tables() to fall back to 
++        open_normal_and_derived_tables()
++      */
++      res= 1;
++      goto err1;
++    }
++  }
++
++  if (share->is_view ||
++      !open_table_from_share(thd, share, table_name->str, 0,
++                             (READ_KEYINFO | COMPUTE_TYPES |
++                              EXTRA_RECORD | OPEN_FRM_FILE_ONLY),
++                             thd->open_options, &tbl, FALSE))
++  {
++    tbl.s= share;
++    table_list.table= &tbl;
++    table_list.view= (st_lex*) share->is_view;
++    res= schema_table->process_table(thd, &table_list, table,
++                                     res, db_name, table_name);
++    closefrm(&tbl, true);
++    goto err;
++  }
++
++err1:
++  release_table_share(share, RELEASE_NORMAL);
++
++err:
++  pthread_mutex_unlock(&LOCK_open);
++  thd->clear_error();
++  return res;
++}
++
++
++
++/**
++  @brief          Fill I_S tables whose data are retrieved
++                  from frm files and storage engine
++
++  @details        The information schema tables are internally represented as
++                  temporary tables that are filled at query execution time.
++                  Those I_S tables whose data are retrieved
++                  from frm files and storage engine are filled by the function
++                  get_all_tables().
++
++  @param[in]      thd                      thread handler
++  @param[in]      tables                   I_S table
++  @param[in]      cond                     'WHERE' condition
++
++  @return         Operation status
++    @retval       0                        success
++    @retval       1                        error
++*/
++
++int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  LEX *lex= thd->lex;
++  TABLE *table= tables->table;
++  SELECT_LEX *old_all_select_lex= lex->all_selects_list;
++  enum_sql_command save_sql_command= lex->sql_command;
++  SELECT_LEX *lsel= tables->schema_select_lex;
++  ST_SCHEMA_TABLE *schema_table= tables->schema_table;
++  SELECT_LEX sel;
++  LOOKUP_FIELD_VALUES lookup_field_vals;
++  LEX_STRING *db_name, *table_name;
++  bool with_i_schema;
++  enum enum_schema_tables schema_table_idx;
++  List<LEX_STRING> db_names;
++  List_iterator_fast<LEX_STRING> it(db_names);
++  COND *partial_cond= 0;
++  uint derived_tables= lex->derived_tables; 
++  int error= 1;
++  Open_tables_state open_tables_state_backup;
++  uint8 save_context_analysis_only= lex->context_analysis_only;
++  Query_tables_list query_tables_list_backup;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  Security_context *sctx= thd->security_ctx;
++#endif
++  uint table_open_method;
++  DBUG_ENTER("get_all_tables");
++
++  lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
++  lex->reset_n_backup_query_tables_list(&query_tables_list_backup);
++
++  /*
++    We should not introduce deadlocks even if we already have some
++    tables open and locked, since we won't lock tables which we will
++    open and will ignore possible name-locks for these tables.
++  */
++  thd->reset_n_backup_open_tables_state(&open_tables_state_backup);
++
++  /* 
++    This branch processes the SHOW FIELDS and SHOW INDEXES commands.
++    See sql_parse.cc, prepare_schema_table() function, where
++    these values are initialized.
++  */
++  if (lsel && lsel->table_list.first)
++  {
++    error= fill_schema_show_cols_or_idxs(thd, tables, schema_table,
++                                         &open_tables_state_backup);
++    goto err;
++  }
++
++  schema_table_idx= get_schema_table_idx(schema_table);
++  if (get_lookup_field_values(thd, cond, tables, &lookup_field_vals))
++  {
++    error= 0;
++    goto err;
++  }
++
++  DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'",
++                             STR_OR_NIL(lookup_field_vals.db_value.str),
++                             STR_OR_NIL(lookup_field_vals.table_value.str)));
++
++  if (!lookup_field_vals.wild_db_value && !lookup_field_vals.wild_table_value)
++  {
++    /* 
++      if the lookup value is an empty string then
++      it cannot be a valid table name or db name
++    */
++    if ((lookup_field_vals.db_value.str &&
++         !lookup_field_vals.db_value.str[0]) ||
++        (lookup_field_vals.table_value.str &&
++         !lookup_field_vals.table_value.str[0]))
++    {
++      error= 0;
++      goto err;
++    }
++  }
++
++  if (lookup_field_vals.db_value.length &&
++      !lookup_field_vals.wild_db_value)
++    tables->has_db_lookup_value= TRUE;
++  if (lookup_field_vals.table_value.length &&
++      !lookup_field_vals.wild_table_value) 
++    tables->has_table_lookup_value= TRUE;
++
++  if (tables->has_db_lookup_value && tables->has_table_lookup_value)
++    partial_cond= 0;
++  else
++    partial_cond= make_cond_for_info_schema(cond, tables);
++
++  tables->table_open_method= table_open_method=
++    get_table_open_method(tables, schema_table, schema_table_idx);
++
++  if (lex->describe)
++  {
++    /* EXPLAIN SELECT */
++    error= 0;
++    goto err;
++  }
++
++  if (make_db_list(thd, &db_names, &lookup_field_vals, &with_i_schema))
++    goto err;
++  it.rewind(); /* To get access to new elements in basis list */
++  while ((db_name= it++))
++  {
++    LEX_STRING orig_db_name;
++
++    /* db_name can be changed in make_table_list() func */
++    if (!thd->make_lex_string(&orig_db_name, db_name->str,
++                              db_name->length, FALSE))
++      goto err;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++    if (!(check_access(thd,SELECT_ACL, db_name->str, 
++                       &thd->col_access, 0, 1, with_i_schema) ||
++          (!thd->col_access && check_grant_db(thd, db_name->str))) ||
++        sctx->master_access & (DB_ACLS | SHOW_DB_ACL) ||
++        acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name->str, 0))
++#endif
++    {
++      thd->no_warnings_for_error= 1;
++      List<LEX_STRING> table_names;
++      int res= make_table_name_list(thd, &table_names, lex,
++                                    &lookup_field_vals,
++                                    with_i_schema, db_name);
++      if (res == 2)   /* Not fatal error, continue */
++        continue;
++      if (res)
++        goto err;
++
++      List_iterator_fast<LEX_STRING> it_files(table_names);
++      while ((table_name= it_files++))
++      {
++	restore_record(table, s->default_values);
++        table->field[schema_table->idx_field1]->
++          store(db_name->str, db_name->length, system_charset_info);
++        table->field[schema_table->idx_field2]->
++          store(table_name->str, table_name->length, system_charset_info);
++
++        if (!partial_cond || partial_cond->val_int())
++        {
++          /*
++            If the table is I_S.TABLES, open_table_method is 0 (e.g. SKIP_OPEN),
++            and we either have no lookup value for the table name or the lookup
++            value is a wild string, we can skip opening the table (the table
++            name list is already created by make_table_name_list()).
++          */
++          if (!table_open_method && schema_table_idx == SCH_TABLES &&
++              (!lookup_field_vals.table_value.length ||
++               lookup_field_vals.wild_table_value))
++          {
++            if (schema_table_store_record(thd, table))
++              goto err;      /* Out of space in temporary table */
++            continue;
++          }
++
++          /* SHOW TABLE NAMES command */
++          if (schema_table_idx == SCH_TABLE_NAMES)
++          {
++            if (fill_schema_table_names(thd, tables->table, db_name,
++                                        table_name, with_i_schema))
++              continue;
++          }
++          else
++          {
++            if (!(table_open_method & ~OPEN_FRM_ONLY) && 
++                !with_i_schema)
++            {
++              if (!fill_schema_table_from_frm(thd, table, schema_table, db_name,
++                                              table_name, schema_table_idx))
++                continue;
++            }
++
++            int res;
++            LEX_STRING tmp_lex_string;
++            /*
++              Set the parent lex of 'sel' because it is needed by
++              sel.init_query() which is called inside make_table_list.
++            */
++            thd->no_warnings_for_error= 1;
++            sel.parent_lex= lex;
++            if (make_table_list(thd, &sel, db_name, table_name))
++              goto err;
++            TABLE_LIST *show_table_list= sel.table_list.first;
++            lex->all_selects_list= &sel;
++            lex->derived_tables= 0;
++            lex->sql_command= SQLCOM_SHOW_FIELDS;
++            show_table_list->i_s_requested_object=
++              schema_table->i_s_requested_object;
++            DEBUG_SYNC(thd, "before_open_in_get_all_tables");
++            res= open_normal_and_derived_tables(thd, show_table_list,
++                                                MYSQL_LOCK_IGNORE_FLUSH);
++            lex->sql_command= save_sql_command;
++            /*
++              XXX:  show_table_list has a flag i_is_requested,
++              and when it's set, open_normal_and_derived_tables()
++              can return an error without setting an error message
++              in THD, which is a hack. This is why we have to
++              check for res, then for thd->is_error() only then
++              for thd->main_da.sql_errno().
++            */
++            if (res && thd->is_error() &&
++                thd->main_da.sql_errno() == ER_NO_SUCH_TABLE)
++            {
++              /*
++                Hide the error for a non-existent table.
++                This error can occur, for example, when we use a
++                where condition with a db name and table name and the
++                table does not exist.
++              */
++              res= 0;
++              thd->clear_error();
++            }
++            else
++            {
++              /*
++                We should use show_table_list->alias instead of 
++                show_table_list->table_name because table_name
++                could be changed during opening of I_S tables. It's safe
++                to use alias because alias contains original table name 
++                in this case.
++              */
++              thd->make_lex_string(&tmp_lex_string, show_table_list->alias,
++                                   strlen(show_table_list->alias), FALSE);
++              res= schema_table->process_table(thd, show_table_list, table,
++                                               res, &orig_db_name,
++                                               &tmp_lex_string);
++              close_tables_for_reopen(thd, &show_table_list);
++            }
++            DBUG_ASSERT(!lex->query_tables_own_last);
++            if (res)
++              goto err;
++          }
++        }
++      }
++      /*
++        If we have information schema it's always the first table and only
++        the first table. Reset for other tables.
++      */
++      with_i_schema= 0;
++    }
++  }
++
++  error= 0;
++err:
++  thd->restore_backup_open_tables_state(&open_tables_state_backup);
++  lex->restore_backup_query_tables_list(&query_tables_list_backup);
++  lex->derived_tables= derived_tables;
++  lex->all_selects_list= old_all_select_lex;
++  lex->context_analysis_only= save_context_analysis_only;
++  lex->sql_command= save_sql_command;
++  DBUG_RETURN(error);
++}
++
++
++bool store_schema_shemata(THD* thd, TABLE *table, LEX_STRING *db_name,
++                          CHARSET_INFO *cs)
++{
++  restore_record(table, s->default_values);
++  table->field[1]->store(db_name->str, db_name->length, system_charset_info);
++  table->field[2]->store(cs->csname, strlen(cs->csname), system_charset_info);
++  table->field[3]->store(cs->name, strlen(cs->name), system_charset_info);
++  return schema_table_store_record(thd, table);
++}
++
++
++int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  /*
++    TODO: fill_schema_shemata() is called when a new client is connected.
++    Returning an error status in this case leads to a client hangup.
++  */
++
++  LOOKUP_FIELD_VALUES lookup_field_vals;
++  List<LEX_STRING> db_names;
++  LEX_STRING *db_name;
++  bool with_i_schema;
++  HA_CREATE_INFO create;
++  TABLE *table= tables->table;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++  Security_context *sctx= thd->security_ctx;
++#endif
++  DBUG_ENTER("fill_schema_shemata");
++
++  if (get_lookup_field_values(thd, cond, tables, &lookup_field_vals))
++    DBUG_RETURN(0);
++  DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'",
++                             lookup_field_vals.db_value.str,
++                             lookup_field_vals.table_value.str));
++  if (make_db_list(thd, &db_names, &lookup_field_vals,
++                   &with_i_schema))
++    DBUG_RETURN(1);
++
++  /*
++    If we have lookup db value we should check that the database exists
++  */
++  if(lookup_field_vals.db_value.str && !lookup_field_vals.wild_db_value &&
++     !with_i_schema)
++  {
++    char path[FN_REFLEN+16];
++    uint path_len;
++    MY_STAT stat_info;
++    if (!lookup_field_vals.db_value.str[0])
++      DBUG_RETURN(0);
++    path_len= build_table_filename(path, sizeof(path) - 1,
++                                   lookup_field_vals.db_value.str, "", "", 0);
++    path[path_len-1]= 0;
++    if (!my_stat(path,&stat_info,MYF(0)))
++      DBUG_RETURN(0);
++  }
++
++  List_iterator_fast<LEX_STRING> it(db_names);
++  while ((db_name=it++))
++  {
++    if (with_i_schema)       // information schema name is always first in list
++    {
++      if (store_schema_shemata(thd, table, db_name,
++                               system_charset_info))
++        DBUG_RETURN(1);
++      with_i_schema= 0;
++      continue;
++    }
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++    if (sctx->master_access & (DB_ACLS | SHOW_DB_ACL) ||
++	acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name->str, 0) ||
++	!check_grant_db(thd, db_name->str))
++#endif
++    {
++      load_db_opt_by_name(thd, db_name->str, &create);
++      if (store_schema_shemata(thd, table, db_name,
++                               create.default_table_charset))
++        DBUG_RETURN(1);
++    }
++  }
++  DBUG_RETURN(0);
++}
++
++
++static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
++				    TABLE *table, bool res,
++				    LEX_STRING *db_name,
++				    LEX_STRING *table_name)
++{
++  const char *tmp_buff;
++  MYSQL_TIME time;
++  int info_error= 0;
++  CHARSET_INFO *cs= system_charset_info;
++  DBUG_ENTER("get_schema_tables_record");
++
++  restore_record(table, s->default_values);
++  table->field[1]->store(db_name->str, db_name->length, cs);
++  table->field[2]->store(table_name->str, table_name->length, cs);
++
++  if (res)
++  {
++    /* There was a table open error, so set the table type and return */
++    if (tables->view)
++      table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
++    else if (tables->schema_table)
++      table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
++    else
++      table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
++
++    goto err;
++  }
++
++  if (tables->view)
++  {
++    table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
++    table->field[20]->store(STRING_WITH_LEN("VIEW"), cs);
++  }
++  else
++  {
++    char option_buff[350],*ptr;
++    TABLE *show_table= tables->table;
++    TABLE_SHARE *share= show_table->s;
++    handler *file= show_table->file;
++    handlerton *tmp_db_type= share->db_type();
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    bool is_partitioned= FALSE;
++#endif
++    if (share->tmp_table == SYSTEM_TMP_TABLE)
++      table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
++    else if (share->tmp_table)
++      table->field[3]->store(STRING_WITH_LEN("LOCAL TEMPORARY"), cs);
++    else
++      table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
++
++    for (int i= 4; i < 20; i++)
++    {
++      if (i == 7 || (i > 12 && i < 17) || i == 18)
++        continue;
++      table->field[i]->set_notnull();
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    if (share->db_type() == partition_hton &&
++        share->partition_info_len)
++    {
++      tmp_db_type= share->default_part_db_type;
++      is_partitioned= TRUE;
++    }
++#endif
++    tmp_buff= (char *) ha_resolve_storage_engine_name(tmp_db_type);
++    table->field[4]->store(tmp_buff, strlen(tmp_buff), cs);
++    table->field[5]->store((longlong) share->frm_version, TRUE);
++
++    ptr=option_buff;
++    if (share->min_rows)
++    {
++      ptr=strmov(ptr," min_rows=");
++      ptr=longlong10_to_str(share->min_rows,ptr,10);
++    }
++    if (share->max_rows)
++    {
++      ptr=strmov(ptr," max_rows=");
++      ptr=longlong10_to_str(share->max_rows,ptr,10);
++    }
++    if (share->avg_row_length)
++    {
++      ptr=strmov(ptr," avg_row_length=");
++      ptr=longlong10_to_str(share->avg_row_length,ptr,10);
++    }
++    if (share->db_create_options & HA_OPTION_PACK_KEYS)
++      ptr=strmov(ptr," pack_keys=1");
++    if (share->db_create_options & HA_OPTION_NO_PACK_KEYS)
++      ptr=strmov(ptr," pack_keys=0");
++    /* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compatibility */
++    if (share->db_create_options & HA_OPTION_CHECKSUM)
++      ptr=strmov(ptr," checksum=1");
++    if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE)
++      ptr=strmov(ptr," delay_key_write=1");
++    if (share->row_type != ROW_TYPE_DEFAULT)
++      ptr=strxmov(ptr, " row_format=", 
++                  ha_row_type[(uint) share->row_type],
++                  NullS);
++    if (share->key_block_size)
++    {
++      ptr= strmov(ptr, " KEY_BLOCK_SIZE=");
++      ptr= longlong10_to_str(share->key_block_size, ptr, 10);
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    if (is_partitioned)
++      ptr= strmov(ptr, " partitioned");
++#endif
++    table->field[19]->store(option_buff+1,
++                            (ptr == option_buff ? 0 : 
++                             (uint) (ptr-option_buff)-1), cs);
++
++    tmp_buff= (share->table_charset ?
++               share->table_charset->name : "default");
++    table->field[17]->store(tmp_buff, strlen(tmp_buff), cs);
++
++    if (share->comment.str)
++      table->field[20]->store(share->comment.str, share->comment.length, cs);
++
++    if (file)
++    {
++      /* If info() fails, then there's nothing else to do */
++      if ((info_error= file->info(HA_STATUS_VARIABLE |
++                                  HA_STATUS_TIME |
++                                  HA_STATUS_AUTO)) != 0)
++        goto err;
++	  
++      enum row_type row_type = file->get_row_type();
++      switch (row_type) {
++      case ROW_TYPE_NOT_USED:
++      case ROW_TYPE_DEFAULT:
++        tmp_buff= ((share->db_options_in_use &
++                    HA_OPTION_COMPRESS_RECORD) ? "Compressed" :
++                   (share->db_options_in_use & HA_OPTION_PACK_RECORD) ?
++                   "Dynamic" : "Fixed");
++        break;
++      case ROW_TYPE_FIXED:
++        tmp_buff= "Fixed";
++        break;
++      case ROW_TYPE_DYNAMIC:
++        tmp_buff= "Dynamic";
++        break;
++      case ROW_TYPE_COMPRESSED:
++        tmp_buff= "Compressed";
++        break;
++      case ROW_TYPE_REDUNDANT:
++        tmp_buff= "Redundant";
++        break;
++      case ROW_TYPE_COMPACT:
++        tmp_buff= "Compact";
++        break;
++      case ROW_TYPE_PAGE:
++        tmp_buff= "Paged";
++        break;
++      }
++      table->field[6]->store(tmp_buff, strlen(tmp_buff), cs);
++      if (!tables->schema_table)
++      {
++        table->field[7]->store((longlong) file->stats.records, TRUE);
++        table->field[7]->set_notnull();
++      }
++      table->field[8]->store((longlong) file->stats.mean_rec_length, TRUE);
++      table->field[9]->store((longlong) file->stats.data_file_length, TRUE);
++      if (file->stats.max_data_file_length)
++      {
++        table->field[10]->store((longlong) file->stats.max_data_file_length,
++                                TRUE);
++      }
++      table->field[11]->store((longlong) file->stats.index_file_length, TRUE);
++      table->field[12]->store((longlong) file->stats.delete_length, TRUE);
++      if (show_table->found_next_number_field)
++      {
++        table->field[13]->store((longlong) file->stats.auto_increment_value,
++                                TRUE);
++        table->field[13]->set_notnull();
++      }
++      if (file->stats.create_time)
++      {
++        thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                                  (my_time_t) file->stats.create_time);
++        table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++        table->field[14]->set_notnull();
++      }
++      if (file->stats.update_time)
++      {
++        thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                                  (my_time_t) file->stats.update_time);
++        table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++        table->field[15]->set_notnull();
++      }
++      if (file->stats.check_time)
++      {
++        thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                                  (my_time_t) file->stats.check_time);
++        table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++        table->field[16]->set_notnull();
++      }
++      if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
++      {
++        table->field[18]->store((longlong) file->checksum(), TRUE);
++        table->field[18]->set_notnull();
++      }
++    }
++  }
++
++err:
++  if (res || info_error)
++  {
++    /*
++      If an error was encountered, push a warning, set the TABLE COMMENT
++      column with the error text, and clear the error so that the operation
++      can continue.
++    */
++    const char *error= thd->is_error() ? thd->main_da.message() : "";
++    table->field[20]->store(error, strlen(error), cs);
++
++    if (thd->is_error())
++    {
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++      thd->clear_error();
++    }
++  }
++  
++  DBUG_RETURN(schema_table_store_record(thd, table));
++}
++
++
++static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
++				    TABLE *table, bool res,
++				    LEX_STRING *db_name,
++				    LEX_STRING *table_name)
++{
++  LEX *lex= thd->lex;
++  const char *wild= lex->wild ? lex->wild->ptr() : NullS;
++  CHARSET_INFO *cs= system_charset_info;
++  TABLE *show_table;
++  Field **ptr,*field;
++  int count;
++  DBUG_ENTER("get_schema_column_record");
++
++  if (res)
++  {
++    if (lex->sql_command != SQLCOM_SHOW_FIELDS)
++    {
++      /*
++        I.e. we are in SELECT FROM INFORMATION_SCHEMA.COLUMNS
++        rather than in SHOW COLUMNS
++      */
++      if (thd->is_error())
++        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                     thd->main_da.sql_errno(), thd->main_da.message());
++      thd->clear_error();
++      res= 0;
++    }
++    DBUG_RETURN(res);
++  }
++
++  show_table= tables->table;
++  count= 0;
++  restore_record(show_table, s->default_values);
++  show_table->use_all_columns();               // Required for default
++
++  for (ptr= show_table->field; (field= *ptr) ; ptr++)
++  {
++    const char *tmp_buff;
++    uchar *pos;
++    bool is_blob;
++    uint flags=field->flags;
++    char tmp[MAX_FIELD_WIDTH];
++    String type(tmp,sizeof(tmp), system_charset_info);
++    int decimals, field_length;
++
++    if (wild && wild[0] &&
++        wild_case_compare(system_charset_info, field->field_name,wild))
++      continue;
++
++    flags= field->flags;
++    count++;
++    /* Get default row, with all NULL fields set to NULL */
++    restore_record(table, s->default_values);
++
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++    uint col_access;
++    check_access(thd,SELECT_ACL | EXTRA_ACL, db_name->str,
++                 &tables->grant.privilege, 0, 0, test(tables->schema_table));
++    col_access= get_column_grant(thd, &tables->grant, 
++                                 db_name->str, table_name->str,
++                                 field->field_name) & COL_ACLS;
++    if (!tables->schema_table && !col_access)
++      continue;
++    char *end= tmp;
++    for (uint bitnr=0; col_access ; col_access>>=1,bitnr++)
++    {
++      if (col_access & 1)
++      {
++        *end++=',';
++        end=strmov(end,grant_types.type_names[bitnr]);
++      }
++    }
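++    /*
++      Each grant name above was appended with a leading ','; store from
++      tmp+1 to skip it, or store an empty string when no privileges matched.
++    */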
++    table->field[17]->store(tmp+1,end == tmp ? 0 : (uint) (end-tmp-1), cs);
++
++#endif
++    table->field[1]->store(db_name->str, db_name->length, cs);
++    table->field[2]->store(table_name->str, table_name->length, cs);
++    table->field[3]->store(field->field_name, strlen(field->field_name),
++                           cs);
++    table->field[4]->store((longlong) count, TRUE);
++    field->sql_type(type);
++    table->field[14]->store(type.ptr(), type.length(), cs);
++    /*
++      A MySQL column type has the following format:
++      base_type [(dimension)] [unsigned] [zerofill].
++      For the DATA_TYPE column we extract only the base type.
++    */
++    tmp_buff= strchr(type.ptr(), '(');
++    if (!tmp_buff)
++      /*
++        If there is no dimension part, check for the presence of the
++        [unsigned] [zerofill] attributes and cut them off if present.
++      */
++      tmp_buff= strchr(type.ptr(), ' ');
++    table->field[7]->store(type.ptr(),
++                           (tmp_buff ? tmp_buff - type.ptr() :
++                            type.length()), cs);
++
++    if (get_field_default_value(thd, show_table, field, &type, 0))
++    {
++      table->field[5]->store(type.ptr(), type.length(), cs);
++      table->field[5]->set_notnull();
++    }
++    pos=(uchar*) ((flags & NOT_NULL_FLAG) ?  "NO" : "YES");
++    table->field[6]->store((const char*) pos,
++                           strlen((const char*) pos), cs);
++    is_blob= (field->type() == MYSQL_TYPE_BLOB);
++    if (field->has_charset() || is_blob ||
++        field->real_type() == MYSQL_TYPE_VARCHAR ||  // For varbinary type
++        field->real_type() == MYSQL_TYPE_STRING)     // For binary type
++    {
++      uint32 octet_max_length= field->max_display_length();
++      if (is_blob && octet_max_length != (uint32) 4294967295U)
++        octet_max_length /= field->charset()->mbmaxlen;
++      longlong char_max_len= is_blob ? 
++        (longlong) octet_max_length / field->charset()->mbminlen :
++        (longlong) octet_max_length / field->charset()->mbmaxlen;
++      table->field[8]->store(char_max_len, TRUE);
++      table->field[8]->set_notnull();
++      table->field[9]->store((longlong) octet_max_length, TRUE);
++      table->field[9]->set_notnull();
++    }
++
++    /*
++      Calculate field_length and decimals.
++      They are set to -1 if they should not be set (we should return NULL)
++    */
++
++    decimals= field->decimals();
++    switch (field->type()) {
++    case MYSQL_TYPE_NEWDECIMAL:
++      field_length= ((Field_new_decimal*) field)->precision;
++      break;
++    case MYSQL_TYPE_DECIMAL:
++      field_length= field->field_length - (decimals  ? 2 : 1);
++      break;
++    case MYSQL_TYPE_TINY:
++    case MYSQL_TYPE_SHORT:
++    case MYSQL_TYPE_LONG:
++    case MYSQL_TYPE_INT24:
++      field_length= field->max_display_length() - 1;
++      break;
++    case MYSQL_TYPE_LONGLONG:
++      field_length= field->max_display_length() - 
++        ((field->flags & UNSIGNED_FLAG) ? 0 : 1);
++      break;
++    case MYSQL_TYPE_BIT:
++      field_length= field->max_display_length();
++      decimals= -1;                             // return NULL
++      break;
++    case MYSQL_TYPE_FLOAT:  
++    case MYSQL_TYPE_DOUBLE:
++      field_length= field->field_length;
++      if (decimals == NOT_FIXED_DEC)
++        decimals= -1;                           // return NULL
++    break;
++    default:
++      field_length= decimals= -1;
++      break;
++    }
++
++    if (field_length >= 0)
++    {
++      table->field[10]->store((longlong) field_length, TRUE);
++      table->field[10]->set_notnull();
++    }
++    if (decimals >= 0)
++    {
++      table->field[11]->store((longlong) decimals, TRUE);
++      table->field[11]->set_notnull();
++    }
++
++    if (field->has_charset())
++    {
++      pos=(uchar*) field->charset()->csname;
++      table->field[12]->store((const char*) pos,
++                              strlen((const char*) pos), cs);
++      table->field[12]->set_notnull();
++      pos=(uchar*) field->charset()->name;
++      table->field[13]->store((const char*) pos,
++                              strlen((const char*) pos), cs);
++      table->field[13]->set_notnull();
++    }
++    pos=(uchar*) ((field->flags & PRI_KEY_FLAG) ? "PRI" :
++                 (field->flags & UNIQUE_KEY_FLAG) ? "UNI" :
++                 (field->flags & MULTIPLE_KEY_FLAG) ? "MUL":"");
++    table->field[15]->store((const char*) pos,
++                            strlen((const char*) pos), cs);
++
++    if (field->unireg_check == Field::NEXT_NUMBER)
++      table->field[16]->store(STRING_WITH_LEN("auto_increment"), cs);
++    if (show_table->timestamp_field == field &&
++        field->unireg_check != Field::TIMESTAMP_DN_FIELD)
++      table->field[16]->store(STRING_WITH_LEN("on update CURRENT_TIMESTAMP"),
++                              cs);
++
++    table->field[18]->store(field->comment.str, field->comment.length, cs);
++    if (schema_table_store_record(thd, table))
++      DBUG_RETURN(1);
++  }
++  DBUG_RETURN(0);
++}
++
++
++
++int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  CHARSET_INFO **cs;
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  TABLE *table= tables->table;
++  CHARSET_INFO *scs= system_charset_info;
++
++  for (cs= all_charsets ; cs < all_charsets+255 ; cs++)
++  {
++    CHARSET_INFO *tmp_cs= cs[0];
++    if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) && 
++        (tmp_cs->state & MY_CS_AVAILABLE) &&
++        !(tmp_cs->state & MY_CS_HIDDEN) &&
++        !(wild && wild[0] &&
++	  wild_case_compare(scs, tmp_cs->csname,wild)))
++    {
++      const char *comment;
++      restore_record(table, s->default_values);
++      table->field[0]->store(tmp_cs->csname, strlen(tmp_cs->csname), scs);
++      table->field[1]->store(tmp_cs->name, strlen(tmp_cs->name), scs);
++      comment= tmp_cs->comment ? tmp_cs->comment : "";
++      table->field[2]->store(comment, strlen(comment), scs);
++      table->field[3]->store((longlong) tmp_cs->mbmaxlen, TRUE);
++      if (schema_table_store_record(thd, table))
++        return 1;
++    }
++  }
++  return 0;
++}
++
++
++static my_bool iter_schema_engines(THD *thd, plugin_ref plugin,
++                                   void *ptable)
++{
++  TABLE *table= (TABLE *) ptable;
++  handlerton *hton= plugin_data(plugin, handlerton *);
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  CHARSET_INFO *scs= system_charset_info;
++  handlerton *default_type= ha_default_handlerton(thd);
++  DBUG_ENTER("iter_schema_engines");
++
++
++  /* Disabled plugins */
++  if (plugin_state(plugin) != PLUGIN_IS_READY)
++  {
++
++    struct st_mysql_plugin *plug= plugin_decl(plugin);
++    if (!(wild && wild[0] &&
++          wild_case_compare(scs, plug->name,wild)))
++    {
++      restore_record(table, s->default_values);
++      table->field[0]->store(plug->name, strlen(plug->name), scs);
++      table->field[1]->store(C_STRING_WITH_LEN("NO"), scs);
++      table->field[2]->store(plug->descr, strlen(plug->descr), scs);
++      if (schema_table_store_record(thd, table))
++        DBUG_RETURN(1);
++    }
++    DBUG_RETURN(0);
++  }
++
++  if (!(hton->flags & HTON_HIDDEN))
++  {
++    LEX_STRING *name= plugin_name(plugin);
++    if (!(wild && wild[0] &&
++          wild_case_compare(scs, name->str,wild)))
++    {
++      LEX_STRING yesno[2]= {{ C_STRING_WITH_LEN("NO") },
++                            { C_STRING_WITH_LEN("YES") }};
++      LEX_STRING *tmp;
++      const char *option_name= show_comp_option_name[(int) hton->state];
++      restore_record(table, s->default_values);
++
++      table->field[0]->store(name->str, name->length, scs);
++      if (hton->state == SHOW_OPTION_YES && default_type == hton)
++        option_name= "DEFAULT";
++      table->field[1]->store(option_name, strlen(option_name), scs);
++      table->field[2]->store(plugin_decl(plugin)->descr,
++                             strlen(plugin_decl(plugin)->descr), scs);
++      tmp= &yesno[test(hton->commit)];
++      table->field[3]->store(tmp->str, tmp->length, scs);
++      table->field[3]->set_notnull();
++      tmp= &yesno[test(hton->prepare)];
++      table->field[4]->store(tmp->str, tmp->length, scs);
++      table->field[4]->set_notnull();
++      tmp= &yesno[test(hton->savepoint_set)];
++      table->field[5]->store(tmp->str, tmp->length, scs);
++      table->field[5]->set_notnull();
++
++      if (schema_table_store_record(thd, table))
++        DBUG_RETURN(1);
++    }
++  }
++  DBUG_RETURN(0);
++}
++
++int fill_schema_engines(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_schema_engines");
++  if (plugin_foreach_with_mask(thd, iter_schema_engines,
++                               MYSQL_STORAGE_ENGINE_PLUGIN,
++                               ~PLUGIN_IS_FREED, tables->table))
++    DBUG_RETURN(1);
++  DBUG_RETURN(0);
++}
++
++
++int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  CHARSET_INFO **cs;
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  TABLE *table= tables->table;
++  CHARSET_INFO *scs= system_charset_info;
++  for (cs= all_charsets ; cs < all_charsets+255 ; cs++ )
++  {
++    CHARSET_INFO **cl;
++    CHARSET_INFO *tmp_cs= cs[0];
++    if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) ||
++         (tmp_cs->state & MY_CS_HIDDEN) ||
++        !(tmp_cs->state & MY_CS_PRIMARY))
++      continue;
++    for (cl= all_charsets; cl < all_charsets+255 ;cl ++)
++    {
++      CHARSET_INFO *tmp_cl= cl[0];
++      if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || 
++          !my_charset_same(tmp_cs, tmp_cl))
++	continue;
++      if (!(wild && wild[0] &&
++	  wild_case_compare(scs, tmp_cl->name,wild)))
++      {
++	const char *tmp_buff;
++	restore_record(table, s->default_values);
++	table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs);
++        table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs);
++        table->field[2]->store((longlong) tmp_cl->number, TRUE);
++        tmp_buff= (tmp_cl->state & MY_CS_PRIMARY) ? "Yes" : "";
++	table->field[3]->store(tmp_buff, strlen(tmp_buff), scs);
++        tmp_buff= (tmp_cl->state & MY_CS_COMPILED)? "Yes" : "";
++	table->field[4]->store(tmp_buff, strlen(tmp_buff), scs);
++        table->field[5]->store((longlong) tmp_cl->strxfrm_multiply, TRUE);
++        if (schema_table_store_record(thd, table))
++          return 1;
++      }
++    }
++  }
++  return 0;
++}
++
++
++int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  CHARSET_INFO **cs;
++  TABLE *table= tables->table;
++  CHARSET_INFO *scs= system_charset_info;
++  for (cs= all_charsets ; cs < all_charsets+255 ; cs++ )
++  {
++    CHARSET_INFO **cl;
++    CHARSET_INFO *tmp_cs= cs[0];
++    if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || 
++        !(tmp_cs->state & MY_CS_PRIMARY))
++      continue;
++    for (cl= all_charsets; cl < all_charsets+255 ;cl ++)
++    {
++      CHARSET_INFO *tmp_cl= cl[0];
++      if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || 
++          !my_charset_same(tmp_cs,tmp_cl))
++	continue;
++      restore_record(table, s->default_values);
++      table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs);
++      table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs);
++      if (schema_table_store_record(thd, table))
++        return 1;
++    }
++  }
++  return 0;
++}
++
++
++static inline void copy_field_as_string(Field *to_field, Field *from_field)
++{
++  char buff[MAX_FIELD_WIDTH];
++  String tmp_str(buff, sizeof(buff), system_charset_info);
++  from_field->val_str(&tmp_str);
++  to_field->store(tmp_str.ptr(), tmp_str.length(), system_charset_info);
++}
++
++
++bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
++                       const char *wild, bool full_access, const char *sp_user)
++{
++  MYSQL_TIME time;
++  LEX *lex= thd->lex;
++  CHARSET_INFO *cs= system_charset_info;
++  char sp_db_buff[NAME_LEN + 1], sp_name_buff[NAME_LEN + 1],
++    definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 2];
++  String sp_db(sp_db_buff, sizeof(sp_db_buff), cs);
++  String sp_name(sp_name_buff, sizeof(sp_name_buff), cs);
++  String definer(definer_buff, sizeof(definer_buff), cs);
++
++  proc_table->field[0]->val_str(&sp_db);
++  proc_table->field[1]->val_str(&sp_name);
++  proc_table->field[11]->val_str(&definer);
++
++  if (!full_access)
++    full_access= !strcmp(sp_user, definer.c_ptr_safe());
++  if (!full_access &&
++      check_some_routine_access(thd, sp_db.c_ptr_safe(), sp_name.c_ptr_safe(),
++                                proc_table->field[2]->val_int() ==
++                                TYPE_ENUM_PROCEDURE))
++    return 0;
++
++  if ((lex->sql_command == SQLCOM_SHOW_STATUS_PROC &&
++      proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE) ||
++      (lex->sql_command == SQLCOM_SHOW_STATUS_FUNC &&
++      proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION) ||
++      (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0)
++  {
++    restore_record(table, s->default_values);
++    if (!wild || !wild[0] || !wild_compare(sp_name.c_ptr_safe(), wild, 0))
++    {
++      int enum_idx= (int) proc_table->field[5]->val_int();
++      table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
++      copy_field_as_string(table->field[0], proc_table->field[3]);
++      table->field[2]->store(sp_db.ptr(), sp_db.length(), cs);
++      copy_field_as_string(table->field[4], proc_table->field[2]);
++      if (proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION)
++      {
++        copy_field_as_string(table->field[5], proc_table->field[9]);
++        table->field[5]->set_notnull();
++      }
++      if (full_access)
++      {
++        copy_field_as_string(table->field[7], proc_table->field[19]);
++        table->field[7]->set_notnull();
++      }
++      table->field[6]->store(STRING_WITH_LEN("SQL"), cs);
++      table->field[10]->store(STRING_WITH_LEN("SQL"), cs);
++      copy_field_as_string(table->field[11], proc_table->field[6]);
++      table->field[12]->store(sp_data_access_name[enum_idx].str, 
++                              sp_data_access_name[enum_idx].length , cs);
++      copy_field_as_string(table->field[14], proc_table->field[7]);
++
++      bzero((char *)&time, sizeof(time));
++      ((Field_timestamp *) proc_table->field[12])->get_time(&time);
++      table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++      bzero((char *)&time, sizeof(time));
++      ((Field_timestamp *) proc_table->field[13])->get_time(&time);
++      table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++      copy_field_as_string(table->field[17], proc_table->field[14]);
++      copy_field_as_string(table->field[18], proc_table->field[15]);
++      table->field[19]->store(definer.ptr(), definer.length(), cs);
++      copy_field_as_string(table->field[20], proc_table->field[16]);
++      copy_field_as_string(table->field[21], proc_table->field[17]);
++      copy_field_as_string(table->field[22], proc_table->field[18]);
++
++      return schema_table_store_record(thd, table);
++    }
++  }
++  return 0;
++}
++
++
++int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  TABLE *proc_table;
++  TABLE_LIST proc_tables;
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  int res= 0;
++  TABLE *table= tables->table;
++  bool full_access;
++  char definer[USER_HOST_BUFF_SIZE];
++  Open_tables_state open_tables_state_backup;
++  DBUG_ENTER("fill_schema_proc");
++
++  strxmov(definer, thd->security_ctx->priv_user, "@",
++          thd->security_ctx->priv_host, NullS);
++  /* We use this TABLE_LIST instance only for checking privileges. */
++  bzero((char*) &proc_tables,sizeof(proc_tables));
++  proc_tables.db= (char*) "mysql";
++  proc_tables.db_length= 5;
++  proc_tables.table_name= proc_tables.alias= (char*) "proc";
++  proc_tables.table_name_length= 4;
++  proc_tables.lock_type= TL_READ;
++  full_access= !check_table_access(thd, SELECT_ACL, &proc_tables, 1, TRUE);
++  if (!(proc_table= open_proc_table_for_read(thd, &open_tables_state_backup)))
++  {
++    DBUG_RETURN(1);
++  }
++  proc_table->file->ha_index_init(0, 1);
++  if ((res= proc_table->file->index_first(proc_table->record[0])))
++  {
++    res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;
++    goto err;
++  }
++  if (store_schema_proc(thd, table, proc_table, wild, full_access, definer))
++  {
++    res= 1;
++    goto err;
++  }
++  while (!proc_table->file->index_next(proc_table->record[0]))
++  {
++    if (store_schema_proc(thd, table, proc_table, wild, full_access, definer))
++    {
++      res= 1;
++      goto err;
++    }
++  }
++
++err:
++  proc_table->file->ha_index_end();
++  close_system_tables(thd, &open_tables_state_backup);
++  DBUG_RETURN(res);
++}
++
++
++static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
++				  TABLE *table, bool res,
++				  LEX_STRING *db_name,
++				  LEX_STRING *table_name)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  DBUG_ENTER("get_schema_stat_record");
++  if (res)
++  {
++    if (thd->lex->sql_command != SQLCOM_SHOW_KEYS)
++    {
++      /*
++        I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS
++        rather than in SHOW KEYS
++      */
++      if (thd->is_error())
++        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                     thd->main_da.sql_errno(), thd->main_da.message());
++      thd->clear_error();
++      res= 0;
++    }
++    DBUG_RETURN(res);
++  }
++  else if (!tables->view)
++  {
++    TABLE *show_table= tables->table;
++    KEY *key_info=show_table->s->key_info;
++    if (show_table->file)
++      show_table->file->info(HA_STATUS_VARIABLE |
++                             HA_STATUS_NO_LOCK |
++                             HA_STATUS_TIME);
++    for (uint i=0 ; i < show_table->s->keys ; i++,key_info++)
++    {
++      KEY_PART_INFO *key_part= key_info->key_part;
++      const char *str;
++      for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
++      {
++        restore_record(table, s->default_values);
++        table->field[1]->store(db_name->str, db_name->length, cs);
++        table->field[2]->store(table_name->str, table_name->length, cs);
++        table->field[3]->store((longlong) ((key_info->flags &
++                                            HA_NOSAME) ? 0 : 1), TRUE);
++        table->field[4]->store(db_name->str, db_name->length, cs);
++        table->field[5]->store(key_info->name, strlen(key_info->name), cs);
++        table->field[6]->store((longlong) (j+1), TRUE);
++        str=(key_part->field ? key_part->field->field_name :
++             "?unknown field?");
++        table->field[7]->store(str, strlen(str), cs);
++        if (show_table->file)
++        {
++          if (show_table->file->index_flags(i, j, 0) & HA_READ_ORDER)
++          {
++            table->field[8]->store(((key_part->key_part_flag &
++                                     HA_REVERSE_SORT) ?
++                                    "D" : "A"), 1, cs);
++            table->field[8]->set_notnull();
++          }
++          KEY *key=show_table->key_info+i;
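++          /*
++            CARDINALITY: estimate the number of distinct key prefix values
++            as total rows divided by rows per distinct prefix value.
++          */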
++          if (key->rec_per_key[j])
++          {
++            ha_rows records=(show_table->file->stats.records /
++                             key->rec_per_key[j]);
++            table->field[9]->store((longlong) records, TRUE);
++            table->field[9]->set_notnull();
++          }
++          str= show_table->file->index_type(i);
++          table->field[13]->store(str, strlen(str), cs);
++        }
++        if (!(key_info->flags & HA_FULLTEXT) &&
++            (key_part->field &&
++             key_part->length !=
++             show_table->s->field[key_part->fieldnr-1]->key_length()))
++        {
++          table->field[10]->store((longlong) key_part->length /
++                                  key_part->field->charset()->mbmaxlen, TRUE);
++          table->field[10]->set_notnull();
++        }
++        uint flags= key_part->field ? key_part->field->flags : 0;
++        const char *pos=(char*) ((flags & NOT_NULL_FLAG) ? "" : "YES");
++        table->field[12]->store(pos, strlen(pos), cs);
++        if (!show_table->s->keys_in_use.is_set(i))
++          table->field[14]->store(STRING_WITH_LEN("disabled"), cs);
++        else
++          table->field[14]->store("", 0, cs);
++        table->field[14]->set_notnull();
++        if (schema_table_store_record(thd, table))
++          DBUG_RETURN(1);
++      }
++    }
++  }
++  DBUG_RETURN(res);
++}
++
++
++static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
++				   TABLE *table, bool res,
++				   LEX_STRING *db_name,
++				   LEX_STRING *table_name)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  DBUG_ENTER("get_schema_views_record");
++  LEX_STRING *tmp_db_name, *tmp_table_name;
++  char definer[USER_HOST_BUFF_SIZE];
++  uint definer_len;
++  bool updatable_view;
++  /*
++    If SELECT FROM I_S.VIEWS uses only fields that have the OPEN_FRM_ONLY
++    flag, the 'tables' structure is zeroed and only tables->view is set
++    (see the fill_schema_table_from_frm() function), so we must skip
++    filling the other fields.
++  */
++  bool only_share= !tables->definer.user.str;
++
++  if (tables->view)
++  {
++    Security_context *sctx= thd->security_ctx;
++    if (!only_share && !tables->allowed_show)
++    {
++      if (!my_strcasecmp(system_charset_info, tables->definer.user.str,
++                         sctx->priv_user) &&
++          !my_strcasecmp(system_charset_info, tables->definer.host.str,
++                         sctx->priv_host))
++        tables->allowed_show= TRUE;
++#ifndef NO_EMBEDDED_ACCESS_CHECKS
++      else
++      {
++        if ((thd->col_access & (SHOW_VIEW_ACL|SELECT_ACL)) ==
++            (SHOW_VIEW_ACL|SELECT_ACL))
++          tables->allowed_show= TRUE;
++        else
++        {
++          TABLE_LIST table_list;
++          uint view_access;
++          memset(&table_list, 0, sizeof(table_list));
++          table_list.db= tables->view_db.str;
++          table_list.table_name= tables->view_name.str;
++          table_list.grant.privilege= thd->col_access;
++          view_access= get_table_grant(thd, &table_list);
++          if ((view_access & (SHOW_VIEW_ACL|SELECT_ACL)) ==
++              (SHOW_VIEW_ACL|SELECT_ACL))
++            tables->allowed_show= TRUE;
++        }
++      }
++#endif
++    }
++    restore_record(table, s->default_values);
++    tmp_db_name= &tables->view_db;
++    tmp_table_name= &tables->view_name;
++    if (only_share)
++    {
++      tmp_db_name= db_name;
++      tmp_table_name= table_name;
++    }
++    table->field[1]->store(tmp_db_name->str, tmp_db_name->length, cs);
++    table->field[2]->store(tmp_table_name->str, tmp_table_name->length, cs);
++    if (!only_share)
++    {
++      if (tables->allowed_show)
++      {
++        table->field[3]->store(tables->view_body_utf8.str,
++                               tables->view_body_utf8.length,
++                               cs);
++      }
++
++      if (tables->with_check != VIEW_CHECK_NONE)
++      {
++        if (tables->with_check == VIEW_CHECK_LOCAL)
++          table->field[4]->store(STRING_WITH_LEN("LOCAL"), cs);
++        else
++          table->field[4]->store(STRING_WITH_LEN("CASCADED"), cs);
++      }
++      else
++        table->field[4]->store(STRING_WITH_LEN("NONE"), cs);
++
++      updatable_view= 0;
++      if (tables->algorithm != VIEW_ALGORITHM_TMPTABLE)
++      {
++        /*
++          We should use tables->view->select_lex.item_list here and
++          cannot use Field_iterator_view because the view is always opened
++          with the temporary table algorithm for I_S, so the TABLE_LIST
++          fields 'field_translation' & 'field_translation_end' are
++          uninitialized in this case.
++        */
++        List<Item> *fields= &tables->view->select_lex.item_list;
++        List_iterator<Item> it(*fields);
++        Item *item;
++        Item_field *field;
++        /*
++          Check that at least one column in the view is updatable.
++        */
++        while ((item= it++))
++        {
++          if ((field= item->filed_for_view_update()) && field->field &&
++              !field->field->table->pos_in_table_list->schema_table)
++          {
++            updatable_view= 1;
++            break;
++          }
++        }
++        if (updatable_view && !tables->view->can_be_merged())
++          updatable_view= 0;
++      }
++      if (updatable_view)
++        table->field[5]->store(STRING_WITH_LEN("YES"), cs);
++      else
++        table->field[5]->store(STRING_WITH_LEN("NO"), cs);
++      definer_len= (strxmov(definer, tables->definer.user.str, "@",
++                            tables->definer.host.str, NullS) - definer);
++      table->field[6]->store(definer, definer_len, cs);
++      if (tables->view_suid)
++        table->field[7]->store(STRING_WITH_LEN("DEFINER"), cs);
++      else
++        table->field[7]->store(STRING_WITH_LEN("INVOKER"), cs);
++
++      table->field[8]->store(tables->view_creation_ctx->get_client_cs()->csname,
++                             strlen(tables->view_creation_ctx->
++                                    get_client_cs()->csname), cs);
++
++      table->field[9]->store(tables->view_creation_ctx->
++                             get_connection_cl()->name,
++                             strlen(tables->view_creation_ctx->
++                                    get_connection_cl()->name), cs);
++    }
++
++    if (schema_table_store_record(thd, table))
++      DBUG_RETURN(1);
++    if (res && thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 
++                   thd->main_da.sql_errno(), thd->main_da.message());
++  }
++  if (res) 
++    thd->clear_error();
++  DBUG_RETURN(0);
++}
++
++
++bool store_constraints(THD *thd, TABLE *table, LEX_STRING *db_name,
++                       LEX_STRING *table_name, const char *key_name,
++                       uint key_len, const char *con_type, uint con_len)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  restore_record(table, s->default_values);
++  table->field[1]->store(db_name->str, db_name->length, cs);
++  table->field[2]->store(key_name, key_len, cs);
++  table->field[3]->store(db_name->str, db_name->length, cs);
++  table->field[4]->store(table_name->str, table_name->length, cs);
++  table->field[5]->store(con_type, con_len, cs);
++  return schema_table_store_record(thd, table);
++}
++
++
++static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
++					 TABLE *table, bool res,
++					 LEX_STRING *db_name,
++					 LEX_STRING *table_name)
++{
++  DBUG_ENTER("get_schema_constraints_record");
++  if (res)
++  {
++    if (thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++    thd->clear_error();
++    DBUG_RETURN(0);
++  }
++  else if (!tables->view)
++  {
++    List<FOREIGN_KEY_INFO> f_key_list;
++    TABLE *show_table= tables->table;
++    KEY *key_info=show_table->key_info;
++    uint primary_key= show_table->s->primary_key;
++
++    // This is not needed since no statistics are displayed.
++    // show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME);
++
++    for (uint i=0 ; i < show_table->s->keys ; i++, key_info++)
++    {
++      if (i != primary_key && !(key_info->flags & HA_NOSAME))
++        continue;
++
++      if (i == primary_key && !strcmp(key_info->name, primary_key_name))
++      {
++        if (store_constraints(thd, table, db_name, table_name, key_info->name,
++                              strlen(key_info->name),
++                              STRING_WITH_LEN("PRIMARY KEY")))
++          DBUG_RETURN(1);
++      }
++      else if (key_info->flags & HA_NOSAME)
++      {
++        if (store_constraints(thd, table, db_name, table_name, key_info->name,
++                              strlen(key_info->name),
++                              STRING_WITH_LEN("UNIQUE")))
++          DBUG_RETURN(1);
++      }
++    }
++
++    show_table->file->get_foreign_key_list(thd, &f_key_list);
++    FOREIGN_KEY_INFO *f_key_info;
++    List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
++    while ((f_key_info=it++))
++    {
++      if (store_constraints(thd, table, db_name, table_name, 
++                            f_key_info->forein_id->str,
++                            strlen(f_key_info->forein_id->str),
++                            "FOREIGN KEY", 11))
++        DBUG_RETURN(1);
++    }
++  }
++  DBUG_RETURN(res);
++}
++
++
++static bool store_trigger(THD *thd, TABLE *table, LEX_STRING *db_name,
++                          LEX_STRING *table_name, LEX_STRING *trigger_name,
++                          enum trg_event_type event,
++                          enum trg_action_time_type timing,
++                          LEX_STRING *trigger_stmt,
++                          ulong sql_mode,
++                          LEX_STRING *definer_buffer,
++                          LEX_STRING *client_cs_name,
++                          LEX_STRING *connection_cl_name,
++                          LEX_STRING *db_cl_name)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  LEX_STRING sql_mode_rep;
++
++  restore_record(table, s->default_values);
++  table->field[1]->store(db_name->str, db_name->length, cs);
++  table->field[2]->store(trigger_name->str, trigger_name->length, cs);
++  table->field[3]->store(trg_event_type_names[event].str,
++                         trg_event_type_names[event].length, cs);
++  table->field[5]->store(db_name->str, db_name->length, cs);
++  table->field[6]->store(table_name->str, table_name->length, cs);
++  table->field[9]->store(trigger_stmt->str, trigger_stmt->length, cs);
++  table->field[10]->store(STRING_WITH_LEN("ROW"), cs);
++  table->field[11]->store(trg_action_time_type_names[timing].str,
++                          trg_action_time_type_names[timing].length, cs);
++  table->field[14]->store(STRING_WITH_LEN("OLD"), cs);
++  table->field[15]->store(STRING_WITH_LEN("NEW"), cs);
++
++  sys_var_thd_sql_mode::symbolic_mode_representation(thd, sql_mode,
++                                                     &sql_mode_rep);
++  table->field[17]->store(sql_mode_rep.str, sql_mode_rep.length, cs);
++  table->field[18]->store(definer_buffer->str, definer_buffer->length, cs);
++  table->field[19]->store(client_cs_name->str, client_cs_name->length, cs);
++  table->field[20]->store(connection_cl_name->str,
++                          connection_cl_name->length, cs);
++  table->field[21]->store(db_cl_name->str, db_cl_name->length, cs);
++
++  return schema_table_store_record(thd, table);
++}
++
++
++static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
++				      TABLE *table, bool res,
++				      LEX_STRING *db_name,
++				      LEX_STRING *table_name)
++{
++  DBUG_ENTER("get_schema_triggers_record");
++  /*
++    res can be non-zero when the processed table is a view or an error
++    happened while opening the processed table.
++  */
++  if (res)
++  {
++    if (thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++    thd->clear_error();
++    DBUG_RETURN(0);
++  }
++  if (!tables->view && tables->table->triggers)
++  {
++    Table_triggers_list *triggers= tables->table->triggers;
++    int event, timing;
++
++    if (check_table_access(thd, TRIGGER_ACL, tables, 1, TRUE))
++      goto ret;
++
++    for (event= 0; event < (int)TRG_EVENT_MAX; event++)
++    {
++      for (timing= 0; timing < (int)TRG_ACTION_MAX; timing++)
++      {
++        LEX_STRING trigger_name;
++        LEX_STRING trigger_stmt;
++        ulong sql_mode;
++        char definer_holder[USER_HOST_BUFF_SIZE];
++        LEX_STRING definer_buffer;
++        LEX_STRING client_cs_name;
++        LEX_STRING connection_cl_name;
++        LEX_STRING db_cl_name;
++
++        definer_buffer.str= definer_holder;
++        if (triggers->get_trigger_info(thd, (enum trg_event_type) event,
++                                       (enum trg_action_time_type)timing,
++                                       &trigger_name, &trigger_stmt,
++                                       &sql_mode,
++                                       &definer_buffer,
++                                       &client_cs_name,
++                                       &connection_cl_name,
++                                       &db_cl_name))
++          continue;
++
++        if (store_trigger(thd, table, db_name, table_name, &trigger_name,
++                         (enum trg_event_type) event,
++                         (enum trg_action_time_type) timing, &trigger_stmt,
++                         sql_mode,
++                         &definer_buffer,
++                         &client_cs_name,
++                         &connection_cl_name,
++                         &db_cl_name))
++          DBUG_RETURN(1);
++      }
++    }
++  }
++ret:
++  DBUG_RETURN(0);
++}
++
++
++void store_key_column_usage(TABLE *table, LEX_STRING *db_name,
++                            LEX_STRING *table_name, const char *key_name,
++                            uint key_len, const char *con_type, uint con_len,
++                            longlong idx)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  table->field[1]->store(db_name->str, db_name->length, cs);
++  table->field[2]->store(key_name, key_len, cs);
++  table->field[4]->store(db_name->str, db_name->length, cs);
++  table->field[5]->store(table_name->str, table_name->length, cs);
++  table->field[6]->store(con_type, con_len, cs);
++  table->field[7]->store((longlong) idx, TRUE);
++}
++
++
++static int get_schema_key_column_usage_record(THD *thd,
++					      TABLE_LIST *tables,
++					      TABLE *table, bool res,
++					      LEX_STRING *db_name,
++					      LEX_STRING *table_name)
++{
++  DBUG_ENTER("get_schema_key_column_usage_record");
++  if (res)
++  {
++    if (thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++    thd->clear_error();
++    DBUG_RETURN(0);
++  }
++  else if (!tables->view)
++  {
++    List<FOREIGN_KEY_INFO> f_key_list;
++    TABLE *show_table= tables->table;
++    KEY *key_info=show_table->key_info;
++    uint primary_key= show_table->s->primary_key;
++
++    // This is not needed since no statistics are displayed.
++    // show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME);
++
++    for (uint i=0 ; i < show_table->s->keys ; i++, key_info++)
++    {
++      if (i != primary_key && !(key_info->flags & HA_NOSAME))
++        continue;
++      uint f_idx= 0;
++      KEY_PART_INFO *key_part= key_info->key_part;
++      for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
++      {
++        if (key_part->field)
++        {
++          f_idx++;
++          restore_record(table, s->default_values);
++          store_key_column_usage(table, db_name, table_name,
++                                 key_info->name,
++                                 strlen(key_info->name), 
++                                 key_part->field->field_name, 
++                                 strlen(key_part->field->field_name),
++                                 (longlong) f_idx);
++          if (schema_table_store_record(thd, table))
++            DBUG_RETURN(1);
++        }
++      }
++    }
++
++    show_table->file->get_foreign_key_list(thd, &f_key_list);
++    FOREIGN_KEY_INFO *f_key_info;
++    List_iterator_fast<FOREIGN_KEY_INFO> fkey_it(f_key_list);
++    while ((f_key_info= fkey_it++))
++    {
++      LEX_STRING *f_info;
++      LEX_STRING *r_info;
++      List_iterator_fast<LEX_STRING> it(f_key_info->foreign_fields),
++        it1(f_key_info->referenced_fields);
++      uint f_idx= 0;
++      while ((f_info= it++))
++      {
++        r_info= it1++;
++        f_idx++;
++        restore_record(table, s->default_values);
++        store_key_column_usage(table, db_name, table_name,
++                               f_key_info->forein_id->str,
++                               f_key_info->forein_id->length,
++                               f_info->str, f_info->length,
++                               (longlong) f_idx);
++        table->field[8]->store((longlong) f_idx, TRUE);
++        table->field[8]->set_notnull();
++        table->field[9]->store(f_key_info->referenced_db->str,
++                               f_key_info->referenced_db->length,
++                               system_charset_info);
++        table->field[9]->set_notnull();
++        table->field[10]->store(f_key_info->referenced_table->str,
++                                f_key_info->referenced_table->length, 
++                                system_charset_info);
++        table->field[10]->set_notnull();
++        table->field[11]->store(r_info->str, r_info->length,
++                                system_charset_info);
++        table->field[11]->set_notnull();
++        if (schema_table_store_record(thd, table))
++          DBUG_RETURN(1);
++      }
++    }
++  }
++  DBUG_RETURN(res);
++}
++
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++static void collect_partition_expr(List<char> &field_list, String *str)
++{
++  List_iterator<char> part_it(field_list);
++  ulong no_fields= field_list.elements;
++  const char *field_str;
++  str->length(0);
++  while ((field_str= part_it++))
++  {
++    str->append(field_str);
++    if (--no_fields != 0)
++      str->append(",");
++  }
++  return;
++}
++#endif
++
++
++static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
++                                           TABLE *showing_table,
++                                           partition_element *part_elem,
++                                           handler *file, uint part_id)
++{
++  TABLE* table= schema_table;
++  CHARSET_INFO *cs= system_charset_info;
++  PARTITION_INFO stat_info;
++  MYSQL_TIME time;
++  file->get_dynamic_partition_info(&stat_info, part_id);
++  table->field[12]->store((longlong) stat_info.records, TRUE);
++  table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE);
++  table->field[14]->store((longlong) stat_info.data_file_length, TRUE);
++  if (stat_info.max_data_file_length)
++  {
++    table->field[15]->store((longlong) stat_info.max_data_file_length, TRUE);
++    table->field[15]->set_notnull();
++  }
++  table->field[16]->store((longlong) stat_info.index_file_length, TRUE);
++  table->field[17]->store((longlong) stat_info.delete_length, TRUE);
++  if (stat_info.create_time)
++  {
++    thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                              (my_time_t)stat_info.create_time);
++    table->field[18]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++    table->field[18]->set_notnull();
++  }
++  if (stat_info.update_time)
++  {
++    thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                              (my_time_t)stat_info.update_time);
++    table->field[19]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++    table->field[19]->set_notnull();
++  }
++  if (stat_info.check_time)
++  {
++    thd->variables.time_zone->gmt_sec_to_TIME(&time,
++                                              (my_time_t)stat_info.check_time);
++    table->field[20]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++    table->field[20]->set_notnull();
++  }
++  if (file->ha_table_flags() & (ulong) HA_HAS_CHECKSUM)
++  {
++    table->field[21]->store((longlong) stat_info.check_sum, TRUE);
++    table->field[21]->set_notnull();
++  }
++  if (part_elem)
++  {
++    if (part_elem->part_comment)
++      table->field[22]->store(part_elem->part_comment,
++                              strlen(part_elem->part_comment), cs);
++    else
++      table->field[22]->store(STRING_WITH_LEN(""), cs);
++    if (part_elem->nodegroup_id != UNDEF_NODEGROUP)
++      table->field[23]->store((longlong) part_elem->nodegroup_id, TRUE);
++    else
++      table->field[23]->store(STRING_WITH_LEN("default"), cs);
++
++    table->field[24]->set_notnull();
++    if (part_elem->tablespace_name)
++      table->field[24]->store(part_elem->tablespace_name,
++                              strlen(part_elem->tablespace_name), cs);
++    else
++    {
++      char *ts= showing_table->file->get_tablespace_name(thd,0,0);
++      if(ts)
++      {
++        table->field[24]->store(ts, strlen(ts), cs);
++        my_free(ts, MYF(0));
++      }
++      else
++        table->field[24]->set_null();
++    }
++  }
++  return;
++}
++
++
++static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
++                                        TABLE *table, bool res,
++                                        LEX_STRING *db_name,
++                                        LEX_STRING *table_name)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  char buff[61];
++  String tmp_res(buff, sizeof(buff), cs);
++  String tmp_str;
++  TABLE *show_table= tables->table;
++  handler *file;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  partition_info *part_info;
++#endif
++  DBUG_ENTER("get_schema_partitions_record");
++
++  if (res)
++  {
++    if (thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++    thd->clear_error();
++    DBUG_RETURN(0);
++  }
++  file= show_table->file;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  part_info= show_table->part_info;
++  if (part_info)
++  {
++    partition_element *part_elem;
++    List_iterator<partition_element> part_it(part_info->partitions);
++    uint part_pos= 0, part_id= 0;
++
++    restore_record(table, s->default_values);
++    table->field[1]->store(db_name->str, db_name->length, cs);
++    table->field[2]->store(table_name->str, table_name->length, cs);
++
++
++    /* Partition method */
++    switch (part_info->part_type) {
++    case RANGE_PARTITION:
++      table->field[7]->store(partition_keywords[PKW_RANGE].str,
++                             partition_keywords[PKW_RANGE].length, cs);
++      break;
++    case LIST_PARTITION:
++      table->field[7]->store(partition_keywords[PKW_LIST].str,
++                             partition_keywords[PKW_LIST].length, cs);
++      break;
++    case HASH_PARTITION:
++      tmp_res.length(0);
++      if (part_info->linear_hash_ind)
++        tmp_res.append(partition_keywords[PKW_LINEAR].str,
++                       partition_keywords[PKW_LINEAR].length);
++      if (part_info->list_of_part_fields)
++        tmp_res.append(partition_keywords[PKW_KEY].str,
++                       partition_keywords[PKW_KEY].length);
++      else
++        tmp_res.append(partition_keywords[PKW_HASH].str, 
++                       partition_keywords[PKW_HASH].length);
++      table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
++      break;
++    default:
++      DBUG_ASSERT(0);
++      my_error(ER_OUT_OF_RESOURCES, MYF(0));
++      current_thd->fatal_error();
++      DBUG_RETURN(1);
++    }
++    table->field[7]->set_notnull();
++
++    /* Partition expression */
++    if (part_info->part_expr)
++    {
++      table->field[9]->store(part_info->part_func_string,
++                             part_info->part_func_len, cs);
++    }
++    else if (part_info->list_of_part_fields)
++    {
++      collect_partition_expr(part_info->part_field_list, &tmp_str);
++      table->field[9]->store(tmp_str.ptr(), tmp_str.length(), cs);
++    }
++    table->field[9]->set_notnull();
++
++    if (part_info->is_sub_partitioned())
++    {
++      /* Subpartition method */
++      tmp_res.length(0);
++      if (part_info->linear_hash_ind)
++        tmp_res.append(partition_keywords[PKW_LINEAR].str,
++                       partition_keywords[PKW_LINEAR].length);
++      if (part_info->list_of_subpart_fields)
++        tmp_res.append(partition_keywords[PKW_KEY].str,
++                       partition_keywords[PKW_KEY].length);
++      else
++        tmp_res.append(partition_keywords[PKW_HASH].str, 
++                       partition_keywords[PKW_HASH].length);
++      table->field[8]->store(tmp_res.ptr(), tmp_res.length(), cs);
++      table->field[8]->set_notnull();
++
++      /* Subpartition expression */
++      if (part_info->subpart_expr)
++      {
++        table->field[10]->store(part_info->subpart_func_string,
++                                part_info->subpart_func_len, cs);
++      }
++      else if (part_info->list_of_subpart_fields)
++      {
++        collect_partition_expr(part_info->subpart_field_list, &tmp_str);
++        table->field[10]->store(tmp_str.ptr(), tmp_str.length(), cs);
++      }
++      table->field[10]->set_notnull();
++    }
++
++    while ((part_elem= part_it++))
++    {
++      table->field[3]->store(part_elem->partition_name,
++                             strlen(part_elem->partition_name), cs);
++      table->field[3]->set_notnull();
++      /* PARTITION_ORDINAL_POSITION */
++      table->field[5]->store((longlong) ++part_pos, TRUE);
++      table->field[5]->set_notnull();
++
++      /* Partition description */
++      if (part_info->part_type == RANGE_PARTITION)
++      {
++        if (part_elem->range_value != LONGLONG_MAX)
++          table->field[11]->store((longlong) part_elem->range_value, FALSE);
++        else
++          table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
++                                 partition_keywords[PKW_MAXVALUE].length, cs);
++        table->field[11]->set_notnull();
++      }
++      else if (part_info->part_type == LIST_PARTITION)
++      {
++        List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
++        part_elem_value *list_value;
++        uint no_items= part_elem->list_val_list.elements;
++        tmp_str.length(0);
++        tmp_res.length(0);
++        if (part_elem->has_null_value)
++        {
++          tmp_str.append("NULL");
++          if (no_items > 0)
++            tmp_str.append(",");
++        }
++        while ((list_value= list_val_it++))
++        {
++          if (!list_value->unsigned_flag)
++            tmp_res.set(list_value->value, cs);
++          else
++            tmp_res.set((ulonglong)list_value->value, cs);
++          tmp_str.append(tmp_res);
++          if (--no_items != 0)
++            tmp_str.append(",");
++        };
++        table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
++        table->field[11]->set_notnull();
++      }
++
++      if (part_elem->subpartitions.elements)
++      {
++        List_iterator<partition_element> sub_it(part_elem->subpartitions);
++        partition_element *subpart_elem;
++        uint subpart_pos= 0;
++
++        while ((subpart_elem= sub_it++))
++        {
++          table->field[4]->store(subpart_elem->partition_name,
++                                 strlen(subpart_elem->partition_name), cs);
++          table->field[4]->set_notnull();
++          /* SUBPARTITION_ORDINAL_POSITION */
++          table->field[6]->store((longlong) ++subpart_pos, TRUE);
++          table->field[6]->set_notnull();
++          
++          store_schema_partitions_record(thd, table, show_table, subpart_elem,
++                                         file, part_id);
++          part_id++;
++          if(schema_table_store_record(thd, table))
++            DBUG_RETURN(1);
++        }
++      }
++      else
++      {
++        store_schema_partitions_record(thd, table, show_table, part_elem,
++                                       file, part_id);
++        part_id++;
++        if(schema_table_store_record(thd, table))
++          DBUG_RETURN(1);
++      }
++    }
++    DBUG_RETURN(0);
++  }
++  else
++#endif
++  {
++    store_schema_partitions_record(thd, table, show_table, 0, file, 0);
++    if(schema_table_store_record(thd, table))
++      DBUG_RETURN(1);
++  }
++  DBUG_RETURN(0);
++}
++
++
++#ifdef NOT_USED
++static interval_type get_real_interval_type(interval_type i_type)
++{
++  switch (i_type) {
++  case INTERVAL_YEAR:
++    return INTERVAL_YEAR;
++
++  case INTERVAL_QUARTER:
++  case INTERVAL_YEAR_MONTH:
++  case INTERVAL_MONTH:
++    return INTERVAL_MONTH;
++
++  case INTERVAL_WEEK:
++  case INTERVAL_DAY:
++    return INTERVAL_DAY;
++
++  case INTERVAL_DAY_HOUR:
++  case INTERVAL_HOUR:
++    return INTERVAL_HOUR;
++
++  case INTERVAL_DAY_MINUTE:
++  case INTERVAL_HOUR_MINUTE:
++  case INTERVAL_MINUTE:
++    return INTERVAL_MINUTE;
++
++  case INTERVAL_DAY_SECOND:
++  case INTERVAL_HOUR_SECOND:
++  case INTERVAL_MINUTE_SECOND:
++  case INTERVAL_SECOND:
++    return INTERVAL_SECOND;
++
++  case INTERVAL_DAY_MICROSECOND:
++  case INTERVAL_HOUR_MICROSECOND:
++  case INTERVAL_MINUTE_MICROSECOND:
++  case INTERVAL_SECOND_MICROSECOND:
++  case INTERVAL_MICROSECOND:
++    return INTERVAL_MICROSECOND;
++  case INTERVAL_LAST:
++    DBUG_ASSERT(0);
++  }
++  DBUG_ASSERT(0);
++  return INTERVAL_SECOND;
++}
++
++#endif
++
++#ifdef HAVE_EVENT_SCHEDULER
++/*
++  Loads an event from mysql.event and copies its data to a row of
++  I_S.EVENTS
++
++  Synopsis
++    copy_event_to_schema_table()
++      thd         Thread
++      sch_table   The schema table (information_schema.event)
++      event_table The event table to use for loading (mysql.event).
++
++  Returns
++    0  OK
++    1  Error
++*/
++
++int
++copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
++{
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  CHARSET_INFO *scs= system_charset_info;
++  MYSQL_TIME time;
++  Event_timed et;
++  DBUG_ENTER("copy_event_to_schema_table");
++
++  restore_record(sch_table, s->default_values);
++
++  if (et.load_from_row(thd, event_table))
++  {
++    my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), event_table->alias);
++    DBUG_RETURN(1);
++  }
++
++  if (!(!wild || !wild[0] || !wild_compare(et.name.str, wild, 0)))
++    DBUG_RETURN(0);
++
++  /*
++    Skip events in schemas one does not have access to. The check is
++    optimized; in the case of SHOW EVENTS the user is guaranteed to
++    have access.
++  */
++  if (thd->lex->sql_command != SQLCOM_SHOW_EVENTS &&
++      check_access(thd, EVENT_ACL, et.dbname.str, 0, 0, 1,
++                   is_schema_db(et.dbname.str, et.dbname.length)))
++    DBUG_RETURN(0);
++
++  /* ->field[0] is EVENT_CATALOG and is by default NULL */
++
++  sch_table->field[ISE_EVENT_SCHEMA]->
++                                store(et.dbname.str, et.dbname.length,scs);
++  sch_table->field[ISE_EVENT_NAME]->
++                                store(et.name.str, et.name.length, scs);
++  sch_table->field[ISE_DEFINER]->
++                                store(et.definer.str, et.definer.length, scs);
++  const String *tz_name= et.time_zone->get_name();
++  sch_table->field[ISE_TIME_ZONE]->
++                                store(tz_name->ptr(), tz_name->length(), scs);
++  sch_table->field[ISE_EVENT_BODY]->
++                                store(STRING_WITH_LEN("SQL"), scs);
++  sch_table->field[ISE_EVENT_DEFINITION]->store(
++    et.body_utf8.str, et.body_utf8.length, scs);
++
++  /* SQL_MODE */
++  {
++    LEX_STRING sql_mode;
++    sys_var_thd_sql_mode::symbolic_mode_representation(thd, et.sql_mode,
++                                                       &sql_mode);
++    sch_table->field[ISE_SQL_MODE]->
++                                store(sql_mode.str, sql_mode.length, scs);
++  }
++
++  int not_used=0;
++
++  if (et.expression)
++  {
++    String show_str;
++    /* type */
++    sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("RECURRING"), scs);
++
++    if (Events::reconstruct_interval_expression(&show_str, et.interval,
++                                                et.expression))
++      DBUG_RETURN(1);
++
++    sch_table->field[ISE_INTERVAL_VALUE]->set_notnull();
++    sch_table->field[ISE_INTERVAL_VALUE]->
++                                store(show_str.ptr(), show_str.length(), scs);
++
++    LEX_STRING *ival= &interval_type_to_name[et.interval];
++    sch_table->field[ISE_INTERVAL_FIELD]->set_notnull();
++    sch_table->field[ISE_INTERVAL_FIELD]->store(ival->str, ival->length, scs);
++
++    /* starts & ends . STARTS is always set - see sql_yacc.yy */
++    et.time_zone->gmt_sec_to_TIME(&time, et.starts);
++    sch_table->field[ISE_STARTS]->set_notnull();
++    sch_table->field[ISE_STARTS]->
++                                store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++
++    if (!et.ends_null)
++    {
++      et.time_zone->gmt_sec_to_TIME(&time, et.ends);
++      sch_table->field[ISE_ENDS]->set_notnull();
++      sch_table->field[ISE_ENDS]->
++                                store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++    }
++  }
++  else
++  {
++    /* type */
++    sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("ONE TIME"), scs);
++
++    et.time_zone->gmt_sec_to_TIME(&time, et.execute_at);
++    sch_table->field[ISE_EXECUTE_AT]->set_notnull();
++    sch_table->field[ISE_EXECUTE_AT]->
++                          store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++  }
++
++  /* status */
++
++  switch (et.status)
++  {
++    case Event_parse_data::ENABLED:
++      sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs);
++      break;
++    case Event_parse_data::SLAVESIDE_DISABLED:
++      sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("SLAVESIDE_DISABLED"),
++                                          scs);
++      break;
++    case Event_parse_data::DISABLED:
++      sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs);
++      break;
++    default:
++      DBUG_ASSERT(0);
++  }
++  sch_table->field[ISE_ORIGINATOR]->store(et.originator, TRUE);
++
++  /* on_completion */
++  if (et.on_completion == Event_parse_data::ON_COMPLETION_DROP)
++    sch_table->field[ISE_ON_COMPLETION]->
++                                store(STRING_WITH_LEN("NOT PRESERVE"), scs);
++  else
++    sch_table->field[ISE_ON_COMPLETION]->
++                                store(STRING_WITH_LEN("PRESERVE"), scs);
++    
++  number_to_datetime(et.created, &time, 0, &not_used);
++  DBUG_ASSERT(not_used==0);
++  sch_table->field[ISE_CREATED]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++
++  number_to_datetime(et.modified, &time, 0, &not_used);
++  DBUG_ASSERT(not_used==0);
++  sch_table->field[ISE_LAST_ALTERED]->
++                                store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++
++  if (et.last_executed)
++  {
++    et.time_zone->gmt_sec_to_TIME(&time, et.last_executed);
++    sch_table->field[ISE_LAST_EXECUTED]->set_notnull();
++    sch_table->field[ISE_LAST_EXECUTED]->
++                       store_time(&time, MYSQL_TIMESTAMP_DATETIME);
++  }
++
++  sch_table->field[ISE_EVENT_COMMENT]->
++                      store(et.comment.str, et.comment.length, scs);
++
++  sch_table->field[ISE_CLIENT_CS]->set_notnull();
++  sch_table->field[ISE_CLIENT_CS]->store(
++    et.creation_ctx->get_client_cs()->csname,
++    strlen(et.creation_ctx->get_client_cs()->csname),
++    scs);
++
++  sch_table->field[ISE_CONNECTION_CL]->set_notnull();
++  sch_table->field[ISE_CONNECTION_CL]->store(
++    et.creation_ctx->get_connection_cl()->name,
++    strlen(et.creation_ctx->get_connection_cl()->name),
++    scs);
++
++  sch_table->field[ISE_DB_CL]->set_notnull();
++  sch_table->field[ISE_DB_CL]->store(
++    et.creation_ctx->get_db_cl()->name,
++    strlen(et.creation_ctx->get_db_cl()->name),
++    scs);
++
++  if (schema_table_store_record(thd, sch_table))
++    DBUG_RETURN(1);
++
++  DBUG_RETURN(0);
++}
++#endif
++
++int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_open_tables");
++  const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
++  TABLE *table= tables->table;
++  CHARSET_INFO *cs= system_charset_info;
++  OPEN_TABLE_LIST *open_list;
++  if (!(open_list=list_open_tables(thd,thd->lex->select_lex.db, wild))
++            && thd->is_fatal_error)
++    DBUG_RETURN(1);
++
++  for (; open_list ; open_list=open_list->next)
++  {
++    restore_record(table, s->default_values);
++    table->field[0]->store(open_list->db, strlen(open_list->db), cs);
++    table->field[1]->store(open_list->table, strlen(open_list->table), cs);
++    table->field[2]->store((longlong) open_list->in_use, TRUE);
++    table->field[3]->store((longlong) open_list->locked, TRUE);
++    if (schema_table_store_record(thd, table))
++      DBUG_RETURN(1);
++  }
++  DBUG_RETURN(0);
++}
++
++
++int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_variables");
++  int res= 0;
++  LEX *lex= thd->lex;
++  const char *wild= lex->wild ? lex->wild->ptr() : NullS;
++  enum enum_schema_tables schema_table_idx=
++    get_schema_table_idx(tables->schema_table);
++  enum enum_var_type option_type= OPT_SESSION;
++  bool upper_case_names= (schema_table_idx != SCH_VARIABLES);
++  bool sorted_vars= (schema_table_idx == SCH_VARIABLES);
++
++  if (lex->option_type == OPT_GLOBAL ||
++      schema_table_idx == SCH_GLOBAL_VARIABLES)
++    option_type= OPT_GLOBAL;
++
++  rw_rdlock(&LOCK_system_variables_hash);
++  res= show_status_array(thd, wild, enumerate_sys_vars(thd, sorted_vars),
++                         option_type, NULL, "", tables->table, upper_case_names, cond);
++  rw_unlock(&LOCK_system_variables_hash);
++  DBUG_RETURN(res);
++}
++
++
++int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_status");
++  LEX *lex= thd->lex;
++  const char *wild= lex->wild ? lex->wild->ptr() : NullS;
++  int res= 0;
++  STATUS_VAR *tmp1, tmp;
++  enum enum_schema_tables schema_table_idx=
++    get_schema_table_idx(tables->schema_table);
++  enum enum_var_type option_type;
++  bool upper_case_names= (schema_table_idx != SCH_STATUS);
++
++  if (schema_table_idx == SCH_STATUS)
++  {
++    option_type= lex->option_type;
++    if (option_type == OPT_GLOBAL)
++      tmp1= &tmp;
++    else
++      tmp1= thd->initial_status_var;
++  }
++  else if (schema_table_idx == SCH_GLOBAL_STATUS)
++  {
++    option_type= OPT_GLOBAL;
++    tmp1= &tmp;
++  }
++  else
++  { 
++    option_type= OPT_SESSION;
++    tmp1= &thd->status_var;
++  }
++
++  pthread_mutex_lock(&LOCK_status);
++  if (option_type == OPT_GLOBAL)
++    calc_sum_of_all_status(&tmp);
++  res= show_status_array(thd, wild,
++                         (SHOW_VAR *)all_status_vars.buffer,
++                         option_type, tmp1, "", tables->table,
++                         upper_case_names, cond);
++  pthread_mutex_unlock(&LOCK_status);
++  DBUG_RETURN(res);
++}
++
++
++/*
++  Fill and store records into I_S.referential_constraints table
++
++  SYNOPSIS
++    get_referential_constraints_record()
++    thd                 thread handle
++    tables              table list struct(processed table)
++    table               I_S table
++    res                 1 means there was an error while opening the processed table
++                        0 means the processed table was opened without error
++    base_name           db name
++    file_name           table name
++
++  RETURN
++    0	ok
++    #   error
++*/
++
++static int
++get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
++                                   TABLE *table, bool res,
++                                   LEX_STRING *db_name, LEX_STRING *table_name)
++{
++  CHARSET_INFO *cs= system_charset_info;
++  DBUG_ENTER("get_referential_constraints_record");
++
++  if (res)
++  {
++    if (thd->is_error())
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                   thd->main_da.sql_errno(), thd->main_da.message());
++    thd->clear_error();
++    DBUG_RETURN(0);
++  }
++  if (!tables->view)
++  {
++    List<FOREIGN_KEY_INFO> f_key_list;
++    TABLE *show_table= tables->table;
++
++    // This is not needed since no statistics are displayed.
++    // show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME);
++
++    show_table->file->get_foreign_key_list(thd, &f_key_list);
++    FOREIGN_KEY_INFO *f_key_info;
++    List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
++    while ((f_key_info= it++))
++    {
++      restore_record(table, s->default_values);
++      table->field[1]->store(db_name->str, db_name->length, cs);
++      table->field[9]->store(table_name->str, table_name->length, cs);
++      table->field[2]->store(f_key_info->forein_id->str,
++                             f_key_info->forein_id->length, cs);
++      table->field[4]->store(f_key_info->referenced_db->str, 
++                             f_key_info->referenced_db->length, cs);
++      table->field[10]->store(f_key_info->referenced_table->str, 
++                             f_key_info->referenced_table->length, cs);
++      if (f_key_info->referenced_key_name)
++      {
++        table->field[5]->store(f_key_info->referenced_key_name->str, 
++                               f_key_info->referenced_key_name->length, cs);
++        table->field[5]->set_notnull();
++      }
++      else
++        table->field[5]->set_null();
++      table->field[6]->store(STRING_WITH_LEN("NONE"), cs);
++      table->field[7]->store(f_key_info->update_method->str, 
++                             f_key_info->update_method->length, cs);
++      table->field[8]->store(f_key_info->delete_method->str, 
++                             f_key_info->delete_method->length, cs);
++      if (schema_table_store_record(thd, table))
++        DBUG_RETURN(1);
++    }
++  }
++  DBUG_RETURN(0);
++}
++
++struct schema_table_ref 
++{
++  const char *table_name;
++  ST_SCHEMA_TABLE *schema_table;
++};
++
++
++/*
++  Find schema_tables element by name
++
++  SYNOPSIS
++    find_schema_table_in_plugin()
++    thd                 thread handler
++    plugin              plugin
++    table_name          table name
++
++  RETURN
++    0	table not found
++    1   found the schema table
++*/
++static my_bool find_schema_table_in_plugin(THD *thd, plugin_ref plugin,
++                                           void* p_table)
++{
++  schema_table_ref *p_schema_table= (schema_table_ref *)p_table;
++  const char* table_name= p_schema_table->table_name;
++  ST_SCHEMA_TABLE *schema_table= plugin_data(plugin, ST_SCHEMA_TABLE *);
++  DBUG_ENTER("find_schema_table_in_plugin");
++
++  if (!my_strcasecmp(system_charset_info,
++                     schema_table->table_name,
++                     table_name)) {
++    p_schema_table->schema_table= schema_table;
++    DBUG_RETURN(1);
++  }
++
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Find schema_tables element by name
++
++  SYNOPSIS
++    find_schema_table()
++    thd                 thread handler
++    table_name          table name
++
++  RETURN
++    0	table not found
++    #   pointer to 'schema_tables' element
++*/
++
++ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name)
++{
++  schema_table_ref schema_table_a;
++  ST_SCHEMA_TABLE *schema_table= schema_tables;
++  DBUG_ENTER("find_schema_table");
++
++  for (; schema_table->table_name; schema_table++)
++  {
++    if (!my_strcasecmp(system_charset_info,
++                       schema_table->table_name,
++                       table_name))
++      DBUG_RETURN(schema_table);
++  }
++
++  schema_table_a.table_name= table_name;
++  if (plugin_foreach(thd, find_schema_table_in_plugin, 
++                     MYSQL_INFORMATION_SCHEMA_PLUGIN, &schema_table_a))
++    DBUG_RETURN(schema_table_a.schema_table);
++
++  DBUG_RETURN(NULL);
++}
++
++
++ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx)
++{
++  return &schema_tables[schema_table_idx];
++}
++
++
++/**
++  Create information_schema table using schema_table data.
++
++  @note
++    For MYSQL_TYPE_DECIMAL fields only, the field_length member has encoded
++    into it two numbers, based on modulus of base-10 numbers.  In the ones
++    position is the number of decimals.  Tens position is unused.  In the
++    hundreds and thousands position is a two-digit decimal number representing
++    length.  Encode this value with  (decimals*100)+length  , where
++    0<decimals<10 and 0<=length<100 .
++
++  @param
++    thd	       	          thread handler
++
++  @param table_list Used to pass I_S table information (fields info, table
++  parameters, etc.) and table name.
++
++  @retval  \#             Pointer to created table
++  @retval  NULL           Can't create table
++*/
++
++TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
++{
++  int field_count= 0;
++  Item *item;
++  TABLE *table;
++  List<Item> field_list;
++  ST_SCHEMA_TABLE *schema_table= table_list->schema_table;
++  ST_FIELD_INFO *fields_info= schema_table->fields_info;
++  CHARSET_INFO *cs= system_charset_info;
++  DBUG_ENTER("create_schema_table");
++
++  for (; fields_info->field_name; fields_info++)
++  {
++    switch (fields_info->field_type) {
++    case MYSQL_TYPE_TINY:
++    case MYSQL_TYPE_LONG:
++    case MYSQL_TYPE_SHORT:
++    case MYSQL_TYPE_LONGLONG:
++    case MYSQL_TYPE_INT24:
++      if (!(item= new Item_return_int(fields_info->field_name,
++                                      fields_info->field_length,
++                                      fields_info->field_type,
++                                      fields_info->value)))
++      {
++        DBUG_RETURN(0);
++      }
++      item->unsigned_flag= (fields_info->field_flags & MY_I_S_UNSIGNED);
++      break;
++    case MYSQL_TYPE_DATE:
++    case MYSQL_TYPE_TIME:
++    case MYSQL_TYPE_TIMESTAMP:
++    case MYSQL_TYPE_DATETIME:
++      if (!(item=new Item_return_date_time(fields_info->field_name,
++                                           fields_info->field_type)))
++      {
++        DBUG_RETURN(0);
++      }
++      break;
++    case MYSQL_TYPE_FLOAT:
++    case MYSQL_TYPE_DOUBLE:
++      if ((item= new Item_float(fields_info->field_name, 0.0, NOT_FIXED_DEC, 
++                           fields_info->field_length)) == NULL)
++        DBUG_RETURN(NULL);
++      break;
++    case MYSQL_TYPE_DECIMAL:
++    case MYSQL_TYPE_NEWDECIMAL:
++      if (!(item= new Item_decimal((longlong) fields_info->value, false)))
++      {
++        DBUG_RETURN(0);
++      }
++      item->unsigned_flag= (fields_info->field_flags & MY_I_S_UNSIGNED);
++      item->decimals= fields_info->field_length%10;
++      item->max_length= (fields_info->field_length/100)%100;
++      if (item->unsigned_flag == 0)
++        item->max_length+= 1;
++      if (item->decimals > 0)
++        item->max_length+= 1;
++      item->set_name(fields_info->field_name,
++                     strlen(fields_info->field_name), cs);
++      break;
++    case MYSQL_TYPE_TINY_BLOB:
++    case MYSQL_TYPE_MEDIUM_BLOB:
++    case MYSQL_TYPE_LONG_BLOB:
++    case MYSQL_TYPE_BLOB:
++      if (!(item= new Item_blob(fields_info->field_name,
++                                fields_info->field_length)))
++      {
++        DBUG_RETURN(0);
++      }
++      break;
++    default:
++      /* Don't let unimplemented types pass through. Could be a grave error. */
++      DBUG_ASSERT(fields_info->field_type == MYSQL_TYPE_STRING);
++
++      if (!(item= new Item_empty_string("", fields_info->field_length, cs)))
++      {
++        DBUG_RETURN(0);
++      }
++      item->set_name(fields_info->field_name,
++                     strlen(fields_info->field_name), cs);
++      break;
++    }
++    field_list.push_back(item);
++    item->maybe_null= (fields_info->field_flags & MY_I_S_MAYBE_NULL);
++    field_count++;
++  }
++  TMP_TABLE_PARAM *tmp_table_param =
++    (TMP_TABLE_PARAM*) (thd->alloc(sizeof(TMP_TABLE_PARAM)));
++  tmp_table_param->init();
++  tmp_table_param->table_charset= cs;
++  tmp_table_param->field_count= field_count;
++  tmp_table_param->schema_table= 1;
++  SELECT_LEX *select_lex= thd->lex->current_select;
++  if (!(table= create_tmp_table(thd, tmp_table_param,
++                                field_list, (ORDER*) 0, 0, 0, 
++                                (select_lex->options | thd->options |
++                                 TMP_TABLE_ALL_COLUMNS),
++                                HA_POS_ERROR, table_list->alias)))
++    DBUG_RETURN(0);
++  my_bitmap_map* bitmaps=
++    (my_bitmap_map*) thd->alloc(bitmap_buffer_size(field_count));
++  bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
++              FALSE);
++  table->read_set= &table->def_read_set;
++  bitmap_clear_all(table->read_set);
++  table_list->schema_table_param= tmp_table_param;
++  DBUG_RETURN(table);
++}
++
++
++/*
++  For old SHOW compatibility. It is used when
++  old SHOW doesn't have generated column names.
++  Makes the list of fields for SHOW.
++
++  SYNOPSIS
++    make_old_format()
++    thd			thread handler
++    schema_table        pointer to 'schema_tables' element
++
++  RETURN
++   1	error
++   0	success
++*/
++
++int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  ST_FIELD_INFO *field_info= schema_table->fields_info;
++  Name_resolution_context *context= &thd->lex->select_lex.context;
++  for (; field_info->field_name; field_info++)
++  {
++    if (field_info->old_name)
++    {
++      Item_field *field= new Item_field(context,
++                                        NullS, NullS, field_info->field_name);
++      if (field)
++      {
++        field->set_name(field_info->old_name,
++                        strlen(field_info->old_name),
++                        system_charset_info);
++        if (add_item_to_list(thd, field))
++          return 1;
++      }
++    }
++  }
++  return 0;
++}
++
++
++int make_schemata_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  char tmp[128];
++  LEX *lex= thd->lex;
++  SELECT_LEX *sel= lex->current_select;
++  Name_resolution_context *context= &sel->context;
++
++  if (!sel->item_list.elements)
++  {
++    ST_FIELD_INFO *field_info= &schema_table->fields_info[1];
++    String buffer(tmp,sizeof(tmp), system_charset_info);
++    Item_field *field= new Item_field(context,
++                                      NullS, NullS, field_info->field_name);
++    if (!field || add_item_to_list(thd, field))
++      return 1;
++    buffer.length(0);
++    buffer.append(field_info->old_name);
++    if (lex->wild && lex->wild->ptr())
++    {
++      buffer.append(STRING_WITH_LEN(" ("));
++      buffer.append(lex->wild->ptr());
++      buffer.append(')');
++    }
++    field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
++  }
++  return 0;
++}
++
++
++int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  char tmp[128];
++  String buffer(tmp,sizeof(tmp), thd->charset());
++  LEX *lex= thd->lex;
++  Name_resolution_context *context= &lex->select_lex.context;
++
++  ST_FIELD_INFO *field_info= &schema_table->fields_info[2];
++  buffer.length(0);
++  buffer.append(field_info->old_name);
++  buffer.append(lex->select_lex.db);
++  if (lex->wild && lex->wild->ptr())
++  {
++    buffer.append(STRING_WITH_LEN(" ("));
++    buffer.append(lex->wild->ptr());
++    buffer.append(')');
++  }
++  Item_field *field= new Item_field(context,
++                                    NullS, NullS, field_info->field_name);
++  if (add_item_to_list(thd, field))
++    return 1;
++  field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
++  if (thd->lex->verbose)
++  {
++    field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
++    field_info= &schema_table->fields_info[3];
++    field= new Item_field(context, NullS, NullS, field_info->field_name);
++    if (add_item_to_list(thd, field))
++      return 1;
++    field->set_name(field_info->old_name, strlen(field_info->old_name),
++                    system_charset_info);
++  }
++  return 0;
++}
++
++
++int make_columns_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  int fields_arr[]= {3, 14, 13, 6, 15, 5, 16, 17, 18, -1};
++  int *field_num= fields_arr;
++  ST_FIELD_INFO *field_info;
++  Name_resolution_context *context= &thd->lex->select_lex.context;
++
++  for (; *field_num >= 0; field_num++)
++  {
++    field_info= &schema_table->fields_info[*field_num];
++    if (!thd->lex->verbose && (*field_num == 13 ||
++                               *field_num == 17 ||
++                               *field_num == 18))
++      continue;
++    Item_field *field= new Item_field(context,
++                                      NullS, NullS, field_info->field_name);
++    if (field)
++    {
++      field->set_name(field_info->old_name,
++                      strlen(field_info->old_name),
++                      system_charset_info);
++      if (add_item_to_list(thd, field))
++        return 1;
++    }
++  }
++  return 0;
++}
++
++
++int make_character_sets_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  int fields_arr[]= {0, 2, 1, 3, -1};
++  int *field_num= fields_arr;
++  ST_FIELD_INFO *field_info;
++  Name_resolution_context *context= &thd->lex->select_lex.context;
++
++  for (; *field_num >= 0; field_num++)
++  {
++    field_info= &schema_table->fields_info[*field_num];
++    Item_field *field= new Item_field(context,
++                                      NullS, NullS, field_info->field_name);
++    if (field)
++    {
++      field->set_name(field_info->old_name,
++                      strlen(field_info->old_name),
++                      system_charset_info);
++      if (add_item_to_list(thd, field))
++        return 1;
++    }
++  }
++  return 0;
++}
++
++
++int make_proc_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
++{
++  int fields_arr[]= {2, 3, 4, 19, 16, 15, 14, 18, 20, 21, 22, -1};
++  int *field_num= fields_arr;
++  ST_FIELD_INFO *field_info;
++  Name_resolution_context *context= &thd->lex->select_lex.context;
++
++  for (; *field_num >= 0; field_num++)
++  {
++    field_info= &schema_table->fields_info[*field_num];
++    Item_field *field= new Item_field(context,
++                                      NullS, NullS, field_info->field_name);
++    if (field)
++    {
++      field->set_name(field_info->old_name,
++                      strlen(field_info->old_name),
++                      system_charset_info);
++      if (add_item_to_list(thd, field))
++        return 1;
++    }
++  }
++  return 0;
++}
++
++
++/*
++  Create information_schema table
++
++  SYNOPSIS
++  mysql_schema_table()
++    thd                thread handler
++    lex                pointer to LEX
++    table_list         pointer to table_list
++
++  RETURN
++    0	success
++    1   error
++*/
++
++int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
++{
++  TABLE *table;
++  DBUG_ENTER("mysql_schema_table");
++  if (!(table= table_list->schema_table->create_table(thd, table_list)))
++    DBUG_RETURN(1);
++  table->s->tmp_table= SYSTEM_TMP_TABLE;
++  table->grant.privilege= SELECT_ACL;
++  /*
++    This test is necessary to make
++    case insensitive file systems +
++    upper case table names (information schema tables) +
++    views
++    working correctly
++  */
++  if (table_list->schema_table_name)
++    table->alias_name_used= my_strcasecmp(table_alias_charset,
++                                          table_list->schema_table_name,
++                                          table_list->alias);
++  table_list->table_name= table->s->table_name.str;
++  table_list->table_name_length= table->s->table_name.length;
++  table_list->table= table;
++  table->next= thd->derived_tables;
++  thd->derived_tables= table;
++  table_list->select_lex->options |= OPTION_SCHEMA_TABLE;
++  lex->safe_to_cache_query= 0;
++
++  if (table_list->schema_table_reformed) // show command
++  {
++    SELECT_LEX *sel= lex->current_select;
++    Item *item;
++    Field_translator *transl, *org_transl;
++
++    if (table_list->field_translation)
++    {
++      Field_translator *end= table_list->field_translation_end;
++      for (transl= table_list->field_translation; transl < end; transl++)
++      {
++        if (!transl->item->fixed &&
++            transl->item->fix_fields(thd, &transl->item))
++          DBUG_RETURN(1);
++      }
++      DBUG_RETURN(0);
++    }
++    List_iterator_fast<Item> it(sel->item_list);
++    if (!(transl=
++          (Field_translator*)(thd->stmt_arena->
++                              alloc(sel->item_list.elements *
++                                    sizeof(Field_translator)))))
++    {
++      DBUG_RETURN(1);
++    }
++    for (org_transl= transl; (item= it++); transl++)
++    {
++      transl->item= item;
++      transl->name= item->name;
++      if (!item->fixed && item->fix_fields(thd, &transl->item))
++      {
++        DBUG_RETURN(1);
++      }
++    }
++    table_list->field_translation= org_transl;
++    table_list->field_translation_end= transl;
++  }
++
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Generate select from information_schema table
++
++  SYNOPSIS
++    make_schema_select()
++    thd                  thread handler
++    sel                  pointer to SELECT_LEX
++    schema_table_idx     index of 'schema_tables' element
++
++  RETURN
++    0	success
++    1   error
++*/
++
++int make_schema_select(THD *thd, SELECT_LEX *sel,
++		       enum enum_schema_tables schema_table_idx)
++{
++  ST_SCHEMA_TABLE *schema_table= get_schema_table(schema_table_idx);
++  LEX_STRING db, table;
++  DBUG_ENTER("make_schema_select");
++  DBUG_PRINT("enter", ("mysql_schema_select: %s", schema_table->table_name));
++  /*
++     We have to make non const db_name & table_name
++     because of lower_case_table_names
++  */
++  thd->make_lex_string(&db, INFORMATION_SCHEMA_NAME.str,
++                       INFORMATION_SCHEMA_NAME.length, 0);
++  thd->make_lex_string(&table, schema_table->table_name,
++                       strlen(schema_table->table_name), 0);
++  if (schema_table->old_format(thd, schema_table) ||   /* Handle old syntax */
++      !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0),
++                              0, 0, TL_READ))
++  {
++    DBUG_RETURN(1);
++  }
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Fill temporary schema tables before SELECT
++
++  SYNOPSIS
++    get_schema_tables_result()
++    join  join which use schema tables
++    executed_place place where I_S table processed
++
++  RETURN
++    FALSE success
++    TRUE  error
++*/
++
++bool get_schema_tables_result(JOIN *join,
++                              enum enum_schema_table_state executed_place)
++{
++  JOIN_TAB *tmp_join_tab= join->join_tab+join->tables;
++  THD *thd= join->thd;
++  LEX *lex= thd->lex;
++  bool result= 0;
++  DBUG_ENTER("get_schema_tables_result");
++
++  thd->no_warnings_for_error= 1;
++  for (JOIN_TAB *tab= join->join_tab; tab < tmp_join_tab; tab++)
++  {  
++    if (!tab->table || !tab->table->pos_in_table_list)
++      break;
++
++    TABLE_LIST *table_list= tab->table->pos_in_table_list;
++    if (table_list->schema_table && thd->fill_information_schema_tables())
++    {
++      bool is_subselect= (&lex->unit != lex->current_select->master_unit() &&
++                          lex->current_select->master_unit()->item);
++
++      /* A value of 0 indicates a dummy implementation */
++      if (table_list->schema_table->fill_table == 0)
++        continue;
++
++      /* skip I_S optimizations specific to get_all_tables */
++      if (thd->lex->describe &&
++          (table_list->schema_table->fill_table != get_all_tables))
++        continue;
++
++      /*
++        If schema table is already processed and
++        the statement is not a subselect then
++        we don't need to fill this table again.
++        If schema table is already processed and
++        schema_table_state != executed_place then
++        the table has already been processed and
++        we should skip processing its data a second time.
++      */
++      if (table_list->schema_table_state &&
++          (!is_subselect || table_list->schema_table_state != executed_place))
++        continue;
++
++      /*
++        if table is used in a subselect and
++        table has been processed earlier with the same
++        'executed_place' value then we should refresh the table.
++      */
++      if (table_list->schema_table_state && is_subselect)
++      {
++        table_list->table->file->extra(HA_EXTRA_NO_CACHE);
++        table_list->table->file->extra(HA_EXTRA_RESET_STATE);
++        table_list->table->file->ha_delete_all_rows();
++        free_io_cache(table_list->table);
++        filesort_free_buffers(table_list->table,1);
++        table_list->table->null_row= 0;
++      }
++      else
++        table_list->table->file->stats.records= 0;
++
++      if (table_list->schema_table->fill_table(thd, table_list,
++                                               tab->select_cond))
++      {
++        result= 1;
++        join->error= 1;
++        tab->read_record.file= table_list->table->file;
++        table_list->schema_table_state= executed_place;
++        break;
++      }
++      tab->read_record.file= table_list->table->file;
++      table_list->schema_table_state= executed_place;
++    }
++  }
++  thd->no_warnings_for_error= 0;
++  DBUG_RETURN(result);
++}
++
++struct run_hton_fill_schema_files_args
++{
++  TABLE_LIST *tables;
++  COND *cond;
++};
++
++static my_bool run_hton_fill_schema_files(THD *thd, plugin_ref plugin,
++                                          void *arg)
++{
++  struct run_hton_fill_schema_files_args *args=
++    (run_hton_fill_schema_files_args *) arg;
++  handlerton *hton= plugin_data(plugin, handlerton *);
++  if(hton->fill_files_table && hton->state == SHOW_OPTION_YES)
++    hton->fill_files_table(hton, thd, args->tables, args->cond);
++  return false;
++}
++
++int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
++{
++  DBUG_ENTER("fill_schema_files");
++
++  struct run_hton_fill_schema_files_args args;
++  args.tables= tables;
++  args.cond= cond;
++
++  plugin_foreach(thd, run_hton_fill_schema_files,
++                 MYSQL_STORAGE_ENGINE_PLUGIN, &args);
++
++  DBUG_RETURN(0);
++}
++
++
++ST_FIELD_INFO schema_fields_info[]=
++{
++  {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"SCHEMA_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Database",
++   SKIP_OPEN_TABLE},
++  {"DEFAULT_CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   SKIP_OPEN_TABLE},
++  {"DEFAULT_COLLATION_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   SKIP_OPEN_TABLE},
++  {"SQL_PATH", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO tables_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
++   SKIP_OPEN_TABLE},
++  {"TABLE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"ENGINE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine", OPEN_FRM_ONLY},
++  {"VERSION", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Version", OPEN_FRM_ONLY},
++  {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format", OPEN_FULL_TABLE},
++  {"TABLE_ROWS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Rows", OPEN_FULL_TABLE},
++  {"AVG_ROW_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Avg_row_length", OPEN_FULL_TABLE},
++  {"DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Data_length", OPEN_FULL_TABLE},
++  {"MAX_DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Max_data_length", OPEN_FULL_TABLE},
++  {"INDEX_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Index_length", OPEN_FULL_TABLE},
++  {"DATA_FREE", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Data_free", OPEN_FULL_TABLE},
++  {"AUTO_INCREMENT", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Auto_increment", OPEN_FULL_TABLE},
++  {"CREATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Create_time", OPEN_FULL_TABLE},
++  {"UPDATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Update_time", OPEN_FULL_TABLE},
++  {"CHECK_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Check_time", OPEN_FULL_TABLE},
++  {"TABLE_COLLATION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 1, "Collation",
++   OPEN_FRM_ONLY},
++  {"CHECKSUM", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Checksum", OPEN_FULL_TABLE},
++  {"CREATE_OPTIONS", 255, MYSQL_TYPE_STRING, 0, 1, "Create_options",
++   OPEN_FRM_ONLY},
++  {"TABLE_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment", OPEN_FRM_ONLY},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO columns_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Field",
++   OPEN_FRM_ONLY},
++  {"ORDINAL_POSITION", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
++   MY_I_S_UNSIGNED, 0, OPEN_FRM_ONLY},
++  {"COLUMN_DEFAULT", MAX_FIELD_VARCHARLENGTH, MYSQL_TYPE_STRING, 0,
++   1, "Default", OPEN_FRM_ONLY},
++  {"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null", OPEN_FRM_ONLY},
++  {"DATA_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"CHARACTER_MAXIMUM_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG,
++   0, (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FRM_ONLY},
++  {"CHARACTER_OCTET_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONGLONG,
++   0, (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FRM_ONLY},
++  {"NUMERIC_PRECISION", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG,
++   0, (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FRM_ONLY},
++  {"NUMERIC_SCALE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONGLONG,
++   0, (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FRM_ONLY},
++  {"CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FRM_ONLY},
++  {"COLLATION_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 1, "Collation",
++   OPEN_FRM_ONLY},
++  {"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type", OPEN_FRM_ONLY},
++  {"COLUMN_KEY", 3, MYSQL_TYPE_STRING, 0, 0, "Key", OPEN_FRM_ONLY},
++  {"EXTRA", 27, MYSQL_TYPE_STRING, 0, 0, "Extra", OPEN_FRM_ONLY},
++  {"PRIVILEGES", 80, MYSQL_TYPE_STRING, 0, 0, "Privileges", OPEN_FRM_ONLY},
++  {"COLUMN_COMMENT", 255, MYSQL_TYPE_STRING, 0, 0, "Comment", OPEN_FRM_ONLY},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO charsets_fields_info[]=
++{
++  {"CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, "Charset",
++   SKIP_OPEN_TABLE},
++  {"DEFAULT_COLLATE_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "Default collation", SKIP_OPEN_TABLE},
++  {"DESCRIPTION", 60, MYSQL_TYPE_STRING, 0, 0, "Description",
++   SKIP_OPEN_TABLE},
++  {"MAXLEN", 3, MYSQL_TYPE_LONGLONG, 0, 0, "Maxlen", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO collation_fields_info[]=
++{
++  {"COLLATION_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, "Collation",
++   SKIP_OPEN_TABLE},
++  {"CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, "Charset",
++   SKIP_OPEN_TABLE},
++  {"ID", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, 0, "Id",
++   SKIP_OPEN_TABLE},
++  {"IS_DEFAULT", 3, MYSQL_TYPE_STRING, 0, 0, "Default", SKIP_OPEN_TABLE},
++  {"IS_COMPILED", 3, MYSQL_TYPE_STRING, 0, 0, "Compiled", SKIP_OPEN_TABLE},
++  {"SORTLEN", 3, MYSQL_TYPE_LONGLONG, 0, 0, "Sortlen", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO engines_fields_info[]=
++{
++  {"ENGINE", 64, MYSQL_TYPE_STRING, 0, 0, "Engine", SKIP_OPEN_TABLE},
++  {"SUPPORT", 8, MYSQL_TYPE_STRING, 0, 0, "Support", SKIP_OPEN_TABLE},
++  {"COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment", SKIP_OPEN_TABLE},
++  {"TRANSACTIONS", 3, MYSQL_TYPE_STRING, 0, 1, "Transactions", SKIP_OPEN_TABLE},
++  {"XA", 3, MYSQL_TYPE_STRING, 0, 1, "XA", SKIP_OPEN_TABLE},
++  {"SAVEPOINTS", 3 ,MYSQL_TYPE_STRING, 0, 1, "Savepoints", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO events_fields_info[]=
++{
++  {"EVENT_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"EVENT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db",
++   SKIP_OPEN_TABLE},
++  {"EVENT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
++   SKIP_OPEN_TABLE},
++  {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
++  {"TIME_ZONE", 64, MYSQL_TYPE_STRING, 0, 0, "Time zone", SKIP_OPEN_TABLE},
++  {"EVENT_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"EVENT_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"EVENT_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type", SKIP_OPEN_TABLE},
++  {"EXECUTE_AT", 0, MYSQL_TYPE_DATETIME, 0, 1, "Execute at", SKIP_OPEN_TABLE},
++  {"INTERVAL_VALUE", 256, MYSQL_TYPE_STRING, 0, 1, "Interval value",
++   SKIP_OPEN_TABLE},
++  {"INTERVAL_FIELD", 18, MYSQL_TYPE_STRING, 0, 1, "Interval field",
++   SKIP_OPEN_TABLE},
++  {"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"STARTS", 0, MYSQL_TYPE_DATETIME, 0, 1, "Starts", SKIP_OPEN_TABLE},
++  {"ENDS", 0, MYSQL_TYPE_DATETIME, 0, 1, "Ends", SKIP_OPEN_TABLE},
++  {"STATUS", 18, MYSQL_TYPE_STRING, 0, 0, "Status", SKIP_OPEN_TABLE},
++  {"ON_COMPLETION", 12, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"LAST_ALTERED", 0, MYSQL_TYPE_DATETIME, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"LAST_EXECUTED", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"EVENT_COMMENT", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"ORIGINATOR", 10, MYSQL_TYPE_LONGLONG, 0, 0, "Originator", SKIP_OPEN_TABLE},
++  {"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "character_set_client", SKIP_OPEN_TABLE},
++  {"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "collation_connection", SKIP_OPEN_TABLE},
++  {"DATABASE_COLLATION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "Database Collation", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++
++ST_FIELD_INFO coll_charset_app_fields_info[]=
++{
++  {"COLLATION_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   SKIP_OPEN_TABLE},
++  {"CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO proc_fields_info[]=
++{
++  {"SPECIFIC_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"ROUTINE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db",
++   SKIP_OPEN_TABLE},
++  {"ROUTINE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
++   SKIP_OPEN_TABLE},
++  {"ROUTINE_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type", SKIP_OPEN_TABLE},
++  {"DTD_IDENTIFIER", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"ROUTINE_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"EXTERNAL_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"EXTERNAL_LANGUAGE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   SKIP_OPEN_TABLE},
++  {"PARAMETER_STYLE", 8, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"IS_DETERMINISTIC", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"SQL_DATA_ACCESS", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   SKIP_OPEN_TABLE},
++  {"SQL_PATH", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, "Security_type",
++   SKIP_OPEN_TABLE},
++  {"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 0, "Created", SKIP_OPEN_TABLE},
++  {"LAST_ALTERED", 0, MYSQL_TYPE_DATETIME, 0, 0, "Modified", SKIP_OPEN_TABLE},
++  {"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"ROUTINE_COMMENT", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment",
++   SKIP_OPEN_TABLE},
++  {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
++  {"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "character_set_client", SKIP_OPEN_TABLE},
++  {"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "collation_connection", SKIP_OPEN_TABLE},
++  {"DATABASE_COLLATION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "Database Collation", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO stat_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table", OPEN_FRM_ONLY},
++  {"NON_UNIQUE", 1, MYSQL_TYPE_LONGLONG, 0, 0, "Non_unique", OPEN_FRM_ONLY},
++  {"INDEX_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"INDEX_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Key_name",
++   OPEN_FRM_ONLY},
++  {"SEQ_IN_INDEX", 2, MYSQL_TYPE_LONGLONG, 0, 0, "Seq_in_index", OPEN_FRM_ONLY},
++  {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name",
++   OPEN_FRM_ONLY},
++  {"COLLATION", 1, MYSQL_TYPE_STRING, 0, 1, "Collation", OPEN_FRM_ONLY},
++  {"CARDINALITY", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, 1,
++   "Cardinality", OPEN_FULL_TABLE},
++  {"SUB_PART", 3, MYSQL_TYPE_LONGLONG, 0, 1, "Sub_part", OPEN_FRM_ONLY},
++  {"PACKED", 10, MYSQL_TYPE_STRING, 0, 1, "Packed", OPEN_FRM_ONLY},
++  {"NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null", OPEN_FRM_ONLY},
++  {"INDEX_TYPE", 16, MYSQL_TYPE_STRING, 0, 0, "Index_type", OPEN_FULL_TABLE},
++  {"COMMENT", 16, MYSQL_TYPE_STRING, 0, 1, "Comment", OPEN_FRM_ONLY},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO view_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
++  {"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"CHECK_OPTION", 8, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"IS_UPDATABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO user_privileges_fields_info[]=
++{
++  {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO schema_privileges_fields_info[]=
++{
++  {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO table_privileges_fields_info[]=
++{
++  {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO column_privileges_fields_info[]=
++{
++  {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO table_constraints_fields_info[]=
++{
++  {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"CONSTRAINT_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO key_column_usage_fields_info[]=
++{
++  {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"ORDINAL_POSITION", 10 ,MYSQL_TYPE_LONGLONG, 0, 0, 0, OPEN_FULL_TABLE},
++  {"POSITION_IN_UNIQUE_CONSTRAINT", 10 ,MYSQL_TYPE_LONGLONG, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"REFERENCED_TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"REFERENCED_TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"REFERENCED_COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO table_names_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_",
++   SKIP_OPEN_TABLE},
++  {"TABLE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_type",
++   OPEN_FRM_ONLY},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO open_tables_fields_info[]=
++{
++  {"Database", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Database",
++   SKIP_OPEN_TABLE},
++  {"Table",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table", SKIP_OPEN_TABLE},
++  {"In_use", 1, MYSQL_TYPE_LONGLONG, 0, 0, "In_use", SKIP_OPEN_TABLE},
++  {"Name_locked", 4, MYSQL_TYPE_LONGLONG, 0, 0, "Name_locked", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO triggers_fields_info[]=
++{
++  {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"TRIGGER_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TRIGGER_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger",
++   OPEN_FULL_TABLE},
++  {"EVENT_MANIPULATION", 6, MYSQL_TYPE_STRING, 0, 0, "Event", OPEN_FULL_TABLE},
++  {"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"EVENT_OBJECT_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"EVENT_OBJECT_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table",
++   OPEN_FULL_TABLE},
++  {"ACTION_ORDER", 4, MYSQL_TYPE_LONGLONG, 0, 0, 0, OPEN_FULL_TABLE},
++  {"ACTION_CONDITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"ACTION_STATEMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Statement",
++   OPEN_FULL_TABLE},
++  {"ACTION_ORIENTATION", 9, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"ACTION_TIMING", 6, MYSQL_TYPE_STRING, 0, 0, "Timing", OPEN_FULL_TABLE},
++  {"ACTION_REFERENCE_OLD_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"ACTION_REFERENCE_NEW_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"ACTION_REFERENCE_OLD_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 1, "Created", OPEN_FULL_TABLE},
++  {"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, "sql_mode", OPEN_FULL_TABLE},
++  {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", OPEN_FULL_TABLE},
++  {"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "character_set_client", OPEN_FULL_TABLE},
++  {"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "collation_connection", OPEN_FULL_TABLE},
++  {"DATABASE_COLLATION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
++   "Database Collation", OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO partitions_fields_info[]=
++{
++  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"PARTITION_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"SUBPARTITION_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"PARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
++  {"SUBPARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
++  {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"SUBPARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"PARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"SUBPARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"PARTITION_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"TABLE_ROWS", 21 , MYSQL_TYPE_LONGLONG, 0, MY_I_S_UNSIGNED, 0,
++   OPEN_FULL_TABLE},
++  {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, MY_I_S_UNSIGNED, 0,
++   OPEN_FULL_TABLE},
++  {"DATA_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, MY_I_S_UNSIGNED, 0,
++   OPEN_FULL_TABLE},
++  {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
++  {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, MY_I_S_UNSIGNED, 0,
++   OPEN_FULL_TABLE},
++  {"DATA_FREE", 21 , MYSQL_TYPE_LONGLONG, 0, MY_I_S_UNSIGNED, 0,
++   OPEN_FULL_TABLE},
++  {"CREATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, OPEN_FULL_TABLE},
++  {"UPDATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, OPEN_FULL_TABLE},
++  {"CHECK_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, OPEN_FULL_TABLE},
++  {"CHECKSUM", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
++  {"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"NODEGROUP", 12 , MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO variables_fields_info[]=
++{
++  {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name",
++   SKIP_OPEN_TABLE},
++  {"VARIABLE_VALUE", 1024, MYSQL_TYPE_STRING, 0, 1, "Value", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO processlist_fields_info[]=
++{
++  {"ID", 4, MYSQL_TYPE_LONGLONG, 0, 0, "Id", SKIP_OPEN_TABLE},
++  {"USER", 16, MYSQL_TYPE_STRING, 0, 0, "User", SKIP_OPEN_TABLE},
++  {"HOST", LIST_PROCESS_HOST_LEN,  MYSQL_TYPE_STRING, 0, 0, "Host",
++   SKIP_OPEN_TABLE},
++  {"DB", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Db", SKIP_OPEN_TABLE},
++  {"COMMAND", 16, MYSQL_TYPE_STRING, 0, 0, "Command", SKIP_OPEN_TABLE},
++  {"TIME", 7, MYSQL_TYPE_LONG, 0, 0, "Time", SKIP_OPEN_TABLE},
++  {"STATE", 64, MYSQL_TYPE_STRING, 0, 1, "State", SKIP_OPEN_TABLE},
++  {"INFO", PROCESS_LIST_INFO_WIDTH, MYSQL_TYPE_STRING, 0, 1, "Info",
++   SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++ST_FIELD_INFO plugin_fields_info[]=
++{
++  {"PLUGIN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
++   SKIP_OPEN_TABLE},
++  {"PLUGIN_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"PLUGIN_STATUS", 10, MYSQL_TYPE_STRING, 0, 0, "Status", SKIP_OPEN_TABLE},
++  {"PLUGIN_TYPE", 80, MYSQL_TYPE_STRING, 0, 0, "Type", SKIP_OPEN_TABLE},
++  {"PLUGIN_TYPE_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"PLUGIN_LIBRARY", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Library",
++   SKIP_OPEN_TABLE},
++  {"PLUGIN_LIBRARY_VERSION", 20, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"PLUGIN_AUTHOR", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License", SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++ST_FIELD_INFO files_fields_info[]=
++{
++  {"FILE_ID", 4, MYSQL_TYPE_LONGLONG, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"FILE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"FILE_TYPE", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   SKIP_OPEN_TABLE},
++  {"TABLE_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"LOGFILE_GROUP_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   SKIP_OPEN_TABLE},
++  {"LOGFILE_GROUP_NUMBER", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"ENGINE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"FULLTEXT_KEYS", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"DELETED_ROWS", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"UPDATE_COUNT", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"FREE_EXTENTS", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TOTAL_EXTENTS", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"EXTENT_SIZE", 4, MYSQL_TYPE_LONGLONG, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"INITIAL_SIZE", 21, MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE},
++  {"MAXIMUM_SIZE", 21, MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE},
++  {"AUTOEXTEND_SIZE", 21, MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE},
++  {"CREATION_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"LAST_UPDATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"LAST_ACCESS_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"RECOVER_TIME", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"TRANSACTION_COUNTER", 4, MYSQL_TYPE_LONGLONG, 0, 1, 0, SKIP_OPEN_TABLE},
++  {"VERSION", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Version", SKIP_OPEN_TABLE},
++  {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format", SKIP_OPEN_TABLE},
++  {"TABLE_ROWS", 21 , MYSQL_TYPE_LONGLONG, 0,
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Rows", SKIP_OPEN_TABLE},
++  {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Avg_row_length", SKIP_OPEN_TABLE},
++  {"DATA_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Data_length", SKIP_OPEN_TABLE},
++  {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Max_data_length", SKIP_OPEN_TABLE},
++  {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Index_length", SKIP_OPEN_TABLE},
++  {"DATA_FREE", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Data_free", SKIP_OPEN_TABLE},
++  {"CREATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Create_time", SKIP_OPEN_TABLE},
++  {"UPDATE_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Update_time", SKIP_OPEN_TABLE},
++  {"CHECK_TIME", 0, MYSQL_TYPE_DATETIME, 0, 1, "Check_time", SKIP_OPEN_TABLE},
++  {"CHECKSUM", 21 , MYSQL_TYPE_LONGLONG, 0, 
++   (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), "Checksum", SKIP_OPEN_TABLE},
++  {"STATUS", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
++  {"EXTRA", 255, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
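For orientation, each positional initializer in the ST_FIELD_INFO arrays above follows the layout documented in table.h. A minimal annotated sketch of one entry, with member meanings inferred from the initializers shown here (the names are illustrative, not text from the patch):

    /* One column description for an INFORMATION_SCHEMA table. */
    ST_FIELD_INFO example_field=
    {
      "CHECKSUM",                          /* I_S column name               */
      21,                                  /* display length                */
      MYSQL_TYPE_LONGLONG,                 /* column type                   */
      0,                                   /* default value (unused here)   */
      MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED, /* NULL-ability / flags          */
      "Checksum",                          /* legacy SHOW column name, or 0 */
      SKIP_OPEN_TABLE                      /* open method used when filling */
    };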
++
++void init_fill_schema_files_row(TABLE* table)
++{
++  int i;
++  for(i=0; files_fields_info[i].field_name!=NULL; i++)
++    table->field[i]->set_null();
++
++  table->field[IS_FILES_STATUS]->set_notnull();
++  table->field[IS_FILES_STATUS]->store("NORMAL", 6, system_charset_info);
++}
++
++ST_FIELD_INFO referential_constraints_fields_info[]=
++{
++  {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
++  {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"UNIQUE_CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0,
++   OPEN_FULL_TABLE},
++  {"UNIQUE_CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {"UNIQUE_CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0,
++   MY_I_S_MAYBE_NULL, 0, OPEN_FULL_TABLE},
++  {"MATCH_OPTION", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"UPDATE_RULE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"DELETE_RULE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
++  {"REFERENCED_TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
++   OPEN_FULL_TABLE},
++  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
++};
++
++
++/*
++  Description of ST_FIELD_INFO in table.h
++
++  Make sure that the order of schema_tables and enum_schema_tables is the same.
++
++*/
++
++ST_SCHEMA_TABLE schema_tables[]=
++{
++  {"CHARACTER_SETS", charsets_fields_info, create_schema_table, 
++   fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0, 0},
++  {"COLLATIONS", collation_fields_info, create_schema_table, 
++   fill_schema_collation, make_old_format, 0, -1, -1, 0, 0},
++  {"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info,
++   create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0, 0},
++  {"COLUMNS", columns_fields_info, create_schema_table, 
++   get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0,
++   OPTIMIZE_I_S_TABLE|OPEN_VIEW_FULL},
++  {"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table,
++   fill_schema_column_privileges, 0, 0, -1, -1, 0, 0},
++  {"ENGINES", engines_fields_info, create_schema_table,
++   fill_schema_engines, make_old_format, 0, -1, -1, 0, 0},
++#ifdef HAVE_EVENT_SCHEDULER
++  {"EVENTS", events_fields_info, create_schema_table,
++   Events::fill_schema_events, make_old_format, 0, -1, -1, 0, 0},
++#else
++  {"EVENTS", events_fields_info, create_schema_table,
++   0, make_old_format, 0, -1, -1, 0, 0},
++#endif
++  {"FILES", files_fields_info, create_schema_table,
++   fill_schema_files, 0, 0, -1, -1, 0, 0},
++  {"GLOBAL_STATUS", variables_fields_info, create_schema_table,
++   fill_status, make_old_format, 0, 0, -1, 0, 0},
++  {"GLOBAL_VARIABLES", variables_fields_info, create_schema_table,
++   fill_variables, make_old_format, 0, 0, -1, 0, 0},
++  {"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
++   get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0,
++   OPEN_TABLE_ONLY},
++  {"OPEN_TABLES", open_tables_fields_info, create_schema_table,
++   fill_open_tables, make_old_format, 0, -1, -1, 1, 0},
++  {"PARTITIONS", partitions_fields_info, create_schema_table,
++   get_all_tables, 0, get_schema_partitions_record, 1, 2, 0, OPEN_TABLE_ONLY},
++  {"PLUGINS", plugin_fields_info, create_schema_table,
++   fill_plugins, make_old_format, 0, -1, -1, 0, 0},
++  {"PROCESSLIST", processlist_fields_info, create_schema_table,
++   fill_schema_processlist, make_old_format, 0, -1, -1, 0, 0},
++  {"PROFILING", query_profile_statistics_info, create_schema_table,
++    fill_query_profile_statistics_info, make_profile_table_for_show, 
++    NULL, -1, -1, false, 0},
++  {"REFERENTIAL_CONSTRAINTS", referential_constraints_fields_info,
++   create_schema_table, get_all_tables, 0, get_referential_constraints_record,
++   1, 9, 0, OPEN_TABLE_ONLY},
++  {"ROUTINES", proc_fields_info, create_schema_table, 
++   fill_schema_proc, make_proc_old_format, 0, -1, -1, 0, 0},
++  {"SCHEMATA", schema_fields_info, create_schema_table,
++   fill_schema_schemata, make_schemata_old_format, 0, 1, -1, 0, 0},
++  {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table,
++   fill_schema_schema_privileges, 0, 0, -1, -1, 0, 0},
++  {"SESSION_STATUS", variables_fields_info, create_schema_table,
++   fill_status, make_old_format, 0, 0, -1, 0, 0},
++  {"SESSION_VARIABLES", variables_fields_info, create_schema_table,
++   fill_variables, make_old_format, 0, 0, -1, 0, 0},
++  {"STATISTICS", stat_fields_info, create_schema_table, 
++   get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0,
++   OPEN_TABLE_ONLY|OPTIMIZE_I_S_TABLE},
++  {"STATUS", variables_fields_info, create_schema_table, fill_status, 
++   make_old_format, 0, 0, -1, 1, 0},
++  {"TABLES", tables_fields_info, create_schema_table, 
++   get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0,
++   OPTIMIZE_I_S_TABLE},
++  {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table,
++   get_all_tables, 0, get_schema_constraints_record, 3, 4, 0, OPEN_TABLE_ONLY},
++  {"TABLE_NAMES", table_names_fields_info, create_schema_table,
++   get_all_tables, make_table_names_old_format, 0, 1, 2, 1, 0},
++  {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
++   fill_schema_table_privileges, 0, 0, -1, -1, 0, 0},
++  {"TRIGGERS", triggers_fields_info, create_schema_table,
++   get_all_tables, make_old_format, get_schema_triggers_record, 5, 6, 0,
++   OPEN_TABLE_ONLY},
++  {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table, 
++   fill_schema_user_privileges, 0, 0, -1, -1, 0, 0},
++  {"VARIABLES", variables_fields_info, create_schema_table, fill_variables,
++   make_old_format, 0, 0, -1, 1, 0},
++  {"VIEWS", view_fields_info, create_schema_table, 
++   get_all_tables, 0, get_schema_views_record, 1, 2, 0,
++   OPEN_VIEW_ONLY|OPTIMIZE_I_S_TABLE},
++  {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
++};
++
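The schema_tables[] rows above are positional in the same way. Taking the "TABLES" entry as an example, the members line up roughly as follows (member names as in the 5.1 ST_SCHEMA_TABLE declaration; a sketch for orientation only):

    ST_SCHEMA_TABLE example_schema_table=
    {
      "TABLES",                 /* table_name                                 */
      tables_fields_info,       /* fields_info: column descriptions           */
      create_schema_table,      /* create_table(): build the in-memory table  */
      get_all_tables,           /* fill_table(): populate it                  */
      make_old_format,          /* old_format(): SHOW-style column subset     */
      get_schema_tables_record, /* process_table(): one row per base table    */
      1, 2,                     /* idx_field1/idx_field2: schema/table fields */
      0,                        /* hidden: 0 = visible in I_S                 */
      OPTIMIZE_I_S_TABLE        /* i_s_requested_object open optimization     */
    };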
++
++#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
++template class List_iterator_fast<char>;
++template class List<char>;
++#endif
++
++int initialize_schema_table(st_plugin_int *plugin)
++{
++  ST_SCHEMA_TABLE *schema_table;
++  DBUG_ENTER("initialize_schema_table");
++
++  if (!(schema_table= (ST_SCHEMA_TABLE *)my_malloc(sizeof(ST_SCHEMA_TABLE),
++                                MYF(MY_WME | MY_ZEROFILL))))
++      DBUG_RETURN(1);
++  /* Historical Requirement */
++  plugin->data= schema_table; // shortcut for the future
++  if (plugin->plugin->init)
++  {
++    schema_table->create_table= create_schema_table;
++    schema_table->old_format= make_old_format;
++    schema_table->idx_field1= -1, 
++    schema_table->idx_field2= -1; 
++
++    /* Make the name available to the init() function. */
++    schema_table->table_name= plugin->name.str;
++
++    if (plugin->plugin->init(schema_table))
++    {
++      sql_print_error("Plugin '%s' init function returned error.",
++                      plugin->name.str);
++      plugin->data= NULL;
++      my_free(schema_table, MYF(0));
++      DBUG_RETURN(1);
++    }
++    
++    /* Make sure the plugin name is not set inside the init() function. */
++    schema_table->table_name= plugin->name.str;
++  }
++  DBUG_RETURN(0);
++}
++
++int finalize_schema_table(st_plugin_int *plugin)
++{
++  ST_SCHEMA_TABLE *schema_table= (ST_SCHEMA_TABLE *)plugin->data;
++  DBUG_ENTER("finalize_schema_table");
++
++  if (schema_table)
++  {
++    if (plugin->plugin->deinit)
++    {
++      DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
++      if (plugin->plugin->deinit(NULL))
++      {
++        DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
++                               plugin->name.str));
++      }
++    }
++    my_free(schema_table, MYF(0));
++  }
++  DBUG_RETURN(0);
++}
++
++
++/**
++  Output trigger information (SHOW CREATE TRIGGER) to the client.
++
++  @param thd          Thread context.
++  @param triggers     List of triggers for the table.
++  @param trigger_idx  Index of the trigger to dump.
++
++  @return Operation status
++    @retval TRUE Error.
++    @retval FALSE Success.
++*/
++
++static bool show_create_trigger_impl(THD *thd,
++                                     Table_triggers_list *triggers,
++                                     int trigger_idx)
++{
++  int ret_code;
++
++  Protocol *p= thd->protocol;
++  List<Item> fields;
++
++  LEX_STRING trg_name;
++  ulonglong trg_sql_mode;
++  LEX_STRING trg_sql_mode_str;
++  LEX_STRING trg_sql_original_stmt;
++  LEX_STRING trg_client_cs_name;
++  LEX_STRING trg_connection_cl_name;
++  LEX_STRING trg_db_cl_name;
++
++  CHARSET_INFO *trg_client_cs;
++
++  /*
++    TODO: Check privileges here. This functionality will be added by
++    implementation of the following WL items:
++      - WL#2227: New privileges for new objects
++      - WL#3482: Protect SHOW CREATE PROCEDURE | FUNCTION | VIEW | TRIGGER
++        properly
++
++    SHOW TRIGGERS and I_S.TRIGGERS will be affected too.
++  */
++
++  /* Prepare trigger "object". */
++
++  triggers->get_trigger_info(thd,
++                             trigger_idx,
++                             &trg_name,
++                             &trg_sql_mode,
++                             &trg_sql_original_stmt,
++                             &trg_client_cs_name,
++                             &trg_connection_cl_name,
++                             &trg_db_cl_name);
++
++  sys_var_thd_sql_mode::symbolic_mode_representation(thd,
++                                                     trg_sql_mode,
++                                                     &trg_sql_mode_str);
++
++  /* Resolve trigger client character set. */
++
++  if (resolve_charset(trg_client_cs_name.str, NULL, &trg_client_cs))
++    return TRUE;
++
++  /* Send header. */
++
++  fields.push_back(new Item_empty_string("Trigger", NAME_LEN));
++  fields.push_back(new Item_empty_string("sql_mode", trg_sql_mode_str.length));
++
++  {
++    /*
++      NOTE: the SQL statement field must be at least 1024 characters wide in
++      order not to confuse old clients.
++    */
++
++    Item_empty_string *stmt_fld=
++      new Item_empty_string("SQL Original Statement",
++                            max(trg_sql_original_stmt.length, 1024));
++
++    stmt_fld->maybe_null= TRUE;
++
++    fields.push_back(stmt_fld);
++  }
++
++  fields.push_back(new Item_empty_string("character_set_client",
++                                         MY_CS_NAME_SIZE));
++
++  fields.push_back(new Item_empty_string("collation_connection",
++                                         MY_CS_NAME_SIZE));
++
++  fields.push_back(new Item_empty_string("Database Collation",
++                                         MY_CS_NAME_SIZE));
++
++  if (p->send_fields(&fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    return TRUE;
++
++  /* Send data. */
++
++  p->prepare_for_resend();
++
++  p->store(trg_name.str,
++           trg_name.length,
++           system_charset_info);
++
++  p->store(trg_sql_mode_str.str,
++           trg_sql_mode_str.length,
++           system_charset_info);
++
++  p->store(trg_sql_original_stmt.str,
++           trg_sql_original_stmt.length,
++           trg_client_cs);
++
++  p->store(trg_client_cs_name.str,
++           trg_client_cs_name.length,
++           system_charset_info);
++
++  p->store(trg_connection_cl_name.str,
++           trg_connection_cl_name.length,
++           system_charset_info);
++
++  p->store(trg_db_cl_name.str,
++           trg_db_cl_name.length,
++           system_charset_info);
++
++  ret_code= p->write();
++
++  if (!ret_code)
++    my_eof(thd);
++
++  return ret_code != 0;
++}
++
++
++/**
++  Read TRN and TRG files to obtain base table name for the specified
++  trigger name and construct TABE_LIST object for the base table.
++
++  @param thd      Thread context.
++  @param trg_name Trigger name.
++
++  @return TABLE_LIST object corresponding to the base table.
++
++  TODO: This function is a copy&paste from add_table_to_list() and
++  sp_add_to_query_tables(). The problem is that in order to be compatible
++  with Stored Programs (Prepared Statements), we should not touch thd->lex.
++  The "source" functions also add created TABLE_LIST object to the
++  thd->lex->query_tables.
++
++  The plan to eliminate this copy&paste is to:
++
++    - get rid of sp_add_to_query_tables() and use Lex::add_table_to_list().
++      Only add_table_to_list() must be used to add tables from the parser
++      into Lex::query_tables list.
++
++    - do not update Lex::query_tables in add_table_to_list().
++*/
++
++static TABLE_LIST *get_trigger_table_impl(
++  THD *thd,
++  const sp_name *trg_name)
++{
++  char trn_path_buff[FN_REFLEN];
++
++  LEX_STRING trn_path= { trn_path_buff, 0 };
++  LEX_STRING tbl_name;
++
++  build_trn_path(thd, trg_name, &trn_path);
++
++  if (check_trn_exists(&trn_path))
++  {
++    my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
++    return NULL;
++  }
++
++  if (load_table_name_for_trigger(thd, trg_name, &trn_path, &tbl_name))
++    return NULL;
++
++  /* We need to reset statement table list to be PS/SP friendly. */
++
++  TABLE_LIST *table;
++
++  if (!(table= (TABLE_LIST *)thd->calloc(sizeof(TABLE_LIST))))
++  {
++    my_error(ER_OUTOFMEMORY, MYF(0), sizeof(TABLE_LIST));
++    return NULL;
++  }
++
++  table->db_length= trg_name->m_db.length;
++  table->db= thd->strmake(trg_name->m_db.str, trg_name->m_db.length);
++
++  table->table_name_length= tbl_name.length;
++  table->table_name= thd->strmake(tbl_name.str, tbl_name.length);
++
++  table->alias= thd->strmake(tbl_name.str, tbl_name.length);
++
++  table->lock_type= TL_IGNORE;
++  table->cacheable_table= 0;
++
++  return table;
++}
++
++/**
++  Read TRN and TRG files to obtain base table name for the specified
++  trigger name and construct a TABLE_LIST object for the base table. Acquire
++  LOCK_open when doing this.
++
++  @param thd      Thread context.
++  @param trg_name Trigger name.
++
++  @return TABLE_LIST object corresponding to the base table.
++*/
++
++static TABLE_LIST *get_trigger_table(THD *thd, const sp_name *trg_name)
++{
++  /* Acquire LOCK_open (stop the server). */
++
++  pthread_mutex_lock(&LOCK_open);
++
++  /*
++    Load base table name from the TRN-file and create TABLE_LIST object.
++  */
++
++  TABLE_LIST *lst= get_trigger_table_impl(thd, trg_name);
++
++  /* Release LOCK_open (continue the server). */
++
++  pthread_mutex_unlock(&LOCK_open);
++
++  /* That's it. */
++
++  return lst;
++}
++
++
++/**
++  SHOW CREATE TRIGGER high-level implementation.
++
++  @param thd      Thread context.
++  @param trg_name Trigger name.
++
++  @return Operation status
++    @retval TRUE Error.
++    @retval FALSE Success.
++*/
++
++bool show_create_trigger(THD *thd, const sp_name *trg_name)
++{
++  TABLE_LIST *lst= get_trigger_table(thd, trg_name);
++
++  if (!lst)
++    return TRUE;
++
++  if (check_table_access(thd, TRIGGER_ACL, lst, 1, TRUE))
++  {
++    my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "TRIGGER");
++    return TRUE;
++  }
++
++  /*
++    Open the table by name in order to load Table_triggers_list object.
++
++    NOTE: there is a race condition here -- the table can be dropped after
++    LOCK_open is released. It will be fixed later by introducing
++    acquire-shared-table-name-lock functionality.
++  */
++
++  uint num_tables; /* NOTE: unused, only to pass to open_tables(). */
++
++  if (open_tables(thd, &lst, &num_tables, 0))
++  {
++    my_error(ER_TRG_CANT_OPEN_TABLE, MYF(0),
++             (const char *) trg_name->m_db.str,
++             (const char *) lst->table_name);
++
++    return TRUE;
++
++    /* Perform closing actions and return error status. */
++  }
++
++  Table_triggers_list *triggers= lst->table->triggers;
++
++  if (!triggers)
++  {
++    my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
++    return TRUE;
++  }
++
++  int trigger_idx= triggers->find_trigger_by_name(&trg_name->m_name);
++
++  if (trigger_idx < 0)
++  {
++    my_error(ER_TRG_CORRUPTED_FILE, MYF(0),
++             (const char *) trg_name->m_db.str,
++             (const char *) lst->table_name);
++
++    return TRUE;
++  }
++
++  return show_create_trigger_impl(thd, triggers, trigger_idx);
++
++  /*
++    NOTE: if show_create_trigger_impl() failed, that means we could not
++    send data to the client. In this case we simply raise the error
++    status and client connection will be closed.
++  */
++}
+diff -urN mysql-old/sql/sql_string.cc mysql/sql/sql_string.cc
+--- mysql-old/sql/sql_string.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/sql_string.cc	2011-05-10 17:56:01.616682376 +0000
+@@ -695,7 +695,7 @@
+ {
+   if (Alloced_length < str_length + space_needed)
+   {
+-    if (realloc(Alloced_length + max(space_needed, grow_by) - 1))
++    if (realloc(Alloced_length + MYSQL_MAX(space_needed, grow_by) - 1))
+       return TRUE;
+   }
+   return FALSE;
+@@ -781,7 +781,7 @@
+ 
+ int stringcmp(const String *s,const String *t)
+ {
+-  uint32 s_len=s->length(),t_len=t->length(),len=min(s_len,t_len);
++  uint32 s_len=s->length(),t_len=t->length(),len=MYSQL_MIN(s_len,t_len);
+   int cmp= memcmp(s->ptr(), t->ptr(), len);
+   return (cmp) ? cmp : (int) (s_len - t_len);
+ }
+@@ -798,7 +798,7 @@
+   }
+   if (to->realloc(from_length))
+     return from;				// Actually an error
+-  if ((to->str_length=min(from->str_length,from_length)))
++  if ((to->str_length=MYSQL_MIN(from->str_length,from_length)))
+     memcpy(to->Ptr,from->Ptr,to->str_length);
+   to->str_charset=from->str_charset;
+   return to;
+@@ -999,7 +999,7 @@
+ 
+     if (to_cs == &my_charset_bin)
+     {
+-      res= min(min(nchars, to_length), from_length);
++      res= MYSQL_MIN(MYSQL_MIN(nchars, to_length), from_length);
+       memmove(to, from, res);
+       *from_end_pos= from + res;
+       *well_formed_error_pos= NULL;
+@@ -1185,7 +1185,7 @@
+   char *t= to;
+   char *t_end= to + to_len - 1; // '- 1' is for the '\0' at the end
+   const char *f= from;
+-  const char *f_end= from + (nbytes ? min(from_len, nbytes) : from_len);
++  const char *f_end= from + (nbytes ? MYSQL_MIN(from_len, nbytes) : from_len);
+   char *dots= to; // last safe place to append '...'
+ 
+   if (!f || t == t_end)
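The sql_string.cc hunks above are typical of the whole patch: every call to the old min()/max() macros is rewritten as MYSQL_MIN()/MYSQL_MAX(), presumably so that the identifiers cannot clash with min/max definitions that newer gcc/C++ headers (gcc 4.2 era) are sensitive to. The replacement macros themselves are not visible in this excerpt; they are presumably conventional ternary macros along these lines (an assumption, not text from the patch):

    /* Assumed definitions; the real ones are introduced elsewhere in the patch. */
    #define MYSQL_MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MYSQL_MAX(a, b) ((a) > (b) ? (a) : (b))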
+diff -urN mysql-old/sql/sql_table.cc mysql/sql/sql_table.cc
+--- mysql-old/sql/sql_table.cc	2011-05-10 17:45:45.626682377 +0000
++++ mysql/sql/sql_table.cc	2011-05-10 17:56:01.620015709 +0000
+@@ -3274,7 +3274,7 @@
+ 	  if ((length=column->length) > max_key_length ||
+ 	      length > file->max_key_part_length())
+ 	  {
+-	    length=min(max_key_length, file->max_key_part_length());
++	    length=MYSQL_MIN(max_key_length, file->max_key_part_length());
+ 	    if (key->type == Key::MULTIPLE)
+ 	    {
+ 	      /* not a critical problem */
+diff -urN mysql-old/sql/sql_table.cc.orig mysql/sql/sql_table.cc.orig
+--- mysql-old/sql/sql_table.cc.orig	1969-12-31 23:00:00.000000000 -0100
++++ mysql/sql/sql_table.cc.orig	2011-04-12 12:11:35.000000000 +0000
+@@ -0,0 +1,8126 @@
++/* Copyright 2000-2011, Oracle and/or its affiliates. All rights reserved. 
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; version 2 of the License.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,  
++   MA 02110-1301  USA */
++
++/* drop and alter of tables */
++
++#include "mysql_priv.h"
++#include <hash.h>
++#include <myisam.h>
++#include <my_dir.h>
++#include "sp_head.h"
++#include "sql_trigger.h"
++#include "sql_show.h"
++#include "debug_sync.h"
++
++#ifdef __WIN__
++#include <io.h>
++#endif
++
++int creating_table= 0;        // How many mysql_create_table are running
++
++const char *primary_key_name="PRIMARY";
++
++static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
++static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
++static int copy_data_between_tables(TABLE *from,TABLE *to,
++                                    List<Create_field> &create, bool ignore,
++				    uint order_num, ORDER *order,
++				    ha_rows *copied,ha_rows *deleted,
++                                    enum enum_enable_or_disable keys_onoff,
++                                    bool error_if_not_empty);
++
++static bool prepare_blob_field(THD *thd, Create_field *sql_field);
++static bool check_engine(THD *, const char *, HA_CREATE_INFO *);
++static int
++mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
++                           Alter_info *alter_info,
++                           bool tmp_table,
++                           uint *db_options,
++                           handler *file, KEY **key_info_buffer,
++                           uint *key_count, int select_field_count);
++static bool
++mysql_prepare_alter_table(THD *thd, TABLE *table,
++                          HA_CREATE_INFO *create_info,
++                          Alter_info *alter_info);
++
++#ifndef DBUG_OFF
++
++/* Wait until we get a 'mysql_kill' signal */
++
++static void wait_for_kill_signal(THD *thd)
++{
++  while (thd->killed == 0)
++    sleep(1);
++  // Reset signal and continue as if nothing happened
++  thd->killed= THD::NOT_KILLED;
++}
++#endif
++
++
++/**
++  @brief Helper function for explain_filename
++  @param thd          Thread handle
++  @param to_p         Explained name in system_charset_info
++  @param end_p        End of the to_p buffer
++  @param name         Name to be converted
++  @param name_len     Length of the name, in bytes
++*/
++static char* add_identifier(THD* thd, char *to_p, const char * end_p,
++                            const char* name, uint name_len)
++{
++  uint res;
++  uint errors;
++  const char *conv_name;
++  char tmp_name[FN_REFLEN];
++  char conv_string[FN_REFLEN];
++  int quote;
++
++  DBUG_ENTER("add_identifier");
++  if (!name[name_len])
++    conv_name= name;
++  else
++  {
++    strnmov(tmp_name, name, name_len);
++    tmp_name[name_len]= 0;
++    conv_name= tmp_name;
++  }
++  res= strconvert(&my_charset_filename, conv_name, system_charset_info,
++                  conv_string, FN_REFLEN, &errors);
++  if (!res || errors)
++  {
++    DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors));
++    conv_name= name;
++  }
++  else
++  {
++    DBUG_PRINT("info", ("conv '%s' -> '%s'", conv_name, conv_string));
++    conv_name= conv_string;
++  }
++
++  quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '"';
++
++  if (quote != EOF && (end_p - to_p > 2))
++  {
++    *(to_p++)= (char) quote;
++    while (*conv_name && (end_p - to_p - 1) > 0)
++    {
++      uint length= my_mbcharlen(system_charset_info, *conv_name);
++      if (!length)
++        length= 1;
++      if (length == 1 && *conv_name == (char) quote)
++      { 
++        if ((end_p - to_p) < 3)
++          break;
++        *(to_p++)= (char) quote;
++        *(to_p++)= *(conv_name++);
++      }
++      else if (((long) length) < (end_p - to_p))
++      {
++        to_p= strnmov(to_p, conv_name, length);
++        conv_name+= length;
++      }
++      else
++        break;                               /* string already filled */
++    }
++    if (end_p > to_p) {
++      *(to_p++)= (char) quote;
++      if (end_p > to_p)
++	*to_p= 0; /* terminate by NUL, but do not include it in the count */
++    }
++  }
++  else
++    to_p= strnmov(to_p, conv_name, end_p - to_p);
++  DBUG_RETURN(to_p);
++}
++
++
++/**
++  @brief Explain a path name by splitting it into database, table, etc.
++  
++  @details Break down the path name into its logical parts
++  (database, table, partition, subpartition).
++  filename_to_tablename cannot be used on partitions, due to the #P# part.
++  There can be up to 6 '#', #P# for partition, #SP# for subpartition
++  and #TMP# or #REN# for temporary or renamed partitions.
++  This should be used when something is presented to a user in a diagnostic
++  or error message and it would be useful to know what a particular
++  file [and directory] means, e.g. in SHOW ENGINE STATUS or error messages.
++
++   @param      thd          Thread handle
++   @param      from         Path name in my_charset_filename
++                            Null terminated in my_charset_filename, normalized
++                            to use '/' as directory separation character.
++   @param      to           Explained name in system_charset_info
++   @param      to_length    Size of to buffer
++   @param      explain_mode Requested output format.
++                            EXPLAIN_ALL_VERBOSE ->
++                            [Database `db`, ]Table `tbl`[,[ Temporary| Renamed]
++                            Partition `p` [, Subpartition `sp`]]
++                            EXPLAIN_PARTITIONS_VERBOSE -> `db`.`tbl`
++                            [[ Temporary| Renamed] Partition `p`
++                            [, Subpartition `sp`]]
++                            EXPLAIN_PARTITIONS_AS_COMMENT -> `db`.`tbl` |*
++                            [,[ Temporary| Renamed] Partition `p`
++                            [, Subpartition `sp`]] *|
++                            (| is really a /, and it is all in one line)
++
++   @retval     Length of returned string
++*/
++
++uint explain_filename(THD* thd,
++		      const char *from,
++                      char *to,
++                      uint to_length,
++                      enum_explain_filename_mode explain_mode)
++{
++  uint res= 0;
++  char *to_p= to;
++  char *end_p= to_p + to_length;
++  const char *db_name= NULL;
++  int  db_name_len= 0;
++  const char *table_name;
++  int  table_name_len= 0;
++  const char *part_name= NULL;
++  int  part_name_len= 0;
++  const char *subpart_name= NULL;
++  int  subpart_name_len= 0;
++  enum enum_file_name_type {NORMAL, TEMP, RENAMED} name_type= NORMAL;
++  const char *tmp_p;
++  DBUG_ENTER("explain_filename");
++  DBUG_PRINT("enter", ("from '%s'", from));
++  tmp_p= from;
++  table_name= from;
++  /*
++    If '/' then take last directory part as database.
++    '/' is the directory separator, not FN_LIB_CHAR
++  */
++  while ((tmp_p= strchr(tmp_p, '/')))
++  {
++    db_name= table_name;
++    /* calculate the length */
++    db_name_len= tmp_p - db_name;
++    tmp_p++;
++    table_name= tmp_p;
++  }
++  tmp_p= table_name;
++  while (!res && (tmp_p= strchr(tmp_p, '#')))
++  {
++    tmp_p++;
++    switch (tmp_p[0]) {
++    case 'P':
++    case 'p':
++      if (tmp_p[1] == '#')
++        part_name= tmp_p + 2;
++      else
++        res= 1;
++      tmp_p+= 2;
++      break;
++    case 'S':
++    case 's':
++      if ((tmp_p[1] == 'P' || tmp_p[1] == 'p') && tmp_p[2] == '#')
++      {
++        part_name_len= tmp_p - part_name - 1;
++        subpart_name= tmp_p + 3;
++      }
++      else
++        res= 2;
++      tmp_p+= 3;
++      break;
++    case 'T':
++    case 't':
++      if ((tmp_p[1] == 'M' || tmp_p[1] == 'm') &&
++          (tmp_p[2] == 'P' || tmp_p[2] == 'p') &&
++          tmp_p[3] == '#' && !tmp_p[4])
++        name_type= TEMP;
++      else
++        res= 3;
++      tmp_p+= 4;
++      break;
++    case 'R':
++    case 'r':
++      if ((tmp_p[1] == 'E' || tmp_p[1] == 'e') &&
++          (tmp_p[2] == 'N' || tmp_p[2] == 'n') &&
++          tmp_p[3] == '#' && !tmp_p[4])
++        name_type= RENAMED;
++      else
++        res= 4;
++      tmp_p+= 4;
++      break;
++    default:
++      res= 5;
++    }
++  }
++  if (res)
++  {
++    /* Better to give something back if we fail parsing, than nothing at all */
++    DBUG_PRINT("info", ("Error in explain_filename: %u", res));
++    sql_print_warning("Invalid (old?) table or database name '%s'", from);
++    DBUG_RETURN(my_snprintf(to, to_length,
++                            "<result %u when explaining filename '%s'>",
++                            res, from));
++  }
++  if (part_name)
++  {
++    table_name_len= part_name - table_name - 3;
++    if (subpart_name)
++      subpart_name_len= strlen(subpart_name);
++    else
++      part_name_len= strlen(part_name);
++    if (name_type != NORMAL)
++    {
++      if (subpart_name)
++        subpart_name_len-= 5;
++      else
++        part_name_len-= 5;
++    }
++  }
++  else
++    table_name_len= strlen(table_name);
++  if (db_name)
++  {
++    if (explain_mode == EXPLAIN_ALL_VERBOSE)
++    {
++      to_p= strnmov(to_p, ER(ER_DATABASE_NAME), end_p - to_p);
++      *(to_p++)= ' ';
++      to_p= add_identifier(thd, to_p, end_p, db_name, db_name_len);
++      to_p= strnmov(to_p, ", ", end_p - to_p);
++    }
++    else
++    {
++      to_p= add_identifier(thd, to_p, end_p, db_name, db_name_len);
++      to_p= strnmov(to_p, ".", end_p - to_p);
++    }
++  }
++  if (explain_mode == EXPLAIN_ALL_VERBOSE)
++  {
++    to_p= strnmov(to_p, ER(ER_TABLE_NAME), end_p - to_p);
++    *(to_p++)= ' ';
++    to_p= add_identifier(thd, to_p, end_p, table_name, table_name_len);
++  }
++  else
++    to_p= add_identifier(thd, to_p, end_p, table_name, table_name_len);
++  if (part_name)
++  {
++    if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT)
++      to_p= strnmov(to_p, " /* ", end_p - to_p);
++    else if (explain_mode == EXPLAIN_PARTITIONS_VERBOSE)
++      to_p= strnmov(to_p, " ", end_p - to_p);
++    else
++      to_p= strnmov(to_p, ", ", end_p - to_p);
++    if (name_type != NORMAL)
++    {
++      if (name_type == TEMP)
++        to_p= strnmov(to_p, ER(ER_TEMPORARY_NAME), end_p - to_p);
++      else
++        to_p= strnmov(to_p, ER(ER_RENAMED_NAME), end_p - to_p);
++      to_p= strnmov(to_p, " ", end_p - to_p);
++    }
++    to_p= strnmov(to_p, ER(ER_PARTITION_NAME), end_p - to_p);
++    *(to_p++)= ' ';
++    to_p= add_identifier(thd, to_p, end_p, part_name, part_name_len);
++    if (subpart_name)
++    {
++      to_p= strnmov(to_p, ", ", end_p - to_p);
++      to_p= strnmov(to_p, ER(ER_SUBPARTITION_NAME), end_p - to_p);
++      *(to_p++)= ' ';
++      to_p= add_identifier(thd, to_p, end_p, subpart_name, subpart_name_len);
++    }
++    if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT)
++      to_p= strnmov(to_p, " */", end_p - to_p);
++  }
++  DBUG_PRINT("exit", ("to '%s'", to));
++  DBUG_RETURN(to_p - to);
++}
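To make the format described above concrete, here is a sketch of what explain_filename() would produce for a hypothetical partition file name, assuming the English texts for ER_DATABASE_NAME and the related messages; the outputs are derived from the code above, not from test runs:

    // Input: "test/t1#P#p0#SP#sp0"
    //
    //   EXPLAIN_ALL_VERBOSE           -> Database `test`, Table `t1`,
    //                                    Partition `p0`, Subpartition `sp0`
    //   EXPLAIN_PARTITIONS_VERBOSE    -> `test`.`t1` Partition `p0`, Subpartition `sp0`
    //   EXPLAIN_PARTITIONS_AS_COMMENT -> `test`.`t1` /* Partition `p0`, Subpartition `sp0` */
    char buf[FN_REFLEN];
    explain_filename(thd, "test/t1#P#p0#SP#sp0",
                     buf, sizeof(buf), EXPLAIN_PARTITIONS_AS_COMMENT);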
++
++
++/*
++  Translate a file name to a table name (WL #1324).
++
++  SYNOPSIS
++    filename_to_tablename()
++      from                      The file name in my_charset_filename.
++      to                OUT     The table name in system_charset_info.
++      to_length                 The size of the table name buffer.
++
++  RETURN
++    Table name length.
++*/
++
++uint filename_to_tablename(const char *from, char *to, uint to_length)
++{
++  uint errors;
++  size_t res;
++  DBUG_ENTER("filename_to_tablename");
++  DBUG_PRINT("enter", ("from '%s'", from));
++
++  if (!memcmp(from, tmp_file_prefix, tmp_file_prefix_length))
++  {
++    /* Temporary table name. */
++    res= (strnmov(to, from, to_length) - to);
++  }
++  else
++  {
++    res= strconvert(&my_charset_filename, from,
++                    system_charset_info,  to, to_length, &errors);
++    if (errors) // Old 5.0 name
++    {
++      res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX,  from, NullS) -
++            to);
++      sql_print_error("Invalid (old?) table or database name '%s'", from);
++      /*
++        TODO: add a stored procedure for fix table and database names,
++        and mention its name in error log.
++      */
++    }
++  }
++
++  DBUG_PRINT("exit", ("to '%s'", to));
++  DBUG_RETURN(res);
++}
++
++
++/**
++  Check if given string begins with "#mysql50#" prefix
++  
++  @param   name          string to check
++  
++  @retval
++    FALSE  no prefix found
++  @retval
++    TRUE   prefix found
++*/
++
++bool check_mysql50_prefix(const char *name)
++{
++  return (name[0] == '#' && 
++         !strncmp(name, MYSQL50_TABLE_NAME_PREFIX,
++                  MYSQL50_TABLE_NAME_PREFIX_LENGTH));
++}
++
++
++/**
++  Check if given string begins with "#mysql50#" prefix, cut it if so.
++  
++  @param   from          string to check and cut 
++  @param   to[out]       buffer for result string
++  @param   to_length     its size
++  
++  @retval
++    0      no prefix found
++  @retval
++    non-0  result string length
++*/
++
++uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length)
++{
++  if (check_mysql50_prefix(from))
++    return (uint) (strmake(to, from + MYSQL50_TABLE_NAME_PREFIX_LENGTH,
++                           to_length - 1) - to);
++  return 0;
++}
++
++
++/*
++  Translate a table name to a file name (WL #1324).
++
++  SYNOPSIS
++    tablename_to_filename()
++      from                      The table name in system_charset_info.
++      to                OUT     The file name in my_charset_filename.
++      to_length                 The size of the file name buffer.
++
++  RETURN
++    File name length.
++*/
++
++uint tablename_to_filename(const char *from, char *to, uint to_length)
++{
++  uint errors, length;
++  DBUG_ENTER("tablename_to_filename");
++  DBUG_PRINT("enter", ("from '%s'", from));
++
++  if ((length= check_n_cut_mysql50_prefix(from, to, to_length)))
++  {
++    /*
++      Check if the name supplied is a valid mysql 5.0 name and 
++      make the name a zero length string if it's not.
++      Note that just returning zero length is not enough : 
++      a lot of places don't check the return value and expect 
++      a zero terminated string.
++    */  
++    if (check_table_name(to, length, TRUE))
++    {
++      to[0]= 0;
++      length= 0;
++    }
++    DBUG_RETURN(length);
++  }
++  length= strconvert(system_charset_info, from,
++                     &my_charset_filename, to, to_length, &errors);
++  if (check_if_legal_tablename(to) &&
++      length + 4 < to_length)
++  {
++    memcpy(to + length, "@@@", 4);
++    length+= 3;
++  }
++  DBUG_PRINT("exit", ("to '%s'", to));
++  DBUG_RETURN(length);
++}
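A short sketch of how the naming helpers above treat the #mysql50# escape prefix (illustrative values only; buffer and return-value handling trimmed):

    char buf[FN_REFLEN];

    /* Pre-5.1 names keep their original encoding once the prefix is cut. */
    check_n_cut_mysql50_prefix("#mysql50#t1", buf, sizeof(buf));  /* buf == "t1" */

    /* Ordinary names are converted to my_charset_filename; names rejected by
       check_if_legal_tablename() additionally get "@@@" appended (see above). */
    tablename_to_filename("t1", buf, sizeof(buf));                /* buf == "t1" */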
++
++
++/*
++  Creates path to a file: mysql_data_dir/db/table.ext
++
++  SYNOPSIS
++   build_table_filename()
++     buff                       Where to write result in my_charset_filename.
++                                This may be the same as table_name.
++     bufflen                    buff size
++     db                         Database name in system_charset_info.
++     table_name                 Table name in system_charset_info.
++     ext                        File extension.
++     flags                      FN_FROM_IS_TMP or FN_TO_IS_TMP or FN_IS_TMP
++                                table_name is temporary, do not change.
++
++  NOTES
++
++    Uses database and table name, and extension to create
++    a file name in mysql_data_dir. Database and table
++    names are converted from system_charset_info into "fscs".
++    Unless flags indicate a temporary table name.
++    'db' is always converted.
++    'ext' is not converted.
++
++    The conversion suppression is required for ALTER TABLE. This
++    statement creates intermediate tables. These are regular
++    (non-temporary) tables with a temporary name. Their path names must
++    be derivable from the table name. So we cannot use
++    build_tmptable_filename() for them.
++
++  RETURN
++    path length
++*/
++
++uint build_table_filename(char *buff, size_t bufflen, const char *db,
++                          const char *table_name, const char *ext, uint flags)
++{
++  char dbbuff[FN_REFLEN];
++  char tbbuff[FN_REFLEN];
++  DBUG_ENTER("build_table_filename");
++  DBUG_PRINT("enter", ("db: '%s'  table_name: '%s'  ext: '%s'  flags: %x",
++                       db, table_name, ext, flags));
++
++  if (flags & FN_IS_TMP) // FN_FROM_IS_TMP | FN_TO_IS_TMP
++    strnmov(tbbuff, table_name, sizeof(tbbuff));
++  else
++    VOID(tablename_to_filename(table_name, tbbuff, sizeof(tbbuff)));
++
++  VOID(tablename_to_filename(db, dbbuff, sizeof(dbbuff)));
++
++  char *end = buff + bufflen;
++  /* Don't add FN_ROOTDIR if mysql_data_home already includes it */
++  char *pos = strnmov(buff, mysql_data_home, bufflen);
++  size_t rootdir_len= strlen(FN_ROOTDIR);
++  if (pos - rootdir_len >= buff &&
++      memcmp(pos - rootdir_len, FN_ROOTDIR, rootdir_len) != 0)
++    pos= strnmov(pos, FN_ROOTDIR, end - pos);
++  pos= strxnmov(pos, end - pos, dbbuff, FN_ROOTDIR, NullS);
++#ifdef USE_SYMDIR
++  unpack_dirname(buff, buff);
++  pos= strend(buff);
++#endif
++  pos= strxnmov(pos, end - pos, tbbuff, ext, NullS);
++
++  DBUG_PRINT("exit", ("buff: '%s'", buff));
++  DBUG_RETURN(pos - buff);
++}
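For orientation, a typical call looks like this (the data directory is hypothetical; it depends on the installation):

    char path[FN_REFLEN];
    uint len= build_table_filename(path, sizeof(path) - 1,
                                   "test", "t1", reg_ext /* ".frm" */, 0);
    /* With mysql_data_home == "/var/lib/mysql", path now holds
       "/var/lib/mysql/test/t1.frm" and len is its length. */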
++
++
++/*
++  Creates path to a file: mysql_tmpdir/#sql1234_12_1.ext
++
++  SYNOPSIS
++   build_tmptable_filename()
++     thd                        The thread handle.
++     buff                       Where to write result in my_charset_filename.
++     bufflen                    buff size
++
++  NOTES
++
++    Uses current_pid, thread_id, and tmp_table counter to create
++    a file name in mysql_tmpdir.
++
++  RETURN
++    path length
++*/
++
++uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
++{
++  DBUG_ENTER("build_tmptable_filename");
++
++  char *p= strnmov(buff, mysql_tmpdir, bufflen);
++  my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x%s",
++              tmp_file_prefix, current_pid,
++              thd->thread_id, thd->tmp_table++, reg_ext);
++
++  if (lower_case_table_names)
++  {
++    /* Convert all except tmpdir to lower case */
++    my_casedn_str(files_charset_info, p);
++  }
++
++  size_t length= unpack_filename(buff, buff);
++  DBUG_PRINT("exit", ("buff: '%s'", buff));
++  DBUG_RETURN(length);
++}
++
++/*
++--------------------------------------------------------------------------
++
++   MODULE: DDL log
++   -----------------
++
++   This module is used to ensure that we can recover from crashes that occur
++   in the middle of a meta-data operation in MySQL. E.g. DROP TABLE t1, t2;
++   We need to ensure that both t1 and t2 are dropped, not only t1, and
++   also that each table drop is entirely done and not "half-baked".
++
++   To support this we create log entries for each meta-data statement in the
++   ddl log while we are executing. These entries are dropped when the
++   operation is completed.
++
++   At recovery those entries that were not completed will be executed.
++
++   There is only one ddl log in the system and it is protected by a mutex
++   and there is a global struct that contains information about its current
++   state.
++
++   History:
++   First version written in 2006 by Mikael Ronstrom
++--------------------------------------------------------------------------
++*/
++
++
++struct st_global_ddl_log
++{
++  /*
++    We need to adjust buffer size to be able to handle downgrades/upgrades
++    where IO_SIZE has changed. We'll set the buffer size such that we can
++    handle a buffer size that was up to 4 times bigger in the version
++    that wrote the DDL log.
++  */
++  char file_entry_buf[4*IO_SIZE];
++  char file_name_str[FN_REFLEN];
++  char *file_name;
++  DDL_LOG_MEMORY_ENTRY *first_free;
++  DDL_LOG_MEMORY_ENTRY *first_used;
++  uint num_entries;
++  File file_id;
++  uint name_len;
++  uint io_size;
++  bool inited;
++  bool do_release;
++  bool recovery_phase;
++  st_global_ddl_log() : inited(false), do_release(false) {}
++};
++
++st_global_ddl_log global_ddl_log;
++
++pthread_mutex_t LOCK_gdl;
++
++#define DDL_LOG_ENTRY_TYPE_POS 0
++#define DDL_LOG_ACTION_TYPE_POS 1
++#define DDL_LOG_PHASE_POS 2
++#define DDL_LOG_NEXT_ENTRY_POS 4
++#define DDL_LOG_NAME_POS 8
++
++#define DDL_LOG_NUM_ENTRY_POS 0
++#define DDL_LOG_NAME_LEN_POS 4
++#define DDL_LOG_IO_SIZE_POS 8
++
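Putting the offsets above together, each io_size-byte block of the ddl log can be read as follows; this is a layout sketch inferred from the read/write code below, not an authoritative format description:

    /*
      Block 0 (header), see write_ddl_log_header()/read_ddl_log_header():
        offset 0 : number of entries    (DDL_LOG_NUM_ENTRY_POS)
        offset 4 : name length, FN_LEN  (DDL_LOG_NAME_LEN_POS)
        offset 8 : io size, IO_SIZE     (DDL_LOG_IO_SIZE_POS)

      Block N > 0 (one log entry), see read_ddl_log_entry():
        offset 0            : entry type     (DDL_LOG_ENTRY_TYPE_POS)
        offset 1            : action type    (DDL_LOG_ACTION_TYPE_POS)
        offset 2            : phase          (DDL_LOG_PHASE_POS)
        offset 4..7         : next entry no  (DDL_LOG_NEXT_ENTRY_POS)
        offset 8            : name           (FN_LEN bytes)
        offset 8 + FN_LEN   : from_name      (FN_LEN bytes)
        offset 8 + 2*FN_LEN : handler_name   (FN_LEN bytes)
    */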
++/*
++  Read one entry from ddl log file
++  SYNOPSIS
++    read_ddl_log_file_entry()
++    entry_no                     Entry number to read
++  RETURN VALUES
++    TRUE                         Error
++    FALSE                        Success
++*/
++
++static bool read_ddl_log_file_entry(uint entry_no)
++{
++  bool error= FALSE;
++  File file_id= global_ddl_log.file_id;
++  uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
++  uint io_size= global_ddl_log.io_size;
++  DBUG_ENTER("read_ddl_log_file_entry");
++
++  if (my_pread(file_id, file_entry_buf, io_size, io_size * entry_no,
++               MYF(MY_WME)) != io_size)
++    error= TRUE;
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Write one entry to the ddl log file
++  SYNOPSIS
++    write_ddl_log_file_entry()
++    entry_no                     Entry number to write
++  RETURN VALUES
++    TRUE                         Error
++    FALSE                        Success
++*/
++
++static bool write_ddl_log_file_entry(uint entry_no)
++{
++  bool error= FALSE;
++  File file_id= global_ddl_log.file_id;
++  char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
++  DBUG_ENTER("write_ddl_log_file_entry");
++
++  if (my_pwrite(file_id, (uchar*)file_entry_buf,
++                IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
++    error= TRUE;
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Write ddl log header
++  SYNOPSIS
++    write_ddl_log_header()
++  RETURN VALUES
++    TRUE                      Error
++    FALSE                     Success
++*/
++
++static bool write_ddl_log_header()
++{
++  uint16 const_var;
++  bool error= FALSE;
++  DBUG_ENTER("write_ddl_log_header");
++
++  int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
++            global_ddl_log.num_entries);
++  const_var= FN_LEN;
++  int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
++            (ulong) const_var);
++  const_var= IO_SIZE;
++  int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
++            (ulong) const_var);
++  if (write_ddl_log_file_entry(0UL))
++  {
++    sql_print_error("Error writing ddl log header");
++    DBUG_RETURN(TRUE);
++  }
++  VOID(sync_ddl_log());
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Create ddl log file name
++  SYNOPSIS
++    create_ddl_log_file_name()
++    file_name                   Filename setup
++  RETURN VALUES
++    NONE
++*/
++
++static inline void create_ddl_log_file_name(char *file_name)
++{
++  strxmov(file_name, mysql_data_home, "/", "ddl_log.log", NullS);
++}
++
++
++/*
++  Read header of ddl log file
++  SYNOPSIS
++    read_ddl_log_header()
++  RETURN VALUES
++    > 0                  Last entry in ddl log
++    0                    No entries in ddl log
++  DESCRIPTION
++    When we read the ddl log header we get information about maximum sizes
++    of names in the ddl log and we also get information about the number
++    of entries in the ddl log.
++*/
++
++static uint read_ddl_log_header()
++{
++  char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
++  char file_name[FN_REFLEN];
++  uint entry_no;
++  bool successful_open= FALSE;
++  DBUG_ENTER("read_ddl_log_header");
++
++  create_ddl_log_file_name(file_name);
++  if ((global_ddl_log.file_id= my_open(file_name,
++                                        O_RDWR | O_BINARY, MYF(0))) >= 0)
++  {
++    if (read_ddl_log_file_entry(0UL))
++    {
++      /* Write message into error log */
++      sql_print_error("Failed to read ddl log file in recovery");
++    }
++    else
++      successful_open= TRUE;
++  }
++  if (successful_open)
++  {
++    entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
++    global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
++    global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
++    DBUG_ASSERT(global_ddl_log.io_size <=
++                sizeof(global_ddl_log.file_entry_buf));
++  }
++  else
++  {
++    entry_no= 0;
++  }
++  global_ddl_log.first_free= NULL;
++  global_ddl_log.first_used= NULL;
++  global_ddl_log.num_entries= 0;
++  VOID(pthread_mutex_init(&LOCK_gdl, MY_MUTEX_INIT_FAST));
++  global_ddl_log.do_release= true;
++  DBUG_RETURN(entry_no);
++}
++
++
++/*
++  Read a ddl log entry
++  SYNOPSIS
++    read_ddl_log_entry()
++    read_entry               Number of entry to read
++    out:entry_info           Information from entry
++  RETURN VALUES
++    TRUE                     Error
++    FALSE                    Success
++  DESCRIPTION
++    Read a specified entry in the ddl log
++*/
++
++bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
++{
++  char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
++  uint inx;
++  uchar single_char;
++  DBUG_ENTER("read_ddl_log_entry");
++
++  if (read_ddl_log_file_entry(read_entry))
++  {
++    DBUG_RETURN(TRUE);
++  }
++  ddl_log_entry->entry_pos= read_entry;
++  single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
++  ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
++  single_char= file_entry_buf[DDL_LOG_ACTION_TYPE_POS];
++  ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
++  ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
++  ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
++  ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
++  inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
++  ddl_log_entry->from_name= &file_entry_buf[inx];
++  inx+= global_ddl_log.name_len;
++  ddl_log_entry->handler_name= &file_entry_buf[inx];
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  Initialise ddl log
++  SYNOPSIS
++    init_ddl_log()
++
++  DESCRIPTION
++    Write the header of the ddl log file and length of names. Also set
++    number of entries to zero.
++
++  RETURN VALUES
++    TRUE                     Error
++    FALSE                    Success
++*/
++
++static bool init_ddl_log()
++{
++  char file_name[FN_REFLEN];
++  DBUG_ENTER("init_ddl_log");
++
++  if (global_ddl_log.inited)
++    goto end;
++
++  global_ddl_log.io_size= IO_SIZE;
++  global_ddl_log.name_len= FN_LEN;
++  create_ddl_log_file_name(file_name);
++  if ((global_ddl_log.file_id= my_create(file_name,
++                                         CREATE_MODE,
++                                         O_RDWR | O_TRUNC | O_BINARY,
++                                         MYF(MY_WME))) < 0)
++  {
++    /* Couldn't create ddl log file, this is serious error */
++    sql_print_error("Failed to open ddl log file");
++    DBUG_RETURN(TRUE);
++  }
++  global_ddl_log.inited= TRUE;
++  if (write_ddl_log_header())
++  {
++    VOID(my_close(global_ddl_log.file_id, MYF(MY_WME)));
++    global_ddl_log.inited= FALSE;
++    DBUG_RETURN(TRUE);
++  }
++
++end:
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  Execute one action in a ddl log entry
++  SYNOPSIS
++    execute_ddl_log_action()
++    ddl_log_entry              Information in action entry to execute
++  RETURN VALUES
++    TRUE                       Error
++    FALSE                      Success
++*/
++
++static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
++{
++  bool frm_action= FALSE;
++  LEX_STRING handler_name;
++  handler *file= NULL;
++  MEM_ROOT mem_root;
++  int error= TRUE;
++  char to_path[FN_REFLEN];
++  char from_path[FN_REFLEN];
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  char *par_ext= (char*)".par";
++#endif
++  handlerton *hton;
++  DBUG_ENTER("execute_ddl_log_action");
++
++  if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE)
++  {
++    DBUG_RETURN(FALSE);
++  }
++  DBUG_PRINT("ddl_log",
++             ("execute type %c next %u name '%s' from_name '%s' handler '%s'",
++             ddl_log_entry->action_type,
++             ddl_log_entry->next_entry,
++             ddl_log_entry->name,
++             ddl_log_entry->from_name,
++             ddl_log_entry->handler_name));
++  handler_name.str= (char*)ddl_log_entry->handler_name;
++  handler_name.length= strlen(ddl_log_entry->handler_name);
++  init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); 
++  if (!strcmp(ddl_log_entry->handler_name, reg_ext))
++    frm_action= TRUE;
++  else
++  {
++    plugin_ref plugin= ha_resolve_by_name(thd, &handler_name);
++    if (!plugin)
++    {
++      my_error(ER_ILLEGAL_HA, MYF(0), ddl_log_entry->handler_name);
++      goto error;
++    }
++    hton= plugin_data(plugin, handlerton*);
++    file= get_new_handler((TABLE_SHARE*)0, &mem_root, hton);
++    if (!file)
++    {
++      mem_alloc_error(sizeof(handler));
++      goto error;
++    }
++  }
++  switch (ddl_log_entry->action_type)
++  {
++    case DDL_LOG_REPLACE_ACTION:
++    case DDL_LOG_DELETE_ACTION:
++    {
++      if (ddl_log_entry->phase == 0)
++      {
++        if (frm_action)
++        {
++          strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
++          if ((error= my_delete(to_path, MYF(MY_WME))))
++          {
++            if (my_errno != ENOENT)
++              break;
++          }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++          strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
++          VOID(my_delete(to_path, MYF(MY_WME)));
++#endif
++        }
++        else
++        {
++          if ((error= file->ha_delete_table(ddl_log_entry->name)))
++          {
++            if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE)
++              break;
++          }
++        }
++        if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
++          break;
++        VOID(sync_ddl_log());
++        error= FALSE;
++        if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION)
++          break;
++      }
++      DBUG_ASSERT(ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION);
++      /*
++        Fall through and perform the rename action of the replace
++        action. We have already indicated the success of the delete
++        action in the log entry by stepping up the phase.
++      */
++    }
++    case DDL_LOG_RENAME_ACTION:
++    {
++      error= TRUE;
++      if (frm_action)
++      {
++        strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
++        strxmov(from_path, ddl_log_entry->from_name, reg_ext, NullS);
++        if (my_rename(from_path, to_path, MYF(MY_WME)))
++          break;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++        strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
++        strxmov(from_path, ddl_log_entry->from_name, par_ext, NullS);
++        VOID(my_rename(from_path, to_path, MYF(MY_WME)));
++#endif
++      }
++      else
++      {
++        if (file->ha_rename_table(ddl_log_entry->from_name,
++                                  ddl_log_entry->name))
++          break;
++      }
++      if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
++        break;
++      VOID(sync_ddl_log());
++      error= FALSE;
++      break;
++    }
++    default:
++      DBUG_ASSERT(0);
++      break;
++  }
++  delete file;
++error:
++  free_root(&mem_root, MYF(0)); 
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Get a free entry in the ddl log
++  SYNOPSIS
++    get_free_ddl_log_entry()
++    out:active_entry                A ddl log memory entry returned
++  RETURN VALUES
++    TRUE                       Error
++    FALSE                      Success
++*/
++
++static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
++                                   bool *write_header)
++{
++  DDL_LOG_MEMORY_ENTRY *used_entry;
++  DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
++  DBUG_ENTER("get_free_ddl_log_entry");
++
++  if (global_ddl_log.first_free == NULL)
++  {
++    if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
++                              sizeof(DDL_LOG_MEMORY_ENTRY), MYF(MY_WME))))
++    {
++      sql_print_error("Failed to allocate memory for ddl log free list");
++      DBUG_RETURN(TRUE);
++    }
++    global_ddl_log.num_entries++;
++    used_entry->entry_pos= global_ddl_log.num_entries;
++    *write_header= TRUE;
++  }
++  else
++  {
++    used_entry= global_ddl_log.first_free;
++    global_ddl_log.first_free= used_entry->next_log_entry;
++    *write_header= FALSE;
++  }
++  /*
++    Move from free list to used list
++  */
++  used_entry->next_log_entry= first_used;
++  used_entry->prev_log_entry= NULL;
++  global_ddl_log.first_used= used_entry;
++  if (first_used)
++    first_used->prev_log_entry= used_entry;
++
++  *active_entry= used_entry;
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  External interface methods for the DDL log Module
++  ---------------------------------------------------
++*/
++
++/*
++  SYNOPSIS
++    write_ddl_log_entry()
++    ddl_log_entry         Information about log entry
++    out:entry_written     Entry information written into the log
++
++  RETURN VALUES
++    TRUE                      Error
++    FALSE                     Success
++
++  DESCRIPTION
++    A careful write of the ddl log is performed to ensure that we can
++    handle crashes occurring during CREATE and ALTER TABLE processing.
++*/
++
++bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
++                         DDL_LOG_MEMORY_ENTRY **active_entry)
++{
++  bool error, write_header;
++  DBUG_ENTER("write_ddl_log_entry");
++
++  if (init_ddl_log())
++  {
++    DBUG_RETURN(TRUE);
++  }
++  global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
++                                    (char)DDL_LOG_ENTRY_CODE;
++  global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
++                                    (char)ddl_log_entry->action_type;
++  global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
++  int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
++            ddl_log_entry->next_entry);
++  DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
++  strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
++          ddl_log_entry->name, FN_LEN - 1);
++  if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
++      ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
++  {
++    DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
++    strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
++          ddl_log_entry->from_name, FN_LEN - 1);
++  }
++  else
++    global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
++  DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
++  strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
++          ddl_log_entry->handler_name, FN_LEN - 1);
++  if (get_free_ddl_log_entry(active_entry, &write_header))
++  {
++    DBUG_RETURN(TRUE);
++  }
++  error= FALSE;
++  DBUG_PRINT("ddl_log",
++             ("write type %c next %u name '%s' from_name '%s' handler '%s'",
++             (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
++             ddl_log_entry->next_entry,
++             (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
++             (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
++                                                    + FN_LEN],
++             (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
++                                                    + (2*FN_LEN)]));
++  if (write_ddl_log_file_entry((*active_entry)->entry_pos))
++  {
++    error= TRUE;
++    sql_print_error("Failed to write entry_no = %u",
++                    (*active_entry)->entry_pos);
++  }
++  if (write_header && !error)
++  {
++    VOID(sync_ddl_log());
++    if (write_ddl_log_header())
++      error= TRUE;
++  }
++  if (error)
++    release_ddl_log_memory_entry(*active_entry);
++  DBUG_RETURN(error);
++}
++
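To make the write protocol concrete, a caller typically chains one or more action entries and then "arms" them with an execute entry; on success it rewrites the execute entry as completed. A hedged sketch using only the entry points defined in this module (locking simplified, error handling trimmed, names and paths hypothetical):

    DDL_LOG_ENTRY entry;
    DDL_LOG_MEMORY_ENTRY *log_entry= NULL;
    DDL_LOG_MEMORY_ENTRY *exec_entry= NULL;

    bzero(&entry, sizeof(entry));
    entry.action_type=  DDL_LOG_DELETE_ACTION;
    entry.next_entry=   0;                    /* last entry in this chain */
    entry.name=         "./test/t1";          /* hypothetical table path  */
    entry.from_name=    "";
    entry.handler_name= "MyISAM";             /* or reg_ext for frm files */

    pthread_mutex_lock(&LOCK_gdl);
    if (!write_ddl_log_entry(&entry, &log_entry))
      /* Arm the chain: recovery will execute it if we crash from here on. */
      (void) write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_entry);
    pthread_mutex_unlock(&LOCK_gdl);

    /* ... perform the actual file/handler operations ... */

    pthread_mutex_lock(&LOCK_gdl);
    /* Mark the chain as done so recovery ignores it, then free the entries. */
    (void) write_execute_ddl_log_entry(0, TRUE, &exec_entry);
    release_ddl_log_memory_entry(log_entry);
    release_ddl_log_memory_entry(exec_entry);
    pthread_mutex_unlock(&LOCK_gdl);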
++
++/*
++  Write the final entry in the ddl log
++  SYNOPSIS
++    write_execute_ddl_log_entry()
++    first_entry                    First entry in the linked list of entries
++                                   to execute. If 0 (= NULL) the entry is
++                                   removed and the entries are put into the
++                                   free list.
++    complete                       Flag indicating that we are only writing
++                                   information that the entry has been
++                                   completed
++    in:out:active_entry            Entry to execute, 0 = NULL if the entry
++                                   is written for the first time and needs
++                                   to be returned. In this case the entry
++                                   written is returned in this parameter
++  RETURN VALUES
++    TRUE                           Error
++    FALSE                          Success
++
++  DESCRIPTION
++    This is the last write in the ddl log. The previous log entries have
++    already been written but not yet synched to disk.
++    We write a couple of log entries that describe the actions to perform.
++    These entries are set up in a linked list; however, only when an
++    execute entry is put as the first entry will they be executed.
++    This routine writes that first execute entry.
++*/
++
++bool write_execute_ddl_log_entry(uint first_entry,
++                                 bool complete,
++                                 DDL_LOG_MEMORY_ENTRY **active_entry)
++{
++  bool write_header= FALSE;
++  char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
++  DBUG_ENTER("write_execute_ddl_log_entry");
++
++  if (init_ddl_log())
++  {
++    DBUG_RETURN(TRUE);
++  }
++  if (!complete)
++  {
++    /*
++      We haven't synched the log entries yet, so we synch them now before
++      writing the execute entry. If complete is true we haven't written
++      any log entries before; we are only here to write the execute
++      entry to indicate it is done.
++    */
++    VOID(sync_ddl_log());
++    file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE;
++  }
++  else
++    file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
++  file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
++  file_entry_buf[DDL_LOG_PHASE_POS]= 0;
++  int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
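++  /* Execute entries carry no names; clear all three name slots. */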
++  file_entry_buf[DDL_LOG_NAME_POS]= 0;
++  file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
++  file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
++  if (!(*active_entry))
++  {
++    if (get_free_ddl_log_entry(active_entry, &write_header))
++    {
++      DBUG_RETURN(TRUE);
++    }
++  }
++  if (write_ddl_log_file_entry((*active_entry)->entry_pos))
++  {
++    sql_print_error("Error writing execute entry in ddl log");
++    release_ddl_log_memory_entry(*active_entry);
++    DBUG_RETURN(TRUE);
++  }
++  VOID(sync_ddl_log());
++  if (write_header)
++  {
++    if (write_ddl_log_header())
++    {
++      release_ddl_log_memory_entry(*active_entry);
++      DBUG_RETURN(TRUE);
++    }
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  For complex rename operations we need to deactivate individual entries.
++  SYNOPSIS
++    deactivate_ddl_log_entry()
++    entry_no                      Entry position of record to change
++  RETURN VALUES
++    TRUE                         Error
++    FALSE                        Success
++  DESCRIPTION
++    Consider a replace operation where we start with an existing table
++    called t1 and a replacement table called t1#temp (or something else)
++    and where we want to delete t1 and rename t1#temp to t1. This cannot
++    be done safely unless the ddl log is informed of the phases of the
++    change.
++
++    Delete actions are 1-phase actions that can be ignored immediately
++    after being executed.
++    A rename action from x to y is also a 1-phase action since there is
++    no interaction with any other handlers named x and y.
++    A replace action, where y is dropped and x is renamed to y, needs to
++    be a two-phase action. Thus the first phase will drop y and the
++    second phase will rename x -> y.
++*/
++
++bool deactivate_ddl_log_entry(uint entry_no)
++{
++  char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
++  DBUG_ENTER("deactivate_ddl_log_entry");
++
++  if (!read_ddl_log_file_entry(entry_no))
++  {
++    if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
++    {
++      if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
++          file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
++          (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
++           file_entry_buf[DDL_LOG_PHASE_POS] == 1))
++        file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
++      else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
++      {
++        DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
++        file_entry_buf[DDL_LOG_PHASE_POS]= 1;
++      }
++      else
++      {
++        DBUG_ASSERT(0);
++      }
++      if (write_ddl_log_file_entry(entry_no))
++      {
++        sql_print_error("Error in deactivating log entry. Position = %u",
++                        entry_no);
++        DBUG_RETURN(TRUE);
++      }
++    }
++  }
++  else
++  {
++    sql_print_error("Failed in reading entry before deactivating it");
++    DBUG_RETURN(TRUE);
++  }
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  Sync ddl log file
++  SYNOPSIS
++    sync_ddl_log()
++  RETURN VALUES
++    TRUE                      Error
++    FALSE                     Success
++*/
++
++bool sync_ddl_log()
++{
++  bool error= FALSE;
++  DBUG_ENTER("sync_ddl_log");
++
++  if ((!global_ddl_log.recovery_phase) &&
++      init_ddl_log())
++  {
++    DBUG_RETURN(TRUE);
++  }
++  if (my_sync(global_ddl_log.file_id, MYF(0)))
++  {
++    /* Write to error log */
++    sql_print_error("Failed to sync ddl log");
++    error= TRUE;
++  }
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Release a log memory entry
++  SYNOPSIS
++    release_ddl_log_memory_entry()
++    log_memory_entry                Log memory entry to release
++  RETURN VALUES
++    NONE
++*/
++
++void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
++{
++  DDL_LOG_MEMORY_ENTRY *first_free= global_ddl_log.first_free;
++  DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
++  DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
++  DBUG_ENTER("release_ddl_log_memory_entry");
++
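++  /*
++    Put the released entry at the head of the free list and unlink it
++    from the used list.
++  */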
++  global_ddl_log.first_free= log_entry;
++  log_entry->next_log_entry= first_free;
++
++  if (prev_log_entry)
++    prev_log_entry->next_log_entry= next_log_entry;
++  else
++    global_ddl_log.first_used= next_log_entry;
++  if (next_log_entry)
++    next_log_entry->prev_log_entry= prev_log_entry;
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++  Execute one entry in the ddl log. Executing an entry means executing
++  a linked list of actions.
++  SYNOPSIS
++    execute_ddl_log_entry()
++    thd                        Thread handle
++    first_entry                Reference to the first action in the entry
++  RETURN VALUES
++    TRUE                       Error
++    FALSE                      Success
++*/
++
++bool execute_ddl_log_entry(THD *thd, uint first_entry)
++{
++  DDL_LOG_ENTRY ddl_log_entry;
++  uint read_entry= first_entry;
++  DBUG_ENTER("execute_ddl_log_entry");
++
++  pthread_mutex_lock(&LOCK_gdl);
++  do
++  {
++    if (read_ddl_log_entry(read_entry, &ddl_log_entry))
++    {
++      /* Write to error log and continue with next log entry */
++      sql_print_error("Failed to read entry = %u from ddl log",
++                      read_entry);
++      break;
++    }
++    DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
++                ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
++
++    if (execute_ddl_log_action(thd, &ddl_log_entry))
++    {
++      /* Write to error log and continue with next log entry */
++      sql_print_error("Failed to execute action for entry = %u from ddl log",
++                      read_entry);
++      break;
++    }
++    read_entry= ddl_log_entry.next_entry;
++  } while (read_entry);
++  pthread_mutex_unlock(&LOCK_gdl);
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  Close the ddl log
++  SYNOPSIS
++    close_ddl_log()
++  RETURN VALUES
++    NONE
++*/
++
++static void close_ddl_log()
++{
++  DBUG_ENTER("close_ddl_log");
++  if (global_ddl_log.file_id >= 0)
++  {
++    VOID(my_close(global_ddl_log.file_id, MYF(MY_WME)));
++    global_ddl_log.file_id= (File) -1;
++  }
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++  Execute the ddl log at recovery of MySQL Server
++  SYNOPSIS
++    execute_ddl_log_recovery()
++  RETURN VALUES
++    NONE
++*/
++
++void execute_ddl_log_recovery()
++{
++  uint num_entries, i;
++  THD *thd;
++  DDL_LOG_ENTRY ddl_log_entry;
++  char file_name[FN_REFLEN];
++  DBUG_ENTER("execute_ddl_log_recovery");
++
++  /*
++    Initialise global_ddl_log struct
++  */
++  bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
++  global_ddl_log.inited= FALSE;
++  global_ddl_log.recovery_phase= TRUE;
++  global_ddl_log.io_size= IO_SIZE;
++  global_ddl_log.file_id= (File) -1;
++
++  /*
++    To be able to run this from boot, we allocate a temporary THD
++  */
++  if (!(thd=new THD))
++    DBUG_VOID_RETURN;
++  thd->thread_stack= (char*) &thd;
++  thd->store_globals();
++
++  num_entries= read_ddl_log_header();
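++  /*
++    Scan all log entries; only entries of type DDL_LOG_EXECUTE_CODE start
++    a chain of actions that is replayed here.
++  */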
++  for (i= 1; i < num_entries + 1; i++)
++  {
++    if (read_ddl_log_entry(i, &ddl_log_entry))
++    {
++      sql_print_error("Failed to read entry no = %u from ddl log",
++                       i);
++      continue;
++    }
++    if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
++    {
++      if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
++      {
++        /* A really unpleasant scenario, but we continue anyway.  */
++        continue;
++      }
++    }
++  }
++  close_ddl_log();
++  create_ddl_log_file_name(file_name);
++  VOID(my_delete(file_name, MYF(0)));
++  global_ddl_log.recovery_phase= FALSE;
++  delete thd;
++  /* Remember that we don't have a THD */
++  my_pthread_setspecific_ptr(THR_THD,  0);
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++  Release all memory allocated to the ddl log
++  SYNOPSIS
++    release_ddl_log()
++  RETURN VALUES
++    NONE
++*/
++
++void release_ddl_log()
++{
++  DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
++  DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
++  DBUG_ENTER("release_ddl_log");
++
++  if (!global_ddl_log.do_release)
++    DBUG_VOID_RETURN;
++
++  pthread_mutex_lock(&LOCK_gdl);
++  while (used_list)
++  {
++    DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
++    my_free(used_list, MYF(0));
++    used_list= tmp;
++  }
++  while (free_list)
++  {
++    DDL_LOG_MEMORY_ENTRY *tmp= free_list->next_log_entry;
++    my_free(free_list, MYF(0));
++    free_list= tmp;
++  }
++  close_ddl_log();
++  global_ddl_log.inited= 0;
++  pthread_mutex_unlock(&LOCK_gdl);
++  VOID(pthread_mutex_destroy(&LOCK_gdl));
++  global_ddl_log.do_release= false;
++  DBUG_VOID_RETURN;
++}
++
++
++/*
++---------------------------------------------------------------------------
++
++  END MODULE DDL log
++  --------------------
++
++---------------------------------------------------------------------------
++*/
++
++
++/**
++   @brief construct a temporary shadow file name.
++
++   @details Make a shadow file name used by ALTER TABLE to construct the
++   modified table (while keeping the original). The modified table is then
++   moved back in place of the original table. The name must start with the
++   temp file prefix so it gets filtered out by table file listing routines.
++    
++   @param[out] buff      buffer to receive the constructed name
++   @param      bufflen   size of buff
++   @param      lpt       alter table data structure
++
++   @retval     path length
++*/
++
++uint build_table_shadow_filename(char *buff, size_t bufflen, 
++                                 ALTER_PARTITION_PARAM_TYPE *lpt)
++{
++  char tmp_name[FN_REFLEN];
++  my_snprintf (tmp_name, sizeof (tmp_name), "%s-%s", tmp_file_prefix,
++               lpt->table_name);
++  return build_table_filename(buff, bufflen, lpt->db, tmp_name, "", FN_IS_TMP);
++}
++
++
++/*
++  SYNOPSIS
++    mysql_write_frm()
++    lpt                    Struct carrying many parameters needed for this
++                           method
++    flags                  Flags as defined below
++      WFRM_WRITE_SHADOW         If set we need to prepare the table before
++                                creating the shadow frm file
++      WFRM_INSTALL_SHADOW       If set we should install the new frm
++      WFRM_KEEP_SHARE           If set we know that the share is to be
++                                retained and thus we should ensure share
++                                object is correct, if not set we don't
++                                set the new partition syntax string since
++                                we know the share object is destroyed.
++      WFRM_PACK_FRM             If set we should pack the frm file and delete
++                                the frm file
++
++  RETURN VALUES
++    TRUE                   Error
++    FALSE                  Success
++
++  DESCRIPTION
++    A support method that creates a new frm file and in this process
++    regenerates the partition data. It also works for non-partitioned
++    tables since it only handles partition data if it exists.
++*/
++
++bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
++{
++  /*
++    Prepare the table for writing a new frm file where the partitions
++    in add/drop state have temporarily changed their state.
++    We set tmp_table to avoid getting errors on the naming of the
++    primary key index.
++  */
++  int error= 0;
++  char path[FN_REFLEN+1];
++  char shadow_path[FN_REFLEN+1];
++  char shadow_frm_name[FN_REFLEN+1];
++  char frm_name[FN_REFLEN+1];
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  char *part_syntax_buf;
++  uint syntax_len;
++#endif
++  DBUG_ENTER("mysql_write_frm");
++
++  /*
++    Build shadow frm file name
++  */
++  build_table_shadow_filename(shadow_path, sizeof(shadow_path) - 1, lpt);
++  strxmov(shadow_frm_name, shadow_path, reg_ext, NullS);
++  if (flags & WFRM_WRITE_SHADOW)
++  {
++    if (mysql_prepare_create_table(lpt->thd, lpt->create_info,
++                                   lpt->alter_info,
++                                   /*tmp_table*/ 1,
++                                   &lpt->db_options,
++                                   lpt->table->file,
++                                   &lpt->key_info_buffer,
++                                   &lpt->key_count,
++                                   /*select_field_count*/ 0))
++    {
++      DBUG_RETURN(TRUE);
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    {
++      partition_info *part_info= lpt->table->part_info;
++      if (part_info)
++      {
++        if (!(part_syntax_buf= generate_partition_syntax(part_info,
++                                                         &syntax_len,
++                                                         TRUE, TRUE)))
++        {
++          DBUG_RETURN(TRUE);
++        }
++        part_info->part_info_string= part_syntax_buf;
++        part_info->part_info_len= syntax_len;
++      }
++    }
++#endif
++    /* Write shadow frm file */
++    lpt->create_info->table_options= lpt->db_options;
++    if ((mysql_create_frm(lpt->thd, shadow_frm_name, lpt->db,
++                          lpt->table_name, lpt->create_info,
++                          lpt->alter_info->create_list, lpt->key_count,
++                          lpt->key_info_buffer, lpt->table->file)) ||
++        lpt->table->file->ha_create_handler_files(shadow_path, NULL,
++                                                  CHF_CREATE_FLAG,
++                                                  lpt->create_info))
++    {
++      my_delete(shadow_frm_name, MYF(0));
++      error= 1;
++      goto end;
++    }
++  }
++  if (flags & WFRM_PACK_FRM)
++  {
++    /*
++      We need to pack the frm file and after packing it we delete the
++      frm file to ensure it doesn't get used. This is only used for
++      handlers that have the main version of the frm file stored in the
++      handler.
++    */
++    uchar *data;
++    size_t length;
++    if (readfrm(shadow_path, &data, &length) ||
++        packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len))
++    {
++      my_free(data, MYF(MY_ALLOW_ZERO_PTR));
++      my_free(lpt->pack_frm_data, MYF(MY_ALLOW_ZERO_PTR));
++      mem_alloc_error(length);
++      error= 1;
++      goto end;
++    }
++    error= my_delete(shadow_frm_name, MYF(MY_WME));
++  }
++  if (flags & WFRM_INSTALL_SHADOW)
++  {
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    partition_info *part_info= lpt->part_info;
++#endif
++    /*
++      Build frm file name
++    */
++    build_table_filename(path, sizeof(path) - 1, lpt->db,
++                         lpt->table_name, "", 0);
++    strxmov(frm_name, path, reg_ext, NullS);
++    /*
++      When we are switching to the new frm file we need to ensure that we
++      don't collide with another thread in the process of opening the frm
++      file. We start by deleting the .frm file and a possible .par file.
++      Then we record in the DDL log that we have completed the delete phase
++      by increasing the phase of the log entry. The next step is to rename
++      the new .frm file and the new .par file to the real name. After
++      completing this we write a new phase to the log entry that will
++      deactivate it.
++    */
++    VOID(pthread_mutex_lock(&LOCK_open));
++    if (my_delete(frm_name, MYF(MY_WME)) ||
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++        lpt->table->file->ha_create_handler_files(path, shadow_path,
++                                                  CHF_DELETE_FLAG, NULL) ||
++        deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) ||
++        (sync_ddl_log(), FALSE) ||
++#endif
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++        my_rename(shadow_frm_name, frm_name, MYF(MY_WME)) ||
++        lpt->table->file->ha_create_handler_files(path, shadow_path,
++                                                  CHF_RENAME_FLAG, NULL))
++#else
++        my_rename(shadow_frm_name, frm_name, MYF(MY_WME)))
++#endif
++    {
++      error= 1;
++      goto err;
++    }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    if (part_info && (flags & WFRM_KEEP_SHARE))
++    {
++      TABLE_SHARE *share= lpt->table->s;
++      char *tmp_part_syntax_str;
++      if (!(part_syntax_buf= generate_partition_syntax(part_info,
++                                                       &syntax_len,
++                                                       TRUE, TRUE)))
++      {
++        error= 1;
++        goto err;
++      }
++      if (share->partition_info_buffer_size < syntax_len + 1)
++      {
++        share->partition_info_buffer_size= syntax_len+1;
++        if (!(tmp_part_syntax_str= (char*) strmake_root(&share->mem_root,
++                                                        part_syntax_buf,
++                                                        syntax_len)))
++        {
++          error= 1;
++          goto err;
++        }
++        share->partition_info= tmp_part_syntax_str;
++      }
++      else
++        memcpy((char*) share->partition_info, part_syntax_buf, syntax_len + 1);
++      share->partition_info_len= part_info->part_info_len= syntax_len;
++      part_info->part_info_string= part_syntax_buf;
++    }
++#endif
++
++err:
++    VOID(pthread_mutex_unlock(&LOCK_open));
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos);
++    part_info->frm_log_entry= NULL;
++    VOID(sync_ddl_log());
++#endif
++  }
++
++end:
++  DBUG_RETURN(error);
++}
++
++
++/*
++  SYNOPSIS
++    write_bin_log()
++    thd                           Thread object
++    clear_error                   is clear_error to be called
++    query                         Query to log
++    query_length                  Length of query
++
++  RETURN VALUES
++    NONE
++
++  DESCRIPTION
++    Write to the binlog if it is open. This routine is used in multiple
++    places in this file.
++*/
++
++int write_bin_log(THD *thd, bool clear_error,
++                  char const *query, ulong query_length)
++{
++  int error= 0;
++  if (mysql_bin_log.is_open())
++  {
++    int errcode= 0;
++    if (clear_error)
++      thd->clear_error();
++    else
++      errcode= query_error_code(thd, TRUE);
++    error= thd->binlog_query(THD::STMT_QUERY_TYPE,
++                             query, query_length, FALSE, FALSE, errcode);
++  }
++  return error;
++}
++
++
++/*
++ Delete (drop) tables.
++
++  SYNOPSIS
++   mysql_rm_table()
++   thd			Thread handle
++   tables		List of tables to delete
++   if_exists		If 1, don't give an error if a table doesn't exist
++
++  NOTES
++    Will delete all tables that can be deleted and give a compact error
++    message for the tables that could not be deleted.
++    If a table is in use, we will wait for all users to free the table
++    before dropping it.
++
++    Wait if global_read_lock (FLUSH TABLES WITH READ LOCK) is set.
++
++  RETURN
++    FALSE OK.  In this case ok packet is sent to user
++    TRUE  Error
++
++*/
++
++bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
++                    my_bool drop_temporary)
++{
++  bool error= FALSE, need_start_waiters= FALSE;
++  Drop_table_error_handler err_handler(thd->get_internal_handler());
++  DBUG_ENTER("mysql_rm_table");
++
++  /* mark for close and remove all cached entries */
++
++  if (!drop_temporary)
++  {
++    if ((error= wait_if_global_read_lock(thd, 0, 1)))
++    {
++      my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), tables->table_name);
++      DBUG_RETURN(TRUE);
++    }
++    else
++      need_start_waiters= TRUE;
++  }
++
++  /*
++    Acquire LOCK_open after wait_if_global_read_lock(). If we held
++    LOCK_open during wait_if_global_read_lock(), other threads could not
++    close their tables. This could easily lead to a deadlock.
++  */
++  thd->push_internal_handler(&err_handler);
++  error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0);
++  thd->pop_internal_handler();
++
++
++  if (need_start_waiters)
++    start_waiting_global_read_lock(thd);
++
++  if (error)
++    DBUG_RETURN(TRUE);
++  my_ok(thd);
++  DBUG_RETURN(FALSE);
++}
++
++/*
++  Execute the drop of a normal or temporary table
++
++  SYNOPSIS
++    mysql_rm_table_part2()
++    thd			Thread handler
++    tables		Tables to drop
++    if_exists		If set, don't give an error if the table doesn't exist.
++			In this case we give a warning of level 'NOTE'
++    drop_temporary	Only drop temporary tables
++    drop_view		Allow deleting a VIEW .frm
++    dont_log_query	Don't write the query to the log files. This will also
++			not generate warnings if the handler files don't exist
++
++  TODO:
++    When logging to the binary log, we should log
++    tmp_tables and transactional tables as separate statements if we
++    are in a transaction;  This is needed to get these tables into the
++    cached binary log that is only written on COMMIT.
++
++   The current code only writes DROP statements that use only temporary
++   tables to the cached binary log.  This should be ok in most cases, but
++   not all.
++
++ RETURN
++   0	ok
++   1	Error
++   -1	Thread was killed
++*/
++
++int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
++			 bool drop_temporary, bool drop_view,
++			 bool dont_log_query)
++{
++  TABLE_LIST *table;
++  char path[FN_REFLEN + 1], *alias;
++  uint path_length;
++  String wrong_tables;
++  int error= 0;
++  int non_temp_tables_count= 0;
++  bool some_tables_deleted=0, tmp_table_deleted=0, foreign_key_error=0;
++  String built_query;
++  String built_tmp_query;
++  DBUG_ENTER("mysql_rm_table_part2");
++
++  LINT_INIT(alias);
++  LINT_INIT(path_length);
++
++  if (thd->current_stmt_binlog_row_based && !dont_log_query)
++  {
++    built_query.set_charset(system_charset_info);
++    if (if_exists)
++      built_query.append("DROP TABLE IF EXISTS ");
++    else
++      built_query.append("DROP TABLE ");
++  }
++
++  mysql_ha_rm_tables(thd, tables, FALSE);
++
++  pthread_mutex_lock(&LOCK_open);
++
++  /* Disable drop of enabled log tables, must be done before name locking */
++  for (table= tables; table; table= table->next_local)
++  {
++    if (check_if_log_table(table->db_length, table->db,
++                           table->table_name_length, table->table_name, 1))
++    {
++      my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP");
++      pthread_mutex_unlock(&LOCK_open);
++      DBUG_RETURN(1);
++    }
++  }
++
++  if (!drop_temporary && lock_table_names_exclusively(thd, tables))
++  {
++    pthread_mutex_unlock(&LOCK_open);
++    DBUG_RETURN(1);
++  }
++
++  for (table= tables; table; table= table->next_local)
++  {
++    char *db=table->db;
++    handlerton *table_type;
++    enum legacy_db_type frm_db_type= DB_TYPE_UNKNOWN;
++
++    DBUG_PRINT("table", ("table_l: '%s'.'%s'  table: 0x%lx  s: 0x%lx",
++                         table->db, table->table_name, (long) table->table,
++                         table->table ? (long) table->table->s : (long) -1));
++
++    error= drop_temporary_table(thd, table);
++
++    switch (error) {
++    case  0:
++      // removed temporary table
++      tmp_table_deleted= 1;
++      if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED &&
++          thd->current_stmt_binlog_row_based)
++      {
++        if (built_tmp_query.is_empty()) 
++        {
++          built_tmp_query.set_charset(system_charset_info);
++          built_tmp_query.append("DROP TEMPORARY TABLE IF EXISTS ");
++        }
++
++        built_tmp_query.append("`");
++        if (thd->db == NULL || strcmp(db,thd->db) != 0)
++        {
++          built_tmp_query.append(db);
++          built_tmp_query.append("`.`");
++        }
++        built_tmp_query.append(table->table_name);
++        built_tmp_query.append("`,");
++      }
++
++      continue;
++    case -1:
++      DBUG_ASSERT(thd->in_sub_stmt);
++      error= 1;
++      goto err_with_placeholders;
++    default:
++      // temporary table not found
++      error= 0;
++    }
++
++    /*
++      If row-based replication is used and the table is not a
++      temporary table, we add the table name to the drop statement
++      being built.  The string always ends in a comma and the comma
++      will be chopped off before being written to the binary log.
++      */
++    if (!drop_temporary && thd->current_stmt_binlog_row_based && !dont_log_query)
++    {
++      non_temp_tables_count++;
++      /*
++        Don't write the database name if it is the current one (or if
++        thd->db is NULL).
++      */
++      built_query.append("`");
++      if (thd->db == NULL || strcmp(db,thd->db) != 0)
++      {
++        built_query.append(db);
++        built_query.append("`.`");
++      }
++
++      built_query.append(table->table_name);
++      built_query.append("`,");
++    }
++
++    if (!drop_temporary)
++    {
++      TABLE *locked_table;
++      abort_locked_tables(thd, db, table->table_name);
++      remove_table_from_cache(thd, db, table->table_name,
++	                      RTFC_WAIT_OTHER_THREAD_FLAG |
++			      RTFC_CHECK_KILLED_FLAG);
++      /*
++        If the table was used in lock tables, remember it so that
++        unlock_table_names can free it
++      */
++      if ((locked_table= drop_locked_tables(thd, db, table->table_name)))
++        table->table= locked_table;
++
++      if (thd->killed)
++      {
++        error= -1;
++        goto err_with_placeholders;
++      }
++      alias= (lower_case_table_names == 2) ? table->alias : table->table_name;
++      /* remove .frm file and engine files */
++      path_length= build_table_filename(path, sizeof(path) - 1, db, alias,
++                                        reg_ext,
++                                        table->internal_tmp_table ?
++                                        FN_IS_TMP : 0);
++    }
++    DEBUG_SYNC(thd, "rm_table_part2_before_delete_table");
++    if (drop_temporary ||
++        ((access(path, F_OK) &&
++          ha_create_table_from_engine(thd, db, alias)) ||
++         (!drop_view &&
++          mysql_frm_type(thd, path, &frm_db_type) != FRMTYPE_TABLE)))
++    {
++      // Table was not found on disk and table can't be created from engine
++      if (if_exists)
++	push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++			    ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR),
++			    table->table_name);
++      else
++        error= 1;
++    }
++    else
++    {
++      char *end;
++      /*
++        Cannot use the db_type from the table, since that might have changed
++        while waiting for the exclusive name lock. We are under LOCK_open,
++        so reading from the frm-file is safe.
++      */
++      if (frm_db_type == DB_TYPE_UNKNOWN)
++      {
++        mysql_frm_type(thd, path, &frm_db_type);
++        DBUG_PRINT("info", ("frm_db_type %d from %s", frm_db_type, path));
++      }
++      table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
++      // Remove extension for delete
++      *(end= path + path_length - reg_ext_length)= '\0';
++      DBUG_PRINT("info", ("deleting table of type %d",
++                          (table_type ? table_type->db_type : 0)));
++      error= ha_delete_table(thd, table_type, path, db, table->table_name,
++                             !dont_log_query);
++
++      /* No error for a non-existent table with 'IF EXISTS', or for a view */
++      if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) && 
++	  (if_exists || table_type == NULL))
++      {
++	error= 0;
++        thd->clear_error();
++      }
++      if (error == HA_ERR_ROW_IS_REFERENCED)
++      {
++	/* the table is referenced by a foreign key constraint */
++	foreign_key_error=1;
++      }
++      if (!error || error == ENOENT || error == HA_ERR_NO_SUCH_TABLE)
++      {
++        int new_error;
++	/* Delete the table definition file */
++	strmov(end,reg_ext);
++	if (!(new_error=my_delete(path,MYF(MY_WME))))
++        {
++	  some_tables_deleted=1;
++          new_error= Table_triggers_list::drop_all_triggers(thd, db,
++                                                            table->table_name);
++        }
++        error|= new_error;
++      }
++    }
++    if (error)
++    {
++      if (wrong_tables.length())
++	wrong_tables.append(',');
++      wrong_tables.append(String(table->table_name,system_charset_info));
++    }
++    DBUG_PRINT("table", ("table: 0x%lx  s: 0x%lx", (long) table->table,
++                         table->table ? (long) table->table->s : (long) -1));
++  }
++  /*
++    It's safe to unlock LOCK_open: we have an exclusive lock
++    on the table name.
++  */
++  pthread_mutex_unlock(&LOCK_open);
++  DEBUG_SYNC(thd, "rm_table_part2_before_binlog");
++  thd->thread_specific_used|= tmp_table_deleted;
++  error= 0;
++  if (wrong_tables.length())
++  {
++    if (!foreign_key_error)
++      my_printf_error(ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), MYF(0),
++                      wrong_tables.c_ptr());
++    else
++      my_message(ER_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED), MYF(0));
++    error= 1;
++  }
++
++  if (some_tables_deleted || tmp_table_deleted || !error)
++  {
++    query_cache_invalidate3(thd, tables, 0);
++    if (!dont_log_query)
++    {
++      if (!thd->current_stmt_binlog_row_based ||
++          (non_temp_tables_count > 0 && !tmp_table_deleted))
++      {
++        /*
++          In this case, we are either using statement-based
++          replication or using row-based replication but have only
++          deleted one or more non-temporary tables (and no temporary
++          tables).  In either case, we can write the original query into
++          the binary log.
++         */
++        error |= write_bin_log(thd, !error, thd->query(), thd->query_length());
++      }
++      else if (thd->current_stmt_binlog_row_based &&
++               tmp_table_deleted)
++      {
++        if (non_temp_tables_count > 0)
++        {
++          /*
++            In this case we have deleted both temporary and
++            non-temporary tables, so:
++            - since we have deleted a non-temporary table we have to
++              binlog the statement, but
++            - since we have deleted a temporary table we cannot binlog
++              the statement (since the table may not have been created on the
++              slave - check the "if" branch below; this might cause the slave to
++              stop).
++
++            Instead, we write a built statement, only containing the
++            non-temporary tables, to the binary log
++          */
++          built_query.chop();                  // Chop off the last comma
++          built_query.append(" /* generated by server */");
++          error|= write_bin_log(thd, !error, built_query.ptr(), built_query.length());
++        }
++
++        /*
++          Any temporary table drop always needs to be logged if:
++            1. the thread logging format is mixed mode; AND
++            2. the current statement logging format is set to row.
++        */
++        if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED)
++        {
++          /*
++            In this case we have deleted some temporary tables but we are using
++            row based logging for the statement. However, the thread uses mixed
++            mode format, hence we need to log the drop as we cannot tell for
++            sure whether the create was previously logged as a statement or not,
++            i.e., before switching to row mode.
++          */
++          built_tmp_query.chop();                  // Chop off the last comma
++          built_tmp_query.append(" /* generated by server */");
++          error|= write_bin_log(thd, !error, built_tmp_query.ptr(), built_tmp_query.length());
++        }
++      }
++
++      /*
++        The remaining cases are:
++        - no tables were deleted and
++        - only temporary tables were deleted and row-based
++          replication is used.
++        In both these cases, nothing should be written to the binary
++        log.
++      */
++    }
++  }
++  pthread_mutex_lock(&LOCK_open);
++err_with_placeholders:
++  unlock_table_names(thd, tables, (TABLE_LIST*) 0);
++  pthread_mutex_unlock(&LOCK_open);
++  DBUG_RETURN(error);
++}
++
++
++/*
++  Quickly remove a table.
++
++  SYNOPSIS
++    quick_rm_table()
++      base                      The handlerton handle.
++      db                        The database name.
++      table_name                The table name.
++      flags                     flags for build_table_filename().
++
++  RETURN
++    0           OK
++    != 0        Error
++*/
++
++bool quick_rm_table(handlerton *base,const char *db,
++                    const char *table_name, uint flags)
++{
++  char path[FN_REFLEN + 1];
++  bool error= 0;
++  DBUG_ENTER("quick_rm_table");
++
++  uint path_length= build_table_filename(path, sizeof(path) - 1,
++                                         db, table_name, reg_ext, flags);
++  if (my_delete(path,MYF(0)))
++    error= 1; /* purecov: inspected */
++  path[path_length - reg_ext_length]= '\0'; // Remove reg_ext
++  if (!(flags & FRM_ONLY))
++    error|= ha_delete_table(current_thd, base, path, db, table_name, 0);
++  DBUG_RETURN(error);
++}
++
++/*
++  Sort keys in the following order:
++  - PRIMARY KEY
++  - UNIQUE keys where all column are NOT NULL
++  - UNIQUE keys that don't contain partial segments
++  - Other UNIQUE keys
++  - Normal keys
++  - Fulltext keys
++
++  This will make checking for duplicated keys faster and ensure that
++  PRIMARY keys are prioritized.
++*/
++
++static int sort_keys(KEY *a, KEY *b)
++{
++  ulong a_flags= a->flags, b_flags= b->flags;
++  
++  if (a_flags & HA_NOSAME)
++  {
++    if (!(b_flags & HA_NOSAME))
++      return -1;
++    if ((a_flags ^ b_flags) & (HA_NULL_PART_KEY | HA_END_SPACE_KEY))
++    {
++      /* Sort NOT NULL keys before other keys */
++      return (a_flags & (HA_NULL_PART_KEY | HA_END_SPACE_KEY)) ? 1 : -1;
++    }
++    if (a->name == primary_key_name)
++      return -1;
++    if (b->name == primary_key_name)
++      return 1;
++    /* Sort keys that don't contain partial segments before others */
++    if ((a_flags ^ b_flags) & HA_KEY_HAS_PART_KEY_SEG)
++      return (a_flags & HA_KEY_HAS_PART_KEY_SEG) ? 1 : -1;
++  }
++  else if (b_flags & HA_NOSAME)
++    return 1;					// Prefer b
++
++  if ((a_flags ^ b_flags) & HA_FULLTEXT)
++  {
++    return (a_flags & HA_FULLTEXT) ? 1 : -1;
++  }
++  /*
++    Prefer the original key order.  usable_key_parts here contains
++    the original key position.
++  */
++  return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
++	  (a->usable_key_parts > b->usable_key_parts) ? 1 :
++	  0);
++}
++
++/*
++  Check TYPELIB (set or enum) for duplicates
++
++  SYNOPSIS
++    check_duplicates_in_interval()
++    set_or_name   "SET" or "ENUM" string for warning message
++    name	  name of the checked column
++    typelib	  list of values for the column
++    cs		  charset+collation of the column values
++    dup_val_count  returns the count of duplicate elements
++
++  DESCRIPTION
++    This function prints a warning for each value in the list
++    that has duplicates to its right
++
++  RETURN VALUES
++    0             ok
++    1             Error
++*/
++
++bool check_duplicates_in_interval(const char *set_or_name,
++                                  const char *name, TYPELIB *typelib,
++                                  CHARSET_INFO *cs, unsigned int *dup_val_count)
++{
++  TYPELIB tmp= *typelib;
++  const char **cur_value= typelib->type_names;
++  unsigned int *cur_length= typelib->type_lengths;
++  *dup_val_count= 0;  
++  
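++  /* For each value, look for a duplicate among the values following it. */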
++  for ( ; tmp.count > 1; cur_value++, cur_length++)
++  {
++    tmp.type_names++;
++    tmp.type_lengths++;
++    tmp.count--;
++    if (find_type2(&tmp, (const char*)*cur_value, *cur_length, cs))
++    {
++      if ((current_thd->variables.sql_mode &
++         (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)))
++      {
++        my_error(ER_DUPLICATED_VALUE_IN_TYPE, MYF(0),
++                 name,*cur_value,set_or_name);
++        return 1;
++      }
++      push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_NOTE,
++			  ER_DUPLICATED_VALUE_IN_TYPE,
++			  ER(ER_DUPLICATED_VALUE_IN_TYPE),
++			  name,*cur_value,set_or_name);
++      (*dup_val_count)++;
++    }
++  }
++  return 0;
++}
++
++
++/*
++  Check TYPELIB (set or enum) max and total lengths
++
++  SYNOPSIS
++    calculate_interval_lengths()
++    cs            charset+collation pair of the interval
++    typelib       list of values for the column
++    max_length    length of the longest item
++    tot_length    sum of the item lengths
++
++  DESCRIPTION
++    After this function call:
++    - ENUM uses max_length
++    - SET uses tot_length.
++
++  RETURN VALUES
++    void
++*/
++void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
++                                uint32 *max_length, uint32 *tot_length)
++{
++  const char **pos;
++  uint *len;
++  *max_length= *tot_length= 0;
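++  /*
++    Walk all interval values, summing their lengths (in characters) and
++    remembering the longest one.
++  */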
++  for (pos= interval->type_names, len= interval->type_lengths;
++       *pos ; pos++, len++)
++  {
++    size_t length= cs->cset->numchars(cs, *pos, *pos + *len);
++    *tot_length+= length;
++    set_if_bigger(*max_length, (uint32)length);
++  }
++}
++
++
++/*
++  Prepare a create_table instance for packing
++
++  SYNOPSIS
++    prepare_create_field()
++    sql_field     field to prepare for packing
++    blob_columns  count for BLOBs
++    timestamps    count for timestamps
++    timestamps_with_niladic  count for timestamps with niladic defaults
++    table_flags   table flags
++
++  DESCRIPTION
++    This function prepares a Create_field instance.
++    Fields such as pack_flag are valid after this call.
++
++  RETURN VALUES
++   0	ok
++   1	Error
++*/
++
++int prepare_create_field(Create_field *sql_field, 
++			 uint *blob_columns, 
++			 int *timestamps, int *timestamps_with_niladic,
++			 longlong table_flags)
++{
++  unsigned int dup_val_count;
++  DBUG_ENTER("prepare_field");
++
++  /*
++    This code came from mysql_prepare_create_table.
++    Indent preserved to make patching easier
++  */
++  DBUG_ASSERT(sql_field->charset);
++
++  switch (sql_field->sql_type) {
++  case MYSQL_TYPE_BLOB:
++  case MYSQL_TYPE_MEDIUM_BLOB:
++  case MYSQL_TYPE_TINY_BLOB:
++  case MYSQL_TYPE_LONG_BLOB:
++    sql_field->pack_flag=FIELDFLAG_BLOB |
++      pack_length_to_packflag(sql_field->pack_length -
++                              portable_sizeof_char_ptr);
++    if (sql_field->charset->state & MY_CS_BINSORT)
++      sql_field->pack_flag|=FIELDFLAG_BINARY;
++    sql_field->length=8;			// Unireg field length
++    sql_field->unireg_check=Field::BLOB_FIELD;
++    (*blob_columns)++;
++    break;
++  case MYSQL_TYPE_GEOMETRY:
++#ifdef HAVE_SPATIAL
++    if (!(table_flags & HA_CAN_GEOMETRY))
++    {
++      my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED),
++                      MYF(0), "GEOMETRY");
++      DBUG_RETURN(1);
++    }
++    sql_field->pack_flag=FIELDFLAG_GEOM |
++      pack_length_to_packflag(sql_field->pack_length -
++                              portable_sizeof_char_ptr);
++    if (sql_field->charset->state & MY_CS_BINSORT)
++      sql_field->pack_flag|=FIELDFLAG_BINARY;
++    sql_field->length=8;			// Unireg field length
++    sql_field->unireg_check=Field::BLOB_FIELD;
++    (*blob_columns)++;
++    break;
++#else
++    my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0),
++                    sym_group_geom.name, sym_group_geom.needed_define);
++    DBUG_RETURN(1);
++#endif /*HAVE_SPATIAL*/
++  case MYSQL_TYPE_VARCHAR:
++#ifndef QQ_ALL_HANDLERS_SUPPORT_VARCHAR
++    if (table_flags & HA_NO_VARCHAR)
++    {
++      /* convert VARCHAR to CHAR because handler is not yet up to date */
++      sql_field->sql_type=    MYSQL_TYPE_VAR_STRING;
++      sql_field->pack_length= calc_pack_length(sql_field->sql_type,
++                                               (uint) sql_field->length);
++      if ((sql_field->length / sql_field->charset->mbmaxlen) >
++          MAX_FIELD_CHARLENGTH)
++      {
++        my_printf_error(ER_TOO_BIG_FIELDLENGTH, ER(ER_TOO_BIG_FIELDLENGTH),
++                        MYF(0), sql_field->field_name, MAX_FIELD_CHARLENGTH);
++        DBUG_RETURN(1);
++      }
++    }
++#endif
++    /* fall through */
++  case MYSQL_TYPE_STRING:
++    sql_field->pack_flag=0;
++    if (sql_field->charset->state & MY_CS_BINSORT)
++      sql_field->pack_flag|=FIELDFLAG_BINARY;
++    break;
++  case MYSQL_TYPE_ENUM:
++    sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) |
++      FIELDFLAG_INTERVAL;
++    if (sql_field->charset->state & MY_CS_BINSORT)
++      sql_field->pack_flag|=FIELDFLAG_BINARY;
++    sql_field->unireg_check=Field::INTERVAL_FIELD;
++    if (check_duplicates_in_interval("ENUM",sql_field->field_name,
++                                     sql_field->interval,
++                                     sql_field->charset, &dup_val_count))
++      DBUG_RETURN(1);
++    break;
++  case MYSQL_TYPE_SET:
++    sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) |
++      FIELDFLAG_BITFIELD;
++    if (sql_field->charset->state & MY_CS_BINSORT)
++      sql_field->pack_flag|=FIELDFLAG_BINARY;
++    sql_field->unireg_check=Field::BIT_FIELD;
++    if (check_duplicates_in_interval("SET",sql_field->field_name,
++                                     sql_field->interval,
++                                     sql_field->charset, &dup_val_count))
++      DBUG_RETURN(1);
++    /* Check that the count of unique members is not more than 64 */
++    if (sql_field->interval->count -  dup_val_count > sizeof(longlong)*8)
++    {
++       my_error(ER_TOO_BIG_SET, MYF(0), sql_field->field_name);
++       DBUG_RETURN(1);
++    }
++    break;
++  case MYSQL_TYPE_DATE:			// Rest of string types
++  case MYSQL_TYPE_NEWDATE:
++  case MYSQL_TYPE_TIME:
++  case MYSQL_TYPE_DATETIME:
++  case MYSQL_TYPE_NULL:
++    sql_field->pack_flag=f_settype((uint) sql_field->sql_type);
++    break;
++  case MYSQL_TYPE_BIT:
++    /* 
++      We have sql_field->pack_flag already set here, see
++      mysql_prepare_create_table().
++    */
++    break;
++  case MYSQL_TYPE_NEWDECIMAL:
++    sql_field->pack_flag=(FIELDFLAG_NUMBER |
++                          (sql_field->flags & UNSIGNED_FLAG ? 0 :
++                           FIELDFLAG_DECIMAL) |
++                          (sql_field->flags & ZEROFILL_FLAG ?
++                           FIELDFLAG_ZEROFILL : 0) |
++                          (sql_field->decimals << FIELDFLAG_DEC_SHIFT));
++    break;
++  case MYSQL_TYPE_TIMESTAMP:
++    /* We should replace old TIMESTAMP fields with their newer analogs */
++    if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD)
++    {
++      if (!*timestamps)
++      {
++        sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD;
++        (*timestamps_with_niladic)++;
++      }
++      else
++        sql_field->unireg_check= Field::NONE;
++    }
++    else if (sql_field->unireg_check != Field::NONE)
++      (*timestamps_with_niladic)++;
++
++    (*timestamps)++;
++    /* fall-through */
++  default:
++    sql_field->pack_flag=(FIELDFLAG_NUMBER |
++                          (sql_field->flags & UNSIGNED_FLAG ? 0 :
++                           FIELDFLAG_DECIMAL) |
++                          (sql_field->flags & ZEROFILL_FLAG ?
++                           FIELDFLAG_ZEROFILL : 0) |
++                          f_settype((uint) sql_field->sql_type) |
++                          (sql_field->decimals << FIELDFLAG_DEC_SHIFT));
++    break;
++  }
++  if (!(sql_field->flags & NOT_NULL_FLAG))
++    sql_field->pack_flag|= FIELDFLAG_MAYBE_NULL;
++  if (sql_field->flags & NO_DEFAULT_VALUE_FLAG)
++    sql_field->pack_flag|= FIELDFLAG_NO_DEFAULT;
++  DBUG_RETURN(0);
++}
++
++/*
++  Preparation for table creation
++
++  SYNOPSIS
++    mysql_prepare_create_table()
++      thd                       Thread object.
++      create_info               Create information (like MAX_ROWS).
++      alter_info                List of columns and indexes to create
++      tmp_table                 If a temporary table is to be created.
++      db_options          INOUT Table options (like HA_OPTION_PACK_RECORD).
++      file                      The handler for the new table.
++      key_info_buffer     OUT   An array of KEY structs for the indexes.
++      key_count           OUT   The number of elements in the array.
++      select_field_count        The number of fields coming from a select table.
++
++  DESCRIPTION
++    Prepares the table and key structures for table creation.
++
++  NOTES
++    sets create_info->varchar if the table has a varchar
++
++  RETURN VALUES
++    FALSE    OK
++    TRUE     error
++*/
++
++static int
++mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
++                           Alter_info *alter_info,
++                           bool tmp_table,
++                           uint *db_options,
++                           handler *file, KEY **key_info_buffer,
++                           uint *key_count, int select_field_count)
++{
++  const char	*key_name;
++  Create_field	*sql_field,*dup_field;
++  uint		field,null_fields,blob_columns,max_key_length;
++  ulong		record_offset= 0;
++  KEY		*key_info;
++  KEY_PART_INFO *key_part_info;
++  int		timestamps= 0, timestamps_with_niladic= 0;
++  int		field_no,dup_no;
++  int		select_field_pos,auto_increment=0;
++  List_iterator<Create_field> it(alter_info->create_list);
++  List_iterator<Create_field> it2(alter_info->create_list);
++  uint total_uneven_bit_length= 0;
++  DBUG_ENTER("mysql_prepare_create_table");
++
++  select_field_pos= alter_info->create_list.elements - select_field_count;
++  null_fields=blob_columns=0;
++  create_info->varchar= 0;
++  max_key_length= file->max_key_length();
++
++  for (field_no=0; (sql_field=it++) ; field_no++)
++  {
++    CHARSET_INFO *save_cs;
++
++    /*
++      Initialize length from its original value (number of characters),
++      which was set in the parser. This is necessary if we're
++      executing a prepared statement for the second time.
++    */
++    sql_field->length= sql_field->char_length;
++    if (!sql_field->charset)
++      sql_field->charset= create_info->default_table_charset;
++    /*
++      table_charset is set in ALTER TABLE if we want to change the character
++      set for all varchar/char columns.
++      But the table charset must not affect the BLOB fields, so don't
++      allow changing my_charset_bin to something else.
++    */
++    if (create_info->table_charset && sql_field->charset != &my_charset_bin)
++      sql_field->charset= create_info->table_charset;
++
++    save_cs= sql_field->charset;
++    if ((sql_field->flags & BINCMP_FLAG) &&
++	!(sql_field->charset= get_charset_by_csname(sql_field->charset->csname,
++						    MY_CS_BINSORT,MYF(0))))
++    {
++      char tmp[65];
++      strmake(strmake(tmp, save_cs->csname, sizeof(tmp)-4),
++              STRING_WITH_LEN("_bin"));
++      my_error(ER_UNKNOWN_COLLATION, MYF(0), tmp);
++      DBUG_RETURN(TRUE);
++    }
++
++    /*
++      Convert the default value from client character
++      set into the column character set if necessary.
++    */
++    if (sql_field->def && 
++        save_cs != sql_field->def->collation.collation &&
++        (sql_field->sql_type == MYSQL_TYPE_VAR_STRING ||
++         sql_field->sql_type == MYSQL_TYPE_STRING ||
++         sql_field->sql_type == MYSQL_TYPE_SET ||
++         sql_field->sql_type == MYSQL_TYPE_ENUM))
++    {
++      /*
++        Starting from 5.1 we work here with a copy of Create_field
++        created by the caller, not with the instance that was
++        originally created during parsing. It's OK to create
++        a temporary item and initialize with it a member of the
++        copy -- this item will be thrown away along with the copy
++        at the end of execution, and thus not introduce a dangling
++        pointer in the parsed tree of a prepared statement or a
++        stored procedure statement.
++      */
++      sql_field->def= sql_field->def->safe_charset_converter(save_cs);
++
++      if (sql_field->def == NULL)
++      {
++        /* Could not convert */
++        my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++        DBUG_RETURN(TRUE);
++      }
++    }
++
++    if (sql_field->sql_type == MYSQL_TYPE_SET ||
++        sql_field->sql_type == MYSQL_TYPE_ENUM)
++    {
++      uint32 dummy;
++      CHARSET_INFO *cs= sql_field->charset;
++      TYPELIB *interval= sql_field->interval;
++
++      /*
++        Create typelib from interval_list, and if necessary
++        convert strings from client character set to the
++        column character set.
++      */
++      if (!interval)
++      {
++        /*
++          Create the typelib in runtime memory - we will free the
++          occupied memory at the same time when we free this
++          sql_field -- at the end of execution.
++        */
++        interval= sql_field->interval= typelib(thd->mem_root,
++                                               sql_field->interval_list);
++        List_iterator<String> int_it(sql_field->interval_list);
++        String conv, *tmp;
++        char comma_buf[2];
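++        /*
++          Encode ',' in the column character set; it is used below to
++          reject SET member values that contain a comma.
++        */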
++        int comma_length= cs->cset->wc_mb(cs, ',', (uchar*) comma_buf,
++                                          (uchar*) comma_buf + 
++                                          sizeof(comma_buf));
++        DBUG_ASSERT(comma_length > 0);
++        for (uint i= 0; (tmp= int_it++); i++)
++        {
++          size_t lengthsp;
++          if (String::needs_conversion(tmp->length(), tmp->charset(),
++                                       cs, &dummy))
++          {
++            uint cnv_errs;
++            conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
++            interval->type_names[i]= strmake_root(thd->mem_root, conv.ptr(),
++                                                  conv.length());
++            interval->type_lengths[i]= conv.length();
++          }
++
++          // Strip trailing spaces.
++          lengthsp= cs->cset->lengthsp(cs, interval->type_names[i],
++                                       interval->type_lengths[i]);
++          interval->type_lengths[i]= lengthsp;
++          ((uchar *)interval->type_names[i])[lengthsp]= '\0';
++          if (sql_field->sql_type == MYSQL_TYPE_SET)
++          {
++            if (cs->coll->instr(cs, interval->type_names[i], 
++                                interval->type_lengths[i], 
++                                comma_buf, comma_length, NULL, 0))
++            {
++              my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "set", tmp->ptr());
++              DBUG_RETURN(TRUE);
++            }
++          }
++        }
++        sql_field->interval_list.empty(); // Don't need interval_list anymore
++      }
++
++      if (sql_field->sql_type == MYSQL_TYPE_SET)
++      {
++        uint32 field_length;
++        if (sql_field->def != NULL)
++        {
++          char *not_used;
++          uint not_used2;
++          bool not_found= 0;
++          String str, *def= sql_field->def->val_str(&str);
++          if (def == NULL) /* SQL "NULL" maps to NULL */
++          {
++            if ((sql_field->flags & NOT_NULL_FLAG) != 0)
++            {
++              my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++              DBUG_RETURN(TRUE);
++            }
++
++            /* else, NULL is an allowed value */
++            (void) find_set(interval, NULL, 0,
++                            cs, &not_used, &not_used2, &not_found);
++          }
++          else /* not NULL */
++          {
++            (void) find_set(interval, def->ptr(), def->length(),
++                            cs, &not_used, &not_used2, &not_found);
++          }
++
++          if (not_found)
++          {
++            my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++            DBUG_RETURN(TRUE);
++          }
++        }
++        calculate_interval_lengths(cs, interval, &dummy, &field_length);
++        sql_field->length= field_length + (interval->count - 1);
++      }
++      else  /* MYSQL_TYPE_ENUM */
++      {
++        uint32 field_length;
++        DBUG_ASSERT(sql_field->sql_type == MYSQL_TYPE_ENUM);
++        if (sql_field->def != NULL)
++        {
++          String str, *def= sql_field->def->val_str(&str);
++          if (def == NULL) /* SQL "NULL" maps to NULL */
++          {
++            if ((sql_field->flags & NOT_NULL_FLAG) != 0)
++            {
++              my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++              DBUG_RETURN(TRUE);
++            }
++
++            /* else, the defaults yield the correct length for NULLs. */
++          } 
++          else /* not NULL */
++          {
++            def->length(cs->cset->lengthsp(cs, def->ptr(), def->length()));
++            if (find_type2(interval, def->ptr(), def->length(), cs) == 0) /* not found */
++            {
++              my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++              DBUG_RETURN(TRUE);
++            }
++          }
++        }
++        calculate_interval_lengths(cs, interval, &field_length, &dummy);
++        sql_field->length= field_length;
++      }
++      set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
++    }
++
++    if (sql_field->sql_type == MYSQL_TYPE_BIT)
++    { 
++      sql_field->pack_flag= FIELDFLAG_NUMBER;
++      if (file->ha_table_flags() & HA_CAN_BIT_FIELD)
++        total_uneven_bit_length+= sql_field->length & 7;
++      else
++        sql_field->pack_flag|= FIELDFLAG_TREAT_BIT_AS_CHAR;
++    }
++
++    sql_field->create_length_to_internal_length();
++    if (prepare_blob_field(thd, sql_field))
++      DBUG_RETURN(TRUE);
++
++    if (!(sql_field->flags & NOT_NULL_FLAG))
++      null_fields++;
++
++    if (check_column_name(sql_field->field_name))
++    {
++      my_error(ER_WRONG_COLUMN_NAME, MYF(0), sql_field->field_name);
++      DBUG_RETURN(TRUE);
++    }
++
++    /* Check if we have used the same field name before */
++    for (dup_no=0; (dup_field=it2++) != sql_field; dup_no++)
++    {
++      if (my_strcasecmp(system_charset_info,
++			sql_field->field_name,
++			dup_field->field_name) == 0)
++      {
++	/*
++	  If this was a CREATE ... SELECT statement, accept a field
++	  redefinition if we are changing a field in the SELECT part
++	*/
++	if (field_no < select_field_pos || dup_no >= select_field_pos)
++	{
++	  my_error(ER_DUP_FIELDNAME, MYF(0), sql_field->field_name);
++	  DBUG_RETURN(TRUE);
++	}
++	else
++	{
++	  /* Field redefined */
++	  sql_field->def=		dup_field->def;
++	  sql_field->sql_type=		dup_field->sql_type;
++	  sql_field->charset=		(dup_field->charset ?
++					 dup_field->charset :
++					 create_info->default_table_charset);
++	  sql_field->length=		dup_field->char_length;
++          sql_field->pack_length=	dup_field->pack_length;
++          sql_field->key_length=	dup_field->key_length;
++	  sql_field->decimals=		dup_field->decimals;
++	  sql_field->create_length_to_internal_length();
++	  sql_field->unireg_check=	dup_field->unireg_check;
++          /* 
++            We're making one field from two, the result field will have
++            dup_field->flags as flags. If we've incremented null_fields
++            because of sql_field->flags, decrement it back.
++          */
++          if (!(sql_field->flags & NOT_NULL_FLAG))
++            null_fields--;
++	  sql_field->flags=		dup_field->flags;
++          sql_field->interval=          dup_field->interval;
++	  it2.remove();			// Remove first (create) definition
++	  select_field_pos--;
++	  break;
++	}
++      }
++    }
++    /* Don't pack rows in old tables if the user has requested this */
++    if ((sql_field->flags & BLOB_FLAG) ||
++	(sql_field->sql_type == MYSQL_TYPE_VARCHAR &&
++	create_info->row_type != ROW_TYPE_FIXED))
++      (*db_options)|= HA_OPTION_PACK_RECORD;
++    it2.rewind();
++  }
++
++  /* record_offset will be increased with 'length-of-null-bits' later */
++  record_offset= 0;
++  null_fields+= total_uneven_bit_length;
++
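++  /*
++    Second pass over the fields: finalize the pack flags and lengths and
++    assign each field its offset within the record.
++  */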
++  it.rewind();
++  while ((sql_field=it++))
++  {
++    DBUG_ASSERT(sql_field->charset != 0);
++
++    if (prepare_create_field(sql_field, &blob_columns, 
++			     &timestamps, &timestamps_with_niladic,
++			     file->ha_table_flags()))
++      DBUG_RETURN(TRUE);
++    if (sql_field->sql_type == MYSQL_TYPE_VARCHAR)
++      create_info->varchar= TRUE;
++    sql_field->offset= record_offset;
++    if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
++      auto_increment++;
++    record_offset+= sql_field->pack_length;
++  }
++  if (timestamps_with_niladic > 1)
++  {
++    my_message(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS,
++               ER(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  if (auto_increment > 1)
++  {
++    my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  if (auto_increment &&
++      (file->ha_table_flags() & HA_NO_AUTO_INCREMENT))
++  {
++    my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,
++               ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++
++  if (blob_columns && (file->ha_table_flags() & HA_NO_BLOBS))
++  {
++    my_message(ER_TABLE_CANT_HANDLE_BLOB, ER(ER_TABLE_CANT_HANDLE_BLOB),
++               MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++
++  /* Create keys */
++
++  List_iterator<Key> key_iterator(alter_info->key_list);
++  List_iterator<Key> key_iterator2(alter_info->key_list);
++  uint key_parts=0, fk_key_count=0;
++  bool primary_key=0,unique_key=0;
++  Key *key, *key2;
++  uint tmp, key_number;
++  /* special marker for keys to be ignored */
++  static char ignore_key[1];
++
++  /* Calculate the number of key segments */
++  *key_count= 0;
++
++  while ((key=key_iterator++))
++  {
++    DBUG_PRINT("info", ("key name: '%s'  type: %d", key->name ? key->name :
++                        "(none)" , key->type));
++    LEX_STRING key_name_str;
++    if (key->type == Key::FOREIGN_KEY)
++    {
++      fk_key_count++;
++      Foreign_key *fk_key= (Foreign_key*) key;
++      if (fk_key->ref_columns.elements &&
++	  fk_key->ref_columns.elements != fk_key->columns.elements)
++      {
++        my_error(ER_WRONG_FK_DEF, MYF(0),
++                 (fk_key->name ?  fk_key->name : "foreign key without name"),
++                 ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF));
++	DBUG_RETURN(TRUE);
++      }
++      continue;
++    }
++    (*key_count)++;
++    tmp=file->max_key_parts();
++    if (key->columns.elements > tmp)
++    {
++      my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp);
++      DBUG_RETURN(TRUE);
++    }
++    key_name_str.str= (char*) key->name;
++    key_name_str.length= key->name ? strlen(key->name) : 0;
++    if (check_string_char_length(&key_name_str, "", NAME_CHAR_LEN,
++                                 system_charset_info, 1))
++    {
++      my_error(ER_TOO_LONG_IDENT, MYF(0), key->name);
++      DBUG_RETURN(TRUE);
++    }
++    key_iterator2.rewind ();
++    if (key->type != Key::FOREIGN_KEY)
++    {
++      while ((key2 = key_iterator2++) != key)
++      {
++	/*
++          foreign_key_prefix(key, key2) returns 0 if key or key2, or both, is
++          'generated', and a generated key is a prefix of the other key.
++          Then we do not need the generated shorter key.
++        */
++        if ((key2->type != Key::FOREIGN_KEY &&
++             key2->name != ignore_key &&
++             !foreign_key_prefix(key, key2)))
++        {
++          /* TODO: issue warning message */
++          /* mark that the generated key should be ignored */
++          if (!key2->generated ||
++              (key->generated && key->columns.elements <
++               key2->columns.elements))
++            key->name= ignore_key;
++          else
++          {
++            key2->name= ignore_key;
++            key_parts-= key2->columns.elements;
++            (*key_count)--;
++          }
++          break;
++        }
++      }
++    }
++    if (key->name != ignore_key)
++      key_parts+=key->columns.elements;
++    else
++      (*key_count)--;
++    if (key->name && !tmp_table && (key->type != Key::PRIMARY) &&
++	!my_strcasecmp(system_charset_info,key->name,primary_key_name))
++    {
++      my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name);
++      DBUG_RETURN(TRUE);
++    }
++  }
++  tmp=file->max_keys();
++  if (*key_count > tmp)
++  {
++    my_error(ER_TOO_MANY_KEYS,MYF(0),tmp);
++    DBUG_RETURN(TRUE);
++  }
++
++  (*key_info_buffer)= key_info= (KEY*) sql_calloc(sizeof(KEY) * (*key_count));
++  key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts);
++  if (!*key_info_buffer || ! key_part_info)
++    DBUG_RETURN(TRUE);				// Out of memory
++
++  key_iterator.rewind();
++  key_number=0;
++  for (; (key=key_iterator++) ; key_number++)
++  {
++    uint key_length=0;
++    Key_part_spec *column;
++
++    if (key->name == ignore_key)
++    {
++      /* ignore redundant keys */
++      do
++	key=key_iterator++;
++      while (key && key->name == ignore_key);
++      if (!key)
++	break;
++    }
++
++    switch (key->type) {
++    case Key::MULTIPLE:
++	key_info->flags= 0;
++	break;
++    case Key::FULLTEXT:
++	key_info->flags= HA_FULLTEXT;
++	if ((key_info->parser_name= &key->key_create_info.parser_name)->str)
++          key_info->flags|= HA_USES_PARSER;
++        else
++          key_info->parser_name= 0;
++	break;
++    case Key::SPATIAL:
++#ifdef HAVE_SPATIAL
++	key_info->flags= HA_SPATIAL;
++	break;
++#else
++	my_error(ER_FEATURE_DISABLED, MYF(0),
++                 sym_group_geom.name, sym_group_geom.needed_define);
++	DBUG_RETURN(TRUE);
++#endif
++    case Key::FOREIGN_KEY:
++      key_number--;				// Skip this key
++      continue;
++    default:
++      key_info->flags = HA_NOSAME;
++      break;
++    }
++    if (key->generated)
++      key_info->flags|= HA_GENERATED_KEY;
++
++    key_info->key_parts=(uint8) key->columns.elements;
++    key_info->key_part=key_part_info;
++    key_info->usable_key_parts= key_number;
++    key_info->algorithm= key->key_create_info.algorithm;
++
++    if (key->type == Key::FULLTEXT)
++    {
++      if (!(file->ha_table_flags() & HA_CAN_FULLTEXT))
++      {
++	my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT),
++                   MYF(0));
++	DBUG_RETURN(TRUE);
++      }
++    }
++    /*
++       Make SPATIAL be RTREE by default.
++       SPATIAL is only allowed on BLOB or at least BINARY columns; this
++       should actually be replaced by a special GEOM type in the near
++       future, when the new frm file format is ready.
++       Check for the proper number of key parts:
++    */
++
++    /* TODO: Add proper checks if handler supports key_type and algorithm */
++    if (key_info->flags & HA_SPATIAL)
++    {
++      if (!(file->ha_table_flags() & HA_CAN_RTREEKEYS))
++      {
++        my_message(ER_TABLE_CANT_HANDLE_SPKEYS, ER(ER_TABLE_CANT_HANDLE_SPKEYS),
++                   MYF(0));
++        DBUG_RETURN(TRUE);
++      }
++      if (key_info->key_parts != 1)
++      {
++	my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX");
++	DBUG_RETURN(TRUE);
++      }
++    }
++    else if (key_info->algorithm == HA_KEY_ALG_RTREE)
++    {
++#ifdef HAVE_RTREE_KEYS
++      if ((key_info->key_parts & 1) == 1)
++      {
++	my_error(ER_WRONG_ARGUMENTS, MYF(0), "RTREE INDEX");
++	DBUG_RETURN(TRUE);
++      }
++      /* TODO: To be deleted */
++      my_error(ER_NOT_SUPPORTED_YET, MYF(0), "RTREE INDEX");
++      DBUG_RETURN(TRUE);
++#else
++      my_error(ER_FEATURE_DISABLED, MYF(0),
++               sym_group_rtree.name, sym_group_rtree.needed_define);
++      DBUG_RETURN(TRUE);
++#endif
++    }
++
++    /* Take block size from key part or table part */
++    /*
++      TODO: Add warning if block size changes. We can't do it here, as
++      this may depend on the size of the key
++    */
++    key_info->block_size= (key->key_create_info.block_size ?
++                           key->key_create_info.block_size :
++                           create_info->key_block_size);
++
++    if (key_info->block_size)
++      key_info->flags|= HA_USES_BLOCK_SIZE;
++
++    List_iterator<Key_part_spec> cols(key->columns), cols2(key->columns);
++    CHARSET_INFO *ft_key_charset=0;  // for FULLTEXT
++    for (uint column_nr=0 ; (column=cols++) ; column_nr++)
++    {
++      uint length;
++      Key_part_spec *dup_column;
++
++      it.rewind();
++      field=0;
++      while ((sql_field=it++) &&
++	     my_strcasecmp(system_charset_info,
++			   column->field_name,
++			   sql_field->field_name))
++	field++;
++      if (!sql_field)
++      {
++	my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
++	DBUG_RETURN(TRUE);
++      }
++      while ((dup_column= cols2++) != column)
++      {
++        if (!my_strcasecmp(system_charset_info,
++	     	           column->field_name, dup_column->field_name))
++	{
++	  my_printf_error(ER_DUP_FIELDNAME,
++			  ER(ER_DUP_FIELDNAME),MYF(0),
++			  column->field_name);
++	  DBUG_RETURN(TRUE);
++	}
++      }
++      cols2.rewind();
++      if (key->type == Key::FULLTEXT)
++      {
++	if ((sql_field->sql_type != MYSQL_TYPE_STRING &&
++	     sql_field->sql_type != MYSQL_TYPE_VARCHAR &&
++	     !f_is_blob(sql_field->pack_flag)) ||
++	    sql_field->charset == &my_charset_bin ||
++	    sql_field->charset->mbminlen > 1 || // ucs2 doesn't work yet
++	    (ft_key_charset && sql_field->charset != ft_key_charset))
++	{
++	    my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name);
++	    DBUG_RETURN(-1);
++	}
++	ft_key_charset=sql_field->charset;
++	/*
++	  For fulltext keys the keyseg length is 1 for blobs (it's ignored in
++	  the ft code anyway) and 0 (set to the column width later) for chars.
++	  It has to be the correct column width for chars, as char data are
++	  not prefixed with a length (unlike blobs, where the ft code takes
++	  the data length from a data prefix, ignoring column->length).
++	*/
++	column->length=test(f_is_blob(sql_field->pack_flag));
++      }
++      else
++      {
++	column->length*= sql_field->charset->mbmaxlen;
++
++        if (key->type == Key::SPATIAL)
++        {
++          if (column->length)
++          {
++            my_error(ER_WRONG_SUB_KEY, MYF(0));
++            DBUG_RETURN(TRUE);
++          }
++
++          if (!f_is_geom(sql_field->pack_flag))
++          {
++            my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX");
++            DBUG_RETURN(TRUE);
++          }
++        }
++
++	if (f_is_blob(sql_field->pack_flag) ||
++            (f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL))
++	{
++	  if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
++	  {
++	    my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name);
++	    DBUG_RETURN(TRUE);
++	  }
++          if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
++              Field::GEOM_POINT)
++            column->length= 25;
++	  if (!column->length)
++	  {
++	    my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name);
++	    DBUG_RETURN(TRUE);
++	  }
++	}
++#ifdef HAVE_SPATIAL
++	if (key->type == Key::SPATIAL)
++	{
++	  if (!column->length)
++	  {
++	    /*
++              4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case
++              Later we'll extend this code to support more dimensions
++	    */
++	    column->length= 4*sizeof(double);
++	  }
++	}
++#endif
++	if (!(sql_field->flags & NOT_NULL_FLAG))
++	{
++	  if (key->type == Key::PRIMARY)
++	  {
++	    /* Implicitly set primary key fields to NOT NULL for ISO conf. */
++	    sql_field->flags|= NOT_NULL_FLAG;
++	    sql_field->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
++            null_fields--;
++	  }
++	  else
++          {
++            key_info->flags|= HA_NULL_PART_KEY;
++            if (!(file->ha_table_flags() & HA_NULL_IN_KEY))
++            {
++              my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
++              DBUG_RETURN(TRUE);
++            }
++            if (key->type == Key::SPATIAL)
++            {
++              my_message(ER_SPATIAL_CANT_HAVE_NULL,
++                         ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0));
++              DBUG_RETURN(TRUE);
++            }
++          }
++	}
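++	/*
++	  An AUTO_INCREMENT column satisfies the "must be a key" requirement
++	  when it is the first key part, or anywhere in the key if the engine
++	  supports HA_AUTO_PART_KEY; auto_increment is decremented to record
++	  that the column is used.
++	*/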
++	if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
++	{
++	  if (column_nr == 0 || (file->ha_table_flags() & HA_AUTO_PART_KEY))
++	    auto_increment--;			// Field is used
++	}
++      }
++
++      key_part_info->fieldnr= field;
++      key_part_info->offset=  (uint16) sql_field->offset;
++      key_part_info->key_type=sql_field->pack_flag;
++      length= sql_field->key_length;
++
++      if (column->length)
++      {
++	if (f_is_blob(sql_field->pack_flag))
++	{
++	  if ((length=column->length) > max_key_length ||
++	      length > file->max_key_part_length())
++	  {
++	    length=MYSQL_MIN(max_key_length, file->max_key_part_length());
++	    if (key->type == Key::MULTIPLE)
++	    {
++	      /* not a critical problem */
++	      char warn_buff[MYSQL_ERRMSG_SIZE];
++	      my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
++			  length);
++	      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++			   ER_TOO_LONG_KEY, warn_buff);
++              /* Align key length to multibyte char boundary */
++              length-= length % sql_field->charset->mbmaxlen;
++	    }
++	    else
++	    {
++	      my_error(ER_TOO_LONG_KEY,MYF(0),length);
++	      DBUG_RETURN(TRUE);
++	    }
++	  }
++	}
++	else if (!f_is_geom(sql_field->pack_flag) &&
++		  (column->length > length ||
++                   !Field::type_can_have_key_part (sql_field->sql_type) ||
++		   ((f_is_packed(sql_field->pack_flag) ||
++		     ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
++		      (key_info->flags & HA_NOSAME))) &&
++		    column->length != length)))
++	{
++	  my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0));
++	  DBUG_RETURN(TRUE);
++	}
++	else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
++	  length=column->length;
++      }
++      else if (length == 0)
++      {
++	my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name);
++	  DBUG_RETURN(TRUE);
++      }
++      if (length > file->max_key_part_length() && key->type != Key::FULLTEXT)
++      {
++        length= file->max_key_part_length();
++	if (key->type == Key::MULTIPLE)
++	{
++	  /* not a critical problem */
++	  char warn_buff[MYSQL_ERRMSG_SIZE];
++	  my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
++		      length);
++	  push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++		       ER_TOO_LONG_KEY, warn_buff);
++          /* Align key length to multibyte char boundary */
++          length-= length % sql_field->charset->mbmaxlen;
++	}
++	else
++	{
++	  my_error(ER_TOO_LONG_KEY,MYF(0),length);
++	  DBUG_RETURN(TRUE);
++	}
++      }
++      key_part_info->length=(uint16) length;
++      /* Use packed keys for long strings on the first column */
++      if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
++          !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
++	  (length >= KEY_DEFAULT_PACK_LENGTH &&
++	   (sql_field->sql_type == MYSQL_TYPE_STRING ||
++	    sql_field->sql_type == MYSQL_TYPE_VARCHAR ||
++	    sql_field->pack_flag & FIELDFLAG_BLOB)))
++      {
++	if ((column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) ||
++            sql_field->sql_type == MYSQL_TYPE_VARCHAR)
++	  key_info->flags|= HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY;
++	else
++	  key_info->flags|= HA_PACK_KEY;
++      }
++      /* Check if the key segment is partial, set the key flag accordingly */
++      if (length != sql_field->key_length)
++        key_info->flags|= HA_KEY_HAS_PART_KEY_SEG;
++
++      key_length+=length;
++      key_part_info++;
++
++      /* Create the key name based on the first column (if not given) */
++      if (column_nr == 0)
++      {
++	if (key->type == Key::PRIMARY)
++	{
++	  if (primary_key)
++	  {
++	    my_message(ER_MULTIPLE_PRI_KEY, ER(ER_MULTIPLE_PRI_KEY),
++                       MYF(0));
++	    DBUG_RETURN(TRUE);
++	  }
++	  key_name=primary_key_name;
++	  primary_key=1;
++	}
++	else if (!(key_name = key->name))
++	  key_name=make_unique_key_name(sql_field->field_name,
++					*key_info_buffer, key_info);
++	if (check_if_keyname_exists(key_name, *key_info_buffer, key_info))
++	{
++	  my_error(ER_DUP_KEYNAME, MYF(0), key_name);
++	  DBUG_RETURN(TRUE);
++	}
++	key_info->name=(char*) key_name;
++      }
++    }
++    if (!key_info->name || check_column_name(key_info->name))
++    {
++      my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key_info->name);
++      DBUG_RETURN(TRUE);
++    }
++    if (!(key_info->flags & HA_NULL_PART_KEY))
++      unique_key=1;
++    key_info->key_length=(uint16) key_length;
++    if (key_length > max_key_length && key->type != Key::FULLTEXT)
++    {
++      my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
++      DBUG_RETURN(TRUE);
++    }
++    key_info++;
++  }
++  if (!unique_key && !primary_key &&
++      (file->ha_table_flags() & HA_REQUIRE_PRIMARY_KEY))
++  {
++    my_message(ER_REQUIRES_PRIMARY_KEY, ER(ER_REQUIRES_PRIMARY_KEY), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  if (auto_increment > 0)
++  {
++    my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  /* Sort keys in optimized order */
++  my_qsort((uchar*) *key_info_buffer, *key_count, sizeof(KEY),
++	   (qsort_cmp) sort_keys);
++  create_info->null_bits= null_fields;
++
++  /* Check fields. */
++  it.rewind();
++  while ((sql_field=it++))
++  {
++    Field::utype type= (Field::utype) MTYP_TYPENR(sql_field->unireg_check);
++
++    if (thd->variables.sql_mode & MODE_NO_ZERO_DATE &&
++        !sql_field->def &&
++        sql_field->sql_type == MYSQL_TYPE_TIMESTAMP &&
++        (sql_field->flags & NOT_NULL_FLAG) &&
++        (type == Field::NONE || type == Field::TIMESTAMP_UN_FIELD))
++    {
++      /*
++        An error should be reported if:
++          - NO_ZERO_DATE SQL mode is active;
++          - there is no explicit DEFAULT clause (default column value);
++          - this is a TIMESTAMP column;
++          - the column is not NULL;
++          - this is not the DEFAULT CURRENT_TIMESTAMP column.
++
++        In other words, an error should be reported if
++          - NO_ZERO_DATE SQL mode is active;
++          - the column definition is equivalent to
++            'column_name TIMESTAMP DEFAULT 0'.
++      */
++
++      my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
++      DBUG_RETURN(TRUE);
++    }
++  }
++
++  DBUG_RETURN(FALSE);
++}
++
++
++/*
++  Set table default charset, if not set
++
++  SYNOPSIS
++    set_table_default_charset()
++    create_info        Table create information
++
++  DESCRIPTION
++    If the table character set was not given explicitly,
++    let's fetch the database default character set and
++    apply it to the table.
++*/
++
++static void set_table_default_charset(THD *thd,
++				      HA_CREATE_INFO *create_info, char *db)
++{
++  /*
++    If the table character set was not given explicitly,
++    let's fetch the database default character set and
++    apply it to the table.
++  */
++  if (!create_info->default_table_charset)
++  {
++    HA_CREATE_INFO db_info;
++
++    load_db_opt_by_name(thd, db, &db_info);
++
++    create_info->default_table_charset= db_info.default_table_charset;
++  }
++}
++
++
++/*
++  Extend long VARCHAR fields to blob & prepare field if it's a blob
++
++  SYNOPSIS
++    prepare_blob_field()
++    sql_field		Field to check
++
++  RETURN
++    0	ok
++    1	Error (sql_field can't be converted to blob)
++        In this case the error is given
++*/
++
++static bool prepare_blob_field(THD *thd, Create_field *sql_field)
++{
++  DBUG_ENTER("prepare_blob_field");
++
++  if (sql_field->length > MAX_FIELD_VARCHARLENGTH &&
++      !(sql_field->flags & BLOB_FLAG))
++  {
++    /* Convert long VARCHAR columns to TEXT or BLOB */
++    char warn_buff[MYSQL_ERRMSG_SIZE];
++
++    if (sql_field->def || (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES |
++                                                      MODE_STRICT_ALL_TABLES)))
++    {
++      my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
++               MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen);
++      DBUG_RETURN(1);
++    }
++    sql_field->sql_type= MYSQL_TYPE_BLOB;
++    sql_field->flags|= BLOB_FLAG;
++    my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_AUTO_CONVERT), sql_field->field_name,
++            (sql_field->charset == &my_charset_bin) ? "VARBINARY" : "VARCHAR",
++            (sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
++                 warn_buff);
++  }
++
++  if ((sql_field->flags & BLOB_FLAG) && sql_field->length)
++  {
++    if (sql_field->sql_type == FIELD_TYPE_BLOB ||
++        sql_field->sql_type == FIELD_TYPE_TINY_BLOB ||
++        sql_field->sql_type == FIELD_TYPE_MEDIUM_BLOB)
++    {
++      /* The user has given a length to the blob column */
++      sql_field->sql_type= get_blob_type_from_length(sql_field->length);
++      sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
++    }
++    sql_field->length= 0;
++  }
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Preparation of Create_field for SP function return values.
++  Based on code used in the inner loop of mysql_prepare_create_table()
++  above.
++
++  SYNOPSIS
++    sp_prepare_create_field()
++    thd			Thread object
++    sql_field		Field to prepare
++
++  DESCRIPTION
++    Prepares the field structures for field creation.
++
++*/
++
++void sp_prepare_create_field(THD *thd, Create_field *sql_field)
++{
++  if (sql_field->sql_type == MYSQL_TYPE_SET ||
++      sql_field->sql_type == MYSQL_TYPE_ENUM)
++  {
++    uint32 field_length, dummy;
++    if (sql_field->sql_type == MYSQL_TYPE_SET)
++    {
++      calculate_interval_lengths(sql_field->charset,
++                                 sql_field->interval, &dummy, 
++                                 &field_length);
++      sql_field->length= field_length + 
++                         (sql_field->interval->count - 1);
++    }
++    else /* MYSQL_TYPE_ENUM */
++    {
++      calculate_interval_lengths(sql_field->charset,
++                                 sql_field->interval,
++                                 &field_length, &dummy);
++      sql_field->length= field_length;
++    }
++    set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
++  }
++
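++  /*
++    There is no handler here to check HA_CAN_BIT_FIELD, so BIT is always
++    treated as CHAR for stored routine return values.
++  */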
++  if (sql_field->sql_type == MYSQL_TYPE_BIT)
++  {
++    sql_field->pack_flag= FIELDFLAG_NUMBER |
++                          FIELDFLAG_TREAT_BIT_AS_CHAR;
++  }
++  sql_field->create_length_to_internal_length();
++  DBUG_ASSERT(sql_field->def == 0);
++  /* Can't go wrong as sql_field->def is not defined */
++  (void) prepare_blob_field(thd, sql_field);
++}
++
++
++/*
++  Write CREATE TABLE binlog
++
++  SYNOPSIS
++    write_create_table_bin_log()
++    thd               Thread object
++    create_info       Create information
++    internal_tmp_table  Set to 1 if this is an internal temporary table
++
++  DESCRIPTION
++    This function is only called from mysql_create_table_no_lock and
++    mysql_create_table.
++
++  RETURN VALUES
++    The return value of write_bin_log(), or 0 if nothing was logged
++ */
++static inline int write_create_table_bin_log(THD *thd,
++                                             const HA_CREATE_INFO *create_info,
++                                             bool internal_tmp_table)
++{
++  /*
++    Don't write statement if:
++    - It is an internal temporary table,
++    - Row-based logging is used and we are creating a temporary table, or
++    - The binary log is not open.
++    Otherwise, the statement shall be binlogged.
++   */
++  if (!internal_tmp_table &&
++      (!thd->current_stmt_binlog_row_based ||
++       (thd->current_stmt_binlog_row_based &&
++        !(create_info->options & HA_LEX_CREATE_TMP_TABLE))))
++    return write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++  return 0;
++}
++
++
++/*
++  Create a table
++
++  SYNOPSIS
++    mysql_create_table_no_lock()
++    thd			Thread object
++    db			Database
++    table_name		Table name
++    create_info	        Create information (like MAX_ROWS)
++    fields		List of fields to create
++    keys		List of keys to create
++    internal_tmp_table  Set to 1 if this is an internal temporary table
++			(From ALTER TABLE)
++    select_field_count
++
++  DESCRIPTION
++    If one creates a temporary table, this is automatically opened
++
++    Note that this function assumes that the caller has already taken a
++    name-lock on the table being created, or used some other way to ensure
++    that concurrent operations won't intervene. mysql_create_table()
++    is a wrapper that can be used for this.
++
++    no_log is needed for the case of CREATE ... SELECT,
++    as the logging will be done later in sql_insert.cc.
++    select_field_count is also used for CREATE ... SELECT,
++    and must be zero for a standard CREATE TABLE.
++
++  RETURN VALUES
++    FALSE OK
++    TRUE  error
++*/
++
++bool mysql_create_table_no_lock(THD *thd,
++                                const char *db, const char *table_name,
++                                HA_CREATE_INFO *create_info,
++                                Alter_info *alter_info,
++                                bool internal_tmp_table,
++                                uint select_field_count)
++{
++  char		path[FN_REFLEN + 1];
++  uint          path_length;
++  const char	*alias;
++  uint		db_options, key_count;
++  KEY		*key_info_buffer;
++  handler	*file;
++  bool		error= TRUE;
++  DBUG_ENTER("mysql_create_table_no_lock");
++  DBUG_PRINT("enter", ("db: '%s'  table: '%s'  tmp: %d",
++                       db, table_name, internal_tmp_table));
++
++
++  /* Check for duplicate fields and check type of table to create */
++  if (!alter_info->create_list.elements)
++  {
++    my_message(ER_TABLE_MUST_HAVE_COLUMNS, ER(ER_TABLE_MUST_HAVE_COLUMNS),
++               MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++  if (check_engine(thd, table_name, create_info))
++    DBUG_RETURN(TRUE);
++  db_options= create_info->table_options;
++  if (create_info->row_type == ROW_TYPE_DYNAMIC)
++    db_options|=HA_OPTION_PACK_RECORD;
++  alias= table_case_name(create_info, table_name);
++  if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
++                              create_info->db_type)))
++  {
++    mem_alloc_error(sizeof(handler));
++    DBUG_RETURN(TRUE);
++  }
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  partition_info *part_info= thd->work_part_info;
++
++  if (!part_info && create_info->db_type->partition_flags &&
++      (create_info->db_type->partition_flags() & HA_USE_AUTO_PARTITION))
++  {
++    /*
++      Table is not defined as a partitioned table but the engine handles
++      all tables as partitioned. The handler will set up the partition info
++      object with the default settings.
++    */
++    thd->work_part_info= part_info= new partition_info();
++    if (!part_info)
++    {
++      mem_alloc_error(sizeof(partition_info));
++      DBUG_RETURN(TRUE);
++    }
++    file->set_auto_partitions(part_info);
++    part_info->default_engine_type= create_info->db_type;
++    part_info->is_auto_partitioned= TRUE;
++  }
++  if (part_info)
++  {
++    /*
++      The table has been specified as a partitioned table.
++      If this is part of an ALTER TABLE the handler will be the partition
++      handler but we need to specify the default handler to use for
++      partitions also in the call to check_partition_info. We transport
++      this information in the default_db_type variable, it is either
++      DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
++
++      Check that we don't use foreign keys in the table since it won't
++      work even with InnoDB beneath it.
++    */
++    List_iterator<Key> key_iterator(alter_info->key_list);
++    Key *key;
++    handlerton *part_engine_type= create_info->db_type;
++    char *part_syntax_buf;
++    uint syntax_len;
++    handlerton *engine_type;
++    if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
++    {
++      my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
++      goto err;
++    }
++    while ((key= key_iterator++))
++    {
++      if (key->type == Key::FOREIGN_KEY &&
++          !part_info->is_auto_partitioned)
++      {
++        my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
++        goto err;
++      }
++    }
++    if ((part_engine_type == partition_hton) &&
++        part_info->default_engine_type)
++    {
++      /*
++        This only happens at ALTER TABLE.
++        default_engine_type was assigned from the engine set in the ALTER
++        TABLE command.
++      */
++      ;
++    }
++    else
++    {
++      if (create_info->used_fields & HA_CREATE_USED_ENGINE)
++      {
++        part_info->default_engine_type= create_info->db_type;
++      }
++      else
++      {
++        if (part_info->default_engine_type == NULL)
++        {
++          part_info->default_engine_type= ha_checktype(thd,
++                                          DB_TYPE_DEFAULT, 0, 0);
++        }
++      }
++    }
++    DBUG_PRINT("info", ("db_type = %s create_info->db_type = %s",
++             ha_resolve_storage_engine_name(part_info->default_engine_type),
++             ha_resolve_storage_engine_name(create_info->db_type)));
++    if (part_info->check_partition_info(thd, &engine_type, file,
++                                        create_info, TRUE))
++      goto err;
++    part_info->default_engine_type= engine_type;
++
++    /*
++      We run the partitioning parser in reverse and generate a standard
++      format for the syntax stored in the frm file.
++    */
++    if (!(part_syntax_buf= generate_partition_syntax(part_info,
++                                                     &syntax_len,
++                                                     TRUE, TRUE)))
++      goto err;
++    part_info->part_info_string= part_syntax_buf;
++    part_info->part_info_len= syntax_len;
++    if ((!(engine_type->partition_flags &&
++           engine_type->partition_flags() & HA_CAN_PARTITION)) ||
++        create_info->db_type == partition_hton)
++    {
++      /*
++        The handler assigned to the table cannot handle partitioning.
++        Assign the partition handler as the handler of the table.
++      */
++      DBUG_PRINT("info", ("db_type: %s",
++                        ha_resolve_storage_engine_name(create_info->db_type)));
++      delete file;
++      create_info->db_type= partition_hton;
++      if (!(file= get_ha_partition(part_info)))
++      {
++        DBUG_RETURN(TRUE);
++      }
++      /*
++        If we have a default number of partitions or subpartitions we
++        might need to set up the part_info object such that it
++        creates a proper .par file. The current part_info object is
++        only used to create the frm-file and .par-file.
++      */
++      if (part_info->use_default_no_partitions &&
++          part_info->no_parts &&
++          (int)part_info->no_parts !=
++          file->get_default_no_partitions(create_info))
++      {
++        uint i;
++        List_iterator<partition_element> part_it(part_info->partitions);
++        part_it++;
++        DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
++        for (i= 1; i < part_info->partitions.elements; i++)
++          (part_it++)->part_state= PART_TO_BE_DROPPED;
++      }
++      else if (part_info->is_sub_partitioned() &&
++               part_info->use_default_no_subpartitions &&
++               part_info->no_subparts &&
++               (int)part_info->no_subparts !=
++                 file->get_default_no_partitions(create_info))
++      {
++        DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
++        part_info->no_subparts= file->get_default_no_partitions(create_info);
++      }
++    }
++    else if (create_info->db_type != engine_type)
++    {
++      /*
++        We come here when we don't use a partitioned handler.
++        Since we use a partitioned table it must be "native partitioned".
++        We have switched engine from the default, most likely because
++        engines were only specified in the partition clauses.
++      */
++      delete file;
++      if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
++                                  engine_type)))
++      {
++        mem_alloc_error(sizeof(handler));
++        DBUG_RETURN(TRUE);
++      }
++    }
++  }
++#endif
++
++  set_table_default_charset(thd, create_info, (char*) db);
++
++  if (mysql_prepare_create_table(thd, create_info, alter_info,
++                                 internal_tmp_table,
++                                 &db_options, file,
++                                 &key_info_buffer, &key_count,
++                                 select_field_count))
++    goto err;
++
++  /* Build the table file path (temporary tables get a special path) */
++  if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
++  {
++    path_length= build_tmptable_filename(thd, path, sizeof(path));
++    create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
++  }
++  else  
++  {
++    path_length= build_table_filename(path, sizeof(path) - 1, db, alias, reg_ext,
++                                      internal_tmp_table ? FN_IS_TMP : 0);
++  }
++
++  /* Check if table already exists */
++  if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
++      find_temporary_table(thd, db, table_name))
++  {
++    if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
++    {
++      create_info->table_existed= 1;		// Mark that table existed
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                          ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
++                          alias);
++      error= write_create_table_bin_log(thd, create_info, internal_tmp_table);
++      goto err;
++    }
++    my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
++    goto err;
++  }
++
++  VOID(pthread_mutex_lock(&LOCK_open));
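++  /*
++    For regular (non-temporary) tables the presence of the .frm file on
++    disk means the table already exists; IF NOT EXISTS turns this into a
++    warning instead of an error.
++  */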
++  if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
++  {
++    if (!access(path,F_OK))
++    {
++      if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
++        goto warn;
++      my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
++      goto unlock_and_end;
++    }
++    /*
++      We don't assert here, but check the result, because the table could be
++      in the table definition cache and at the same time the .frm could be
++      missing from the disk, in case of manual intervention which deletes
++      the .frm file. The user has to use FLUSH TABLES; to clear the cache.
++      Then she could create the table. This case is pretty obscure and
++      therefore we don't introduce a new error message only for it.
++    */
++    if (get_cached_table_share(db, table_name))
++    {
++      my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
++      goto unlock_and_end;
++    }
++  }
++
++  /*
++    Check that table with given name does not already
++    exist in any storage engine. In such a case it should
++    be discovered and the error ER_TABLE_EXISTS_ERROR be returned
++    unless the user specified CREATE TABLE IF NOT EXISTS.
++    The LOCK_open mutex has been locked to make sure no
++    one else is attempting to discover the table. Since
++    it's not on disk as a frm file, no one could be using it!
++  */
++  if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
++  {
++    bool create_if_not_exists =
++      create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS;
++    int retcode = ha_table_exists_in_engine(thd, db, table_name);
++    DBUG_PRINT("info", ("exists_in_engine: %u",retcode));
++    switch (retcode)
++    {
++      case HA_ERR_NO_SUCH_TABLE:
++        /* Normal case, no table exists; we can go ahead and create it */
++        break;
++      case HA_ERR_TABLE_EXIST:
++        DBUG_PRINT("info", ("Table existed in handler"));
++
++        if (create_if_not_exists)
++          goto warn;
++        my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
++        goto unlock_and_end;
++        break;
++      default:
++        DBUG_PRINT("info", ("error: %u from storage engine", retcode));
++        my_error(retcode, MYF(0),table_name);
++        goto unlock_and_end;
++    }
++  }
++
++  thd_proc_info(thd, "creating table");
++  create_info->table_existed= 0;		// Mark that table is created
++
++#ifdef HAVE_READLINK
++  if (test_if_data_home_dir(create_info->data_file_name))
++  {
++    my_error(ER_WRONG_ARGUMENTS, MYF(0), "DATA DIRECTORY");
++    goto unlock_and_end;
++  }
++  if (test_if_data_home_dir(create_info->index_file_name))
++  {
++    my_error(ER_WRONG_ARGUMENTS, MYF(0), "INDEX DIRECTORY");
++    goto unlock_and_end;
++  }
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  if (check_partition_dirs(thd->lex->part_info))
++  {
++    goto unlock_and_end;
++  }
++#endif /* WITH_PARTITION_STORAGE_ENGINE */
++
++  if (!my_use_symdir || (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
++#endif /* HAVE_READLINK */
++  {
++    if (create_info->data_file_name)
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                          WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
++                          "DATA DIRECTORY");
++    if (create_info->index_file_name)
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
++                          WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
++                          "INDEX DIRECTORY");
++    create_info->data_file_name= create_info->index_file_name= 0;
++  }
++  create_info->table_options=db_options;
++
++  path[path_length - reg_ext_length]= '\0'; // Remove .frm extension
++  if (rea_create_table(thd, path, db, table_name,
++                       create_info, alter_info->create_list,
++                       key_count, key_info_buffer, file))
++    goto unlock_and_end;
++
++  if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
++  {
++    /* Open table and put in temporary table list */
++    if (!(open_temporary_table(thd, path, db, table_name, 1)))
++    {
++      (void) rm_temporary_table(create_info->db_type, path);
++      goto unlock_and_end;
++    }
++    thd->thread_specific_used= TRUE;
++  }
++
++  error= write_create_table_bin_log(thd, create_info, internal_tmp_table);
++unlock_and_end:
++  VOID(pthread_mutex_unlock(&LOCK_open));
++
++err:
++  thd_proc_info(thd, "After create");
++  delete file;
++  DBUG_RETURN(error);
++
++warn:
++  error= FALSE;
++  push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                      ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
++                      alias);
++  create_info->table_existed= 1;		// Mark that table existed
++  error= write_create_table_bin_log(thd, create_info, internal_tmp_table);
++  goto unlock_and_end;
++}
++
++
++/*
++  Database and name-locking aware wrapper for mysql_create_table_no_lock(),
++*/
++
++bool mysql_create_table(THD *thd, const char *db, const char *table_name,
++                        HA_CREATE_INFO *create_info,
++                        Alter_info *alter_info,
++                        bool internal_tmp_table,
++                        uint select_field_count)
++{
++  TABLE *name_lock= 0;
++  bool result;
++  DBUG_ENTER("mysql_create_table");
++
++  /* Wait for any database locks */
++  pthread_mutex_lock(&LOCK_lock_db);
++  while (!thd->killed &&
++         hash_search(&lock_db_cache,(uchar*) db, strlen(db)))
++  {
++    wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
++    pthread_mutex_lock(&LOCK_lock_db);
++  }
++
++  if (thd->killed)
++  {
++    pthread_mutex_unlock(&LOCK_lock_db);
++    DBUG_RETURN(TRUE);
++  }
++  creating_table++;
++  pthread_mutex_unlock(&LOCK_lock_db);
++
++  if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
++  {
++    if (lock_table_name_if_not_cached(thd, db, table_name, &name_lock))
++    {
++      result= TRUE;
++      goto unlock;
++    }
++    if (!name_lock)
++    {
++      if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
++      {
++        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                            ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
++                            table_name);
++        create_info->table_existed= 1;
++        result= FALSE;
++        write_create_table_bin_log(thd, create_info, internal_tmp_table);
++      }
++      else
++      {
++        my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
++        result= TRUE;
++      }
++      goto unlock;
++    }
++  }
++
++  result= mysql_create_table_no_lock(thd, db, table_name, create_info,
++                                     alter_info,
++                                     internal_tmp_table,
++                                     select_field_count);
++
++unlock:
++  if (name_lock)
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlink_open_table(thd, name_lock, FALSE);
++    pthread_mutex_unlock(&LOCK_open);
++  }
++  pthread_mutex_lock(&LOCK_lock_db);
++  if (!--creating_table && creating_database)
++    pthread_cond_signal(&COND_refresh);
++  pthread_mutex_unlock(&LOCK_lock_db);
++  DBUG_RETURN(result);
++}
++
++
++/*
++** Name the key after the first field, with an optional '_#' suffix
++**/
++
++static bool
++check_if_keyname_exists(const char *name, KEY *start, KEY *end)
++{
++  for (KEY *key=start ; key != end ; key++)
++    if (!my_strcasecmp(system_charset_info,name,key->name))
++      return 1;
++  return 0;
++}
++
++
++static char *
++make_unique_key_name(const char *field_name,KEY *start,KEY *end)
++{
++  char buff[MAX_FIELD_NAME],*buff_end;
++
++  if (!check_if_keyname_exists(field_name,start,end) &&
++      my_strcasecmp(system_charset_info,field_name,primary_key_name))
++    return (char*) field_name;			// Use fieldname
++  buff_end=strmake(buff,field_name, sizeof(buff)-4);
++
++  /*
++    Only 3 chars + '\0' left, so we need to limit to 2 digits.
++    This is ok as we can't have more than 100 keys anyway
++  */
++  for (uint i=2 ; i< 100; i++)
++  {
++    *buff_end= '_';
++    int10_to_str(i, buff_end+1, 10);
++    if (!check_if_keyname_exists(buff,start,end))
++      return sql_strdup(buff);
++  }
++  return (char*) "not_specified";		// Should never happen
++}
++
++
++/****************************************************************************
++** Alter a table definition
++****************************************************************************/
++
++
++/*
++  Rename a table.
++
++  SYNOPSIS
++    mysql_rename_table()
++      base                      The handlerton handle.
++      old_db                    The old database name.
++      old_name                  The old table name.
++      new_db                    The new database name.
++      new_name                  The new table name.
++      flags                     flags for build_table_filename().
++                                FN_FROM_IS_TMP old_name is temporary.
++                                FN_TO_IS_TMP   new_name is temporary.
++                                NO_FRM_RENAME  Don't rename the FRM file
++                                but only the table in the storage engine.
++
++  RETURN
++    FALSE   OK
++    TRUE    Error
++*/
++
++bool
++mysql_rename_table(handlerton *base, const char *old_db,
++                   const char *old_name, const char *new_db,
++                   const char *new_name, uint flags)
++{
++  THD *thd= current_thd;
++  char from[FN_REFLEN + 1], to[FN_REFLEN + 1],
++    lc_from[FN_REFLEN + 1], lc_to[FN_REFLEN + 1];
++  char *from_base= from, *to_base= to;
++  char tmp_name[NAME_LEN+1];
++  handler *file;
++  int error=0;
++  DBUG_ENTER("mysql_rename_table");
++  DBUG_PRINT("enter", ("old: '%s'.'%s'  new: '%s'.'%s'",
++                       old_db, old_name, new_db, new_name));
++
++  file= (base == NULL ? 0 :
++         get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base));
++
++  build_table_filename(from, sizeof(from) - 1, old_db, old_name, "",
++                       flags & FN_FROM_IS_TMP);
++  build_table_filename(to, sizeof(to) - 1, new_db, new_name, "",
++                       flags & FN_TO_IS_TMP);
++
++  /*
++    If lower_case_table_names == 2 (case-preserving but case-insensitive
++    file system) and the storage is not HA_FILE_BASED, we need to provide
++    a lowercase file name, but we leave the .frm in mixed case.
++   */
++  if (lower_case_table_names == 2 && file &&
++      !(file->ha_table_flags() & HA_FILE_BASED))
++  {
++    strmov(tmp_name, old_name);
++    my_casedn_str(files_charset_info, tmp_name);
++    build_table_filename(lc_from, sizeof(lc_from) - 1, old_db, tmp_name, "",
++                         flags & FN_FROM_IS_TMP);
++    from_base= lc_from;
++
++    strmov(tmp_name, new_name);
++    my_casedn_str(files_charset_info, tmp_name);
++    build_table_filename(lc_to, sizeof(lc_to) - 1, new_db, tmp_name, "",
++                         flags & FN_TO_IS_TMP);
++    to_base= lc_to;
++  }
++
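++  /*
++    Rename the table in the storage engine first and then the .frm file;
++    if the .frm rename fails, the engine-level rename is rolled back.
++  */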
++  if (!file || !(error=file->ha_rename_table(from_base, to_base)))
++  {
++    if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
++    {
++      error=my_errno;
++      /* Restore old file name */
++      if (file)
++        file->ha_rename_table(to_base, from_base);
++    }
++  }
++  delete file;
++  if (error == HA_ERR_WRONG_COMMAND)
++    my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
++  else if (error)
++    my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);
++  DBUG_RETURN(error != 0);
++}
++
++
++/*
++  Force all other threads to stop using the table
++
++  SYNOPSIS
++    wait_while_table_is_used()
++    thd			Thread handler
++    table		Table to remove from cache
++    function            HA_EXTRA_PREPARE_FOR_DROP if table is to be deleted
++                        HA_EXTRA_FORCE_REOPEN if table is not to be used
++                        HA_EXTRA_PREPARE_FOR_RENAME if table is to be renamed
++  NOTES
++   When returning, the table will be unusable for other threads until
++   the table is closed.
++
++  PREREQUISITES
++    Lock on LOCK_open
++    Win32 clients must also have a WRITE LOCK on the table !
++*/
++
++void wait_while_table_is_used(THD *thd, TABLE *table,
++                              enum ha_extra_function function)
++{
++  DBUG_ENTER("wait_while_table_is_used");
++  DBUG_PRINT("enter", ("table: '%s'  share: 0x%lx  db_stat: %u  version: %lu",
++                       table->s->table_name.str, (ulong) table->s,
++                       table->db_stat, table->s->version));
++
++  safe_mutex_assert_owner(&LOCK_open);
++
++  VOID(table->file->extra(function));
++  /* Mark all tables that are in use as 'old' */
++  mysql_lock_abort(thd, table, TRUE);	/* end threads waiting on lock */
++
++  /* Wait until there are no other threads that have this table open */
++  remove_table_from_cache(thd, table->s->db.str,
++                          table->s->table_name.str,
++                          RTFC_WAIT_OTHER_THREAD_FLAG);
++  DBUG_VOID_RETURN;
++}
++
++/*
++  Close a cached table
++
++  SYNOPSIS
++    close_cached_table()
++    thd			Thread handler
++    table		Table to remove from cache
++
++  NOTES
++    Function ends by signaling threads waiting for the table to try to
++    reopen the table.
++
++  PREREQUISITES
++    Lock on LOCK_open
++    Win32 clients must also have a WRITE LOCK on the table !
++*/
++
++void close_cached_table(THD *thd, TABLE *table)
++{
++  DBUG_ENTER("close_cached_table");
++
++  wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
++  /* Release the lock if it was not acquired with LOCK TABLES */
++  if (thd->lock)
++  {
++    mysql_unlock_tables(thd, thd->lock);
++    thd->lock=0;			// Start locked threads
++  }
++  /* Close all copies of 'table'.  This also frees all LOCK TABLES lock */
++  unlink_open_table(thd, table, TRUE);
++
++  /* When lock on LOCK_open is freed other threads can continue */
++  broadcast_refresh();
++  DBUG_VOID_RETURN;
++}
++
++static int send_check_errmsg(THD *thd, TABLE_LIST* table,
++			     const char* operator_name, const char* errmsg)
++
++{
++  Protocol *protocol= thd->protocol;
++  protocol->prepare_for_resend();
++  protocol->store(table->alias, system_charset_info);
++  protocol->store((char*) operator_name, system_charset_info);
++  protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++  protocol->store(errmsg, system_charset_info);
++  thd->clear_error();
++  if (protocol->write())
++    return -1;
++  return 1;
++}
++
++
++static int prepare_for_restore(THD* thd, TABLE_LIST* table,
++			       HA_CHECK_OPT *check_opt)
++{
++  DBUG_ENTER("prepare_for_restore");
++
++  if (table->table) // do not overwrite existing tables on restore
++  {
++    DBUG_RETURN(send_check_errmsg(thd, table, "restore",
++				  "table exists, will not overwrite on restore"
++				  ));
++  }
++  else
++  {
++    char* backup_dir= thd->lex->backup_dir;
++    char src_path[FN_REFLEN], dst_path[FN_REFLEN + 1], uname[FN_REFLEN];
++    char* table_name= table->table_name;
++    char* db= table->db;
++
++    VOID(tablename_to_filename(table->table_name, uname, sizeof(uname) - 1));
++
++    if (fn_format_relative_to_data_home(src_path, uname, backup_dir, reg_ext))
++      DBUG_RETURN(-1); // protect against buffer overflow
++
++    build_table_filename(dst_path, sizeof(dst_path) - 1,
++                         db, table_name, reg_ext, 0);
++
++    if (lock_and_wait_for_table_name(thd,table))
++      DBUG_RETURN(-1);
++
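++    /*
++      Copy the saved .frm from the backup directory into the data directory
++      and recreate an empty table from it; the storage engine finishes the
++      restore later on.
++    */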
++    if (my_copy(src_path, dst_path, MYF(MY_WME)))
++    {
++      pthread_mutex_lock(&LOCK_open);
++      unlock_table_name(thd, table);
++      pthread_mutex_unlock(&LOCK_open);
++      DBUG_RETURN(send_check_errmsg(thd, table, "restore",
++				    "Failed copying .frm file"));
++    }
++    if (mysql_truncate(thd, table, 1))
++    {
++      pthread_mutex_lock(&LOCK_open);
++      unlock_table_name(thd, table);
++      pthread_mutex_unlock(&LOCK_open);
++      DBUG_RETURN(send_check_errmsg(thd, table, "restore",
++				    "Failed generating table from .frm file"));
++    }
++  }
++
++  /*
++    Now we should be able to open the partially restored table
++    to finish the restore in the handler later on
++  */
++  pthread_mutex_lock(&LOCK_open);
++  if (reopen_name_locked_table(thd, table, TRUE))
++  {
++    unlock_table_name(thd, table);
++    pthread_mutex_unlock(&LOCK_open);
++    DBUG_RETURN(send_check_errmsg(thd, table, "restore",
++                                  "Failed to open partially restored table"));
++  }
++  /* A MERGE table must not come here. */
++  DBUG_ASSERT(!table->table || !table->table->child_l);
++  pthread_mutex_unlock(&LOCK_open);
++  DBUG_RETURN(0);
++}
++
++
++static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
++			      HA_CHECK_OPT *check_opt)
++{
++  int error= 0;
++  TABLE tmp_table, *table;
++  TABLE_SHARE *share;
++  char from[FN_REFLEN],tmp[FN_REFLEN+32];
++  const char **ext;
++  MY_STAT stat_info;
++  DBUG_ENTER("prepare_for_repair");
++
++  if (!(check_opt->sql_flags & TT_USEFRM))
++    DBUG_RETURN(0);
++
++  if (!(table= table_list->table))		/* if open_ltable failed */
++  {
++    char key[MAX_DBKEY_LENGTH];
++    uint key_length;
++
++    key_length= create_table_def_key(thd, key, table_list, 0);
++    pthread_mutex_lock(&LOCK_open);
++    if (!(share= (get_table_share(thd, table_list, key, key_length, 0,
++                                  &error))))
++    {
++      pthread_mutex_unlock(&LOCK_open);
++      DBUG_RETURN(0);				// Can't open frm file
++    }
++
++    if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table, FALSE))
++    {
++      release_table_share(share, RELEASE_NORMAL);
++      pthread_mutex_unlock(&LOCK_open);
++      DBUG_RETURN(0);                           // Out of memory
++    }
++    table= &tmp_table;
++    pthread_mutex_unlock(&LOCK_open);
++  }
++
++  /*
++    REPAIR TABLE ... USE_FRM for temporary tables makes little sense.
++  */
++  if (table->s->tmp_table)
++  {
++    error= send_check_errmsg(thd, table_list, "repair",
++			     "Cannot repair temporary table from .frm file");
++    goto end;
++  }
++
++  /*
++    User gave us USE_FRM which means that the header in the index file is
++    trashed.
++    In this case we will try to fix the table the following way:
++    - Rename the data file to a temporary name
++    - Truncate the table
++    - Replace the new data file with the old one
++    - Run a normal repair using the new index file and the old data file
++  */
++
++  if (table->s->frm_version != FRM_VER_TRUE_VARCHAR)
++  {
++    error= send_check_errmsg(thd, table_list, "repair",
++                             "Failed repairing incompatible .frm file");
++    goto end;
++  }
++
++  /*
++    Check if this is a table type that stores index and data separately,
++    like ISAM or MyISAM. We assume a fixed order of the engine file name
++    extensions array: the first element is the meta/index file extension,
++    the second element is the data file extension.
++  */
++  ext= table->file->bas_ext();
++  if (!ext[0] || !ext[1])
++    goto end;					// No data file
++
++  // Name of data file
++  strxmov(from, table->s->normalized_path.str, ext[1], NullS);
++  if (!my_stat(from, &stat_info, MYF(0)))
++    goto end;				// Can't use USE_FRM flag
++
++  my_snprintf(tmp, sizeof(tmp), "%s-%lx_%lx",
++	      from, current_pid, thd->thread_id);
++
++  /* If we could open the table, close it */
++  if (table_list->table)
++  {
++    pthread_mutex_lock(&LOCK_open);
++    close_cached_table(thd, table);
++    pthread_mutex_unlock(&LOCK_open);
++  }
++  if (lock_and_wait_for_table_name(thd,table_list))
++  {
++    error= -1;
++    goto end;
++  }
++  if (my_rename(from, tmp, MYF(MY_WME)))
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlock_table_name(thd, table_list);
++    pthread_mutex_unlock(&LOCK_open);
++    error= send_check_errmsg(thd, table_list, "repair",
++			     "Failed renaming data file");
++    goto end;
++  }
++  if (mysql_truncate(thd, table_list, 1))
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlock_table_name(thd, table_list);
++    pthread_mutex_unlock(&LOCK_open);
++    error= send_check_errmsg(thd, table_list, "repair",
++			     "Failed generating table from .frm file");
++    goto end;
++  }
++  if (my_rename(tmp, from, MYF(MY_WME)))
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlock_table_name(thd, table_list);
++    pthread_mutex_unlock(&LOCK_open);
++    error= send_check_errmsg(thd, table_list, "repair",
++			     "Failed restoring .MYD file");
++    goto end;
++  }
++
++  /*
++    Now we should be able to open the partially repaired table
++    to finish the repair in the handler later on.
++  */
++  pthread_mutex_lock(&LOCK_open);
++  if (reopen_name_locked_table(thd, table_list, TRUE))
++  {
++    unlock_table_name(thd, table_list);
++    pthread_mutex_unlock(&LOCK_open);
++    error= send_check_errmsg(thd, table_list, "repair",
++                             "Failed to open partially repaired table");
++    goto end;
++  }
++  pthread_mutex_unlock(&LOCK_open);
++
++end:
++  if (table == &tmp_table)
++  {
++    pthread_mutex_lock(&LOCK_open);
++    closefrm(table, 1);				// Free allocated memory
++    pthread_mutex_unlock(&LOCK_open);
++  }
++  DBUG_RETURN(error);
++}
++
++
++
++/*
++  RETURN VALUES
++    FALSE Message sent to net (admin operation went ok)
++    TRUE  Message should be sent by caller 
++          (admin operation or network communication failed)
++*/
++static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
++                              HA_CHECK_OPT* check_opt,
++                              const char *operator_name,
++                              thr_lock_type lock_type,
++                              bool open_for_modify,
++                              bool no_warnings_for_error,
++                              uint extra_open_options,
++                              int (*prepare_func)(THD *, TABLE_LIST *,
++                                                  HA_CHECK_OPT *),
++                              int (handler::*operator_func)(THD *,
++                                                            HA_CHECK_OPT *),
++                              int (view_operator_func)(THD *, TABLE_LIST*))
++{
++  TABLE_LIST *table;
++  SELECT_LEX *select= &thd->lex->select_lex;
++  List<Item> field_list;
++  Item *item;
++  Protocol *protocol= thd->protocol;
++  LEX *lex= thd->lex;
++  int result_code;
++  DBUG_ENTER("mysql_admin_table");
++
++  if (end_active_trans(thd))
++    DBUG_RETURN(1);
++  field_list.push_back(item = new Item_empty_string("Table", NAME_CHAR_LEN*2));
++  item->maybe_null = 1;
++  field_list.push_back(item = new Item_empty_string("Op", 10));
++  item->maybe_null = 1;
++  field_list.push_back(item = new Item_empty_string("Msg_type", 10));
++  item->maybe_null = 1;
++  field_list.push_back(item = new Item_empty_string("Msg_text", 255));
++  item->maybe_null = 1;
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  mysql_ha_rm_tables(thd, tables, FALSE);
++
++  for (table= tables; table; table= table->next_local)
++  {
++    char table_name[NAME_LEN*2+2];
++    char* db = table->db;
++    bool fatal_error=0;
++
++    DBUG_PRINT("admin", ("table: '%s'.'%s'", table->db, table->table_name));
++    DBUG_PRINT("admin", ("extra_open_options: %u", extra_open_options));
++    strxmov(table_name, db, ".", table->table_name, NullS);
++    thd->open_options|= extra_open_options;
++    table->lock_type= lock_type;
++    /* open only one table from local list of command */
++    {
++      TABLE_LIST *save_next_global, *save_next_local;
++      save_next_global= table->next_global;
++      table->next_global= 0;
++      save_next_local= table->next_local;
++      table->next_local= 0;
++      select->table_list.first= table;
++      /*
++        Time zone tables and SP tables can be added to the lex->query_tables
++        list, so it has to be prepared.
++        TODO: Investigate if we can put extra tables into argument instead of
++        using lex->query_tables
++      */
++      lex->query_tables= table;
++      lex->query_tables_last= &table->next_global;
++      lex->query_tables_own_last= 0;
++      thd->no_warnings_for_error= no_warnings_for_error;
++      if (view_operator_func == NULL)
++        table->required_type=FRMTYPE_TABLE;
++
++      open_and_lock_tables(thd, table);
++      thd->no_warnings_for_error= 0;
++      table->next_global= save_next_global;
++      table->next_local= save_next_local;
++      thd->open_options&= ~extra_open_options;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++      if (table->table)
++      {
++        /*
++          Set up which partitions that should be processed
++          if ALTER TABLE t ANALYZE/CHECK/OPTIMIZE/REPAIR PARTITION ..
++        */
++        Alter_info *alter_info= &lex->alter_info;
++
++        if (alter_info->flags & ALTER_ADMIN_PARTITION)
++        {
++          if (!table->table->part_info)
++          {
++            my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
++            DBUG_RETURN(TRUE);
++          }
++          uint no_parts_found;
++          uint no_parts_opt= alter_info->partition_names.elements;
++          no_parts_found= set_part_state(alter_info, table->table->part_info,
++                                         PART_CHANGED);
++          if (no_parts_found != no_parts_opt &&
++              (!(alter_info->flags & ALTER_ALL_PARTITION)))
++          {
++            char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE];
++            size_t length;
++            DBUG_PRINT("admin", ("sending non existent partition error"));
++            protocol->prepare_for_resend();
++            protocol->store(table_name, system_charset_info);
++            protocol->store(operator_name, system_charset_info);
++            protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++            length= my_snprintf(buff, sizeof(buff),
++                                ER(ER_DROP_PARTITION_NON_EXISTENT),
++                                table_name);
++            protocol->store(buff, length, system_charset_info);
++            if(protocol->write())
++              goto err;
++            my_eof(thd);
++            goto err;
++          }
++        }
++      }
++#endif
++    }
++    DBUG_PRINT("admin", ("table: 0x%lx", (long) table->table));
++
++    if (prepare_func)
++    {
++      DBUG_PRINT("admin", ("calling prepare_func"));
++      switch ((*prepare_func)(thd, table, check_opt)) {
++      case  1:           // error, message written to net
++        ha_autocommit_or_rollback(thd, 1);
++        end_trans(thd, ROLLBACK);
++        close_thread_tables(thd);
++        DBUG_PRINT("admin", ("simple error, admin next table"));
++        continue;
++      case -1:           // error, message could be written to net
++        /* purecov: begin inspected */
++        DBUG_PRINT("admin", ("severe error, stop"));
++        goto err;
++        /* purecov: end */
++      default:           // should be 0 otherwise
++        DBUG_PRINT("admin", ("prepare_func succeeded"));
++        ;
++      }
++    }
++
++    /*
++      CHECK TABLE is the only command where a VIEW is allowed here, and it
++      uses only the temporary table method for resolving VIEWs, so there
++      can be no VIEW tree substitution of a join view. Hence, if opening
++      the table succeeded, table->table holds a real TABLE pointer (with
++      join view substitution table->table can be 0, but that is impossible
++      here)
++    */
++    if (!table->table)
++    {
++      DBUG_PRINT("admin", ("open table failed"));
++      if (!thd->warn_list.elements)
++        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                     ER_CHECK_NO_SUCH_TABLE, ER(ER_CHECK_NO_SUCH_TABLE));
++      /* if it was a view, check the md5 sum */
++      if (table->view &&
++          view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM)
++        push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                     ER_VIEW_CHECKSUM, ER(ER_VIEW_CHECKSUM));
++      if (thd->main_da.is_error() && 
++          (thd->main_da.sql_errno() == ER_NO_SUCH_TABLE ||
++           thd->main_da.sql_errno() == ER_FILE_NOT_FOUND))
++        /* A missing table is just issued as a failed command */
++        result_code= HA_ADMIN_FAILED;
++      else
++        /* Default failure code is corrupt table */
++        result_code= HA_ADMIN_CORRUPT;
++      goto send_result;
++    }
++
++    if (table->view)
++    {
++      DBUG_PRINT("admin", ("calling view_operator_func"));
++      result_code= (*view_operator_func)(thd, table);
++      goto send_result;
++    }
++
++    if (table->schema_table)
++    {
++      result_code= HA_ADMIN_NOT_IMPLEMENTED;
++      goto send_result;
++    }
++
++    if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify)
++    {
++      /* purecov: begin inspected */
++      char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE];
++      size_t length;
++      DBUG_PRINT("admin", ("sending error message"));
++      protocol->prepare_for_resend();
++      protocol->store(table_name, system_charset_info);
++      protocol->store(operator_name, system_charset_info);
++      protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++      length= my_snprintf(buff, sizeof(buff), ER(ER_OPEN_AS_READONLY),
++                          table_name);
++      protocol->store(buff, length, system_charset_info);
++      ha_autocommit_or_rollback(thd, 0);
++      end_trans(thd, COMMIT);
++      close_thread_tables(thd);
++      lex->reset_query_tables_list(FALSE);
++      table->table=0;				// For query cache
++      if (protocol->write())
++	goto err;
++      thd->main_da.reset_diagnostics_area();
++      continue;
++      /* purecov: end */
++    }
++
++    /* Close all instances of the table to allow repair to rename files */
++    if (lock_type == TL_WRITE && table->table->s->version)
++    {
++      DBUG_PRINT("admin", ("removing table from cache"));
++      pthread_mutex_lock(&LOCK_open);
++      const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
++					      "Waiting to get writelock");
++      mysql_lock_abort(thd,table->table, TRUE);
++      remove_table_from_cache(thd, table->table->s->db.str,
++                              table->table->s->table_name.str,
++                              RTFC_WAIT_OTHER_THREAD_FLAG |
++                              RTFC_CHECK_KILLED_FLAG);
++      thd->exit_cond(old_message);
++      DBUG_EXECUTE_IF("wait_in_mysql_admin_table", wait_for_kill_signal(thd););
++      if (thd->killed)
++	goto err;
++      /* Flush entries in the query cache involving this table. */
++      query_cache_invalidate3(thd, table->table, 0);
++      open_for_modify= 0;
++    }
++
++    if (table->table->s->crashed && operator_func == &handler::ha_check)
++    {
++      /* purecov: begin inspected */
++      DBUG_PRINT("admin", ("sending crashed warning"));
++      protocol->prepare_for_resend();
++      protocol->store(table_name, system_charset_info);
++      protocol->store(operator_name, system_charset_info);
++      protocol->store(STRING_WITH_LEN("warning"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Table is marked as crashed"),
++                      system_charset_info);
++      if (protocol->write())
++        goto err;
++      /* purecov: end */
++    }
++
++    if (operator_func == &handler::ha_repair &&
++        !(check_opt->sql_flags & TT_USEFRM))
++    {
++      if ((table->table->file->check_old_types() == HA_ADMIN_NEEDS_ALTER) ||
++          (table->table->file->ha_check_for_upgrade(check_opt) ==
++           HA_ADMIN_NEEDS_ALTER))
++      {
++        DBUG_PRINT("admin", ("recreating table"));
++        ha_autocommit_or_rollback(thd, 1);
++        close_thread_tables(thd);
++        tmp_disable_binlog(thd); // binlogging is done by caller if wanted
++        result_code= mysql_recreate_table(thd, table);
++        reenable_binlog(thd);
++        /*
++          mysql_recreate_table() can push OK or ERROR.
++          Clear 'OK' status. If there is an error, keep it:
++          we will store the error message in a result set row 
++          and then clear.
++        */
++        if (thd->main_da.is_ok())
++          thd->main_da.reset_diagnostics_area();
++        goto send_result;
++      }
++    }
++
++    DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name));
++    result_code = (table->table->file->*operator_func)(thd, check_opt);
++    DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
++
++send_result:
++
++    lex->cleanup_after_one_table_open();
++    thd->clear_error();  // these errors shouldn't reach the client
++    {
++      List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
++      MYSQL_ERROR *err;
++      while ((err= it++))
++      {
++        protocol->prepare_for_resend();
++        protocol->store(table_name, system_charset_info);
++        protocol->store((char*) operator_name, system_charset_info);
++        protocol->store(warning_level_names[err->level].str,
++                        warning_level_names[err->level].length,
++                        system_charset_info);
++        protocol->store(err->msg, system_charset_info);
++        if (protocol->write())
++          goto err;
++      }
++      mysql_reset_errors(thd, true);
++    }
++    protocol->prepare_for_resend();
++    protocol->store(table_name, system_charset_info);
++    protocol->store(operator_name, system_charset_info);
++
++send_result_message:
++
++    DBUG_PRINT("info", ("result_code: %d", result_code));
++    switch (result_code) {
++    case HA_ADMIN_NOT_IMPLEMENTED:
++      {
++       char buf[MYSQL_ERRMSG_SIZE];
++       size_t length=my_snprintf(buf, sizeof(buf),
++				ER(ER_CHECK_NOT_IMPLEMENTED), operator_name);
++	protocol->store(STRING_WITH_LEN("note"), system_charset_info);
++	protocol->store(buf, length, system_charset_info);
++      }
++      break;
++
++    case HA_ADMIN_NOT_BASE_TABLE:
++      {
++        char buf[MYSQL_ERRMSG_SIZE];
++        size_t length= my_snprintf(buf, sizeof(buf),
++                                 ER(ER_BAD_TABLE_ERROR), table_name);
++        protocol->store(STRING_WITH_LEN("note"), system_charset_info);
++        protocol->store(buf, length, system_charset_info);
++      }
++      break;
++
++    case HA_ADMIN_OK:
++      protocol->store(STRING_WITH_LEN("status"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("OK"), system_charset_info);
++      break;
++
++    case HA_ADMIN_FAILED:
++      protocol->store(STRING_WITH_LEN("status"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Operation failed"),
++                      system_charset_info);
++      break;
++
++    case HA_ADMIN_REJECT:
++      protocol->store(STRING_WITH_LEN("status"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Operation need committed state"),
++                      system_charset_info);
++      open_for_modify= FALSE;
++      break;
++
++    case HA_ADMIN_ALREADY_DONE:
++      protocol->store(STRING_WITH_LEN("status"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Table is already up to date"),
++                      system_charset_info);
++      break;
++
++    case HA_ADMIN_CORRUPT:
++      protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Corrupt"), system_charset_info);
++      fatal_error=1;
++      break;
++
++    case HA_ADMIN_INVALID:
++      protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++      protocol->store(STRING_WITH_LEN("Invalid argument"),
++                      system_charset_info);
++      break;
++
++    case HA_ADMIN_TRY_ALTER:
++    {
++      /*
++        This is currently used only by InnoDB. ha_innobase::optimize() answers
++        "try with alter", so here we close the table, do an ALTER TABLE,
++        reopen the table and do ha_innobase::analyze() on it.
++        We have to end the row here, so that analyze can return more rows.
++      */
++      protocol->store(STRING_WITH_LEN("note"), system_charset_info);
++      protocol->store(STRING_WITH_LEN(
++          "Table does not support optimize, doing recreate + analyze instead"),
++                      system_charset_info);
++      if (protocol->write())
++        goto err;
++      ha_autocommit_or_rollback(thd, 0);
++      close_thread_tables(thd);
++      DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze..."));
++      TABLE_LIST *save_next_local= table->next_local,
++                 *save_next_global= table->next_global;
++      table->next_local= table->next_global= 0;
++      tmp_disable_binlog(thd); // binlogging is done by caller if wanted
++      result_code= mysql_recreate_table(thd, table);
++      reenable_binlog(thd);
++      /*
++        mysql_recreate_table() can push OK or ERROR.
++        Clear 'OK' status. If there is an error, keep it:
++        we will store the error message in a result set row 
++        and then clear.
++      */
++      if (thd->main_da.is_ok())
++        thd->main_da.reset_diagnostics_area();
++      ha_autocommit_or_rollback(thd, 0);
++      close_thread_tables(thd);
++      if (!result_code) // recreation went ok
++      {
++        if ((table->table= open_ltable(thd, table, lock_type, 0)) &&
++            ((result_code= table->table->file->ha_analyze(thd, check_opt)) > 0))
++          result_code= 0; // analyze went ok
++      }
++      /* Start a new row for the final status row */
++      protocol->prepare_for_resend();
++      protocol->store(table_name, system_charset_info);
++      protocol->store(operator_name, system_charset_info);
++      if (result_code) // either mysql_recreate_table or analyze failed
++      {
++        DBUG_ASSERT(thd->is_error());
++        if (thd->is_error())
++        {
++          const char *err_msg= thd->main_da.message();
++          if (!thd->vio_ok())
++          {
++            sql_print_error("%s", err_msg);
++          }
++          else
++          {
++            /* Hijack the row already in-progress. */
++            protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++            protocol->store(err_msg, system_charset_info);
++            if (protocol->write())
++              goto err;
++            /* Start off another row for HA_ADMIN_FAILED */
++            protocol->prepare_for_resend();
++            protocol->store(table_name, system_charset_info);
++            protocol->store(operator_name, system_charset_info);
++          }
++          thd->clear_error();
++        }
++      }
++      result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK;
++      table->next_local= save_next_local;
++      table->next_global= save_next_global;
++      goto send_result_message;
++    }
++    case HA_ADMIN_WRONG_CHECKSUM:
++    {
++      protocol->store(STRING_WITH_LEN("note"), system_charset_info);
++      protocol->store(ER(ER_VIEW_CHECKSUM), strlen(ER(ER_VIEW_CHECKSUM)),
++                      system_charset_info);
++      break;
++    }
++
++    case HA_ADMIN_NEEDS_UPGRADE:
++    case HA_ADMIN_NEEDS_ALTER:
++    {
++      char buf[MYSQL_ERRMSG_SIZE];
++      size_t length;
++
++      protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++      length=my_snprintf(buf, sizeof(buf), ER(ER_TABLE_NEEDS_UPGRADE),
++                         table->table_name);
++      protocol->store(buf, length, system_charset_info);
++      fatal_error=1;
++      break;
++    }
++
++    default:				// Probably HA_ADMIN_INTERNAL_ERROR
++      {
++        char buf[MYSQL_ERRMSG_SIZE];
++        size_t length=my_snprintf(buf, sizeof(buf),
++                                "Unknown - internal error %d during operation",
++                                result_code);
++        protocol->store(STRING_WITH_LEN("error"), system_charset_info);
++        protocol->store(buf, length, system_charset_info);
++        fatal_error=1;
++        break;
++      }
++    }
++    if (table->table)
++    {
++      if (fatal_error)
++        table->table->s->version=0;               // Force close of table
++      else if (open_for_modify)
++      {
++        if (table->table->s->tmp_table)
++          table->table->file->info(HA_STATUS_CONST);
++        else
++        {
++          pthread_mutex_lock(&LOCK_open);
++          remove_table_from_cache(thd, table->table->s->db.str,
++                                  table->table->s->table_name.str, RTFC_NO_FLAG);
++          pthread_mutex_unlock(&LOCK_open);
++        }
++        /* Something may have been modified, so we have to invalidate the query cache */
++        query_cache_invalidate3(thd, table->table, 0);
++      }
++    }
++    ha_autocommit_or_rollback(thd, 0);
++    end_trans(thd, COMMIT);
++    close_thread_tables(thd);
++    table->table=0;				// For query cache
++    if (protocol->write())
++      goto err;
++  }
++
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++
++err:
++  ha_autocommit_or_rollback(thd, 1);
++  end_trans(thd, ROLLBACK);
++  close_thread_tables(thd);			// Shouldn't be needed
++  if (table)
++    table->table=0;
++  DBUG_RETURN(TRUE);
++}
++
++
++bool mysql_backup_table(THD* thd, TABLE_LIST* table_list)
++{
++  DBUG_ENTER("mysql_backup_table");
++  WARN_DEPRECATED(thd, "6.0", "BACKUP TABLE",
++                  "MySQL Administrator (mysqldump, mysql)");
++  DBUG_RETURN(mysql_admin_table(thd, table_list, 0,
++				"backup", TL_READ, 0, 0, 0, 0,
++				&handler::ha_backup, 0));
++}
++
++
++bool mysql_restore_table(THD* thd, TABLE_LIST* table_list)
++{
++  DBUG_ENTER("mysql_restore_table");
++  WARN_DEPRECATED(thd, "6.0", "RESTORE TABLE",
++                  "MySQL Administrator (mysqldump, mysql)");
++  DBUG_RETURN(mysql_admin_table(thd, table_list, 0,
++				"restore", TL_WRITE, 1, 1, 0,
++				&prepare_for_restore,
++				&handler::ha_restore, 0));
++}
++
++
++bool mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
++{
++  DBUG_ENTER("mysql_repair_table");
++  DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
++				"repair", TL_WRITE, 1,
++                                test(check_opt->sql_flags & TT_USEFRM),
++                                HA_OPEN_FOR_REPAIR,
++				&prepare_for_repair,
++				&handler::ha_repair, 0));
++}
++
++
++bool mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
++{
++  DBUG_ENTER("mysql_optimize_table");
++  DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
++				"optimize", TL_WRITE, 1,0,0,0,
++				&handler::ha_optimize, 0));
++}
++
++
++/*
++  Assigned specified indexes for a table into key cache
++
++  SYNOPSIS
++    mysql_assign_to_keycache()
++    thd		Thread object
++    tables	Table list (one table only)
++
++  RETURN VALUES
++   FALSE ok
++   TRUE  error
++*/
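++/*
++  Usage sketch (illustrative; 'hot_cache' and 't1' are made-up names):
++    SET GLOBAL hot_cache.key_buffer_size = 16*1024*1024;
++    CACHE INDEX t1 IN hot_cache;
++  The CACHE INDEX statement ends up here with key_cache_name set to
++  "hot_cache".
++*/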
++
++bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables,
++			     LEX_STRING *key_cache_name)
++{
++  HA_CHECK_OPT check_opt;
++  KEY_CACHE *key_cache;
++  DBUG_ENTER("mysql_assign_to_keycache");
++
++  check_opt.init();
++  pthread_mutex_lock(&LOCK_global_system_variables);
++  if (!(key_cache= get_key_cache(key_cache_name)))
++  {
++    pthread_mutex_unlock(&LOCK_global_system_variables);
++    my_error(ER_UNKNOWN_KEY_CACHE, MYF(0), key_cache_name->str);
++    DBUG_RETURN(TRUE);
++  }
++  pthread_mutex_unlock(&LOCK_global_system_variables);
++  check_opt.key_cache= key_cache;
++  DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt,
++				"assign_to_keycache", TL_READ_NO_INSERT, 0, 0,
++				0, 0, &handler::assign_to_keycache, 0));
++}
++
++
++/*
++  Reassign all tables assigned to a key cache to another key cache
++
++  SYNOPSIS
++    reassign_keycache_tables()
++    thd		Thread object
++    src_cache	Reference to the key cache to clean up
++    dest_cache	New key cache
++
++  NOTES
++    This is called when one sets a key cache size to zero, in which
++    case we have to move the tables associated with this key cache to
++    the "default" one.
++
++    One has to ensure that one never calls this function while
++    some other thread is changing the key cache. This is assured by
++    the caller setting src_cache->in_init before calling this function.
++
++    We don't delete the old key cache as there may still be pointers pointing
++    to it for a while after this function returns.
++
++ RETURN VALUES
++    0	  ok
++*/
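++/*
++  Usage sketch (illustrative; 'hot_cache' is a made-up cache name):
++    SET GLOBAL hot_cache.key_buffer_size = 0;
++  Setting a named key cache's size to zero triggers this reassignment,
++  moving its tables back to the default key cache.
++*/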
++
++int reassign_keycache_tables(THD *thd, KEY_CACHE *src_cache,
++			     KEY_CACHE *dst_cache)
++{
++  DBUG_ENTER("reassign_keycache_tables");
++
++  DBUG_ASSERT(src_cache != dst_cache);
++  DBUG_ASSERT(src_cache->in_init);
++  src_cache->param_buff_size= 0;		// Free key cache
++  ha_resize_key_cache(src_cache);
++  ha_change_key_cache(src_cache, dst_cache);
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Preload specified indexes for a table into key cache
++
++  SYNOPSIS
++    mysql_preload_keys()
++    thd		Thread object
++    tables	Table list (one table only)
++
++  RETURN VALUES
++    FALSE ok
++    TRUE  error
++*/
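++/*
++  Usage sketch (illustrative; 't1' is a made-up table name):
++    LOAD INDEX INTO CACHE t1 IGNORE LEAVES;
++  preloads t1's index blocks into the key cache the table is assigned to.
++*/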
++
++bool mysql_preload_keys(THD* thd, TABLE_LIST* tables)
++{
++  DBUG_ENTER("mysql_preload_keys");
++  /*
++    We cannot allow concurrent inserts. The storage engine reads
++    directly from the index file, bypassing the cache. It could read
++    outdated information if parallel inserts into cache blocks happen.
++  */
++  DBUG_RETURN(mysql_admin_table(thd, tables, 0,
++				"preload_keys", TL_READ_NO_INSERT, 0, 0, 0, 0,
++				&handler::preload_keys, 0));
++}
++
++
++
++/**
++  @brief          Create frm file based on I_S table
++
++  @param[in]      thd                      thread handler
++  @param[in]      schema_table             I_S table           
++  @param[in]      dst_path                 path where frm should be created
++  @param[in]      create_info              Create info
++
++  @return         Operation status
++    @retval       0                        success
++    @retval       1                        error
++*/
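++/*
++  Usage sketch (illustrative): this helper is used when the source of
++  CREATE TABLE ... LIKE is an INFORMATION_SCHEMA table, for example
++    CREATE TABLE plist LIKE INFORMATION_SCHEMA.PROCESSLIST;
++  where no .frm file exists on disk to copy, so one is generated instead.
++*/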
++
++
++bool mysql_create_like_schema_frm(THD* thd, TABLE_LIST* schema_table,
++                                  char *dst_path, HA_CREATE_INFO *create_info)
++{
++  HA_CREATE_INFO local_create_info;
++  Alter_info alter_info;
++  bool tmp_table= (create_info->options & HA_LEX_CREATE_TMP_TABLE);
++  uint keys= schema_table->table->s->keys;
++  uint db_options= 0;
++  DBUG_ENTER("mysql_create_like_schema_frm");
++
++  bzero((char*) &local_create_info, sizeof(local_create_info));
++  local_create_info.db_type= schema_table->table->s->db_type();
++  local_create_info.row_type= schema_table->table->s->row_type;
++  local_create_info.default_table_charset=default_charset_info;
++  alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE);
++  schema_table->table->use_all_columns();
++  if (mysql_prepare_alter_table(thd, schema_table->table,
++                                &local_create_info, &alter_info))
++    DBUG_RETURN(1);
++  if (mysql_prepare_create_table(thd, &local_create_info, &alter_info,
++                                 tmp_table, &db_options,
++                                 schema_table->table->file,
++                                 &schema_table->table->s->key_info, &keys, 0))
++    DBUG_RETURN(1);
++  local_create_info.max_rows= 0;
++  if (mysql_create_frm(thd, dst_path, NullS, NullS,
++                       &local_create_info, alter_info.create_list,
++                       keys, schema_table->table->s->key_info,
++                       schema_table->table->file))
++    DBUG_RETURN(1);
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Create a table identical to the specified table
++
++  SYNOPSIS
++    mysql_create_like_table()
++    thd		Thread object
++    table       Table list element for target table
++    src_table   Table list element for source table
++    create_info Create info
++
++  RETURN VALUES
++    FALSE OK
++    TRUE  error
++*/
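++/*
++  Usage sketch (illustrative; table names are made up):
++    CREATE TABLE t2 LIKE t1;
++    CREATE TEMPORARY TABLE t2 LIKE t1;
++  Both forms are handled here; the temporary variant takes the
++  HA_LEX_CREATE_TMP_TABLE branch below.
++*/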
++
++bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
++                             HA_CREATE_INFO *create_info)
++{
++  TABLE *name_lock= 0;
++  char src_path[FN_REFLEN], dst_path[FN_REFLEN + 1];
++  uint dst_path_length;
++  char *db= table->db;
++  char *table_name= table->table_name;
++  int  err;
++  bool res= TRUE;
++  uint not_used;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  char tmp_path[FN_REFLEN];
++#endif
++  char ts_name[FN_LEN + 1];
++  myf flags= MY_DONT_OVERWRITE_FILE;
++  DBUG_ENTER("mysql_create_like_table");
++
++
++  /*
++    By opening source table we guarantee that it exists and no concurrent
++    DDL operation will mess with it. Later we also take an exclusive
++    name-lock on target table name, which makes copying of .frm file,
++    call to ha_create_table() and binlogging atomic against concurrent DML
++    and DDL operations on target table. Thus by holding both these "locks"
++    we ensure that our statement is properly isolated from all concurrent
++    operations which matter.
++  */
++  if (open_tables(thd, &src_table, &not_used, 0))
++    DBUG_RETURN(TRUE);
++
++  /*
++    For bug#25875: a table newly created through CREATE TABLE .. LIKE
++    has no ndb_dd attributes.
++    Get the possible tablespace info from the source table; a valid
++    tablespace name can only be obtained for a disk-based ndb table.
++  */
++  if ((src_table->table->file->get_tablespace_name(thd, ts_name, FN_LEN)))
++  {
++    create_info->tablespace= ts_name;
++    create_info->storage_media= HA_SM_DISK;
++  }
++
++  strxmov(src_path, src_table->table->s->path.str, reg_ext, NullS);
++
++  DBUG_EXECUTE_IF("sleep_create_like_before_check_if_exists", my_sleep(6000000););
++
++  /*
++    Check that the destination table does not exist. Note that its name
++    was already checked when it was added to the table list.
++  */
++  if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
++  {
++    if (src_table->table->file->ht == partition_hton)
++    {
++      my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
++      goto err;
++    }
++    if (find_temporary_table(thd, db, table_name))
++      goto table_exists;
++    dst_path_length= build_tmptable_filename(thd, dst_path, sizeof(dst_path));
++    create_info->table_options|= HA_CREATE_DELAY_KEY_WRITE;
++  }
++  else
++  {
++    if (lock_table_name_if_not_cached(thd, db, table_name, &name_lock))
++      goto err;
++    if (!name_lock)
++      goto table_exists;
++    dst_path_length= build_table_filename(dst_path, sizeof(dst_path) - 1,
++                                          db, table_name, reg_ext, 0);
++    if (!access(dst_path, F_OK))
++      goto table_exists;
++  }
++
++  DBUG_EXECUTE_IF("sleep_create_like_before_copy", my_sleep(6000000););
++
++  if (opt_sync_frm && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
++    flags|= MY_SYNC;
++
++  /*
++    Create a new table by copying from source table
++    and sync the new table if the flag MY_SYNC is set
++
++    Although the exclusive name-lock on the target table protects us from
++    concurrent DML and DDL operations on it, we still want to wrap the .FRM
++    creation and the call to ha_create_table() in a critical section
++    protected by LOCK_open, in order to provide minimal atomicity against
++    operations which disregard name-locks, like the I_S implementation, for
++    example. This is a temporary workaround and should not be copied.
++    Instead we should fix our code to always honor name-locks.
++
++    Also some engines (e.g. NDB cluster) require that LOCK_open should be held
++    during the call to ha_create_table(). See bug #28614 for more info.
++  */
++  VOID(pthread_mutex_lock(&LOCK_open));
++  if (src_table->schema_table)
++  {
++    if (mysql_create_like_schema_frm(thd, src_table, dst_path, create_info))
++    {
++      VOID(pthread_mutex_unlock(&LOCK_open));
++      goto err;
++    }
++  }
++  else if (my_copy(src_path, dst_path, flags))
++  {
++    if (my_errno == ENOENT)
++      my_error(ER_BAD_DB_ERROR,MYF(0),db);
++    else
++      my_error(ER_CANT_CREATE_FILE,MYF(0),dst_path,my_errno);
++    VOID(pthread_mutex_unlock(&LOCK_open));
++    goto err;
++  }
++
++  /*
++    As mysql_truncate doesn't work on a new table at this stage of
++    creation, create the table directly instead (for both normal
++    and temporary tables).
++  */
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  /*
++    For partitioned tables we need to copy the .par file as well since
++    it is used in open_table_def to even be able to create a new handler.
++  */
++  if (src_table->table->file->ht == partition_hton)
++  {
++    fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
++    strmov(dst_path, tmp_path);
++    fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
++    strmov(src_path, tmp_path);
++    my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE));
++  }
++#endif
++
++  DBUG_EXECUTE_IF("sleep_create_like_before_ha_create", my_sleep(6000000););
++
++  dst_path[dst_path_length - reg_ext_length]= '\0';  // Remove .frm
++  if (thd->variables.keep_files_on_create)
++    create_info->options|= HA_CREATE_KEEP_FILES;
++  err= ha_create_table(thd, dst_path, db, table_name, create_info, 1);
++  VOID(pthread_mutex_unlock(&LOCK_open));
++
++  if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
++  {
++    if (err || !open_temporary_table(thd, dst_path, db, table_name, 1))
++    {
++      (void) rm_temporary_table(create_info->db_type,
++				dst_path); /* purecov: inspected */
++      goto err;     /* purecov: inspected */
++    }
++    thd->thread_specific_used= TRUE;
++  }
++  else if (err)
++  {
++    (void) quick_rm_table(create_info->db_type, db,
++			  table_name, 0); /* purecov: inspected */
++    goto err;	    /* purecov: inspected */
++  }
++
++  goto binlog;
++
++table_exists:
++  if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
++  {
++    char warn_buff[MYSQL_ERRMSG_SIZE];
++    my_snprintf(warn_buff, sizeof(warn_buff),
++		ER(ER_TABLE_EXISTS_ERROR), table_name);
++    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++		 ER_TABLE_EXISTS_ERROR,warn_buff);
++  }
++  else
++  {
++    my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
++    goto err;
++  }
++
++binlog:
++  DBUG_EXECUTE_IF("sleep_create_like_before_binlogging", my_sleep(6000000););
++
++  /*
++    We have to write the query before we unlock the tables.
++  */
++  if (thd->current_stmt_binlog_row_based)
++  {
++    /*
++       Since temporary tables are not replicated under row-based
++       replication, CREATE TABLE ... LIKE ... needs special
++       treatement.  We have four cases to consider, according to the
++       following decision table:
++
++           ==== ========= ========= ==============================
++           Case    Target    Source Write to binary log
++           ==== ========= ========= ==============================
++           1       normal    normal Original statement
++           2       normal temporary Generated statement
++           3    temporary    normal Nothing
++           4    temporary temporary Nothing
++           ==== ========= ========= ==============================
++    */
++    if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
++    {
++      if (src_table->table->s->tmp_table)               // Case 2
++      {
++        char buf[2048];
++        String query(buf, sizeof(buf), system_charset_info);
++        query.length(0);  // Have to zero it since constructor doesn't
++
++        /*
++          Here we open the destination table, on which we already have
++          name-lock. This is needed for store_create_info() to work.
++          The table will be closed by unlink_open_table() at the end
++          of this function.
++        */
++        table->table= name_lock;
++        VOID(pthread_mutex_lock(&LOCK_open));
++        if (reopen_name_locked_table(thd, table, FALSE))
++        {
++          VOID(pthread_mutex_unlock(&LOCK_open));
++          goto err;
++        }
++        VOID(pthread_mutex_unlock(&LOCK_open));
++
++       /*
++         The condition avoids a crash as described in BUG#48506. Other
++         binlogging problems related to CREATE TABLE IF NOT EXISTS LIKE
++         when the existing object is a view will be solved by BUG 47442.
++       */
++        if (!table->view)
++        {
++          IF_DBUG(int result=)
++            store_create_info(thd, table, &query,
++                              create_info, FALSE /* show_database */);
++
++          DBUG_ASSERT(result == 0); // store_create_info() always returns 0
++          if (write_bin_log(thd, TRUE, query.ptr(), query.length()))
++            goto err;
++        }
++      }
++      else                                      // Case 1
++        if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
++          goto err;
++    }
++    /*
++      Cases 3 and 4 do nothing under RBR
++    */
++  }
++  else if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
++    goto err;
++
++  res= FALSE;
++
++err:
++  if (name_lock)
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlink_open_table(thd, name_lock, FALSE);
++    pthread_mutex_unlock(&LOCK_open);
++  }
++  DBUG_RETURN(res);
++}
++
++
++bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
++{
++  thr_lock_type lock_type = TL_READ_NO_INSERT;
++
++  DBUG_ENTER("mysql_analyze_table");
++  DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
++				"analyze", lock_type, 1, 0, 0, 0,
++				&handler::ha_analyze, 0));
++}
++
++
++bool mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt)
++{
++  thr_lock_type lock_type = TL_READ_NO_INSERT;
++
++  DBUG_ENTER("mysql_check_table");
++  DBUG_RETURN(mysql_admin_table(thd, tables, check_opt,
++				"check", lock_type,
++				0, 0, HA_OPEN_FOR_REPAIR, 0,
++				&handler::ha_check, &view_checksum));
++}
++
++
++/* table_list should contain just one table */
++static int
++mysql_discard_or_import_tablespace(THD *thd,
++                                   TABLE_LIST *table_list,
++                                   enum tablespace_op_type tablespace_op)
++{
++  TABLE *table;
++  my_bool discard;
++  int error;
++  DBUG_ENTER("mysql_discard_or_import_tablespace");
++
++  /*
++    Note that DISCARD/IMPORT TABLESPACE is always the only operation in an
++    ALTER TABLE
++  */
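++  /*
++    Usage sketch (illustrative; 't1' is a made-up InnoDB table created
++    with innodb_file_per_table enabled):
++      ALTER TABLE t1 DISCARD TABLESPACE;
++      ALTER TABLE t1 IMPORT TABLESPACE;
++    Each statement arrives here as the sole operation of its ALTER TABLE.
++  */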
++
++  thd_proc_info(thd, "discard_or_import_tablespace");
++
++  discard= test(tablespace_op == DISCARD_TABLESPACE);
++
++ /*
++   We set this flag so that ha_innobase::open and ::external_lock() do
++   not complain when we lock the table
++ */
++  thd->tablespace_op= TRUE;
++  if (!(table=open_ltable(thd, table_list, TL_WRITE, 0)))
++  {
++    thd->tablespace_op=FALSE;
++    DBUG_RETURN(-1);
++  }
++
++  error= table->file->ha_discard_or_import_tablespace(discard);
++
++  thd_proc_info(thd, "end");
++
++  if (error)
++    goto err;
++
++  /*
++    The 0 in the call below means 'not in a transaction', which means
++    immediate invalidation; that is probably what we wish here
++  */
++  query_cache_invalidate3(thd, table_list, 0);
++
++  /* The ALTER TABLE is always in its own transaction */
++  error = ha_autocommit_or_rollback(thd, 0);
++  if (end_active_trans(thd))
++    error=1;
++  if (error)
++    goto err;
++  error= write_bin_log(thd, FALSE, thd->query(), thd->query_length());
++
++err:
++  ha_autocommit_or_rollback(thd, error);
++  thd->tablespace_op=FALSE;
++  
++  if (error == 0)
++  {
++    my_ok(thd);
++    DBUG_RETURN(0);
++  }
++
++  table->file->print_error(error, MYF(0));
++    
++  DBUG_RETURN(-1);
++}
++
++/**
++  @brief Check if both DROP and CREATE are present for an index in ALTER TABLE
++ 
++  @details Checks if any index is being modified (present as both DROP INDEX 
++    and ADD INDEX) in the current ALTER TABLE statement. Needed for disabling 
++    online ALTER TABLE.
++  
++  @param table       The table being altered
++  @param alter_info  The ALTER TABLE structure
++  @return presence of index being altered  
++  @retval FALSE  No such index
++  @retval TRUE   Have at least 1 index modified
++*/
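++/*
++  Usage sketch (illustrative; names are made up): a statement such as
++    ALTER TABLE t1 DROP INDEX idx_a, ADD INDEX idx_a (a, b);
++  names the same index in both the drop list and the key list, so this
++  function returns TRUE and online ALTER TABLE is not attempted.
++*/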
++
++static bool
++is_index_maintenance_unique (TABLE *table, Alter_info *alter_info)
++{
++  List_iterator<Key> key_it(alter_info->key_list);
++  List_iterator<Alter_drop> drop_it(alter_info->drop_list);
++  Key *key;
++
++  while ((key= key_it++))
++  {
++    if (key->name)
++    {
++      Alter_drop *drop;
++
++      drop_it.rewind();
++      while ((drop= drop_it++))
++      {
++        if (drop->type == Alter_drop::KEY &&
++            !my_strcasecmp(system_charset_info, key->name, drop->name))
++          return TRUE;
++      }
++    }
++  }
++  return FALSE;
++}
++
++
++/*
++  SYNOPSIS
++    compare_tables()
++      table                     The original table.
++      alter_info                Alter options, fields and keys for the new
++                                table.
++      create_info               Create options for the new table.
++      order_num                 Number of order list elements.
++      need_copy_table     OUT   Result of the comparison. Undefined if error.
++                                Otherwise is one of:
++                                ALTER_TABLE_METADATA_ONLY  No copy needed
++                                ALTER_TABLE_DATA_CHANGED   Data changes,
++                                                           copy needed
++                                ALTER_TABLE_INDEX_CHANGED  Index changes,
++                                                           copy might be needed
++      key_info_buffer     OUT   An array of KEY structs for new indexes
++      index_drop_buffer   OUT   An array of offsets into table->key_info.
++      index_drop_count    OUT   The number of elements in the array.
++      index_add_buffer    OUT   An array of offsets into key_info_buffer.
++      index_add_count     OUT   The number of elements in the array.
++      candidate_key_count OUT   The number of candidate keys in original table.
++
++  DESCRIPTION
++    'table' (first argument) contains information of the original
++    table, which includes all corresponding parts that the new
++    table has in arguments create_list, key_list and create_info.
++
++    By comparing the changes between the original and new table
++    we can determine how much it has changed after ALTER TABLE
++    and whether we need to make a copy of the table, or just change
++    the .frm file.
++
++    If there are no data changes, but index changes, 'index_drop_buffer'
++    and/or 'index_add_buffer' are populated with offsets into
++    table->key_info or key_info_buffer respectively for the indexes
++    that need to be dropped and/or (re-)created.
++
++  RETURN VALUES
++    TRUE   error
++    FALSE  success
++*/
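++/*
++  Worked example (illustrative, under the checks implemented below):
++    ALTER TABLE t1 ALTER COLUMN c SET DEFAULT 10;
++      -- typically ALTER_TABLE_METADATA_ONLY: fields and keys unchanged
++    ALTER TABLE t1 ADD INDEX idx_c (c);
++      -- ALTER_TABLE_INDEX_CHANGED: only the key lists differ
++    ALTER TABLE t1 MODIFY c BIGINT;
++      -- ALTER_TABLE_DATA_CHANGED: a full copy is required
++*/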
++
++static
++bool
++compare_tables(TABLE *table,
++               Alter_info *alter_info,
++               HA_CREATE_INFO *create_info,
++               uint order_num,
++               enum_alter_table_change_level *need_copy_table,
++               KEY **key_info_buffer,
++               uint **index_drop_buffer, uint *index_drop_count,
++               uint **index_add_buffer, uint *index_add_count,
++               uint *candidate_key_count)
++{
++  Field **f_ptr, *field;
++  uint changes= 0, tmp;
++  uint key_count;
++  List_iterator_fast<Create_field> new_field_it, tmp_new_field_it;
++  Create_field *new_field, *tmp_new_field;
++  KEY_PART_INFO *key_part;
++  KEY_PART_INFO *end;
++  THD *thd= table->in_use;
++  /*
++    Remember if the new definition has new VARCHAR column;
++    create_info->varchar will be reset in mysql_prepare_create_table.
++  */
++  bool varchar= create_info->varchar;
++  bool not_nullable= true;
++  DBUG_ENTER("compare_tables");
++
++  /*
++    Create a copy of alter_info.
++    To compare the new and old table definitions, we need to "prepare"
++    the new definition - transform it from parser output to a format
++    that describes the final table layout (all column defaults are
++    initialized, duplicate columns are removed). This is done by
++    mysql_prepare_create_table.  Unfortunately,
++    mysql_prepare_create_table performs its transformations
++    "in-place", that is, modifies the argument.  Since we would
++    like to keep compare_tables() idempotent (not altering any
++    of the arguments) we create a copy of alter_info here and
++    pass it to mysql_prepare_create_table, then use the result
++    to evaluate possibility of fast ALTER TABLE, and then
++    destroy the copy.
++  */
++  Alter_info tmp_alter_info(*alter_info, thd->mem_root);
++  uint db_options= 0; /* not used */
++
++  /* Create the prepared information. */
++  if (mysql_prepare_create_table(thd, create_info,
++                                 &tmp_alter_info,
++                                 (table->s->tmp_table != NO_TMP_TABLE),
++                                 &db_options,
++                                 table->file, key_info_buffer,
++                                 &key_count, 0))
++    DBUG_RETURN(1);
++  /* Allocate result buffers. */
++  if (! (*index_drop_buffer=
++         (uint*) thd->alloc(sizeof(uint) * table->s->keys)) ||
++      ! (*index_add_buffer=
++         (uint*) thd->alloc(sizeof(uint) * tmp_alter_info.key_list.elements)))
++    DBUG_RETURN(1);
++  
++  /*
++    Some very basic checks. If number of fields changes, or the
++    handler, we need to run full ALTER TABLE. In the future
++    new fields can be added and old dropped without copy, but
++    not yet.
++
++    Also test whether an engine was given during ALTER TABLE; if so, we
++    are forced to run a regular alter table (copy).
++    E.g. ALTER TABLE tbl_name ENGINE=MyISAM.
++
++    For the following ones we also want to run regular alter table:
++    ALTER TABLE tbl_name ORDER BY ..
++    ALTER TABLE tbl_name CONVERT TO CHARACTER SET ..
++
++    At the moment we can't handle altering temporary tables without a copy.
++    We also test if OPTIMIZE TABLE was given and was mapped to alter table.
++    In that case we always do full copy.
++
++    There was a bug prior to mysql-4.0.25. Number of null fields was
++    calculated incorrectly. As a result the frm and data files get out of
++    sync after fast alter table. There is no way to determine by which
++    mysql version (in 4.0 and 4.1 branches) table was created, thus we
++    disable fast alter table for all tables created by mysql versions
++    prior to 5.0 branch.
++    See BUG#6236.
++  */
++  if (table->s->fields != alter_info->create_list.elements ||
++      table->s->db_type() != create_info->db_type ||
++      table->s->tmp_table ||
++      create_info->used_fields & HA_CREATE_USED_ENGINE ||
++      create_info->used_fields & HA_CREATE_USED_CHARSET ||
++      create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
++      (table->s->row_type != create_info->row_type) ||
++      create_info->used_fields & HA_CREATE_USED_PACK_KEYS ||
++      create_info->used_fields & HA_CREATE_USED_MAX_ROWS ||
++      (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) ||
++      order_num ||
++      !table->s->mysql_version ||
++      (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar))
++  {
++    *need_copy_table= ALTER_TABLE_DATA_CHANGED;
++    DBUG_RETURN(0);
++  }
++
++  /*
++    Use transformed info to evaluate possibility of fast ALTER TABLE
++    but use the preserved field to persist modifications.
++  */
++  new_field_it.init(alter_info->create_list);
++  tmp_new_field_it.init(tmp_alter_info.create_list);
++
++  /*
++    Go through fields and check if the original ones are compatible
++    with new table.
++  */
++  for (f_ptr= table->field, new_field= new_field_it++,
++       tmp_new_field= tmp_new_field_it++;
++       (field= *f_ptr);
++       f_ptr++, new_field= new_field_it++,
++       tmp_new_field= tmp_new_field_it++)
++  {
++    /* Make sure we have at least the default charset in use. */
++    if (!new_field->charset)
++      new_field->charset= create_info->default_table_charset;
++
++    /* Check that NULL behavior is same for old and new fields */
++    if ((tmp_new_field->flags & NOT_NULL_FLAG) !=
++	(uint) (field->flags & NOT_NULL_FLAG))
++    {
++      *need_copy_table= ALTER_TABLE_DATA_CHANGED;
++      DBUG_RETURN(0);
++    }
++
++    /* Don't pack rows in old tables if the user has requested this. */
++    if (create_info->row_type == ROW_TYPE_DYNAMIC ||
++	(tmp_new_field->flags & BLOB_FLAG) ||
++	(tmp_new_field->sql_type == MYSQL_TYPE_VARCHAR &&
++	create_info->row_type != ROW_TYPE_FIXED))
++      create_info->table_options|= HA_OPTION_PACK_RECORD;
++
++    /* Check if field was renamed */
++    field->flags&= ~FIELD_IS_RENAMED;
++    if (my_strcasecmp(system_charset_info,
++		      field->field_name,
++		      tmp_new_field->field_name))
++      field->flags|= FIELD_IS_RENAMED;      
++
++    /* Evaluate changes bitmap and send to check_if_incompatible_data() */
++    if (!(tmp= field->is_equal(tmp_new_field)))
++    {
++      *need_copy_table= ALTER_TABLE_DATA_CHANGED;
++      DBUG_RETURN(0);
++    }
++    // Clear indexed marker
++    field->flags&= ~FIELD_IN_ADD_INDEX;
++    changes|= tmp;
++  }
++
++  /*
++    Go through keys and check if the original ones are compatible
++    with new table.
++  */
++  KEY *table_key;
++  KEY *table_key_end= table->key_info + table->s->keys;
++  KEY *new_key;
++  KEY *new_key_end= *key_info_buffer + key_count;
++
++  DBUG_PRINT("info", ("index count old: %d  new: %d",
++                      table->s->keys, key_count));
++  /*
++    Step through all keys of the old table and search matching new keys.
++  */
++  *index_drop_count= 0;
++  *index_add_count= 0;
++  *candidate_key_count= 0;
++  for (table_key= table->key_info; table_key < table_key_end; table_key++)
++  {
++    KEY_PART_INFO *table_part;
++    KEY_PART_INFO *table_part_end= table_key->key_part + table_key->key_parts;
++    KEY_PART_INFO *new_part;
++
++    /*
++      Check if the key is a candidate key, i.e. a unique index with no
++      nullable index fields; such a key is either already the primary key
++      or could be promoted to primary key if the original primary key is
++      dropped. Count all candidate keys.
++    */
++    not_nullable= true;
++    for (table_part= table_key->key_part;
++         table_part < table_part_end;
++         table_part++)
++    {
++      not_nullable= not_nullable && (! table_part->field->maybe_null());
++    }
++    if ((table_key->flags & HA_NOSAME) && not_nullable)
++      (*candidate_key_count)++;
++
++    /* Search a new key with the same name. */
++    for (new_key= *key_info_buffer; new_key < new_key_end; new_key++)
++    {
++      if (! strcmp(table_key->name, new_key->name))
++        break;
++    }
++    if (new_key >= new_key_end)
++    {
++      /* Key not found. Add the offset of the key to the drop buffer. */
++      (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info;
++      DBUG_PRINT("info", ("index dropped: '%s'", table_key->name));
++      continue;
++    }
++
++    /* Check that the key types are compatible between old and new tables. */
++    if ((table_key->algorithm != new_key->algorithm) ||
++	((table_key->flags & HA_KEYFLAG_MASK) !=
++         (new_key->flags & HA_KEYFLAG_MASK)) ||
++        (table_key->key_parts != new_key->key_parts))
++      goto index_changed;
++
++    /*
++      Check that the key parts remain compatible between the old and
++      new tables.
++    */
++    for (table_part= table_key->key_part, new_part= new_key->key_part;
++         table_part < table_part_end;
++         table_part++, new_part++)
++    {
++      /*
++	Key definition has changed if we are using a different field or
++	if the used key part length is different. We know that the fields
++        did not change. Comparing field numbers is sufficient.
++      */
++      if ((table_part->length != new_part->length) ||
++          (table_part->fieldnr - 1 != new_part->fieldnr))
++	goto index_changed;
++    }
++    continue;
++
++  index_changed:
++    /* Key modified. Add the offset of the key to both buffers. */
++    (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info;
++    (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer;
++    key_part= new_key->key_part;
++    end= key_part + new_key->key_parts;
++    for(; key_part != end; key_part++)
++    {
++      // Mark field to be part of new key 
++      field= table->field[key_part->fieldnr];
++      field->flags|= FIELD_IN_ADD_INDEX;
++    }
++    DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
++  }
++  /*end of for (; table_key < table_key_end;) */
++
++  /*
++    Step through all keys of the new table and find matching old keys.
++  */
++  for (new_key= *key_info_buffer; new_key < new_key_end; new_key++)
++  {
++    /* Search an old key with the same name. */
++    for (table_key= table->key_info; table_key < table_key_end; table_key++)
++    {
++      if (! strcmp(table_key->name, new_key->name))
++        break;
++    }
++    if (table_key >= table_key_end)
++    {
++      /* Key not found. Add the offset of the key to the add buffer. */
++      (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer;
++      key_part= new_key->key_part;
++      end= key_part + new_key->key_parts;
++      for(; key_part != end; key_part++)
++      {
++        // Mark field to be part of new key 
++        field= table->field[key_part->fieldnr];
++        field->flags|= FIELD_IN_ADD_INDEX;
++      }
++      DBUG_PRINT("info", ("index added: '%s'", new_key->name));
++    }
++  }
++
++  /* Check if changes are compatible with current handler without a copy */
++  if (table->file->check_if_incompatible_data(create_info, changes))
++  {
++    *need_copy_table= ALTER_TABLE_DATA_CHANGED;
++    DBUG_RETURN(0);
++  }
++
++  if (*index_drop_count || *index_add_count)
++  {
++    *need_copy_table= ALTER_TABLE_INDEX_CHANGED;
++    DBUG_RETURN(0);
++  }
++
++  *need_copy_table= ALTER_TABLE_METADATA_ONLY; // Tables are compatible
++  DBUG_RETURN(0);
++}
++
++
++/*
++  Manages enabling/disabling of indexes for ALTER TABLE
++
++  SYNOPSIS
++    alter_table_manage_keys()
++      table                  Target table
++      indexes_were_disabled  Whether the indexes of the from table
++                             were disabled
++      keys_onoff             ENABLE | DISABLE | LEAVE_AS_IS
++
++  RETURN VALUES
++    FALSE  OK
++    TRUE   Error
++*/
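++/*
++  Usage sketch (illustrative): keys_onoff reflects the ALTER TABLE clause,
++  e.g.
++    ALTER TABLE t1 DISABLE KEYS;   maps to DISABLE
++    ALTER TABLE t1 ENABLE KEYS;    maps to ENABLE
++  while other ALTER TABLE forms pass LEAVE_AS_IS, preserving whatever
++  state the source table's non-unique indexes were in.
++*/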
++
++static
++bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
++                             enum enum_enable_or_disable keys_onoff)
++{
++  int error= 0;
++  DBUG_ENTER("alter_table_manage_keys");
++  DBUG_PRINT("enter", ("table=%p were_disabled=%d on_off=%d",
++             table, indexes_were_disabled, keys_onoff));
++
++  switch (keys_onoff) {
++  case ENABLE:
++    error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
++    break;
++  case LEAVE_AS_IS:
++    if (!indexes_were_disabled)
++      break;
++    /* fall-through: disabled indexes */
++  case DISABLE:
++    error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
++  }
++
++  if (error == HA_ERR_WRONG_COMMAND)
++  {
++    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                        ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
++                        table->s->table_name.str);
++    error= 0;
++  } else if (error)
++    table->file->print_error(error, MYF(0));
++
++  DBUG_RETURN(error);
++}
++
++
++/**
++  Prepare column and key definitions for CREATE TABLE in ALTER TABLE.
++
++  This function transforms the parse output of ALTER TABLE (lists of
++  columns and keys to add, drop or modify) into, essentially, a
++  CREATE TABLE definition: a list of columns and keys of the new
++  table. While doing so, it also performs some (but not all)
++  semantic checks.
++
++  This function is invoked when we know that we're going to
++  perform ALTER TABLE via a temporary table -- i.e. fast ALTER TABLE
++  is not possible, perhaps because the ALTER statement contains
++  instructions that require change in table data, not only in
++  table definition or indexes.
++
++  @param[in,out]  thd         thread handle. Used as a memory pool
++                              and source of environment information.
++  @param[in]      table       the source table, open and locked
++                              Used as an interface to the storage engine
++                              to acquire additional information about
++                              the original table.
++  @param[in,out]  create_info A blob with CREATE/ALTER TABLE
++                              parameters
++  @param[in,out]  alter_info  Another blob with ALTER/CREATE parameters.
++                              Originally create_info was used only in
++                              CREATE TABLE and alter_info only in ALTER TABLE.
++                              But since ALTER might end up doing CREATE,
++                              this distinction is gone and we just carry
++                              around two structures.
++
++  @return
++    Fills various create_info members based on information retrieved
++    from the storage engine.
++    Sets create_info->varchar if the table has a VARCHAR column.
++    Prepares alter_info->create_list and alter_info->key_list with
++    columns and keys of the new table.
++  @retval TRUE   error, out of memory or a semantic error in ALTER
++                 TABLE instructions
++  @retval FALSE  success
++*/
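++/*
++  Worked example (illustrative; table and column names are made up):
++  for an existing table t1(a INT, b INT), the statement
++    ALTER TABLE t1 DROP COLUMN b, ADD COLUMN c VARCHAR(10), MODIFY a BIGINT;
++  is folded here into the equivalent of the column list of
++    CREATE TABLE ... (a BIGINT, c VARCHAR(10))
++  which the copy-based ALTER TABLE then materializes in a temporary table.
++*/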
++
++static bool
++mysql_prepare_alter_table(THD *thd, TABLE *table,
++                          HA_CREATE_INFO *create_info,
++                          Alter_info *alter_info)
++{
++  /* New column definitions are added here */
++  List<Create_field> new_create_list;
++  /* New key definitions are added here */
++  List<Key> new_key_list;
++  List_iterator<Alter_drop> drop_it(alter_info->drop_list);
++  List_iterator<Create_field> def_it(alter_info->create_list);
++  List_iterator<Alter_column> alter_it(alter_info->alter_list);
++  List_iterator<Key> key_it(alter_info->key_list);
++  List_iterator<Create_field> find_it(new_create_list);
++  List_iterator<Create_field> field_it(new_create_list);
++  List<Key_part_spec> key_parts;
++  uint db_create_options= (table->s->db_create_options
++                           & ~(HA_OPTION_PACK_RECORD));
++  uint used_fields= create_info->used_fields;
++  KEY *key_info=table->key_info;
++  bool rc= TRUE;
++
++  DBUG_ENTER("mysql_prepare_alter_table");
++
++  create_info->varchar= FALSE;
++  /* Let new create options override the old ones */
++  if (!(used_fields & HA_CREATE_USED_MIN_ROWS))
++    create_info->min_rows= table->s->min_rows;
++  if (!(used_fields & HA_CREATE_USED_MAX_ROWS))
++    create_info->max_rows= table->s->max_rows;
++  if (!(used_fields & HA_CREATE_USED_AVG_ROW_LENGTH))
++    create_info->avg_row_length= table->s->avg_row_length;
++  if (!(used_fields & HA_CREATE_USED_DEFAULT_CHARSET))
++    create_info->default_table_charset= table->s->table_charset;
++  if (!(used_fields & HA_CREATE_USED_AUTO) && table->found_next_number_field)
++  {
++    /* Table has an autoincrement, copy value to new table */
++    table->file->info(HA_STATUS_AUTO);
++    create_info->auto_increment_value= table->file->stats.auto_increment_value;
++  }
++  if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE))
++    create_info->key_block_size= table->s->key_block_size;
++
++  if (!create_info->tablespace && create_info->storage_media != HA_SM_MEMORY)
++  {
++    char *tablespace= static_cast<char *>(thd->alloc(FN_LEN + 1));
++    /*
++       Regular alter table of disk stored table (no tablespace/storage change)
++       Copy tablespace name
++    */
++    if (tablespace &&
++        (table->file->get_tablespace_name(thd, tablespace, FN_LEN)))
++      create_info->tablespace= tablespace;
++  }
++  restore_record(table, s->default_values);     // Empty record for DEFAULT
++  Create_field *def;
++
++  /*
++    First collect all fields from the table which aren't in the drop_list
++  */
++  Field **f_ptr,*field;
++  for (f_ptr=table->field ; (field= *f_ptr) ; f_ptr++)
++  {
++    if (field->type() == MYSQL_TYPE_STRING)
++      create_info->varchar= TRUE;
++    /* Check if field should be dropped */
++    Alter_drop *drop;
++    drop_it.rewind();
++    while ((drop=drop_it++))
++    {
++      if (drop->type == Alter_drop::COLUMN &&
++	  !my_strcasecmp(system_charset_info,field->field_name, drop->name))
++      {
++	/* Reset auto_increment value if it was dropped */
++	if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
++	    !(used_fields & HA_CREATE_USED_AUTO))
++	{
++	  create_info->auto_increment_value=0;
++	  create_info->used_fields|=HA_CREATE_USED_AUTO;
++	}
++	break;
++      }
++    }
++    if (drop)
++    {
++      drop_it.remove();
++      continue;
++    }
++    /* Check if field is changed */
++    def_it.rewind();
++    while ((def=def_it++))
++    {
++      if (def->change &&
++	  !my_strcasecmp(system_charset_info,field->field_name, def->change))
++	break;
++    }
++    if (def)
++    {						// Field is changed
++      def->field=field;
++      if (!def->after)
++      {
++	new_create_list.push_back(def);
++	def_it.remove();
++      }
++    }
++    else
++    {
++      /*
++        This field was not dropped and not changed, add it to the list
++        for the new table.
++      */
++      def= new Create_field(field, field);
++      new_create_list.push_back(def);
++      alter_it.rewind();			// Change default if ALTER
++      Alter_column *alter;
++      while ((alter=alter_it++))
++      {
++	if (!my_strcasecmp(system_charset_info,field->field_name, alter->name))
++	  break;
++      }
++      if (alter)
++      {
++	if (def->sql_type == MYSQL_TYPE_BLOB)
++	{
++	  my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change);
++          goto err;
++	}
++	if ((def->def=alter->def))              // Use new default
++          def->flags&= ~NO_DEFAULT_VALUE_FLAG;
++        else
++          def->flags|= NO_DEFAULT_VALUE_FLAG;
++	alter_it.remove();
++      }
++    }
++  }
++  def_it.rewind();
++  while ((def=def_it++))			// Add new columns
++  {
++    if (def->change && ! def->field)
++    {
++      my_error(ER_BAD_FIELD_ERROR, MYF(0), def->change, table->s->table_name.str);
++      goto err;
++    }
++    /*
++      Check that the DATE/DATETIME NOT NULL field we are going to add
++      either has a default value or that '0000-00-00' is allowed by the
++      current sql_mode.
++      If the '0000-00-00' value isn't allowed then raise the error_if_not_empty
++      flag to allow ALTER TABLE only if the table to be altered is empty.
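++      For example (illustrative): with NO_ZERO_DATE in sql_mode,
++      "ALTER TABLE t1 ADD d DATE NOT NULL" can only proceed if t1 is
++      empty, because existing rows would otherwise receive the
++      disallowed '0000-00-00' value.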
++    */
++    if ((def->sql_type == MYSQL_TYPE_DATE ||
++         def->sql_type == MYSQL_TYPE_NEWDATE ||
++         def->sql_type == MYSQL_TYPE_DATETIME) &&
++         !alter_info->datetime_field &&
++         !(~def->flags & (NO_DEFAULT_VALUE_FLAG | NOT_NULL_FLAG)) &&
++         thd->variables.sql_mode & MODE_NO_ZERO_DATE)
++    {
++        alter_info->datetime_field= def;
++        alter_info->error_if_not_empty= TRUE;
++    }
++    if (!def->after)
++      new_create_list.push_back(def);
++    else if (def->after == first_keyword)
++      new_create_list.push_front(def);
++    else
++    {
++      Create_field *find;
++      find_it.rewind();
++      while ((find=find_it++))			// Add new columns
++      {
++	if (!my_strcasecmp(system_charset_info,def->after, find->field_name))
++	  break;
++      }
++      if (!find)
++      {
++	my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, table->s->table_name.str);
++        goto err;
++      }
++      find_it.after(def);			// Put element after this
++      alter_info->change_level= ALTER_TABLE_DATA_CHANGED;
++    }
++  }
++  if (alter_info->alter_list.elements)
++  {
++    my_error(ER_BAD_FIELD_ERROR, MYF(0),
++             alter_info->alter_list.head()->name, table->s->table_name.str);
++    goto err;
++  }
++  if (!new_create_list.elements)
++  {
++    my_message(ER_CANT_REMOVE_ALL_FIELDS, ER(ER_CANT_REMOVE_ALL_FIELDS),
++               MYF(0));
++    goto err;
++  }
++
++  /*
++    Collect all keys which aren't in the drop list. Add only those
++    for which some fields exist.
++  */
++
++  for (uint i=0 ; i < table->s->keys ; i++,key_info++)
++  {
++    char *key_name= key_info->name;
++    Alter_drop *drop;
++    drop_it.rewind();
++    while ((drop=drop_it++))
++    {
++      if (drop->type == Alter_drop::KEY &&
++	  !my_strcasecmp(system_charset_info,key_name, drop->name))
++	break;
++    }
++    if (drop)
++    {
++      drop_it.remove();
++      continue;
++    }
++
++    KEY_PART_INFO *key_part= key_info->key_part;
++    key_parts.empty();
++    for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
++    {
++      if (!key_part->field)
++	continue;				// Wrong field (from UNIREG)
++      const char *key_part_name=key_part->field->field_name;
++      Create_field *cfield;
++      field_it.rewind();
++      while ((cfield=field_it++))
++      {
++	if (cfield->change)
++	{
++	  if (!my_strcasecmp(system_charset_info, key_part_name,
++			     cfield->change))
++	    break;
++	}
++	else if (!my_strcasecmp(system_charset_info,
++				key_part_name, cfield->field_name))
++	  break;
++      }
++      if (!cfield)
++	continue;				// Field is removed
++      uint key_part_length=key_part->length;
++      if (cfield->field)			// Not new field
++      {
++        /*
++          If the field can't have only a part used in a key according to its
++          new type, or should not be used partially according to its
++          previous type, or the field length is less than the key part
++          length, unset the key part length.
++
++          We also unset the key part length if it is the same as the
++          old field's length, so the whole new field will be used.
++
++          BLOBs may have cfield->length == 0, which is why we test it before
++          checking whether cfield->length < key_part_length (in chars).
++         */
++        if (!Field::type_can_have_key_part(cfield->field->type()) ||
++            !Field::type_can_have_key_part(cfield->sql_type) ||
++            /* spatial keys can't have sub-key length */
++            (key_info->flags & HA_SPATIAL) ||
++            (cfield->field->field_length == key_part_length &&
++             !f_is_blob(key_part->key_type)) ||
++	    (cfield->length && (cfield->length < key_part_length /
++                                key_part->field->charset()->mbmaxlen)))
++	  key_part_length= 0;			// Use whole field
++      }
++      key_part_length /= key_part->field->charset()->mbmaxlen;
++      key_parts.push_back(new Key_part_spec(cfield->field_name,
++					    key_part_length));
++    }
++    if (key_parts.elements)
++    {
++      KEY_CREATE_INFO key_create_info;
++      Key *key;
++      enum Key::Keytype key_type;
++      bzero((char*) &key_create_info, sizeof(key_create_info));
++
++      key_create_info.algorithm= key_info->algorithm;
++      if (key_info->flags & HA_USES_BLOCK_SIZE)
++        key_create_info.block_size= key_info->block_size;
++      if (key_info->flags & HA_USES_PARSER)
++        key_create_info.parser_name= *plugin_name(key_info->parser);
++
++      if (key_info->flags & HA_SPATIAL)
++        key_type= Key::SPATIAL;
++      else if (key_info->flags & HA_NOSAME)
++      {
++        if (! my_strcasecmp(system_charset_info, key_name, primary_key_name))
++          key_type= Key::PRIMARY;
++        else
++          key_type= Key::UNIQUE;
++      }
++      else if (key_info->flags & HA_FULLTEXT)
++        key_type= Key::FULLTEXT;
++      else
++        key_type= Key::MULTIPLE;
++
++      key= new Key(key_type, key_name,
++                   &key_create_info,
++                   test(key_info->flags & HA_GENERATED_KEY),
++                   key_parts);
++      new_key_list.push_back(key);
++    }
++  }
++  {
++    Key *key;
++    while ((key=key_it++))			// Add new keys
++    {
++      if (key->type != Key::FOREIGN_KEY)
++        new_key_list.push_back(key);
++      if (key->name &&
++	  !my_strcasecmp(system_charset_info,key->name,primary_key_name))
++      {
++	my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name);
++        goto err;
++      }
++    }
++  }
++
++  if (alter_info->drop_list.elements)
++  {
++    my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
++             alter_info->drop_list.head()->name);
++    goto err;
++  }
++  if (alter_info->alter_list.elements)
++  {
++    my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
++             alter_info->alter_list.head()->name);
++    goto err;
++  }
++
++  if (!create_info->comment.str)
++  {
++    create_info->comment.str= table->s->comment.str;
++    create_info->comment.length= table->s->comment.length;
++  }
++
++  table->file->update_create_info(create_info);
++  if ((create_info->table_options &
++       (HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS)) ||
++      (used_fields & HA_CREATE_USED_PACK_KEYS))
++    db_create_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS);
++  if (create_info->table_options &
++      (HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM))
++    db_create_options&= ~(HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM);
++  if (create_info->table_options &
++      (HA_OPTION_DELAY_KEY_WRITE | HA_OPTION_NO_DELAY_KEY_WRITE))
++    db_create_options&= ~(HA_OPTION_DELAY_KEY_WRITE |
++			  HA_OPTION_NO_DELAY_KEY_WRITE);
++  create_info->table_options|= db_create_options;
++
++  if (table->s->tmp_table)
++    create_info->options|=HA_LEX_CREATE_TMP_TABLE;
++
++  rc= FALSE;
++  alter_info->create_list.swap(new_create_list);
++  alter_info->key_list.swap(new_key_list);
++err:
++  DBUG_RETURN(rc);
++}
++
++
++/*
++  Alter table
++
++  SYNOPSIS
++    mysql_alter_table()
++      thd              Thread handle
++      new_db           If there is a RENAME clause
++      new_name         If there is a RENAME clause
++      create_info      Information from the parsing phase about new
++                       table properties.
++      table_list       The table to change.
++      alter_info       Lists of fields, keys to be changed, added
++                       or dropped.
++      order_num        How many ORDER BY fields have been specified.
++      order            List of fields to ORDER BY.
++      ignore           Whether we have ALTER IGNORE TABLE
++
++  DESCRIPTION
++    This is a veery long function and is everything but the kitchen sink :)
++    It is used to alter a table and not only by ALTER TABLE but also
++    CREATE|DROP INDEX are mapped on this function.
++
++    When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS,
++    or both, then this function short cuts its operation by renaming
++    the table and/or enabling/disabling the keys. In this case, the FRM is
++    not changed directly by mysql_alter_table. However, if there is a
++    RENAME plus a change of a field or an index, the short cut is not used.
++    See how `create_list` is used to generate the new FRM regarding the
++    structure of the fields. The same is done for the indices of the table.
++
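++    For example (illustrative), "ALTER TABLE t1 RENAME TO t2" or
++    "ALTER TABLE t1 DISABLE KEYS" takes the short cut described above,
++    while "ALTER TABLE t1 ADD COLUMN c INT" goes through the full path
++    and copies the data into an intermediate table.
++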
++    Importantly, this function tries to do as little work as
++    possible, by finding out whether an intermediate table is needed to copy
++    data into and, when finishing the altering, using it as the original table.
++    For this reason the function compare_tables() is called, which decides,
++    based on all kinds of data, how similar the new and the original
++    tables are.
++
++  RETURN VALUES
++    FALSE  OK
++    TRUE   Error
++*/
++
++bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
++                       HA_CREATE_INFO *create_info,
++                       TABLE_LIST *table_list,
++                       Alter_info *alter_info,
++                       uint order_num, ORDER *order, bool ignore)
++{
++  TABLE *table, *new_table= 0, *name_lock= 0;
++  int error= 0;
++  char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN + 1];
++  char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias;
++  char index_file[FN_REFLEN], data_file[FN_REFLEN];
++  char path[FN_REFLEN + 1];
++  char reg_path[FN_REFLEN+1];
++  ha_rows copied,deleted;
++  handlerton *old_db_type, *new_db_type, *save_old_db_type;
++  legacy_db_type table_type;
++  frm_type_enum frm_type;
++  enum_alter_table_change_level need_copy_table= ALTER_TABLE_METADATA_ONLY;
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  uint fast_alter_partition= 0;
++  bool partition_changed= FALSE;
++#endif
++  bool need_lock_for_indexes= TRUE;
++  KEY  *key_info_buffer;
++  uint index_drop_count= 0;
++  uint *index_drop_buffer= NULL;
++  uint index_add_count= 0;
++  uint *index_add_buffer= NULL;
++  uint candidate_key_count= 0;
++  bool no_pk;
++  DBUG_ENTER("mysql_alter_table");
++
++  /*
++    Check if we attempt to alter the mysql.slow_log or
++    mysql.general_log table and return an error if
++    that is the case.
++    TODO: this design is obsolete and will be removed.
++  */
++  if (table_list && table_list->db && table_list->table_name)
++  {
++    int table_kind= 0;
++
++    table_kind= check_if_log_table(table_list->db_length, table_list->db,
++                                   table_list->table_name_length,
++                                   table_list->table_name, 0);
++
++    if (table_kind)
++    {
++      /* Disable alter of enabled log tables */
++      if (logger.is_log_table_enabled(table_kind))
++      {
++        my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER");
++        DBUG_RETURN(TRUE);
++      }
++
++      /* Disable alter of log tables to unsupported engine */
++      if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
++          (!create_info->db_type || /* unknown engine */
++           !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES)))
++      {
++        my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0));
++        DBUG_RETURN(TRUE);
++      }
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++      if (alter_info->flags & ALTER_PARTITION)
++      {
++        my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table");
++        DBUG_RETURN(TRUE);
++      }
++#endif
++    }
++  }
++
++  /*
++    Assign variables table_name, new_name, db, new_db, path, reg_path
++    to simplify further comparisons: we want to see if it's a RENAME
++    later just by comparing the pointers, avoiding the need for strcmp.
++  */
++  thd_proc_info(thd, "init");
++  table_name=table_list->table_name;
++  alias= (lower_case_table_names == 2) ? table_list->alias : table_name;
++  db=table_list->db;
++  if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db))
++    new_db= db;
++  build_table_filename(reg_path, sizeof(reg_path) - 1, db, table_name, reg_ext, 0);
++  build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0);
++
++  mysql_ha_rm_tables(thd, table_list, FALSE);
++
++  /* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */
++  if (alter_info->tablespace_op != NO_TABLESPACE_OP)
++    /* Conditionally writes to binlog. */
++    DBUG_RETURN(mysql_discard_or_import_tablespace(thd,table_list,
++						   alter_info->tablespace_op));
++  strxnmov(new_name_buff, sizeof (new_name_buff) - 1, mysql_data_home, "/", db, 
++           "/", table_name, reg_ext, NullS);
++  (void) unpack_filename(new_name_buff, new_name_buff);
++  /*
++    If this is just a rename of a view, short cut to the
++    following scenario: 1) lock LOCK_open 2) do a RENAME
++    3) unlock LOCK_open.
++    This is a copy-paste added to make sure
++    ALTER (sic:) TABLE .. RENAME works for views. ALTER VIEW is handled
++    as an independent branch in mysql_execute_command. The need
++    for a copy-paste arose because the main code flow of ALTER TABLE
++    ... RENAME tries to use open_ltable, which does not work for views
++    (open_ltable was never modified to merge table lists of child tables
++    into the main table list, like open_tables does).
++    This code is wrong and will be removed, please do not copy.
++  */
++  frm_type= mysql_frm_type(thd, new_name_buff, &table_type);
++  /* Rename a view */
++  /* Sic: there is a race here */
++  if (frm_type == FRMTYPE_VIEW && !(alter_info->flags & ~ALTER_RENAME))
++  {
++    /*
++      The following branch handles "ALTER VIEW v1 /no arguments/;"
++      This feature is not a documented one.
++      However, before "OPTIMIZE TABLE t1;" was implemented,
++      ALTER TABLE with no alter_specifications was used to force-rebuild
++      the table. That's why this grammar is allowed, and why we ignore
++      it for views. So just do nothing in such a case.
++    */
++    if (!new_name)
++    {
++      my_ok(thd);
++      DBUG_RETURN(FALSE);
++    }
++
++    /*
++      Avoid problems with a rename on a table that we have locked or
++      if the user is trying to do this in a transaction context
++    */
++
++    if (thd->locked_tables || thd->active_transaction())
++    {
++      my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
++                 ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
++      DBUG_RETURN(TRUE);
++    }
++
++    if (wait_if_global_read_lock(thd,0,1))
++      DBUG_RETURN(TRUE);
++    VOID(pthread_mutex_lock(&LOCK_open));
++    if (lock_table_names(thd, table_list))
++    {
++      error= 1;
++      goto view_err;
++    }
++    
++    if (!do_rename(thd, table_list, new_db, new_name, new_name, 1))
++    {
++      if (mysql_bin_log.is_open())
++      {
++        thd->clear_error();
++        Query_log_event qinfo(thd, thd->query(), thd->query_length(),
++                              0, FALSE, 0);
++        if ((error= mysql_bin_log.write(&qinfo)))
++          goto view_err_unlock;
++      }
++      my_ok(thd);
++    }
++
++view_err_unlock:
++    unlock_table_names(thd, table_list, (TABLE_LIST*) 0);
++
++view_err:
++    pthread_mutex_unlock(&LOCK_open);
++    start_waiting_global_read_lock(thd);
++    DBUG_RETURN(error);
++  }
++
++  if (!(table= open_n_lock_single_table(thd, table_list, TL_WRITE_ALLOW_READ)))
++    DBUG_RETURN(TRUE);
++  table->use_all_columns();
++
++  /*
++    Prohibit changing of the UNION list of a non-temporary MERGE table
++    under LOCK TABLES. It would be quite difficult to reuse a shrunken
++    set of tables from the old table or to open a new TABLE object for
++    an extended list and verify that they belong to locked tables.
++  */
++  if (thd->locked_tables &&
++      (create_info->used_fields & HA_CREATE_USED_UNION) &&
++      (table->s->tmp_table == NO_TMP_TABLE))
++  {
++    my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
++    DBUG_RETURN(TRUE);
++  }
++
++  /* Check that we are not trying to rename to an existing table */
++  if (new_name)
++  {
++    DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name));
++    strmov(new_name_buff,new_name);
++    strmov(new_alias= new_alias_buff, new_name);
++    if (lower_case_table_names)
++    {
++      if (lower_case_table_names != 2)
++      {
++	my_casedn_str(files_charset_info, new_name_buff);
++	new_alias= new_name;			// Create lower case table name
++      }
++      my_casedn_str(files_charset_info, new_name);
++    }
++    if (new_db == db &&
++	!my_strcasecmp(table_alias_charset, new_name_buff, table_name))
++    {
++      /*
++	Source and destination table names are equal: make later check
++	easier.
++      */
++      new_alias= new_name= table_name;
++    }
++    else
++    {
++      if (table->s->tmp_table != NO_TMP_TABLE)
++      {
++	if (find_temporary_table(thd,new_db,new_name_buff))
++	{
++	  my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff);
++	  DBUG_RETURN(TRUE);
++	}
++      }
++      else
++      {
++        if (lock_table_name_if_not_cached(thd, new_db, new_name, &name_lock))
++          DBUG_RETURN(TRUE);
++        if (!name_lock)
++        {
++	  my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
++	  DBUG_RETURN(TRUE);
++        }
++
++        build_table_filename(new_name_buff, sizeof(new_name_buff) - 1,
++                             new_db, new_name_buff, reg_ext, 0);
++        if (!access(new_name_buff, F_OK))
++	{
++	  /* Table will be closed in do_command() */
++	  my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
++          goto err;
++	}
++      }
++    }
++  }
++  else
++  {
++    new_alias= (lower_case_table_names == 2) ? alias : table_name;
++    new_name= table_name;
++  }
++
++  old_db_type= table->s->db_type();
++  if (!create_info->db_type)
++  {
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++    if (table->part_info &&
++        create_info->used_fields & HA_CREATE_USED_ENGINE)
++    {
++      /*
++        This case happens when the user specified
++        ENGINE = x where x is a non-existing storage engine
++        We set create_info->db_type to default_engine_type
++        to ensure we don't change underlying engine type
++        due to a erroneously given engine name.
++      */
++      create_info->db_type= table->part_info->default_engine_type;
++    }
++    else
++#endif
++      create_info->db_type= old_db_type;
++  }
++
++  if (check_engine(thd, new_name, create_info))
++    goto err;
++  new_db_type= create_info->db_type;
++
++  if ((new_db_type != old_db_type ||
++       alter_info->flags & ALTER_PARTITION) &&
++      !table->file->can_switch_engines())
++  {
++    my_error(ER_ROW_IS_REFERENCED, MYF(0));
++    goto err;
++  }
++
++  /*
++   If this is an ALTER TABLE and no explicit row type is specified, reuse
++   the table's row type.
++   Note: this is the same as if the row type was specified explicitly.
++  */
++  if (create_info->row_type == ROW_TYPE_NOT_USED)
++  {
++    /* ALTER TABLE without explicit row type */
++    create_info->row_type= table->s->row_type;
++  }
++  else
++  {
++    /* ALTER TABLE with specific row type */
++    create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT;
++  }
++
++  DBUG_PRINT("info", ("old type: %s  new type: %s",
++             ha_resolve_storage_engine_name(old_db_type),
++             ha_resolve_storage_engine_name(new_db_type)));
++  if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) ||
++      ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
++  {
++    DBUG_PRINT("info", ("doesn't support alter"));
++    my_error(ER_ILLEGAL_HA, MYF(0), table_name);
++    goto err;
++  }
++  
++  thd_proc_info(thd, "setup");
++  if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) &&
++      !table->s->tmp_table) // no need to touch frm
++  {
++    switch (alter_info->keys_onoff) {
++    case LEAVE_AS_IS:
++      break;
++    case ENABLE:
++      /*
++        wait_while_table_is_used() ensures that table being altered is
++        opened only by this thread and that TABLE::TABLE_SHARE::version
++        of TABLE object corresponding to this table is 0.
++        The latter guarantees that no DML statement will open this table
++        until ALTER TABLE finishes (i.e. until close_thread_tables())
++        while the fact that the table is still open gives us protection
++        from concurrent DDL statements.
++      */
++      VOID(pthread_mutex_lock(&LOCK_open));
++      wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
++      VOID(pthread_mutex_unlock(&LOCK_open));
++      DBUG_EXECUTE_IF("sleep_alter_enable_indexes", my_sleep(6000000););
++      error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
++      /* COND_refresh will be signaled in close_thread_tables() */
++      break;
++    case DISABLE:
++      VOID(pthread_mutex_lock(&LOCK_open));
++      wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
++      VOID(pthread_mutex_unlock(&LOCK_open));
++      error=table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
++      /* COND_refresh will be signaled in close_thread_tables() */
++      break;
++    default:
++      DBUG_ASSERT(FALSE);
++      error= 0;
++      break;
++    }
++    if (error == HA_ERR_WRONG_COMMAND)
++    {
++      error= 0;
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++			  ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
++			  table->alias);
++    }
++
++    /*
++      Unlike the above case, close_cached_table() below will remove ALL
++      instances of TABLE from the table cache (it will also remove the table
++      lock held by this thread). So to make the actual table renaming and
++      writing to the binlog atomic, we have to put them into the same critical
++      section protected by the LOCK_open mutex. This also closes the window
++      for races between the access() and mysql_rename_table() calls.
++    */
++
++    if (!error && (new_name != table_name || new_db != db))
++    {
++      thd_proc_info(thd, "rename");
++
++      /*
++        Workaround InnoDB ending the transaction when the table instance
++        is unlocked/closed (close_cached_table below), otherwise the trx
++        state will differ between the server and storage engine layers.
++      */
++      ha_autocommit_or_rollback(thd, 0);
++
++      VOID(pthread_mutex_lock(&LOCK_open));
++      /*
++        Then do a 'simple' rename of the table. First we need to close all
++        instances of 'source' table.
++      */
++      close_cached_table(thd, table);
++      /*
++        Then, we want to check once again that the target table does not exist.
++        Actually the order of these two steps does not matter since
++        earlier we took name-lock on the target table, so we do them
++        in this particular order only to be consistent with 5.0, in which
++        we don't take this name-lock and where this order really matters.
++        TODO: Investigate if we need this access() check at all.
++      */
++      if (!access(new_name_buff,F_OK))
++      {
++	my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name);
++	error= -1;
++      }
++      else
++      {
++	*fn_ext(new_name)=0;
++	if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias, 0))
++	  error= -1;
++        else if (Table_triggers_list::change_table_name(thd, db, table_name,
++                                                        new_db, new_alias))
++        {
++          VOID(mysql_rename_table(old_db_type, new_db, new_alias, db,
++                                  table_name, 0));
++          error= -1;
++        }
++      }
++    }
++    else
++      VOID(pthread_mutex_lock(&LOCK_open));
++
++    if (error == HA_ERR_WRONG_COMMAND)
++    {
++      error= 0;
++      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++			  ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
++			  table->alias);
++    }
++
++    if (!error)
++    {
++      error= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
++      if (!error)
++        my_ok(thd);
++    }
++    else if (error > 0)
++    {
++      table->file->print_error(error, MYF(0));
++      error= -1;
++    }
++    if (name_lock)
++      unlink_open_table(thd, name_lock, FALSE);
++    VOID(pthread_mutex_unlock(&LOCK_open));
++    table_list->table= NULL;                    // For query cache
++    query_cache_invalidate3(thd, table_list, 0);
++    DBUG_RETURN(error);
++  }
++
++  /* We have to do full alter table. */
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type,
++                            &partition_changed, &fast_alter_partition))
++    goto err;
++#endif
++  /*
++    If the old table had partitions and we are doing ALTER TABLE ...
++    engine= <new_engine>, the new table must preserve the original
++    partitioning. That means that the new engine is still the
++    partitioning engine, not the engine specified in the parser.
++    This is discovered  in prep_alter_part_table, which in such case
++    updates create_info->db_type.
++    Now we need to update the stack copy of create_info->db_type,
++    as otherwise we won't be able to correctly move the files of the
++    temporary table to the result table files.
++  */
++  new_db_type= create_info->db_type;
++
++  if (is_index_maintenance_unique (table, alter_info))
++    need_copy_table= ALTER_TABLE_DATA_CHANGED;
++
++  if (mysql_prepare_alter_table(thd, table, create_info, alter_info))
++    goto err;
++  
++  if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
++    need_copy_table= alter_info->change_level;
++
++  set_table_default_charset(thd, create_info, db);
++
++  if (thd->variables.old_alter_table
++      || (table->s->db_type() != create_info->db_type)
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++      || partition_changed
++#endif
++     )
++    need_copy_table= ALTER_TABLE_DATA_CHANGED;
++  else
++  {
++    enum_alter_table_change_level need_copy_table_res;
++    /* Check how much the tables differ. */
++    if (compare_tables(table, alter_info,
++                       create_info, order_num,
++                       &need_copy_table_res,
++                       &key_info_buffer,
++                       &index_drop_buffer, &index_drop_count,
++                       &index_add_buffer, &index_add_count,
++                       &candidate_key_count))
++      goto err;
++   
++    DBUG_EXECUTE_IF("alter_table_only_metadata_change", {
++      if (need_copy_table_res != ALTER_TABLE_METADATA_ONLY)
++        goto err; });
++    DBUG_EXECUTE_IF("alter_table_only_index_change", {
++      if (need_copy_table_res != ALTER_TABLE_INDEX_CHANGED)
++        goto err; });
++   
++    if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
++      need_copy_table= need_copy_table_res;
++  }
++
++  /*
++    If there are index changes only, try to do them online. "Index
++    changes only" means also that the handler for the table does not
++    change. The table is open and locked. The handler can be accessed.
++  */
++  if (need_copy_table == ALTER_TABLE_INDEX_CHANGED)
++  {
++    int   pk_changed= 0;
++    ulong alter_flags= 0;
++    ulong needed_online_flags= 0;
++    ulong needed_fast_flags= 0;
++    KEY   *key;
++    uint  *idx_p;
++    uint  *idx_end_p;
++
++    alter_flags= table->file->alter_table_flags(alter_info->flags);
++    DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
++    /* Check dropped indexes. */
++    for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
++         idx_p < idx_end_p;
++         idx_p++)
++    {
++      key= table->key_info + *idx_p;
++      DBUG_PRINT("info", ("index dropped: '%s'", key->name));
++      if (key->flags & HA_NOSAME)
++      {
++        /* 
++           Unique key. Check for "PRIMARY",
++           or if dropping the last unique key.
++        */
++        if ((uint) (key - table->key_info) == table->s->primary_key)
++        {
++          DBUG_PRINT("info", ("Dropping primary key"));
++          /* Primary key. */
++          needed_online_flags|=  HA_ONLINE_DROP_PK_INDEX;
++          needed_fast_flags|= HA_ONLINE_DROP_PK_INDEX_NO_WRITES;
++          pk_changed++;
++          candidate_key_count--;
++        }
++        else
++        {
++          KEY_PART_INFO *part_end= key->key_part + key->key_parts;
++          bool is_candidate_key= true;
++
++          /* Non-primary unique key. */
++          needed_online_flags|=  HA_ONLINE_DROP_UNIQUE_INDEX;
++          needed_fast_flags|= HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES;
++
++          /*
++            Check if all fields in key are declared
++            NOT NULL and adjust candidate_key_count
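++            (a "candidate key" here means a unique index covering only
++            NOT NULL columns, i.e. one that could serve as the table's
++            primary key)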
++          */
++          for (KEY_PART_INFO *key_part= key->key_part;
++               key_part < part_end;
++               key_part++)
++            is_candidate_key=
++              (is_candidate_key && 
++               (! table->field[key_part->fieldnr-1]->maybe_null()));
++          if (is_candidate_key)
++            candidate_key_count--;
++        }
++      }
++      else
++      {
++        /* Non-unique key. */
++        needed_online_flags|=  HA_ONLINE_DROP_INDEX;
++        needed_fast_flags|= HA_ONLINE_DROP_INDEX_NO_WRITES;
++      }
++    }
++    no_pk= ((table->s->primary_key == MAX_KEY) ||
++            (needed_online_flags & HA_ONLINE_DROP_PK_INDEX));
++    /* Check added indexes. */
++    for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
++         idx_p < idx_end_p;
++         idx_p++)
++    {
++      key= key_info_buffer + *idx_p;
++      DBUG_PRINT("info", ("index added: '%s'", key->name));
++      if (key->flags & HA_NOSAME)
++      {
++        /* Unique key */
++
++        KEY_PART_INFO *part_end= key->key_part + key->key_parts;    
++        bool is_candidate_key= true;
++
++        /*
++          Check if all fields in key are declared
++          NOT NULL
++         */
++        for (KEY_PART_INFO *key_part= key->key_part;
++             key_part < part_end;
++             key_part++)
++          is_candidate_key=
++            (is_candidate_key && 
++             (! table->field[key_part->fieldnr]->maybe_null()));
++
++        /*
++           Check for "PRIMARY"
++           or if adding first unique key
++           defined on non-nullable fields
++        */
++
++        if ((!my_strcasecmp(system_charset_info,
++                            key->name, primary_key_name)) ||
++            (no_pk && candidate_key_count == 0 && is_candidate_key))
++        {
++          DBUG_PRINT("info", ("Adding primary key"));
++          /* Primary key. */
++          needed_online_flags|=  HA_ONLINE_ADD_PK_INDEX;
++          needed_fast_flags|= HA_ONLINE_ADD_PK_INDEX_NO_WRITES;
++          pk_changed++;
++          no_pk= false;
++        }
++        else
++        {
++          /* Non-primary unique key. */
++          needed_online_flags|=  HA_ONLINE_ADD_UNIQUE_INDEX;
++          needed_fast_flags|= HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES;
++        }
++      }
++      else
++      {
++        /* Non-unique key. */
++        needed_online_flags|=  HA_ONLINE_ADD_INDEX;
++        needed_fast_flags|= HA_ONLINE_ADD_INDEX_NO_WRITES;
++      }
++    }
++
++    if ((candidate_key_count > 0) && 
++        (needed_online_flags & HA_ONLINE_DROP_PK_INDEX))
++    {
++      /*
++        The primary key is dropped while there is some other unique
++        NOT NULL key that should be converted to the primary key.
++      */
++      needed_online_flags|=  HA_ONLINE_ADD_PK_INDEX;
++      needed_fast_flags|= HA_ONLINE_ADD_PK_INDEX_NO_WRITES;
++      pk_changed= 2;
++    }
++
++    DBUG_PRINT("info", ("needed_online_flags: 0x%lx, needed_fast_flags: 0x%lx",
++                        needed_online_flags, needed_fast_flags));
++    /*
++      Online or fast add/drop index is possible only if
++      the primary key is not added and dropped in the same statement.
++      Otherwise we have to recreate the table.
++      need_copy_table is non-zero at this point.
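++      (pk_changed == 2 means the primary key is both dropped and added
++      in the same ALTER, including the implicit promotion handled above.)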
++    */
++    if ( pk_changed < 2 )
++    {
++      if ((alter_flags & needed_online_flags) == needed_online_flags)
++      {
++        /* All required online flags are present. */
++        need_copy_table= ALTER_TABLE_METADATA_ONLY;
++        need_lock_for_indexes= FALSE;
++      }
++      else if ((alter_flags & needed_fast_flags) == needed_fast_flags)
++      {
++        /* All required fast flags are present. */
++        need_copy_table= ALTER_TABLE_METADATA_ONLY;
++      }
++    }
++    DBUG_PRINT("info", ("need_copy_table: %u  need_lock: %d",
++                        need_copy_table, need_lock_for_indexes));
++  }
++
++  /*
++    It is better to have a negative test here, instead of a positive one like
++    alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|...
++    so that ALTER TABLE won't break when somebody adds a new flag.
++  */
++  if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
++    create_info->frm_only= 1;
++
++#ifdef WITH_PARTITION_STORAGE_ENGINE
++  if (fast_alter_partition)
++  {
++    DBUG_ASSERT(!name_lock);
++    DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info,
++                                           create_info, table_list,
++                                           db, table_name,
++                                           fast_alter_partition));
++  }
++#endif
++
++  my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix,
++	      current_pid, thd->thread_id);
++  /* Safety fix for innodb */
++  if (lower_case_table_names)
++    my_casedn_str(files_charset_info, tmp_name);
++
++  /*
++    Handling of symlinked tables:
++    If no rename:
++      Create new data file and index file on the same disk as the
++      old data and index files.
++      Copy data.
++      Rename new data file over old data file and new index file over
++      old index file.
++      Symlinks are not changed.
++
++   If rename:
++      Create new data file and index file on the same disk as the
++      old data and index files.  Create also symlinks to point at
++      the new tables.
++      Copy data.
++      At end, rename intermediate tables, and symlinks to intermediate
++      table, to final table name.
++      Remove old table and old symlinks
++
++    If rename is made to another database:
++      Create new tables in new database.
++      Copy data.
++      Remove old table and symlinks.
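++
++    (The symlinks in question come from tables created with the
++    DATA DIRECTORY / INDEX DIRECTORY options, where the data and index
++    files live outside the database directory; engines such as MyISAM
++    implement these options via symlinks in the database directory.)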
++  */
++  if (!strcmp(db, new_db))		// Ignore symlink if db changed
++  {
++    if (create_info->index_file_name)
++    {
++      /* Fix index_file_name to have 'tmp_name' as basename */
++      strmov(index_file, tmp_name);
++      create_info->index_file_name=fn_same(index_file,
++					   create_info->index_file_name,
++					   1);
++    }
++    if (create_info->data_file_name)
++    {
++      /* Fix data_file_name to have 'tmp_name' as basename */
++      strmov(data_file, tmp_name);
++      create_info->data_file_name=fn_same(data_file,
++					  create_info->data_file_name,
++					  1);
++    }
++  }
++  else
++    create_info->data_file_name=create_info->index_file_name=0;
++
++  DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
++  /*
++    Create a table with a temporary name.
++    With create_info->frm_only == 1 this creates a .frm file only.
++    We don't log the statement; it will be logged later.
++  */
++  tmp_disable_binlog(thd);
++  error= mysql_create_table_no_lock(thd, new_db, tmp_name,
++                                    create_info,
++                                    alter_info,
++                                    1, 0);
++  reenable_binlog(thd);
++  if (error)
++    goto err;
++
++  /* Open the table if we need to copy the data. */
++  DBUG_PRINT("info", ("need_copy_table: %u", need_copy_table));
++  if (need_copy_table != ALTER_TABLE_METADATA_ONLY)
++  {
++    if (table->s->tmp_table)
++    {
++      TABLE_LIST tbl;
++      bzero((void*) &tbl, sizeof(tbl));
++      tbl.db= new_db;
++      tbl.table_name= tbl.alias= tmp_name;
++      /* Table is in thd->temporary_tables */
++      new_table= open_table(thd, &tbl, thd->mem_root, (bool*) 0,
++                            MYSQL_LOCK_IGNORE_FLUSH);
++    }
++    else
++    {
++      char path[FN_REFLEN + 1];
++      /* table is a normal table: Create temporary table in same directory */
++      build_table_filename(path, sizeof(path) - 1, new_db, tmp_name, "",
++                           FN_IS_TMP);
++      /* Open our intermediate table */
++      new_table=open_temporary_table(thd, path, new_db, tmp_name,0);
++    }
++    if (!new_table)
++      goto err1;
++    /*
++      Note: In the case of a MERGE table, we do not attach children. We do not
++      copy data for MERGE tables. Only the children have data.
++    */
++  }
++
++  /* Copy the data if necessary. */
++  thd->count_cuted_fields= CHECK_FIELD_WARN;	// calc cuted fields
++  thd->cuted_fields=0L;
++  copied=deleted=0;
++  /*
++    We do not copy data for MERGE tables. Only the children have data.
++    MERGE tables have HA_NO_COPY_ON_ALTER set.
++  */
++  if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
++  {
++    /* We don't want to update TIMESTAMP fields during ALTER TABLE. */
++    new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
++    new_table->next_number_field=new_table->found_next_number_field;
++    thd_proc_info(thd, "copy to tmp table");
++    error= copy_data_between_tables(table, new_table,
++                                    alter_info->create_list, ignore,
++                                    order_num, order, &copied, &deleted,
++                                    alter_info->keys_onoff,
++                                    alter_info->error_if_not_empty);
++  }
++  else
++  {
++    VOID(pthread_mutex_lock(&LOCK_open));
++    wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN);
++    VOID(pthread_mutex_unlock(&LOCK_open));
++    thd_proc_info(thd, "manage keys");
++    alter_table_manage_keys(table, table->file->indexes_are_disabled(),
++                            alter_info->keys_onoff);
++    error= ha_autocommit_or_rollback(thd, 0);
++    if (end_active_trans(thd))
++      error= 1;
++  }
++  thd->count_cuted_fields= CHECK_FIELD_IGNORE;
++
++  /* If we did not need to copy, we might still need to add/drop indexes. */
++  if (! new_table)
++  {
++    uint          *key_numbers;
++    uint          *keyno_p;
++    KEY           *key_info;
++    KEY           *key;
++    uint          *idx_p;
++    uint          *idx_end_p;
++    KEY_PART_INFO *key_part;
++    KEY_PART_INFO *part_end;
++    DBUG_PRINT("info", ("No new_table, checking add/drop index"));
++
++    table->file->ha_prepare_for_alter();
++    if (index_add_count)
++    {
++      /* The add_index() method takes an array of KEY structs. */
++      key_info= (KEY*) thd->alloc(sizeof(KEY) * index_add_count);
++      key= key_info;
++      for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
++           idx_p < idx_end_p;
++           idx_p++, key++)
++      {
++        /* Copy the KEY struct. */
++        *key= key_info_buffer[*idx_p];
++        /* Fix the key parts. */
++        part_end= key->key_part + key->key_parts;
++        for (key_part= key->key_part; key_part < part_end; key_part++)
++          key_part->field= table->field[key_part->fieldnr];
++      }
++      /* Add the indexes. */
++      if ((error= table->file->add_index(table, key_info, index_add_count)))
++      {
++        /*
++          Exchange the key_info for the error message. If we exchange
++          key number by key name in the message later, we need correct info.
++        */
++        KEY *save_key_info= table->key_info;
++        table->key_info= key_info;
++        table->file->print_error(error, MYF(0));
++        table->key_info= save_key_info;
++        goto err1;
++      }
++    }
++    /*end of if (index_add_count)*/
++
++    if (index_drop_count)
++    {
++      /* The prepare_drop_index() method takes an array of key numbers. */
++      key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count);
++      keyno_p= key_numbers;
++      /* Get the number of each key. */
++      for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
++           idx_p < idx_end_p;
++           idx_p++, keyno_p++)
++        *keyno_p= *idx_p;
++      /*
++        Tell the handler to prepare for dropping indexes.
++        This re-numbers the indexes to get rid of gaps.
++      */
++      if ((error= table->file->prepare_drop_index(table, key_numbers,
++                                                  index_drop_count)))
++      {
++        table->file->print_error(error, MYF(0));
++        goto err1;
++      }
++
++      /* Tell the handler to finally drop the indexes. */
++      if ((error= table->file->final_drop_index(table)))
++      {
++        table->file->print_error(error, MYF(0));
++        goto err1;
++      }
++    }
++    /*end of if (index_drop_count)*/
++
++    /*
++      The final .frm file is already created as a temporary file
++      and will be renamed to the original table name later.
++    */
++
++    /* Need to commit before a table is unlocked (NDB requirement). */
++    DBUG_PRINT("info", ("Committing before unlocking table"));
++    if (ha_autocommit_or_rollback(thd, 0) || end_active_trans(thd))
++      goto err1;
++  }
++  /*end of if (! new_table) for add/drop index*/
++
++  if (table->s->tmp_table != NO_TMP_TABLE)
++  {
++    /* We changed a temporary table */
++    if (error)
++      goto err1;
++    /* Close lock if this is a transactional table */
++    if (thd->lock)
++    {
++      mysql_unlock_tables(thd, thd->lock);
++      thd->lock=0;
++    }
++    /*
++      If LOCK TABLES list is not empty and contains this table,
++      unlock the table and remove the table from this list.
++    */
++    mysql_lock_remove(thd, thd->locked_tables, table, FALSE);
++    /* Remove link to old table and rename the new one */
++    close_temporary_table(thd, table, 1, 1);
++    /* Should pass the 'new_name' as we store table name in the cache */
++    if (rename_temporary_table(thd, new_table, new_db, new_name))
++      goto err1;
++    /* We don't replicate alter table statement on temporary tables */
++    if (!thd->current_stmt_binlog_row_based &&
++        write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
++      DBUG_RETURN(TRUE);
++    goto end_temporary;
++  }
++
++  if (new_table)
++  {
++    /*
++      Close the intermediate table that will be the new table.
++      Note that MERGE tables do not have their children attached here.
++    */
++    intern_close_table(new_table);
++    my_free(new_table,MYF(0));
++  }
++  DEBUG_SYNC(thd, "alter_table_before_rename_result_table");
++  VOID(pthread_mutex_lock(&LOCK_open));
++  if (error)
++  {
++    VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
++    VOID(pthread_mutex_unlock(&LOCK_open));
++    goto err;
++  }
++
++  /*
++    Data is copied. Now we:
++    1) Wait until all other threads close old version of table.
++    2) Close instances of table open by this thread and replace them
++       with exclusive name-locks.
++    3) Rename the old table to a temp name, rename the new one to the
++       old name.
++    4) If we are under LOCK TABLES and don't do ALTER TABLE ... RENAME
++       we reopen new version of table.
++    5) Write statement to the binary log.
++    6) If we are under LOCK TABLES and do ALTER TABLE ... RENAME we
++       remove name-locks from list of open tables and table cache.
++    7) If we are not under LOCK TABLES we rely on the close_thread_tables()
++       call to remove name-locks from the table cache and list of open tables.
++  */
++
++  thd_proc_info(thd, "rename result table");
++  my_snprintf(old_name, sizeof(old_name), "%s2-%lx-%lx", tmp_file_prefix,
++	      current_pid, thd->thread_id);
++  if (lower_case_table_names)
++    my_casedn_str(files_charset_info, old_name);
++
++  wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME);
++  close_data_files_and_morph_locks(thd, db, table_name);
++
++  error=0;
++  save_old_db_type= old_db_type;
++
++  /*
++    This leads to the storage engine (SE) not being notified for renames in
++    mysql_rename_table(), because we just juggle with the FRM and nothing
++    more. If we have an intermediate table, then we notify the SE that
++    it should become the actual table. Later, we will recycle the old table.
++    However, in case of ALTER TABLE RENAME there might be no intermediate
++    table. This is when the old and new tables are compatible, according to
++    compare_tables(). Then, we need one additional call to
++    mysql_rename_table() with flag NO_FRM_RENAME, which does nothing else but
++    actual rename in the SE and the FRM is not touched. Note that, if the
++    table is renamed and the SE is also changed, then an intermediate table
++    is created and the additional call will not take place.
++  */
++  if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
++  {
++    DBUG_ASSERT(new_db_type == old_db_type);
++    /* This type cannot happen in regular ALTER. */
++    new_db_type= old_db_type= NULL;
++  }
++  if (mysql_rename_table(old_db_type, db, table_name, db, old_name,
++                         FN_TO_IS_TMP))
++  {
++    error=1;
++    VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
++  }
++  else if (mysql_rename_table(new_db_type, new_db, tmp_name, new_db,
++                              new_alias, FN_FROM_IS_TMP) ||
++           ((new_name != table_name || new_db != db) && // we also do rename
++           (need_copy_table != ALTER_TABLE_METADATA_ONLY ||
++            mysql_rename_table(save_old_db_type, db, table_name, new_db,
++                               new_alias, NO_FRM_RENAME)) &&
++           Table_triggers_list::change_table_name(thd, db, table_name,
++                                                  new_db, new_alias)))
++  {
++    /* Try to get everything back. */
++    error=1;
++    VOID(quick_rm_table(new_db_type,new_db,new_alias, 0));
++    VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
++    VOID(mysql_rename_table(old_db_type, db, old_name, db, alias,
++                            FN_FROM_IS_TMP));
++  }
++
++  if (error)
++  {
++    /* This shouldn't happen. But let us play it safe. */
++    goto err_with_placeholders;
++  }
++
++  if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
++  {
++    /*
++      Now we have to inform the handler that the new .FRM file is in place.
++      To do this we need to obtain a handler object for it.
++      NO need to tamper with MERGE tables. The real open is done later.
++    */
++    TABLE *t_table;
++    if (new_name != table_name || new_db != db)
++    {
++      table_list->alias= new_name;
++      table_list->table_name= new_name;
++      table_list->table_name_length= strlen(new_name);
++      table_list->db= new_db;
++      table_list->db_length= strlen(new_db);
++      table_list->table= name_lock;
++      if (reopen_name_locked_table(thd, table_list, FALSE))
++        goto err_with_placeholders;
++      t_table= table_list->table;
++    }
++    else
++    {
++      if (reopen_table(table))
++        goto err_with_placeholders;
++      t_table= table;
++    }
++    /* Tell the handler that a new frm file is in place. */
++    if (t_table->file->ha_create_handler_files(path, NULL, CHF_INDEX_FLAG,
++                                               create_info))
++      goto err_with_placeholders;
++    if (thd->locked_tables && new_name == table_name && new_db == db)
++    {
++      /*
++        We are going to reopen the table down the road, so we have to restore
++        the state of the TABLE object which we used to obtain the handler
++        object, to make it suitable for reopening.
++      */
++      DBUG_ASSERT(t_table == table);
++      table->open_placeholder= 1;
++      close_handle_and_leave_table_as_lock(table);
++    }
++  }
++
++  VOID(quick_rm_table(old_db_type, db, old_name, FN_IS_TMP));
++
++  if (thd->locked_tables && new_name == table_name && new_db == db)
++  {
++    thd->in_lock_tables= 1;
++    error= reopen_tables(thd, 1, 1);
++    thd->in_lock_tables= 0;
++    if (error)
++      goto err_with_placeholders;
++  }
++  VOID(pthread_mutex_unlock(&LOCK_open));
++
++  thd_proc_info(thd, "end");
++
++  DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000););
++  DEBUG_SYNC(thd, "alter_table_before_main_binlog");
++
++  ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
++                      thd->query(), thd->query_length(),
++                      db, table_name);
++
++  DBUG_ASSERT(!(mysql_bin_log.is_open() &&
++                thd->current_stmt_binlog_row_based &&
++                (create_info->options & HA_LEX_CREATE_TMP_TABLE)));
++  if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
++    DBUG_RETURN(TRUE);
++
++  if (ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME))
++  {
++    /*
++      For the alter table to be properly flushed to the logs, we
++      have to open the new table.  If not, we get a problem on server
++      shutdown. But we do not need to attach MERGE children.
++    */
++    char path[FN_REFLEN];
++    TABLE *t_table;
++    build_table_filename(path + 1, sizeof(path) - 1, new_db, table_name, "", 0);
++    t_table= open_temporary_table(thd, path, new_db, tmp_name, 0);
++    if (t_table)
++    {
++      intern_close_table(t_table);
++      my_free(t_table, MYF(0));
++    }
++    else
++      sql_print_warning("Could not open table %s.%s after rename\n",
++                        new_db,table_name);
++    ha_flush_logs(old_db_type);
++  }
++  table_list->table=0;				// For query cache
++  query_cache_invalidate3(thd, table_list, 0);
++
++  if (thd->locked_tables && (new_name != table_name || new_db != db))
++  {
++    /*
++      If we are under LOCK TABLES and did ALTER TABLE with RENAME, we need
++      to remove placeholders for the old table and for the target table
++      from the list of open tables and table cache. If we are not under
++      LOCK TABLES we can rely on close_thread_tables() doing this job.
++    */
++    pthread_mutex_lock(&LOCK_open);
++    unlink_open_table(thd, table, FALSE);
++    unlink_open_table(thd, name_lock, FALSE);
++    pthread_mutex_unlock(&LOCK_open);
++  }
++
++end_temporary:
++  my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
++	      (ulong) (copied + deleted), (ulong) deleted,
++	      (ulong) thd->cuted_fields);
++  my_ok(thd, copied + deleted, 0L, tmp_name);
++  thd->some_tables_deleted=0;
++  DBUG_RETURN(FALSE);
++
++err1:
++  if (new_table)
++  {
++    /* close_temporary_table() frees the new_table pointer. */
++    close_temporary_table(thd, new_table, 1, 1);
++  }
++  else
++    VOID(quick_rm_table(new_db_type, new_db, tmp_name, 
++                        create_info->frm_only
++                        ? FN_IS_TMP | FRM_ONLY
++                        : FN_IS_TMP));
++
++err:
++  /*
++    No default value was provided for a DATE/DATETIME field, the
++    current sql_mode doesn't allow the '0000-00-00' value and
++    the table to be altered isn't empty.
++    Report error here.
++  */
++  if (alter_info->error_if_not_empty && thd->row_count)
++  {
++    const char *f_val= 0;
++    enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE;
++    switch (alter_info->datetime_field->sql_type)
++    {
++      case MYSQL_TYPE_DATE:
++      case MYSQL_TYPE_NEWDATE:
++        f_val= "0000-00-00";
++        t_type= MYSQL_TIMESTAMP_DATE;
++        break;
++      case MYSQL_TYPE_DATETIME:
++        f_val= "0000-00-00 00:00:00";
++        t_type= MYSQL_TIMESTAMP_DATETIME;
++        break;
++      default:
++        /* Shouldn't get here. */
++        DBUG_ASSERT(0);
++    }
++    bool save_abort_on_warning= thd->abort_on_warning;
++    thd->abort_on_warning= TRUE;
++    make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
++                                 f_val, strlength(f_val), t_type,
++                                 alter_info->datetime_field->field_name);
++    thd->abort_on_warning= save_abort_on_warning;
++  }
++  if (name_lock)
++  {
++    pthread_mutex_lock(&LOCK_open);
++    unlink_open_table(thd, name_lock, FALSE);
++    pthread_mutex_unlock(&LOCK_open);
++  }
++  DBUG_RETURN(TRUE);
++
++err_with_placeholders:
++  /*
++    An error happened while we were holding an exclusive name-lock on the
++    table being altered. To be safe under LOCK TABLES we should remove
++    placeholders from the list of open tables and the table cache.
++  */
++  unlink_open_table(thd, table, FALSE);
++  if (name_lock)
++    unlink_open_table(thd, name_lock, FALSE);
++  VOID(pthread_mutex_unlock(&LOCK_open));
++  DBUG_RETURN(TRUE);
++}
++/* mysql_alter_table */
++
++static int
++copy_data_between_tables(TABLE *from,TABLE *to,
++			 List<Create_field> &create,
++                         bool ignore,
++			 uint order_num, ORDER *order,
++			 ha_rows *copied,
++			 ha_rows *deleted,
++                         enum enum_enable_or_disable keys_onoff,
++                         bool error_if_not_empty)
++{
++  int error;
++  Copy_field *copy,*copy_end;
++  ulong found_count,delete_count;
++  THD *thd= current_thd;
++  uint length= 0;
++  SORT_FIELD *sortorder;
++  READ_RECORD info;
++  TABLE_LIST   tables;
++  List<Item>   fields;
++  List<Item>   all_fields;
++  ha_rows examined_rows;
++  bool auto_increment_field_copied= 0;
++  ulong save_sql_mode;
++  ulonglong prev_insert_id;
++  DBUG_ENTER("copy_data_between_tables");
++
++  /*
++    Turn off recovery logging since rollback of an alter table is to
++    delete the new table so there is no need to log the changes to it.
++    
++    This needs to be done before external_lock
++  */
++  error= ha_enable_transaction(thd, FALSE);
++  if (error)
++    DBUG_RETURN(-1);
++  
++  if (!(copy= new Copy_field[to->s->fields]))
++    DBUG_RETURN(-1);				/* purecov: inspected */
++
++  if (to->file->ha_external_lock(thd, F_WRLCK))
++    DBUG_RETURN(-1);
++
++  /* We need external lock before we can disable/enable keys */
++  alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
++
++  /* We can abort alter table for any table type */
++  thd->abort_on_warning= !ignore && test(thd->variables.sql_mode &
++                                         (MODE_STRICT_TRANS_TABLES |
++                                          MODE_STRICT_ALL_TABLES));
++
++  from->file->info(HA_STATUS_VARIABLE);
++  to->file->ha_start_bulk_insert(from->file->stats.records);
++
++  save_sql_mode= thd->variables.sql_mode;
++
++  List_iterator<Create_field> it(create);
++  Create_field *def;
++  copy_end=copy;
++  for (Field **ptr=to->field ; *ptr ; ptr++)
++  {
++    def=it++;
++    if (def->field)
++    {
++      if (*ptr == to->next_number_field)
++      {
++        auto_increment_field_copied= TRUE;
++        /*
++          If we are going to copy contents of one auto_increment column to
++          another auto_increment column it is sensible to preserve zeroes.
++          This condition also covers case when we are don't actually alter
++          auto_increment column.
++        */
++        if (def->field == from->found_next_number_field)
++          thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
++      }
++      (copy_end++)->set(*ptr,def->field,0);
++    }
++
++  }
++
++  found_count=delete_count=0;
++
++  if (order)
++  {
++    if (to->s->primary_key != MAX_KEY && to->file->primary_key_is_clustered())
++    {
++      char warn_buff[MYSQL_ERRMSG_SIZE];
++      my_snprintf(warn_buff, sizeof(warn_buff), 
++                  "ORDER BY ignored as there is a user-defined clustered index"
++                  " in the table '%-.192s'", from->s->table_name.str);
++      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
++                   warn_buff);
++    }
++    else
++    {
++      from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
++                                                MYF(MY_FAE | MY_ZEROFILL));
++      bzero((char *) &tables, sizeof(tables));
++      tables.table= from;
++      tables.alias= tables.table_name= from->s->table_name.str;
++      tables.db= from->s->db.str;
++      error= 1;
++
++      if (thd->lex->select_lex.setup_ref_array(thd, order_num) ||
++          setup_order(thd, thd->lex->select_lex.ref_pointer_array,
++                      &tables, fields, all_fields, order) ||
++          !(sortorder= make_unireg_sortorder(order, &length, NULL)) ||
++          (from->sort.found_records= filesort(thd, from, sortorder, length,
++                                              (SQL_SELECT *) 0, HA_POS_ERROR,
++                                              1, &examined_rows)) ==
++          HA_POS_ERROR)
++        goto err;
++    }
++  };
++
++  /* Tell handler that we have values for all columns in the to table */
++  to->use_all_columns();
++  init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
++  if (ignore)
++    to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
++  thd->row_count= 0;
++  restore_record(to, s->default_values);        // Create empty record
++  while (!(error=info.read_record(&info)))
++  {
++    if (thd->killed)
++    {
++      thd->send_kill_message();
++      error= 1;
++      break;
++    }
++    thd->row_count++;
++    /* Return error if source table isn't empty. */
++    if (error_if_not_empty)
++    {
++      error= 1;
++      break;
++    }
++    if (to->next_number_field)
++    {
++      if (auto_increment_field_copied)
++        to->auto_increment_field_not_null= TRUE;
++      else
++        to->next_number_field->reset();
++    }
++    
++    for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
++    {
++      copy_ptr->do_copy(copy_ptr);
++    }
++    prev_insert_id= to->file->next_insert_id;
++    error=to->file->ha_write_row(to->record[0]);
++    to->auto_increment_field_not_null= FALSE;
++    if (error)
++    {
++      if (!ignore ||
++          to->file->is_fatal_error(error, HA_CHECK_DUP))
++      {
++         if (!to->file->is_fatal_error(error, HA_CHECK_DUP))
++         {
++           uint key_nr= to->file->get_dup_key(error);
++           if ((int) key_nr >= 0)
++           {
++             const char *err_msg= ER(ER_DUP_ENTRY_WITH_KEY_NAME);
++             if (key_nr == 0 &&
++                 (to->key_info[0].key_part[0].field->flags &
++                  AUTO_INCREMENT_FLAG))
++               err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE);
++             to->file->print_keydup_error(key_nr, err_msg);
++             break;
++           }
++         }
++
++	to->file->print_error(error,MYF(0));
++	break;
++      }
++      to->file->restore_auto_increment(prev_insert_id);
++      delete_count++;
++    }
++    else
++      found_count++;
++  }
++  end_read_record(&info);
++  free_io_cache(from);
++  delete [] copy;				// This is never 0
++
++  if (to->file->ha_end_bulk_insert() && error <= 0)
++  {
++    to->file->print_error(my_errno,MYF(0));
++    error=1;
++  }
++  to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
++
++  if (ha_enable_transaction(thd, TRUE))
++  {
++    error= 1;
++    goto err;
++  }
++  
++  /*
++    Ensure that the new table is saved properly to disk so that we
++    can do a rename
++  */
++  if (ha_autocommit_or_rollback(thd, 0))
++    error=1;
++  if (end_active_trans(thd))
++    error=1;
++
++ err:
++  thd->variables.sql_mode= save_sql_mode;
++  thd->abort_on_warning= 0;
++  free_io_cache(from);
++  *copied= found_count;
++  *deleted=delete_count;
++  to->file->ha_release_auto_increment();
++  if (to->file->ha_external_lock(thd,F_UNLCK))
++    error=1;
++  DBUG_RETURN(error > 0 ? -1 : 0);
++}
++
++
++/*
++  Recreates tables by calling mysql_alter_table().
++
++  SYNOPSIS
++    mysql_recreate_table()
++    thd			Thread handler
++    tables		Tables to recreate
++
++ RETURN
++    Like mysql_alter_table().
++*/
++bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list)
++{
++  HA_CREATE_INFO create_info;
++  Alter_info alter_info;
++
++  DBUG_ENTER("mysql_recreate_table");
++  DBUG_ASSERT(!table_list->next_global);
++  /*
++    table_list->table has been closed and freed. Do not reference
++    uninitialized data. open_tables() could fail.
++  */
++  table_list->table= NULL;
++
++  bzero((char*) &create_info, sizeof(create_info));
++  create_info.row_type=ROW_TYPE_NOT_USED;
++  create_info.default_table_charset=default_charset_info;
++  /* Force alter table to recreate table */
++  alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE);
++  DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
++                                table_list, &alter_info, 0,
++                                (ORDER *) 0, 0));
++}
++
++
++bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
++                          HA_CHECK_OPT *check_opt)
++{
++  TABLE_LIST *table;
++  List<Item> field_list;
++  Item *item;
++  Protocol *protocol= thd->protocol;
++  DBUG_ENTER("mysql_checksum_table");
++
++  field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2));
++  item->maybe_null= 1;
++  field_list.push_back(item= new Item_int("Checksum", (longlong) 1,
++                                          MY_INT64_NUM_DECIMAL_DIGITS));
++  item->maybe_null= 1;
++  if (protocol->send_fields(&field_list,
++                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
++    DBUG_RETURN(TRUE);
++
++  /* Open one table after the other to keep lock time as short as possible. */
++  for (table= tables; table; table= table->next_local)
++  {
++    char table_name[NAME_LEN*2+2];
++    TABLE *t;
++
++    strxmov(table_name, table->db ,".", table->table_name, NullS);
++
++    t= table->table= open_n_lock_single_table(thd, table, TL_READ);
++    thd->clear_error();			// these errors shouldn't get client
++
++    protocol->prepare_for_resend();
++    protocol->store(table_name, system_charset_info);
++
++    if (!t)
++    {
++      /* Table didn't exist */
++      protocol->store_null();
++      thd->clear_error();
++    }
++    else
++    {
++      if (t->file->ha_table_flags() & HA_HAS_CHECKSUM &&
++	  !(check_opt->flags & T_EXTEND))
++	protocol->store((ulonglong)t->file->checksum());
++      else if (!(t->file->ha_table_flags() & HA_HAS_CHECKSUM) &&
++	       (check_opt->flags & T_QUICK))
++	protocol->store_null();
++      else
++      {
++	/* calculating table's checksum */
++	ha_checksum crc= 0;
++        uchar null_mask=256 -  (1 << t->s->last_null_bit_pos);
++
++        t->use_all_columns();
++
++	if (t->file->ha_rnd_init(1))
++	  protocol->store_null();
++	else
++	{
++	  for (;;)
++	  {
++            if (thd->killed)
++            {
++              /* 
++                 we've been killed; let handler clean up, and remove the 
++                 partial current row from the recordset (embedded lib) 
++              */
++              t->file->ha_rnd_end();
++              thd->protocol->remove_last_row();
++              goto err;
++            }
++	    ha_checksum row_crc= 0;
++            int error= t->file->rnd_next(t->record[0]);
++            if (unlikely(error))
++            {
++              if (error == HA_ERR_RECORD_DELETED)
++                continue;
++              break;
++            }
++	    if (t->s->null_bytes)
++            {
++              /* fix undefined null bits */
++              t->record[0][t->s->null_bytes-1] |= null_mask;
++              if (!(t->s->db_create_options & HA_OPTION_PACK_RECORD))
++                t->record[0][0] |= 1;
++
++	      row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes);
++            }
++
++	    for (uint i= 0; i < t->s->fields; i++ )
++	    {
++	      Field *f= t->field[i];
++
++             /*
++               BLOB and VARCHAR have pointers in their field, we must convert
++               to string; GEOMETRY is implemented on top of BLOB.
++               BIT may store its data among NULL bits, convert as well.
++             */
++              switch (f->type()) {
++                case MYSQL_TYPE_BLOB:
++                case MYSQL_TYPE_VARCHAR:
++                case MYSQL_TYPE_GEOMETRY:
++                case MYSQL_TYPE_BIT:
++                {
++                  String tmp;
++                  f->val_str(&tmp);
++                  row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(),
++                           tmp.length());
++                  break;
++                }
++                default:
++                  row_crc= my_checksum(row_crc, f->ptr, f->pack_length());
++                  break;
++	      }
++	    }
++
++	    crc+= row_crc;
++	  }
++	  protocol->store((ulonglong)crc);
++          t->file->ha_rnd_end();
++	}
++      }
++      thd->clear_error();
++      close_thread_tables(thd);
++      table->table=0;				// For query cache
++    }
++    if (protocol->write())
++      goto err;
++  }
++
++  my_eof(thd);
++  DBUG_RETURN(FALSE);
++
++ err:
++  close_thread_tables(thd);			// Shouldn't be needed
++  if (table)
++    table->table=0;
++  DBUG_RETURN(TRUE);
++}
++
++static bool check_engine(THD *thd, const char *table_name,
++                         HA_CREATE_INFO *create_info)
++{
++  handlerton **new_engine= &create_info->db_type;
++  handlerton *req_engine= *new_engine;
++  bool no_substitution=
++        test(thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION);
++  if (!(*new_engine= ha_checktype(thd, ha_legacy_type(req_engine),
++                                  no_substitution, 1)))
++    return TRUE;
++
++  if (req_engine && req_engine != *new_engine)
++  {
++    push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
++                       ER_WARN_USING_OTHER_HANDLER,
++                       ER(ER_WARN_USING_OTHER_HANDLER),
++                       ha_resolve_storage_engine_name(*new_engine),
++                       table_name);
++  }
++  if (create_info->options & HA_LEX_CREATE_TMP_TABLE &&
++      ha_check_storage_engine_flag(*new_engine, HTON_TEMPORARY_NOT_SUPPORTED))
++  {
++    if (create_info->used_fields & HA_CREATE_USED_ENGINE)
++    {
++      my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0),
++               ha_resolve_storage_engine_name(*new_engine), "TEMPORARY");
++      *new_engine= 0;
++      return TRUE;
++    }
++    *new_engine= myisam_hton;
++  }
++  return FALSE;
++}
+diff -urN mysql-old/sql/sql_yacc.cc mysql/sql/sql_yacc.cc
+--- mysql-old/sql/sql_yacc.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/sql_yacc.cc	2011-05-10 17:56:01.630015710 +0000
+@@ -16217,7 +16217,7 @@
+                from 0" (4 in fact), unspecified means "don't change the position
+                (keep the preceding value)").
+             */
+-            Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
++            Lex->mi.pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
+           }
+     break;
+ 
+@@ -16237,7 +16237,7 @@
+     {
+             Lex->mi.relay_log_pos = (yyvsp[(3) - (3)].ulong_num);
+             /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */
+-            Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
++            Lex->mi.relay_log_pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
+           }
+     break;
+ 
+diff -urN mysql-old/sql/sql_yacc.yy mysql/sql/sql_yacc.yy
+--- mysql-old/sql/sql_yacc.yy	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/sql_yacc.yy	2011-05-10 17:56:01.650015709 +0000
+@@ -1805,7 +1805,7 @@
+                from 0" (4 in fact), unspecified means "don't change the position
+                (keep the preceding value)").
+             */
+-            Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
++            Lex->mi.pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
+           }
+         | RELAY_LOG_FILE_SYM EQ TEXT_STRING_sys
+           {
+@@ -1815,7 +1815,7 @@
+           {
+             Lex->mi.relay_log_pos = $3;
+             /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */
+-            Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
++            Lex->mi.relay_log_pos = MYSQL_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
+           }
+         ;
+ 
+diff -urN mysql-old/sql/thr_malloc.cc mysql/sql/thr_malloc.cc
+--- mysql-old/sql/thr_malloc.cc	2011-05-10 17:45:45.633349043 +0000
++++ mysql/sql/thr_malloc.cc	2011-05-10 17:56:01.656682376 +0000
+@@ -130,7 +130,7 @@
+   if ((from_cs == &my_charset_bin) || (to_cs == &my_charset_bin))
+   {
+     // Safety if to_cs->mbmaxlen > 0
+-    new_length= min(arg_length, max_res_length);
++    new_length= MYSQL_MIN(arg_length, max_res_length);
+     memcpy(pos, str, new_length);
+   }
+   else
+diff -urN mysql-old/sql/tztime.cc mysql/sql/tztime.cc
+--- mysql-old/sql/tztime.cc	2011-05-10 17:45:45.636682376 +0000
++++ mysql/sql/tztime.cc	2011-05-10 17:56:01.656682376 +0000
+@@ -167,7 +167,7 @@
+       uchar buf[sizeof(struct tzhead) + sizeof(my_time_t) * TZ_MAX_TIMES +
+                 TZ_MAX_TIMES + sizeof(TRAN_TYPE_INFO) * TZ_MAX_TYPES +
+ #ifdef ABBR_ARE_USED
+-               max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) +
++               MYSQL_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) +
+ #endif
+                sizeof(LS_INFO) * TZ_MAX_LEAPS];
+     } u;
+@@ -396,7 +396,7 @@
+       Let us choose end_t as point before next time type change or leap
+       second correction.
+     */
+-    end_t= min((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1:
++    end_t= MYSQL_MIN((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1:
+                                                 MY_TIME_T_MAX,
+                (next_leap_idx < sp->leapcnt) ?
+                  sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX);
+@@ -1823,7 +1823,7 @@
+   uchar types[TZ_MAX_TIMES];
+   TRAN_TYPE_INFO ttis[TZ_MAX_TYPES];
+ #ifdef ABBR_ARE_USED
+-  char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
++  char chars[MYSQL_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
+ #endif
+   /* 
+     Used as a temporary tz_info until we decide that we actually want to
+diff -urN mysql-old/sql/unireg.cc mysql/sql/unireg.cc
+--- mysql-old/sql/unireg.cc	2011-05-10 17:45:45.630015710 +0000
++++ mysql/sql/unireg.cc	2011-05-10 17:56:01.660015710 +0000
+@@ -496,7 +496,7 @@
+     }
+     cfield->row=(uint8) row;
+     cfield->col=(uint8) (length+1);
+-    cfield->sc_length=(uint8) min(cfield->length,cols-(length+2));
++    cfield->sc_length=(uint8) MYSQL_MIN(cfield->length,cols-(length+2));
+   }
+   length=(uint) (pos-start_screen);
+   int2store(start_screen,length);
+@@ -715,7 +715,7 @@
+     DBUG_RETURN(1);
+   }
+   /* Hack to avoid bugs with small static rows in MySQL */
+-  reclength=max(file->min_record_length(table_options),reclength);
++  reclength=MYSQL_MAX(file->min_record_length(table_options),reclength);
+   if (info_length+(ulong) create_fields.elements*FCOMP+288+
+       n_length+int_length+com_length > 65535L || int_count > 255)
+   {
+diff -urN mysql-old/sql-common/client.c mysql/sql-common/client.c
+--- mysql-old/sql-common/client.c	2011-05-10 17:45:45.710015710 +0000
++++ mysql/sql-common/client.c	2011-05-10 17:56:01.660015710 +0000
+@@ -728,7 +728,7 @@
+       }
+ 
+       (void) strmake(net->last_error,(char*) pos,
+-		     min((uint) len,(uint) sizeof(net->last_error)-1));
++		     MYSQL_MIN((uint) len,(uint) sizeof(net->last_error)-1));
+     }
+     else
+       set_mysql_error(mysql, CR_UNKNOWN_ERROR, unknown_sqlstate);
+@@ -2102,7 +2102,7 @@
+       {
+         IF_DBUG(char ipaddr[18];)
+         memcpy(&sock_addr.sin_addr, hp->h_addr_list[i],
+-               min(sizeof(sock_addr.sin_addr), (size_t) hp->h_length));
++               MYSQL_MIN(sizeof(sock_addr.sin_addr), (size_t) hp->h_length));
+         DBUG_PRINT("info",("Trying %s...",
+                           (my_inet_ntoa(sock_addr.sin_addr, ipaddr), ipaddr)));
+         status= my_connect(sock, (struct sockaddr *) &sock_addr,
+diff -urN mysql-old/sql-common/my_time.c mysql/sql-common/my_time.c
+--- mysql-old/sql-common/my_time.c	2011-05-10 17:45:45.710015710 +0000
++++ mysql/sql-common/my_time.c	2011-05-10 17:56:01.660015710 +0000
+@@ -249,7 +249,7 @@
+     2003-03-03 20:00:20 AM
+     20:00:20.000000 AM 03-03-2000
+   */
+-  i= max((uint) format_position[0], (uint) format_position[1]);
++  i= MYSQL_MAX((uint) format_position[0], (uint) format_position[1]);
+   set_if_bigger(i, (uint) format_position[2]);
+   allow_space= ((1 << i) | (1 << format_position[6]));
+   allow_space&= (1 | 2 | 4 | 8);
+diff -urN mysql-old/storage/csv/ha_tina.cc mysql/storage/csv/ha_tina.cc
+--- mysql-old/storage/csv/ha_tina.cc	2011-05-10 17:45:45.693349043 +0000
++++ mysql/storage/csv/ha_tina.cc	2011-05-10 17:56:01.663349044 +0000
+@@ -1193,7 +1193,7 @@
+   if (closest_hole == chain_ptr) /* no more chains */
+     *end_pos= file_buff->end();
+   else
+-    *end_pos= min(file_buff->end(),
++    *end_pos= MYSQL_MIN(file_buff->end(),
+                   closest_hole->begin);
+   return (closest_hole != chain_ptr) && (*end_pos == closest_hole->begin);
+ }
+@@ -1429,7 +1429,7 @@
+   /* write repaired file */
+   while (1)
+   {
+-    write_end= min(file_buff->end(), current_position);
++    write_end= MYSQL_MIN(file_buff->end(), current_position);
+     if ((write_end - write_begin) &&
+         (my_write(repair_file, (uchar*)file_buff->ptr(),
+                   (size_t) (write_end - write_begin), MYF_RW)))
+diff -urN mysql-old/storage/federated/ha_federated.cc mysql/storage/federated/ha_federated.cc
+--- mysql-old/storage/federated/ha_federated.cc	2011-05-10 17:45:45.690015710 +0000
++++ mysql/storage/federated/ha_federated.cc	2011-05-10 17:56:01.663349044 +0000
+@@ -546,7 +546,7 @@
+   size_t buf_len;
+   DBUG_ENTER("ha_federated parse_url_error");
+ 
+-  buf_len= min(table->s->connect_string.length,
++  buf_len= MYSQL_MIN(table->s->connect_string.length,
+                FEDERATED_QUERY_BUFFER_SIZE-1);
+   strmake(buf, table->s->connect_string.str, buf_len);
+   my_error(error_num, MYF(0), buf);
+@@ -1291,7 +1291,7 @@
+     {
+       Field *field= key_part->field;
+       uint store_length= key_part->store_length;
+-      uint part_length= min(store_length, length);
++      uint part_length= MYSQL_MIN(store_length, length);
+       needs_quotes= field->str_needs_quotes();
+       DBUG_DUMP("key, start of loop", ptr, length);
+ 
+diff -urN mysql-old/storage/heap/hp_create.c mysql/storage/heap/hp_create.c
+--- mysql-old/storage/heap/hp_create.c	2011-05-10 17:45:45.690015710 +0000
++++ mysql/storage/heap/hp_create.c	2011-05-10 17:56:01.666682377 +0000
+@@ -229,7 +229,7 @@
+ {
+   uint i,recbuffer,records_in_block;
+ 
+-  max_records= max(min_records,max_records);
++  max_records= MYSQL_MAX(min_records,max_records);
+   if (!max_records)
+     max_records= 1000;			/* As good as quess as anything */
+   recbuffer= (uint) (reclength + sizeof(uchar**) - 1) & ~(sizeof(uchar**) - 1);
+diff -urN mysql-old/storage/heap/hp_test2.c mysql/storage/heap/hp_test2.c
+--- mysql-old/storage/heap/hp_test2.c	2011-05-10 17:45:45.690015710 +0000
++++ mysql/storage/heap/hp_test2.c	2011-05-10 17:56:01.723349044 +0000
+@@ -136,7 +136,7 @@
+ 
+   for (i=0 ; i < recant ; i++)
+   {
+-    n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*5,MAX_RECORDS));
++    n1=rnd(1000); n2=rnd(100); n3=rnd(MYSQL_MIN(recant*5,MAX_RECORDS));
+     make_record(record,n1,n2,n3,"Pos",write_count);
+ 
+     if (heap_write(file,record))
+@@ -217,7 +217,7 @@
+   printf("- Update\n");
+   for (i=0 ; i < write_count/10 ; i++)
+   {
+-    n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*2,MAX_RECORDS));
++    n1=rnd(1000); n2=rnd(100); n3=rnd(MYSQL_MIN(recant*2,MAX_RECORDS));
+     make_record(record2, n1, n2, n3, "XXX", update);
+     if (rnd(2) == 1)
+     {
+diff -urN mysql-old/storage/myisam/ha_myisam.cc mysql/storage/myisam/ha_myisam.cc
+--- mysql-old/storage/myisam/ha_myisam.cc	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/ha_myisam.cc	2011-05-10 17:56:01.723349044 +0000
+@@ -1527,7 +1527,7 @@
+ {
+   DBUG_ENTER("ha_myisam::start_bulk_insert");
+   THD *thd= current_thd;
+-  ulong size= min(thd->variables.read_buff_size,
++  ulong size= MYSQL_MIN(thd->variables.read_buff_size,
+                   (ulong) (table->s->avg_row_length*rows));
+   DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
+                      (ulong) rows, size));
+diff -urN mysql-old/storage/myisam/mi_cache.c mysql/storage/myisam/mi_cache.c
+--- mysql-old/storage/myisam/mi_cache.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_cache.c	2011-05-10 17:56:01.723349044 +0000
+@@ -61,7 +61,7 @@
+       (my_off_t) (info->read_end - info->request_pos))
+   {
+     in_buff_pos=info->request_pos+(uint) offset;
+-    in_buff_length= min(length, (size_t) (info->read_end-in_buff_pos));
++    in_buff_length= MYSQL_MIN(length, (size_t) (info->read_end-in_buff_pos));
+     memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length);
+     if (!(length-=in_buff_length))
+       DBUG_RETURN(0);
+diff -urN mysql-old/storage/myisam/mi_check.c mysql/storage/myisam/mi_check.c
+--- mysql-old/storage/myisam/mi_check.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_check.c	2011-05-10 17:56:01.726682377 +0000
+@@ -2173,7 +2173,7 @@
+   ulong buff_length;
+   DBUG_ENTER("filecopy");
+ 
+-  buff_length=(ulong) min(param->write_buffer_length,length);
++  buff_length=(ulong) MYSQL_MIN(param->write_buffer_length,length);
+   if (!(buff=my_malloc(buff_length,MYF(0))))
+   {
+     buff=tmp_buff; buff_length=IO_SIZE;
+@@ -2329,7 +2329,7 @@
+   init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ 
+   if (share->data_file_type == DYNAMIC_RECORD)
+-    length=max(share->base.min_pack_length+1,share->base.min_block_length);
++    length=MYSQL_MAX(share->base.min_pack_length+1,share->base.min_block_length);
+   else if (share->data_file_type == COMPRESSED_RECORD)
+     length=share->base.min_block_length;
+   else
+@@ -2782,7 +2782,7 @@
+     my_seek(param->read_cache.file,0L,MY_SEEK_END,MYF(0));
+ 
+   if (share->data_file_type == DYNAMIC_RECORD)
+-    rec_length=max(share->base.min_pack_length+1,share->base.min_block_length);
++    rec_length=MYSQL_MAX(share->base.min_pack_length+1,share->base.min_block_length);
+   else if (share->data_file_type == COMPRESSED_RECORD)
+     rec_length=share->base.min_block_length;
+   else
+@@ -4331,7 +4331,7 @@
+ 
+   VOID(mi_close(*org_info));
+   bzero((char*) &create_info,sizeof(create_info));
+-  create_info.max_rows=max(max_records,share.base.records);
++  create_info.max_rows=MYSQL_MAX(max_records,share.base.records);
+   create_info.reloc_rows=share.base.reloc;
+   create_info.old_options=(share.options |
+ 			   (unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0));
+diff -urN mysql-old/storage/myisam/mi_create.c mysql/storage/myisam/mi_create.c
+--- mysql-old/storage/myisam/mi_create.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_create.c	2011-05-10 17:56:01.726682377 +0000
+@@ -437,8 +437,8 @@
+     block_length= (keydef->block_length ? 
+                    my_round_up_to_next_power(keydef->block_length) :
+                    myisam_block_size);
+-    block_length= max(block_length, MI_MIN_KEY_BLOCK_LENGTH);
+-    block_length= min(block_length, MI_MAX_KEY_BLOCK_LENGTH);
++    block_length= MYSQL_MAX(block_length, MI_MIN_KEY_BLOCK_LENGTH);
++    block_length= MYSQL_MIN(block_length, MI_MAX_KEY_BLOCK_LENGTH);
+ 
+     keydef->block_length= (uint16) MI_BLOCK_SIZE(length-real_length_diff,
+                                                  pointer,MI_MAX_KEYPTR_SIZE,
+@@ -527,7 +527,7 @@
+     got from MYI file header (see also myisampack.c:save_state)
+   */
+   share.base.key_reflength=
+-    mi_get_pointer_length(max(ci->key_file_length,tmp),3);
++    mi_get_pointer_length(MYSQL_MAX(ci->key_file_length,tmp),3);
+   share.base.keys= share.state.header.keys= keys;
+   share.state.header.uniques= uniques;
+   share.state.header.fulltext_keys= fulltext_keys;
+@@ -565,7 +565,7 @@
+   share.base.min_block_length=
+     (share.base.pack_reclength+3 < MI_EXTEND_BLOCK_LENGTH &&
+      ! share.base.blobs) ?
+-    max(share.base.pack_reclength,MI_MIN_BLOCK_LENGTH) :
++    MYSQL_MAX(share.base.pack_reclength,MI_MIN_BLOCK_LENGTH) :
+     MI_EXTEND_BLOCK_LENGTH;
+   if (! (flags & HA_DONT_TOUCH_DATA))
+     share.state.create_time= (long) time((time_t*) 0);
+diff -urN mysql-old/storage/myisam/mi_dynrec.c mysql/storage/myisam/mi_dynrec.c
+--- mysql-old/storage/myisam/mi_dynrec.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_dynrec.c	2011-05-10 17:56:01.730015710 +0000
+@@ -880,7 +880,7 @@
+ 	uint tmp=MY_ALIGN(reclength - length + 3 +
+ 			  test(reclength >= 65520L),MI_DYN_ALIGN_SIZE);
+ 	/* Don't create a block bigger than MI_MAX_BLOCK_LENGTH */
+-	tmp= min(length+tmp, MI_MAX_BLOCK_LENGTH)-length;
++	tmp= MYSQL_MIN(length+tmp, MI_MAX_BLOCK_LENGTH)-length;
+ 	/* Check if we can extend this block */
+ 	if (block_info.filepos + block_info.block_len ==
+ 	    info->state->data_file_length &&
+diff -urN mysql-old/storage/myisam/mi_extra.c mysql/storage/myisam/mi_extra.c
+--- mysql-old/storage/myisam/mi_extra.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_extra.c	2011-05-10 17:56:01.730015710 +0000
+@@ -99,7 +99,7 @@
+       cache_size= (extra_arg ? *(ulong*) extra_arg :
+ 		   my_default_record_cache_size);
+       if (!(init_io_cache(&info->rec_cache,info->dfile,
+-			 (uint) min(info->state->data_file_length+1,
++			 (uint) MYSQL_MIN(info->state->data_file_length+1,
+ 				    cache_size),
+ 			  READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
+ 			  MYF(share->write_flag & MY_WAIT_IF_FULL))))
+diff -urN mysql-old/storage/myisam/mi_open.c mysql/storage/myisam/mi_open.c
+--- mysql-old/storage/myisam/mi_open.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_open.c	2011-05-10 17:56:01.730015710 +0000
+@@ -328,7 +328,7 @@
+     strmov(share->index_file_name,  index_name);
+     strmov(share->data_file_name,   data_name);
+ 
+-    share->blocksize=min(IO_SIZE,myisam_block_size);
++    share->blocksize=MYSQL_MIN(IO_SIZE,myisam_block_size);
+     {
+       HA_KEYSEG *pos=share->keyparts;
+       uint32 ftkey_nr= 1;
+@@ -501,7 +501,7 @@
+     share->base.margin_key_file_length=(share->base.max_key_file_length -
+ 					(keys ? MI_INDEX_BLOCK_MARGIN *
+ 					 share->blocksize * keys : 0));
+-    share->blocksize=min(IO_SIZE,myisam_block_size);
++    share->blocksize=MYSQL_MIN(IO_SIZE,myisam_block_size);
+     share->data_file_type=STATIC_RECORD;
+     if (share->options & HA_OPTION_COMPRESS_RECORD)
+     {
+@@ -714,10 +714,10 @@
+     if (length == (ulong) -1)
+     {
+       if (info->s->options & HA_OPTION_COMPRESS_RECORD)
+-        length= max(info->s->base.pack_reclength, info->s->max_pack_length);
++        length= MYSQL_MAX(info->s->base.pack_reclength, info->s->max_pack_length);
+       else
+         length= info->s->base.pack_reclength;
+-      length= max(length, info->s->base.max_key_length);
++      length= MYSQL_MAX(length, info->s->base.max_key_length);
+       /* Avoid unnecessary realloc */
+       if (newptr && length == old_length)
+ 	return newptr;
+diff -urN mysql-old/storage/myisam/mi_packrec.c mysql/storage/myisam/mi_packrec.c
+--- mysql-old/storage/myisam/mi_packrec.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_packrec.c	2011-05-10 17:56:01.733349043 +0000
+@@ -684,7 +684,7 @@
+       return OFFSET_TABLE_SIZE;
+     }
+     length2= find_longest_bitstream(next, end) + 1;
+-    length=max(length,length2);
++    length=MYSQL_MAX(length,length2);
+   }
+   return length;
+ }
+@@ -1399,7 +1399,7 @@
+   info->filepos=filepos+head_length;
+   if (file > 0)
+   {
+-    info->offset=min(info->rec_len, ref_length - head_length);
++    info->offset=MYSQL_MIN(info->rec_len, ref_length - head_length);
+     memcpy(*rec_buff_p, header + head_length, info->offset);
+   }
+   return 0;
+diff -urN mysql-old/storage/myisam/mi_test1.c mysql/storage/myisam/mi_test1.c
+--- mysql-old/storage/myisam/mi_test1.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_test1.c	2011-05-10 17:56:01.733349043 +0000
+@@ -436,7 +436,7 @@
+     uint tmp;
+     uchar *ptr;;
+     sprintf((char*) blob_record,"... row: %d", rownr);
+-    strappend((char*) blob_record,max(MAX_REC_LENGTH-rownr,10),' ');
++    strappend((char*) blob_record,MYSQL_MAX(MAX_REC_LENGTH-rownr,10),' ');
+     tmp=strlen((char*) blob_record);
+     int4store(pos,tmp);
+     ptr=blob_record;
+diff -urN mysql-old/storage/myisam/mi_test2.c mysql/storage/myisam/mi_test2.c
+--- mysql-old/storage/myisam/mi_test2.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/mi_test2.c	2011-05-10 17:56:01.733349043 +0000
+@@ -601,7 +601,7 @@
+     goto err;
+ 
+   bmove(read_record2,read_record,reclength);
+-  for (i=min(2,keys) ; i-- > 0 ;)
++  for (i=MYSQL_MIN(2,keys) ; i-- > 0 ;)
+   {
+     if (mi_rsame(file,read_record2,(int) i)) goto err;
+     if (memcmp(read_record,read_record2,reclength) != 0)
+diff -urN mysql-old/storage/myisam/myisamlog.c mysql/storage/myisam/myisamlog.c
+--- mysql-old/storage/myisam/myisamlog.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/myisamlog.c	2011-05-10 17:56:01.733349043 +0000
+@@ -90,7 +90,7 @@
+   log_filename=myisam_log_filename;
+   get_options(&argc,&argv);
+   /* Number of MyISAM files we can have open at one time */
+-  max_files= (my_set_max_open_files(min(max_files,8))-6)/2;
++  max_files= (my_set_max_open_files(MYSQL_MIN(max_files,8))-6)/2;
+   if (update)
+     printf("Trying to %s MyISAM files according to log '%s'\n",
+ 	   (recover ? "recover" : "update"),log_filename);
+diff -urN mysql-old/storage/myisam/myisampack.c mysql/storage/myisam/myisampack.c
+--- mysql-old/storage/myisam/myisampack.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/myisampack.c	2011-05-10 17:56:01.736682376 +0000
+@@ -1239,7 +1239,7 @@
+     {
+       if (huff_counts->field_length > 2 &&
+ 	  huff_counts->empty_fields + (records - huff_counts->empty_fields)*
+-	  (1+max_bit(max(huff_counts->max_pre_space,
++	  (1+max_bit(MYSQL_MAX(huff_counts->max_pre_space,
+ 			 huff_counts->max_end_space))) <
+ 	  records * max_bit(huff_counts->field_length))
+       {
+@@ -3001,7 +3001,7 @@
+   if (mrg->src_file_has_indexes_disabled)
+   {
+     isam_file->s->state.state.key_file_length=
+-      max(isam_file->s->state.state.key_file_length, new_length);
++      MYSQL_MAX(isam_file->s->state.state.key_file_length, new_length);
+   }
+   state.dellink= HA_OFFSET_ERROR;
+   state.version=(ulong) time((time_t*) 0);
+diff -urN mysql-old/storage/myisam/rt_mbr.c mysql/storage/myisam/rt_mbr.c
+--- mysql-old/storage/myisam/rt_mbr.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/rt_mbr.c	2011-05-10 17:56:01.736682376 +0000
+@@ -325,8 +325,8 @@
+   bmin = korr_func(b); \
+   amax = korr_func(a+len); \
+   bmax = korr_func(b+len); \
+-  amin = min(amin, bmin); \
+-  amax = max(amax, bmax); \
++  amin = MYSQL_MIN(amin, bmin); \
++  amax = MYSQL_MAX(amax, bmax); \
+   store_func(c, amin); \
+   store_func(c+len, amax); \
+ }
+@@ -338,8 +338,8 @@
+   get_func(bmin, b); \
+   get_func(amax, a+len); \
+   get_func(bmax, b+len); \
+-  amin = min(amin, bmin); \
+-  amax = max(amax, bmax); \
++  amin = MYSQL_MIN(amin, bmin); \
++  amax = MYSQL_MAX(amax, bmax); \
+   store_func(c, amin); \
+   store_func(c+len, amax); \
+ }
+@@ -417,8 +417,8 @@
+   bmin = korr_func(b); \
+   amax = korr_func(a+len); \
+   bmax = korr_func(b+len); \
+-  amin = max(amin, bmin); \
+-  amax = min(amax, bmax); \
++  amin = MYSQL_MAX(amin, bmin); \
++  amax = MYSQL_MIN(amax, bmax); \
+   if (amin >= amax) \
+     return 0; \
+   res *= amax - amin; \
+@@ -431,8 +431,8 @@
+   get_func(bmin, b); \
+   get_func(amax, a+len); \
+   get_func(bmax, b+len); \
+-  amin = max(amin, bmin); \
+-  amax = min(amax, bmax); \
++  amin = MYSQL_MAX(amin, bmin); \
++  amax = MYSQL_MIN(amax, bmax); \
+   if (amin >= amax)  \
+     return 0; \
+   res *= amax - amin; \
+@@ -508,7 +508,7 @@
+    amax = korr_func(a+len); \
+    bmax = korr_func(b+len); \
+    a_area *= (((double)amax) - ((double)amin)); \
+-   loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
++   loc_ab_area *= ((double)MYSQL_MAX(amax, bmax) - (double)MYSQL_MIN(amin, bmin)); \
+ }
+ 
+ #define RT_AREA_INC_GET(type, get_func, len)\
+@@ -519,7 +519,7 @@
+    get_func(amax, a+len); \
+    get_func(bmax, b+len); \
+    a_area *= (((double)amax) - ((double)amin)); \
+-   loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
++   loc_ab_area *= ((double)MYSQL_MAX(amax, bmax) - (double)MYSQL_MIN(amin, bmin)); \
+ }
+ 
+ /*
+@@ -604,7 +604,7 @@
+    amax = korr_func(a+len); \
+    bmax = korr_func(b+len); \
+    a_perim+= (((double)amax) - ((double)amin)); \
+-   *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
++   *ab_perim+= ((double)MYSQL_MAX(amax, bmax) - (double)MYSQL_MIN(amin, bmin)); \
+ }
+ 
+ #define RT_PERIM_INC_GET(type, get_func, len)\
+@@ -615,7 +615,7 @@
+    get_func(amax, a+len); \
+    get_func(bmax, b+len); \
+    a_perim+= (((double)amax) - ((double)amin)); \
+-   *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
++   *ab_perim+= ((double)MYSQL_MAX(amax, bmax) - (double)MYSQL_MIN(amin, bmin)); \
+ }
+ 
+ /*
+diff -urN mysql-old/storage/myisam/sort.c mysql/storage/myisam/sort.c
+--- mysql-old/storage/myisam/sort.c	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisam/sort.c	2011-05-10 17:56:01.736682376 +0000
+@@ -129,7 +129,7 @@
+   sort_keys= (uchar **) NULL; error= 1;
+   maxbuffer=1;
+ 
+-  memavl= max(sortbuff_size, MIN_SORT_BUFFER);
++  memavl= MYSQL_MAX(sortbuff_size, MIN_SORT_BUFFER);
+   records=	info->sort_info->max_records;
+   sort_length=	info->key_length;
+   LINT_INIT(keys);
+@@ -346,7 +346,7 @@
+     bzero((char*) &sort_param->unique,  sizeof(sort_param->unique));
+     sort_keys= (uchar **) NULL;
+ 
+-    memavl=       max(sort_param->sortbuff_size, MIN_SORT_BUFFER);
++    memavl=       MYSQL_MAX(sort_param->sortbuff_size, MIN_SORT_BUFFER);
+     idx=          (uint)sort_param->sort_info->max_records;
+     sort_length=  sort_param->key_length;
+     maxbuffer=    1;
+@@ -820,7 +820,7 @@
+   register uint count;
+   uint length;
+ 
+-  if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
++  if ((count=(uint) MYSQL_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
+   {
+     if (my_pread(fromfile->file,(uchar*) buffpek->base,
+                  (length= sort_length*count),buffpek->file_pos,MYF_RW))
+@@ -841,7 +841,7 @@
+   uint idx;
+   uchar *buffp;
+ 
+-  if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
++  if ((count=(uint) MYSQL_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
+   {
+     buffp = buffpek->base;
+ 
+diff -urN mysql-old/storage/myisammrg/ha_myisammrg.cc mysql/storage/myisammrg/ha_myisammrg.cc
+--- mysql-old/storage/myisammrg/ha_myisammrg.cc	2011-05-10 17:45:45.670015709 +0000
++++ mysql/storage/myisammrg/ha_myisammrg.cc	2011-05-10 17:56:01.740015709 +0000
+@@ -965,7 +965,7 @@
+       memcpy((char*) table->key_info[0].rec_per_key,
+ 	     (char*) mrg_info.rec_per_key,
+              sizeof(table->key_info[0].rec_per_key[0]) *
+-             min(file->keys, table->s->key_parts));
++             MYSQL_MIN(file->keys, table->s->key_parts));
+     }
+   }
+   if (flag & HA_STATUS_ERRKEY)
+diff -urN mysql-old/storage/ndb/src/common/portlib/NdbTCP.cpp mysql/storage/ndb/src/common/portlib/NdbTCP.cpp
+--- mysql-old/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-10 17:45:45.666682376 +0000
++++ mysql/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-10 17:56:01.740015709 +0000
+@@ -30,7 +30,7 @@
+ 			    &tmp_errno);
+     if (hp)
+     {
+-      memcpy(dst, hp->h_addr, min(sizeof(*dst), (size_t) hp->h_length));
++      memcpy(dst, hp->h_addr, MYSQL_MIN(sizeof(*dst), (size_t) hp->h_length));
+       my_gethostbyname_r_free();
+       return 0; //DBUG_RETURN(0);
+     }
+diff -urN mysql-old/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp mysql/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+--- mysql-old/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp	2011-05-10 17:45:45.656682377 +0000
++++ mysql/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp	2011-05-10 17:56:01.740015709 +0000
+@@ -212,7 +212,7 @@
+     }
+   }
+   // return values
+-  par.m_depth = 1 + max(cpar[0].m_depth, cpar[1].m_depth);
++  par.m_depth = 1 + MYSQL_MAX(cpar[0].m_depth, cpar[1].m_depth);
+   par.m_occup = node.getOccup();
+   for (unsigned i = 0; i <= 1; i++) {
+     if (node.getLink(i) == NullTupLoc)
+diff -urN mysql-old/storage/ndb/src/ndbapi/NdbBlob.cpp mysql/storage/ndb/src/ndbapi/NdbBlob.cpp
+--- mysql-old/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-05-10 17:45:45.653349044 +0000
++++ mysql/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-05-10 17:56:01.740015709 +0000
+@@ -1523,7 +1523,7 @@
+   }
+   // these buffers are always used
+   theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
+-  thePackKeyBuf.alloc(max(theTable->m_keyLenInWords, theAccessTable->m_keyLenInWords) << 2);
++  thePackKeyBuf.alloc(MYSQL_MAX(theTable->m_keyLenInWords, theAccessTable->m_keyLenInWords) << 2);
+   theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
+   theHead = (Head*)theHeadInlineBuf.data;
+   theInlineData = theHeadInlineBuf.data + sizeof(Head);
+diff -urN mysql-old/storage/ndb/test/ndbapi/testIndexStat.cpp mysql/storage/ndb/test/ndbapi/testIndexStat.cpp
+--- mysql-old/storage/ndb/test/ndbapi/testIndexStat.cpp	2011-05-10 17:45:45.650015710 +0000
++++ mysql/storage/ndb/test/ndbapi/testIndexStat.cpp	2011-05-10 17:56:01.743349042 +0000
+@@ -30,10 +30,10 @@
+  * 0. baseline with same options as handler
+  */
+ 
+-#undef min
+-#undef max
+-#define min(a, b) ((a) <= (b) ? (a) : (b))
+-#define max(a, b) ((a) >= (b) ? (a) : (b))
++#undef MYSQL_MIN
++#undef MYSQL_MAX
++#define MYSQL_MIN(a, b) ((a) <= (b) ? (a) : (b))
++#define MYSQL_MAX(a, b) ((a) >= (b) ? (a) : (b))
+ 
+ inline NdbOut&
+ NdbOut::operator<<(double x)
+@@ -784,13 +784,13 @@
+ uint
+ Range::minattrs() const
+ {
+-  return min(bnd[0].val.numattrs, bnd[1].val.numattrs);
++  return MYSQL_MIN(bnd[0].val.numattrs, bnd[1].val.numattrs);
+ }
+ 
+ uint
+ Range::maxattrs() const
+ {
+-  return max(bnd[0].val.numattrs, bnd[1].val.numattrs);
++  return MYSQL_MAX(bnd[0].val.numattrs, bnd[1].val.numattrs);
+ }
+ 
+ int
+@@ -856,8 +856,8 @@
+       lim[i] = lo;
+   }
+   // the range
+-  const int lo = max(lim[0], 0);
+-  const int hi = min(lim[1], (int)g_sortcount - 1);
++  const int lo = MYSQL_MAX(lim[0], 0);
++  const int hi = MYSQL_MIN(lim[1], (int)g_sortcount - 1);
+   if (! g_opts.nochecks) {
+     int curr = -1;
+     for (i = 0; i < (int)g_sortcount; i++) {
+diff -urN mysql-old/storage/ndb/test/src/getarg.c mysql/storage/ndb/test/src/getarg.c
+--- mysql-old/storage/ndb/test/src/getarg.c	2011-05-10 17:45:45.650015710 +0000
++++ mysql/storage/ndb/test/src/getarg.c	2011-05-10 17:56:01.743349042 +0000
+@@ -65,8 +65,8 @@
+ 
+ #define ISFLAG(X) ((X).type == arg_flag || (X).type == arg_negative_flag)
+ 
+-#ifndef max
+-#define max(a, b) (a) > (b) ? (a) : (b)
++#ifndef MYSQL_MAX
++#define MYSQL_MAX(a, b) (a) > (b) ? (a) : (b)
+ #endif
+ 
+ #ifdef HAVE___PROGNAME
+@@ -306,7 +306,7 @@
+ 	}
+ 	if (args[i].long_name && args[i].short_name)
+ 	    len += 2; /* ", " */
+-	max_len = max(max_len, len);
++	max_len = MYSQL_MAX(max_len, len);
+     }
+     if (extra_string) {
+ 	col = check_column(stderr, col, strlen(extra_string) + 1, columns);
+diff -urN mysql-old/strings/ctype-big5.c mysql/strings/ctype-big5.c
+--- mysql-old/strings/ctype-big5.c	2011-05-10 17:45:45.706682376 +0000
++++ mysql/strings/ctype-big5.c	2011-05-10 17:56:01.743349042 +0000
+@@ -253,7 +253,7 @@
+                              const uchar *b, size_t b_length,
+                              my_bool b_is_prefix)
+ {
+-  size_t length= min(a_length, b_length);
++  size_t length= MYSQL_MIN(a_length, b_length);
+   int res= my_strnncoll_big5_internal(&a, &b, length);
+   return res ? res : (int)((b_is_prefix ? length : a_length) - b_length);
+ }
+@@ -266,7 +266,7 @@
+ 			       const uchar *b, size_t b_length,
+                                my_bool diff_if_only_endspace_difference)
+ {
+-  size_t length= min(a_length, b_length);
++  size_t length= MYSQL_MIN(a_length, b_length);
+   int res= my_strnncoll_big5_internal(&a, &b, length);
+ 
+ #ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
+diff -urN mysql-old/strings/ctype-bin.c mysql/strings/ctype-bin.c
+--- mysql-old/strings/ctype-bin.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-bin.c	2011-05-10 17:56:01.750015710 +0000
+@@ -80,7 +80,7 @@
+                                const uchar *t, size_t tlen,
+                                my_bool t_is_prefix)
+ {
+-  size_t len=min(slen,tlen);
++  size_t len=MYSQL_MIN(slen,tlen);
+   int cmp= memcmp(s,t,len);
+   return cmp ? cmp : (int)((t_is_prefix ? len : slen) - tlen);
+ }
+@@ -131,7 +131,7 @@
+                                  const uchar *t, size_t tlen,
+                                  my_bool t_is_prefix)
+ {
+-  size_t len=min(slen,tlen);
++  size_t len=MYSQL_MIN(slen,tlen);
+   int cmp= memcmp(s,t,len);
+   return cmp ? cmp : (int)((t_is_prefix ? len : slen) - tlen);
+ }
+@@ -175,7 +175,7 @@
+   diff_if_only_endspace_difference= 0;
+ #endif
+ 
+-  end= a + (length= min(a_length, b_length));
++  end= a + (length= MYSQL_MIN(a_length, b_length));
+   while (a < end)
+   {
+     if (*a++ != *b++)
+@@ -404,7 +404,7 @@
+                               const uchar *src, size_t srclen)
+ {
+   if (dest != src)
+-    memcpy(dest, src, min(dstlen,srclen));
++    memcpy(dest, src, MYSQL_MIN(dstlen,srclen));
+   if (dstlen > srclen)
+     bfill(dest + srclen, dstlen - srclen, 0);
+   return dstlen;
+@@ -417,7 +417,7 @@
+                             const uchar *src, size_t srclen)
+ {
+   if (dest != src)
+-    memcpy(dest, src, min(dstlen,srclen));
++    memcpy(dest, src, MYSQL_MIN(dstlen,srclen));
+   if (dstlen > srclen)
+     bfill(dest + srclen, dstlen - srclen, ' ');
+   return dstlen;
+diff -urN mysql-old/strings/ctype-gbk.c mysql/strings/ctype-gbk.c
+--- mysql-old/strings/ctype-gbk.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-gbk.c	2011-05-10 17:56:01.753349044 +0000
+@@ -2616,7 +2616,7 @@
+                      const uchar *b, size_t b_length,
+                      my_bool b_is_prefix)
+ {
+-  size_t length= min(a_length, b_length);
++  size_t length= MYSQL_MIN(a_length, b_length);
+   int res= my_strnncoll_gbk_internal(&a, &b, length);
+   return res ? res : (int) ((b_is_prefix ? length : a_length) - b_length);
+ }
+@@ -2627,7 +2627,7 @@
+ 			      const uchar *b, size_t b_length,
+                               my_bool diff_if_only_endspace_difference)
+ {
+-  size_t length= min(a_length, b_length);
++  size_t length= MYSQL_MIN(a_length, b_length);
+   int res= my_strnncoll_gbk_internal(&a, &b, length);
+ 
+ #ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
+diff -urN mysql-old/strings/ctype-mb.c mysql/strings/ctype-mb.c
+--- mysql-old/strings/ctype-mb.c	2011-05-10 17:45:45.706682376 +0000
++++ mysql/strings/ctype-mb.c	2011-05-10 17:56:01.760015710 +0000
+@@ -368,7 +368,7 @@
+ 				const uchar *t, size_t tlen,
+                                 my_bool t_is_prefix)
+ {
+-  size_t len=min(slen,tlen);
++  size_t len=MYSQL_MIN(slen,tlen);
+   int cmp= memcmp(s,t,len);
+   return cmp ? cmp : (int) ((t_is_prefix ? len : slen) - tlen);
+ }
+@@ -412,7 +412,7 @@
+   diff_if_only_endspace_difference= 0;
+ #endif
+   
+-  end= a + (length= min(a_length, b_length));
++  end= a + (length= MYSQL_MIN(a_length, b_length));
+   while (a < end)
+   {
+     if (*a++ != *b++)
+@@ -451,7 +451,7 @@
+                                  const uchar *src, size_t srclen)
+ {
+   if (dest != src)
+-    memcpy(dest, src, min(dstlen, srclen));
++    memcpy(dest, src, MYSQL_MIN(dstlen, srclen));
+   if (dstlen > srclen)
+     bfill(dest + srclen, dstlen - srclen, ' ');
+   return dstlen;
+diff -urN mysql-old/strings/ctype-simple.c mysql/strings/ctype-simple.c
+--- mysql-old/strings/ctype-simple.c	2011-05-10 17:45:45.706682376 +0000
++++ mysql/strings/ctype-simple.c	2011-05-10 17:56:01.760015710 +0000
+@@ -159,7 +159,7 @@
+   diff_if_only_endspace_difference= 0;
+ #endif
+ 
+-  end= a + (length= min(a_length, b_length));
++  end= a + (length= MYSQL_MIN(a_length, b_length));
+   while (a < end)
+   {
+     if (map[*a++] != map[*b++])
+@@ -873,7 +873,7 @@
+     val= new_val;
+   }
+   
+-  len= min(len, (size_t) (e-p));
++  len= MYSQL_MIN(len, (size_t) (e-p));
+   memcpy(dst, p, len);
+   return len+sign;
+ }
+@@ -927,7 +927,7 @@
+     long_val= quo;
+   }
+   
+-  len= min(len, (size_t) (e-p));
++  len= MYSQL_MIN(len, (size_t) (e-p));
+ cnv:
+   memcpy(dst, p, len);
+   return len+sign;
+@@ -1158,7 +1158,7 @@
+ {
+   size_t nbytes= (size_t) (end-start);
+   *error= 0;
+-  return min(nbytes, nchars);
++  return MYSQL_MIN(nbytes, nchars);
+ }
+ 
+ 
+diff -urN mysql-old/strings/ctype-tis620.c mysql/strings/ctype-tis620.c
+--- mysql-old/strings/ctype-tis620.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-tis620.c	2011-05-10 17:56:01.760015710 +0000
+@@ -581,7 +581,7 @@
+   a_length= thai2sortable(a, a_length);
+   b_length= thai2sortable(b, b_length);
+   
+-  end= a + (length= min(a_length, b_length));
++  end= a + (length= MYSQL_MIN(a_length, b_length));
+   while (a < end)
+   {
+     if (*a++ != *b++)
+@@ -638,7 +638,7 @@
+                           const uchar *src, size_t srclen)
+ {
+   size_t dstlen= len;
+-  len= (size_t) (strmake((char*) dest, (char*) src, min(len, srclen)) -
++  len= (size_t) (strmake((char*) dest, (char*) src, MYSQL_MIN(len, srclen)) -
+                  (char*) dest);
+   len= thai2sortable(dest, len);
+   if (dstlen > len)
+diff -urN mysql-old/strings/ctype-uca.c mysql/strings/ctype-uca.c
+--- mysql-old/strings/ctype-uca.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-uca.c	2011-05-10 17:56:01.763349043 +0000
+@@ -7567,7 +7567,7 @@
+ {
+   char tail[30];
+   size_t len= lexem->end - lexem->prev;
+-  strmake (tail, lexem->prev, (size_t) min(len, sizeof(tail)-1));
++  strmake (tail, lexem->prev, (size_t) MYSQL_MIN(len, sizeof(tail)-1));
+   errstr[errsize-1]= '\0';
+   my_snprintf(errstr,errsize-1,"%s at '%s'", txt, tail);
+ }
+diff -urN mysql-old/strings/ctype-ucs2.c mysql/strings/ctype-ucs2.c
+--- mysql-old/strings/ctype-ucs2.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-ucs2.c	2011-05-10 17:56:01.773349042 +0000
+@@ -279,7 +279,7 @@
+   se= s + slen;
+   te= t + tlen;
+ 
+-  for (minlen= min(slen, tlen); minlen; minlen-= 2)
++  for (minlen= MYSQL_MIN(slen, tlen); minlen; minlen-= 2)
+   {
+     int s_wc = uni_plane[s[0]] ? (int) uni_plane[s[0]][s[1]].sort :
+                                  (((int) s[0]) << 8) + (int) s[1];
+@@ -1331,7 +1331,7 @@
+   size_t nbytes= ((size_t) (e-b)) & ~(size_t) 1;
+   *error= 0;
+   nchars*= 2;
+-  return min(nbytes, nchars);
++  return MYSQL_MIN(nbytes, nchars);
+ }
+ 
+ 
+@@ -1425,7 +1425,7 @@
+   se= s + slen;
+   te= t + tlen;
+ 
+-  for (minlen= min(slen, tlen); minlen; minlen-= 2)
++  for (minlen= MYSQL_MIN(slen, tlen); minlen; minlen-= 2)
+   {
+     int s_wc= s[0] * 256 + s[1];
+     int t_wc= t[0] * 256 + t[1];
+@@ -1472,7 +1472,7 @@
+                             const uchar *src, size_t srclen)
+ {
+   if (dst != src)
+-    memcpy(dst,src,srclen= min(dstlen,srclen));
++    memcpy(dst,src,srclen= MYSQL_MIN(dstlen,srclen));
+   if (dstlen > srclen)
+     cs->cset->fill(cs, (char*) dst + srclen, dstlen - srclen, ' ');
+   return dstlen;
+diff -urN mysql-old/strings/ctype-utf8.c mysql/strings/ctype-utf8.c
+--- mysql-old/strings/ctype-utf8.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/ctype-utf8.c	2011-05-10 17:56:01.776682376 +0000
+@@ -1937,7 +1937,7 @@
+                          const uchar *t, const uchar *te)
+ {
+   int slen= (int) (se-s), tlen= (int) (te-t);
+-  int len=min(slen,tlen);
++  int len=MYSQL_MIN(slen,tlen);
+   int cmp= memcmp(s,t,len);
+   return cmp ? cmp : slen-tlen;
+ }
+diff -urN mysql-old/strings/decimal.c mysql/strings/decimal.c
+--- mysql-old/strings/decimal.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/decimal.c	2011-05-10 17:56:01.780015710 +0000
+@@ -403,7 +403,7 @@
+     for (; frac>0; frac-=DIG_PER_DEC1)
+     {
+       dec1 x=*buf++;
+-      for (i=min(frac, DIG_PER_DEC1); i; i--)
++      for (i=MYSQL_MIN(frac, DIG_PER_DEC1); i; i--)
+       {
+         dec1 y=x/DIG_MASK;
+         *s1++='0'+(uchar)y;
+@@ -426,7 +426,7 @@
+     for (buf=buf0+ROUND_UP(intg); intg>0; intg-=DIG_PER_DEC1)
+     {
+       dec1 x=*--buf;
+-      for (i=min(intg, DIG_PER_DEC1); i; i--)
++      for (i=MYSQL_MIN(intg, DIG_PER_DEC1); i; i--)
+       {
+         dec1 y=x/10;
+         *--s='0'+(uchar)(x-y*10);
+@@ -1517,8 +1517,8 @@
+ 
+   if (to != from || intg1>intg0)
+   {
+-    dec1 *p0= buf0+intg0+max(frac1, frac0);
+-    dec1 *p1= buf1+intg1+max(frac1, frac0);
++    dec1 *p0= buf0+intg0+MYSQL_MAX(frac1, frac0);
++    dec1 *p1= buf1+intg1+MYSQL_MAX(frac1, frac0);
+ 
+     while (buf0 < p0)
+       *(--p1) = *(--p0);
+@@ -1529,7 +1529,7 @@
+     buf0=to->buf;
+     buf1=to->buf;
+     to->sign=from->sign;
+-    to->intg=min(intg0, len)*DIG_PER_DEC1;
++    to->intg=MYSQL_MIN(intg0, len)*DIG_PER_DEC1;
+   }
+ 
+   if (frac0 > frac1)
+@@ -1631,7 +1631,7 @@
+         scale=frac0*DIG_PER_DEC1;
+         error=E_DEC_TRUNCATED; /* XXX */
+       }
+-      for (buf1=to->buf+intg0+max(frac0,0); buf1 > to->buf; buf1--)
++      for (buf1=to->buf+intg0+MYSQL_MAX(frac0,0); buf1 > to->buf; buf1--)
+       {
+         buf1[0]=buf1[-1];
+       }
+@@ -1650,7 +1650,7 @@
+         /* making 'zero' with the proper scale */
+         dec1 *p0= to->buf + frac0 + 1;
+         to->intg=1;
+-        to->frac= max(scale, 0);
++        to->frac= MYSQL_MAX(scale, 0);
+         to->sign= 0;
+         for (buf1= to->buf; buf1<p0; buf1++)
+           *buf1= 0;
+@@ -1699,11 +1699,11 @@
+ {
+   switch (op) {
+   case '-':
+-    return ROUND_UP(max(from1->intg, from2->intg)) +
+-           ROUND_UP(max(from1->frac, from2->frac));
++    return ROUND_UP(MYSQL_MAX(from1->intg, from2->intg)) +
++           ROUND_UP(MYSQL_MAX(from1->frac, from2->frac));
+   case '+':
+-    return ROUND_UP(max(from1->intg, from2->intg)+1) +
+-           ROUND_UP(max(from1->frac, from2->frac));
++    return ROUND_UP(MYSQL_MAX(from1->intg, from2->intg)+1) +
++           ROUND_UP(MYSQL_MAX(from1->frac, from2->frac));
+   case '*':
+     return ROUND_UP(from1->intg+from2->intg)+
+            ROUND_UP(from1->frac)+ROUND_UP(from2->frac);
+@@ -1718,7 +1718,7 @@
+ {
+   int intg1=ROUND_UP(from1->intg), intg2=ROUND_UP(from2->intg),
+       frac1=ROUND_UP(from1->frac), frac2=ROUND_UP(from2->frac),
+-      frac0=max(frac1, frac2), intg0=max(intg1, intg2), error;
++      frac0=MYSQL_MAX(frac1, frac2), intg0=MYSQL_MAX(intg1, intg2), error;
+   dec1 *buf1, *buf2, *buf0, *stop, *stop2, x, carry;
+ 
+   sanity(to);
+@@ -1743,7 +1743,7 @@
+   buf0=to->buf+intg0+frac0;
+ 
+   to->sign=from1->sign;
+-  to->frac=max(from1->frac, from2->frac);
++  to->frac=MYSQL_MAX(from1->frac, from2->frac);
+   to->intg=intg0*DIG_PER_DEC1;
+   if (unlikely(error))
+   {
+@@ -1772,14 +1772,14 @@
+   while (buf1 > stop)
+     *--buf0=*--buf1;
+ 
+-  /* part 2 - min(frac) ... min(intg) */
++  /* part 2 - MYSQL_MIN(frac) ... MYSQL_MIN(intg) */
+   carry=0;
+   while (buf1 > stop2)
+   {
+     ADD(*--buf0, *--buf1, *--buf2, carry);
+   }
+ 
+-  /* part 3 - min(intg) ... max(intg) */
++  /* part 3 - MYSQL_MIN(intg) ... max(intg) */
+   buf1= intg1 > intg2 ? ((stop=from1->buf)+intg1-intg2) :
+                         ((stop=from2->buf)+intg2-intg1) ;
+   while (buf1 > stop)
+@@ -1800,7 +1800,7 @@
+ {
+   int intg1=ROUND_UP(from1->intg), intg2=ROUND_UP(from2->intg),
+       frac1=ROUND_UP(from1->frac), frac2=ROUND_UP(from2->frac);
+-  int frac0=max(frac1, frac2), error;
++  int frac0=MYSQL_MAX(frac1, frac2), error;
+   dec1 *buf1, *buf2, *buf0, *stop1, *stop2, *start1, *start2, carry=0;
+ 
+   /* let carry:=1 if from2 > from1 */
+@@ -1875,7 +1875,7 @@
+   FIX_INTG_FRAC_ERROR(to->len, intg1, frac0, error);
+   buf0=to->buf+intg1+frac0;
+ 
+-  to->frac=max(from1->frac, from2->frac);
++  to->frac=MYSQL_MAX(from1->frac, from2->frac);
+   to->intg=intg1*DIG_PER_DEC1;
+   if (unlikely(error))
+   {
+@@ -1910,7 +1910,7 @@
+     }
+   }
+ 
+-  /* part 2 - min(frac) ... intg2 */
++  /* part 2 - MYSQL_MIN(frac) ... intg2 */
+   while (buf2 > start2)
+   {
+     SUB(*--buf0, *--buf1, *--buf2, carry);
+@@ -2173,11 +2173,11 @@
+   {
+     /* we're calculating N1 % N2.
+        The result will have
+-         frac=max(frac1, frac2), as for subtraction
++         frac=MYSQL_MAX(frac1, frac2), as for subtraction
+          intg=intg2
+     */
+     to->sign=from1->sign;
+-    to->frac=max(from1->frac, from2->frac);
++    to->frac=MYSQL_MAX(from1->frac, from2->frac);
+     frac0=0;
+   }
+   else
+@@ -2301,7 +2301,7 @@
+     /*
+       now the result is in tmp1, it has
+         intg=prec1-frac1
+-        frac=max(frac1, frac2)=to->frac
++        frac=MYSQL_MAX(frac1, frac2)=to->frac
+     */
+     if (dcarry)
+       *--start1=dcarry;
+@@ -2339,7 +2339,7 @@
+       }
+       DBUG_ASSERT(intg0 <= ROUND_UP(from2->intg));
+       stop1=start1+frac0+intg0;
+-      to->intg=min(intg0*DIG_PER_DEC1, from2->intg);
++      to->intg=MYSQL_MIN(intg0*DIG_PER_DEC1, from2->intg);
+     }
+     if (unlikely(intg0+frac0 > to->len))
+     {
+diff -urN mysql-old/strings/my_vsnprintf.c mysql/strings/my_vsnprintf.c
+--- mysql-old/strings/my_vsnprintf.c	2011-05-10 17:45:45.703349042 +0000
++++ mysql/strings/my_vsnprintf.c	2011-05-10 17:56:01.846682377 +0000
+@@ -141,7 +141,7 @@
+       /* If %#d syntax was used, we have to pre-zero/pre-space the string */
+       if (store_start == buff)
+       {
+-	length= min(length, to_length);
++	length= MYSQL_MIN(length, to_length);
+ 	if (res_length < length)
+ 	{
+ 	  size_t diff= (length- res_length);
+diff -urN mysql-old/strings/str2int.c mysql/strings/str2int.c
+--- mysql-old/strings/str2int.c	2011-05-10 17:45:45.706682376 +0000
++++ mysql/strings/str2int.c	2011-05-10 17:56:01.846682377 +0000
+@@ -82,7 +82,7 @@
+       machines all, if +|n| is representable, so is -|n|, but on
+       twos complement machines the converse is not true.  So the
+       "maximum" representable number has a negative representative.
+-      Limit is set to min(-|lower|,-|upper|); this is the "largest"
++      Limit is set to MYSQL_MIN(-|lower|,-|upper|); this is the "largest"
+       number we are concerned with.	*/
+ 
+   /*  Calculate Limit using Scale as a scratch variable  */
+diff -urN mysql-old/tests/mysql_client_test.c mysql/tests/mysql_client_test.c
+--- mysql-old/tests/mysql_client_test.c	2011-05-10 17:45:45.620015710 +0000
++++ mysql/tests/mysql_client_test.c	2011-05-10 17:56:01.850015711 +0000
+@@ -610,7 +610,7 @@
+     return row_count;
+   }
+ 
+-  field_count= min(mysql_num_fields(result), MAX_RES_FIELDS);
++  field_count= MYSQL_MIN(mysql_num_fields(result), MAX_RES_FIELDS);
+ 
+   bzero((char*) buffer, sizeof(buffer));
+   bzero((char*) length, sizeof(length));
+diff -urN mysql-old/vio/viosocket.c mysql/vio/viosocket.c
+--- mysql-old/vio/viosocket.c	2011-05-10 17:45:45.640015709 +0000
++++ mysql/vio/viosocket.c	2011-05-10 17:56:01.856682377 +0000
+@@ -69,7 +69,7 @@
+ 
+   if (vio->read_pos < vio->read_end)
+   {
+-    rc= min((size_t) (vio->read_end - vio->read_pos), size);
++    rc= MYSQL_MIN((size_t) (vio->read_end - vio->read_pos), size);
+     memcpy(buf, vio->read_pos, rc);
+     vio->read_pos+= rc;
+     /*


