diff --git a/CHANGES.txt b/CHANGES.txt index 7ede921ded955238e43db7c47b54cbc74260d02d..3b1c4a43d1489a7e29cfc57f784a538ec9b45341 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,9 +1,22 @@ Changes ======= -0.38 (Unreleased) +0.39 (unreleased) ----------------- - * No change yet. + + * No changes yet. + +0.38 (2011-12-05) +----------------- + + * erp5: Switch to percona, as maatkit is obsolete. [Sebastien Robin] + * erp5: Improve haproxy configuration. [Sebastien Robin] + * erp5: Support sphinxd. [Kazuhiko Shiozaki] + * erp5: Improve logging and make it more conventional. [Sebastien Robin] + * erp5: Allow mysql connection from localhost. [Romain Courteaud] + * erp5: Allow controlling the Zope/ZEO cache. [Arnaud Fontaine] + * erp5: Increase precision in logs. [Julien Muchembled] + * erp5: Improve erp5 update. [Arnaud Fontaine, Rafael Monnerat] 0.37 (2011-11-24) ----------------- diff --git a/component/apache/buildout.cfg b/component/apache/buildout.cfg index 335e87046b48c54b261a22ab808b6a07cf8f1c3a..42d1c7162ba8b1965e77a6ba9a1ba43bc045771f 100644 --- a/component/apache/buildout.cfg +++ b/component/apache/buildout.cfg @@ -13,61 +13,23 @@ extends = ../sqlite3/buildout.cfg ../zlib/buildout.cfg -[apache-no-ssl] -# inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/ -recipe = hexagonit.recipe.cmmi -url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2 -md5sum = 1696ae62cd879ab1d4dd9ff021a470f2 -configure-options = --enable-authn-alias - --enable-bucketeer - --enable-cache - --enable-case-filter - --enable-case-filter-in - --enable-cgid - --enable-charset-lite - --enable-disk-cache - --enable-echo - --enable-exception-hook - --enable-mods-shared=all - --enable-optional-fn-export - --enable-optional-fn-import - --enable-optional-hook-export - --enable-optional-hook-import - --enable-proxy - --enable-proxy-ajp - --enable-proxy-balancer - --enable-proxy-connect - --enable-proxy-ftp - --enable-proxy-http - --enable-proxy-scgi - --enable-so - --enable-dav - --enable-dav-fs - --disable-ssl - --with-included-apr - --with-z=${zlib:location} - --with-expat=${libexpat:location} - --with-pcre=${pcre:location} - --with-sqlite3=${sqlite3:location} - --with-gdbm=${gdbm:location} - --without-ssl - --without-lber - --without-ldap - --without-ndbm - --without-berkeley-db - --without-pgsql - --without-mysql - --without-sqlite2 - --without-oracle - --without-freedts - --without-odbc - --without-iconv +[apache-CVE-2011-3368.patch] +recipe = hexagonit.recipe.download +md5sum = 1ad598213480ddfc239ce6359b7b2c0b +url = http://www.apache.org/dist/httpd/patches/apply_to_2.2.21/CVE-2011-3368.patch +filename = ${:_buildout_section_name_} +download-only = true [apache] # inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/ recipe = hexagonit.recipe.cmmi +depends = + ${gdbm:version} url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2 md5sum = 1696ae62cd879ab1d4dd9ff021a470f2 +patches = + ${apache-CVE-2011-3368.patch:location}/${apache-CVE-2011-3368.patch:filename} +patch-options = -p1 configure-options = --disable-static --enable-authn-alias --enable-bucketeer
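The two apache parts above illustrate the patching idiom used throughout these component profiles: a hexagonit.recipe.download part fetches the patch file and pins its md5sum, and the hexagonit.recipe.cmmi part applies it through its patches and patch-options options before building. For orientation, a downstream profile consumes such a component simply by extending it; a minimal sketch, in which the relative path and parts list are illustrative assumptions rather than part of this commit:

[buildout]
extends =
  ../component/apache/buildout.cfg
parts =
  apache

# hexagonit.recipe.cmmi exposes the install prefix as ${apache:location},
# so instance templates can point at binaries such as
# ${apache:location}/bin/httpd (path shown for illustration only).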
diff --git a/component/cmake/buildout.cfg b/component/cmake/buildout.cfg index 387e7a2fff43a6498834d2f23a6eec0a670df0f6..51789dfc5dab350c13cd009fae5eb762e1f053ae 100644 --- a/component/cmake/buildout.cfg +++ b/component/cmake/buildout.cfg @@ -4,5 +4,5 @@ parts = [cmake] recipe = hexagonit.recipe.cmmi -url = http://www.cmake.org/files/v2.8/cmake-2.8.3.tar.gz -md5sum = a76a44b93acf5e3badda9de111385921 +url = http://www.cmake.org/files/v2.8/cmake-2.8.6.tar.gz +md5sum = 2147da452fd9212bb9b4542a9eee9d5b diff --git a/component/dropbear/buildout.cfg b/component/dropbear/buildout.cfg index dbb0709203192325335ec072f569234d5c9b8484..6f18f1785dae68f5d741c9ef48f6f749fa71e2ed 100644 --- a/component/dropbear/buildout.cfg +++ b/component/dropbear/buildout.cfg @@ -28,7 +28,9 @@ download-only = true [dropbear] recipe = hexagonit.recipe.cmmi md5sum = 0284ea239083f04c8b874e08e1aca243 -url = http://matt.ucc.asn.au/dropbear/dropbear-0.53.1.tar.bz2 +# XXX: We still use an old version of dropbear instead of the latest one +# in order to have all patches working. +url = http://matt.ucc.asn.au/dropbear/releases/dropbear-0.53.1.tar.bz2 configure-options = --prefix=${buildout:parts-directory}/${:_buildout_section_name_} diff --git a/component/fonts/buildout.cfg b/component/fonts/buildout.cfg index 0a8a59c527feb748bb15d7ffb0631bc4d4d4ef67..a148657775cfe81feb8ffce88d170944d60df17e 100644 --- a/component/fonts/buildout.cfg +++ b/component/fonts/buildout.cfg @@ -2,6 +2,7 @@ parts = liberation-fonts ipaex-fonts + ipa-fonts [fonts] location = ${buildout:parts-directory}/${:_buildout_section_name_} @@ -16,7 +17,7 @@ url = https://fedorahosted.org/releases/l/i/liberation-fonts/liberation-fonts-tt md5sum = 8150db1c6e27cacdfd524b563b85b69e destination = ${fonts:location}/${:_buildout_section_name_} -# IPAex Font - Japanese fonts provided by IPA +# IPAex and IPA Font - Japanese fonts provided by IPA # http://ossipedia.ipa.go.jp/ipafont/index.html [ipaex-fonts] recipe = hexagonit.recipe.download @@ -24,3 +25,10 @@ strip-top-level-dir = true url = http://info.openlab.ipa.go.jp/ipafont/fontdata/IPAexfont00103.zip md5sum = ac67b2fc3aab7f683d89f0070df284e7 destination = ${fonts:location}/${:_buildout_section_name_} + +[ipa-fonts] +recipe = hexagonit.recipe.download +strip-top-level-dir = true +url = http://info.openlab.ipa.go.jp/ipafont/fontdata/IPAfont00303.zip +md5sum = 39a828acf27790adbe4944dfb4d94bb1 +destination = ${fonts:location}/${:_buildout_section_name_} diff --git a/component/freetype/buildout.cfg b/component/freetype/buildout.cfg index 2ca184fa05b9b4145983a94c44f7e31210a82fb5..c358b3e996713b4d215af71d95eb238c3b9a318e 100644 --- a/component/freetype/buildout.cfg +++ b/component/freetype/buildout.cfg @@ -10,8 +10,8 @@ parts = [freetype] recipe = hexagonit.recipe.cmmi -url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.6.tar.bz2 -md5sum = 5e6510613f612809d2d7862592b92ab7 +url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.8.tar.bz2 +md5sum = dbf2caca1d3afd410a29217a9809d397 configure-options = --disable-static environment = diff --git a/component/gdbm/buildout.cfg b/component/gdbm/buildout.cfg index c97171d9b97a8444d0d9ebd7a034f7e2fbb03ee6..f44aabd4b693cfd7098bb15bb880b8283f2ab095 100644 --- a/component/gdbm/buildout.cfg +++ b/component/gdbm/buildout.cfg @@ -2,25 +2,18 @@ parts = gdbm -[gdbm-nochange-patch-download] -recipe = hexagonit.recipe.download -url = ${:_profile_base_location_}/${:filename} -md5sum = fafa6cae0afbf2b5afb9ef3b8e3035a4 -download-only = true -filename = gdbm-Makefile.in-nochange.patch - [gdbm] recipe = hexagonit.recipe.cmmi -url = ftp://ftp.gnu.org/gnu/gdbm/gdbm-1.8.3.tar.gz -md5sum = 1d1b1d5c0245b1c00aff92da751e9aa1 -patches = ${gdbm-nochange-patch-download:location}/${gdbm-nochange-patch-download:filename} +version = 1.9.1 +url = 
ftp://ftp.gnu.org/gnu/gdbm/gdbm-${:version}.tar.gz +md5sum = 59f6e4c4193cb875964ffbe8aa384b58 configure-options = --disable-static # install as parts/gdbm/include/gdbm/*.h etc. because some softwares # (eg. python's dbmmodule.c extension) assume the location like this. includedir = ${buildout:parts-directory}/${:_buildout_section_name_}/include make-targets = - install install-compat includedir=${:includedir}/gdbm && rm -f ${:includedir}/*.h && ln -sf gdbm/gdbm.h ${:includedir}/gdbm.h + install includedir=${:includedir}/gdbm && rm -f ${:includedir}/*.h && ln -sf gdbm/gdbm.h ${:includedir}/gdbm.h # it seems that parallel build sometimes fails for gdbm. make-options = -j1 diff --git a/component/ghostscript/buildout.cfg b/component/ghostscript/buildout.cfg index 90f4b639a6e3810106d3221e81b10eb61b2eadf7..85ff413060e3c14faf7a6ee981e0e5de3577f03a 100644 --- a/component/ghostscript/buildout.cfg +++ b/component/ghostscript/buildout.cfg @@ -7,16 +7,8 @@ extends = parts = ghostscript -[ghostscript-hooks-download] -recipe = hexagonit.recipe.download -url = ${:_profile_base_location_}/${:filename} -filename = ghostscript-hooks.py -md5sum = 731475648c91507bd1dfe2a61ee84552 -download-only = true - [ghostscript-common] recipe = hexagonit.recipe.cmmi -pre-configure-hook = ${ghostscript-hooks-download:location}/${ghostscript-hooks-download:filename}:pre_configure_hook configure-options = --disable-cups --disable-cairo @@ -32,17 +24,9 @@ environment = LD_LIBRARY_PATH=${fontconfig:location}/lib [ghostscript] -# we prefer ghostscript-8 for now, because ghostscript-9.00 seems to have a -# problem with Japanese fonts if -dTextAlphaBits=4 is specified by -# imagemagick. -<= ghostscript-8 +<= ghostscript-9 [ghostscript-9] <= ghostscript-common -url = http://ghostscript.com/releases/ghostscript-9.00.tar.gz -md5sum = a402462478b4cdda3e1816899227b845 - -[ghostscript-8] -<= ghostscript-common -url = http://www.nexedi.org/static/tarballs/ghostscript/ghostscript-8.71-no-looping-symlink.tar.bz2 -md5sum = 34639af3ffe8594f2c5ea944dfbe1d78 +url = http://downloads.ghostscript.com/public/ghostscript-9.04.tar.bz2 +md5sum = 9f6899e821ab6d78ab2c856f10fa3023 diff --git a/component/ghostscript/ghostscript-hooks.py b/component/ghostscript/ghostscript-hooks.py deleted file mode 100644 index c39936894def08a085a2928ca03995464899e351..0000000000000000000000000000000000000000 --- a/component/ghostscript/ghostscript-hooks.py +++ /dev/null @@ -1,7 +0,0 @@ -import os -def pre_configure_hook(oprtions, buildout): - # fixes python bug related to not creating symlink contained in tarfiles - for missing in 'configure.ac', 'Makefile.in': - if not os.path.exists(os.path.join(os.path.curdir, missing)): - os.symlink(os.path.join(os.path.curdir, 'base', missing), - os.path.join(os.path.curdir, missing)) diff --git a/component/grep/buildout.cfg b/component/grep/buildout.cfg index 589bb6cb490c4ba6dc5bf3742f91c4dc111b946b..231f7bca91a66dcc064f6b65318594a36bae8a2a 100644 --- a/component/grep/buildout.cfg +++ b/component/grep/buildout.cfg @@ -6,8 +6,8 @@ parts = [grep] recipe = hexagonit.recipe.cmmi -url = http://ftp.gnu.org/gnu/grep/grep-2.8.tar.gz -md5sum = cb2dfc502c5afc7a4a6e5f6cefd6850e +url = http://ftp.gnu.org/gnu/grep/grep-2.9.tar.gz +md5sum = 03e3451a38b0d615cb113cbeaf252dc0 environment = - PKG_CONFIG_PATH=${pcre:location}/lib/pkgconfig - LDFLAGS =-Wl,--as-needed -Wl,-rpath=${pcre:location}/lib + CPPFLAGS=-I${pcre:location}/include + LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${pcre:location}/lib diff --git a/component/groonga/buildout.cfg 
b/component/groonga/buildout.cfg index 45608460ecf6affa1aa6219d7bc1be05539cf9a7..49b8b0f94cef7126a2ec67c637c1f7c147cc20ae 100644 --- a/component/groonga/buildout.cfg +++ b/component/groonga/buildout.cfg @@ -8,8 +8,8 @@ parts = [groonga] recipe = hexagonit.recipe.cmmi -url = http://packages.groonga.org/source/groonga/groonga-1.2.5.tar.gz -md5sum = 7e608406677b7a3f91e287acc0c718c0 +url = http://packages.groonga.org/source/groonga/groonga-1.2.8.tar.gz +md5sum = a319b1f3a55cbf250ef5255f5c51ff46 configure-options = --disable-static --disable-glibtest diff --git a/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch b/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch deleted file mode 100644 index a3d817294cb2d09d989283e34efaf000fcde6bf0..0000000000000000000000000000000000000000 --- a/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch +++ /dev/null @@ -1,37 +0,0 @@ ---- a/configure.ac -+++ b/configure.ac -@@ -28,7 +28,7 @@ - MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir/regex" - MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir" - AC_SUBST(MYSQL_INC) -- MYSQL_SOURCE_VERSION=`cat $ac_mysql_source_dir/configure.in | grep "\[[MySQL Server\]]" | sed -e "s|.*\([[0-9]]\+\.[[0-9]]\+\.[[0-9]]\+[[0-9a-zA-Z\_\-]]*\).*|\1|"` -+ MYSQL_SOURCE_VERSION=`cat $ac_mysql_source_dir/configure.in | grep "\[[MariaDB Server\]]" | sed -e "s|.*\([[0-9]]\+\.[[0-9]]\+\.[[0-9]]\+[[0-9a-zA-Z\_\-]]*\).*|\1|"` - AC_MSG_RESULT([yes: Using $ac_mysql_source_dir, version $MYSQL_SOURCE_VERSION]) - else - AC_MSG_ERROR([invalid MySQL source directory: $ac_mysql_source_dir]) ---- a/handlersocket/database.cpp -+++ b/handlersocket/database.cpp -@@ -686,19 +686,19 @@ - for (uint32_t i = 0; i < limit + skip; ++i) { - if (i == 0) { - const key_part_map kpm = (1U << args.kvalslen) - 1; -- r = hnd->index_read_map(table->record[0], key_buf, kpm, find_flag); -+ r = hnd->ha_index_read_map(table->record[0], key_buf, kpm, find_flag); - } else { - switch (find_flag) { - case HA_READ_BEFORE_KEY: - case HA_READ_KEY_OR_PREV: -- r = hnd->index_prev(table->record[0]); -+ r = hnd->ha_index_prev(table->record[0]); - break; - case HA_READ_AFTER_KEY: - case HA_READ_KEY_OR_NEXT: -- r = hnd->index_next(table->record[0]); -+ r = hnd->ha_index_next(table->record[0]); - break; - case HA_READ_KEY_EXACT: -- r = hnd->index_next_same(table->record[0], key_buf, kplen_sum); -+ r = hnd->ha_index_next_same(table->record[0], key_buf, kplen_sum); - break; - default: - r = HA_ERR_END_OF_FILE; /* to finish the loop */ diff --git a/component/handlersocket/buildout.cfg b/component/handlersocket/buildout.cfg deleted file mode 100644 index 72a260caa78c202aad98ec3bc8e95fb6f018ad97..0000000000000000000000000000000000000000 --- a/component/handlersocket/buildout.cfg +++ /dev/null @@ -1,36 +0,0 @@ -# Handlersocket - a NoSQL plugin for MySQL. 
-# http://github.com/ahiguti/HandlerSocket-Plugin-for-MySQL - -[buildout] -extends = - ../autoconf/buildout.cfg - ../automake/buildout.cfg - ../libtool/buildout.cfg - ../mariadb/buildout.cfg - -parts = - handlersocket - -[handlersocket-mariadb-patch] -recipe = hexagonit.recipe.download -url = ${:_profile_base_location_}/${:filename} -md5sum = 2654feea2e867c898b741ac0f0aa8e14 -filename = HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch -download-only = true - -[handlersocket] -recipe = hexagonit.recipe.cmmi -url = http://github.com/ahiguti/HandlerSocket-Plugin-for-MySQL/tarball/1.0.6 -md5sum = 57f5c131e3d29701b01dd92c35ed25fd -patch-options = -p1 -patches = - ${handlersocket-mariadb-patch:location}/${handlersocket-mariadb-patch:filename} -configure-command = - ACLOCAL_ARGS=-I${libtool:location}/share/aclocal ./autogen.sh && ./configure -configure-options = - --prefix=${buildout:parts-directory}/${:_buildout_section_name_} - --with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version} - --with-mysql-bindir=${mariadb:location}/bin - --with-mysql-plugindir=${mariadb:location}/lib/mysql/plugin -environment = - PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s diff --git a/component/imagemagick/buildout.cfg b/component/imagemagick/buildout.cfg index 216d2389dfdef7b923e854a9be3f5d5b0559e491..a3d90bae4b8013307c31d2a0b2fd5e1f33709e20 100644 --- a/component/imagemagick/buildout.cfg +++ b/component/imagemagick/buildout.cfg @@ -35,8 +35,8 @@ filename = imagemagick-6.6.7-4-without-lzma.patch [imagemagick] recipe = hexagonit.recipe.cmmi -url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.7.2-10.tar.bz2 -md5sum = 073ec5d7f2a22db96a0e87e4322b75f9 +url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.7.3-1.tar.bz2 +md5sum = 89d378733d89bc61c04bc0fdc140a3a7 configure-options = --disable-static --without-x diff --git a/component/libaio/buildout.cfg b/component/libaio/buildout.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4d46fe3c4b48486c708addcfa667a751c05f1c35 --- /dev/null +++ b/component/libaio/buildout.cfg @@ -0,0 +1,12 @@ +[buildout] +parts = + libaio + +[libaio] +recipe = hexagonit.recipe.cmmi +# originally located at http://www.kernel.org/pub/linux/libs/aio/ +url = http://ftp.free.fr/mirrors/ftp.debian.org/pool/main/liba/libaio/libaio_0.3.109.orig.tar.gz +md5sum = 435a5b16ca6198eaf01155263d855756 +location = ${buildout:parts-directory}/${:_buildout_section_name_} +configure-command = echo +make-options = prefix=${:location} diff --git a/component/libpng/buildout.cfg b/component/libpng/buildout.cfg index d7ead5bdf7c3e6d3baab4ddc6408ccabffbfa5c6..a78e8143ea8c5762c182fb003a8388bcbacbe591 100644 --- a/component/libpng/buildout.cfg +++ b/component/libpng/buildout.cfg @@ -15,5 +15,5 @@ environment = [libpng] <= libpng-common -url = http://download.sourceforge.net/libpng/libpng-1.5.4.tar.bz2 -md5sum = b43afe39237b69859522455b215f9e85 +url = http://download.sourceforge.net/libpng/libpng-1.5.5.tar.bz2 +md5sum = 3270bf2990c3174ae939388398de751e diff --git a/component/libreoffice-bin/buildout.cfg b/component/libreoffice-bin/buildout.cfg index c29f2b76c8adfed4901c9d0b8de2b2d2df8b9cbb..5cf716eb14a95f390f308bf4c6451f0a4be10dca 100644 --- a/component/libreoffice-bin/buildout.cfg +++ b/component/libreoffice-bin/buildout.cfg @@ -12,11 +12,11 @@ find-links = [libreoffice-bin] recipe = slapos.recipe.build # here, two %s are used, first one is for directory name (eg. x86_64), and second one is for filename (eg. 
x86-64). -url = http://download.documentfoundation.org/libreoffice/stable/3.4.3/rpm/%s/LibO_3.4.3_Linux_%s_install-rpm_en-US.tar.gz +url = http://download.documentfoundation.org/libreoffice/stable/3.4.4/rpm/%s/LibO_3.4.4_Linux_%s_install-rpm_en-US.tar.gz # supported architectures md5sums -md5sum_x86 = ae1b2b387dcef513c378cc95b255affc -md5sum_x86-64 = b2d6a902182c1af82ca088fbb665d0e3 +md5sum_x86 = 529c60e161d0c23405723f4a3cd1e046 +md5sum_x86-64 = fc6cb85312d6e11a7ab6ddb1bc4e79cc # where office code can be found? officedir = libreoffice3.4 @@ -37,7 +37,7 @@ script = rpmsdir = os.path.join(workdir, [q for q in os.listdir(workdir) if q == 'RPMS'][0]) rpmlist = [os.path.join(rpmsdir, q) for q in os.listdir(rpmsdir) if q.endswith('.rpm') and 'javafilter' not in q and 'xsltfilter' not in q] [self.pipeCommand([[sys.executable, '${:rpm2cpio}', rpm], ['${:cpio}', '-idum']], cwd=storagedir) for rpm in rpmlist] - self.copyTree(os.path.join(storagedir, 'opt', '${:officedir}'), location, ['basis3.4', 'basis3.3', 'ure']) + self.copyTree(os.path.join(storagedir, 'opt', '${:officedir}'), location, ['basis3.4', 'ure']) # helper binaries cpio = ${cpio:location}/bin/cpio diff --git a/component/libtool/buildout.cfg b/component/libtool/buildout.cfg index d50fa57038d95f16b36a514fde80e3e4d854ed37..8d6256665e45aac0682f028fa47575c4d3c22a1c 100644 --- a/component/libtool/buildout.cfg +++ b/component/libtool/buildout.cfg @@ -3,7 +3,7 @@ parts = libtool [libtool] recipe = hexagonit.recipe.cmmi -md5sum = b32b04148ecdd7344abc6fe8bd1bb021 -url = http://ftp.gnu.org/gnu/libtool/libtool-2.4.tar.gz +md5sum = d2f3b7d4627e69e13514a40e72a24d50 +url = http://ftp.gnu.org/gnu/libtool/libtool-2.4.2.tar.gz configure-options = --disable-static diff --git a/component/maatkit/buildout.cfg b/component/maatkit/buildout.cfg deleted file mode 100644 index 7f5d7d5efeecdb5fd6ded97ba218f40495b99549..0000000000000000000000000000000000000000 --- a/component/maatkit/buildout.cfg +++ /dev/null @@ -1,18 +0,0 @@ -[buildout] -extends = - ../perl/buildout.cfg - ../perl-DBI/buildout.cfg - ../perl-DBD-MySQL/buildout.cfg -parts = - maatkit - -[maatkit] -recipe = hexagonit.recipe.cmmi -depends = - ${perl:version} - ${perl-DBI:version} - ${perl-DBD-MySQL:version} -url = http://maatkit.googlecode.com/files/maatkit-7540.tar.gz -md5sum = 55457f98500b096a6bf549356d3445fe -configure-command = - ${perl:location}/bin/perl Makefile.PL diff --git a/component/mariadb/buildout.cfg b/component/mariadb/buildout.cfg index 73e4c8719b8fb93b3a780afd5b4b85096f51c9b4..56195197cc77792b0e307078f8cd3993e30fbb3a 100644 --- a/component/mariadb/buildout.cfg +++ b/component/mariadb/buildout.cfg @@ -5,6 +5,7 @@ extends = ../zlib/buildout.cfg ../groonga/buildout.cfg + ../libevent/buildout.cfg ../ncurses/buildout.cfg ../pkgconfig/buildout.cfg ../readline/buildout.cfg @@ -12,11 +13,18 @@ extends = parts = mariadb +[mariadb-no_test-patch] +recipe = hexagonit.recipe.download +url = ${:_profile_base_location_}/${:filename} +md5sum = d65f61829cfbcd5062f49db2b00bd6fe +filename = mysql_create_system_tables__no_test.patch +download-only = true + [mariadb] recipe = hexagonit.recipe.cmmi -version = 5.3.1-beta -url = http://downloads.askmonty.org/f/mariadb-${:version}/kvm-tarbake-jaunty-x86/mariadb-${:version}.tar.gz/from/http://mirror.layerjet.com/mariadb -md5sum = 5b3a94de1c1fcaa193edbbc8d7f8ffe4 +version = 5.3.2-beta +url = http://downloads.askmonty.org/f/mariadb-${:version}/kvm-tarbake-jaunty-x86/mariadb-${:version}.tar.gz/from/http:/ftp.osuosl.org/pub/mariadb +md5sum = 
d8199454059ab2c98313aaad0c1bc318 # compile directory is required to build mysql plugins. keep-compile-dir = true # configure: how to avoid searching for my.cnf? @@ -31,19 +39,30 @@ configure-options = --with-pic --with-fast-mutexes --with-charset=utf8 + --with-extra-charsets=complex --with-collation=utf8_unicode_ci + --with-big-tables + --with-embedded-server + --with-plugins=max-no-ndb + --with-aria-tmp-tables + --without-plugin-innodb_plugin + --without-plugin-oqgraph --without-readline --with-ssl + --with-libevent=${libevent:location} --with-zlib-dir=${zlib:location} +patch-options = -p0 +patches = + ${mariadb-no_test-patch:location}/${mariadb-no_test-patch:filename} environment = - CPPFLAGS =-I${ncurses:location}/include -I${readline:location}/include - LDFLAGS =-L${readline:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${ncurses:location}/lib -Wl,-rpath=${readline:location}/lib + CPPFLAGS=-I${ncurses:location}/include -I${readline:location}/include + LDFLAGS=-Wl,-rpath=${libevent:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -Wl,-rpath=${zlib:location}/lib -[groonga-storage-engine-mariadb] +[mroonga-mariadb] recipe = hexagonit.recipe.cmmi -url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.5.tar.gz -md5sum = 52fed75d97a91f239750a1011ea9e468 +url = https://github.com/downloads/mroonga/mroonga/mroonga-1.10.tar.gz +md5sum = 6a712b2b20eddc65d918dabd8fba590f configure-options = --with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version} --with-mysql-config=${mariadb:location}/bin/mysql_config diff --git a/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch b/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch new file mode 100644 index 0000000000000000000000000000000000000000..98d6188fc91abac6dd6530cdd7aa113ca8c72de1 --- /dev/null +++ b/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch @@ -0,0 +1,382 @@ +--- groonga-storage-engine-1.0.1/ha_mroonga.cc 2011-10-28 07:19:15.506715507 +0200 ++++ groonga-storage-engine-1.0.1/ha_mroonga.cc 2011-11-02 11:37:03.095096227 +0100 +@@ -77,6 +77,9 @@ + extern "C" { + #endif + ++/* groonga's internal functions */ ++const char *grn_obj_get_value_(grn_ctx *ctx, grn_obj *obj, grn_id id, uint32 *size); ++ + /* global variables */ + pthread_mutex_t mrn_db_mutex; + pthread_mutex_t mrn_log_mutex; +@@ -109,7 +112,6 @@ + static bool mrn_logfile_opened = false; + grn_log_level mrn_log_level_default = GRN_LOG_DEFAULT_LEVEL; + ulong mrn_log_level = (ulong) mrn_log_level_default; +-char mrn_default_parser_name[MRN_MAX_KEY_SIZE]; + char *mrn_default_parser; + + static void mrn_logger_func(int level, const char *time, const char *title, +@@ -228,13 +230,12 @@ + "default parser changed from '%s' to '%s'", + old_value, new_value); + grn_ctx_fin(&ctx); +- strcpy(mrn_default_parser_name, new_value); +- mrn_default_parser = mrn_default_parser_name; ++ strncpy(mrn_default_parser, new_value, MRN_MAX_KEY_SIZE - 1); + DBUG_VOID_RETURN; + } + + static MYSQL_SYSVAR_STR(default_parser, mrn_default_parser, +- PLUGIN_VAR_RQCMDARG, ++ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "default fulltext parser", + NULL, + mrn_default_parser_update, +@@ -908,6 +909,15 @@ + field->charset()); + break; + } ++ case MYSQL_TYPE_BLOB: ++ { ++ GRN_VOID_INIT(&buf); ++ uint32 len; ++ const char *val = grn_obj_get_value_(ctx, col, id, &len); ++ Field_blob *blob = (Field_blob *)field; ++ 
blob->set_ptr((uchar *)&len, (uchar *)val); ++ break; ++ } + default: //strings etc.. + { + GRN_TEXT_INIT(&buf,0); +@@ -1010,6 +1020,9 @@ + goto error_allocated_open_tables_hash_init; + } + ++ mrn_default_parser = (char *)my_malloc(MRN_MAX_KEY_SIZE, MYF(MY_WME)); ++ strncpy(mrn_default_parser, MRN_PARSER_DEFAULT, MRN_MAX_KEY_SIZE - 1); ++ + return 0; + + error_allocated_open_tables_hash_init: +@@ -4422,7 +4435,7 @@ + DBUG_RETURN(error); + } + +-int ha_mroonga::wrapper_index_read_map(uchar * buf, const uchar * key, ++int ha_mroonga::wrapper_index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) + { +@@ -4442,7 +4455,11 @@ + MRN_SET_WRAP_TABLE_KEY(this, table); + if (fulltext_searching) + set_pk_bitmap(); ++#ifdef MRN_HANDLER_HAVE_HA_INDEX_READ_MAP ++ error = wrap_handler->ha_index_read_map(buf, key, keypart_map, find_flag); ++#else + error = wrap_handler->index_read_map(buf, key, keypart_map, find_flag); ++#endif + MRN_SET_BASE_SHARE_KEY(share, table->s); + MRN_SET_BASE_TABLE_KEY(this, table); + } +@@ -4557,7 +4574,7 @@ + DBUG_RETURN(error); + } + +-int ha_mroonga::index_read_map(uchar * buf, const uchar * key, ++int ha_mroonga::index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) + { +@@ -4572,6 +4589,7 @@ + DBUG_RETURN(error); + } + ++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP + int ha_mroonga::wrapper_index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map) + { +@@ -4658,6 +4676,7 @@ + } + DBUG_RETURN(error); + } ++#endif + + int ha_mroonga::wrapper_index_next(uchar *buf) + { +@@ -6226,7 +6245,11 @@ + } + + ha_rows ha_mroonga::wrapper_multi_range_read_info(uint keyno, uint n_ranges, +- uint keys, uint *bufsz, ++ uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif ++ uint *bufsz, + uint *flags, COST_VECT *cost) + { + MRN_DBUG_ENTER_METHOD(); +@@ -6236,6 +6259,9 @@ + if (fulltext_searching) + set_pk_bitmap(); + rows = wrap_handler->multi_range_read_info(keyno, n_ranges, keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ key_parts, ++#endif + bufsz, flags, cost); + MRN_SET_BASE_SHARE_KEY(share, table->s); + MRN_SET_BASE_TABLE_KEY(this, table); +@@ -6243,16 +6269,26 @@ + } + + ha_rows ha_mroonga::storage_multi_range_read_info(uint keyno, uint n_ranges, +- uint keys, uint *bufsz, ++ uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif ++ uint *bufsz, + uint *flags, COST_VECT *cost) + { + MRN_DBUG_ENTER_METHOD(); + ha_rows rows = handler::multi_range_read_info(keyno, n_ranges, keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ key_parts, ++#endif + bufsz, flags, cost); + DBUG_RETURN(rows); + } + + ha_rows ha_mroonga::multi_range_read_info(uint keyno, uint n_ranges, uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif + uint *bufsz, uint *flags, + COST_VECT *cost) + { +@@ -6261,9 +6297,15 @@ + if (share->wrapper_mode) + { + rows = wrapper_multi_range_read_info(keyno, n_ranges, keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ key_parts, ++#endif + bufsz, flags, cost); + } else { + rows = storage_multi_range_read_info(keyno, n_ranges, keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ key_parts, ++#endif + bufsz, flags, cost); + } + DBUG_RETURN(rows); +@@ -6315,7 +6357,7 @@ + DBUG_RETURN(error); + } + +-int ha_mroonga::wrapper_multi_range_read_next(char **range_info) ++int 
ha_mroonga::wrapper_multi_range_read_next(range_id_t *range_info) + { + MRN_DBUG_ENTER_METHOD(); + int error = 0; +@@ -6329,14 +6371,14 @@ + DBUG_RETURN(error); + } + +-int ha_mroonga::storage_multi_range_read_next(char **range_info) ++int ha_mroonga::storage_multi_range_read_next(range_id_t *range_info) + { + MRN_DBUG_ENTER_METHOD(); + int error = handler::multi_range_read_next(range_info); + DBUG_RETURN(error); + } + +-int ha_mroonga::multi_range_read_next(char **range_info) ++int ha_mroonga::multi_range_read_next(range_id_t *range_info) + { + MRN_DBUG_ENTER_METHOD(); + int error = 0; +--- groonga-storage-engine-1.0.1/ha_mroonga.h 2011-10-27 12:31:36.859277054 +0200 ++++ groonga-storage-engine-1.0.1/ha_mroonga.h 2011-11-02 11:37:03.095096227 +0100 +@@ -47,18 +47,22 @@ + # define MRN_HANDLER_HAVE_ADD_INDEX 1 + #endif + +-#if (MYSQL_VERSION_ID >= 50600) || \ +- (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302) +-# define MRN_HANDLER_HAVE_HA_CLOSE 1 ++#if (MYSQL_VERSION_ID >= 50603) || \ ++ (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50209) + # define MRN_HANDLER_HAVE_HA_RND_NEXT 1 + # define MRN_HANDLER_HAVE_HA_RND_POS 1 ++# define MRN_HANDLER_HAVE_HA_INDEX_READ_MAP 1 + # define MRN_HANDLER_HAVE_HA_INDEX_READ_IDX_MAP 1 + # define MRN_HANDLER_HAVE_HA_INDEX_NEXT 1 + # define MRN_HANDLER_HAVE_HA_INDEX_PREV 1 + # define MRN_HANDLER_HAVE_HA_INDEX_FIRST 1 + # define MRN_HANDLER_HAVE_HA_INDEX_LAST 1 + # define MRN_HANDLER_HAVE_HA_INDEX_NEXT_SAME 1 ++#endif + ++#if (MYSQL_VERSION_ID >= 50603) || \ ++ (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302) ++# define MRN_HANDLER_HAVE_HA_CLOSE 1 + # define MRN_HANDLER_HAVE_MULTI_RANGE_READ 1 + #endif + +@@ -66,6 +70,14 @@ + # define MRN_HANDLER_HAVE_HA_INPLACE_INDEX_CHANGE + #endif + ++#ifndef MRN_MARIADB_P ++# define MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP ++#endif ++ ++#if (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302) ++# define MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++#endif ++ + #if MYSQL_VERSION_ID < 50600 + typedef Item COND; + #endif +@@ -74,6 +86,10 @@ + typedef MYSQL_ERROR Sql_condition; + #endif + ++#ifndef MRN_MARIADB_P ++ typedef char *range_id_t; ++#endif ++ + class ha_mroonga; + + /* structs */ +@@ -213,11 +229,15 @@ + ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + int index_init(uint idx, bool sorted); + int index_end(); ++#ifndef MRN_HANDLER_HAVE_HA_INDEX_READ_MAP + int index_read_map(uchar * buf, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); ++#endif ++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP + int index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map); ++#endif + #ifndef MRN_HANDLER_HAVE_HA_INDEX_NEXT + int index_next(uchar *buf); + #endif +@@ -261,11 +281,14 @@ + uint n_ranges, uint *bufsz, + uint *flags, COST_VECT *cost); + ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif + uint *bufsz, uint *flags, COST_VECT *cost); + int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, + HANDLER_BUFFER *buf); +- int multi_range_read_next(char **range_info); ++ int multi_range_read_next(range_id_t *range_info); + #else // MRN_HANDLER_HAVE_MULTI_RANGE_READ + int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, +@@ -321,6 +344,11 @@ + #ifdef MRN_HANDLER_HAVE_HA_RND_POS + int rnd_pos(uchar *buf, uchar *pos); + #endif ++#ifdef 
MRN_HANDLER_HAVE_HA_INDEX_READ_MAP ++ int index_read_map(uchar *buf, const uchar *key, ++ key_part_map keypart_map, ++ enum ha_rkey_function find_flag); ++#endif + #ifdef MRN_HANDLER_HAVE_HA_INDEX_NEXT + int index_next(uchar *buf); + #endif +@@ -469,10 +497,12 @@ + int storage_index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); ++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP + int wrapper_index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map); + int storage_index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map); ++#endif + int wrapper_index_next(uchar *buf); + int storage_index_next(uchar *buf); + int wrapper_index_prev(uchar *buf); +@@ -533,9 +563,15 @@ + uint *flags, + COST_VECT *cost); + ha_rows wrapper_multi_range_read_info(uint keyno, uint n_ranges, uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif + uint *bufsz, uint *flags, + COST_VECT *cost); + ha_rows storage_multi_range_read_info(uint keyno, uint n_ranges, uint keys, ++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS ++ uint key_parts, ++#endif + uint *bufsz, uint *flags, + COST_VECT *cost); + int wrapper_multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, +@@ -544,8 +580,8 @@ + int storage_multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, + HANDLER_BUFFER *buf); +- int wrapper_multi_range_read_next(char **range_info); +- int storage_multi_range_read_next(char **range_info); ++ int wrapper_multi_range_read_next(range_id_t *range_info); ++ int storage_multi_range_read_next(range_id_t *range_info); + #else // MRN_HANDLER_HAVE_MULTI_RANGE_READ + int wrapper_read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, +--- groonga-storage-engine-1.0.1/test/run-sql-test.sh 2011-09-27 10:43:29.093290682 +0200 ++++ groonga-storage-engine-1.0.1/test/run-sql-test.sh 2011-11-02 11:37:03.099096256 +0100 +@@ -24,12 +24,20 @@ + source_test_suites_dir="${source_mysql_test_dir}/suite" + build_test_suites_dir="${build_mysql_test_dir}/suite" + case "${MYSQL_VERSION}" in +- 5.1) ++ 5.1.*) + plugins_dir="${MYSQL_BUILD}/lib/mysql/plugin" + if ! test -d "${build_test_suites_dir}"; then + mkdir -p "${build_test_suites_dir}" + fi + ;; ++ *-MariaDB*) ++ if ! test -d "${build_test_suites_dir}"; then ++ ln -s "${source_test_suites_dir}" "${build_test_suites_dir}" ++ fi ++ if ! test -d "${MYSQL_BUILD}/plugin/mroonga"; then ++ ln -s "${top_dir}" "${MYSQL_BUILD}/plugin/mroonga" ++ fi ++ ;; + *) + if ! 
test -d "${build_test_suites_dir}"; then + ln -s "${source_test_suites_dir}" "${build_test_suites_dir}" +@@ -47,10 +55,14 @@ + fi + done + +-make -C ${top_dir} \ +- install-pluginLTLIBRARIES \ +- plugindir=${plugins_dir} > /dev/null || \ +- exit 1 ++if test -n "${plugins_dir}"; then ++ make -C ${top_dir} \ ++ install-pluginLTLIBRARIES \ ++ plugindir=${plugins_dir} > /dev/null || \ ++ exit 1 ++else ++ make -C ${top_dir} > /dev/null || exit 1 ++fi + + (cd "$build_mysql_test_dir" && \ + ./mysql-test-run.pl \ diff --git a/component/mariadb/mysql_create_system_tables__no_test.patch b/component/mariadb/mysql_create_system_tables__no_test.patch new file mode 100644 index 0000000000000000000000000000000000000000..31f5d0ab12a9c4715a8998e2107a3c17cce5f14f --- /dev/null +++ b/component/mariadb/mysql_create_system_tables__no_test.patch @@ -0,0 +1,26 @@ +# 33_scripts__mysql_create_system_tables__no_test.dpatch by <ch@debian.org> + +A user with no password prevents a normal user from login under certain +circumstances as it is checked first. +See http://bugs.debian.org/301741 +and http://bugs.mysql.com/bug.php?id=6901 + +--- scripts/mysql_system_tables_data.sql 2008-12-04 22:59:44.000000000 +0100 ++++ scripts/mysql_system_tables_data.sql 2008-12-04 23:00:07.000000000 +0100 +@@ -11,8 +11,6 @@ + -- Fill "db" table with default grants for anyone to + -- access database 'test' and 'test_%' if "db" table didn't exist + CREATE TEMPORARY TABLE tmp_db LIKE db; +-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); +-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); + INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; + DROP TABLE tmp_db; + +@@ -24,7 +22,5 @@ + INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'',''); + REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','' FROM dual WHERE LOWER( @current_hostname) != 'localhost'; + REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'',''); +-INSERT INTO tmp_user (host,user) VALUES ('localhost',''); +-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost'; + INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0; + DROP TABLE tmp_user; diff --git a/component/memcached/buildout.cfg b/component/memcached/buildout.cfg index 5c36165e4bfa3777421094c7d358b1be8aa00e3c..dbc2979c053fb0e367cd0f09b839f3e2e4d0e0ea 100644 --- a/component/memcached/buildout.cfg +++ b/component/memcached/buildout.cfg @@ -1,16 +1,9 @@ [buildout] parts = memcached -extends = ../libevent/buildout.cfg - -[memcached-strict-aliasing-patch] -# on some platforms original memcached refuses to build: -# * http://code.google.com/p/memcached/issues/detail?id=60 -# * http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=565033 -recipe = hexagonit.recipe.download -url = ${:_profile_base_location_}/${:filename} -md5sum = c03b3bfc237b77667b0e90442b0980e8 -download-only = true -filename = memcached-fix-strict-aliasing.patch +extends = + ../autoconf/buildout.cfg + ../automake/buildout.cfg + 
../libevent/buildout.cfg [memcached-fix-array-subscript-is-above-array-bounds] recipe = hexagonit.recipe.download @@ -19,45 +12,30 @@ filename = memcached-1.4-fix-array-subscript-is-above-array-bounds.patch download-only = true md5sum = 472508b9a4b6c0b9f5d6f2abce3444e3 -[memcached-gcc4.6.patch] +[memcached-gcc-4.4.patch] recipe = hexagonit.recipe.download url = ${:_profile_base_location_}/${:filename} -filename = memcached-gcc4.6.patch +filename = memcached-gcc-4.4.patch download-only = true -md5sum = 3418477f64500cd2a8dce046f5d72fec +md5sum = fd98d0cbfc4d3a25ac9808472fbe62f8 [memcached] -<= memcached-1.4.6 - -[memcached-1.4.6] -<= memcached-common -url = http://memcached.googlecode.com/files/memcached-1.4.6.tar.gz -md5sum = 243e5d82de27e6e45caf0ebfd400e41a -patches = - ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename} - -[memcached-1.4.5] -<= memcached-common -url = http://memcached.googlecode.com/files/memcached-1.4.5.tar.gz -md5sum = 583441a25f937360624024f2881e5ea8 -patches = - ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename} - ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename} - ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename} - -[memcached-1.4.4] -<= memcached-common -url = http://memcached.googlecode.com/files/memcached-1.4.4.tar.gz -md5sum = 5ca5b24de347e97ac1f48f3785b4178a -patches = - ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename} - ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename} - ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename} - -[memcached-common] recipe = hexagonit.recipe.cmmi +url = http://memcached.googlecode.com/files/memcached-1.4.8.tar.gz +md5sum = b7104e269511621c2777367d6d6315fe +patches = + ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename} ${memcached-gcc-4.4.patch:location}/${memcached-gcc-4.4.patch:filename} +patch-options = -p1 +configure-command = + aclocal-1.11 + autoheader + automake-1.11 --foreign + autoconf + ./configure configure-options = + --prefix=${buildout:parts-directory}/${:_buildout_section_name_} --with-libevent=${libevent:location} -patch-options = -p1 + --disable-docs environment = - LDFLAGS =-Wl,-rpath=${libevent:location}/lib + PATH=${autoconf:location}/bin:${automake-1.11:location}/bin:%(PATH)s + LDFLAGS =-Wl,-rpath=${libevent:location}/lib diff --git a/component/memcached/memcached-fix-strict-aliasing.patch b/component/memcached/memcached-fix-strict-aliasing.patch deleted file mode 100644 index ac8d4dde568472b007a8565ee68c398943a47857..0000000000000000000000000000000000000000 --- a/component/memcached/memcached-fix-strict-aliasing.patch +++ /dev/null @@ -1,40 +0,0 @@ -diff -rdBu memcached-1.4.0-rc1/memcached.h memcached-1.4.0-my/memcached.h ---- memcached-1.4.0-rc1/memcached.h 2009-05-29 00:51:56.000000000 +0400 -+++ memcached-1.4.0-my/memcached.h 2009-06-07 22:32:52.000000000 +0400 -@@ -75,21 +75,21 @@ - - /* warning: don't use these macros with a function, as it evals its arg twice */ - #define ITEM_get_cas(i) ((uint64_t)(((i)->it_flags & ITEM_CAS) ? 
\ -- *(uint64_t*)&((i)->end[0]) : 0x0)) -+ *(uint64_t*)((char*)(i) + sizeof(*i)) : 0x0)) - #define ITEM_set_cas(i,v) { if ((i)->it_flags & ITEM_CAS) { \ -- *(uint64_t*)&((i)->end[0]) = v; } } -+ *(uint64_t*)((char*)(i) + sizeof(*i)) = v; } } - --#define ITEM_key(item) (((char*)&((item)->end[0])) \ -+#define ITEM_key(item) ((char*)(item) + sizeof(*item) \ - + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) - --#define ITEM_suffix(item) ((char*) &((item)->end[0]) + (item)->nkey + 1 \ -+#define ITEM_suffix(item) ((char*)(item) + sizeof(*item) + (item)->nkey + 1 \ - + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) - --#define ITEM_data(item) ((char*) &((item)->end[0]) + (item)->nkey + 1 \ -+#define ITEM_data(item) ((char*)(item) + sizeof(*item) + (item)->nkey + 1 \ - + (item)->nsuffix \ - + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) - --#define ITEM_ntotal(item) (sizeof(struct _stritem) + (item)->nkey + 1 \ -+#define ITEM_ntotal(item) (sizeof(*item) + (item)->nkey + 1 \ - + (item)->nsuffix + (item)->nbytes \ - + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) - -@@ -285,7 +285,6 @@ - uint8_t it_flags; /* ITEM_* above */ - uint8_t slabs_clsid;/* which slab class we're in */ - uint8_t nkey; /* key length, w/terminating null and padding */ -- void * end[]; - /* if it_flags & ITEM_CAS we have 8 bytes CAS */ - /* then null-terminated key */ - /* then " flags length\r\n" (no terminating null) */ - diff --git a/component/memcached/memcached-gcc-4.4.patch b/component/memcached/memcached-gcc-4.4.patch new file mode 100644 index 0000000000000000000000000000000000000000..f86324858ad92d1fdd860b27d7647252c16d99ea --- /dev/null +++ b/component/memcached/memcached-gcc-4.4.patch @@ -0,0 +1,12 @@ +# In OpenSuse 11.2, 'gcc -dumpversion' returns '4.4', not '4.4.*'. +--- memcached-1.4.8/configure.ac.orig ++++ memcached-1.4.8/configure.ac +@@ -502,7 +502,7 @@ + GCC_VERSION=`$CC -dumpversion` + CFLAGS="$CFLAGS -Wall -Werror -pedantic -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls" + case $GCC_VERSION in +- 4.4.*) ++ 4.4 | 4.4.*) + CFLAGS="$CFLAGS -fno-strict-aliasing" + ;; + esac diff --git a/component/memcached/memcached-gcc4.6.patch b/component/memcached/memcached-gcc4.6.patch deleted file mode 100644 index 53647d90aed8753754d1652a267428b64bfe3381..0000000000000000000000000000000000000000 --- a/component/memcached/memcached-gcc4.6.patch +++ /dev/null @@ -1,36 +0,0 @@ -diff --git a/items.c b/items.c -index e7f01ea..9fc6704 100644 ---- a/items.c -+++ b/items.c -@@ -450,9 +450,7 @@ void do_item_stats_sizes(ADD_STAT add_stats, void *c) { - for (i = 0; i < num_buckets; i++) { - if (histogram[i] != 0) { - char key[8]; -- int klen = 0; -- klen = snprintf(key, sizeof(key), "%d", i * 32); -- assert(klen < sizeof(key)); -+ assert(snprintf(key, sizeof(key), "%d", i * 32) < sizeof(key)); - APPEND_STAT(key, "%u", histogram[i]); - } - } -diff --git a/memcached.c b/memcached.c -index 750c8b3..0913b77 100644 ---- a/memcached.c -+++ b/memcached.c -@@ -4627,8 +4627,6 @@ int main (int argc, char **argv) { - - /* create the listening socket, bind it, and init */ - if (settings.socketpath == NULL) { -- int udp_port; -- - const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME"); - char temp_portnumber_filename[PATH_MAX]; - FILE *portnumber_file = NULL; -@@ -4658,7 +4656,6 @@ int main (int argc, char **argv) { - * then daemonise if needed, then init libevent (in some cases - * descriptors created by libevent wouldn't survive forking). - */ -- udp_port = settings.udpport ? 
settings.udpport : settings.port; - - /* create the UDP listening socket and bind it */ - errno = 0; diff --git a/component/mysql-5.1/buildout.cfg b/component/mysql-5.1/buildout.cfg index 5c912b79d2a4ff1eb9049240ba60c4724a348140..1397b9511be4e38a927b4e8e2b370001c40ca75a 100644 --- a/component/mysql-5.1/buildout.cfg +++ b/component/mysql-5.1/buildout.cfg @@ -21,6 +21,13 @@ md5sum = eefcd08c400c58d3e89542ab482a8429 filename = mysql-5.1-sphinx-2.0.1-beta.diff download-only = true +[mysql-5.1-no_test-patch] +recipe = hexagonit.recipe.download +url = ${:_profile_base_location_}/${:filename} +md5sum = 22b0ef8baec5efc182e10d233c6f14ca +filename = mysql_create_system_tables__no_test.patch +download-only = true + [mysql-5.1] recipe = hexagonit.recipe.cmmi version = 5.1.58 @@ -60,6 +67,7 @@ make-options = patch-options = -p0 patches = ${mysql-5.1-sphinx-patch:location}/${mysql-5.1-sphinx-patch:filename} + ${mysql-5.1-no_test-patch:location}/${mysql-5.1-no_test-patch:filename} environment = PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin:%(PATH)s CPPFLAGS =-I${ncurses:location}/include -I${readline:location}/include @@ -67,8 +75,8 @@ environment = [groonga-storage-engine-mysql-5.1] recipe = hexagonit.recipe.cmmi -url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.9.tar.gz -md5sum = 78fe07122dc376796a5aede476f50cfd +url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-1.0.0.tar.gz +md5sum = 289b8b7919e790599ea79b6fe9270e04 configure-options = --with-mysql-source=${mysql-5.1:location}__compile__/mysql-${mysql-5.1:version} --with-mysql-config=${mysql-5.1:location}/bin/mysql_config diff --git a/component/mysql-5.1/mysql_create_system_tables__no_test.patch b/component/mysql-5.1/mysql_create_system_tables__no_test.patch new file mode 100644 index 0000000000000000000000000000000000000000..0087e17588e5ea42d88682af0ea5ca9c910a45cc --- /dev/null +++ b/component/mysql-5.1/mysql_create_system_tables__no_test.patch @@ -0,0 +1,26 @@ +# 33_scripts__mysql_create_system_tables__no_test.dpatch by <ch@debian.org> + +A user with no password prevents a normal user from login under certain +circumstances as it is checked first. 
+See http://bugs.debian.org/301741 +and http://bugs.mysql.com/bug.php?id=6901 + +--- scripts/mysql_system_tables_data.sql 2008-12-04 22:59:44.000000000 +0100 ++++ scripts/mysql_system_tables_data.sql 2008-12-04 23:00:07.000000000 +0100 +@@ -11,8 +11,6 @@ + -- Fill "db" table with default grants for anyone to + -- access database 'test' and 'test_%' if "db" table didn't exist + CREATE TEMPORARY TABLE tmp_db LIKE db; +-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); +-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); + INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; + DROP TABLE tmp_db; + +@@ -24,7 +22,5 @@ + INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0); + REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0 FROM dual WHERE LOWER( @current_hostname) != 'localhost'; + REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0); +-INSERT INTO tmp_user (host,user) VALUES ('localhost',''); +-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost'; + INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0; + DROP TABLE tmp_user; diff --git a/component/mysql-5.5/buildout.cfg b/component/mysql-5.5/buildout.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1c55e0ea182ec5195a79ebdb11b97d705148e2ab --- /dev/null +++ b/component/mysql-5.5/buildout.cfg @@ -0,0 +1,76 @@ +[buildout] +extends = + ../autoconf/buildout.cfg + ../automake/buildout.cfg + ../bison/buildout.cfg + ../cmake/buildout.cfg + ../groonga/buildout.cfg + ../libaio/buildout.cfg + ../libtool/buildout.cfg + ../ncurses/buildout.cfg + ../pkgconfig/buildout.cfg + ../zlib/buildout.cfg + +parts = + mysql-5.5 + +[mysql-5.5-sphinx-patch] +# this patch comes from sphinx-2.0.1-beta, including the changes for +# MySQL-5.5 from +# http://code.google.com/p/sphinxsearch/source/detail?r=2921 +recipe = hexagonit.recipe.download +url = ${:_profile_base_location_}/${:filename} +md5sum = 04549822ebfa91b5f84025ff49ef24c2 +filename = mysql-5.5-sphinx-2.0.1-beta.diff +download-only = true + +[mysql-5.5-no_test-patch] +recipe = hexagonit.recipe.download +url = ${:_profile_base_location_}/${:filename} +md5sum = bb7ee34b9a98da1f913566c63ffbc6f5 +filename = mysql_create_system_tables__no_test.patch +download-only = true + +[mysql-5.5] +recipe = hexagonit.recipe.cmmi +version = 5.5.16 +url = http://mysql.he.net/Downloads/MySQL-5.5/mysql-${:version}.tar.gz +md5sum = 462ab3752dd666ec06ca32f92673b59e +# compile directory is required to build mysql plugins. +keep-compile-dir = true +patch-options = -p0 +patches = + ${mysql-5.5-sphinx-patch:location}/${mysql-5.5-sphinx-patch:filename} + ${mysql-5.5-no_test-patch:location}/${mysql-5.5-no_test-patch:filename} +configure-command = ${cmake:location}/bin/cmake +# we use embedded yassl instead of openssl to avoid compilation errors in the sphinx search engine. 
+configure-options = + -DCMAKE_INSTALL_PREFIX=${buildout:parts-directory}/${:_buildout_section_name_} + -DBUILD_CONFIG=mysql_release + -DDEFAULT_CHARSET=utf8 + -DDEFAULT_COLLATION=utf8_unicode_ci + -DWITH_SSL=bundled + -DWITH_ZLIB=system + -DWITHOUT_EXAMPLE_STORAGE_ENGINE=1 + -DWITHOUT_DAEMON_EXAMPLE=1 + -DWITH_SPHINX_STORAGE_ENGINE=1 + -DCMAKE_C_FLAGS="-I${libaio:location}/include -I${ncurses:location}/include -I${zlib:location}/include" + -DCMAKE_INSTALL_RPATH=${libaio:location}/lib:${ncurses:location}/lib:${zlib:location}/lib +environment = + CMAKE_PROGRAM_PATH=${autoconf:location}/bin:${automake-1.11:location}/bin:${cmake:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin + CMAKE_INCLUDE_PATH=${libaio:location}/include:${ncurses:location}/include:${zlib:location}/include + CMAKE_LIBRARY_PATH=${libaio:location}/lib:${ncurses:location}/lib:${zlib:location}/lib + LDFLAGS=-L${libaio:location}/lib + +[groonga-storage-engine-mysql-5.5] +recipe = hexagonit.recipe.cmmi +url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-1.0.0.tar.gz +md5sum = 289b8b7919e790599ea79b6fe9270e04 +configure-options = + --with-mysql-source=${mysql-5.5:location}__compile__/mysql-${mysql-5.5:version} + --with-mysql-config=${mysql-5.5:location}/bin/mysql_config +environment = + PATH=${groonga:location}/bin:${pkgconfig:location}/bin:%(PATH)s + CPPFLAGS=-I${groonga:location}/include/groonga + LDFLAGS=-L${groonga:location}/lib + PKG_CONFIG_PATH=${groonga:location}/lib/pkgconfig diff --git a/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff b/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff new file mode 100644 index 0000000000000000000000000000000000000000..ee9f88cfa0b42ccc170a08f017bc229e6e3383ba --- /dev/null +++ b/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff @@ -0,0 +1,4721 @@ +diff -uNr storage/sphinx/CMakeLists.txt storage/sphinx/CMakeLists.txt +--- storage/sphinx/CMakeLists.txt 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/CMakeLists.txt 2011-10-13 00:59:59.282957578 +0200 +@@ -0,0 +1,16 @@ ++SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") ++SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") ++ADD_DEFINITIONS(-DMYSQL_SERVER) ++ ++INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ++ ${CMAKE_SOURCE_DIR}/sql ++ ${CMAKE_SOURCE_DIR}/extra/yassl/include ++ ${CMAKE_SOURCE_DIR}/regex) ++ ++SET(SPHINX_SOURCES ha_sphinx.cc) ++IF(MYSQL_VERSION_ID LESS 50515) ++ADD_LIBRARY(sphinx ha_sphinx.cc) ++ELSE() ++SET(SPHINX_PLUGIN_DYNAMIC "ha_sphinx") ++MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE MODULE_ONLY LINK_LIBRARIES mysys) ++ENDIF() +diff -uNr storage/sphinx/gen_data.php storage/sphinx/gen_data.php +--- storage/sphinx/gen_data.php 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/gen_data.php 2006-06-07 09:28:43.000000000 +0200 +@@ -0,0 +1,37 @@ ++<?php ++ ++$file_name= $argv[1]; ++ ++//echo $file_name; ++ ++$cont= file_get_contents($file_name); ++ ++$words= explode(" ", $cont); ++ ++//echo "words: ".(count($words))."\n"; ++ ++$cw = count($words); ++ ++echo "REPLACE INTO test.documents ( id, group_id, date_added, title, content ) VALUES\n"; ++ ++ ++for ($i=1; $i<=100000; $i++) ++{ ++ $count_words= mt_rand(10,30); ++ $pred = ""; ++ for ($j=0; $j<$count_words; $j++) ++ { ++ $pred .= chop($words[mt_rand(1, $cw-1)])." "; ++ } ++ $count_words= mt_rand(3,5); ++ $tit = ""; ++ for ($j=0; $j<$count_words; $j++) ++ { ++ $tit .= chop($words[mt_rand(1, $cw-1)])." 
"; ++ } ++ echo "($i,".mt_rand(1,20).",NOW(),'".addslashes($tit)."','".addslashes($pred)."'),\n"; ++} ++ echo "(0,1,now(),'end','eND');\n"; ++ ++ ++?> +diff -uNr storage/sphinx/ha_sphinx.cc storage/sphinx/ha_sphinx.cc +--- storage/sphinx/ha_sphinx.cc 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/ha_sphinx.cc 2011-10-13 00:59:59.282957578 +0200 +@@ -0,0 +1,3547 @@ ++// ++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $ ++// ++ ++// ++// Copyright (c) 2001-2011, Andrew Aksyonoff ++// Copyright (c) 2008-2011, Sphinx Technologies Inc ++// All rights reserved ++// ++// This program is free software; you can redistribute it and/or modify ++// it under the terms of the GNU General Public License. You should have ++// received a copy of the GPL license along with this program; if you ++// did not, you can find it at http://www.gnu.org/ ++// ++ ++#ifdef USE_PRAGMA_IMPLEMENTATION ++#pragma implementation // gcc: Class implementation ++#endif ++ ++#if _MSC_VER>=1400 ++#define _CRT_SECURE_NO_DEPRECATE 1 ++#define _CRT_NONSTDC_NO_DEPRECATE 1 ++#endif ++ ++#include <mysql_version.h> ++ ++#if MYSQL_VERSION_ID>=50515 ++#include "sql_class.h" ++#include "sql_array.h" ++#elif MYSQL_VERSION_ID>50100 ++#include "mysql_priv.h" ++#include <mysql/plugin.h> ++#else ++#include "../mysql_priv.h" ++#endif ++ ++#include <mysys_err.h> ++#include <my_sys.h> ++#include <mysql.h> // include client for INSERT table (sort of redoing federated..) ++ ++#ifndef __WIN__ ++ // UNIX-specific ++ #include <my_net.h> ++ #include <netdb.h> ++ #include <sys/un.h> ++ ++ #define RECV_FLAGS MSG_WAITALL ++ ++ #define sphSockClose(_sock) ::close(_sock) ++#else ++ // Windows-specific ++ #include <io.h> ++ #define strcasecmp stricmp ++ #define snprintf _snprintf ++ ++ #define RECV_FLAGS 0 ++ ++ #define sphSockClose(_sock) ::closesocket(_sock) ++#endif ++ ++#include <ctype.h> ++#include "ha_sphinx.h" ++ ++#ifndef MSG_WAITALL ++#define MSG_WAITALL 0 ++#endif ++ ++#if _MSC_VER>=1400 ++#pragma warning(push,4) ++#endif ++ ++///////////////////////////////////////////////////////////////////////////// ++ ++/// there might be issues with min() on different platforms (eg. Gentoo, they say) ++#define Min(a,b) ((a)<(b)?(a):(b)) ++ ++/// unaligned RAM accesses are forbidden on SPARC ++#if defined(sparc) || defined(__sparc__) ++#define UNALIGNED_RAM_ACCESS 0 ++#else ++#define UNALIGNED_RAM_ACCESS 1 ++#endif ++ ++ ++#if UNALIGNED_RAM_ACCESS ++ ++/// pass-through wrapper ++template < typename T > inline T sphUnalignedRead ( const T & tRef ) ++{ ++ return tRef; ++} ++ ++/// pass-through wrapper ++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal ) ++{ ++ *(T*)pPtr = tVal; ++} ++ ++#else ++ ++/// unaligned read wrapper for some architectures (eg. SPARC) ++template < typename T > ++inline T sphUnalignedRead ( const T & tRef ) ++{ ++ T uTmp; ++ byte * pSrc = (byte *) &tRef; ++ byte * pDst = (byte *) &uTmp; ++ for ( int i=0; i<(int)sizeof(T); i++ ) ++ *pDst++ = *pSrc++; ++ return uTmp; ++} ++ ++/// unaligned write wrapper for some architectures (eg. 
SPARC) ++template < typename T > ++void sphUnalignedWrite ( void * pPtr, const T & tVal ) ++{ ++ byte * pDst = (byte *) pPtr; ++ byte * pSrc = (byte *) &tVal; ++ for ( int i=0; i<(int)sizeof(T); i++ ) ++ *pDst++ = *pSrc++; ++} ++ ++#endif ++ ++#if MYSQL_VERSION_ID>=50515 ++ ++#define sphinx_hash_init my_hash_init ++#define sphinx_hash_free my_hash_free ++#define sphinx_hash_search my_hash_search ++#define sphinx_hash_delete my_hash_delete ++ ++#else ++ ++#define sphinx_hash_init hash_init ++#define sphinx_hash_free hash_free ++#define sphinx_hash_search hash_search ++#define sphinx_hash_delete hash_delete ++ ++#endif ++ ++///////////////////////////////////////////////////////////////////////////// ++ ++// FIXME! make this all dynamic ++#define SPHINXSE_MAX_FILTERS 32 ++ ++#define SPHINXAPI_DEFAULT_HOST "127.0.0.1" ++#define SPHINXAPI_DEFAULT_PORT 9312 ++#define SPHINXAPI_DEFAULT_INDEX "*" ++ ++#define SPHINXQL_DEFAULT_PORT 9306 ++ ++#define SPHINXSE_SYSTEM_COLUMNS 3 ++ ++#define SPHINXSE_MAX_ALLOC (16*1024*1024) ++#define SPHINXSE_MAX_KEYWORDSTATS 4096 ++ ++#define SPHINXSE_VERSION "0.9.9 ($Revision: 2752 $)" ++ ++// FIXME? the following is cut-n-paste from sphinx.h and searchd.cpp ++// cut-n-paste is somewhat simpler that adding dependencies however.. ++ ++enum ++{ ++ SPHINX_SEARCHD_PROTO = 1, ++ SEARCHD_COMMAND_SEARCH = 0, ++ VER_COMMAND_SEARCH = 0x116, ++}; ++ ++/// search query sorting orders ++enum ESphSortOrder ++{ ++ SPH_SORT_RELEVANCE = 0, ///< sort by document relevance desc, then by date ++ SPH_SORT_ATTR_DESC = 1, ///< sort by document date desc, then by relevance desc ++ SPH_SORT_ATTR_ASC = 2, ///< sort by document date asc, then by relevance desc ++ SPH_SORT_TIME_SEGMENTS = 3, ///< sort by time segments (hour/day/week/etc) desc, then by relevance desc ++ SPH_SORT_EXTENDED = 4, ///< sort by SQL-like expression (eg. 
"@relevance DESC, price ASC, @id DESC") ++ SPH_SORT_EXPR = 5, ///< sort by expression ++ ++ SPH_SORT_TOTAL ++}; ++ ++/// search query matching mode ++enum ESphMatchMode ++{ ++ SPH_MATCH_ALL = 0, ///< match all query words ++ SPH_MATCH_ANY, ///< match any query word ++ SPH_MATCH_PHRASE, ///< match this exact phrase ++ SPH_MATCH_BOOLEAN, ///< match this boolean query ++ SPH_MATCH_EXTENDED, ///< match this extended query ++ SPH_MATCH_FULLSCAN, ///< match all document IDs w/o fulltext query, apply filters ++ SPH_MATCH_EXTENDED2, ///< extended engine V2 ++ ++ SPH_MATCH_TOTAL ++}; ++ ++/// search query relevance ranking mode ++enum ESphRankMode ++{ ++ SPH_RANK_PROXIMITY_BM25 = 0, ///< default mode, phrase proximity major factor and BM25 minor one ++ SPH_RANK_BM25 = 1, ///< statistical mode, BM25 ranking only (faster but worse quality) ++ SPH_RANK_NONE = 2, ///< no ranking, all matches get a weight of 1 ++ SPH_RANK_WORDCOUNT = 3, ///< simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts ++ SPH_RANK_PROXIMITY = 4, ///< phrase proximity ++ SPH_RANK_MATCHANY = 5, ///< emulate old match-any weighting ++ SPH_RANK_FIELDMASK = 6, ///< sets bits where there were matches ++ SPH_RANK_SPH04 = 7, ///< codename SPH04, phrase proximity + bm25 + head/exact boost ++ ++ SPH_RANK_TOTAL, ++ SPH_RANK_DEFAULT = SPH_RANK_PROXIMITY_BM25 ++}; ++ ++/// search query grouping mode ++enum ESphGroupBy ++{ ++ SPH_GROUPBY_DAY = 0, ///< group by day ++ SPH_GROUPBY_WEEK = 1, ///< group by week ++ SPH_GROUPBY_MONTH = 2, ///< group by month ++ SPH_GROUPBY_YEAR = 3, ///< group by year ++ SPH_GROUPBY_ATTR = 4 ///< group by attribute value ++}; ++ ++/// known attribute types ++enum ++{ ++ SPH_ATTR_NONE = 0, ///< not an attribute at all ++ SPH_ATTR_INTEGER = 1, ///< this attr is just an integer ++ SPH_ATTR_TIMESTAMP = 2, ///< this attr is a timestamp ++ SPH_ATTR_ORDINAL = 3, ///< this attr is an ordinal string number (integer at search time, specially handled at indexing time) ++ SPH_ATTR_BOOL = 4, ///< this attr is a boolean bit field ++ SPH_ATTR_FLOAT = 5, ++ SPH_ATTR_BIGINT = 6, ++ ++ SPH_ATTR_MULTI = 0x40000000UL ///< this attr has multiple values (0 or more) ++}; ++ ++/// known answers ++enum ++{ ++ SEARCHD_OK = 0, ///< general success, command-specific reply follows ++ SEARCHD_ERROR = 1, ///< general failure, error message follows ++ SEARCHD_RETRY = 2, ///< temporary failure, error message follows, client should retry later ++ SEARCHD_WARNING = 3 ///< general success, warning message and command-specific reply follow ++}; ++ ++////////////////////////////////////////////////////////////////////////////// ++ ++#define SPHINX_DEBUG_OUTPUT 0 ++#define SPHINX_DEBUG_CALLS 0 ++ ++#include <stdarg.h> ++ ++#if SPHINX_DEBUG_OUTPUT ++inline void SPH_DEBUG ( const char * format, ... ) ++{ ++ va_list ap; ++ va_start ( ap, format ); ++ fprintf ( stderr, "SphinxSE: " ); ++ vfprintf ( stderr, format, ap ); ++ fprintf ( stderr, "\n" ); ++ va_end ( ap ); ++} ++#else ++inline void SPH_DEBUG ( const char *, ... 
) {} ++#endif ++ ++#if SPHINX_DEBUG_CALLS ++ ++#define SPH_ENTER_FUNC() { SPH_DEBUG ( "enter %s", __FUNCTION__ ); } ++#define SPH_ENTER_METHOD() { SPH_DEBUG ( "enter %s(this=%08x)", __FUNCTION__, this ); } ++#define SPH_RET(_arg) { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return _arg; } ++#define SPH_VOID_RET() { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return; } ++ ++#else ++ ++#define SPH_ENTER_FUNC() ++#define SPH_ENTER_METHOD() ++#define SPH_RET(_arg) { return(_arg); } ++#define SPH_VOID_RET() { return; } ++ ++#endif ++ ++ ++#define SafeDelete(_arg) { if ( _arg ) delete ( _arg ); (_arg) = NULL; } ++#define SafeDeleteArray(_arg) { if ( _arg ) delete [] ( _arg ); (_arg) = NULL; } ++ ++////////////////////////////////////////////////////////////////////////////// ++ ++/// per-table structure that will be shared among all open Sphinx SE handlers ++struct CSphSEShare ++{ ++ pthread_mutex_t m_tMutex; ++ THR_LOCK m_tLock; ++ ++ char * m_sTable; ++ char * m_sScheme; ///< our connection string ++ char * m_sHost; ///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY ++ char * m_sSocket; ///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY ++ char * m_sIndex; ///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY ++ ushort m_iPort; ++ bool m_bSphinxQL; ///< is this read-only SphinxAPI table, or write-only SphinxQL table? ++ uint m_iTableNameLen; ++ uint m_iUseCount; ++ CHARSET_INFO * m_pTableQueryCharset; ++ ++ int m_iTableFields; ++ char ** m_sTableField; ++ enum_field_types * m_eTableFieldType; ++ ++ CSphSEShare () ++ : m_sTable ( NULL ) ++ , m_sScheme ( NULL ) ++ , m_sHost ( NULL ) ++ , m_sSocket ( NULL ) ++ , m_sIndex ( NULL ) ++ , m_iPort ( 0 ) ++ , m_bSphinxQL ( false ) ++ , m_iTableNameLen ( 0 ) ++ , m_iUseCount ( 1 ) ++ , m_pTableQueryCharset ( NULL ) ++ ++ , m_iTableFields ( 0 ) ++ , m_sTableField ( NULL ) ++ , m_eTableFieldType ( NULL ) ++ { ++ thr_lock_init ( &m_tLock ); ++ pthread_mutex_init ( &m_tMutex, MY_MUTEX_INIT_FAST ); ++ } ++ ++ ~CSphSEShare () ++ { ++ pthread_mutex_destroy ( &m_tMutex ); ++ thr_lock_delete ( &m_tLock ); ++ ++ SafeDeleteArray ( m_sTable ); ++ SafeDeleteArray ( m_sScheme ); ++ ResetTable (); ++ } ++ ++ void ResetTable () ++ { ++ for ( int i=0; i<m_iTableFields; i++ ) ++ SafeDeleteArray ( m_sTableField[i] ); ++ SafeDeleteArray ( m_sTableField ); ++ SafeDeleteArray ( m_eTableFieldType ); ++ } ++}; ++ ++/// schema attribute ++struct CSphSEAttr ++{ ++ char * m_sName; ///< attribute name (received from Sphinx) ++ uint32 m_uType; ///< attribute type (received from Sphinx) ++ int m_iField; ///< field index in current table (-1 if none) ++ ++ CSphSEAttr() ++ : m_sName ( NULL ) ++ , m_uType ( SPH_ATTR_NONE ) ++ , m_iField ( -1 ) ++ {} ++ ++ ~CSphSEAttr () ++ { ++ SafeDeleteArray ( m_sName ); ++ } ++}; ++ ++/// word stats ++struct CSphSEWordStats ++{ ++ char * m_sWord; ++ int m_iDocs; ++ int m_iHits; ++ ++ CSphSEWordStats () ++ : m_sWord ( NULL ) ++ , m_iDocs ( 0 ) ++ , m_iHits ( 0 ) ++ {} ++ ++ ~CSphSEWordStats () ++ { ++ SafeDeleteArray ( m_sWord ); ++ } ++}; ++ ++/// request stats ++struct CSphSEStats ++{ ++public: ++ int m_iMatchesTotal; ++ int m_iMatchesFound; ++ int m_iQueryMsec; ++ int m_iWords; ++ CSphSEWordStats * m_dWords; ++ bool m_bLastError; ++ char m_sLastMessage[1024]; ++ ++ CSphSEStats() ++ : m_dWords ( NULL ) ++ { ++ Reset (); ++ } ++ ++ void Reset () ++ { ++ m_iMatchesTotal = 0; ++ m_iMatchesFound = 0; ++ m_iQueryMsec = 0; ++ m_iWords = 0; ++ SafeDeleteArray ( m_dWords ); ++ m_bLastError = false; ++ m_sLastMessage[0] = '\0'; ++ } ++ ++ 
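++
++// [Editorial note, not part of the upstream patch: the per-word docs/hits
++// counters above are what sphinx_show_status() later renders as the "words"
++// line of SHOW ENGINE SPHINX STATUS, as space-separated word:docs:hits
++// triples for the last query on this connection.]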
++
++/// request stats
++struct CSphSEStats
++{
++public:
++	int m_iMatchesTotal;
++	int m_iMatchesFound;
++	int m_iQueryMsec;
++	int m_iWords;
++	CSphSEWordStats * m_dWords;
++	bool m_bLastError;
++	char m_sLastMessage[1024];
++
++	CSphSEStats()
++		: m_dWords ( NULL )
++	{
++		Reset ();
++	}
++
++	void Reset ()
++	{
++		m_iMatchesTotal = 0;
++		m_iMatchesFound = 0;
++		m_iQueryMsec = 0;
++		m_iWords = 0;
++		SafeDeleteArray ( m_dWords );
++		m_bLastError = false;
++		m_sLastMessage[0] = '\0';
++	}
++
++	~CSphSEStats()
++	{
++		Reset ();
++	}
++};
++
++/// thread local storage
++struct CSphSEThreadData
++{
++	static const int MAX_QUERY_LEN = 262144; // 256k should be enough, right?
++
++	bool m_bStats;
++	CSphSEStats m_tStats;
++
++	bool m_bQuery;
++	char m_sQuery[MAX_QUERY_LEN];
++
++	CHARSET_INFO * m_pQueryCharset;
++
++	bool m_bReplace; ///< are we doing an INSERT or REPLACE
++
++	bool m_bCondId; ///< got a value from condition pushdown
++	longlong m_iCondId; ///< value acquired from id=value condition pushdown
++	bool m_bCondDone; ///< index_read() is now over
++
++	CSphSEThreadData ()
++		: m_bStats ( false )
++		, m_bQuery ( false )
++		, m_pQueryCharset ( NULL )
++		, m_bReplace ( false )
++		, m_bCondId ( false )
++		, m_iCondId ( 0 )
++		, m_bCondDone ( false )
++	{}
++};
++
++/// filter types
++enum ESphFilter
++{
++	SPH_FILTER_VALUES = 0, ///< filter by integer values set
++	SPH_FILTER_RANGE = 1, ///< filter by integer range
++	SPH_FILTER_FLOATRANGE = 2 ///< filter by float range
++};
++
++
++/// search query filter
++struct CSphSEFilter
++{
++public:
++	ESphFilter m_eType;
++	char * m_sAttrName;
++	longlong m_uMinValue;
++	longlong m_uMaxValue;
++	float m_fMinValue;
++	float m_fMaxValue;
++	int m_iValues;
++	longlong * m_pValues;
++	int m_bExclude;
++
++public:
++	CSphSEFilter ()
++		: m_eType ( SPH_FILTER_VALUES )
++		, m_sAttrName ( NULL )
++		, m_uMinValue ( 0 )
++		, m_uMaxValue ( UINT_MAX )
++		, m_fMinValue ( 0.0f )
++		, m_fMaxValue ( 0.0f )
++		, m_iValues ( 0 )
++		, m_pValues ( NULL )
++		, m_bExclude ( 0 )
++	{
++	}
++
++	~CSphSEFilter ()
++	{
++		SafeDeleteArray ( m_pValues );
++	}
++};
++
++
++/// float vs dword conversion
++inline uint32 sphF2DW ( float f ) { union { float f; uint32 d; } u; u.f = f; return u.d; }
++
++/// dword vs float conversion
++inline float sphDW2F ( uint32 d ) { union { float f; uint32 d; } u; u.d = d; return u.f; }
++
++
++/// client-side search query
++struct CSphSEQuery
++{
++public:
++	const char * m_sHost;
++	int m_iPort;
++
++private:
++	char * m_sQueryBuffer;
++
++	const char * m_sIndex;
++	int m_iOffset;
++	int m_iLimit;
++
++	bool m_bQuery;
++	char * m_sQuery;
++	uint32 * m_pWeights;
++	int m_iWeights;
++	ESphMatchMode m_eMode;
++	ESphRankMode m_eRanker;
++	ESphSortOrder m_eSort;
++	char * m_sSortBy;
++	int m_iMaxMatches;
++	int m_iMaxQueryTime;
++	uint32 m_iMinID;
++	uint32 m_iMaxID;
++
++	int m_iFilters;
++	CSphSEFilter m_dFilters[SPHINXSE_MAX_FILTERS];
++
++	ESphGroupBy m_eGroupFunc;
++	char * m_sGroupBy;
++	char * m_sGroupSortBy;
++	int m_iCutoff;
++	int m_iRetryCount;
++	int m_iRetryDelay;
++	char * m_sGroupDistinct; ///< points to query buffer; do NOT delete
++	int m_iIndexWeights;
++	char * m_sIndexWeight[SPHINXSE_MAX_FILTERS]; ///< points to query buffer; do NOT delete
++	int m_iIndexWeight[SPHINXSE_MAX_FILTERS];
++	int m_iFieldWeights;
++	char * m_sFieldWeight[SPHINXSE_MAX_FILTERS]; ///< points to query buffer; do NOT delete
++	int m_iFieldWeight[SPHINXSE_MAX_FILTERS];
++
++	bool m_bGeoAnchor;
++	char * m_sGeoLatAttr;
++	char * m_sGeoLongAttr;
++	float m_fGeoLatitude;
++	float m_fGeoLongitude;
++
++	char * m_sComment;
++	char * m_sSelect;
++
++	struct Override_t
++	{
++		union Value_t
++		{
++			uint32 m_uValue;
++			longlong m_iValue64;
++			float m_fValue;
++		};
++		char * m_sName; ///< points to query buffer
++		int m_iType;
++		Dynamic_array<ulonglong> m_dIds;
++		Dynamic_array<Value_t> m_dValues;
++	};
++	Dynamic_array<Override_t *> m_dOverrides;
++
++public:
++	char m_sParseError[256];
++
++public:
++	CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex );
++	~CSphSEQuery ();
++
++	bool Parse ();
++	int BuildRequest ( char ** ppBuffer );
++
++protected:
++	char * m_pBuf;
++	char * m_pCur;
++	int m_iBufLeft;
++	bool m_bBufOverrun;
++
++	template < typename T > int ParseArray ( T ** ppValues, const char * sValue );
++	bool ParseField ( char * sField );
++
++	void SendBytes ( const void * pBytes, int iBytes );
++	void SendWord ( short int v ) { v = ntohs(v); SendBytes ( &v, sizeof(v) ); }
++	void SendInt ( int v ) { v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void SendDword ( uint v ) { v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void SendUint64 ( ulonglong v ) { SendDword ( (uint)(v>>32) ); SendDword ( (uint)(v&0xFFFFFFFFUL) ); }
++	void SendString ( const char * v ) { int iLen = strlen(v); SendDword(iLen); SendBytes ( v, iLen ); }
++	void SendFloat ( float v ) { SendDword ( sphF2DW(v) ); }
++};
++
++template int CSphSEQuery::ParseArray<uint32> ( uint32 **, const char * );
++template int CSphSEQuery::ParseArray<longlong> ( longlong **, const char * );
++
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++
++#if MYSQL_VERSION_ID<50114
++#error Sphinx SE requires MySQL 5.1.14 or higher if compiling for 5.1.x series!
++#endif
++
++static handler * sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root );
++static int sphinx_init_func ( void * p );
++static int sphinx_close_connection ( handlerton * hton, THD * thd );
++static int sphinx_panic ( handlerton * hton, enum ha_panic_function flag );
++static bool sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type );
++
++#else
++
++static bool sphinx_init_func_for_handlerton ();
++static int sphinx_close_connection ( THD * thd );
++bool sphinx_show_status ( THD * thd );
++
++#endif // >50100
++
++//////////////////////////////////////////////////////////////////////////////
++
++static const char sphinx_hton_name[] = "SPHINX";
++static const char sphinx_hton_comment[] = "Sphinx storage engine " SPHINXSE_VERSION;
++
++#if MYSQL_VERSION_ID<50100
++handlerton sphinx_hton =
++{
++	#ifdef MYSQL_HANDLERTON_INTERFACE_VERSION
++	MYSQL_HANDLERTON_INTERFACE_VERSION,
++	#endif
++	sphinx_hton_name,
++	SHOW_OPTION_YES,
++	sphinx_hton_comment,
++	DB_TYPE_SPHINX_DB,
++	sphinx_init_func_for_handlerton,
++	0, // slot
++	0, // savepoint size
++	sphinx_close_connection, // close_connection
++	NULL, // savepoint
++	NULL, // rollback to savepoint
++	NULL, // release savepoint
++	NULL, // commit
++	NULL, // rollback
++	NULL, // prepare
++	NULL, // recover
++	NULL, // commit_by_xid
++	NULL, // rollback_by_xid
++	NULL, // create_cursor_read_view
++	NULL, // set_cursor_read_view
++	NULL, // close_cursor_read_view
++	HTON_CAN_RECREATE
++};
++#else
++static handlerton * sphinx_hton_ptr = NULL;
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++
++// variables for Sphinx shared methods
++pthread_mutex_t sphinx_mutex; // mutex to init the hash
++static int sphinx_init = 0; // flag whether the hash was initialized
++static HASH sphinx_open_tables; // hash used to track open tables
++
++//////////////////////////////////////////////////////////////////////////////
++// INITIALIZATION AND SHUTDOWN
++//////////////////////////////////////////////////////////////////////////////
++
++// hashing function
++#if MYSQL_VERSION_ID>=50120
++typedef size_t GetKeyLength_t;
++#else
++typedef uint GetKeyLength_t;
++#endif
++
++static byte * sphinx_get_key ( const byte * pSharePtr, GetKeyLength_t * pLength, my_bool )
++{
++	CSphSEShare * pShare = (CSphSEShare *) pSharePtr;
++	*pLength = (size_t) pShare->m_iTableNameLen;
++	return (byte*) pShare->m_sTable;
++}
++
++#if MYSQL_VERSION_ID<50100
++static int sphinx_init_func ( void * ) // to avoid unused arg warning
++#else
++static int sphinx_init_func ( void * p )
++#endif
++{
++	SPH_ENTER_FUNC();
++	if ( !sphinx_init )
++	{
++		sphinx_init = 1;
++		void ( pthread_mutex_init ( &sphinx_mutex, MY_MUTEX_INIT_FAST ) );
++		sphinx_hash_init ( &sphinx_open_tables, system_charset_info, 32, 0, 0,
++			sphinx_get_key, 0, 0 );
++
++		#if MYSQL_VERSION_ID > 50100
++		handlerton * hton = (handlerton*) p;
++		hton->state = SHOW_OPTION_YES;
++		hton->db_type = DB_TYPE_FIRST_DYNAMIC;
++		hton->create = sphinx_create_handler;
++		hton->close_connection = sphinx_close_connection;
++		hton->show_status = sphinx_show_status;
++		hton->panic = sphinx_panic;
++		hton->flags = HTON_CAN_RECREATE;
++		#endif
++	}
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID<50100
++static bool sphinx_init_func_for_handlerton ()
++{
++	return sphinx_init_func ( &sphinx_hton );
++}
++#endif
++
++
++#if MYSQL_VERSION_ID>50100
++
++static int sphinx_close_connection ( handlerton * hton, THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	void ** tmp = thd_ha_data ( thd, hton );
++	CSphSEThreadData * pTls = (CSphSEThreadData*) (*tmp);
++	SafeDelete ( pTls );
++	*tmp = NULL;
++	SPH_RET(0);
++}
++
++
++static int sphinx_done_func ( void * )
++{
++	SPH_ENTER_FUNC();
++
++	int error = 0;
++	if ( sphinx_init )
++	{
++		sphinx_init = 0;
++		if ( sphinx_open_tables.records )
++			error = 1;
++		sphinx_hash_free ( &sphinx_open_tables );
++		pthread_mutex_destroy ( &sphinx_mutex );
++	}
++
++	SPH_RET(0);
++}
++
++
++static int sphinx_panic ( handlerton * hton, enum ha_panic_function )
++{
++	return sphinx_done_func ( hton );
++}
++
++#else
++
++static int sphinx_close_connection ( THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++	SafeDelete ( pTls );
++	thd->ha_data[sphinx_hton.slot] = NULL;
++	SPH_RET(0);
++}
++
++#endif // >50100
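++
++// [Editorial note, not part of the upstream patch: the SHOW STATUS code below
++// serves both server generations. On MySQL 5.1+ the server invokes
++// sphinx_show_status() through handlerton::show_status and rows are emitted
++// via stat_print; on 5.0.x the engine builds the result set by hand through
++// the Protocol class. The LOC_STATS() macro hides that difference.]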
++
++//////////////////////////////////////////////////////////////////////////////
++// SHOW STATUS
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++static bool sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print,
++	enum ha_stat_type )
++#else
++bool sphinx_show_status ( THD * thd )
++#endif
++{
++	SPH_ENTER_FUNC();
++
++#if MYSQL_VERSION_ID<50100
++	Protocol * protocol = thd->protocol;
++	List<Item> field_list;
++#endif
++
++	char buf1[IO_SIZE];
++	uint buf1len;
++	char buf2[IO_SIZE];
++	uint buf2len = 0;
++	String words;
++
++	buf1[0] = '\0';
++	buf2[0] = '\0';
++
++
++#if MYSQL_VERSION_ID>50100
++	// 5.1.x style stats
++	CSphSEThreadData * pTls = (CSphSEThreadData*) ( *thd_ha_data ( thd, hton ) );
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	stat_print ( thd, sphinx_hton_name, strlen(sphinx_hton_name), _key, _keylen, _val, _vallen );
++
++#else
++	// 5.0.x style stats
++	if ( have_sphinx_db!=SHOW_OPTION_YES )
++	{
++		my_message ( ER_NOT_SUPPORTED_YET,
++			"failed to call SHOW SPHINX STATUS: --skip-sphinx was specified",
++			MYF(0) );
++		SPH_RET(TRUE);
++	}
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++
++	field_list.push_back ( new Item_empty_string ( "Type", 10 ) );
++	field_list.push_back ( new Item_empty_string ( "Name", FN_REFLEN ) );
++	field_list.push_back ( new Item_empty_string ( "Status", 10 ) );
++	if ( protocol->send_fields ( &field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF ) )
++		SPH_RET(TRUE);
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	protocol->prepare_for_resend (); \
++	protocol->store ( "SPHINX", 6, system_charset_info ); \
++	protocol->store ( _key, _keylen, system_charset_info ); \
++	protocol->store ( _val, _vallen, system_charset_info ); \
++	if ( protocol->write() ) \
++		SPH_RET(TRUE);
++
++#endif
++
++
++	// show query stats
++	if ( pTls && pTls->m_bStats )
++	{
++		const CSphSEStats * pStats = &pTls->m_tStats;
++		buf1len = my_snprintf ( buf1, sizeof(buf1),
++			"total: %d, total found: %d, time: %d, words: %d",
++			pStats->m_iMatchesTotal, pStats->m_iMatchesFound, pStats->m_iQueryMsec, pStats->m_iWords );
++
++		LOC_STATS ( "stats", 5, buf1, buf1len );
++
++		if ( pStats->m_iWords )
++		{
++			for ( int i=0; i<pStats->m_iWords; i++ )
++			{
++				CSphSEWordStats & tWord = pStats->m_dWords[i];
++				buf2len = my_snprintf ( buf2, sizeof(buf2), "%s%s:%d:%d ",
++					buf2, tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits );
++			}
++
++			// convert it if we can
++			const char * sWord = buf2;
++			int iWord = buf2len;
++
++			String sBuf3;
++			if ( pTls->m_pQueryCharset )
++			{
++				uint iErrors;
++				sBuf3.copy ( buf2, buf2len, pTls->m_pQueryCharset, system_charset_info, &iErrors );
++				sWord = sBuf3.c_ptr();
++				iWord = sBuf3.length();
++			}
++
++			LOC_STATS ( "words", 5, sWord, iWord );
++		}
++	}
++
++	// show last error or warning (either in addition to stats, or on their own)
++	if ( pTls && pTls->m_tStats.m_sLastMessage && pTls->m_tStats.m_sLastMessage[0] )
++	{
++		const char * sMessageType = pTls->m_tStats.m_bLastError ? "error" : "warning";
++
++		LOC_STATS (
++			sMessageType, strlen ( sMessageType ),
++			pTls->m_tStats.m_sLastMessage, strlen ( pTls->m_tStats.m_sLastMessage ) );
++
++	} else
++	{
++		// well, nothing to show just yet
++#if MYSQL_VERSION_ID < 50100
++		LOC_STATS ( "stats", 5, "no query has been executed yet", sizeof("no query has been executed yet")-1 );
++#endif
++	}
++
++#if MYSQL_VERSION_ID < 50100
++	send_eof(thd);
++#endif
++
++	SPH_RET(FALSE);
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// HELPERS
++//////////////////////////////////////////////////////////////////////////////
++
++static char * sphDup ( const char * sSrc, int iLen=-1 )
++{
++	if ( !sSrc )
++		return NULL;
++
++	if ( iLen<0 )
++		iLen = strlen(sSrc);
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, sSrc, iLen );
++	sRes[iLen] = '\0';
++	return sRes;
++}
++
++
++static void sphLogError ( const char * sFmt, ... )
++{
++	// emit timestamp
++#ifdef __WIN__
++	SYSTEMTIME t;
++	GetLocalTime ( &t );
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		(int)t.wYear % 100, (int)t.wMonth, (int)t.wDay,
++		(int)t.wHour, (int)t.wMinute, (int)t.wSecond );
++#else
++	// Unix version
++	time_t tStamp;
++	time ( &tStamp );
++
++	struct tm * pParsed;
++#ifdef HAVE_LOCALTIME_R
++	struct tm tParsed;
++	localtime_r ( &tStamp, &tParsed );
++	pParsed = &tParsed;
++#else
++	pParsed = localtime ( &tStamp );
++#endif // HAVE_LOCALTIME_R
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		pParsed->tm_year % 100, pParsed->tm_mon + 1, pParsed->tm_mday,
++		pParsed->tm_hour, pParsed->tm_min, pParsed->tm_sec);
++#endif // __WIN__
++
++	// emit message
++	va_list ap;
++	va_start ( ap, sFmt );
++	vfprintf ( stderr, sFmt, ap );
++	va_end ( ap );
++
++	// emit newline
++	fprintf ( stderr, "\n" );
++}
++
++
++
++// the following scheme variants are recognized
++//
++// sphinx://host[:port]/index
++// sphinxql://host[:port]/index
++// unix://unix/domain/socket[:index]
++static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
++{
++	SPH_ENTER_FUNC();
++
++	if ( share )
++	{
++		// check incoming stuff
++		if ( !table )
++		{
++			sphLogError ( "table==NULL in ParseUrl()" );
++			return false;
++		}
++		if ( !table->s )
++		{
++			sphLogError ( "(table->s)==NULL in ParseUrl()" );
++			return false;
++		}
++
++		// free old stuff
++		share->ResetTable ();
++
++		// fill new stuff
++		share->m_iTableFields = table->s->fields;
++		if ( share->m_iTableFields )
++		{
++			share->m_sTableField = new char * [ share->m_iTableFields ];
++			share->m_eTableFieldType = new enum_field_types [ share->m_iTableFields ];
++
++			for ( int i=0; i<share->m_iTableFields; i++ )
++			{
++				share->m_sTableField[i] = sphDup ( table->field[i]->field_name );
++				share->m_eTableFieldType[i] = table->field[i]->type();
++			}
++		}
++	}
++
++	// defaults
++	bool bOk = true;
++	bool bQL = false;
++	char * sScheme = NULL;
++	char * sHost = SPHINXAPI_DEFAULT_HOST;
++	char * sIndex = SPHINXAPI_DEFAULT_INDEX;
++	int iPort = SPHINXAPI_DEFAULT_PORT;
++
++	// parse connection string, if any
++	while ( table->s->connect_string.length!=0 )
++	{
++		sScheme = sphDup ( table->s->connect_string.str, table->s->connect_string.length );
++
++		sHost = strstr ( sScheme, "://" );
++		if ( !sHost )
++		{
++			bOk = false;
++			break;
++		}
++		sHost[0] = '\0';
++		sHost += 3;
++
++		/////////////////////////////
++		// sphinxapi via unix socket
++		/////////////////////////////
++
++		if ( !strcmp ( sScheme, "unix" ) )
++		{
++			sHost--; // reuse last slash
++			iPort = 0;
++			if (!( sIndex = strrchr ( sHost, ':' ) ))
++				sIndex = SPHINXAPI_DEFAULT_INDEX;
++			else
++			{
++				*sIndex++ = '\0';
++				if ( !*sIndex )
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		/////////////////////
++		// sphinxapi via tcp
++		/////////////////////
++
++		if ( !strcmp ( sScheme, "sphinx" ) )
++		{
++			char * sPort = strchr ( sHost, ':' );
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				if ( *sPort )
++				{
++					sIndex = strchr ( sPort, '/' );
++					if ( sIndex )
++						*sIndex++ = '\0';
++					else
++						sIndex = SPHINXAPI_DEFAULT_INDEX;
++
++					iPort = atoi(sPort);
++					if ( !iPort )
++						iPort = SPHINXAPI_DEFAULT_PORT;
++				}
++			} else
++			{
++				sIndex = strchr ( sHost, '/' );
++				if ( sIndex )
++					*sIndex++ = '\0';
++				else
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		////////////
++		// sphinxql
++		////////////
++
++		if ( !strcmp ( sScheme, "sphinxql" ) )
++		{
++			bQL = true;
++			iPort = SPHINXQL_DEFAULT_PORT;
++
++			// handle port
++			char * sPort = strchr ( sHost, ':' );
++			sIndex = sHost; // starting point for index name search
++
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				sIndex = sPort;
++
++				iPort = atoi(sPort);
++				if ( !iPort )
++				{
++					bOk = false; // invalid port; can report ER_FOREIGN_DATA_STRING_INVALID
++					break;
++				}
++			}
++
++			// find index
++			sIndex = strchr ( sIndex, '/' );
++			if ( sIndex )
++				*sIndex++ = '\0';
++
++			// final checks
++			// host and index names are required
++			bOk = ( sHost && *sHost && sIndex && *sIndex );
++			break;
++		}
++
++		// unknown case
++		bOk = false;
++		break;
++	}
++
++	if ( !bOk )
++	{
++		my_error ( bCreate ? ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE : ER_FOREIGN_DATA_STRING_INVALID,
++			MYF(0), table->s->connect_string );
++	} else
++	{
++		if ( share )
++		{
++			SafeDeleteArray ( share->m_sScheme );
++			share->m_sScheme = sScheme;
++			share->m_sHost = sHost;
++			share->m_sIndex = sIndex;
++			share->m_iPort = (ushort)iPort;
++			share->m_bSphinxQL = bQL;
++		}
++	}
++	if ( !bOk && !share )
++		SafeDeleteArray ( sScheme );
++
++	SPH_RET(bOk);
++}
++
++
++// Example of simple lock controls. The "share" it creates is a structure we will
++// pass to each sphinx handler. Do you have to have one of these? Well, you have
++// pieces that are used for locking, and they are needed to function.
++static CSphSEShare * get_share ( const char * table_name, TABLE * table )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	CSphSEShare * pShare = NULL;
++	for ( ;; )
++	{
++		// check if we already have this share
++#if MYSQL_VERSION_ID>=50120
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, (const uchar *) table_name, strlen(table_name) );
++#else
++#ifdef __WIN__
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, (const byte *) table_name, strlen(table_name) );
++#else
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, table_name, strlen(table_name) );
++#endif // win
++#endif // pre-5.1.20
++
++		if ( pShare )
++		{
++			pShare->m_iUseCount++;
++			break;
++		}
++
++		// try to allocate new share
++		pShare = new CSphSEShare ();
++		if ( !pShare )
++			break;
++
++		// try to set it up
++		if ( !ParseUrl ( pShare, table, false ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		if ( !pShare->m_bSphinxQL )
++			pShare->m_pTableQueryCharset = table->field[2]->charset();
++
++		// try to hash it
++		pShare->m_iTableNameLen = strlen(table_name);
++		pShare->m_sTable = sphDup ( table_name );
++		if ( my_hash_insert ( &sphinx_open_tables, (const byte *)pShare ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		// all seems fine
++		break;
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(pShare);
++}
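++
++// [Editorial note, not part of the upstream patch: get_share() above is plain
++// refcounting keyed by table name and serialized by sphinx_mutex. Every
++// successful get_share() in ha_sphinx::open() must be paired with a
++// free_share() in ha_sphinx::close(), which drops the last reference.]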
++
++
++// Free lock controls. We call this whenever we close a table. If the table had
++// the last reference to the share then we free memory associated with it.
++static int free_share ( CSphSEShare * pShare )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	if ( !--pShare->m_iUseCount )
++	{
++		sphinx_hash_delete ( &sphinx_open_tables, (byte *)pShare );
++		SafeDelete ( pShare );
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID>50100
++static handler * sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root )
++{
++	sphinx_hton_ptr = hton;
++	return new ( mem_root ) ha_sphinx ( hton, table );
++}
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++// CLIENT-SIDE REQUEST STUFF
++//////////////////////////////////////////////////////////////////////////////
++
++CSphSEQuery::CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex )
++	: m_sHost ( "" )
++	, m_iPort ( 0 )
++	, m_sIndex ( sIndex ? sIndex : "*" )
++	, m_iOffset ( 0 )
++	, m_iLimit ( 20 )
++	, m_bQuery ( false )
++	, m_sQuery ( "" )
++	, m_pWeights ( NULL )
++	, m_iWeights ( 0 )
++	, m_eMode ( SPH_MATCH_ALL )
++	, m_eRanker ( SPH_RANK_PROXIMITY_BM25 )
++	, m_eSort ( SPH_SORT_RELEVANCE )
++	, m_sSortBy ( "" )
++	, m_iMaxMatches ( 1000 )
++	, m_iMaxQueryTime ( 0 )
++	, m_iMinID ( 0 )
++	, m_iMaxID ( 0 )
++	, m_iFilters ( 0 )
++	, m_eGroupFunc ( SPH_GROUPBY_DAY )
++	, m_sGroupBy ( "" )
++	, m_sGroupSortBy ( "@group desc" )
++	, m_iCutoff ( 0 )
++	, m_iRetryCount ( 0 )
++	, m_iRetryDelay ( 0 )
++	, m_sGroupDistinct ( "" )
++	, m_iIndexWeights ( 0 )
++	, m_iFieldWeights ( 0 )
++	, m_bGeoAnchor ( false )
++	, m_sGeoLatAttr ( "" )
++	, m_sGeoLongAttr ( "" )
++	, m_fGeoLatitude ( 0.0f )
++	, m_fGeoLongitude ( 0.0f )
++	, m_sComment ( "" )
++	, m_sSelect ( "" )
++
++	, m_pBuf ( NULL )
++	, m_pCur ( NULL )
++	, m_iBufLeft ( 0 )
++	, m_bBufOverrun ( false )
++{
++	m_sQueryBuffer = new char [ iLength+2 ];
++	memcpy ( m_sQueryBuffer, sQuery, iLength );
++	m_sQueryBuffer[iLength] = ';';
++	m_sQueryBuffer[iLength+1] = '\0';
++}
++
++
++CSphSEQuery::~CSphSEQuery ()
++{
++	SPH_ENTER_METHOD();
++	SafeDeleteArray ( m_sQueryBuffer );
++	SafeDeleteArray ( m_pWeights );
++	SafeDeleteArray ( m_pBuf );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++		SafeDelete ( m_dOverrides.at(i) );
++	SPH_VOID_RET();
++}
++
++
++template < typename T >
++int CSphSEQuery::ParseArray ( T ** ppValues, const char * sValue )
++{
++	SPH_ENTER_METHOD();
++
++	assert ( ppValues );
++	assert ( !(*ppValues) );
++
++	const char * pValue;
++	bool bPrevDigit = false;
++	int iValues = 0;
++
++	// count the values
++	for ( pValue=sValue; *pValue; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++		if ( bDigit && !bPrevDigit )
++			iValues++;
++		bPrevDigit = bDigit;
++	}
++	if ( !iValues )
++		SPH_RET(0);
++
++	// extract the values
++	T * pValues = new T [ iValues ];
++	*ppValues = pValues;
++
++	int iIndex = 0, iSign = 1;
++	T uValue = 0;
++
++	bPrevDigit = false;
++	for ( pValue=sValue ;; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++
++		if ( bDigit )
++		{
++			if ( !bPrevDigit )
++				uValue = 0;
++			uValue = uValue*10 + ( (*pValue)-'0' );
++		} else if ( bPrevDigit )
++		{
++			assert ( iIndex<iValues );
++			pValues [ iIndex++ ] = uValue * iSign;
++			iSign = 1;
++		} else if ( *pValue=='-' )
++			iSign = -1;
++
++		bPrevDigit = bDigit;
++		if ( !*pValue )
++			break;
++	}
++
++	SPH_RET ( iValues );
++}
++
++
++static char * chop ( char * s )
++{
++	while ( *s && isspace(*s) )
++		s++;
++
++	char * p = s + strlen(s);
++	while ( p>s && isspace ( p[-1] ) )
++		p--;
++	*p = '\0';
++
++	return s;
++}
++
++
++static bool myisattr ( char c )
++{
++	return
++		( c>='0' && c<='9' ) ||
++		( c>='a' && c<='z' ) ||
++		( c>='A' && c<='Z' ) ||
++		c=='_';
++}
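++
++// [Editorial note, not part of the upstream patch: ParseField() below handles
++// one semicolon-separated clause of the engine query string, e.g. for
++//   SELECT ... WHERE query='test it;mode=any;limit=100';
++// a clause without '=' is taken as the fulltext query itself, and every other
++// clause is a key=value option (mode, sort, filter, geoanchor, and so on).]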
++
++
++bool CSphSEQuery::ParseField ( char * sField )
++{
++	SPH_ENTER_METHOD();
++
++	// look for option name/value separator
++	char * sValue = strchr ( sField, '=' );
++	if ( !sValue || sValue==sField || sValue[-1]=='\\' )
++	{
++		// by default let's assume it's just query
++		if ( sField[0] )
++		{
++			if ( m_bQuery )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "search query already specified; '%s' is redundant", sField );
++				SPH_RET(false);
++			} else
++			{
++				m_sQuery = sField;
++				m_bQuery = true;
++
++				// unescape only 1st one
++				char *s = sField, *d = sField;
++				int iSlashes = 0;
++				while ( *s )
++				{
++					iSlashes = ( *s=='\\' ) ? iSlashes+1 : 0;
++					if ( ( iSlashes%2 )==0 ) *d++ = *s;
++					s++;
++				}
++				*d = '\0';
++			}
++		}
++		SPH_RET(true);
++	}
++
++	// split
++	*sValue++ = '\0';
++	sValue = chop ( sValue );
++	int iValue = atoi ( sValue );
++
++	// handle options
++	char * sName = chop ( sField );
++
++	if ( !strcmp ( sName, "query" ) )		m_sQuery = sValue;
++	else if ( !strcmp ( sName, "host" ) )		m_sHost = sValue;
++	else if ( !strcmp ( sName, "port" ) )		m_iPort = iValue;
++	else if ( !strcmp ( sName, "index" ) )		m_sIndex = sValue;
++	else if ( !strcmp ( sName, "offset" ) )		m_iOffset = iValue;
++	else if ( !strcmp ( sName, "limit" ) )		m_iLimit = iValue;
++	else if ( !strcmp ( sName, "weights" ) )	m_iWeights = ParseArray<uint32> ( &m_pWeights, sValue );
++	else if ( !strcmp ( sName, "minid" ) )		m_iMinID = iValue;
++	else if ( !strcmp ( sName, "maxid" ) )		m_iMaxID = iValue;
++	else if ( !strcmp ( sName, "maxmatches" ) )	m_iMaxMatches = iValue;
++	else if ( !strcmp ( sName, "maxquerytime" ) )	m_iMaxQueryTime = iValue;
++	else if ( !strcmp ( sName, "groupsort" ) )	m_sGroupSortBy = sValue;
++	else if ( !strcmp ( sName, "distinct" ) )	m_sGroupDistinct = sValue;
++	else if ( !strcmp ( sName, "cutoff" ) )		m_iCutoff = iValue;
++	else if ( !strcmp ( sName, "comment" ) )	m_sComment = sValue;
++	else if ( !strcmp ( sName, "select" ) )		m_sSelect = sValue;
++
++	else if ( !strcmp ( sName, "mode" ) )
++	{
++		m_eMode = SPH_MATCH_ALL;
++		if ( !strcmp ( sValue, "any" ) )		m_eMode = SPH_MATCH_ANY;
++		else if ( !strcmp ( sValue, "phrase" ) )	m_eMode = SPH_MATCH_PHRASE;
++		else if ( !strcmp ( sValue, "boolean" ) )	m_eMode = SPH_MATCH_BOOLEAN;
++		else if ( !strcmp ( sValue, "ext" ) )		m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "extended" ) )	m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "ext2" ) )		m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "extended2" ) )	m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "all" ) )		m_eMode = SPH_MATCH_ALL;
++		else if ( !strcmp ( sValue, "fullscan" ) )	m_eMode = SPH_MATCH_FULLSCAN;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown matching mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "ranker" ) )
++	{
++		m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		if ( !strcmp ( sValue, "proximity_bm25" ) )	m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		else if ( !strcmp ( sValue, "bm25" ) )		m_eRanker = SPH_RANK_BM25;
++		else if ( !strcmp ( sValue, "none" ) )		m_eRanker = SPH_RANK_NONE;
++		else if ( !strcmp ( sValue, "wordcount" ) )	m_eRanker = SPH_RANK_WORDCOUNT;
++		else if ( !strcmp ( sValue, "proximity" ) )	m_eRanker = SPH_RANK_PROXIMITY;
++		else if ( !strcmp ( sValue, "matchany" ) )	m_eRanker = SPH_RANK_MATCHANY;
++		else if ( !strcmp ( sValue, "fieldmask" ) )	m_eRanker = SPH_RANK_FIELDMASK;
++		else if ( !strcmp ( sValue, "sph04" ) )		m_eRanker = SPH_RANK_SPH04;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown ranking mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "sort" ) )
++	{
++		static const struct
++		{
++			const char * m_sName;
++			ESphSortOrder m_eSort;
++		} dSortModes[] =
++		{
++			{ "relevance", SPH_SORT_RELEVANCE },
++			{ "attr_desc:", SPH_SORT_ATTR_DESC },
++			{ "attr_asc:", SPH_SORT_ATTR_ASC },
++			{ "time_segments:", SPH_SORT_TIME_SEGMENTS },
++			{ "extended:", SPH_SORT_EXTENDED },
++			{ "expr:", SPH_SORT_EXPR }
++		};
++
++		int i;
++		const int nModes = sizeof(dSortModes)/sizeof(dSortModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dSortModes[i].m_sName, strlen ( dSortModes[i].m_sName ) ) )
++		{
++			m_eSort = dSortModes[i].m_eSort;
++			m_sSortBy = sValue + strlen ( dSortModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown sorting mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( !strcmp ( sName, "groupby" ) )
++	{
++		static const struct
++		{
++			const char * m_sName;
++			ESphGroupBy m_eFunc;
++		} dGroupModes[] =
++		{
++			{ "day:", SPH_GROUPBY_DAY },
++			{ "week:", SPH_GROUPBY_WEEK },
++			{ "month:", SPH_GROUPBY_MONTH },
++			{ "year:", SPH_GROUPBY_YEAR },
++			{ "attr:", SPH_GROUPBY_ATTR },
++		};
++
++		int i;
++		const int nModes = sizeof(dGroupModes)/sizeof(dGroupModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dGroupModes[i].m_sName, strlen ( dGroupModes[i].m_sName ) ) )
++		{
++			m_eGroupFunc = dGroupModes[i].m_eFunc;
++			m_sGroupBy = sValue + strlen ( dGroupModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown groupby mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "range" ) || !strcmp ( sName, "!range" ) || !strcmp ( sName, "floatrange" ) || !strcmp ( sName, "!floatrange" ) ) )
++	{
++		for ( ;; )
++		{
++			char * p = sName;
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_bExclude = ( *p=='!' ); if ( tFilter.m_bExclude ) p++;
++			tFilter.m_eType = ( *p=='f' ) ? SPH_FILTER_FLOATRANGE : SPH_FILTER_RANGE;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			tFilter.m_sAttrName = chop ( sValue );
++			sValue = p;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			if ( tFilter.m_eType==SPH_FILTER_RANGE )
++			{
++				tFilter.m_uMinValue = strtoll ( sValue, NULL, 0 );
++				tFilter.m_uMaxValue = strtoll ( p, NULL, 0 );
++			} else
++			{
++				tFilter.m_fMinValue = (float)atof(sValue);
++				tFilter.m_fMaxValue = (float)atof(p);
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "filter" ) || !strcmp ( sName, "!filter" ) ) )
++	{
++		for ( ;; )
++		{
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_eType = SPH_FILTER_VALUES;
++			tFilter.m_bExclude = ( strcmp ( sName, "!filter" )==0 );
++
++			// get the attr name
++			while ( (*sValue) && !myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++
++			tFilter.m_sAttrName = sValue;
++			while ( (*sValue) && myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++			*sValue++ = '\0';
++
++			// get the values
++			tFilter.m_iValues = ParseArray<longlong> ( &tFilter.m_pValues, sValue );
++			if ( !tFilter.m_iValues )
++			{
++				assert ( !tFilter.m_pValues );
++				break;
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( !strcmp ( sName, "indexweights" ) || !strcmp ( sName, "fieldweights" ) )
++	{
++		bool bIndex = !strcmp ( sName, "indexweights" );
++		int * pCount = bIndex ? &m_iIndexWeights : &m_iFieldWeights;
++		char ** pNames = bIndex ? &m_sIndexWeight[0] : &m_sFieldWeight[0];
++		int * pWeights = bIndex ? &m_iIndexWeight[0] : &m_iFieldWeight[0];
++
++		*pCount = 0;
++
++		char * p = sValue;
++		while ( *p && *pCount<SPHINXSE_MAX_FILTERS )
++		{
++			// extract attr name
++			if ( !myisattr(*p) )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: index name expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++
++			pNames[*pCount] = p;
++			while ( myisattr(*p) ) p++;
++
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			*p++ = '\0';
++
++			// extract attr value
++			char * sVal = p;
++			while ( isdigit(*p) ) p++;
++			if ( p==sVal )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: integer weight expected near '%s'", sName, sVal );
++				SPH_RET(false);
++			}
++			pWeights[*pCount] = atoi(sVal);
++			(*pCount)++;
++
++			if ( !*p )
++				break;
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			p++;
++		}
++
++	} else if ( !strcmp ( sName, "geoanchor" ) )
++	{
++		m_bGeoAnchor = false;
++		for ( ;; )
++		{
++			char * sLat = sValue;
++			char * p = sValue;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLong = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLatVal = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLongVal = p;
++
++			m_sGeoLatAttr = chop(sLat);
++			m_sGeoLongAttr = chop(sLong);
++			m_fGeoLatitude = (float)atof ( sLatVal );
++			m_fGeoLongitude = (float)atof ( sLongVal );
++			m_bGeoAnchor = true;
++			break;
++		}
++		if ( !m_bGeoAnchor )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "geoanchor: parse error, not enough comma-separated arguments" );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
++	{
++		char * sName = NULL;
++		int iType = 0;
++		CSphSEQuery::Override_t * pOverride = NULL;
++
++		// get name and type
++		char * sRest = sValue;
++		for ( ;; )
++		{
++			sName = sRest;
++			if ( !*sName )
++				break;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++			*sRest++ = '\0';
++			char * sType = sRest;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++
++			static const struct
++			{
++				const char * m_sName;
++				int m_iType;
++			}
++			dAttrTypes[] =
++			{
++				{ "int", SPH_ATTR_INTEGER },
++				{ "timestamp", SPH_ATTR_TIMESTAMP },
++				{ "bool", SPH_ATTR_BOOL },
++				{ "float", SPH_ATTR_FLOAT },
++				{ "bigint", SPH_ATTR_BIGINT }
++			};
++			for ( int i=0; i<sizeof(dAttrTypes)/sizeof(*dAttrTypes); i++ )
++				if ( !strncmp ( sType, dAttrTypes[i].m_sName, sRest - sType ) )
++			{
++				iType = dAttrTypes[i].m_iType;
++				break;
++			}
++			break;
++		}
++
++		// fail
++		if ( !sName || !*sName || !iType )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: malformed query" );
++			SPH_RET(false);
++		}
++
++		// grab id:value pairs
++		sRest++;
++		while ( sRest )
++		{
++			char * sId = sRest;
++			if (!( sRest = strchr ( sRest, ':' ) )) break; *sRest++ = '\0';
++			if (!( sRest - sId )) break;
++
++			char * sValue = sRest;
++			if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
++				*sRest++ = '\0';
++			if ( !*sValue )
++				break;
++
++			if ( !pOverride )
++			{
++				pOverride = new CSphSEQuery::Override_t;
++				pOverride->m_sName = chop(sName);
++				pOverride->m_iType = iType;
++				m_dOverrides.append ( pOverride );
++			}
++
++			ulonglong uId = strtoull ( sId, NULL, 10 );
++			CSphSEQuery::Override_t::Value_t tValue;
++			if ( iType==SPH_ATTR_FLOAT )
++				tValue.m_fValue = (float)atof(sValue);
++			else if ( iType==SPH_ATTR_BIGINT )
++				tValue.m_iValue64 = strtoll ( sValue, NULL, 10 );
++			else
++				tValue.m_uValue = (uint32)strtoul ( sValue, NULL, 10 );
++
++			pOverride->m_dIds.append ( uId );
++			pOverride->m_dValues.append ( tValue );
++		}
++
++		if ( !pOverride )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: id:value mapping expected" );
++			SPH_RET(false);
++		}
++		SPH_RET(true);
++	} else
++	{
++		snprintf ( m_sParseError, sizeof(m_sParseError), "unknown parameter '%s'", sName );
++		SPH_RET(false);
++	}
++
++	// !COMMIT handle syntax errors
++
++	SPH_RET(true);
++}
++
++
++bool CSphSEQuery::Parse ()
++{
++	SPH_ENTER_METHOD();
++	SPH_DEBUG ( "query [[ %s ]]", m_sQueryBuffer );
++
++	m_bQuery = false;
++	char * pCur = m_sQueryBuffer;
++	char * pNext = pCur;
++
++	while ( ( pNext = strchr ( pNext, ';' ) )!=NULL )
++	{
++		// handle escaped semicolons
++		if ( pNext>m_sQueryBuffer && pNext[-1]=='\\' && pNext[1]!='\0' )
++		{
++			pNext++;
++			continue;
++		}
++
++		// handle semicolon-separated clauses
++		*pNext++ = '\0';
++		if ( !ParseField ( pCur ) )
++			SPH_RET(false);
++		pCur = pNext;
++	}
++
++	SPH_DEBUG ( "q [[ %s ]]", m_sQuery );
++
++	SPH_RET(true);
++}
++
++
++void CSphSEQuery::SendBytes ( const void * pBytes, int iBytes )
++{
++	SPH_ENTER_METHOD();
++	if ( m_iBufLeft<iBytes )
++	{
++		m_bBufOverrun = true;
++		SPH_VOID_RET();
++	}
++
++	memcpy ( m_pCur, pBytes, iBytes );
++
++	m_pCur += iBytes;
++	m_iBufLeft -= iBytes;
++	SPH_VOID_RET();
++}
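++
++// [Editorial note, not part of the upstream patch: BuildRequest() below works
++// in two passes. It first computes the exact packet size (the iReqSize
++// arithmetic), then serializes through the Send*() helpers, which flag
++// m_bBufOverrun instead of writing past the buffer; the final size check
++// turns any mismatch into an internal error (-1).]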
++
++
++int CSphSEQuery::BuildRequest ( char ** ppBuffer )
++{
++	SPH_ENTER_METHOD();
++
++	// calc request length
++	int iReqSize = 124 + 4*m_iWeights
++		+ strlen ( m_sSortBy )
++		+ strlen ( m_sQuery )
++		+ strlen ( m_sIndex )
++		+ strlen ( m_sGroupBy )
++		+ strlen ( m_sGroupSortBy )
++		+ strlen ( m_sGroupDistinct )
++		+ strlen ( m_sComment )
++		+ strlen ( m_sSelect );
++	for ( int i=0; i<m_iFilters; i++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[i];
++		iReqSize += 12 + strlen ( tFilter.m_sAttrName ); // string attr-name; int type; int exclude-flag
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES: iReqSize += 4 + 8*tFilter.m_iValues; break;
++			case SPH_FILTER_RANGE: iReqSize += 16; break;
++			case SPH_FILTER_FLOATRANGE: iReqSize += 8; break;
++		}
++	}
++	if ( m_bGeoAnchor ) // 1.14+
++		iReqSize += 16 + strlen ( m_sGeoLatAttr ) + strlen ( m_sGeoLongAttr );
++	for ( int i=0; i<m_iIndexWeights; i++ ) // 1.15+
++		iReqSize += 8 + strlen ( m_sIndexWeight[i] );
++	for ( int i=0; i<m_iFieldWeights; i++ ) // 1.18+
++		iReqSize += 8 + strlen ( m_sFieldWeight[i] );
++	// overrides
++	iReqSize += 4;
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		const uint32 uSize = pOverride->m_iType==SPH_ATTR_BIGINT ? 16 : 12; // id64 + value
++		iReqSize += strlen ( pOverride->m_sName ) + 12 + uSize*pOverride->m_dIds.elements();
++	}
++	// select
++	iReqSize += 4;
++
++	m_iBufLeft = 0;
++	SafeDeleteArray ( m_pBuf );
++
++	m_pBuf = new char [ iReqSize ];
++	if ( !m_pBuf )
++		SPH_RET(-1);
++
++	m_pCur = m_pBuf;
++	m_iBufLeft = iReqSize;
++	m_bBufOverrun = false;
++	(*ppBuffer) = m_pBuf;
++
++	// build request
++	SendWord ( SEARCHD_COMMAND_SEARCH ); // command id
++	SendWord ( VER_COMMAND_SEARCH ); // command version
++	SendInt ( iReqSize-8 ); // packet body length
++
++	SendInt ( 1 ); // number of queries
++	SendInt ( m_iOffset );
++	SendInt ( m_iLimit );
++	SendInt ( m_eMode );
++	SendInt ( m_eRanker ); // 1.16+
++	SendInt ( m_eSort );
++	SendString ( m_sSortBy ); // sort attr
++	SendString ( m_sQuery ); // query
++	SendInt ( m_iWeights );
++	for ( int j=0; j<m_iWeights; j++ )
++		SendInt ( m_pWeights[j] ); // weights
++	SendString ( m_sIndex ); // indexes
++	SendInt ( 1 ); // id64 range follows
++	SendUint64 ( m_iMinID ); // id/ts ranges
++	SendUint64 ( m_iMaxID );
++
++	SendInt ( m_iFilters );
++	for ( int j=0; j<m_iFilters; j++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[j];
++		SendString ( tFilter.m_sAttrName );
++		SendInt ( tFilter.m_eType );
++
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES:
++				SendInt ( tFilter.m_iValues );
++				for ( int k=0; k<tFilter.m_iValues; k++ )
++					SendUint64 ( tFilter.m_pValues[k] );
++				break;
++
++			case SPH_FILTER_RANGE:
++				SendUint64 ( tFilter.m_uMinValue );
++				SendUint64 ( tFilter.m_uMaxValue );
++				break;
++
++			case SPH_FILTER_FLOATRANGE:
++				SendFloat ( tFilter.m_fMinValue );
++				SendFloat ( tFilter.m_fMaxValue );
++				break;
++		}
++
++		SendInt ( tFilter.m_bExclude );
++	}
++
++	SendInt ( m_eGroupFunc );
++	SendString ( m_sGroupBy );
++	SendInt ( m_iMaxMatches );
++	SendString ( m_sGroupSortBy );
++	SendInt ( m_iCutoff ); // 1.9+
++	SendInt ( m_iRetryCount ); // 1.10+
++	SendInt ( m_iRetryDelay );
++	SendString ( m_sGroupDistinct ); // 1.11+
++	SendInt ( m_bGeoAnchor ); // 1.14+
++	if ( m_bGeoAnchor )
++	{
++		SendString ( m_sGeoLatAttr );
++		SendString ( m_sGeoLongAttr );
++		SendFloat ( m_fGeoLatitude );
++		SendFloat ( m_fGeoLongitude );
++	}
++	SendInt ( m_iIndexWeights ); // 1.15+
++	for ( int i=0; i<m_iIndexWeights; i++ )
++	{
++		SendString ( m_sIndexWeight[i] );
++		SendInt ( m_iIndexWeight[i] );
++	}
++	SendInt ( m_iMaxQueryTime ); // 1.17+
++	SendInt ( m_iFieldWeights ); // 1.18+
++	for ( int i=0; i<m_iFieldWeights; i++ )
++	{
++		SendString ( m_sFieldWeight[i] );
++		SendInt ( m_iFieldWeight[i] );
++	}
++	SendString ( m_sComment );
++
++	// overrides
++	SendInt ( m_dOverrides.elements() );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		SendString ( pOverride->m_sName );
++		SendDword ( pOverride->m_iType );
++		SendInt ( pOverride->m_dIds.elements() );
++		for ( int j=0; j<pOverride->m_dIds.elements(); j++ )
++		{
++			SendUint64 ( pOverride->m_dIds.at(j) );
++			if ( pOverride->m_iType==SPH_ATTR_FLOAT )
++				SendFloat ( pOverride->m_dValues.at(j).m_fValue );
++			else if ( pOverride->m_iType==SPH_ATTR_BIGINT )
++				SendUint64 ( pOverride->m_dValues.at(j).m_iValue64 );
++			else
++				SendDword ( pOverride->m_dValues.at(j).m_uValue );
++		}
++	}
++
++	// select
++	SendString ( m_sSelect );
++
++	// detect buffer overruns and underruns, and report internal error
++	if ( m_bBufOverrun || m_iBufLeft!=0 || m_pCur-m_pBuf!=iReqSize )
++		SPH_RET(-1);
++
++	// all fine
++	SPH_RET ( iReqSize );
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// SPHINX HANDLER
++//////////////////////////////////////////////////////////////////////////////
++
++static const char * ha_sphinx_exts[] = { NullS };
++
++
++#if MYSQL_VERSION_ID<50100
++ha_sphinx::ha_sphinx ( TABLE_ARG * table )
++	: handler ( &sphinx_hton, table )
++#else
++ha_sphinx::ha_sphinx ( handlerton * hton, TABLE_ARG * table )
++	: handler ( hton, table )
++#endif
++	, m_pShare ( NULL )
++	, m_iMatchesTotal ( 0 )
++	, m_iCurrentPos ( 0 )
++	, m_pCurrentKey ( NULL )
++	, m_iCurrentKeyLen ( 0 )
++	, m_pResponse ( NULL )
++	, m_pResponseEnd ( NULL )
++	, m_pCur ( NULL )
++	, m_bUnpackError ( false )
++	, m_iFields ( 0 )
++	, m_dFields ( NULL )
++	, m_iAttrs ( 0 )
++	, m_dAttrs ( NULL )
++	, m_bId64 ( 0 )
++	, m_dUnboundFields ( NULL )
++{
++	SPH_ENTER_METHOD();
++	if ( current_thd )
++		current_thd->variables.engine_condition_pushdown = true;
++	SPH_VOID_RET();
++}
++
++
++// If frm_error() is called then we will use this to find out what file extensions
++// exist for the storage engine. This is also used by the default rename_table and
++// delete_table method in handler.cc.
++const char ** ha_sphinx::bas_ext() const
++{
++	return ha_sphinx_exts;
++}
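++
++// [Editorial note, not part of the upstream patch: besides fetching the
++// share, open() below also resets this thread's ha_data slot to NULL; the
++// per-connection CSphSEThreadData hangs off that slot and is freed in
++// sphinx_close_connection(), so it must start out empty for each new use.]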
++
++
++// Used for opening tables. The name will be the name of the file.
++// A table is opened when it needs to be opened. For instance
++// when a request comes in for a select on the table (tables are not
++// open and closed for each request, they are cached).
++//
++// Called from handler.cc by handler::ha_open(). The server opens all tables by
++// calling ha_open() which then calls the handler specific open().
++int ha_sphinx::open ( const char * name, int, uint )
++{
++	SPH_ENTER_METHOD();
++	m_pShare = get_share ( name, table );
++	if ( !m_pShare )
++		SPH_RET(1);
++
++	thr_lock_data_init ( &m_pShare->m_tLock, &m_tLock, NULL );
++
++	#if MYSQL_VERSION_ID>50100
++	*thd_ha_data ( table->in_use, ht ) = NULL;
++	#else
++	table->in_use->ha_data [ sphinx_hton.slot ] = NULL;
++	#endif
++
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::Connect ( const char * sHost, ushort uPort )
++{
++	struct sockaddr_in sin;
++#ifndef __WIN__
++	struct sockaddr_un saun;
++#endif
++
++	int iDomain = 0;
++	int iSockaddrSize = 0;
++	struct sockaddr * pSockaddr = NULL;
++
++	in_addr_t ip_addr;
++
++	if ( uPort )
++	{
++		iDomain = AF_INET;
++		iSockaddrSize = sizeof(sin);
++		pSockaddr = (struct sockaddr *) &sin;
++
++		memset ( &sin, 0, sizeof(sin) );
++		sin.sin_family = AF_INET;
++		sin.sin_port = htons(uPort);
++
++		// prepare host address
++		if ( (int)( ip_addr = inet_addr(sHost) )!=(int)INADDR_NONE )
++		{
++			memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) );
++		} else
++		{
++			int tmp_errno;
++			bool bError = false;
++
++#if MYSQL_VERSION_ID>=50515
++			struct addrinfo tmp_hostent, *hp;
++			tmp_errno = getaddrinfo ( sHost, NULL, &tmp_hostent, &hp );
++			if ( !tmp_errno )
++			{
++				freeaddrinfo ( hp );
++				bError = true;
++			}
++#else
++			struct hostent tmp_hostent, *hp;
++			char buff2 [ GETHOSTBYNAME_BUFF_SIZE ];
++			hp = my_gethostbyname_r ( sHost, &tmp_hostent, buff2, sizeof(buff2), &tmp_errno );
++			if ( !hp )
++			{
++				my_gethostbyname_r_free();
++				bError = true;
++			}
++#endif
++
++			if ( bError )
++			{
++				char sError[256];
++				my_snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", sHost );
++
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++				SPH_RET(-1);
++			}
++
++#if MYSQL_VERSION_ID>=50515
++			memcpy ( &sin.sin_addr, hp->ai_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->ai_addrlen ) );
++			freeaddrinfo ( hp );
++#else
++			memcpy ( &sin.sin_addr, hp->h_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) );
++			my_gethostbyname_r_free();
++#endif
++		}
++	} else
++	{
++#ifndef __WIN__
++		iDomain = AF_UNIX;
++		iSockaddrSize = sizeof(saun);
++		pSockaddr = (struct sockaddr *) &saun;
++
++		memset ( &saun, 0, sizeof(saun) );
++		saun.sun_family = AF_UNIX;
++		strncpy ( saun.sun_path, sHost, sizeof(saun.sun_path)-1 );
++#else
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "UNIX sockets are not supported on Windows" );
++		SPH_RET(-1);
++#endif
++	}
++
++	char sError[512];
++	int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
++
++	if ( iSocket<0 )
++	{
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "failed to create client socket" );
++		SPH_RET(-1);
++	}
++
++	if ( connect ( iSocket, pSockaddr, iSockaddrSize )<0 )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to connect to searchd (host=%s, errno=%d, port=%d)",
++			sHost, errno, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	return iSocket;
++}
++
++
++int ha_sphinx::ConnectAPI ( const char * sQueryHost, int iQueryPort )
++{
++	SPH_ENTER_METHOD();
++
++	const char * sHost = ( sQueryHost && *sQueryHost ) ? sQueryHost : m_pShare->m_sHost;
++	ushort uPort = iQueryPort ? (ushort)iQueryPort : m_pShare->m_iPort;
++
++	int iSocket = Connect ( sHost, uPort );
++	if ( iSocket<0 )
++		SPH_RET ( iSocket );
++
++	char sError[512];
++
++	int version;
++	if ( ::recv ( iSocket, (char *)&version, sizeof(version), 0 )!=sizeof(version) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to receive searchd version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO );
++	if ( ::send ( iSocket, (char*)&uClientVersion, sizeof(uClientVersion), 0 )!=sizeof(uClientVersion) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to send client version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	SPH_RET ( iSocket );
++}
++
++
++// Closes a table. We call the free_share() function to free any resources
++// that we have allocated in the "shared" structure.
++//
++// Called from sql_base.cc, sql_select.cc, and table.cc.
++// In sql_select.cc it is only used to close up temporary tables or during
++// the process where a temporary table is converted over to being a
++// myisam table.
++// For sql_base.cc look at close_data_tables().
++int ha_sphinx::close()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( free_share ( m_pShare ) );
++}
++
++
++int ha_sphinx::HandleMysqlError ( MYSQL * pConn, int iErrCode )
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++	{
++		strncpy ( pTls->m_tStats.m_sLastMessage, mysql_error ( pConn ), sizeof ( pTls->m_tStats.m_sLastMessage ) );
++		pTls->m_tStats.m_bLastError = true;
++	}
++
++	mysql_close ( pConn );
++
++	my_error ( iErrCode, MYF(0), pTls->m_tStats.m_sLastMessage );
++	return -1;
++}
++
++
++int ha_sphinx::extra ( enum ha_extra_function op )
++{
++	CSphSEThreadData * pTls = GetTls();
++	if ( pTls )
++	{
++		if ( op==HA_EXTRA_WRITE_CAN_REPLACE )
++			pTls->m_bReplace = true;
++		else if ( op==HA_EXTRA_WRITE_CANNOT_REPLACE )
++			pTls->m_bReplace = false;
++	}
++	return 0;
++}
++
++
++int ha_sphinx::write_row ( byte * )
++{
++	SPH_ENTER_METHOD();
++	if ( !m_pShare || !m_pShare->m_bSphinxQL )
++		SPH_RET ( HA_ERR_WRONG_COMMAND );
++
++	// SphinxQL inserts only, pretty much similar to abandoned federated
++	char sQueryBuf[1024];
++	char sValueBuf[1024];
++
++	String sQuery ( sQueryBuf, sizeof(sQueryBuf), &my_charset_bin );
++	String sValue ( sValueBuf, sizeof(sValueBuf), &my_charset_bin );
++	sQuery.length ( 0 );
++	sValue.length ( 0 );
++
++	CSphSEThreadData * pTls = GetTls ();
++	sQuery.append ( pTls && pTls->m_bReplace ? "REPLACE INTO " : "INSERT INTO " );
++	sQuery.append ( m_pShare->m_sIndex );
++	sQuery.append ( " (" );
++
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		sQuery.append ( (*ppField)->field_name );
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ") VALUES (" );
++
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		if ( (*ppField)->is_null() )
++		{
++			sQuery.append ( "''" );
++
++		} else
++		{
++			if ( (*ppField)->type()==MYSQL_TYPE_TIMESTAMP )
++			{
++				Item_field * pWrap = new Item_field ( *ppField ); // autofreed by query arena, I assume
++				Item_func_unix_timestamp * pConv = new Item_func_unix_timestamp ( pWrap );
++				pConv->quick_fix_field();
++				unsigned int uTs = (unsigned int) pConv->val_int();
++
++				snprintf ( sValueBuf, sizeof(sValueBuf), "'%u'", uTs );
++				sQuery.append ( sValueBuf );
++
++			} else
++			{
++				(*ppField)->val_str ( &sValue );
++				sQuery.append ( "'" );
++				sValue.print ( &sQuery );
++				sQuery.append ( "'" );
++				sValue.length(0);
++			}
++		}
++
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ")" );
++
++	// FIXME? pretty inefficient to reconnect every time under high load,
++	// but this was intentionally written for a low load scenario..
++	MYSQL * pConn = mysql_init ( NULL );
++	if ( !pConn )
++		SPH_RET ( ER_OUT_OF_RESOURCES );
++
++	unsigned int uTimeout = 1;
++	mysql_options ( pConn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&uTimeout );
++
++	if ( !mysql_real_connect ( pConn, m_pShare->m_sHost, "root", "", "", m_pShare->m_iPort, m_pShare->m_sSocket, 0 ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_CONNECT_TO_FOREIGN_DATA_SOURCE ) );
++
++	if ( mysql_real_query ( pConn, sQuery.ptr(), sQuery.length() ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_QUERY_ON_FOREIGN_DATA_SOURCE ) );
++
++	// all ok!
++	mysql_close ( pConn );
++	SPH_RET(0);
++}
++
++
++static inline bool IsIntegerFieldType ( enum_field_types eType )
++{
++	return eType==MYSQL_TYPE_LONG || eType==MYSQL_TYPE_LONGLONG;
++}
++
++
++static inline bool IsIDField ( Field * pField )
++{
++	enum_field_types eType = pField->type();
++
++	if ( eType==MYSQL_TYPE_LONGLONG )
++		return true;
++
++	if ( eType==MYSQL_TYPE_LONG && ((Field_num*)pField)->unsigned_flag )
++		return true;
++
++	return false;
++}
++ mysql_close ( pConn ); ++ SPH_RET(0); ++} ++ ++ ++int ha_sphinx::update_row ( const byte *, byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_WRONG_COMMAND ); ++} ++ ++ ++// keynr is key (index) number ++// sorted is 1 if result MUST be sorted according to index ++int ha_sphinx::index_init ( uint keynr, bool ) ++{ ++ SPH_ENTER_METHOD(); ++ active_index = keynr; ++ ++ CSphSEThreadData * pTls = GetTls(); ++ if ( pTls ) ++ pTls->m_bCondDone = false; ++ ++ SPH_RET(0); ++} ++ ++ ++int ha_sphinx::index_end() ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET(0); ++} ++ ++ ++uint32 ha_sphinx::UnpackDword () ++{ ++ if ( m_pCur+sizeof(uint32)>m_pResponseEnd ) // NOLINT ++ { ++ m_pCur = m_pResponseEnd; ++ m_bUnpackError = true; ++ return 0; ++ } ++ ++ uint32 uRes = ntohl ( sphUnalignedRead ( *(uint32*)m_pCur ) ); ++ m_pCur += sizeof(uint32); // NOLINT ++ return uRes; ++} ++ ++ ++char * ha_sphinx::UnpackString () ++{ ++ uint32 iLen = UnpackDword (); ++ if ( !iLen ) ++ return NULL; ++ ++ if ( m_pCur+iLen>m_pResponseEnd ) ++ { ++ m_pCur = m_pResponseEnd; ++ m_bUnpackError = true; ++ return NULL; ++ } ++ ++ char * sRes = new char [ 1+iLen ]; ++ memcpy ( sRes, m_pCur, iLen ); ++ sRes[iLen] = '\0'; ++ m_pCur += iLen; ++ return sRes; ++} ++ ++ ++static inline const char * FixNull ( const char * s ) ++{ ++ return s ? s : "(null)"; ++} ++ ++ ++bool ha_sphinx::UnpackSchema () ++{ ++ SPH_ENTER_METHOD(); ++ ++ // cleanup ++ if ( m_dFields ) ++ for ( int i=0; i<(int)m_iFields; i++ ) ++ SafeDeleteArray ( m_dFields[i] ); ++ SafeDeleteArray ( m_dFields ); ++ ++ // unpack network packet ++ uint32 uStatus = UnpackDword (); ++ char * sMessage = NULL; ++ ++ if ( uStatus!=SEARCHD_OK ) ++ { ++ sMessage = UnpackString (); ++ CSphSEThreadData * pTls = GetTls (); ++ if ( pTls ) ++ { ++ strncpy ( pTls->m_tStats.m_sLastMessage, sMessage, sizeof(pTls->m_tStats.m_sLastMessage) ); ++ pTls->m_tStats.m_bLastError = ( uStatus==SEARCHD_ERROR ); ++ } ++ ++ if ( uStatus==SEARCHD_ERROR ) ++ { ++ char sError[1024]; ++ my_snprintf ( sError, sizeof(sError), "searchd error: %s", sMessage ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ SafeDeleteArray ( sMessage ); ++ SPH_RET ( false ); ++ } ++ } ++ ++ m_iFields = UnpackDword (); ++ m_dFields = new char * [ m_iFields ]; ++ if ( !m_dFields ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (fields alloc error)" ); ++ SPH_RET(false); ++ } ++ ++ for ( uint32 i=0; i<m_iFields; i++ ) ++ m_dFields[i] = UnpackString (); ++ ++ SafeDeleteArray ( m_dAttrs ); ++ m_iAttrs = UnpackDword (); ++ m_dAttrs = new CSphSEAttr [ m_iAttrs ]; ++ if ( !m_dAttrs ) ++ { ++ for ( int i=0; i<(int)m_iFields; i++ ) ++ SafeDeleteArray ( m_dFields[i] ); ++ SafeDeleteArray ( m_dFields ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (attrs alloc error)" ); ++ SPH_RET(false); ++ } ++ ++ for ( uint32 i=0; i<m_iAttrs; i++ ) ++ { ++ m_dAttrs[i].m_sName = UnpackString (); ++ m_dAttrs[i].m_uType = UnpackDword (); ++ if ( m_bUnpackError ) // m_sName may be null ++ break; ++ ++ m_dAttrs[i].m_iField = -1; ++ for ( int j=SPHINXSE_SYSTEM_COLUMNS; j<m_pShare->m_iTableFields; j++ ) ++ { ++ const char * sTableField = m_pShare->m_sTableField[j]; ++ const char * sAttrField = m_dAttrs[i].m_sName; ++ if ( m_dAttrs[i].m_sName[0]=='@' ) ++ { ++ const char * sAtPrefix = "_sph_"; ++ if ( strncmp ( sTableField, sAtPrefix, strlen(sAtPrefix) ) ) ++ continue; ++ sTableField += strlen(sAtPrefix); ++ sAttrField++; ++ } ++ ++ if ( 
!strcasecmp ( sAttrField, sTableField ) ) ++ { ++ // we're almost good, but ++ // let's enforce that timestamp columns can only receive timestamp attributes ++ if ( m_pShare->m_eTableFieldType[j]!=MYSQL_TYPE_TIMESTAMP || m_dAttrs[i].m_uType==SPH_ATTR_TIMESTAMP ) ++ m_dAttrs[i].m_iField = j; ++ break; ++ } ++ } ++ } ++ ++ m_iMatchesTotal = UnpackDword (); ++ ++ m_bId64 = UnpackDword (); ++ if ( m_bId64 && m_pShare->m_eTableFieldType[0]!=MYSQL_TYPE_LONGLONG ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: 1st column must be bigint to accept 64-bit DOCIDs" ); ++ SPH_RET(false); ++ } ++ ++ // network packet unpacked; build unbound fields map ++ SafeDeleteArray ( m_dUnboundFields ); ++ m_dUnboundFields = new int [ m_pShare->m_iTableFields ]; ++ ++ for ( int i=0; i<m_pShare->m_iTableFields; i++ ) ++ { ++ if ( i<SPHINXSE_SYSTEM_COLUMNS ) ++ m_dUnboundFields[i] = SPH_ATTR_NONE; ++ ++ else if ( m_pShare->m_eTableFieldType[i]==MYSQL_TYPE_TIMESTAMP ) ++ m_dUnboundFields[i] = SPH_ATTR_TIMESTAMP; ++ ++ else ++ m_dUnboundFields[i] = SPH_ATTR_INTEGER; ++ } ++ ++ for ( uint32 i=0; i<m_iAttrs; i++ ) ++ if ( m_dAttrs[i].m_iField>=0 ) ++ m_dUnboundFields [ m_dAttrs[i].m_iField ] = SPH_ATTR_NONE; ++ ++ if ( m_bUnpackError ) ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (unpack error)" ); ++ ++ SPH_RET ( !m_bUnpackError ); ++} ++ ++ ++bool ha_sphinx::UnpackStats ( CSphSEStats * pStats ) ++{ ++ assert ( pStats ); ++ ++ char * pCurSave = m_pCur; ++ for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT ++ { ++ m_pCur += m_bId64 ? 12 : 8; // skip id+weight ++ for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT ++ { ++ if ( m_dAttrs[i].m_uType & SPH_ATTR_MULTI ) ++ { ++ // skip MVA list ++ uint32 uCount = UnpackDword (); ++ m_pCur += uCount*4; ++ } else // skip normal value ++ m_pCur += m_dAttrs[i].m_uType==SPH_ATTR_BIGINT ? 
8 : 4;
++		}
++	}
++
++	pStats->m_iMatchesTotal = UnpackDword ();
++	pStats->m_iMatchesFound = UnpackDword ();
++	pStats->m_iQueryMsec = UnpackDword ();
++	pStats->m_iWords = UnpackDword ();
++
++	if ( m_bUnpackError )
++		return false;
++
++	SafeDeleteArray ( pStats->m_dWords );
++	if ( pStats->m_iWords<0 || pStats->m_iWords>=SPHINXSE_MAX_KEYWORDSTATS )
++		return false;
++	pStats->m_dWords = new CSphSEWordStats [ pStats->m_iWords ];
++	if ( !pStats->m_dWords )
++		return false;
++
++	for ( int i=0; i<pStats->m_iWords; i++ )
++	{
++		CSphSEWordStats & tWord = pStats->m_dWords[i];
++		tWord.m_sWord = UnpackString ();
++		tWord.m_iDocs = UnpackDword ();
++		tWord.m_iHits = UnpackDword ();
++	}
++
++	if ( m_bUnpackError )
++		return false;
++
++	m_pCur = pCurSave;
++	return true;
++}
++
++
++/// condition pushdown implementation, to properly intercept WHERE clauses on my columns
++const COND * ha_sphinx::cond_push ( const COND * cond )
++{
++	// catch the simplest case: query_column="some text"
++	for ( ;; )
++	{
++		if ( cond->type()!=COND::FUNC_ITEM )
++			break;
++
++		Item_func * condf = (Item_func *)cond;
++		if ( condf->functype()!=Item_func::EQ_FUNC || condf->argument_count()!=2 )
++			break;
++
++		// get my tls
++		CSphSEThreadData * pTls = GetTls ();
++		if ( !pTls )
++			break;
++
++		Item ** args = condf->arguments();
++		if ( !m_pShare->m_bSphinxQL )
++		{
++			// on non-QL tables, intercept query=value condition for SELECT
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::STRING_ITEM ))
++				break;
++
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=2 ) // FIXME! magic key index
++				break;
++
++			// copy the query, and let know that we intercepted this condition
++			Item_string * pString = (Item_string *) args[1];
++			pTls->m_bQuery = true;
++			strncpy ( pTls->m_sQuery, pString->str_value.c_ptr(), sizeof(pTls->m_sQuery) );
++			pTls->m_sQuery[sizeof(pTls->m_sQuery)-1] = '\0';
++			pTls->m_pQueryCharset = pString->str_value.charset();
++
++		} else
++		{
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::INT_ITEM ))
++				break;
++
++			// on QL tables, intercept id=value condition for DELETE
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=0 ) // FIXME! magic key index
++				break;
++
++			Item_int * pVal = (Item_int *) args[1];
++			pTls->m_iCondId = pVal->val_int();
++			pTls->m_bCondId = true;
++		}
++
++		// we intercepted this condition
++		return NULL;
++	}
++
++	// don't change anything
++	return cond;
++}
++
++
++/// condition popup
++void ha_sphinx::cond_pop ()
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++}
++
++
++/// get TLS (maybe allocate it, too)
++CSphSEThreadData * ha_sphinx::GetTls()
++{
++	// where do we store that pointer in today's version?
++	CSphSEThreadData ** ppTls;
++#if MYSQL_VERSION_ID>50100
++	ppTls = (CSphSEThreadData**) thd_ha_data ( table->in_use, ht );
++#else
++	ppTls = (CSphSEThreadData**) &current_thd->ha_data[sphinx_hton.slot];
++#endif // >50100
++
++	// allocate if needed
++	if ( !*ppTls )
++		*ppTls = new CSphSEThreadData ();
++
++	// errors will be handled by caller
++	return *ppTls;
++}
++
++
++// Positions an index cursor to the index specified in the handle. Fetches the
++// row if available. If the key value is null, begin at the first key of the
++// index.
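++//
++// In other words, the flow below is: connect to searchd, send one packed
++// request, read a fixed 8-byte reply header -- uint16 status, uint16
++// version, uint32 body length, all in network byte order -- then read the
++// body and hand it to UnpackSchema()/UnpackStats() for decoding.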
++int ha_sphinx::index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function ) ++{ ++ SPH_ENTER_METHOD(); ++ char sError[256]; ++ ++ // set new data for thd->ha_data, it is used in show_status ++ CSphSEThreadData * pTls = GetTls(); ++ if ( !pTls ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: TLS malloc() failed" ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ pTls->m_tStats.Reset (); ++ ++ // sphinxql table, just return the key once ++ if ( m_pShare->m_bSphinxQL ) ++ { ++ // over and out ++ if ( pTls->m_bCondDone ) ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ ++ // return a value from pushdown, if any ++ if ( pTls->m_bCondId ) ++ { ++ table->field[0]->store ( pTls->m_iCondId, 1 ); ++ pTls->m_bCondDone = true; ++ SPH_RET(0); ++ } ++ ++ // return a value from key ++ longlong iRef = 0; ++ if ( key_len==4 ) ++ iRef = uint4korr ( key ); ++ else if ( key_len==8 ) ++ iRef = uint8korr ( key ); ++ else ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unexpected key length" ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ table->field[0]->store ( iRef, 1 ); ++ pTls->m_bCondDone = true; ++ SPH_RET(0); ++ } ++ ++ // parse query ++ if ( pTls->m_bQuery ) ++ { ++ // we have a query from condition pushdown ++ m_pCurrentKey = (const byte *) pTls->m_sQuery; ++ m_iCurrentKeyLen = strlen(pTls->m_sQuery); ++ } else ++ { ++ // just use the key (might be truncated) ++ m_pCurrentKey = key+HA_KEY_BLOB_LENGTH; ++ m_iCurrentKeyLen = uint2korr(key); // or maybe key_len? ++ pTls->m_pQueryCharset = m_pShare ? m_pShare->m_pTableQueryCharset : NULL; ++ } ++ ++ CSphSEQuery q ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, m_pShare->m_sIndex ); ++ if ( !q.Parse () ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), q.m_sParseError ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ // do connect ++ int iSocket = ConnectAPI ( q.m_sHost, q.m_iPort ); ++ if ( iSocket<0 ) ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ ++ // my buffer ++ char * pBuffer; // will be free by CSphSEQuery dtor; do NOT free manually ++ int iReqLen = q.BuildRequest ( &pBuffer ); ++ ++ if ( iReqLen<=0 ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: q.BuildRequest() failed" ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ // send request ++ ::send ( iSocket, pBuffer, iReqLen, 0 ); ++ ++ // receive reply ++ char sHeader[8]; ++ int iGot = ::recv ( iSocket, sHeader, sizeof(sHeader), RECV_FLAGS ); ++ if ( iGot!=sizeof(sHeader) ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "failed to receive response header (searchd went away?)" ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ short int uRespStatus = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[0] ) ) ); ++ short int uRespVersion = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[2] ) ) ); ++ uint uRespLength = ntohl ( sphUnalignedRead ( *(uint *)( &sHeader[4] ) ) ); ++ SPH_DEBUG ( "got response header (status=%d version=%d length=%d)", ++ uRespStatus, uRespVersion, uRespLength ); ++ ++ SafeDeleteArray ( m_pResponse ); ++ if ( uRespLength<=SPHINXSE_MAX_ALLOC ) ++ m_pResponse = new char [ uRespLength+1 ]; ++ ++ if ( !m_pResponse ) ++ { ++ my_snprintf ( sError, sizeof(sError), "bad searchd response length (length=%u)", uRespLength ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ int iRecvLength = 0; ++ while ( iRecvLength<(int)uRespLength ) ++ { ++ int iRecv = ::recv ( iSocket, m_pResponse+iRecvLength, 
uRespLength-iRecvLength, RECV_FLAGS ); ++ if ( iRecv<0 ) ++ break; ++ iRecvLength += iRecv; ++ } ++ ++ ::closesocket ( iSocket ); ++ iSocket = -1; ++ ++ if ( iRecvLength!=(int)uRespLength ) ++ { ++ my_snprintf ( sError, sizeof(sError), "net read error (expected=%d, got=%d)", uRespLength, iRecvLength ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ // we'll have a message, at least ++ pTls->m_bStats = true; ++ ++ // parse reply ++ m_iCurrentPos = 0; ++ m_pCur = m_pResponse; ++ m_pResponseEnd = m_pResponse + uRespLength; ++ m_bUnpackError = false; ++ ++ if ( uRespStatus!=SEARCHD_OK ) ++ { ++ char * sMessage = UnpackString (); ++ if ( !sMessage ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "no valid response from searchd (status=%d, resplen=%d)", ++ uRespStatus, uRespLength ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ strncpy ( pTls->m_tStats.m_sLastMessage, sMessage, sizeof(pTls->m_tStats.m_sLastMessage) ); ++ SafeDeleteArray ( sMessage ); ++ ++ if ( uRespStatus!=SEARCHD_WARNING ) ++ { ++ my_snprintf ( sError, sizeof(sError), "searchd error: %s", pTls->m_tStats.m_sLastMessage ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ ++ pTls->m_tStats.m_bLastError = true; ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ } ++ ++ if ( !UnpackSchema () ) ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ ++ if ( !UnpackStats ( &pTls->m_tStats ) ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackStats() failed" ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ SPH_RET ( get_rec ( buf, key, key_len ) ); ++} ++ ++ ++// Positions an index cursor to the index specified in key. Fetches the ++// row if any. This is only used to read whole keys. ++int ha_sphinx::index_read_idx ( byte *, uint, const byte *, uint, enum ha_rkey_function ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_WRONG_COMMAND ); ++} ++ ++ ++// Used to read forward through the index. 
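++// Note: no network round trip happens here; this only unpacks the next
++// match from the response buffer that index_read() fetched into m_pResponse.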
++int ha_sphinx::index_next ( byte * buf ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( get_rec ( buf, m_pCurrentKey, m_iCurrentKeyLen ) ); ++} ++ ++ ++int ha_sphinx::index_next_same ( byte * buf, const byte * key, uint keylen ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( get_rec ( buf, key, keylen ) ); ++} ++ ++ ++int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) ++{ ++ SPH_ENTER_METHOD(); ++ ++ if ( m_iCurrentPos>=m_iMatchesTotal ) ++ { ++ SafeDeleteArray ( m_pResponse ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ #if MYSQL_VERSION_ID>50100 ++ my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set ); ++ #endif ++ Field ** field = table->field; ++ ++ // unpack and return the match ++ longlong uMatchID = UnpackDword (); ++ if ( m_bId64 ) ++ uMatchID = ( uMatchID<<32 ) + UnpackDword(); ++ uint32 uMatchWeight = UnpackDword (); ++ ++ field[0]->store ( uMatchID, 1 ); ++ field[1]->store ( uMatchWeight, 1 ); ++ field[2]->store ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, &my_charset_bin ); ++ ++ for ( uint32 i=0; i<m_iAttrs; i++ ) ++ { ++ longlong iValue64 = 0; ++ uint32 uValue = UnpackDword (); ++ if ( m_dAttrs[i].m_uType==SPH_ATTR_BIGINT ) ++ iValue64 = ( (longlong)uValue<<32 ) | UnpackDword(); ++ if ( m_dAttrs[i].m_iField<0 ) ++ { ++ // skip MVA ++ if ( m_dAttrs[i].m_uType & SPH_ATTR_MULTI ) ++ for ( ; uValue>0 && !m_bUnpackError; uValue-- ) ++ UnpackDword(); ++ continue; ++ } ++ ++ Field * af = field [ m_dAttrs[i].m_iField ]; ++ switch ( m_dAttrs[i].m_uType ) ++ { ++ case SPH_ATTR_INTEGER: ++ case SPH_ATTR_ORDINAL: ++ case SPH_ATTR_BOOL: ++ af->store ( uValue, 1 ); ++ break; ++ ++ case SPH_ATTR_FLOAT: ++ af->store ( sphDW2F(uValue) ); ++ break; ++ ++ case SPH_ATTR_TIMESTAMP: ++ if ( af->type()==MYSQL_TYPE_TIMESTAMP ) ++ longstore ( af->ptr, uValue ); // because store() does not accept timestamps ++ else ++ af->store ( uValue, 1 ); ++ break; ++ ++ case SPH_ATTR_BIGINT: ++ af->store ( iValue64, 0 ); ++ break; ++ ++ case ( SPH_ATTR_MULTI | SPH_ATTR_INTEGER ): ++ if ( uValue<=0 ) ++ { ++ // shortcut, empty MVA set ++ af->store ( "", 0, &my_charset_bin ); ++ ++ } else ++ { ++ // convert MVA set to comma-separated string ++ char sBuf[1024]; // FIXME! 
magic size ++ char * pCur = sBuf; ++ ++ for ( ; uValue>0 && !m_bUnpackError; uValue-- ) ++ { ++ uint32 uEntry = UnpackDword (); ++ if ( pCur < sBuf+sizeof(sBuf)-16 ) // 10 chars per 32bit value plus some safety bytes ++ { ++ snprintf ( pCur, sBuf+sizeof(sBuf)-pCur, "%u", uEntry ); ++ while ( *pCur ) *pCur++; ++ if ( uValue>1 ) ++ *pCur++ = ','; // non-trailing commas ++ } ++ } ++ ++ af->store ( sBuf, pCur-sBuf, &my_charset_bin ); ++ } ++ break; ++ ++ default: ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unhandled attr type" ); ++ SafeDeleteArray ( m_pResponse ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ } ++ ++ if ( m_bUnpackError ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: response unpacker failed" ); ++ SafeDeleteArray ( m_pResponse ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ // zero out unmapped fields ++ for ( int i=SPHINXSE_SYSTEM_COLUMNS; i<(int)table->s->fields; i++ ) ++ if ( m_dUnboundFields[i]!=SPH_ATTR_NONE ) ++ switch ( m_dUnboundFields[i] ) ++ { ++ case SPH_ATTR_INTEGER: table->field[i]->store ( 0, 1 ); break; ++ case SPH_ATTR_TIMESTAMP: longstore ( table->field[i]->ptr, 0 ); break; ++ default: ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), ++ "INTERNAL ERROR: unhandled unbound field type %d", m_dUnboundFields[i] ); ++ SafeDeleteArray ( m_pResponse ); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++ } ++ ++ memset ( buf, 0, table->s->null_bytes ); ++ m_iCurrentPos++; ++ ++ #if MYSQL_VERSION_ID > 50100 ++ dbug_tmp_restore_column_map ( table->write_set, org_bitmap ); ++ #endif ++ ++ SPH_RET(0); ++} ++ ++ ++// Used to read backwards through the index. ++int ha_sphinx::index_prev ( byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_WRONG_COMMAND ); ++} ++ ++ ++// index_first() asks for the first key in the index. ++// ++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc, ++// and sql_select.cc. ++int ha_sphinx::index_first ( byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++} ++ ++// index_last() asks for the last key in the index. ++// ++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc, ++// and sql_select.cc. ++int ha_sphinx::index_last ( byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_WRONG_COMMAND ); ++} ++ ++ ++int ha_sphinx::rnd_init ( bool ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET(0); ++} ++ ++ ++int ha_sphinx::rnd_end() ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET(0); ++} ++ ++ ++int ha_sphinx::rnd_next ( byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_RET ( HA_ERR_END_OF_FILE ); ++} ++ ++ ++void ha_sphinx::position ( const byte * ) ++{ ++ SPH_ENTER_METHOD(); ++ SPH_VOID_RET(); ++} ++ ++ ++// This is like rnd_next, but you are given a position to use ++// to determine the row. The position will be of the type that you stored in ++// ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key ++// or position you saved when position() was called. ++// Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. 
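++// Not supported by SphinxSE: matches are consumed as a forward-only stream
++// out of m_pResponse, so there is no saved position to seek back to.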
++int ha_sphinx::rnd_pos ( byte *, byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++#if MYSQL_VERSION_ID>=50030
++int ha_sphinx::info ( uint )
++#else
++void ha_sphinx::info ( uint )
++#endif
++{
++	SPH_ENTER_METHOD();
++
++	if ( table->s->keys>0 )
++		table->key_info[0].rec_per_key[0] = 1;
++
++	#if MYSQL_VERSION_ID>50100
++	stats.records = 20;
++	#else
++	records = 20;
++	#endif
++
++#if MYSQL_VERSION_ID>=50030
++	SPH_RET(0);
++#else
++	SPH_VOID_RET();
++#endif
++}
++
++
++int ha_sphinx::reset ()
++{
++	SPH_ENTER_METHOD();
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::delete_all_rows()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// First you should go read the section "locking functions for mysql" in
++// lock.cc to understand this.
++// This creates a lock on the table. If you are implementing a storage engine
++// that can handle transactions look at ha_berkeley.cc to see how you will
++// want to go about doing this. Otherwise you should consider calling flock()
++// here.
++//
++// Called from lock.cc by lock_external() and unlock_external(). Also called
++// from sql_table.cc by copy_data_between_tables().
++int ha_sphinx::external_lock ( THD *, int )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++THR_LOCK_DATA ** ha_sphinx::store_lock ( THD *, THR_LOCK_DATA ** to,
++	enum thr_lock_type lock_type )
++{
++	SPH_ENTER_METHOD();
++
++	if ( lock_type!=TL_IGNORE && m_tLock.type==TL_UNLOCK )
++		m_tLock.type = lock_type;
++
++	*to++ = &m_tLock;
++	SPH_RET(to);
++}
++
++
++int ha_sphinx::delete_table ( const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Renames a table from one name to another, from an alter table call.
++//
++// If you do not implement this, the default rename_table() is called from
++// handler.cc and it will delete all files with the file extensions returned
++// by bas_ext().
++//
++// Called from sql_table.cc by mysql_rename_table().
++int ha_sphinx::rename_table ( const char *, const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Given a starting key and an ending key, estimate the number of rows that
++// will exist between the two. end_key may be empty, in which case determine
++// if start_key matches any rows.
++//
++// Called from opt_range.cc by check_quick_keys().
++ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(3); // low number to force index usage
++}
++
++
++// create() is called to create a table. The variable name will have the name
++// of the table. When create() is called you do not need to worry about opening
++// the table. Also, the FRM file will have already been created so adjusting
++// create_info will not do you any good. You can overwrite the frm file at this
++// point if you wish to change the table definition, but there are no methods
++// currently provided for doing that.
++//
++// Called from handler.cc by ha_create_table().
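++//
++// For illustration only, here is a minimal SphinxAPI table passing the
++// checks below (the table name, attribute-free layout, and the host, port
++// and index in the CONNECTION string are placeholders):
++//
++//   CREATE TABLE t1 (
++//       id     BIGINT UNSIGNED NOT NULL,
++//       weight INTEGER NOT NULL,
++//       query  VARCHAR(3072) NOT NULL,
++//       INDEX(query)
++//   ) ENGINE=SPHINX CONNECTION="sphinx://127.0.0.1:9312/test";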
++int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
++{
++	SPH_ENTER_METHOD();
++	char sError[256];
++
++	CSphSEShare tInfo;
++	if ( !ParseUrl ( &tInfo, table, true ) )
++		SPH_RET(-1);
++
++	// check SphinxAPI table
++	for ( ; !tInfo.m_bSphinxQL; )
++	{
++		// check system fields (count and types)
++		if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
++				name, SPHINXSE_SYSTEM_COLUMNS );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
++			break;
++		}
++
++		if ( !IsIntegerFieldType ( table->field[1]->type() ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
++			break;
++		}
++
++		enum_field_types f2 = table->field[2]->type();
++		if ( f2!=MYSQL_TYPE_VARCHAR
++			&& f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 3rd column (search query) MUST be varchar or text", name );
++			break;
++		}
++
++		// check attributes
++		int i;
++		for ( i=3; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++
++		if ( i!=(int)table->s->fields )
++			break;
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
++				name, table->field[2]->field_name );
++			break;
++		}
++
++		// all good
++		sError[0] = '\0';
++		break;
++	}
++
++	// check SphinxQL table
++	for ( ; tInfo.m_bSphinxQL; )
++	{
++		sError[0] = '\0';
++
++		// check that 1st column is id, is of int type, and has an index
++		if ( strcmp ( table->field[0]->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
++			break;
++		}
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
++			break;
++		}
++
++		// check column types
++		for ( int i=1; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: column %d (%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++		if ( sError[0] )
++			break;
++
++		// all good
++		break;
++	}
++
++	// report and bail
++	if ( sError[0] )
++	{
++		my_error ( ER_CANT_CREATE_TABLE, MYF(0), sError, -1 );
++		SPH_RET(-1);
++	}
++
++	SPH_RET(0);
++}
++
++// show functions
++
++#if
MYSQL_VERSION_ID<50100 ++#define SHOW_VAR_FUNC_BUFF_SIZE 1024 ++#endif ++ ++CSphSEStats * sphinx_get_stats ( THD * thd, SHOW_VAR * out ) ++{ ++#if MYSQL_VERSION_ID>50100 ++ if ( sphinx_hton_ptr ) ++ { ++ CSphSEThreadData *pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr ); ++ ++ if ( pTls && pTls->m_bStats ) ++ return &pTls->m_tStats; ++ } ++#else ++ CSphSEThreadData *pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot]; ++ if ( pTls && pTls->m_bStats ) ++ return &pTls->m_tStats; ++#endif ++ ++ out->type = SHOW_CHAR; ++ out->value = ""; ++ return 0; ++} ++ ++int sphinx_showfunc_total ( THD * thd, SHOW_VAR * out, char * ) ++{ ++ CSphSEStats * pStats = sphinx_get_stats ( thd, out ); ++ if ( pStats ) ++ { ++ out->type = SHOW_INT; ++ out->value = (char *) &pStats->m_iMatchesTotal; ++ } ++ return 0; ++} ++ ++int sphinx_showfunc_total_found ( THD * thd, SHOW_VAR * out, char * ) ++{ ++ CSphSEStats * pStats = sphinx_get_stats ( thd, out ); ++ if ( pStats ) ++ { ++ out->type = SHOW_INT; ++ out->value = (char *) &pStats->m_iMatchesFound; ++ } ++ return 0; ++} ++ ++int sphinx_showfunc_time ( THD * thd, SHOW_VAR * out, char * ) ++{ ++ CSphSEStats * pStats = sphinx_get_stats ( thd, out ); ++ if ( pStats ) ++ { ++ out->type = SHOW_INT; ++ out->value = (char *) &pStats->m_iQueryMsec; ++ } ++ return 0; ++} ++ ++int sphinx_showfunc_word_count ( THD * thd, SHOW_VAR * out, char * ) ++{ ++ CSphSEStats * pStats = sphinx_get_stats ( thd, out ); ++ if ( pStats ) ++ { ++ out->type = SHOW_INT; ++ out->value = (char *) &pStats->m_iWords; ++ } ++ return 0; ++} ++ ++int sphinx_showfunc_words ( THD * thd, SHOW_VAR * out, char * sBuffer ) ++{ ++#if MYSQL_VERSION_ID>50100 ++ if ( sphinx_hton_ptr ) ++ { ++ CSphSEThreadData * pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr ); ++#else ++ { ++ CSphSEThreadData * pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot]; ++#endif ++ if ( pTls && pTls->m_bStats ) ++ { ++ CSphSEStats * pStats = &pTls->m_tStats; ++ if ( pStats && pStats->m_iWords ) ++ { ++ uint uBuffLen = 0; ++ ++ out->type = SHOW_CHAR; ++ out->value = sBuffer; ++ ++ // the following is partially based on code in sphinx_show_status() ++ sBuffer[0] = 0; ++ for ( int i=0; i<pStats->m_iWords; i++ ) ++ { ++ CSphSEWordStats & tWord = pStats->m_dWords[i]; ++ uBuffLen = my_snprintf ( sBuffer, SHOW_VAR_FUNC_BUFF_SIZE, "%s%s:%d:%d ", sBuffer, ++ tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits ); ++ } ++ ++ if ( uBuffLen > 0 ) ++ { ++ // trim last space ++ sBuffer [ --uBuffLen ] = 0; ++ ++ if ( pTls->m_pQueryCharset ) ++ { ++ // String::c_ptr() will nul-terminate the buffer. ++ // ++ // NOTE: It's not entirely clear whether this conversion is necessary at all. 
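++				// This recodes the collected word list from the query
++				// charset to system_charset_info and copies it back over
++				// sBuffer; note the assumption that the converted text
++				// still fits the SHOW_VAR_FUNC_BUFF_SIZE buffer.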
++ ++ String sConvert; ++ uint iErrors; ++ sConvert.copy ( sBuffer, uBuffLen, pTls->m_pQueryCharset, system_charset_info, &iErrors ); ++ memcpy ( sBuffer, sConvert.c_ptr(), sConvert.length() + 1 ); ++ } ++ } ++ ++ return 0; ++ } ++ } ++ } ++ ++ out->type = SHOW_CHAR; ++ out->value = ""; ++ return 0; ++} ++ ++int sphinx_showfunc_error ( THD * thd, SHOW_VAR * out, char * ) ++{ ++ CSphSEStats * pStats = sphinx_get_stats ( thd, out ); ++ if ( pStats && pStats->m_bLastError ) ++ { ++ out->type = SHOW_CHAR; ++ out->value = pStats->m_sLastMessage; ++ } ++ return 0; ++} ++ ++#if MYSQL_VERSION_ID>50100 ++struct st_mysql_storage_engine sphinx_storage_engine = ++{ ++ MYSQL_HANDLERTON_INTERFACE_VERSION ++}; ++ ++struct st_mysql_show_var sphinx_status_vars[] = ++{ ++ {"sphinx_total", (char *)sphinx_showfunc_total, SHOW_FUNC}, ++ {"sphinx_total_found", (char *)sphinx_showfunc_total_found, SHOW_FUNC}, ++ {"sphinx_time", (char *)sphinx_showfunc_time, SHOW_FUNC}, ++ {"sphinx_word_count", (char *)sphinx_showfunc_word_count, SHOW_FUNC}, ++ {"sphinx_words", (char *)sphinx_showfunc_words, SHOW_FUNC}, ++ {"sphinx_error", (char *)sphinx_showfunc_error, SHOW_FUNC}, ++ {0, 0, (enum_mysql_show_type)0} ++}; ++ ++ ++mysql_declare_plugin(sphinx) ++{ ++ MYSQL_STORAGE_ENGINE_PLUGIN, ++ &sphinx_storage_engine, ++ sphinx_hton_name, ++ "Sphinx developers", ++ sphinx_hton_comment, ++ PLUGIN_LICENSE_GPL, ++ sphinx_init_func, // Plugin Init ++ sphinx_done_func, // Plugin Deinit ++ 0x0001, // 0.1 ++ sphinx_status_vars, ++ NULL, ++ NULL ++} ++mysql_declare_plugin_end; ++ ++#endif // >50100 ++ ++// ++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $ ++// +diff -uNr storage/sphinx/ha_sphinx.h storage/sphinx/ha_sphinx.h +--- storage/sphinx/ha_sphinx.h 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/ha_sphinx.h 2011-10-13 00:59:59.282957578 +0200 +@@ -0,0 +1,169 @@ ++// ++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $ ++// ++ ++#ifdef USE_PRAGMA_INTERFACE ++#pragma interface // gcc class implementation ++#endif ++ ++ ++#if MYSQL_VERSION_ID>=50515 ++#define TABLE_ARG TABLE_SHARE ++#elif MYSQL_VERSION_ID>50100 ++#define TABLE_ARG st_table_share ++#else ++#define TABLE_ARG st_table ++#endif ++ ++ ++#if MYSQL_VERSION_ID>=50120 ++typedef uchar byte; ++#endif ++ ++ ++/// forward decls ++class THD; ++struct CSphReqQuery; ++struct CSphSEShare; ++struct CSphSEAttr; ++struct CSphSEStats; ++struct CSphSEThreadData; ++ ++/// Sphinx SE handler class ++class ha_sphinx : public handler ++{ ++protected: ++ THR_LOCK_DATA m_tLock; ///< MySQL lock ++ ++ CSphSEShare * m_pShare; ///< shared lock info ++ ++ uint m_iMatchesTotal; ++ uint m_iCurrentPos; ++ const byte * m_pCurrentKey; ++ uint m_iCurrentKeyLen; ++ ++ char * m_pResponse; ///< searchd response storage ++ char * m_pResponseEnd; ///< searchd response storage end (points to wilderness!) 
++ char * m_pCur; ///< current position into response ++ bool m_bUnpackError; ///< any errors while unpacking response ++ ++public: ++#if MYSQL_VERSION_ID<50100 ++ ha_sphinx ( TABLE_ARG * table_arg ); // NOLINT ++#else ++ ha_sphinx ( handlerton * hton, TABLE_ARG * table_arg ); ++#endif ++ ~ha_sphinx () {} ++ ++ const char * table_type () const { return "SPHINX"; } ///< SE name for display purposes ++ const char * index_type ( uint ) { return "HASH"; } ///< index type name for display purposes ++ const char ** bas_ext () const; ///< my file extensions ++ ++ #if MYSQL_VERSION_ID>50100 ++ ulonglong table_flags () const { return HA_CAN_INDEX_BLOBS; } ///< bitmap of implemented flags (see handler.h for more info) ++ #else ++ ulong table_flags () const { return HA_CAN_INDEX_BLOBS; } ///< bitmap of implemented flags (see handler.h for more info) ++ #endif ++ ++ ulong index_flags ( uint, uint, bool ) const { return 0; } ///< bitmap of flags that says how SE implements indexes ++ uint max_supported_record_length () const { return HA_MAX_REC_LENGTH; } ++ uint max_supported_keys () const { return 1; } ++ uint max_supported_key_parts () const { return 1; } ++ uint max_supported_key_length () const { return MAX_KEY_LENGTH; } ++ uint max_supported_key_part_length () const { return MAX_KEY_LENGTH; } ++ ++ #if MYSQL_VERSION_ID>50100 ++ virtual double scan_time () { return (double)( stats.records+stats.deleted )/20.0 + 10; } ///< called in test_quick_select to determine if indexes should be used ++ #else ++ virtual double scan_time () { return (double)( records+deleted )/20.0 + 10; } ///< called in test_quick_select to determine if indexes should be used ++ #endif ++ ++ virtual double read_time ( ha_rows rows ) { return (double)rows/20.0 + 1; } ///< index read time estimate ++ ++public: ++ int open ( const char * name, int mode, uint test_if_locked ); ++ int close (); ++ ++ int write_row ( byte * buf ); ++ int update_row ( const byte * old_data, byte * new_data ); ++ int delete_row ( const byte * buf ); ++ int extra ( enum ha_extra_function op ); ++ ++ int index_init ( uint keynr, bool sorted ); // 5.1.x ++ int index_init ( uint keynr ) { return index_init ( keynr, false ); } // 5.0.x ++ ++ int index_end (); ++ int index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag ); ++ int index_read_idx ( byte * buf, uint idx, const byte * key, uint key_len, enum ha_rkey_function find_flag ); ++ int index_next ( byte * buf ); ++ int index_next_same ( byte * buf, const byte * key, uint keylen ); ++ int index_prev ( byte * buf ); ++ int index_first ( byte * buf ); ++ int index_last ( byte * buf ); ++ ++ int get_rec ( byte * buf, const byte * key, uint keylen ); ++ ++ int rnd_init ( bool scan ); ++ int rnd_end (); ++ int rnd_next ( byte * buf ); ++ int rnd_pos ( byte * buf, byte * pos ); ++ void position ( const byte * record ); ++ ++#if MYSQL_VERSION_ID>=50030 ++ int info ( uint ); ++#else ++ void info ( uint ); ++#endif ++ ++ int reset(); ++ int external_lock ( THD * thd, int lock_type ); ++ int delete_all_rows (); ++ ha_rows records_in_range ( uint inx, key_range * min_key, key_range * max_key ); ++ ++ int delete_table ( const char * from ); ++ int rename_table ( const char * from, const char * to ); ++ int create ( const char * name, TABLE * form, HA_CREATE_INFO * create_info ); ++ ++ THR_LOCK_DATA ** store_lock ( THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type ); ++ ++public: ++ virtual const COND * cond_push ( const COND *cond ); ++ virtual void cond_pop (); ++ 
++private: ++ uint32 m_iFields; ++ char ** m_dFields; ++ ++ uint32 m_iAttrs; ++ CSphSEAttr * m_dAttrs; ++ int m_bId64; ++ ++ int * m_dUnboundFields; ++ ++private: ++ int Connect ( const char * sQueryHost, ushort uPort ); ++ int ConnectAPI ( const char * sQueryHost, int iQueryPort ); ++ int HandleMysqlError ( struct st_mysql * pConn, int iErrCode ); ++ ++ uint32 UnpackDword (); ++ char * UnpackString (); ++ bool UnpackSchema (); ++ bool UnpackStats ( CSphSEStats * pStats ); ++ ++ CSphSEThreadData * GetTls (); ++}; ++ ++ ++#if MYSQL_VERSION_ID < 50100 ++bool sphinx_show_status ( THD * thd ); ++#endif ++ ++int sphinx_showfunc_total_found ( THD *, SHOW_VAR *, char * ); ++int sphinx_showfunc_total ( THD *, SHOW_VAR *, char * ); ++int sphinx_showfunc_time ( THD *, SHOW_VAR *, char * ); ++int sphinx_showfunc_word_count ( THD *, SHOW_VAR *, char * ); ++int sphinx_showfunc_words ( THD *, SHOW_VAR *, char * ); ++ ++// ++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $ ++// +diff -uNr storage/sphinx/INSTALL storage/sphinx/INSTALL +--- storage/sphinx/INSTALL 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/INSTALL 2010-07-07 18:12:02.000000000 +0200 +@@ -0,0 +1,48 @@ ++Building MySQL with SphinxSE ++============================= ++ ++Note: BUILD/autorun.sh step on Linux might malfunction with some ++versions of automake; autorun.sh will not fail but the build will. ++automake 1.9.6 is known to work. ++ ++ ++ ++MySQL 5.0.x on Linux ++--------------------- ++ ++tar zxvf mysql-5.0.91.tar.gz ++cp -R mysqlse mysql-5.0.91/sql/sphinx ++cd mysql-5.0.91 ++ ++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff ++sh BUILD/autorun.sh ++./configure --with-sphinx-storage-engine ++make ++ ++ ++ ++MySQL 5.1.x on Linux ++--------------------- ++ ++tar zxvf mysql-5.1.47.tar.gz ++cp -R -p mysqlse mysql-5.1.47/storage/sphinx ++cd mysql-5.1.47 ++ ++sh BUILD/autorun.sh ++./configure --with-plugins=sphinx ++make ++ ++ ++ ++MySQL 5.0.x on Windows ++----------------------- ++ ++tar zxvf mysql-5.0.91.tar.gz ++cp -R mysqlse mysql-5.0.91/sql/sphinx ++cd mysql-5.0.91 ++ ++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff ++win/configure.js WITH_SPHINX_STORAGE_ENGINE ++win/build-vs8 ++ ++--eof-- +diff -uNr storage/sphinx/Makefile.am storage/sphinx/Makefile.am +--- storage/sphinx/Makefile.am 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/Makefile.am 2009-02-13 22:26:46.000000000 +0100 +@@ -0,0 +1,59 @@ ++# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. 
++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ ++#called from the top level Makefile ++ ++MYSQLDATAdir = $(localstatedir) ++MYSQLSHAREdir = $(pkgdatadir) ++MYSQLBASEdir= $(prefix) ++MYSQLLIBdir= $(pkglibdir) ++pkgplugindir = $(pkglibdir)/plugin ++INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \ ++ -I$(top_srcdir)/regex \ ++ -I$(top_srcdir)/sql \ ++ -I$(srcdir) ++SUBDIRS = ../../include ../../mysys ../../strings ../../dbug ../../extra ++WRAPLIBS= ++ ++LDADD = ++ ++DEFS= @DEFS@ \ ++ -D_REENTRANT -D_PTHREADS -DENGINE -DSTORAGE_ENGINE -DMYSQL_SERVER ++ ++noinst_HEADERS = ha_sphinx.h ++ ++EXTRA_LTLIBRARIES = ha_sphinx.la ++pkgplugin_LTLIBRARIES = @plugin_sphinx_shared_target@ sphinx.la ++ ++ha_sphinx_la_LDFLAGS = -module -rpath $(MYSQLLIBdir) ++ha_sphinx_la_CXXFLAGS= $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN ++ha_sphinx_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN ++ha_sphinx_la_SOURCES = ha_sphinx.cc ++ ++sphinx_la_LDFLAGS = -module ++sphinx_la_CXXFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN ++sphinx_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN ++sphinx_la_SOURCES = snippets_udf.cc ++ ++EXTRA_LIBRARIES = libsphinx.a ++noinst_LIBRARIES = @plugin_sphinx_static_target@ ++libsphinx_a_CXXFLAGS = $(AM_CFLAGS) ++libsphinx_a_CFLAGS = $(AM_CFLAGS) ++libsphinx_a_SOURCES= ha_sphinx.cc ++ ++EXTRA_DIST = cmakelists.txt ++# Don't update the files from bitkeeper ++%::SCCS/s.% +diff -uNr storage/sphinx/make-patch.sh storage/sphinx/make-patch.sh +--- storage/sphinx/make-patch.sh 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/make-patch.sh 2008-09-05 20:06:30.000000000 +0200 +@@ -0,0 +1,36 @@ ++#!/bin/sh ++ ++OUT=$1 ++ORIG=$2 ++NEW=$3 ++ ++if [ ! \( "$1" -a "$2" -a "$3" \) ]; then ++ echo "$0 <patch> <original> <new>" ++ exit 1 ++fi ++ ++FILES=' ++/config/ac-macros/ha_sphinx.m4 ++/configure.in ++/libmysqld/Makefile.am ++/sql/handler.cc ++/sql/handler.h ++/sql/Makefile.am ++/sql/mysqld.cc ++/sql/mysql_priv.h ++/sql/set_var.cc ++/sql/sql_lex.h ++/sql/sql_parse.cc ++/sql/sql_yacc.yy ++/sql/structs.h ++/sql/sql_show.cc ++' ++ ++rm -f $OUT ++if [ -e $OUT ]; then ++ exit 1 ++fi ++ ++for name in $FILES; do ++ diff -BNru "$ORIG$name" "$NEW$name" >> $OUT ++done +diff -uNr storage/sphinx/plug.in storage/sphinx/plug.in +--- storage/sphinx/plug.in 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/plug.in 2006-06-07 09:28:43.000000000 +0200 +@@ -0,0 +1,5 @@ ++MYSQL_STORAGE_ENGINE(sphinx,, [Sphinx Storage Engine], ++ [Sphinx Storage Engines], [max,max-no-ndb]) ++MYSQL_PLUGIN_DIRECTORY(sphinx, [storage/sphinx]) ++MYSQL_PLUGIN_STATIC(sphinx, [libsphinx.a]) ++MYSQL_PLUGIN_DYNAMIC(sphinx, [ha_sphinx.la]) +diff -uNr storage/sphinx/snippets_udf.cc storage/sphinx/snippets_udf.cc +--- storage/sphinx/snippets_udf.cc 1970-01-01 01:00:00.000000000 +0100 ++++ storage/sphinx/snippets_udf.cc 2011-01-01 03:33:06.000000000 +0100 +@@ -0,0 +1,768 @@ ++// ++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $ ++// ++ ++// ++// Copyright (c) 2001-2011, Andrew Aksyonoff ++// Copyright (c) 2008-2011, Sphinx Technologies Inc ++// All rights reserved ++// ++// This program is free software; you can redistribute it and/or modify ++// it under the terms of the GNU General Public License. 
You should have ++// received a copy of the GPL license along with this program; if you ++// did not, you can find it at http://www.gnu.org/ ++// ++ ++#include <stdio.h> ++#include <string.h> ++#include <assert.h> ++ ++#include <sys/un.h> ++#include <netdb.h> ++ ++#include <mysql_version.h> ++ ++#if MYSQL_VERSION_ID>50100 ++#include "mysql_priv.h" ++#include <mysql/plugin.h> ++#else ++#include "../mysql_priv.h" ++#endif ++ ++#include <mysys_err.h> ++#include <my_sys.h> ++ ++#if MYSQL_VERSION_ID>=50120 ++typedef uchar byte; ++#endif ++ ++/// partially copy-pasted stuff that should be moved elsewhere ++ ++#if UNALIGNED_RAM_ACCESS ++ ++/// pass-through wrapper ++template < typename T > inline T sphUnalignedRead ( const T & tRef ) ++{ ++ return tRef; ++} ++ ++/// pass-through wrapper ++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal ) ++{ ++ *(T*)pPtr = tVal; ++} ++ ++#else ++ ++/// unaligned read wrapper for some architectures (eg. SPARC) ++template < typename T > ++inline T sphUnalignedRead ( const T & tRef ) ++{ ++ T uTmp; ++ byte * pSrc = (byte *) &tRef; ++ byte * pDst = (byte *) &uTmp; ++ for ( int i=0; i<(int)sizeof(T); i++ ) ++ *pDst++ = *pSrc++; ++ return uTmp; ++} ++ ++/// unaligned write wrapper for some architectures (eg. SPARC) ++template < typename T > ++void sphUnalignedWrite ( void * pPtr, const T & tVal ) ++{ ++ byte * pDst = (byte *) pPtr; ++ byte * pSrc = (byte *) &tVal; ++ for ( int i=0; i<(int)sizeof(T); i++ ) ++ *pDst++ = *pSrc++; ++} ++ ++#endif ++ ++#define SPHINXSE_MAX_ALLOC (16*1024*1024) ++ ++#define SafeDelete(_arg) { if ( _arg ) delete ( _arg ); (_arg) = NULL; } ++#define SafeDeleteArray(_arg) { if ( _arg ) delete [] ( _arg ); (_arg) = NULL; } ++ ++#define Min(a,b) ((a)<(b)?(a):(b)) ++ ++typedef unsigned int DWORD; ++ ++inline DWORD sphF2DW ( float f ) { union { float f; uint32 d; } u; u.f = f; return u.d; } ++ ++static char * sphDup ( const char * sSrc, int iLen=-1 ) ++{ ++ if ( !sSrc ) ++ return NULL; ++ ++ if ( iLen<0 ) ++ iLen = strlen(sSrc); ++ ++ char * sRes = new char [ 1+iLen ]; ++ memcpy ( sRes, sSrc, iLen ); ++ sRes[iLen] = '\0'; ++ return sRes; ++} ++ ++static inline void sphShowErrno ( const char * sCall ) ++{ ++ char sError[256]; ++ snprintf ( sError, sizeof(sError), "%s() failed: [%d] %s", sCall, errno, strerror(errno) ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++} ++ ++static const bool sphReportErrors = true; ++ ++static bool sphSend ( int iFd, const char * pBuffer, int iSize, bool bReportErrors = false ) ++{ ++ assert ( pBuffer ); ++ assert ( iSize > 0 ); ++ ++ const int iResult = send ( iFd, pBuffer, iSize, 0 ); ++ if ( iResult != iSize ) ++ { ++ if ( bReportErrors ) sphShowErrno("send"); ++ return false; ++ } ++ return true; ++} ++ ++static bool sphRecv ( int iFd, char * pBuffer, int iSize, bool bReportErrors = false ) ++{ ++ assert ( pBuffer ); ++ assert ( iSize > 0 ); ++ ++ while ( iSize ) ++ { ++ const int iResult = recv ( iFd, pBuffer, iSize, 0 ); ++ if ( iResult > 0 ) ++ { ++ iSize -= iResult; ++ pBuffer += iSize; ++ } ++ else if ( iResult == 0 ) ++ { ++ if ( bReportErrors ) ++ my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "recv() failed: disconnected" ); ++ return false; ++ } ++ else ++ { ++ if ( bReportErrors ) sphShowErrno("recv"); ++ return false; ++ } ++ } ++ return true; ++} ++ ++enum ++{ ++ SPHINX_SEARCHD_PROTO = 1, ++ ++ SEARCHD_COMMAND_SEARCH = 0, ++ SEARCHD_COMMAND_EXCERPT = 1, ++ ++ VER_COMMAND_SEARCH = 0x116, ++ VER_COMMAND_EXCERPT = 0x100, ++}; ++ ++/// known answers 
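++/// (each searchd reply carries one of these codes in its header; on error
++/// and warning a length-prefixed message follows, which is how
++/// CSphResponse::Read() below reports or skips it)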
++enum ++{ ++ SEARCHD_OK = 0, ///< general success, command-specific reply follows ++ SEARCHD_ERROR = 1, ///< general failure, error message follows ++ SEARCHD_RETRY = 2, ///< temporary failure, error message follows, client should retry later ++ SEARCHD_WARNING = 3 ///< general success, warning message and command-specific reply follow ++}; ++ ++#define SPHINXSE_DEFAULT_SCHEME "sphinx" ++#define SPHINXSE_DEFAULT_HOST "127.0.0.1" ++#define SPHINXSE_DEFAULT_PORT 9312 ++#define SPHINXSE_DEFAULT_INDEX "*" ++ ++class CSphBuffer ++{ ++private: ++ bool m_bOverrun; ++ int m_iSize; ++ int m_iLeft; ++ char * m_pBuffer; ++ char * m_pCurrent; ++ ++public: ++ CSphBuffer ( const int iSize ) ++ : m_bOverrun ( false ) ++ , m_iSize ( iSize ) ++ , m_iLeft ( iSize ) ++ { ++ assert ( iSize > 0 ); ++ m_pBuffer = new char[iSize]; ++ m_pCurrent = m_pBuffer; ++ } ++ ++ ~CSphBuffer () ++ { ++ SafeDelete ( m_pBuffer ); ++ } ++ ++ const char * Ptr() const { return m_pBuffer; } ++ ++ bool Finalize() ++ { ++ return !( m_bOverrun || m_iLeft != 0 || m_pCurrent - m_pBuffer != m_iSize ); ++ } ++ ++ void SendBytes ( const void * pBytes, int iBytes ); ++ ++ void SendWord ( short int v ) { v = ntohs(v); SendBytes ( &v, sizeof(v) ); } ++ void SendInt ( int v ) { v = ntohl(v); SendBytes ( &v, sizeof(v) ); } ++ void SendDword ( DWORD v ) { v = ntohl(v) ;SendBytes ( &v, sizeof(v) ); } ++ void SendUint64 ( ulonglong v ) { SendDword ( uint(v>>32) ); SendDword ( uint(v&0xFFFFFFFFUL) ); } ++ void SendString ( const char * v ) { SendString ( v, strlen(v) ); } ++ void SendString ( const char * v, int iLen ) { SendDword(iLen); SendBytes ( v, iLen ); } ++ void SendFloat ( float v ) { SendDword ( sphF2DW(v) ); } ++}; ++ ++void CSphBuffer::SendBytes ( const void * pBytes, int iBytes ) ++{ ++ if ( m_iLeft < iBytes ) ++ { ++ m_bOverrun = true; ++ return; ++ } ++ ++ memcpy ( m_pCurrent, pBytes, iBytes ); ++ ++ m_pCurrent += iBytes; ++ m_iLeft -= iBytes; ++} ++ ++struct CSphUrl ++{ ++ char * m_sBuffer; ++ char * m_sFormatted; ++ ++ char * m_sScheme; ++ char * m_sHost; ++ char * m_sIndex; ++ ++ int m_iPort; ++ ++ CSphUrl() ++ : m_sBuffer ( NULL ) ++ , m_sFormatted ( NULL ) ++ , m_sScheme ( SPHINXSE_DEFAULT_SCHEME ) ++ , m_sHost ( SPHINXSE_DEFAULT_HOST ) ++ , m_sIndex ( SPHINXSE_DEFAULT_INDEX ) ++ , m_iPort ( SPHINXSE_DEFAULT_PORT ) ++ {} ++ ++ ~CSphUrl() ++ { ++ SafeDeleteArray ( m_sFormatted ); ++ SafeDeleteArray ( m_sBuffer ); ++ } ++ ++ bool Parse ( const char * sUrl, int iLen ); ++ int Connect(); ++ const char * Format(); ++}; ++ ++const char * CSphUrl::Format() ++{ ++ if ( !m_sFormatted ) ++ { ++ int iSize = 15 + strlen(m_sHost) + strlen(m_sIndex); ++ m_sFormatted = new char [ iSize ]; ++ if ( m_iPort ) ++ snprintf ( m_sFormatted, iSize, "inet://%s:%d/%s", m_sHost, m_iPort, m_sIndex ); ++ else ++ snprintf ( m_sFormatted, iSize, "unix://%s/%s", m_sHost, m_sIndex ); ++ } ++ return m_sFormatted; ++} ++ ++// the following scheme variants are recognized ++// ++// inet://host/index ++// inet://host:port/index ++// unix://unix/domain/socket:index ++// unix://unix/domain/socket ++bool CSphUrl::Parse ( const char * sUrl, int iLen ) ++{ ++ bool bOk = true; ++ while ( iLen ) ++ { ++ bOk = false; ++ ++ m_sBuffer = sphDup ( sUrl, iLen ); ++ m_sScheme = m_sBuffer; ++ ++ m_sHost = strstr ( m_sBuffer, "://" ); ++ if ( !m_sHost ) ++ break; ++ m_sHost[0] = '\0'; ++ m_sHost += 2; ++ ++ if ( !strcmp ( m_sScheme, "unix" ) ) ++ { ++ // unix-domain socket ++ m_iPort = 0; ++ if (!( m_sIndex = strrchr ( m_sHost, ':' ) )) ++ m_sIndex = SPHINXSE_DEFAULT_INDEX; ++ 
else ++ { ++ *m_sIndex++ = '\0'; ++ if ( !*m_sIndex ) ++ m_sIndex = SPHINXSE_DEFAULT_INDEX; ++ } ++ bOk = true; ++ break; ++ } ++ if( strcmp ( m_sScheme, "sphinx" ) != 0 && strcmp ( m_sScheme, "inet" ) != 0 ) ++ break; ++ ++ // inet ++ m_sHost++; ++ char * sPort = strchr ( m_sHost, ':' ); ++ if ( sPort ) ++ { ++ *sPort++ = '\0'; ++ if ( *sPort ) ++ { ++ m_sIndex = strchr ( sPort, '/' ); ++ if ( m_sIndex ) ++ *m_sIndex++ = '\0'; ++ else ++ m_sIndex = SPHINXSE_DEFAULT_INDEX; ++ ++ m_iPort = atoi(sPort); ++ if ( !m_iPort ) ++ m_iPort = SPHINXSE_DEFAULT_PORT; ++ } ++ } else ++ { ++ m_sIndex = strchr ( m_sHost, '/' ); ++ if ( m_sIndex ) ++ *m_sIndex++ = '\0'; ++ else ++ m_sIndex = SPHINXSE_DEFAULT_INDEX; ++ } ++ ++ bOk = true; ++ break; ++ } ++ ++ return bOk; ++} ++ ++int CSphUrl::Connect() ++{ ++ struct sockaddr_in sin; ++#ifndef __WIN__ ++ struct sockaddr_un saun; ++#endif ++ ++ int iDomain = 0; ++ int iSockaddrSize = 0; ++ struct sockaddr * pSockaddr = NULL; ++ ++ in_addr_t ip_addr; ++ ++ if ( m_iPort ) ++ { ++ iDomain = AF_INET; ++ iSockaddrSize = sizeof(sin); ++ pSockaddr = (struct sockaddr *) &sin; ++ ++ memset ( &sin, 0, sizeof(sin) ); ++ sin.sin_family = AF_INET; ++ sin.sin_port = htons(m_iPort); ++ ++ // resolve address ++ if ( (int)( ip_addr=inet_addr(m_sHost) ) != (int)INADDR_NONE ) ++ memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) ); ++ else ++ { ++ int tmp_errno; ++ struct hostent tmp_hostent, *hp; ++ char buff2 [ GETHOSTBYNAME_BUFF_SIZE ]; ++ ++ hp = my_gethostbyname_r ( m_sHost, &tmp_hostent, ++ buff2, sizeof(buff2), &tmp_errno ); ++ if ( !hp ) ++ { ++ my_gethostbyname_r_free(); ++ ++ char sError[256]; ++ snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", m_sHost ); ++ ++ my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ return -1; ++ } ++ ++ memcpy ( &sin.sin_addr, hp->h_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) ); ++ my_gethostbyname_r_free(); ++ } ++ } ++ else ++ { ++#ifndef __WIN__ ++ iDomain = AF_UNIX; ++ iSockaddrSize = sizeof(saun); ++ pSockaddr = (struct sockaddr *) &saun; ++ ++ memset ( &saun, 0, sizeof(saun) ); ++ saun.sun_family = AF_UNIX; ++ strncpy ( saun.sun_path, m_sHost, sizeof(saun.sun_path)-1 ); ++#else ++ my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "Unix-domain sockets are not supported on Windows" ); ++ return -1; ++#endif ++ } ++ ++ // connect to searchd and exchange versions ++ uint uServerVersion; ++ uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO ); ++ int iSocket = -1; ++ char * pError = NULL; ++ do ++ { ++ iSocket = socket ( iDomain, SOCK_STREAM, 0 ); ++ if ( iSocket == -1 ) ++ { ++ pError = "Failed to create client socket"; ++ break; ++ } ++ ++ if ( connect ( iSocket, pSockaddr, iSockaddrSize ) == -1) ++ { ++ pError = "Failed to connect to searchd"; ++ break; ++ } ++ ++ if ( !sphRecv ( iSocket, (char *)&uServerVersion, sizeof(uServerVersion) ) ) ++ { ++ pError = "Failed to receive searchd version"; ++ break; ++ } ++ ++ if ( !sphSend ( iSocket, (char *)&uClientVersion, sizeof(uClientVersion) ) ) ++ { ++ pError = "Failed to send client version"; ++ break; ++ } ++ } ++ while(0); ++ ++ // fixme: compare versions? 
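++	// (the handshake above just swaps raw uint32 protocol versions: read
++	// searchd's version, then send our SPHINX_SEARCHD_PROTO; no actual
++	// compatibility check is performed yet, hence the fixme)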
++ ++ if ( pError ) ++ { ++ char sError[1024]; ++ snprintf ( sError, sizeof(sError), "%s [%d] %s", Format(), errno, strerror(errno) ); ++ my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError ); ++ ++ if ( iSocket != -1 ) ++ close ( iSocket ); ++ ++ return -1; ++ } ++ ++ return iSocket; ++} ++ ++struct CSphResponse ++{ ++ char * m_pBuffer; ++ char * m_pBody; ++ ++ CSphResponse () ++ : m_pBuffer ( NULL ) ++ , m_pBody ( NULL ) ++ {} ++ ++ CSphResponse ( DWORD uSize ) ++ : m_pBody ( NULL ) ++ { ++ m_pBuffer = new char[uSize]; ++ } ++ ++ ~CSphResponse () ++ { ++ SafeDeleteArray ( m_pBuffer ); ++ } ++ ++ static CSphResponse * Read ( int iSocket, int iClientVersion ); ++}; ++ ++CSphResponse * ++CSphResponse::Read ( int iSocket, int iClientVersion ) ++{ ++ char sHeader[8]; ++ if ( !sphRecv ( iSocket, sHeader, sizeof(sHeader) ) ) ++ return NULL; ++ ++ int iStatus = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[0] ) ); ++ int iVersion = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[2] ) ); ++ DWORD uLength = ntohl ( sphUnalignedRead ( *(DWORD *) &sHeader[4] ) ); ++ ++ if ( iVersion < iClientVersion ) // fixme: warn ++ ; ++ ++ if ( uLength <= SPHINXSE_MAX_ALLOC ) ++ { ++ CSphResponse * pResponse = new CSphResponse ( uLength ); ++ if ( !sphRecv ( iSocket, pResponse->m_pBuffer, uLength ) ) ++ { ++ SafeDelete ( pResponse ); ++ return NULL; ++ } ++ ++ pResponse->m_pBody = pResponse->m_pBuffer; ++ if ( iStatus != SEARCHD_OK ) ++ { ++ DWORD uSize = ntohl ( *(DWORD *)pResponse->m_pBuffer ); ++ if ( iStatus == SEARCHD_WARNING ) ++ pResponse->m_pBody += uSize; // fixme: report the warning somehow ++ else ++ { ++ char * sMessage = sphDup ( pResponse->m_pBuffer + sizeof(DWORD), uSize ); ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sMessage ); ++ SafeDelete ( sMessage ); ++ SafeDelete ( pResponse ); ++ return NULL; ++ } ++ } ++ return pResponse; ++ } ++ return NULL; ++} ++ ++/// udf ++ ++extern "C" ++{ ++ my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage ); ++ void sphinx_snippets_deinit ( UDF_INIT * pUDF ); ++ char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * sError ); ++}; ++ ++#define MAX_MESSAGE_LENGTH 255 ++#define MAX_RESULT_LENGTH 255 ++ ++struct CSphSnippets ++{ ++ CSphUrl m_tUrl; ++ CSphResponse * m_pResponse; ++ ++ int m_iBeforeMatch; ++ int m_iAfterMatch; ++ int m_iChunkSeparator; ++ int m_iLimit; ++ int m_iAround; ++ int m_iFlags; ++ ++ CSphSnippets() ++ : m_pResponse(NULL) ++ , m_iBeforeMatch(0) ++ , m_iAfterMatch(0) ++ , m_iChunkSeparator(0) ++ // defaults ++ , m_iLimit(256) ++ , m_iAround(5) ++ , m_iFlags(1) ++ { ++ } ++ ++ ~CSphSnippets() ++ { ++ SafeDelete ( m_pResponse ); ++ } ++}; ++ ++#define KEYWORD(NAME) else if ( strncmp ( NAME, pArgs->attributes[i], pArgs->attribute_lengths[i] ) == 0 ) ++ ++#define CHECK_TYPE(TYPE) \ ++ if ( pArgs->arg_type[i] != TYPE ) \ ++ { \ ++ snprintf ( sMessage, MAX_MESSAGE_LENGTH, \ ++ "%.*s argument must be a string", \ ++ (int)pArgs->attribute_lengths[i], \ ++ pArgs->attributes[i] ); \ ++ bFail = true; \ ++ break; \ ++ } \ ++ if ( TYPE == STRING_RESULT && !pArgs->args[i] ) \ ++ { \ ++ snprintf ( sMessage, MAX_MESSAGE_LENGTH, \ ++ "%.*s argument must be constant (and not NULL)", \ ++ (int)pArgs->attribute_lengths[i], \ ++ pArgs->attributes[i] ); \ ++ bFail = true; \ ++ break; \ ++ } ++ ++#define STRING CHECK_TYPE(STRING_RESULT) ++#define INT CHECK_TYPE(INT_RESULT); int iValue = *(long long *)pArgs->args[i] ++ ++my_bool 
sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage ) ++{ ++ if ( pArgs->arg_count < 3 ) ++ { ++ strncpy ( sMessage, "insufficient arguments", MAX_MESSAGE_LENGTH ); ++ return 1; ++ } ++ ++ bool bFail = false; ++ CSphSnippets * pOpts = new CSphSnippets; ++ for ( uint i = 0; i < pArgs->arg_count; i++ ) ++ { ++ if ( i < 3 ) ++ { ++ if ( pArgs->arg_type[i] != STRING_RESULT ) ++ { ++ strncpy ( sMessage, "first three arguments must be of string type", MAX_MESSAGE_LENGTH ); ++ bFail = true; ++ break; ++ } ++ } ++ KEYWORD("sphinx") ++ { ++ STRING; ++ if ( !pOpts->m_tUrl.Parse ( pArgs->args[i], pArgs->lengths[i] ) ) ++ { ++ strncpy ( sMessage, "failed to parse connection string", MAX_MESSAGE_LENGTH ); ++ bFail = true; ++ break; ++ } ++ } ++ KEYWORD("before_match") { STRING; pOpts->m_iBeforeMatch = i; } ++ KEYWORD("after_match") { STRING; pOpts->m_iAfterMatch = i; } ++ KEYWORD("chunk_separator") { STRING; pOpts->m_iChunkSeparator = i; } ++ KEYWORD("limit") { INT; pOpts->m_iLimit = iValue; } ++ KEYWORD("around") { INT; pOpts->m_iAround = iValue; } ++ KEYWORD("exact_phrase") { INT; if ( iValue ) pOpts->m_iFlags |= 2; } ++ KEYWORD("single_passage") { INT; if ( iValue ) pOpts->m_iFlags |= 4; } ++ KEYWORD("use_boundaries") { INT; if ( iValue ) pOpts->m_iFlags |= 8; } ++ KEYWORD("weight_order") { INT; if ( iValue ) pOpts->m_iFlags |= 16; } ++ else ++ { ++ snprintf ( sMessage, MAX_MESSAGE_LENGTH, "unrecognized argument: %.*s", ++ (int)pArgs->attribute_lengths[i], pArgs->attributes[i] ); ++ bFail = true; ++ break; ++ } ++ } ++ ++ if ( bFail ) ++ { ++ SafeDelete ( pOpts ); ++ return 1; ++ } ++ pUDF->ptr = (char *)pOpts; ++ return 0; ++} ++ ++#undef STRING ++#undef INT ++#undef KEYWORD ++#undef CHECK_TYPE ++ ++#define ARG(i) pArgs->args[i], pArgs->lengths[i] ++#define ARG_LEN(VAR, LEN) ( VAR ? pArgs->lengths[VAR] : LEN ) ++ ++#define SEND_STRING(INDEX, DEFAULT) \ ++ if ( INDEX ) \ ++ tBuffer.SendString ( ARG(INDEX) ); \ ++ else \ ++ tBuffer.SendString ( DEFAULT, sizeof(DEFAULT) - 1 ); ++ ++ ++char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * pError ) ++{ ++ CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr; ++ assert ( pOpts ); ++ ++ if ( !pArgs->args[0] || !pArgs->args[1] || !pArgs->args[2] ) ++ { ++ *pIsNull = 1; ++ return sResult; ++ } ++ ++ const int iSize = ++ 8 + // header ++ 8 + ++ 4 + pArgs->lengths[1] + // index ++ 4 + pArgs->lengths[2] + // words ++ 4 + ARG_LEN ( pOpts->m_iBeforeMatch, 3 ) + ++ 4 + ARG_LEN ( pOpts->m_iAfterMatch, 4 ) + ++ 4 + ARG_LEN ( pOpts->m_iChunkSeparator, 5 ) + ++ 12 + ++ 4 + pArgs->lengths[0]; // document ++ ++ CSphBuffer tBuffer(iSize); ++ ++ tBuffer.SendWord ( SEARCHD_COMMAND_EXCERPT ); ++ tBuffer.SendWord ( VER_COMMAND_EXCERPT ); ++ tBuffer.SendDword ( iSize - 8 ); ++ ++ tBuffer.SendDword ( 0 ); ++ tBuffer.SendDword ( pOpts->m_iFlags ); ++ ++ tBuffer.SendString ( ARG(1) ); // index ++ tBuffer.SendString ( ARG(2) ); // words ++ ++ SEND_STRING ( pOpts->m_iBeforeMatch, "<b>" ); ++ SEND_STRING ( pOpts->m_iAfterMatch, "</b>" ); ++ SEND_STRING ( pOpts->m_iChunkSeparator, " ... 
" ); ++ ++ tBuffer.SendInt ( pOpts->m_iLimit ); ++ tBuffer.SendInt ( pOpts->m_iAround ); ++ ++ // single document ++ tBuffer.SendInt ( 1 ); ++ tBuffer.SendString ( ARG(0) ); ++ ++ int iSocket = -1; ++ do ++ { ++ if ( !tBuffer.Finalize() ) ++ { ++ my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: failed to build request" ); ++ break; ++ } ++ ++ iSocket = pOpts->m_tUrl.Connect(); ++ if ( iSocket == -1 ) break; ++ if ( !sphSend ( iSocket, tBuffer.Ptr(), iSize, sphReportErrors ) ) break; ++ ++ CSphResponse * pResponse = CSphResponse::Read ( iSocket, 0x100 ); ++ if ( !pResponse ) break; ++ ++ close ( iSocket ); ++ pOpts->m_pResponse = pResponse; ++ *pLength = ntohl( *(DWORD *)pResponse->m_pBody ); ++ return pResponse->m_pBody + sizeof(DWORD); ++ } ++ while(0); ++ ++ if ( iSocket != -1 ) ++ close ( iSocket ); ++ ++ *pError = 1; ++ return sResult; ++} ++ ++#undef SEND_STRING ++#undef ARG_LEN ++#undef ARG ++ ++void sphinx_snippets_deinit ( UDF_INIT * pUDF ) ++{ ++ CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr; ++ SafeDelete ( pOpts ); ++} ++ ++// ++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $ ++// diff --git a/component/mysql-5.5/mysql_create_system_tables__no_test.patch b/component/mysql-5.5/mysql_create_system_tables__no_test.patch new file mode 100644 index 0000000000000000000000000000000000000000..8d917ea1ad57f02c3e9e0bc94038c58815e7d6a2 --- /dev/null +++ b/component/mysql-5.5/mysql_create_system_tables__no_test.patch @@ -0,0 +1,27 @@ +# 33_scripts__mysql_create_system_tables__no_test.dpatch by <ch@debian.org> + +A user with no password prevents a normal user from login under certain +circumstances as it is checked first. +See http://bugs.debian.org/301741 +and http://bugs.mysql.com/bug.php?id=6901 + +--- scripts/mysql_system_tables_data.sql 2008-12-04 22:59:44.000000000 +0100 ++++ scripts/mysql_system_tables_data.sql 2008-12-04 23:00:07.000000000 +0100 +@@ -26,8 +26,6 @@ + -- Fill "db" table with default grants for anyone to + -- access database 'test' and 'test_%' if "db" table didn't exist + CREATE TEMPORARY TABLE tmp_db LIKE db; +-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); +-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y'); + INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0; + DROP TABLE tmp_db; + +@@ -40,8 +38,6 @@ + REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','' FROM dual WHERE LOWER( @current_hostname) != 'localhost'; + REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'',''); + REPLACE INTO tmp_user VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'',''); +-INSERT INTO tmp_user (host,user) VALUES ('localhost',''); +-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost'; + INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0; + DROP TABLE tmp_user; + diff --git a/component/ncurses/buildout.cfg b/component/ncurses/buildout.cfg index 3944e63180e01cc713d5e03575518e202f2c6102..63dea92837fb3305b67ef0d18aa2ded492126ccb 100644 --- 
a/component/ncurses/buildout.cfg +++ b/component/ncurses/buildout.cfg @@ -15,10 +15,9 @@ configure-options = --enable-rpath # tricky way to rerun with --enable-widec make-targets = - install && (for i in curses unctrl eti form menu panel term; do ln -sf ncurses/$i.h ${buildout:parts-directory}/${:_buildout_section_name_}/include/$i.h; done) && ./configure ${:configure-options} --enable-widec && make install + install && (for i in curses unctrl eti form menu panel term; do ln -sf ncurses/$i.h ${buildout:parts-directory}/${:_buildout_section_name_}/include/$i.h; done; ln -sf libncurses.so ${buildout:parts-directory}/${:_buildout_section_name_}/lib/libcurses.so) && ./configure ${:configure-options} --enable-widec && make ${:make-options} install # pass dummy LDCONFIG to skip needless calling of ldconfig by non-root user environment = - LDFLAGS =-Wl,--as-needed LDCONFIG=/bin/echo make-options = -j1 diff --git a/component/openldap/buildout.cfg b/component/openldap/buildout.cfg index c09a6692618d188d0dac7894abc17c1adf896be3..d8ea71c8143970f9817091c7931e7edf93f58b4d 100644 --- a/component/openldap/buildout.cfg +++ b/component/openldap/buildout.cfg @@ -7,8 +7,8 @@ extends = [openldap] recipe = hexagonit.recipe.cmmi -url = ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.25.tgz -md5sum = ec63f9c2add59f323a0459128846905b +url = ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.26.tgz +md5sum = f36f3086031dd56ae94f722ffae8df5e configure-options = --disable-static --disable-slapd diff --git a/component/percona-toolkit/buildout.cfg b/component/percona-toolkit/buildout.cfg new file mode 100644 index 0000000000000000000000000000000000000000..847ec6540d30a7def4386edb47925a8d5a5f03a8 --- /dev/null +++ b/component/percona-toolkit/buildout.cfg @@ -0,0 +1,16 @@ +[buildout] +extends = + ../perl/buildout.cfg + ../perl-DBI/buildout.cfg + ../perl-DBD-mariadb/buildout.cfg +parts = + percona-toolkit + +[percona-toolkit] +recipe = hexagonit.recipe.cmmi +depends = + ${perl:version} +url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-1.0.1.tar.gz +md5sum = 1d843b1b3ebd2eacfa3bf95ef2a00557 +configure-command = + ${perl:location}/bin/perl Makefile.PL diff --git a/component/perl-DBD-MySQL/buildout.cfg b/component/perl-DBD-MySQL/buildout.cfg index f9475e66a10316e536c2311dcd8c1de85ce210fe..8be6ddab2e236de40d2bbb2e6d541fd8bccea604 100644 --- a/component/perl-DBD-MySQL/buildout.cfg +++ b/component/perl-DBD-MySQL/buildout.cfg @@ -1,20 +1,11 @@ [buildout] extends = - ../perl/buildout.cfg - ../perl-DBI/buildout.cfg ../mysql-tritonn-5.0/buildout.cfg - ../zlib/buildout.cfg - ../openssl/buildout.cfg + ../perl-DBD-common/buildout.cfg + parts = perl-DBD-MySQL -[perl-DBD-MySQL-patch] -recipe = hexagonit.recipe.download -md5sum = e12e9233f20b0370cfcf5228ea767fbc -url = ${:_profile_base_location_}/${:filename} -filename = DBD-mysql-4.019.rpathsupport.patch -download-only = true - [perl-DBD-MySQL] recipe = hexagonit.recipe.cmmi version = 4.019 diff --git a/component/perl-DBD-MySQL/DBD-mysql-4.019.rpathsupport.patch b/component/perl-DBD-common/DBD-mysql-4.019.rpathsupport.patch similarity index 100% rename from component/perl-DBD-MySQL/DBD-mysql-4.019.rpathsupport.patch rename to component/perl-DBD-common/DBD-mysql-4.019.rpathsupport.patch diff --git a/component/perl-DBD-common/buildout.cfg b/component/perl-DBD-common/buildout.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4f2229756543b9463705f16fded5c5e6dafb1c4f --- /dev/null +++ 
b/component/perl-DBD-common/buildout.cfg @@ -0,0 +1,16 @@ +# Please use perl-DBD-MySQL or perl-DBD-mariadb +[buildout] +extends = + ../perl/buildout.cfg + ../perl-DBI/buildout.cfg + ../zlib/buildout.cfg + ../openssl/buildout.cfg + +[perl-DBD-MySQL-patch] +recipe = hexagonit.recipe.download +md5sum = e12e9233f20b0370cfcf5228ea767fbc +url = ${:_profile_base_location_}/${:filename} +filename = DBD-mysql-4.019.rpathsupport.patch +download-only = true + + diff --git a/component/perl-DBD-mariadb/buildout.cfg b/component/perl-DBD-mariadb/buildout.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ac379cb8763273a029a7468868de56b8afdfba03 --- /dev/null +++ b/component/perl-DBD-mariadb/buildout.cfg @@ -0,0 +1,23 @@ +[buildout] +extends = + ../mariadb/buildout.cfg + ../perl-DBD-common/buildout.cfg + +parts = + perl-DBD-mariadb + +[perl-DBD-mariadb] +recipe = hexagonit.recipe.cmmi +version = 4.019 +depends = + ${perl:version} + ${perl-DBI:version} +url = http://search.cpan.org/CPAN/authors/id/C/CA/CAPTTOFU/DBD-mysql-4.019.tar.gz +md5sum = 566d98ab8ffac9626a31f6f6d455558e +patches = + ${perl-DBD-MySQL-patch:location}/${perl-DBD-MySQL-patch:filename} +patch-options = -p1 +configure-command = + ${perl:location}/bin/perl Makefile.PL --mysql_config=${mariadb:location}/bin/mysql_config +environment = + OTHERLDFLAGS=-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${mariadb:location}/lib/mysql -Wl,-rpath=${openssl:location}/lib \ No newline at end of file diff --git a/component/perl/buildout.cfg b/component/perl/buildout.cfg index 8673f03b367d88516c532944ac0b609f9f2710ec..c27ca6d56574ea9b77333eb06b1448e96d34a88b 100644 --- a/component/perl/buildout.cfg +++ b/component/perl/buildout.cfg @@ -14,9 +14,11 @@ filename = ${:_buildout_section_name_} [perl] recipe = hexagonit.recipe.cmmi -version = 5.14.1 +depends = + ${gdbm:version} +version = 5.14.2 url = http://www.cpan.org/src/5.0/perl-${:version}.tar.bz2 -md5sum = 97cd306a2c22929cc141a09568f43bb0 +md5sum = 04a4c5d3c1f9f19d77daff8e8cd19a26 siteprefix = ${buildout:parts-directory}/site_${:_buildout_section_name_} patch-options = -p1 patches = diff --git a/component/python-2.6/buildout.cfg b/component/python-2.6/buildout.cfg index 6755b652e1f468a03320d81336b3a63443e91693..cf1f1634707b5ee91f7942ada40d0789bd1ece64 100644 --- a/component/python-2.6/buildout.cfg +++ b/component/python-2.6/buildout.cfg @@ -24,6 +24,8 @@ filename = python-2.6.6-no_system_inc_dirs.patch [python2.6] recipe = hexagonit.recipe.cmmi +depends = + ${gdbm:version} # This is actually the default setting for prefix, but we can't use it in # other settings in this part if we don't set it explicitly here. prefix = ${buildout:parts-directory}/${:_buildout_section_name_} diff --git a/component/python-2.7/buildout.cfg b/component/python-2.7/buildout.cfg index 765488f74e0c1a4202ae3966454747b41a0ed630..e4a5f5cbcb7aa889f61c359bcf94003f8ac93737 100644 --- a/component/python-2.7/buildout.cfg +++ b/component/python-2.7/buildout.cfg @@ -38,6 +38,8 @@ python = python2.7 [python2.7common] recipe = hexagonit.recipe.cmmi +depends = + ${gdbm:version} # This is actually the default setting for prefix, but we can't use it in # other settings in this part if we don't set it explicitly here. 
prefix = ${buildout:parts-directory}/${:_buildout_section_name_} diff --git a/component/rdiff-backup/buildout.cfg b/component/rdiff-backup/buildout.cfg index b6d15bd56f379e0b5b51b72a81e1a2b95fd6af33..e3678ddff245a30206b10486528aa677677509dd 100644 --- a/component/rdiff-backup/buildout.cfg +++ b/component/rdiff-backup/buildout.cfg @@ -13,7 +13,7 @@ library-dirs = ${librsync:location}/lib/ rpath = ${librsync:location}/lib/ -find-links = http://download.savannah.nongnu.org/releases/rdiff-backup/rdiff-backup-1.0.5.tar.gz +find-links = http://pkgs.fedoraproject.org/repo/pkgs/rdiff-backup/rdiff-backup-1.0.5.tar.gz/fa2a165fa07a94be52c52e3545bc7758/rdiff-backup-1.0.5.tar.gz [rdiff-backup] # Scripts only generation part for rdiff-backup diff --git a/component/sphinx/buildout.cfg b/component/sphinx/buildout.cfg index 10257b2e948376e12e1ba236accd964f40697cd4..bb8b0aa7c192c88870a8c49586f4aeb530ca15f3 100644 --- a/component/sphinx/buildout.cfg +++ b/component/sphinx/buildout.cfg @@ -18,8 +18,8 @@ filename = sphinx-1.10-beta-snowball.patch [sphinx] recipe = hexagonit.recipe.cmmi -url = http://sphinxsearch.com/files/sphinx-2.0.1-beta.tar.gz -md5sum = 95c217d81d0b7a4ff73d5297318c3481 +url = http://sphinxsearch.com/files/sphinx-2.0.2-beta.tar.gz +md5sum = fafe0f1a71d0ded32404c067eba7d0b3 configure-options = --with-mysql --with-mysql-includes=${mariadb:location}/include/mysql diff --git a/component/sqlite3/buildout.cfg b/component/sqlite3/buildout.cfg index 168ca9b5fde2c29861cb07c30e45bc441723ce57..05422fa1cb0f5e68e0159c688cb4ff0318af0257 100644 --- a/component/sqlite3/buildout.cfg +++ b/component/sqlite3/buildout.cfg @@ -5,8 +5,8 @@ parts = [sqlite3] recipe = hexagonit.recipe.cmmi -url = http://www.sqlite.org/sqlite-autoconf-3070800.tar.gz -md5sum = 6bfb46d73caaa1bbbcd2b52184b6c542 +url = http://www.sqlite.org/sqlite-autoconf-3070900.tar.gz +md5sum = dce303524736fe89a76b8ed29d566352 configure-options = --disable-static --enable-readline diff --git a/component/stunnel/buildout.cfg b/component/stunnel/buildout.cfg index 6381abbb4e292830ecf6f28cb5dd15a11887c2ef..607e0c794cf329926097383bf0e2c863025bcc7e 100644 --- a/component/stunnel/buildout.cfg +++ b/component/stunnel/buildout.cfg @@ -17,8 +17,8 @@ filename = stunnel-4-hooks.py [stunnel-4] recipe = hexagonit.recipe.cmmi -url = http://mirror.bit.nl/stunnel/stunnel-4.46.tar.gz -md5sum = 978030ff42f087ec26eb8a095ab69994 +url = http://mirror.bit.nl/stunnel/stunnel-4.50.tar.gz +md5sum = d68b4565294496a8bdf23c728a679f53 pre-configure-hook = ${stunnel-4-hook-download:location}/${stunnel-4-hook-download:filename}:pre_configure_hook configure-options = --enable-ipv6 diff --git a/component/xtrabackup/buildout.cfg b/component/xtrabackup/buildout.cfg index 753376d34c0620b1809677a32c637de1e3c12695..6769e684398de27631b19fe9c324c9d954188360 100644 --- a/component/xtrabackup/buildout.cfg +++ b/component/xtrabackup/buildout.cfg @@ -18,7 +18,7 @@ parts = [xtrabackup-build-patch-download] recipe = hexagonit.recipe.download url = ${:_profile_base_location_}/${:filename} -md5sum = 95b2c2ef625f88d85bf8876269a19372 +md5sum = b1536fe65e32592e4a0a14bf3b159885 download-only = true filename = xtrabackup-1.6.2_build.patch @@ -29,17 +29,33 @@ md5sum = d642ea7b30d1322a516fbece4ee100e0 download-only = true filename = ${:_buildout_section_name_} +[mysql-5.1-download] +recipe = hexagonit.recipe.download +version = 5.1.56 +url = http://s3.amazonaws.com/percona.com/downloads/community/mysql-${:version}.tar.gz +md5sum = 15161d67f4830aad3a8a89e083749d49 +download-only = true +filename = 
mysql-${:version}.tar.gz + +[libtar-download] +recipe = hexagonit.recipe.download +version = 1.2.11 +url = http://s3.amazonaws.com/percona.com/downloads/community/libtar-${:version}.tar.gz +md5sum = 604238e8734ce6e25347a58c4f1a1d7e +download-only = true +filename = libtar-${:version}.tar.gz + [xtrabackup] recipe = hexagonit.recipe.cmmi -url = http://www.percona.com/redir/downloads/XtraBackup/xtrabackup-1.6.2/source/xtrabackup-1.6.2.tar.gz -md5sum = 933243ae362156c98f1211eb87b3b4ea +url = http://www.percona.com/downloads/XtraBackup/XtraBackup-1.6.3/source/xtrabackup-1.6.3.tar.gz +md5sum = d0b827fd18cd76416101eb7b7c56a311 make-binary = true patches = ${xtrabackup-build-patch-download:location}/${xtrabackup-build-patch-download:filename} ${allow_force_ibbackup.patch:location}/${allow_force_ibbackup.patch:filename} patch-options = -p1 location = ${buildout:parts-directory}/${:_buildout_section_name_} -configure-command = utils/build.sh innodb51_builtin ${:location} ${libtool:location} +configure-command = ln -sf ${mysql-5.1-download:location}/${mysql-5.1-download:filename} ${libtar-download:location}/${libtar-download:filename} . && utils/build.sh innodb51_builtin ${:location} ${libtool:location} environment = CPPFLAGS =-I${zlib:location}/include -I${ncurses:location}/include -I${readline:location}/include LDFLAGS =-L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib diff --git a/component/xtrabackup/xtrabackup-1.6.2_build.patch b/component/xtrabackup/xtrabackup-1.6.2_build.patch index 41c2188fa9429d8c9db685233aafd83931f0a57c..5357f52128860733e3e97836fa0bd81f3730649b 100644 --- a/component/xtrabackup/xtrabackup-1.6.2_build.patch +++ b/component/xtrabackup/xtrabackup-1.6.2_build.patch @@ -27,17 +27,16 @@ diff -ur xtrabackup-1.6.2.orig/utils/build.sh xtrabackup-1.6.2/utils/build.sh exit -1 } -@@ -79,7 +81,12 @@ +@@ -79,7 +81,11 @@ { echo "Configuring the server" cd $server_dir - BUILD/autorun.sh -+ libtoolize -c -f -+ aclocal -I $libtool_location/share/aclocal -I config/ac-macros -+ autoheader -+ automake -c -a -f -+ autoconf -+ touch sql/sql_yacc.yy ++ aclocal -I $libtool_location/share/aclocal -I config/ac-macros || die "Can't execute aclocal" ++ autoheader || die "Can't execute autoheader" ++ libtoolize --automake --force --copy || die "Can't execute libtoolize" ++ automake --add-missing --force --copy || die "Can't execute automake" ++ autoconf || die "Can't execute autoconf" eval $configure_cmd echo "Building the server" @@ -141,12 +140,3 @@ diff -ur xtrabackup-1.6.2.orig/utils/build.sh xtrabackup-1.6.2/utils/build.sh ;; "innodb55" | "5.5") -@@ -230,7 +279,7 @@ - - build_server - -- build_xtrabackup -+ build_xtrabackup - - build_tar4ibd - ;; diff --git a/setup.py b/setup.py index 562d03633ece78b51e68a9d7a264bcaf2fd872a4..e7328ca7ad18eb46899ce750afd4e5332b7deb05 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages import glob import os -version = '0.38-dev' +version = '0.39-dev' name = 'slapos.cookbook' long_description = open("README.txt").read() + "\n" + \ open("CHANGES.txt").read() + "\n" diff --git a/slapos/recipe/erp5/__init__.py b/slapos/recipe/erp5/__init__.py index 03c7fb227c56cc4f450e889ef8ecfcb96a2622d2..672bf979235765949a032f5c903dab5aaaeaab51 100644 --- a/slapos/recipe/erp5/__init__.py +++ b/slapos/recipe/erp5/__init__.py @@ -668,7 +668,15 @@ SSLCARevocationPath %(ca_crl)s""" # maxconn should be set as the 
maximum number of threads we have per zope, so that # haproxy will manage the queue of requests with the possibility to # move a request to another node if the initially selected one is dead - server_template = """ server %(name)s %(address)s cookie %(name)s check inter 3s rise 1 fall 2 maxconn %(cluster_zope_thread_amount)s""" + # maxqueue is the number of waiting requests in the queue of every zope client. + # It makes sure that there is not a single zope client handling all + # the work while other clients are doing nothing. This was happening + # even though we have round-robin distribution, because when a node dies for + # some seconds, all requests are dispatched to other nodes, and then users + # stick to other nodes and do not come back. Please note that this option + # is not an issue if you have more than (maxqueue * node_quantity) requests, + # because haproxy will handle a top-level queue + server_template = """ server %(name)s %(address)s cookie %(name)s check inter 3s rise 1 fall 2 maxqueue 5 maxconn %(cluster_zope_thread_amount)s""" config = dict(name=name, ip=ip, port=port, server_check_path=server_check_path,) i = 1 @@ -781,7 +789,12 @@ SSLCARevocationPath %(ca_crl)s""" kumo_conf = {} # XXX Conversion server and memcache server coordinates are not relevant # for pure site creation. - mysql_connection_string = "%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s" % mysql_conf + assert mysql_conf['mysql_user'] and mysql_conf['mysql_password'], \ + "ZMySQLDA requires a user and a password for socket connections" + # XXX Use socket access to prevent unwanted connections to original MySQL + # server when cloning an existing ERP5 instance. + # TCP will be required if MySQL is in a different partition/server. + mysql_connection_string = "%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s" % mysql_conf bt5_list = self.parameter_dict.get("bt5_list", "").split() or default_bt5_list bt5_repository_list = self.parameter_dict.get("bt5_repository_list", "").split() \ @@ -1185,6 +1198,8 @@ SSLCARevocationPath %(ca_crl)s""" mysql_conf)) mysql_script_list = [] + mysql_script_list.append(pkg_resources.resource_string(__name__, + 'template/mysql-init-function.sql.in')) for x_database, x_user, x_password in \ [(mysql_conf['mysql_database'], mysql_conf['mysql_user'], mysql_conf['mysql_password']), @@ -1194,7 +1209,7 @@ mysql_conf['mysql_test_password']), ] + mysql_conf['mysql_parallel_test_dict']: mysql_script_list.append(pkg_resources.resource_string(__name__, - 'template/initmysql.sql.in') % { + 'template/mysql-init-database.sql.in') % { 'mysql_database': x_database, 'mysql_user': x_user, 'mysql_password': x_password}) diff --git a/slapos/recipe/erp5/template/haproxy.cfg.in b/slapos/recipe/erp5/template/haproxy.cfg.in index aa8f8a865ecb58d166cfebcc911c681780651ae8..a732c3bc89b926dbb70b787ee32f9c2c957294d8 100644 --- a/slapos/recipe/erp5/template/haproxy.cfg.in +++ b/slapos/recipe/erp5/template/haproxy.cfg.in @@ -22,12 +22,12 @@ defaults timeout connect 5s # As requested in haproxy doc, make this "at least equal to timeout server". timeout client 305s - # Use "option httpclose" to not preserve client & server persistent connections + # Use "option forceclose" to not preserve client & server persistent connections # while handling every incoming request individually, dispatching them one after # another to servers, in HTTP close mode. 
This is really needed when haproxy # is configured with maxconn set to 1; without this option, browsers are unable # to render a page - option httpclose + option forceclose listen %(name)s %(ip)s:%(port)s cookie SERVERID insert diff --git a/slapos/recipe/erp5/template/my.cnf.in b/slapos/recipe/erp5/template/my.cnf.in index 632d35c996e4badff3d09e5b6f84ef4dbc6b252f..25d951e863b594e5ac3ca2709897da3763c45754 100644 --- a/slapos/recipe/erp5/template/my.cnf.in +++ b/slapos/recipe/erp5/template/my.cnf.in @@ -15,12 +15,17 @@ socket = %(socket)s datadir = %(data_directory)s pid-file = %(pid_file)s log-error = %(error_log)s -log-slow-file = %(slow_query_log)s +slow_query_log +slow_query_log_file = %(slow_query_log)s long_query_time = 5 max_allowed_packet = 128M query_cache_size = 32M -plugin-load = ha_innodb_plugin.so +plugin-load = ha_groonga.so;ha_sphinx.so + +# By default only 100 connections are allowed; when using ZEO +# we may have many more connections +# max_connections = 1000 # The following are important to configure and depend a lot on the size of # your database and the available resources. diff --git a/slapos/recipe/erp5/template/initmysql.sql.in b/slapos/recipe/erp5/template/mysql-init-database.sql.in similarity index 100% rename from slapos/recipe/erp5/template/initmysql.sql.in rename to slapos/recipe/erp5/template/mysql-init-database.sql.in diff --git a/slapos/recipe/erp5/template/mysql-init-function.sql.in b/slapos/recipe/erp5/template/mysql-init-function.sql.in new file mode 100644 index 0000000000000000000000000000000000000000..c4d0cbde46c01372b3808db3601e517e14788b4e --- /dev/null +++ b/slapos/recipe/erp5/template/mysql-init-function.sql.in @@ -0,0 +1,5 @@ +USE mysql; +DROP FUNCTION IF EXISTS last_insert_grn_id; +CREATE FUNCTION last_insert_grn_id RETURNS INTEGER SONAME 'ha_groonga.so'; +DROP FUNCTION IF EXISTS sphinx_snippets; +CREATE FUNCTION sphinx_snippets RETURNS STRING SONAME 'sphinx.so'; diff --git a/slapos/recipe/lamp/template/my.cnf.in b/slapos/recipe/lamp/template/my.cnf.in index 043fb3ad56bbea3b3e35766a9b019cd8dbf7cc77..00eb280232a8e41206dccdca6be7fe15e58a1591 100644 --- a/slapos/recipe/lamp/template/my.cnf.in +++ b/slapos/recipe/lamp/template/my.cnf.in @@ -15,12 +15,13 @@ socket = %(socket)s datadir = %(data_directory)s pid-file = %(pid_file)s log-error = %(error_log)s -log-slow-file = %(slow_query_log)s +slow_query_log +slow_query_log_file = %(slow_query_log)s long_query_time = 5 max_allowed_packet = 128M query_cache_size = 32M -plugin-load = ha_innodb_plugin.so +plugin-load = ha_groonga.so;ha_sphinx.so # The following are important to configure and depend a lot on the size of # your database and the available resources. diff --git a/slapos/recipe/mysql/template/my.cnf.in b/slapos/recipe/mysql/template/my.cnf.in index 82af0f3e0289c98810dd6616bb5487cb07b0a33c..00eb280232a8e41206dccdca6be7fe15e58a1591 100644 --- a/slapos/recipe/mysql/template/my.cnf.in +++ b/slapos/recipe/mysql/template/my.cnf.in @@ -15,12 +15,13 @@ socket = %(socket)s datadir = %(data_directory)s pid-file = %(pid_file)s log-error = %(error_log)s -#log-slow-file = %(slow_query_log)s +slow_query_log +slow_query_log_file = %(slow_query_log)s long_query_time = 5 max_allowed_packet = 128M query_cache_size = 32M -plugin-load = ha_innodb_plugin.so +plugin-load = ha_groonga.so;ha_sphinx.so # The following are important to configure and depend a lot on the size of # your database and the available resources. 
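The connection strings changed above are plain Python %-interpolation over the mysql_conf mapping, and the vifib recipe below switches to the same socket form. A minimal sketch of how the old and new forms render, with hypothetical mysql_conf values invented only for illustration:

    # Minimal sketch, assuming a mysql_conf mapping like the one the erp5
    # recipe builds; every value below is a made-up placeholder.
    mysql_conf = {
        'mysql_database': 'erp5',
        'mysql_user': 'user',
        'mysql_password': 'secret',
        'ip': '127.0.0.1',
        'tcp_port': '45678',
        'socket': '/tmp/mysqld.sock',
    }
    # Old TCP form: "<database>@<ip>:<tcp_port> <user> <password>"
    print('%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s '
          '%(mysql_password)s' % mysql_conf)
    # New socket form: "<database> <user> <password> <socket>"
    print('%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s'
          % mysql_conf)
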
diff --git a/slapos/recipe/vifib.py b/slapos/recipe/vifib.py index 55ec2243b508aeb3e05114585825ca9e0e59f59d..7bd11b6bac0eec2e3318a84438f6d7989ef8c859 100644 --- a/slapos/recipe/vifib.py +++ b/slapos/recipe/vifib.py @@ -147,7 +147,10 @@ class Recipe(slapos.recipe.erp5.Recipe): conversion_server_conf, # as installERP5Site is not trusted (yet) and this recipe is production # ready, expose more information - mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf, + # XXX Use socket access to prevent unwanted connections to original MySQL + # server when cloning an existing ERP5 instance. + # TCP will be required if MySQL is in a different partition/server. + mysql_url='%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s' % mysql_conf, )) return self.path_list @@ -200,7 +203,10 @@ class Recipe(slapos.recipe.erp5.Recipe): conversion_server_conf, # as installERP5Site is not trusted (yet) and this recipe is production # ready, expose more information - mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf, + # XXX Use socket access to prevent unwanted connections to original MySQL + # server when cloning an existing ERP5 instance. + # TCP will be required if MySQL is in a different partition/server. + mysql_url='%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s' % mysql_conf, )) return self.path_list diff --git a/slapos/recipe/xwiki/template/my.cnf.in b/slapos/recipe/xwiki/template/my.cnf.in index 09171fb6d9837199f4030f574791df31c251dc54..00eb280232a8e41206dccdca6be7fe15e58a1591 100644 --- a/slapos/recipe/xwiki/template/my.cnf.in +++ b/slapos/recipe/xwiki/template/my.cnf.in @@ -15,58 +15,39 @@ socket = %(socket)s datadir = %(data_directory)s pid-file = %(pid_file)s log-error = %(error_log)s -log-slow-queries = %(slow_query_log)s +slow_query_log +slow_query_log_file = %(slow_query_log)s long_query_time = 5 -skip-locking -key_buffer = 384M max_allowed_packet = 128M -table_cache = 512 -sort_buffer_size = 2M -read_buffer_size = 2M -read_rnd_buffer_size = 8M -myisam_sort_buffer_size = 64M -thread_cache_size = 8 query_cache_size = 32M -# Try number of CPU's*2 for thread_concurrency -thread_concurrency = 8 -# Replication Master Server (default) -# binary logging is required for replication -log-bin=mysql-bin +plugin-load = ha_groonga.so;ha_sphinx.so -# required unique id between 1 and 2^32 - 1 -# defaults to 1 if master-host is not set -# but will not function as a master if omitted -server-id = 1 +# The following are important to configure and depend a lot on the size of +# your database and the available resources. +#innodb_buffer_pool_size = 4G +#innodb_log_file_size = 256M +#innodb_log_buffer_size = 8M + +# Some dangerous settings you may want to uncomment if you only want +# performance or less disk access. Useful for unit tests. +#innodb_flush_log_at_trx_commit = 0 +#innodb_flush_method = nosync +#innodb_doublewrite = 0 +#sync_frm = 0 + +# Uncomment the following if you need binary logging, which is recommended +# on production instances (either for replication or incremental backups). 
+#log-bin=mysql-bin # Force utf8 usage collation_server = utf8_unicode_ci character_set_server = utf8 -default-character-set = utf8 skip-character-set-client-handshake -[mysqldump] -quick -max_allowed_packet = 16M - [mysql] no-auto-rehash -# Remove the next comment character if you are not familiar with SQL -#safe-updates socket = %(socket)s -[isamchk] -key_buffer = 256M -sort_buffer_size = 256M -read_buffer = 2M -write_buffer = 2M - -[myisamchk] -key_buffer = 256M -sort_buffer_size = 256M -read_buffer = 2M -write_buffer = 2M - [mysqlhotcopy] interactive-timeout - diff --git a/software/erp5/instance.cfg b/software/erp5/instance.cfg index 05e6988f5ef6f3efeefc51139834f45128b12032..26a17f269ee1969d14e195b3633fb30b47a79a87 100644 --- a/software/erp5/instance.cfg +++ b/software/erp5/instance.cfg @@ -34,14 +34,31 @@ tidstorage_repozo_binary = ${buildout:bin-directory}/tidstorage_repozo tidstoraged_binary = ${buildout:bin-directory}/tidstoraged xtrabackup_binary = ${xtrabackup:location}/bin/xtrabackup_51 zabbix_agentd_binary = ${zabbix-agent:location}/sbin/zabbix_agentd -mk-variable-advisor_binary = ${perl:siteprefix}/bin/mk-variable-advisor -mk-table-usage_binary = ${perl:siteprefix}/bin/mk-table-usage -mk-visual-explain_binary = ${perl:siteprefix}/bin/mk-visual-explain -mk-config-diff_binary = ${perl:siteprefix}/bin/mk-config-diff -mk-deadlock-logger_binary = ${perl:siteprefix}/bin/mk-deadlock-logger -mk-error-log_binary = ${perl:siteprefix}/bin/mk-error-log -mk-index-usage_binary = ${perl:siteprefix}/bin/mk-index-usage -mk-query-advisor_binary = ${perl:siteprefix}/bin/mk-query-advisor +pt-archiver_binary = ${perl:siteprefix}/bin/pt-archiver +pt-config-diff_binary = ${perl:siteprefix}/bin/pt-config-diff +pt-deadlock-logger_binary = ${perl:siteprefix}/bin/pt-deadlock-logger +pt-duplicate-key-checker_binary = ${perl:siteprefix}/bin/pt-duplicate-key-checker +pt-fifo-split_binary = ${perl:siteprefix}/bin/pt-fifo-split +pt-find_binary = ${perl:siteprefix}/bin/pt-find +pt-fk-error-logger_binary = ${perl:siteprefix}/bin/pt-fk-error-logger +pt-heartbeat_binary = ${perl:siteprefix}/bin/pt-heartbeat +pt-index-usage_binary = ${perl:siteprefix}/bin/pt-index-usage +pt-kill_binary = ${perl:siteprefix}/bin/pt-kill +pt-log-player_binary = ${perl:siteprefix}/bin/pt-log-player +pt-online-schema-change_binary = ${perl:siteprefix}/bin/pt-online-schema-change +pt-query-advisor_binary = ${perl:siteprefix}/bin/pt-query-advisor +pt-query-digest_binary = ${perl:siteprefix}/bin/pt-query-digest +pt-show-grants_binary = ${perl:siteprefix}/bin/pt-show-grants +pt-slave-delay_binary = ${perl:siteprefix}/bin/pt-slave-delay +pt-slave-find_binary = ${perl:siteprefix}/bin/pt-slave-find +pt-slave-restart_binary = ${perl:siteprefix}/bin/pt-slave-restart +pt-table-checksum_binary = ${perl:siteprefix}/bin/pt-table-checksum +pt-table-sync_binary = ${perl:siteprefix}/bin/pt-table-sync +pt-tcp-model_binary = ${perl:siteprefix}/bin/pt-tcp-model +pt-trend_binary = ${perl:siteprefix}/bin/pt-trend +pt-upgrade_binary = ${perl:siteprefix}/bin/pt-upgrade +pt-variable-advisor_binary = ${perl:siteprefix}/bin/pt-variable-advisor +pt-visual-explain_binary = ${perl:siteprefix}/bin/pt-visual-explain # cloudooo specific configuration ooo_binary_path = ${libreoffice-bin:location}/program @@ -55,6 +72,7 @@ link_binary_list = ${coreutils:location}/bin/ls ${coreutils:location}/bin/tr ${coreutils:location}/bin/uname + ${gettext:location}/lib/gettext/hostname ${git:location}/bin/git ${graphviz:location}/bin/dot ${grep:location}/bin/grep diff --git 
a/software/erp5/software.cfg b/software/erp5/software.cfg index faf8d4fef24640dbeea61d77fdcbf4295c544b18..8ed1387ebe1fc05bcdc483bd09682e9a95da5abb 100644 --- a/software/erp5/software.cfg +++ b/software/erp5/software.cfg @@ -28,7 +28,7 @@ configurator_bt5_list = erp5_core_proxy_field_legacy erp5_full_text_myisam_catal # Default template for erp5 instance. recipe = slapos.recipe.template url = ${:_profile_base_location_}/instance.cfg -md5sum = 53f225e13bf7ebcd88bbc2b038c83b6f +md5sum = 07f09cca8ad4d7858bb40d723998a889 output = ${buildout:directory}/template.cfg mode = 0644 @@ -40,27 +40,75 @@ md5sum = cbe1d75339c6cb20e1aef818797face1 output = ${buildout:directory}/schema.json mode = 0644 +[networkcache] +# signature certificates of the following uploaders. +# Romain Courteaud +# Sebastien Robin +# Kazuhiko Shiozaki +signature-certificate-list = + -----BEGIN CERTIFICATE----- + MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE + CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5 + MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl + ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF + AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw + boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX + Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA + ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX + mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC + q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g + QUUGLQ== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV + BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw + DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+ + YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN + XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR + L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU + /4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t + LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda + FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd + R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU + hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIB7jCCAVegAwIBAgIJAJWA0jQ4o9DGMA0GCSqGSIb3DQEBBQUAMA8xDTALBgNV + BAMMBHg2MXMwIBcNMTExMTI0MTAyNDQzWhgPMjExMTEwMzExMDI0NDNaMA8xDTAL + BgNVBAMMBHg2MXMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANdJNiFsRlkH + vq2kHP2zdxEyzPAWZH3CQ3Myb3F8hERXTIFSUqntPXDKXDb7Y/laqjMXdj+vptKk + 3Q36J+8VnJbSwjGwmEG6tym9qMSGIPPNw1JXY1R29eF3o4aj21o7DHAkhuNc5Tso + 67fUSKgvyVnyH4G6ShQUAtghPaAwS0KvAgMBAAGjUDBOMB0GA1UdDgQWBBSjxFUE + RfnTvABRLAa34Ytkhz5vPzAfBgNVHSMEGDAWgBSjxFUERfnTvABRLAa34Ytkhz5v + PzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAFLDS7zNhlrQYSQO5KIj + z2RJe3fj4rLPklo3TmP5KLvendG+LErE2cbKPqnhQ2oVoj6u9tWVwo/g03PMrrnL + KrDm39slYD/1KoE5kB4l/p6KVOdeJ4I6xcgu9rnkqqHzDwI4v7e8/D3WZbpiFUsY + vaZhjNYKWQf79l6zXfOvphzJ + -----END CERTIFICATE----- + +[erp5] +revision = 7d57428a5cfd0fceed70acb9e86cf274558d606c + [versions] MySQL-python = 1.2.3 Paste = 1.7.5.1 -PasteScript = 1.7.4.2 +PasteScript = 1.7.5 Products.CMFActionIcons = 2.1.3 Products.CMFCalendar = 2.2.2 -Products.CMFCore = 2.2.4 +Products.CMFCore = 2.2.5 Products.CMFDefault = 2.2.2 Products.CMFTopic = 2.2.1 Products.CMFUid = 2.2.1 -Products.DCWorkflowGraph = 0.4nxd001 +Products.DCWorkflowGraph = 0.4.1 
Products.ExternalEditor = 1.1.0 -Products.GenericSetup = 1.6.3 +Products.GenericSetup = 1.6.4 Products.MimetypesRegistry = 2.0.3 -Products.PluggableAuthService = 1.7.5 +Products.PluggableAuthService = 1.7.6 Products.PluginRegistry = 1.3b1 Products.TIDStorage = 5.4.7.dev-r45842 Products.Zelenium = 1.0.3 StructuredText = 2.11.1 -Werkzeug = 0.7.1 -buildout-versions = 1.6 +Werkzeug = 0.8.1 cElementTree = 1.0.5-20051216 chardet = 1.0.1 cloudooo = 1.2.3 @@ -73,40 +121,40 @@ elementtree = 1.2.7-20070827-preview erp5.recipe.cmmiforcei686 = 0.1.3 erp5diff = 0.8.1.5 eventlet = 0.9.16 -feedparser = 5.0.1 +feedparser = 5.1 five.localsitemanager = 2.0.5 greenlet = 0.3.1 hexagonit.recipe.cmmi = 1.5.0 hexagonit.recipe.download = 1.5.0 -http-parser = 0.7.0 -ipdb = 0.6 +http-parser = 0.7.1 +ipdb = 0.6.1 +ipython = 0.11 meld3 = 0.6.7 ordereddict = 1.1 paramiko = 1.7.7.1 plone.recipe.command = 1.1 ply = 3.4 -psutil = 0.3.0 -pycrypto = 2.3 -python-ldap = 2.4.3 +psutil = 0.4.0 +python-ldap = 2.4.6 python-memcached = 1.47 -restkit = 3.3.1 +restkit = 3.3.2 rtjp-eventlet = 0.3.2 -slapos.cookbook = 0.25 -slapos.recipe.build = 0.6 -slapos.recipe.template = 1.1 +slapos.cookbook = 0.38 +slapos.recipe.build = 0.7 +slapos.recipe.template = 2.2 threadframe = 0.2 timerserver = 2.0.2 urlnorm = 1.1.2 uuid = 1.30 -validictory = 0.7.1 +validictory = 0.7.2 xupdate-processor = 0.4 # Required by: -# slapos.core==0.14 -Flask = 0.7.2 +# slapos.core==0.20 +Flask = 0.8 # Required by: -# PasteScript==1.7.4.2 +# PasteScript==1.7.5 # cloudooo==1.2.3 PasteDeploy = 1.5.0 @@ -116,48 +164,60 @@ WSGIUtils = 0.7 # Required by: # cloudooo==1.2.3 -# slapos.core==0.14 +# slapos.core==0.20 argparse = 1.1 -# Required by: -# slapos.recipe.template==1.1 -collective.recipe.template = 1.9 - # Required by: # SOAPpy==0.12.0nxd001 fpconst = 0.7.2 # Required by: -# ipdb==0.6 -ipython = 0.11 - -# Required by: -# slapos.cookbook==0.25 +# slapos.cookbook==0.38 netaddr = 0.7.6 # Required by: -# slapos.core==0.14 -netifaces = 0.5 +# slapos.core==0.20 +netifaces = 0.6 # Required by: # cloudooo==1.2.3 python-magic = 0.4.0.1 # Required by: -# slapos.cookbook==0.25 -# slapos.core==0.14 -# zc.buildout==1.5.3-dev-SlapOS-010 +# Products.CMFActionIcons==2.1.3 +# Products.CMFCalendar==2.2.2 +# Products.CMFCore==2.2.5 +# Products.CMFDefault==2.2.2 +# Products.CMFTopic==2.2.1 +# Products.CMFUid==2.2.1 +# Products.DCWorkflow==2.2.3nxd002 +# Products.DCWorkflowGraph==0.4.1 +# Products.ExternalEditor==1.1.0 +# Products.GenericSetup==1.6.4 +# Products.MimetypesRegistry==2.0.3 +# Products.PluggableAuthService==1.7.6 +# Products.PluginRegistry==1.3b1 +# Products.TIDStorage==5.4.7.dev-r45842 +# Products.Zelenium==1.0.3 +# Zope2==2.12.20 +# five.localsitemanager==2.0.5 +# python-ldap==2.4.6 +# slapos.cookbook==0.38 +# slapos.core==0.20 +# zc.buildout==1.6.0-dev-SlapOS-003 # zc.recipe.egg==1.2.2 +# zope.deprecation==3.4.0 +# zope.structuredtext==3.4.0 setuptools = 0.6c12dev-r88846 # Required by: -# slapos.cookbook==0.25 -slapos.core = 0.14 +# slapos.cookbook==0.38 +slapos.core = 0.20 # Required by: -# slapos.core==0.14 +# slapos.core==0.20 supervisor = 3.0a10 # Required by: -# slapos.cookbook==0.25 +# slapos.cookbook==0.38 xml-marshaller = 0.9.7 diff --git a/software/erp5testnode/software.cfg b/software/erp5testnode/software.cfg index d7f8666bc19b3247977c5eb48f52536452da8fdc..9ba59340addc93cc488bbe1eca2424b4120ceb65 100644 --- a/software/erp5testnode/software.cfg +++ b/software/erp5testnode/software.cfg @@ -26,7 +26,6 @@ parts = subversion zip git - checkrecipe # Separate from 
site eggs allowed-eggs-from-site-packages = @@ -45,105 +44,8 @@ allow-hosts = psutil.googlecode.com www.dabeaz.com -develop = - ${:parts-directory}/slapos.cookbook-repository - ${:parts-directory}/erp5.util-repository - -[checkrecipe] -recipe = plone.recipe.command -stop-on-error = true -update-command = ${:command} -command = grep parts ${buildout:develop-eggs-directory}/slapos.cookbook.egg-link && grep parts ${buildout:develop-eggs-directory}/erp5.util.egg-link - -[slapos.cookbook-repository] -recipe = plone.recipe.command -location = ${buildout:parts-directory}/${:_buildout_section_name_} -stop-on-error = true -repository = http://git.erp5.org/repos/slapos.git -branch = master -revision = 33e2a6315965925b3585d3dcbe246055bb852cba -command = ${git:location}/bin/git clone --quiet -b ${:branch} ${:repository} ${:location} && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi -update-command = cd ${:location} && ${git:location}/bin/git pull --quiet && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi - -[erp5.util-repository] -recipe = plone.recipe.command -location = ${buildout:parts-directory}/${:_buildout_section_name_} -stop-on-error = true -repository = http://git.erp5.org/repos/erp5.git -branch = master -revision = 917d196176e6291d3046abb6177 -command = ${git:location}/bin/git clone --quiet -b ${:branch} ${:repository} ${:location} && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi -update-command = cd ${:location} && ${git:location}/bin/git pull --quiet && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi - -[versions] -# Use SlapOS patched zc.buildout -zc.buildout = 1.5.3-dev-SlapOS-009 - -Jinja2 = 2.6 -Werkzeug = 0.8.1 -buildout-versions = 1.7 -hexagonit.recipe.cmmi = 1.5.0 -lxml = 2.3.2 -meld3 = 0.6.7 -plone.recipe.command = 1.1 -slapos.core = 0.19 -slapos.libnetworkcache = 0.10 -slapos.recipe.template = 2.2 - -# Required by: -# slapos.core==0.19 -Flask = 0.8 - -# Required by: -# slapos.cookbook==0.35 -PyXML = 0.8.5 - -# Required by: -# hexagonit.recipe.cmmi==1.5.0 -hexagonit.recipe.download = 1.5.0 - -# Required by: -# slapos.cookbook==0.35 -netaddr = 0.7.6 - -# Required by: -# slapos.core==0.19 -netifaces = 0.6 - -# Required by: -# erp5.util==0.3-dev -# slapos.cookbook==0.35 -# slapos.core==0.19 -# slapos.libnetworkcache==0.10 -# zc.buildout==1.5.3-dev-SlapOS-009 -setuptools = 0.6c12dev-r88846 - -# Required by: -# slapos.core==0.19 -supervisor = 3.0a10 - -# Required by: -# slapos.cookbook==0.35 -xml-marshaller = 0.9.7 - -# Required by: -# slapos.cookbook==0.35 -zc.recipe.egg = 1.3.2 - -# Required by: -# slapos.core==0.19 -zope.interface = 3.8.0 - - [eggs] recipe = zc.recipe.egg -# Just so buildout executes [slapos.cookbook-repository] before [eggs], as -# - [eggs] references [slapos.cookbook-repository] -# - [instance-recipe] needs [slapos.cookbook-repository] to be finished -# - we cannot rely on anything else being executed before [eggs] -dummy = - ${slapos.cookbook-repository:location} - ${erp5.util-repository:location} eggs = ${lxml-python:egg} zc.buildout @@ -199,3 +101,62 @@ signature-certificate-list = R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg== -----END CERTIFICATE----- + +[versions] +# Use SlapOS patched zc.buildout +zc.buildout = 1.6.0-dev-SlapOS-003 + +Jinja2 = 2.6 +Werkzeug 
= 0.8.2 +erp5.util = 0.3 +hexagonit.recipe.cmmi = 1.5.0 +lxml = 2.3.2 +meld3 = 0.6.7 +slapos.cookbook = 0.38 +slapos.core = 0.21 +slapos.libnetworkcache = 0.11 +slapos.recipe.template = 2.2 + +# Required by: +# slapos.core==0.21 +Flask = 0.8 + +# Required by: +# slapos.cookbook==0.38 +PyXML = 0.8.5 + +# Required by: +# hexagonit.recipe.cmmi==1.5.0 +hexagonit.recipe.download = 1.5.0 + +# Required by: +# slapos.cookbook==0.38 +netaddr = 0.7.6 + +# Required by: +# slapos.core==0.21 +netifaces = 0.6 + +# Required by: +# erp5.util==0.3 +# slapos.cookbook==0.38 +# slapos.core==0.21 +# slapos.libnetworkcache==0.11 +# zc.buildout==1.6.0-dev-SlapOS-003 +setuptools = 0.6c12dev-r88846 + +# Required by: +# slapos.core==0.21 +supervisor = 3.0a12 + +# Required by: +# slapos.cookbook==0.38 +xml-marshaller = 0.9.7 + +# Required by: +# slapos.cookbook==0.38 +zc.recipe.egg = 1.3.2 + +# Required by: +# slapos.core==0.21 +zope.interface = 3.8.0 diff --git a/software/vifib/software.cfg b/software/vifib/software.cfg index 7a503c5c1f6b297ce0f4049d1c87f7cbbca9edc8..8a9e6c3f5286d45d9c0c9ebeb4b0f8acc1f17d7d 100644 --- a/software/vifib/software.cfg +++ b/software/vifib/software.cfg @@ -4,9 +4,23 @@ extends = parts += vifib + check-slapos.core + +develop = + ${:parts-directory}/vifib + +[check-slapos.core] +recipe = plone.recipe.command +stop-on-error = true +update-command = ${:command} +command = grep parts ${buildout:develop-eggs-directory}/slapos.core.egg-link [eggs] -eggs += slapos.core +dummy = + ${vifib:location} +eggs += + suds + slapos.core [instance-recipe] module = vifib @@ -20,8 +34,7 @@ repository_id_list += vifib/master [vifib] <= erp5 repository = http://git.erp5.org/repos/slapos.core.git -# tag: vifib-0.2 -revision = f42ad28f0aa47d8cdb028ce6a1796eb7ef6f066e +revision = [local-bt5-repository] # XXX: workaround for zc.buildout bug, as list += ends up with adding new entry diff --git a/stack/erp5.cfg b/stack/erp5.cfg index ff41558752c69e6e4f1fa11a72c42a163d23fb8d..a5f0b40cfba521ee69da41ee13d841f9b5f67a07 100644 --- a/stack/erp5.cfg +++ b/stack/erp5.cfg @@ -31,7 +31,7 @@ allow-hosts = extends = # Exact version of Zope - http://svn.zope.org/repos/main/Zope/tags/2.12.19/versions.cfg + http://svn.zope.org/repos/main/Zope/tags/2.12.20/versions.cfg ../component/logrotate/buildout.cfg ../component/dcron/buildout.cfg ../component/file/buildout.cfg @@ -47,11 +47,12 @@ extends = ../component/kumo/buildout.cfg ../component/libreoffice-bin/buildout.cfg ../component/lxml-python/buildout.cfg - ../component/maatkit/buildout.cfg + ../component/percona-toolkit/buildout.cfg ../component/mariadb/buildout.cfg ../component/memcached/buildout.cfg ../component/mysql-python/buildout.cfg ../component/pdftk/buildout.cfg + ../component/pycrypto-python/buildout.cfg ../component/pysvn-python/buildout.cfg ../component/python-2.6/buildout.cfg ../component/python-2.7/buildout.cfg @@ -90,6 +91,7 @@ parts = libpng ghostscript mariadb + mroonga-mariadb sphinx imagemagick kumo @@ -99,6 +101,9 @@ parts = tesseract hookbox bootstrap2.6 + perl-DBD-mariadb + perl-DBI + percona-toolkit zabbix-agent pdftk dcron @@ -219,7 +224,7 @@ location = ${buildout:parts-directory}/${:_buildout_section_name_} stop-on-error = true repository = http://git.erp5.org/repos/erp5.git branch = master -revision = f1bc8fdc0e4ce17530b32468c2affda8a6e9e896 +revision = command = ${git:location}/bin/git clone --quiet -b ${:branch} ${:repository} ${:location} && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi 
update-command = cd ${:location} && ${git:location}/bin/git pull --quiet && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi @@ -305,7 +310,7 @@ eggs = ${lxml-python:egg} ${python-ldap-python:egg} ${pysvn-python:egg} - pycrypto + ${pycrypto-python:egg} PyXML SOAPpy cElementTree @@ -407,7 +412,7 @@ scripts = [versions] # Use SlapOS patched zc.buildout -zc.buildout = 1.5.3-dev-SlapOS-010 +zc.buildout = 1.6.0-dev-SlapOS-003 # pin Acquisition and Products.DCWorkflow to Nexedi flavour of eggs Acquisition = 2.13.7nxd001
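
The *-repository parts above all share the same clone-then-pin pattern: clone a branch once, pull on later runs, and hard-reset to the pinned revision (an empty revision, as in some of the profiles above, simply follows the branch head). A rough Python sketch of the intent of that command/update-command pair; the function name and the arguments in the usage line are placeholders, not the pinned values from the profiles:

    # Rough sketch of what the repository parts' shell one-liners do:
    # clone once, pull on updates, hard-reset when a revision is pinned.
    import os
    import subprocess

    def checkout(repository, branch, revision, location, git='git'):
        if not os.path.exists(location):
            subprocess.check_call([git, 'clone', '--quiet', '-b', branch,
                                   repository, location])
        else:
            subprocess.check_call([git, 'pull', '--quiet'], cwd=location)
        if revision:  # an empty revision means "follow the branch"
            subprocess.check_call([git, 'reset', '--quiet', '--hard', revision],
                                  cwd=location)

    # e.g. checkout('http://git.erp5.org/repos/erp5.git', 'master', '', '/srv/erp5')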