Commit 6c1d891d authored by Romain Courteaud's avatar Romain Courteaud

Merge remote-tracking branch 'origin/erp5' into deployment_generic_CR1094

Conflicts:
	component/mariadb/buildout.cfg
	software/erp5/software.cfg
parents 06130e8f 184c8530
# CA certificates
[buildout]
parts =
ca-certificates
[ca-certificates-sbin-dir.patch]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/${:filename}
md5sum = 0b4e7d82ce768823c01954ee41ef177b
filename = ${:_buildout_section_name_}
download-only = true
[ca-certificates]
recipe = hexagonit.recipe.cmmi
version = 20111211
url = ftp://ftp.free.fr/mirrors/ftp.debian.org/pool/main/c/ca-certificates/ca-certificates_${:version}.tar.gz
patches =
${ca-certificates-sbin-dir.patch:location}/${ca-certificates-sbin-dir.patch:filename}
patch-options = -p0
configure-command = true
make-targets = install DESTDIR=${buildout:parts-directory}/${:_buildout_section_name_} CERTSDIR=certs SBINDIR=sbin
--- Makefile.orig 2011-12-11 20:54:02.000000000 +0100
+++ Makefile 2012-01-09 17:36:55.059392824 +0100
@@ -17,7 +17,7 @@
install:
for dir in $(SUBDIRS); do \
- mkdir $(DESTDIR)/$(CERTSDIR)/$$dir; \
+ mkdir -p $(DESTDIR)/$(CERTSDIR)/$$dir; \
$(MAKE) -C $$dir install CERTSDIR=$(DESTDIR)/$(CERTSDIR)/$$dir; \
done
for dir in sbin; do \
--- sbin/Makefile.orig 2011-12-11 20:54:02.000000000 +0100
+++ sbin/Makefile 2012-01-09 17:31:45.207387898 +0100
@@ -3,9 +3,12 @@
#
#
+SBINDIR=/usr/sbin
+
all:
clean:
install:
- install -m755 update-ca-certificates $(DESTDIR)/usr/sbin/
+ mkdir -p $(DESTDIR)/$(SBINDIR)
+ install -m755 update-ca-certificates $(DESTDIR)/$(SBINDIR)
[buildout]
extends =
nss.cfg
parts =
corosync
[corosync]
recipe = hexagonit.recipe.cmmi
url = ftp://ftp:downloads@corosync.org/downloads/corosync-1.3.1/corosync-1.3.1.tar.gz
md5sum = c58459a009a3a9d0b9c00e276a190d90
environment =
CPPFLAGS=-I${nspr:location}/include/nspr -I${nss:location}/include/nss
PKG_CONFIG_PATH=${nss:location}/lib/pkgconfig:${nspr:location}/lib/pkgconfig
LDFLAGS =-L${nspr:location}/lib -Wl,-rpath=${nspr:location}/lib -L${nss:location}/lib -Wl,-rpath=${nss:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib
@@ -11,8 +11,8 @@ parts =
[curl]
recipe = hexagonit.recipe.cmmi
-url = http://curl.haxx.se/download/curl-7.21.7.tar.bz2
-md5sum = 5f6d50c4d4ee38c57fe37e3cff75adbd
+url = http://curl.haxx.se/download/curl-7.24.0.tar.bz2
+#md5sum = 5f6d50c4d4ee38c57fe37e3cff75adbd
configure-options =
  --disable-static
  --disable-ldap
...
--- cyrus-sasl-2.1.23/include/sasl.h 2010-11-25 18:15:05.000000000 +0100
+++ cyrus-sasl-2.1.23/include/sasl.h 2010-11-25 18:15:34.000000000 +0100
@@ -346,7 +346,7 @@
* Mechanisms must ignore callbacks with id's they don't recognize.
*/
unsigned long id;
- int (*proc)(); /* Callback function. Types of arguments vary by 'id' */
+ int (*proc); /* Callback function. Types of arguments vary by 'id' */
void *context;
} sasl_callback_t;
[buildout]
parts =
erlang
[erlang]
recipe = hexagonit.recipe.cmmi
url = http://www.erlang.org/download/otp_src_R14B03.tar.gz
md5sum = 7979e662d11476b97c462feb7c132fb7
# File - Determines file type using "magic" numbers
# http://www.darwinsys.com/file/
[buildout]
parts = file
extends =
  ../zlib/buildout.cfg
[file]
recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.astron.com/pub/file/file-5.07.tar.gz
-md5sum = b8d1f9a8a644067bd0a703cebf3f4858
+url = ftp://ftp.astron.com/pub/file/file-5.10.tar.gz
+md5sum = 4cea34b087b060772511e066e2038196
configure-options =
  --disable-static
environment =
...
@@ -63,6 +63,7 @@ md5sum = d7cd6a27c8801e66cbaa964a039ecfdb
filename = ecj.jar
[gcc-download]
+hack-revision = ${gcc-interconnection-workaround:hack-revision}
recipe = hexagonit.recipe.download
url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-4.5.3.tar.bz2
md5sum = 8e0b5c12212e185f3e4383106bfa9cc6
@@ -70,6 +71,7 @@ strip-top-level-dir = True
destination = ${gcc-source:location}
[gcc-java-download]
+hack-revision = ${gcc-interconnection-workaround:hack-revision}
recipe = hexagonit.recipe.download
url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-java-4.5.3.tar.bz2
md5sum = 08e045fdbdc22ac9af3aec3b8d16dbab
@@ -78,6 +80,7 @@ destination = ${gcc-source:location}
ignore-existing = true
[gcc-source]
+hack-revision = ${gcc-interconnection-workaround:hack-revision}
location = ${buildout:parts-directory}/${:_buildout_section_name_}
[gcc-multiarch.patch]
@@ -87,7 +90,15 @@ url = ${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
+[gcc-java-pre-4.4.patch]
+recipe = hexagonit.recipe.download
+md5sum = 9a563576126d9fcf234ef29c2fc7df76
+url = ${:_profile_base_location_}/${:filename}
+filename = ${:_buildout_section_name_}
+download-only = true
[gcc-java-minimal]
+hack-revision = ${gcc-interconnection-workaround:hack-revision}
depends =
  ${gcc-download:location}
  ${gcc-java-download:location}
@@ -96,6 +107,7 @@ path = ${gcc-source:location}
md5sum = bb3265edf0fa7543e50cedb93e04e427
patches =
  ${gcc-multiarch.patch:location}/${gcc-multiarch.patch:filename}
+  ${gcc-java-pre-4.4.patch:location}/${gcc-java-pre-4.4.patch:filename}
patch-options = -p2
configure-command = make clean \\; make distclean \\; ./configure
# GMP does not correctly detect achitecture so it have to be given
@@ -121,6 +133,7 @@ environment =
make-targets = install -j1
[gcc]
+hack-revision = ${gcc-interconnection-workaround:hack-revision}
depends =
  ${gcc-download:location}
  ${gcc-java-download:location}
@@ -151,3 +164,10 @@ environment =
  PATH=${zip:location}/bin:%(PATH)s
# make install does not work when several core are used
make-targets = install -j1
+[gcc-interconnection-workaround]
+# gcc parts are interconnected, so buildout is not capable to clean them up
+# until gcc will be simplified by using more robust build recipe (like
+# slapos.recipe.build) each time any of parts which reuses this one gets updated
+# the hack-revision have to be increased
+hack-revision = 1
Patch for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=50888
--- a/src/libjava/libjava/prims.cc.orig 2012-01-20 11:30:18.586157610 +0100
+++ b/src/libjava/libjava/prims.cc 2012-01-20 11:30:58.192770947 +0100
@@ -38,6 +38,14 @@
#endif
#ifndef DISABLE_GETENV_PROPERTIES
+#ifdef __GLIBC__
+/* glibc 2.15+ provides even for C++ inline optimized ::isspace etc.
+ Unfortunately those inlines are throw (), and call a function pointer
+ (which is throw () too, but with -fnon-call-exceptions this results
+ in a __cxa_call_unexpected call. This macro disables the optimized
+ version. */
+#define __NO_CTYPE 1
+#endif
#include <ctype.h>
#include <java-props.h>
#define PROCESS_GCJ_PROPERTIES process_gcj_properties()
--- a/src/libjava/prims.cc.orig 2012-01-20 11:30:23.042818341 +0100
+++ b/src/libjava/prims.cc 2012-01-20 11:31:01.389433254 +0100
@@ -38,6 +38,14 @@
#endif
#ifndef DISABLE_GETENV_PROPERTIES
+#ifdef __GLIBC__
+/* glibc 2.15+ provides even for C++ inline optimized ::isspace etc.
+ Unfortunately those inlines are throw (), and call a function pointer
+ (which is throw () too, but with -fnon-call-exceptions this results
+ in a __cxa_call_unexpected call. This macro disables the optimized
+ version. */
+#define __NO_CTYPE 1
+#endif
#include <ctype.h>
#include <java-props.h>
#define PROCESS_GCJ_PROPERTIES process_gcj_properties()
--- Makefile.in 2002-10-08 16:09:12.000000000 +0000
+++ Makefile.in.nochange 2010-11-03 21:17:38.579435530 +0000
@@ -14,10 +14,6 @@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_DATA = @INSTALL_DATA@
-# File ownership and group
-BINOWN = bin
-BINGRP = bin
-
MAKEINFO = makeinfo
TEXI2DVI = texi2dvi
@@ -131,11 +127,11 @@
$(INSTALL_ROOT)$(includedir) $(INSTALL_ROOT)$(man3dir) \
$(INSTALL_ROOT)$(infodir)
$(LIBTOOL) $(INSTALL) -c libgdbm.la $(INSTALL_ROOT)$(libdir)/libgdbm.la
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) gdbm.h \
+ $(INSTALL_DATA) gdbm.h \
$(INSTALL_ROOT)$(includedir)/gdbm.h
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/gdbm.3 \
+ $(INSTALL_DATA) $(srcdir)/gdbm.3 \
$(INSTALL_ROOT)$(man3dir)/gdbm.3
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/gdbm.info \
+ $(INSTALL_DATA) $(srcdir)/gdbm.info \
$(INSTALL_ROOT)$(infodir)/gdbm.info
install-compat:
@@ -143,9 +139,9 @@
$(INSTALL_ROOT)$(includedir)
$(LIBTOOL) $(INSTALL) -c libgdbm_compat.la \
$(INSTALL_ROOT)$(libdir)/libgdbm_compat.la
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/dbm.h \
+ $(INSTALL_DATA) $(srcdir)/dbm.h \
$(INSTALL_ROOT)$(includedir)/dbm.h
- $(INSTALL_DATA) -o $(BINOWN) -g $(BINGRP) $(srcdir)/ndbm.h \
+ $(INSTALL_DATA) $(srcdir)/ndbm.h \
$(INSTALL_ROOT)$(includedir)/ndbm.h
#libgdbm.a: $(OBJS) gdbm.h
@@ -13,10 +13,8 @@ parts =
[git]
recipe = hexagonit.recipe.cmmi
-# url = http://kernel.org/pub/software/scm/git/git-1.7.4.5.tar.bz2
-# Circumvent kernel.org downtime
-url = http://git-core.googlecode.com/files/git-1.7.8.tar.gz
-md5sum = 4a3c03a04dbb857ecc875dae1278b76e
+url = http://git-core.googlecode.com/files/git-1.7.8.3.tar.gz
+md5sum = 7a4bc5160166537d4da5eb48a7670dff
configure-options =
  --with-curl=${curl:location}
  --with-openssl=${openssl:location}
...
@@ -8,17 +8,15 @@ extends =
  ../fontconfig/buildout.cfg
  ../freetype/buildout.cfg
  ../libpng/buildout.cfg
-  ../libtool/buildout.cfg
  ../pkgconfig/buildout.cfg
  ../zlib/buildout.cfg
[graphviz]
recipe = hexagonit.recipe.cmmi
-url = http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.26.3.tar.gz
-md5sum = 6f45946fa622770c45609778c0a982ee
+url = http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.28.0.tar.gz
+md5sum = 8d26c1171f30ca3b1dc1b429f7937e58
configure-options =
-  --with-ltdl-include=${libtool:location}/include
-  --with-ltdl-lib=${libtool:location}/lib
+  --with-included-ltdl
  --with-pngincludedir=${libpng:location}/include
  --with-pnglibdir=${libpng:location}/lib
  --with-zincludedir=${zlib:location}/include
...
-# mroonga - a MySQL storage engine using full-text search engine groonga
-# http://mroonga.github.com/
+# groonga - an open-source fulltext search engine and column store
# http://groonga.org/
[buildout]
+extends =
+  ../autoconf/buildout.cfg
+  ../automake/buildout.cfg
+  ../glib/buildout.cfg
+  ../libtool/buildout.cfg
+  ../pkgconfig/buildout.cfg
parts =
  groonga
+[groonga-1.2.8-configure-Wno-cflags-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+md5sum = b48ac46c7de0ed4c0e632e06118b8a58
+filename = groonga-1.2.8-configure-Wno-cflags.patch
+download-only = true
[groonga]
recipe = hexagonit.recipe.cmmi
-url = http://packages.groonga.org/source/groonga/groonga-1.2.8.tar.gz
-md5sum = a319b1f3a55cbf250ef5255f5c51ff46
+url = http://packages.groonga.org/source/groonga/groonga-1.2.9.tar.gz
+md5sum = 47117baa401a3db08362e00f94fced12
+patch-options = -p0
+patches =
+  ${groonga-1.2.8-configure-Wno-cflags-patch:location}/${groonga-1.2.8-configure-Wno-cflags-patch:filename}
+environment =
+  PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s
+configure-command =
+  aclocal -I ${glib:location}/share/aclocal -I ${libtool:location}/share/aclocal -I ${pkgconfig:location}/share/aclocal -I .
+  libtoolize -c -f
+  autoheader
+  automake -c -a -f
+  autoconf
+  ./configure
configure-options =
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
  --disable-static
  --disable-glibtest
  --disable-benchmark
...
--- configure.ac.orig 2011-12-06 12:54:26.812408976 +0900
+++ configure.ac 2011-12-06 13:17:39.060465045 +0900
@@ -70,9 +70,10 @@
m4_pattern_allow(PKG_CONFIG_LIBDIR)])
AC_DEFUN([CHECK_CFLAG], [
+ cflag=$(echo "$1" | sed -e 's,^-Wno-,-W,')
AC_MSG_CHECKING([if gcc supports $1])
old_CFLAGS=$CFLAGS
- CFLAGS="$CFLAGS $1 -Werror"
+ CFLAGS="$CFLAGS $cflag -Werror"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])],
[check_cflag=yes],
[check_cflag=no])
@@ -84,9 +85,10 @@
])
AC_DEFUN([CHECK_CXXFLAG], [
+ cxxflag=$(echo "$1" | sed -e 's,^-Wno-,-W,')
AC_MSG_CHECKING([if g++ supports $1])
old_CXXFLAGS=$CXXFLAGS
- CXXFLAGS="$CXXFLAGS $1 -Werror"
+ CXXFLAGS="$CXXFLAGS $cxxflag -Werror"
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])],
[check_cxxflag=yes],
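# NOTE: illustrative sketch, not part of this commit. It mirrors the logic of the
# configure.ac patch above: gcc silently accepts unknown -Wno-* options, so probing
# "-Wno-foo" directly would always appear to succeed; the patch therefore tests the
# positive spelling together with -Werror. Compiler name and paths are assumptions.
import os
import subprocess
import tempfile

def compiler_supports(flag, cc='gcc'):
    # Probe the positive spelling, exactly like the patched CHECK_CFLAG macro.
    probe = flag.replace('-Wno-', '-W', 1) if flag.startswith('-Wno-') else flag
    with tempfile.NamedTemporaryFile(suffix='.c', delete=False) as src:
        src.write(b'int main(void) { return 0; }\n')
    try:
        with open(os.devnull, 'w') as devnull:
            return subprocess.call([cc, probe, '-Werror', '-c', src.name,
                                    '-o', os.devnull],
                                   stdout=devnull, stderr=devnull) == 0
    finally:
        os.unlink(src.name)

# e.g. compiler_supports('-Wno-missing-field-initializers')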
diff -ur groonga-storage-engine-0.4.orig/configure groonga-storage-engine-0.4/configure
--- groonga-storage-engine-0.4.orig/configure 2010-11-24 06:23:50.000000000 +0100
+++ groonga-storage-engine-0.4/configure 2011-01-01 16:01:07.000000000 +0100
@@ -13925,8 +13925,8 @@
as_fn_error "failed to run \"$ac_mysql_config\": $plugindir" "$LINENO" 5
fi
MYSQL_INC="$MYSQL_INC $($ac_mysql_config --include)"
- ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1-3`"
- if test "$ac_mysql_major_version" = "5.1"; then
+ ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1,3`"
+ if test $ac_mysql_major_version -lt 55; then
MYSQL51="-DMYSQL51"
fi
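# NOTE: illustrative sketch, not part of this commit. It reproduces the version test
# of the patched configure script above: `cut -b 1,3` keeps the first and third
# characters of `mysql_config --version` (e.g. "5.5.20" -> "55", "5.1.58" -> "51"),
# and the -DMYSQL51 define is only kept for servers older than 5.5.
def needs_mysql51_define(mysql_version):
    major = int(mysql_version[0] + mysql_version[2])  # like `cut -b 1,3`
    return major < 55

assert needs_mysql51_define('5.1.58')
assert not needs_mysql51_define('5.5.20')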
@@ -3,8 +3,8 @@ parts = haproxy
[haproxy]
recipe = hexagonit.recipe.cmmi
-url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.18.tar.gz
-md5sum = 4ac88bb1a76c4b84ed4f6131183bedbe
+url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.19.tar.gz
+md5sum = 41392d738460dbf99295fd928031c6a4
configure-command = true
# If the system is running on Linux 2.6, we use "linux26" as the TARGET,
# otherwise use "generic".
...
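# NOTE: illustrative sketch, not part of this commit. The comment in the haproxy
# section above ("linux26" on Linux 2.6, "generic" otherwise) corresponds to a
# TARGET choice like the one below; how the profile itself performs that detection
# is not shown in this hunk, so this helper is only an assumption about its shape.
import platform

def haproxy_make_target():
    if platform.system() == 'Linux' and platform.release().startswith('2.6'):
        return 'linux26'
    return 'generic'

# The value would then be passed to make as TARGET=<haproxy_make_target()>.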
[buildout]
parts =
libmemcached
[libmemcached]
<= libmemcached-0.50
[libmemcached-0.50]
<= libmemcached-common
url = http://launchpad.net/libmemcached/1.0/0.50/+download/libmemcached-0.50.tar.gz
md5sum = c8627014a37cd821cf93317b8de6f9f8
[libmemcached-0.44]
<= libmemcached-common
url = http://launchpad.net/libmemcached/1.0/0.44/+download/libmemcached-0.44.tar.gz
md5sum = e6bd825c46fa080b550f90f9001cba8c
[libmemcached-common]
recipe = hexagonit.recipe.cmmi
configure-options =
--without-docs
--without-memcached
--without-libgtest-prefix
--without-libevent-prefix
--without-libinnodb-prefix
--without-libsasl-prefix
--without-libsasl2-prefix
[buildout]
extends =
../curl/buildout.cfg
../libevent/buildout.cfg
../erlang/buildout.cfg
parts =
membase-source
membase-memcached
membase-libmemcached
membase-libconflate
membase-libvbucket
membase-moxi
membase-bucket_engine
membase-ep-engine
membase-membase-cli
membase-memcachetest
membase-ns_server
membase-vbucketmigrator
[membase]
location = ${buildout:parts-directory}/${:_buildout_section_name_}
[membase-source]
<= membase-1.7.0-source
[membase-1.7.0-source]
<= membase-source-common
url = http://files.couchbase.com/source/membase-server_src-1.7.0.tar.gz
md5sum = c933fffea299d00e43b002cb65738663
[membase-source-common]
recipe = hexagonit.recipe.download
strip-top-level-dir = true
[membase-component-common]
recipe = hexagonit.recipe.cmmi
path = ${membase-source:location}/${:component}
prefix = ${membase:location}
configure-options =
--prefix=${:prefix}
${:component-configure-options}
[membase-libmemcached]
<= membase-component-common
component = libmemcached
component-configure-options =
--disable-static
--enable-shared
--disable-dtrace
--without-docs
--disable-sasl
--with-memcached=${membase:location}/bin/memcached
[membase-memcached]
<= membase-component-common
component = memcached
patches =
configure-command =
./configure
component-configure-options =
--enable-isasl
[membase-libconflate]
<= membase-component-common
component = libconflate
component-configure-options =
--disable-static
--enable-shared
--without-check
--with-libcurl-prefix=${curl:location}
[membase-libvbucket]
<= membase-component-common
component = libvbucket
component-configure-options =
--disable-static
--enable-shared
--without-docs
--with-libhashkit-prefix=${membase:location}
[membase-moxi]
<= membase-component-common
component = moxi
component-configure-options =
--enable-moxi-libvbucket
--enable-moxi-libmemcached
--without-check
--with-libevent-prefix=${libevent:location}
--with-libmemcached-prefix=${membase:location}
--with-memcached=${membase:location}/bin/memcached
--with-libhashkit-prefix=${membase:location}
--with-libconflate-prefix=${membase:location}
--with-libvbucket-prefix=${membase:location}
[membase-bucket_engine]
<= membase-component-common
component = bucket_engine
component-configure-options =
--with-memcached=${membase-source:location}/memcached
[membase-ep-engine]
<= membase-component-common
component = ep-engine
component-configure-options =
--with-memcached=${membase-source:location}/memcached
[membase-membase-cli]
<= membase-component-common
component = membase-cli
patches =
configure-command =
./configure
component-configure-options =
[membase-memcachetest]
<= membase-component-common
component = memcachetest
component-configure-options =
--with-memcached=${membase:location}/bin/memcached
[membase-ns_server]
<= membase-component-common
component = ns_server
patches =
configure-command =
./configure
component-configure-options =
environment =
PATH=${erlang:location}/bin:%(PATH)s
[membase-vbucketmigrator]
<= membase-component-common
component = vbucketmigrator
component-configure-options =
--without-sasl
--with-isasl
[buildout]
extends =
../libmemcached/buildout.cfg
../git/buildout.cfg
../autoconf/buildout.cfg
../automake/buildout.cfg
../libtool/buildout.cfg
parts =
memstrike
[memstrike]
recipe = hexagonit.recipe.cmmi
path = ${memstrikesource:location}
configure-command =
./bootstrap
./configure
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--with-libmemcached=${libmemcached:location}
environment =
PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s
LDFLAGS =-Wl,-rpath=${libmemcached:location}/lib
[memstrikesource]
recipe=plone.recipe.command
location = ${buildout:parts-directory}/${:_buildout_section_name_}
stop-on-error = true
revision = 39d7a99e8bb7eea6df8b
command =
rm -rf ${:location} &&
${git:location}/bin/git clone --quiet git://github.com/frsyuki/memstrike.git ${:location} &&
cd ${:location} &&
${git:location}/bin/git checkout --quiet ${:revision}
[buildout]
parts = nspr
[nspr-pkgconfig-patch-download]
recipe = hexagonit.recipe.download
filename = nspr-4.8.6-pkgconfig-1.patch
url = http://www.linuxfromscratch.org/patches/blfs/svn/${:filename}
md5sum = 7c00beff0475314f59214842509e407f
download-only = true
[nspr]
recipe = hexagonit.recipe.cmmi
url = https://ftp.mozilla.org/pub/mozilla.org/nspr/releases/v4.8.7/src/nspr-4.8.7.tar.gz
md5sum = 97e30989a56ab813453b71261849c200
patches = ${nspr-pkgconfig-patch-download:location}/${nspr-pkgconfig-patch-download:filename}
patch-options = -p1
configure-command =
cd mozilla/nsprpub
./configure
configure-options =
--prefix=${buildout:parts-directory}/${:_buildout_section_name_}
--with-mozilla
--enable-64bit
make-options =
-C mozilla/nsprpub
[buildout]
extends =
https://svn.erp5.org/repos/public/erp5/trunk/buildout/software-profiles/zlib.cfg
nspr.cfg
sed.cfg
parts =
nss
[nss-patch]
recipe = hexagonit.recipe.download
filename = nss-3.12.9-with-nspr-4.8.7-1.patch
url = http://www.linuxfromscratch.org/patches/blfs/svn/nss-3.12.8-standalone-1.patch
md5sum = ee7b5966961bef16ca896435e78652d3
download-only = true
[nss-download]
recipe = hexagonit.recipe.download
url = ftp://ftp.mozilla.org/pub/mozilla.org/security/nss/releases/NSS_3_12_9_RTM/src/nss-3.12.9.tar.gz
md5sum = bd32f183ca28440c1744650be31a9ccc
strip-top-level-dir = true
[nss]
recipe = plone.recipe.command
source = ${nss-download:location}
destination = ${buildout:parts-directory}/${:_buildout_section_name_}
location = ${buildout:parts-directory}/${:_buildout_section_name_}
compile-location = ${buildout:parts-directory}/${:_buildout_section_name_}_compile_
stop-on-error = true
command =
rm -rf ${:destination} &&
mkdir -p ${:destination} &&
rm -rf ${:compile-location} &&
cp -R ${:source} ${:compile-location} &&
cd ${:compile-location} &&
patch -Np1 -i ${nss-patch:location}/${nss-patch:filename} &&
${sed:location}/bin/sed -i "s/ZLIB_LIBS = -lz//g" ${:compile-location}/mozilla/security/coreconf/Linux.mk &&
${sed:location}/bin/sed -i "s/# INCLUDES += -I\/usr\/include -Y\/usr\/include\/linux/INCLUDES += \$(ZLIB_INCLUDE_DIR)/g" ${:compile-location}/mozilla/security/coreconf/Linux.mk &&
gmake -C mozilla/security/nss \
USE_64=1 \
BUILD_OPT=1 \
ZLIB_INCLUDE_DIR=-I${zlib:location}/include \
ZLIB_LIBS="-lz -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib" \
NSPR_INCLUDE_DIR=${nspr:location}/include/nspr \
NSPR_LIB_DIR="${nspr:location}/lib -Wl,-rpath=${nspr:location}/lib" \
NSSUTIL_LIB_DIR="${:location}/lib -Wl,-rpath=${:location}/lib" \
nss_build_all &&
mkdir -p ${:destination}/bin &&
mkdir -p ${:destination}/include/nss3 &&
mkdir -p ${:destination}/lib/pkgconfig &&
install -v -m755 ${:compile-location}/mozilla/dist/*.OBJ/lib/*.so ${:destination}/lib &&
install -v -m644 ${:compile-location}/mozilla/dist/*.OBJ/lib/{*.chk,libcrmf.a} ${:destination}/lib &&
install -v -m755 -d ${:destination}/include/nss &&
install -v -m755 ${:compile-location}/mozilla/dist/*.OBJ/bin/{certutil,nss-config,pk12util} ${:destination}/bin &&
install -v -m644 ${:compile-location}/mozilla/dist/*.OBJ/lib/pkgconfig/nss.pc ${:destination}/lib/pkgconfig &&
cp -v -RL ${:compile-location}/mozilla/dist/{public,private}/nss/* ${:destination}/include/nss &&
chmod 644 ${:destination}/include/nss/* &&
cd ${buildout:parts-directory} &&
rm -rf ${:compile-location}
[nss-cmmi]
recipe = hexagonit.recipe.cmmi
path = ${nss-download:location}/mozilla/security/nss
configure-command = echo "No need to configure"
make-binary = gmake
make-options = USE_64=1
make-targets = nss_build_all
@@ -5,15 +5,27 @@
[buildout]
extends =
+  ../ca-certificates/buildout.cfg
  ../zlib/buildout.cfg
parts =
  openssl
+[openssl-nodoc.patch]
+# Disable doc generation part in Makefile
+recipe = hexagonit.recipe.download
+md5sum = b4887a7b4e18402447bc6227d2493b92
+url = ${:_profile_base_location_}/${:filename}
+filename = ${:_buildout_section_name_}
+download-only = true
[openssl]
recipe = hexagonit.recipe.cmmi
-url = https://www.openssl.org/source/openssl-1.0.0e.tar.gz
-md5sum = 7040b89c4c58c7a1016c0dfa6e821c86
+url = https://www.openssl.org/source/openssl-1.0.0g.tar.gz
+md5sum = 07ecbe4324f140d157478637d6beccf1
+patches =
+  ${openssl-nodoc.patch:location}/${openssl-nodoc.patch:filename}
+patch-options = -p0
configure-command = ./config
configure-options =
  -I${zlib:location}/include
@@ -27,5 +39,7 @@ configure-options =
# it seems that parallel build sometimes fails for openssl.
make-options =
  -j1
+make-targets =
+  install && rm -f ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/* && for i in ${ca-certificates:location}/certs/*/*.crt; do ln -sv $i ${buildout:parts-directory}/${:_buildout_section_name_}/etc/ssl/certs/`${buildout:parts-directory}/${:_buildout_section_name_}/bin/openssl x509 -hash -noout -in $i`.0; done; true
  LDFLAGS="-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib"
  SHARED_LDFLAGS="-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${buildout:parts-directory}/${:_buildout_section_name_}/lib"
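# NOTE: illustrative sketch, not part of this commit. The make-targets one-liner in
# the [openssl] section above wipes etc/ssl/certs and repopulates it with symlinks
# named after each CA certificate's subject hash (`openssl x509 -hash -noout -in`),
# which is how OpenSSL locates trusted certificates. Paths below are placeholders.
import glob
import os
import subprocess

def rehash_ca_certs(openssl_binary, ca_certificates_location, certs_dir):
    for existing in glob.glob(os.path.join(certs_dir, '*')):
        os.unlink(existing)                      # rm -f .../etc/ssl/certs/*
    for crt in glob.glob(os.path.join(ca_certificates_location,
                                      'certs', '*', '*.crt')):
        cert_hash = subprocess.check_output(
            [openssl_binary, 'x509', '-hash', '-noout', '-in', crt]).strip()
        # create the <hash>.0 symlink pointing at the real certificate
        os.symlink(crt, os.path.join(certs_dir, cert_hash.decode() + '.0'))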
--- Makefile 2012-01-18 14:42:28.000000000 +0100
+++ Makefile 2012-01-24 17:43:40.000000000 +0100
@@ -494,7 +494,7 @@
dist_pem_h:
(cd crypto/pem; $(MAKE) -e $(BUILDENV) pem.h; $(MAKE) clean)
-install: all install_docs install_sw
+install: all install_sw
install_sw:
@$(PERL) $(TOP)/util/mkdir-p.pl $(INSTALL_PREFIX)$(INSTALLTOP)/bin \
@@ -4,8 +4,8 @@ parts =
[pcre]
recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.12.tar.bz2
-md5sum = f14a9fef3c92f3fc6c5ac92d7a2c7eb3
+url = ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.21.tar.bz2
+md5sum = 0a7b592bea64b7aa7f4011fc7171a730
configure-options =
  --disable-static
  --enable-utf8
...
@@ -10,7 +10,7 @@ parts =
recipe = hexagonit.recipe.cmmi
depends =
  ${perl:version}
-url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-1.0.1.tar.gz
-md5sum = 1d843b1b3ebd2eacfa3bf95ef2a00557
+url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-2.0.1.tar.gz
+md5sum = 3a78c78672cb7c634bda35dfb2f817bf
configure-command =
  ${perl:location}/bin/perl Makefile.PL
@@ -2,8 +2,15 @@
parts =
  readline5
  readline
-extends =
-  ../ncurses/buildout.cfg
+# readline-5.x is still used for GPL2 only softwares.
+[readline5]
+recipe = hexagonit.recipe.cmmi
+url = http://ftp.gnu.org/gnu/readline/readline-5.2.tar.gz
+md5sum = e39331f32ad14009b9ff49cc10c5e751
+configure-options =
+  --enable-multibyte
+  --disable-static
# readline-5.x is still used for GPL2 only softwares.
[readline5]
@@ -22,7 +29,5 @@ recipe = hexagonit.recipe.cmmi
url = http://ftp.gnu.org/gnu/readline/readline-6.2.tar.gz
md5sum = 67948acb2ca081f23359d0256e9a271c
configure-options =
+  --enable-multibyte
  --disable-static
-  --with-ncurses=${ncurses:location}
-environment =
-  LDFLAGS =-Wl,-rpath=${ncurses:location}/lib
@@ -6,7 +6,5 @@ parts =
recipe = hexagonit.recipe.cmmi
url = ftp://ftp.gnu.org/gnu/sed/sed-4.2.1.tar.gz
md5sum = f0fd4d7da574d4707e442285fd2d3b86
-configure-options =
-  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
environment =
  LDFLAGS =-Wl,--as-needed
[buildout]
parts = sheepstrike
[sheepstrike]
recipe = hexagonit.recipe.cmmi
url = https://gitorious.org/sheepstrike/sheepstrike/archive-tarball/0.1
md5sum = 2c5009eb7c32d7ba5d270d0b88d7e5ab
prefix = ${buildout:parts-directory}/${:_buildout_section_name_}
configure-options = --prefix=${:prefix}
configure-command =
./bootstrap
./configure
@@ -113,22 +113,24 @@ scripts =
[versions]
zc.buildout = 1.6.0-dev-SlapOS-003
+# Generated by buildout-versions
Jinja2 = 2.6
-Werkzeug = 0.8.1
+Werkzeug = 0.8.2
+buildout-versions = 1.7
collective.recipe.template = 1.9
hexagonit.recipe.cmmi = 1.5.0
-lxml = 2.3.2
-meld3 = 0.6.7
+lxml = 2.3.3
+meld3 = 0.6.8
netaddr = 0.7.6
-setuptools = 0.6c12dev-r88846
-slapos.core = 0.20
-slapos.libnetworkcache = 0.10
+slapos.core = 0.21
+slapos.libnetworkcache = 0.11
xml-marshaller = 0.9.7
z3c.recipe.scripts = 1.0.1
zc.recipe.egg = 1.3.2
# Required by:
-# slapos.core==0.20
+# slapos.core==0.21
Flask = 0.8
# Required by:
@@ -136,13 +138,21 @@ Flask = 0.8
hexagonit.recipe.download = 1.5.0
# Required by:
-# slapos.core==0.20
+# slapos.core==0.21
netifaces = 0.6
# Required by:
-# slapos.core==0.20
-supervisor = 3.0a10
+# slapos.core==0.21
+# slapos.libnetworkcache==0.11
+# supervisor==3.0a12
+# zc.buildout==1.6.0-dev-SlapOS-003
+# zope.interface==3.8.0
+setuptools = 0.6c12dev-r88846
+# Required by:
+# slapos.core==0.21
+supervisor = 3.0a12
# Required by:
-# slapos.core==0.20
+# slapos.core==0.21
zope.interface = 3.8.0
\ No newline at end of file
# http://www.sphinxsearch.com/bugs/view.php?id=550
# 0000550: Can not make libsphinxclient
--- sphinx-1.10-beta/api/libsphinxclient/sphinxclient.c 2010-07-15 13:05:40.000000000 +0200
+++ sphinx-1.10-beta/api/libsphinxclient/sphinxclient.c 2010-07-21 20:43:26.760024489 +0200
@@ -1355,11 +1355,13 @@
optval = 1;
#ifndef _WIN32
+ #ifdef SO_NOSIGPIPE
if ( setsockopt ( sock, SOL_SOCKET, SO_NOSIGPIPE, (void *)&optval, (socklen_t)sizeof(optval) ) < 0 )
{
set_error ( client, "setsockopt() failed: %s", sock_error() );
return -1;
}
+ #endif
#endif
res = connect ( sock, (struct sockaddr*)&sa, sizeof(sa) );
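# NOTE: illustrative sketch, not part of this commit. The C patch above wraps the
# SO_NOSIGPIPE setsockopt() call in "#ifdef SO_NOSIGPIPE" because that socket option
# only exists on some platforms (e.g. BSD/macOS); Linux does not define it. The same
# "only use it where the platform provides it" guard looks like this in Python.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, 'SO_NOSIGPIPE'):   # absent on Linux, like the missing C macro
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_NOSIGPIPE, 1)
sock.close()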
@@ -17,8 +17,8 @@ filename = stunnel-4-hooks.py
[stunnel-4]
recipe = hexagonit.recipe.cmmi
-url = http://mirror.bit.nl/stunnel/stunnel-4.50.tar.gz
-md5sum = d68b4565294496a8bdf23c728a679f53
+url = http://mirror.bit.nl/stunnel/stunnel-4.52.tar.gz
+md5sum = f5e713dda0e8efa659f372832ecd0c2c
pre-configure-hook = ${stunnel-4-hook-download:location}/${stunnel-4-hook-download:filename}:pre_configure_hook
configure-options =
  --enable-ipv6
...
@@ -4,8 +4,8 @@ parts =
[zabbix-agent]
recipe = hexagonit.recipe.cmmi
-url = http://prdownloads.sourceforge.net/zabbix/zabbix-1.8.9.tar.gz?download
-md5sum = fc8c7fad2943dea73d4f2338cd216715
+url = http://prdownloads.sourceforge.net/zabbix/zabbix-1.8.10.tar.gz?download
+md5sum = 7e89f80c1822787c0831f7c0dbefcd7b
configure-options =
  --enable-agent
  --enable-ipv6
@@ -46,6 +46,7 @@ setup(name=name,
        'cron.d = slapos.recipe.dcron:Part',
        'davstorage = slapos.recipe.davstorage:Recipe',
        'duplicity = slapos.recipe.duplicity:Recipe',
+       'erp5scalabilitytestbed = slapos.recipe.erp5scalabilitytestbed:Recipe',
        'erp5testnode = slapos.recipe.erp5testnode:Recipe',
        'helloworld = slapos.recipe.helloworld:Recipe',
        'generic.cloudooo = slapos.recipe.generic_cloudooo:Recipe',
@@ -95,5 +96,9 @@ setup(name=name,
        'tidstorage = slapos.recipe.tidstorage:Recipe',
        'erp5.update = slapos.recipe.erp5_update:Recipe',
        'erp5.test = slapos.recipe.erp5_test:Recipe',
-       ]},
+       ],
+       'slapos.recipe.nosqltestbed.plugin': [
+         'kumo = slapos.recipe.nosqltestbed.kumo:KumoTestBed',
+       ],
+      },
      )
@@ -109,10 +109,13 @@ class Recipe(BaseSlapRecipe):
  def createCertificate(self, size=1024, subject='/C=FR/L=Marcq-en-Baroeul/O=Nexedi'):
    key_file = os.path.join(self.etc_directory, 'httpd.key')
-    self.path_list.append(key_file)
    certificate_file = os.path.join(self.etc_directory, 'httpd.crt')
-    self.path_list.append(certificate_file)
+    files = [key_file, certificate_file, ]
+    if not all([os.path.exists(f) for f in files]):
+      for f in files:
+        if os.path.exists(f):
+          os.unlink(f)
    subprocess.check_call([self.options['openssl_binary'],
      'req', '-x509', '-nodes',
@@ -121,5 +124,6 @@ class Recipe(BaseSlapRecipe):
      '-out', certificate_file,
      '-keyout', key_file
      ])
    return dict(key=key_file,
      certificate=certificate_file)
##############################################################################
#
# Copyright (c) 2011 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs End users who
# are looking for a ready-to-use solution with commercial guarantees and
# support are strongly adviced to contract a Free Software Service Company
#
# This program is Free Software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import pkg_resources
from slapos.recipe.librecipe import BaseSlapRecipe
class Recipe(BaseSlapRecipe):
  def _install(self):
    self.parameter_dict = self.computer_partition.getInstanceParameterDict()
    software_type = self.parameter_dict.get('slap_software_type', 'default')
    if software_type is None or software_type == 'RootSoftwareInstance':
      software_type = 'erp5_scalability_cloud'
    if "run_%s" % software_type in dir(self) and \
        callable(getattr(self, "run_%s" % software_type)):
      return getattr(self, "run_%s" % software_type)()
    else:
      raise NotImplementedError("Do not support %s" % software_type)

  def run_erp5_scalability_cloud(self):
    config = {}
    config.update(self.options)
    config.update(self.parameter_dict)
    config['address'] = self.getGlobalIPv6Address()
    config['report_path'] = self.log_directory
    config.setdefault('user_range_increment', 1)
    config['software_release_url'] = self.software_release_url
    config['server_url'] = self.server_url
    config['key_file'] = self.key_file
    config['cert_file'] = self.cert_file
    config['computer_id'] = self.computer_id
    config['computer_partition_id'] = self.computer_partition_id
    config['plugin_name'] = 'erp5'
    if ',' in config['nb_users']:
      config['nb_tester_init'] = config['nb_users'].split(',')[0]
      config['nb_tester_max'] = config['nb_users'].split(',')[1]
    else:
      config['nb_tester_init'] = config['nb_users']
      config['nb_tester_max'] = config['nb_users']
    connection = {}
    connection['url'] = 'http://['+config['address']+']:5000/'
    connection['erp5_url'] = config['erp5_url']
    connection['repeat'] = config['repeat']
    connection['nb_users'] = config['nb_users']
    connection['benchmark_suites'] = config['benchmark_suites']
    connection['erp5_publish_url'] = config.get('erp5_publish_url', '')
    connection['erp5_publish_project'] = config.get('erp5_publish_project', '')
    self.computer_partition.setConnectionDict(connection)
    nosqltester_manager_wrapper_template_location = pkg_resources.resource_filename(
        __name__, os.path.join('template', 'erp5tester_manager_run.in'))
    nosqltester_manager_runner_path = self.createRunningWrapper("erp5tester_manager",
        self.substituteTemplate(nosqltester_manager_wrapper_template_location, config))
    return [nosqltester_manager_runner_path]

  def run_erp5_tester(self):
    tester_config = {}
    tester_config.update(self.options)
    tester_config.update(self.parameter_dict)
    tester_config['tester_address'] = self.getGlobalIPv6Address()
    tester_config['report_path'] = self.log_directory
    tester_config['filename_prefix'] = '%s-%s' % (self.computer_id,
        self.computer_partition_id)
    tester_connection = {'url': 'http://[%s]:5000/' % \
        tester_config['tester_address']}
    self.computer_partition.setConnectionDict(tester_connection)
    tester_wrapper_template_location = pkg_resources.resource_filename(
        __name__, os.path.join('template', 'nosqltester_run.in'))
    tester_runner_path = self.createRunningWrapper("nosqltester",
        self.substituteTemplate(tester_wrapper_template_location, tester_config))
    return [tester_runner_path]
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(scalability_tester_manager_binary)s %(plugin_name)s -a %(address)s \
-r %(report_path)s -m %(nb_tester_init)s -t %(nb_tester_max)s \
-i %(user_range_increment)s --erp5-publish-url "%(erp5_publish_url)s" \
--erp5-publish-project "%(erp5_publish_project)s" %(software_release_url)s \
%(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s \
%(computer_partition_id)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
cd %(benchmark_suite_path)s && \
exec %(scalability_tester_binary)s -m %(host_address)s -a %(tester_address)s \
-r %(report_path)s -l %(report_path)s --filename-prefix %(filename_prefix)s \
--repeat %(repeat)s %(erp5_url)s 1 %(benchmark_suites)s
@@ -101,14 +101,18 @@ class Recipe(GenericSlapRecipe):
      part_list.append(zope_id)
      part_list.append('logrotate-entry-%s' % zope_id)
      output += snippet_zope % dict(zope_thread_amount=1, zope_id=zope_id,
-        zope_port=current_zope_port, zope_timeserver=True, **zope_dict)
+        zope_port=current_zope_port, zope_timeserver=True,
+        longrequest_logger_file='', longrequest_logger_timeout='',
+        longrequest_logger_interval='', **zope_dict)
      # always one admin node
      current_zope_port += 1
      zope_id = 'zope-admin'
      part_list.append(zope_id)
      part_list.append('logrotate-entry-%s' % zope_id)
      output += snippet_zope % dict(zope_thread_amount=1, zope_id=zope_id,
-        zope_port=current_zope_port, zope_timeserver=False, **zope_dict)
+        zope_port=current_zope_port, zope_timeserver=False,
+        longrequest_logger_file='', longrequest_logger_timeout='',
+        longrequest_logger_interval='', **zope_dict)
      # handle activity key
      for q in range(1, json_data['activity']['zopecount'] + 1):
        current_zope_port += 1
@@ -116,7 +120,9 @@ class Recipe(GenericSlapRecipe):
        part_list.append(part_name)
        part_list.append('logrotate-entry-%s' % part_name)
        output += snippet_zope % dict(zope_thread_amount=1, zope_id=part_name,
-          zope_port=current_zope_port, zope_timeserver=True, **zope_dict)
+          zope_port=current_zope_port, zope_timeserver=True,
+          longrequest_logger_file='', longrequest_logger_timeout='',
+          longrequest_logger_interval='', **zope_dict)
      # handle backend key
      snippet_backend = open(self.options['snippet-backend']).read()
      publish_url_list = []
@@ -127,9 +133,22 @@ class Recipe(GenericSlapRecipe):
        part_name = 'zope-%s-%s' % (backend_name, q)
        part_list.append(part_name)
        part_list.append('logrotate-entry-%s' % part_name)
+        longrequest_logger = backend_configuration.get("longrequest-logger", None)
+        if longrequest_logger is not None:
+          longrequest_part_name = '%s-longrequest' %part_name
+          longrequest_logger_file = '${basedirectory:log}/%s.log' \
+              %longrequest_part_name
+          longrequest_logger_timeout = longrequest_logger.get('timeout', '4')
+          longrequest_logger_interval = longrequest_logger.get('interval', '2')
+        else:
+          longrequest_logger_file = longrequest_logger_timeout = \
+              longrequest_logger_interval = ''
        output += snippet_zope % dict(
            zope_thread_amount=backend_configuration['thread-amount'],
            zope_id=part_name, zope_port=current_zope_port, zope_timeserver=False,
+            longrequest_logger_file=longrequest_logger_file,
+            longrequest_logger_timeout=longrequest_logger_timeout,
+            longrequest_logger_interval=longrequest_logger_interval,
            **zope_dict)
        haproxy_backend_list.append('${%(part_name)s:ip}:${%(part_name)s:port}' % dict(part_name=part_name))
...
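# NOTE: illustrative sketch, not part of this commit. Based on the keys read in the
# hunk above (backend_configuration['thread-amount'] and the optional
# "longrequest-logger" mapping with 'timeout'/'interval'), a backend entry enabling
# the long-request logger would look roughly like this; the exact surrounding JSON
# structure and the values are assumptions.
backend_configuration = {
    "thread-amount": 4,
    "longrequest-logger": {
        "timeout": "4",    # seconds before a request counts as "long" (default '4')
        "interval": "2",   # seconds between dumps of pending requests (default '2')
    },
}
# Omitting "longrequest-logger" leaves the three longrequest_logger_* values empty,
# which the zope part (next hunk) then treats as "logger disabled".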
@@ -117,6 +117,20 @@ class Recipe(GenericBaseRecipe):
      PATH=self.options['bin-path'],
      TZ=self.options['timezone'],
    )
+    # longrequestlogger product which requires environment settings
+    longrequest_logger_file = self.options.get('longrequest-logger-file', None)
+    longrequest_logger_timeout = \
+        self.options.get('longrequest-logger-timeout', None)
+    longrequest_logger_interval= \
+        self.options.get('longrequest-logger-interval', None)
+    if longrequest_logger_file:
+      # add needed zope configuration
+      zope_environment.update(
+        **dict(longrequestlogger_file = longrequest_logger_file,
+               longrequestlogger_timeout = longrequest_logger_timeout,
+               longrequestlogger_interval = longrequest_logger_interval))
    # configure default Zope2 zcml
    open(self.options['site-zcml'], 'w').write(open(self.getTemplateFilename(
        'site.zcml')).read())
...
@@ -25,202 +25,31 @@
#
##############################################################################
-import os
-import urllib
-import urllib2
+import sys
import pkg_resources
+from logging import Formatter
from slapos.recipe.librecipe import BaseSlapRecipe

class NoSQLTestBed(BaseSlapRecipe):
  def _install(self):
    self.parameter_dict = self.computer_partition.getInstanceParameterDict()
+    try:
+      entry_point = pkg_resources.iter_entry_points(group='slapos.recipe.nosqltestbed.plugin',
+          name=self.parameter_dict.get('plugin', 'kumo')).next()
+      plugin_class = entry_point.load()
+      testbed = plugin_class()
+    except:
+      print Formatter().formatException(sys.exc_info())
+      return None
    software_type = self.parameter_dict.get('slap_software_type', 'default')
    if software_type is None or software_type == 'RootSoftwareInstance':
-      software_type = 'kumo_cloud'
+      software_type = 'default'
-    if "run_%s" % software_type in dir(self) and \
-        callable(getattr(self, "run_%s" % software_type)):
-      return getattr(self, "run_%s" % software_type)()
+    if "run_%s" % software_type in dir(testbed) and \
+        callable(getattr(testbed, "run_%s" % software_type)):
+      return getattr(testbed, "run_%s" % software_type)(self)
    else:
      raise NotImplementedError("Do not support %s" % software_type)
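# NOTE: illustrative sketch, not part of this commit. It shows how the entry point
# declared in setup.py ('kumo = slapos.recipe.nosqltestbed.kumo:KumoTestBed' in the
# 'slapos.recipe.nosqltestbed.plugin' group) is resolved, mirroring the _install()
# code above; 'kumo' is the fallback plugin name used there.
import pkg_resources

def load_testbed(plugin_name='kumo'):
    entry_point = next(pkg_resources.iter_entry_points(
        group='slapos.recipe.nosqltestbed.plugin', name=plugin_name))
    plugin_class = entry_point.load()   # imports slapos.recipe.nosqltestbed.kumo
    return plugin_class()               # e.g. a KumoTestBed instance

# The recipe then dispatches to testbed.run_<software_type>(self) on this object.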
def run_kumo_cloud(self):
""" Deploy kumofs systeom on a cloud. """
kumo_cloud_config = {}
kumo_cloud_config.update(self.options)
kumo_cloud_config.update(self.parameter_dict)
kumo_cloud_config['address'] = self.getGlobalIPv6Address()
kumo_cloud_config['report_path'] = self.log_directory
if 'nb_server_max' not in kumo_cloud_config:
kumo_cloud_config['nb_server_max'] = 3
if 'nb_tester_max' not in kumo_cloud_config:
kumo_cloud_config['nb_tester_max'] = 3
if 'nb_thread' not in kumo_cloud_config:
kumo_cloud_config['nb_thread'] = 1
if 'nb_request' not in kumo_cloud_config:
kumo_cloud_config['nb_request'] = 1000
kumo_cloud_config['software_release_url'] = self.software_release_url
kumo_cloud_config['server_url'] = self.server_url
kumo_cloud_config['key_file'] = self.key_file
kumo_cloud_config['cert_file'] = self.cert_file
kumo_cloud_config['computer_id'] = self.computer_id
kumo_cloud_config['computer_partition_id'] = self.computer_partition_id
kumo_cloud_config['plugin_name'] = 'kumo'
kumo_cloud_connection = {}
kumo_cloud_connection['url'] = "http://["+kumo_cloud_config['address']+"]:5000/"
self.computer_partition.setConnectionDict(kumo_cloud_connection)
nosqltester_manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumotester_manager_run.in'))
nosqltester_manager_runner_path = self.createRunningWrapper("kumotester_manager",
self.substituteTemplate(nosqltester_manager_wrapper_template_location, kumo_cloud_config))
return [nosqltester_manager_runner_path]
def run_all(self):
""" Runs all services on one machine. """
all_config = {}
all_config.update(self.options)
ipaddress = "[%s]" % self.getGlobalIPv6Address()
all_config['manager_address'] = ipaddress
all_config['manager_port'] = 19700
all_config['server_address'] = ipaddress
all_config['server_port'] = 19800
all_config['server_listen_port'] = 19900
all_config['server_storage'] = os.path.join(self.data_root_directory, "kumodb.tch")
all_config['gateway_address'] = ipaddress
all_config['gateway_port'] = 11411
all_config['manager_log'] = os.path.join(self.log_directory, "kumo-manager.log")
all_config['server_log'] = os.path.join(self.log_directory, "kumo-server.log")
all_config['gateway_log'] = os.path.join(self.log_directory, "kumo-gateway.log")
manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_manager_run.in'))
manager_runner_path = self.createRunningWrapper("kumo-manager",
self.substituteTemplate(manager_wrapper_template_location, all_config))
server_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_server_run.in'))
server_runner_path = self.createRunningWrapper("kumo-server",
self.substituteTemplate(server_wrapper_template_location, all_config))
gateway_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_gateway_run.in'))
gateway_runner_path = self.createRunningWrapper("kumo-gateway",
self.substituteTemplate(gateway_wrapper_template_location, all_config))
return [manager_runner_path, server_runner_path, gateway_runner_path]
def run_kumo_manager(self, ipaddress=None):
""" Runs the kumofs manager. """
manager_config = {}
manager_config.update(self.options)
if ipaddress is None:
manager_config['manager_address'] = "[%s]" % self.getGlobalIPv6Address()
else:
manager_config['manager_address'] = ipaddress
manager_config['manager_port'] = 19700
manager_config['manager_log'] = os.path.join(self.log_directory, "kumo-manager.log")
manager_connection = {}
manager_connection['address'] = manager_config['manager_address']
manager_connection['port'] = manager_config['manager_port']
self.computer_partition.setConnectionDict(manager_connection)
manager_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_manager_run.in'))
manager_runner_path = self.createRunningWrapper("kumo-manager",
self.substituteTemplate(manager_wrapper_template_location, manager_config))
return [manager_runner_path]
def run_kumo_server(self):
""" Runs the kumofs server. """
server_config = {}
server_config.update(self.options)
server_config.update(self.parameter_dict)
server_config['server_address'] = "[%s]" % self.getGlobalIPv6Address()
server_config['server_port'] = 19800
server_config['server_listen_port'] = 19900
server_config['server_storage'] = os.path.join(self.var_directory,"kumodb.tch")
server_config['server_log'] = os.path.join(self.log_directory, "kumo-server.log")
server_connection = {}
server_connection['address'] = server_config['server_address']
self.computer_partition.setConnectionDict(server_connection)
server_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_server_run.in'))
server_runner_path = self.createRunningWrapper("kumo-server",
self.substituteTemplate(server_wrapper_template_location, server_config))
return [server_runner_path]
def run_kumo_gateway(self):
""" Runs the kumofs gateway. """
gateway_config = {}
gateway_config.update(self.options)
gateway_config.update(self.parameter_dict)
gateway_config['gateway_address'] = "[%s]" % self.getGlobalIPv6Address()
gateway_config['gateway_port'] = 11411
gateway_config['gateway_log'] = os.path.join(self.log_directory, "kumo-gateway.log")
gateway_connection = {}
gateway_connection['address'] = gateway_config['gateway_address']
gateway_connection['port'] = gateway_config['gateway_port']
self.computer_partition.setConnectionDict(gateway_connection)
gateway_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'kumo_gateway_run.in'))
gateway_runner_path = self.createRunningWrapper("kumo-gateway",
self.substituteTemplate(gateway_wrapper_template_location, gateway_config))
return [gateway_runner_path]
def run_kumo_tester(self):
""" Runs the kumofs tester. """
tester_config = {}
tester_config.update(self.options)
tester_config.update(self.parameter_dict)
tester_config['tester_address'] = self.getGlobalIPv6Address()
# tester_config['url'] = "http://%s:5000/" % tester_config['tester_address']
# tester_config['start_url'] = "http://%s:5000/start" % tester_config['tester_address']
tester_config['report_path'] = self.log_directory
tester_config['binary'] = tester_config['memstrike_binary'] + " -l " + \
tester_config['gateway_address'].strip("[]") + " -p " + \
tester_config['gateway_port'] + " -t " + \
tester_config['nb_thread'] + " " + \
tester_config['nb_request'] #" 1000" " -t 32 1024000"
tester_connection = {}
tester_connection['start_url'] = "http://%s:5000/start" % tester_config['tester_address']
self.computer_partition.setConnectionDict(tester_connection)
tester_wrapper_template_location = pkg_resources.resource_filename(
__name__, os.path.join(
'template', 'nosqltester_run.in'))
tester_runner_path = self.createRunningWrapper("nosqltester",
self.substituteTemplate(tester_wrapper_template_location, tester_config))
return [tester_runner_path]
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
-exec %(gateway_binary)s -F -E -m %(manager_address)s:%(manager_port)s -t %(gateway_address)s:%(gateway_port)s --verbose -o %(gateway_log)s
+exec %(gateway_binary)s -F -E -m %(manager_address)s:%(manager_port)s \
+  -t %(gateway_address)s:%(gateway_port)s --verbose -o %(gateway_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
-exec %(manager_binary)s -a -l %(manager_address)s:%(manager_port)s --verbose -o %(manager_log)s
+exec %(manager_binary)s -a -l %(manager_address)s:%(manager_port)s \
+  --verbose -o %(manager_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(server_binary)s -l %(server_address)s:%(server_port)s \
-L %(server_listen_port)s -m %(manager_address)s:%(manager_port)s \
-s %(server_storage)s --verbose -o %(server_log)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_manager_binary)s %(plugin_name)s -a %(address)s \
-r %(report_path)s -s %(max_server)s -t %(max_tester)s \
--erp5-publish-url "%(erp5_publish_url)s" --erp5-publish-project "%(erp5_publish_project)s" \
%(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s \
%(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
-exec %(server_binary)s -l %(server_address)s:%(server_port)s -L %(server_listen_port)s -m %(manager_address)s:%(manager_port)s -s %(server_storage)s --verbose -o %(server_log)s
+exec %(memstrike_binary)s -s -l %(gateway_address)s -p %(gateway_port)s -t %(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
exec %(nosqltester_manager_binary)s -a %(address)s -r %(report_path)s -s %(nb_server_max)s -t %(nb_tester_max)s %(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s %(plugin_name)s %(nb_thread)s %(nb_request)s
#!/bin/sh
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
-exec %(nosqltester_manager_binary)s -a %(address)s -r %(report_path)s -s %(nb_server_max)s -t %(nb_tester_max)s %(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s %(plugin_name)s
+exec %(nosqltester_manager_binary)s %(plugin_name)s -a %(address)s \
+  -r %(report_path)s -s %(max_server)s -t %(max_tester)s \
+  --erp5-publish-url "%(erp5_publish_url)s" --erp5-publish-project "%(erp5_publish_project)s" \
+  %(software_release_url)s %(server_url)s "%(key_file)s" "%(cert_file)s" %(computer_id)s %(computer_partition_id)s
#!/bin/sh #!/bin/sh
# BEWARE: This file is operated by slapgrid # BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically # BEWARE: It will be overwritten automatically
exec %(nosqltester_binary)s -h %(host_address)s -a %(tester_address)s -r %(report_path)s -b "%(binary)s" exec %(nosqltester_binary)s -m %(host_address)s -a %(tester_address)s \
-r %(report_path)s -b "%(binary)s" -l %(log_directory)s \
-c "%(compress_method)s"
import os
import sys
import time
def runApache(args):
sleep = 60
conf = args[0]
while True:
ready = True
for f in conf.get('required_path_list', []):
if not os.path.exists(f):
        print 'File %r does not exist, sleeping for %s' % (f, sleep)
ready = False
if ready:
break
time.sleep(sleep)
apache_wrapper_list = [conf['binary'], '-f', conf['config'], '-DFOREGROUND']
apache_wrapper_list.extend(sys.argv[1:])
sys.stdout.flush()
sys.stderr.flush()
os.execl(apache_wrapper_list[0], *apache_wrapper_list)
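# A minimal invocation sketch (paths are hypothetical): runApache() waits until
# every entry of 'required_path_list' exists, then replaces the current process
# with httpd running in the foreground on the generated configuration.
if __name__ == '__main__':
  runApache([{
    'binary': '/opt/apache/bin/httpd',                # hypothetical path
    'config': '/srv/instance/etc/apache.conf',        # hypothetical path
    'required_path_list': ['/srv/instance/etc/ssl/cert.pem'],
  }])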
import os
import subprocess
import time
import ConfigParser
def popenCommunicate(command_list, input=None):
subprocess_kw = dict(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if input is not None:
subprocess_kw.update(stdin=subprocess.PIPE)
popen = subprocess.Popen(command_list, **subprocess_kw)
result = popen.communicate(input)[0]
if popen.returncode is None:
popen.kill()
if popen.returncode != 0:
raise ValueError('Issue during calling %r, result was:\n%s' % (
command_list, result))
return result
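# Usage sketch (binary path is hypothetical): the helper returns the combined
# stdout/stderr of the command and raises ValueError on a non-zero exit status,
# e.g.:
#   version_text = popenCommunicate(['/usr/bin/openssl', 'version'])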
class CertificateAuthority:
def __init__(self, key, certificate, openssl_binary,
openssl_configuration, request_dir):
self.key = key
self.certificate = certificate
self.openssl_binary = openssl_binary
self.openssl_configuration = openssl_configuration
self.request_dir = request_dir
def checkAuthority(self):
file_list = [ self.key, self.certificate ]
ca_ready = True
for f in file_list:
if not os.path.exists(f):
ca_ready = False
break
if ca_ready:
return
for f in file_list:
if os.path.exists(f):
os.unlink(f)
try:
# no CA, let us create new one
popenCommunicate([self.openssl_binary, 'req', '-nodes', '-config',
self.openssl_configuration, '-new', '-x509', '-extensions',
'v3_ca', '-keyout', self.key, '-out', self.certificate,
'-days', '10950'], 'Automatic Certificate Authority\n')
except:
try:
for f in file_list:
if os.path.exists(f):
os.unlink(f)
except:
# do not raise during cleanup
pass
raise
def _checkCertificate(self, common_name, key, certificate):
file_list = [key, certificate]
ready = True
for f in file_list:
if not os.path.exists(f):
ready = False
break
if ready:
return False
for f in file_list:
if os.path.exists(f):
os.unlink(f)
csr = certificate + '.csr'
try:
popenCommunicate([self.openssl_binary, 'req', '-config',
self.openssl_configuration, '-nodes', '-new', '-keyout',
key, '-out', csr, '-days', '3650'],
common_name + '\n')
try:
popenCommunicate([self.openssl_binary, 'ca', '-batch', '-config',
self.openssl_configuration, '-out', certificate,
'-infiles', csr])
finally:
if os.path.exists(csr):
os.unlink(csr)
except:
try:
for f in file_list:
if os.path.exists(f):
os.unlink(f)
except:
# do not raise during cleanup
pass
raise
else:
return True
def checkRequestDir(self):
for request_file in os.listdir(self.request_dir):
parser = ConfigParser.RawConfigParser()
parser.readfp(open(os.path.join(self.request_dir, request_file), 'r'))
if self._checkCertificate(parser.get('certificate', 'name'),
parser.get('certificate', 'key_file'), parser.get('certificate',
'certificate_file')):
print 'Created certificate %r' % parser.get('certificate', 'name')
def runCertificateAuthority(args):
ca_conf = args[0]
ca = CertificateAuthority(ca_conf['key'], ca_conf['certificate'],
ca_conf['openssl_binary'], ca_conf['openssl_configuration'],
ca_conf['request_dir'])
while True:
ca.checkAuthority()
ca.checkRequestDir()
time.sleep(60)
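# A request dropped into 'request_dir' is a ConfigParser-style file read by
# checkRequestDir() above; a sketch of its expected layout (paths and the
# common name are hypothetical):
#
#   [certificate]
#   name = partition-frontend
#   key_file = /srv/instance/etc/certs/frontend.key
#   certificate_file = /srv/instance/etc/certs/frontend.crt
#
# Every 60 seconds runCertificateAuthority() re-creates the CA if it is missing
# and signs one key/certificate pair per pending request.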
import os
import glob
def controller(args):
"""Creates full or incremental backup
If no full backup is done, it is created
If full backup exists incremental backup is done starting with base
base is the newest (according to date) full or incremental backup
"""
innobackupex_incremental, innobackupex_full, full_backup, incremental_backup \
= args
if len(os.listdir(full_backup)) == 0:
print 'Doing full backup in %r' % full_backup
os.execv(innobackupex_full, [innobackupex_full, full_backup])
else:
backup_list = filter(os.path.isdir, glob.glob(full_backup + "/*") +
glob.glob(incremental_backup + "/*"))
backup_list.sort(key=lambda x: os.path.getmtime(x), reverse=True)
base = backup_list[0]
print 'Doing incremental backup in %r using %r as a base' % (
incremental_backup, base)
os.execv(innobackupex_incremental, [innobackupex_incremental,
'--incremental-basedir=%s'%base, incremental_backup])
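# Invocation sketch (paths are hypothetical): the four positional arguments are
# the incremental innobackupex wrapper, the full-backup wrapper, the directory
# holding full backups and the directory holding incremental backups:
#
#   controller(['/srv/bin/innobackupex-incremental',
#               '/srv/bin/innobackupex-full',
#               '/srv/backup/full',
#               '/srv/backup/incremental'])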
import os
import subprocess
import time
import sys
def runMysql(args):
sleep = 60
conf = args[0]
mysqld_wrapper_list = [conf['mysqld_binary'], '--defaults-file=%s' %
conf['configuration_file']]
  # we trust mysql_install: if the mysql directory is present, MySQL was
  # correctly initialised
if not os.path.isdir(os.path.join(conf['data_directory'], 'mysql')):
while True:
# XXX: Protect with proper root password
# XXX: Follow http://dev.mysql.com/doc/refman/5.0/en/default-privileges.html
popen = subprocess.Popen([conf['mysql_install_binary'],
'--skip-name-resolve', '--no-defaults', '--datadir=%s' %
conf['data_directory']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = popen.communicate()[0]
if popen.returncode is None or popen.returncode != 0:
print "Failed to initialise server.\nThe error was: %s" % result
print "Waiting for %ss and retrying" % sleep
time.sleep(sleep)
else:
print "Mysql properly initialised"
break
else:
print "MySQL already initialised"
print "Starting %r" % mysqld_wrapper_list[0]
sys.stdout.flush()
sys.stderr.flush()
os.execl(mysqld_wrapper_list[0], *mysqld_wrapper_list)
def updateMysql(args):
conf = args[0]
sleep = 30
is_succeed = False
while True:
if not is_succeed:
mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--no-defaults', '--user=root', '--socket=%s' % conf['socket']]
mysql_upgrade = subprocess.Popen(mysql_upgrade_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = mysql_upgrade.communicate()[0]
if mysql_upgrade.returncode is None:
mysql_upgrade.kill()
if mysql_upgrade.returncode != 0 and not 'is already upgraded' in result:
print "Command %r failed with result:\n%s" % (mysql_upgrade_list, result)
print 'Sleeping for %ss and retrying' % sleep
else:
if mysql_upgrade.returncode == 0:
print "MySQL database upgraded with result:\n%s" % result
else:
print "No need to upgrade MySQL database"
mysql_list = [conf['mysql_binary'].strip(), '--no-defaults', '-B', '--user=root', '--socket=%s' % conf['socket']]
mysql = subprocess.Popen(mysql_list, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = mysql.communicate(conf['mysql_script'])[0]
if mysql.returncode is None:
mysql.kill()
if mysql.returncode != 0:
print 'Command %r failed with:\n%s' % (mysql_list, result)
print 'Sleeping for %ss and retrying' % sleep
else:
is_succeed = True
        print 'SlapOS initialisation script successfully applied on database.'
sys.stdout.flush()
sys.stderr.flush()
time.sleep(sleep)
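# Keys consumed by runMysql() and updateMysql() above, gathered here for
# reference (values are hypothetical):
#
#   conf = {
#     'mysqld_binary': '/opt/mariadb/libexec/mysqld',
#     'configuration_file': '/srv/instance/etc/my.cnf',
#     'data_directory': '/srv/instance/srv/mysql',
#     'mysql_install_binary': '/opt/mariadb/bin/mysql_install_db',
#     'mysql_upgrade_binary': '/opt/mariadb/bin/mysql_upgrade',
#     'mysql_binary': '/opt/mariadb/bin/mysql',
#     'socket': '/srv/instance/var/run/mariadb.sock',
#     'mysql_script': 'CREATE DATABASE IF NOT EXISTS erp5; ...',
#   }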
# Apache static configuration
# Automatically generated
# Basic server configuration
PidFile "%(pid_file)s"
LockFile "%(lock_file)s"
Listen %(ip)s:%(port)s
PHPINIDir %(php_ini_dir)s
ServerAdmin someone@email
DefaultType text/plain
TypesConfig conf/mime.types
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
AddType application/x-httpd-php .php .phtml .php5 .php4
AddType application/x-httpd-php-source .phps
# Log configuration
ErrorLog "%(error_log)s"
LogLevel warn
LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b" common
CustomLog "%(access_log)s" common
# Directory protection
<Directory />
Options FollowSymLinks
AllowOverride None
Order deny,allow
Deny from all
</Directory>
Alias /mmc %(document_root)s
### Allow access to the mmc web directory to everyone
<Directory %(document_root)s>
AllowOverride None
Order allow,deny
allow from all
php_flag short_open_tag on
php_flag magic_quotes_gpc on
</Directory>
DocumentRoot %(document_root)s
DirectoryIndex index.html index.php
# List of modules
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule mime_module modules/mod_mime.so
LoadModule dav_module modules/mod_dav.so
LoadModule dav_fs_module modules/mod_dav_fs.so
LoadModule negotiation_module modules/mod_negotiation.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
LoadModule dir_module modules/mod_dir.so
LoadModule php5_module modules/libphp5.so
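The configuration above is a %-substitution template: %(pid_file)s-style
placeholders are filled in by the recipe, which is presumably why literal
percent signs in the LogFormat lines are doubled (%%h, %%>s) so that they
survive formatting. A minimal sketch of the rendering step, assuming plain
Python %-substitution and a hypothetical parameter dict:

apache_template = ('ErrorLog "%(error_log)s"\n'
                   'LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b" common\n')
print apache_template % {
    'error_log': '/srv/instance/var/log/apache-error.log'}  # hypothetical path
# ErrorLog "/srv/instance/var/log/apache-error.log"
# LogFormat "%h %l %u %t \"%r\" %>s %b" common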
%(file_list)s {
daily
dateext
rotate 30
compress
notifempty
sharedscripts
create
postrotate
%(postrotate)s
endscript
olddir %(olddir)s
}
#!/bin/sh
exec %(memcached_binary)s -p %(memcached_port)s -U %(memcached_port)s -l %(memcached_ip)s
#!/bin/sh
exec %(mmc_core_binary)s -f %(mmc_core_config_file)s
[main]
host = %(mmc_host)s
port = %(mmc_port)s
# Credentials for HTTP basic authentication
login = mmc
password = s3cr3t
# RPC Session timeout in seconds.
# If unset default to Twisted hardcoded 900 seconds.
#sessiontimeout = 900
# Multi-threading support (enabled by default)
#multithreading = 1
#maxthreads = 20
# SSL support
enablessl = 1
localcert = %(ssl_localcert)s
cacert = %(ssl_cacert)s
# Certificate check
# verifypeer = 0
# Path to the file containing the Certificate Authority (PEM format)
# cacert =
# Path to the file containing the local key and certificate (PEM format)
# localcert =
[daemon]
user = %(daemon_user)s
group = %(daemon_user)s
umask = 0077
pidfile= %(daemon_pidfile)s
# user = mmc
# group = mmc
# umask = 0007
# pidfile= /var/run/mmc-agent.pid
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("%s" % %(mmc_log)s,)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s #%(thread)d %(levelname)s %(message)s
[main]
host = %(mmc_host)s
port = %(mmc_port)s
# Credentials for HTTP basic authentication
login = mmc
password = s3cr3t
# RPC Session timeout in seconds.
# If unset default to Twisted hardcoded 900 seconds.
#sessiontimeout = 900
# Multi-threading support (enabled by default)
#multithreading = 1
#maxthreads = 20
# SSL support
enablessl = 1
localcert = %(ssl_localcert)s
cacert = %(ssl_cacert)s
# Certificate check
# verifypeer = 0
# Path to the file containing the Certificate Authority (PEM format)
# cacert =
# Path to the file containing the local key and certificate (PEM format)
# localcert =
[daemon]
user = %(daemon_user)s
group = %(daemon_user)s
umask = 0077
pidfile= %(daemon_pidfile)s
# user = mmc
# group = mmc
# umask = 0007
# pidfile= /var/run/mmc-agent.pid
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("%s" % %(mmc_log)s,)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s #%(thread)d %(levelname)s %(message)s
[global]
; RPC backend to use
backend = xmlrpc
; HTTP basic authentication credentials to use for XMLRPC communication
login = mmc
password = s3cr3t
; Root URL where the MMC web pages are installed
; root= /mmc/ for http://127.0.0.1/mmc/
root = /mmc/
; Filesystem path where the MMC web modules are installed
rootfsmodules = %(mmc_location)s/modules/
; Available pagination steps
pagination = 10 20 50 100
; Number of items to display in lists
maxperpage = 10
; Declare if this installation is a community version or not
; community can be yes or no
community = yes
; Debug section
[debug]
; 0 : no information
; 1 : XMLRPC calls and results are displayed by the web interface
level = 0
; Login page title
; UTF-8 strings must be used
[logintitle]
; Default page title for English and non-translated languages
C =
; French title
fr_FR =
; Spanish title
es_ES =
[server_01]
description = localhost
url = https://127.0.0.1:7080
; Timeout in seconds for all socket I/O operations
; Beware that timeout on a SSL socket only works with PHP >= 5.2.1
; timeout = 300
; SSL certificate check support
; verifypeer = 0
; Path to the file containing the Certificate Authority (PEM format)
; cacert =
; Path to the file containing the local key and certificate (PEM format)
; localcert =
[ldap]
# LDAP we are connected to
# If ldapurl starts with "ldaps://", use LDAP over SSL on the LDAPS port.
# LDAPS is deprecated, and you should use StartTLS.
# If ldapverifypeer = demand, always use the server hostname instead of its IP
# address in the LDAP URL. This hostname must match the CN field of the server
# certificate.
ldapurl = ldap://%(ldap_host)s:%(ldap_port)s
# Network timeout in seconds for LDAP operations. No default timeout set.
#network_timeout =
# TLS connection parameters when LDAPS is not used:
# off - never use TLS (default value)
# start_tls - Use the LDAPv3 StartTLS extended operation (better)
#start_tls = off
# If start_tls != off or LDAPS, specify check to perform on server certificate:
# never - don't ask certificate
# demand - request certificate. If none or bad certificate provided, stop the
# connection (recommended)
#ldapverifypeer = demand
# Client certificates to use (default are empty) for LDAPS or TLS connections:
# For example: /etc/ssl/certs
#cacertdir =
# For example: /etc/mmc/certs/demoCA/cacert.pem
#cacert =
# For example: /etc/mmc/certs/client.cert
#localcert =
# For example: /etc/mmc/certs/client.key
#localkey =
# Accepted ciphers
# Use this for more security: TLSv1+SHA1+AES+DH:CAMELLIA:!NULL:!ADH
#ciphersuites = TLSv1:!NULL
# LDAP debug level - set this to 255 to debug LDAP connection problems
#ldapdebuglevel = 0
# LDAP base DN
baseDN = dc=mandriva, dc=com
# Users location in the LDAP
baseUsersDN = ou=Users, %%(basedn)s
# Groups location in the LDAP
baseGroupsDN = ou=Groups, %%(basedn)s
# LDAP manager
rootName = cn=admin, %%(basedn)s
password = secret
# If enabled, the MMC will create/move/delete the home of the users
# Else will do nothing, but only write user informations into LDAP
userHomeAction = 1
# Skeleton directory to populate a new home directory
skelDir = /etc/skel
# If set, all new users will belong to this group when created
defaultUserGroup = Domain Users
# Default home directory for users
defaultHomeDir = /home
# user uid number start
uidStart = 10000
# group gid number start
gidStart = 10000
# LDAP log file path
logfile = %(ldap_logfile_path)s
# FDS log file path
# logfile = /opt/fedora-ds/slapd-hostname/logs/access
# you can specify here where creation of home directories is authorized
# default is your defaultHomeDir
# example:
# authorizedHomeDir = /home, /home2, /mnt/depot/newhome
# LDAP user password scheme to use
# Possible values are "ssha", "crypt" and "passmod"
# "passmod" uses the LDAP Password Modify Extended Operations to change
# password. The password encryption is done by the LDAP server.
passwordscheme = passmod
#[backup-tools]
## Path of the backup tools
#path = /usr/lib/mmc/backup-tools
## Where are put the archives
#destpath = /home/archives
# Computer inventory plugin to use (Pulse 2 related option)
# [computers]
# method = glpi
# method = inventory
# Audit system configuration
# If commented, the audit module will be disabled
# [audit]
# method = database
# dbhost = localhost
# MySQL and PostgreSQL backends are available
# dbdriver = postgres
# dbport = 5432
# dbdriver = mysql
# dbport = 3306
# dbuser = audit
# dbpassword = audit
# dbname = audit
# User authentication configuration
#[authentication]
# Authenticators chain
#method = baseldap externalldap
# baseldap authenticator configuration
#[authentication_baseldap]
# Set a list of logins that will only be authenticated using this authenticator
#authonly = root
# Externalldap authenticator configuration
#[authentication_externalldap]
# Login list that won't be authenticated with this authenticator.
#exclude =
# If set, only the specified logins will be authenticated with this
# authenticator.
#authonly =
# Set whether this authenticator is mandatory. If it is mandatory and can't be
# validated during the mmc-agent activation phase, the mmc-agent exits with an
# error.
#mandatory = True
# LDAP server URLs. The LDAP servers are selected in the given order when
# authenticating a user.
#ldapurl = ldap://192.168.0.1:389 ldap://192.168.0.2:389
# LDAP connection timeout in seconds. If the LDAP connection fails after this
# timeout, we try the next LDAP server in the list, or give up if it is the last.
#network_timeout =
# LDAP suffix where to search for user
#suffix = cn=Users,dc=mandriva,dc=com
# How to bind to the LDAP. Empty if anonymous
#bindname = cn=account, cn=Users, dc=linboxad, dc=com
#bindpasswd = s3cr3t
#bindname =
#bindpasswd =
# User filter
#filter = objectClass=*
# User attribute containing her/his login
#attr = cn
# User provisioning configuration
#[provisioning]
#method = externalldap
# externalldap provisioner configuration
#[provisioning_externalldap]
# Login list that won't be provisioned with this provisioner
#exclude = root
# These attributes are mandatory to create a user
#ldap_uid = cn
#ldap_givenName = sn
#ldap_sn = sn
# Other attributes to fill in
#ldap_mail = mail
#...
# We are able to fill in the ACL fields when the user logs in, according to
# the value of an attribute from the external LDAP.
# What is the field name ?
#profile_attr =
# Here we define two profiles: profile1 and profile2
# profile1 allows the user to log in and change her/his password in the web
# interface
#profile_acl_profile1= :base#users#passwd/
# profile2 disallows the user to do anything (no ACL defined)
#profile_acl_profile2 =
# ... You can define as much profile_acl_* options as you need
# For each profile, we can create a group of users, and put users with a given
# profile in the corresponding group automatically when they log in.
# Set the next line to True to activate profile to group mapping
#profile_group_mapping = False
# A prefix for the created group can be set
#profile_group_prefix =
# Example userdefault settings to support Kerberos
# [userdefault]
# objectClass = +krb5KDCEntry,krb5Principal
# krb5KeyVersionNumber = 1
# krb5KDCFlags = 126
# krb5PrincipalName = %uid%@DOMAIN
# Subscription informations
# [subscription]
# product_name = MDS
# vendor_name = Mandriva
# vendor_mail = sales@mandriva.com
# customer_name =
# customer_mail =
# comment =
# users = 0
# computers = 0
# # Support informations
# support_mail = customer@customercare.mandriva.com
# support_phone = 0810 LINBOX
# support_comment =
[ldap]
# LDAP we are connected to
# If ldapurl starts with "ldaps://", use LDAP over SSL on the LDAPS port.
# LDAPS is deprecated, and you should use StartTLS.
# If ldapverifypeer = demand, always use the server hostname instead of its IP
# address in the LDAP URL. This hostname must match the CN field of the server
# certificate.
ldapurl = ldap://%(ldap_host)s:%(ldap_port)s
# Network timeout in seconds for LDAP operations. No default timeout set.
#network_timeout =
# TLS connection parameters when LDAPS is not used:
# off - never use TLS (default value)
# start_tls - Use the LDAPv3 StartTLS extended operation (better)
#start_tls = off
# If start_tls != off or LDAPS, specify check to perform on server certificate:
# never - don't ask certificate
# demand - request certificate. If none or bad certificate provided, stop the
# connection (recommended)
#ldapverifypeer = demand
# Client certificates to use (default are empty) for LDAPS or TLS connections:
# For example: /etc/ssl/certs
#cacertdir =
# For example: /etc/mmc/certs/demoCA/cacert.pem
#cacert =
# For example: /etc/mmc/certs/client.cert
#localcert =
# For example: /etc/mmc/certs/client.key
#localkey =
# Accepted ciphers
# Use this for more security: TLSv1+SHA1+AES+DH:CAMELLIA:!NULL:!ADH
#ciphersuites = TLSv1:!NULL
# LDAP debug level - set this to 255 to debug LDAP connection problems
#ldapdebuglevel = 0
# LDAP base DN
baseDN = dc=mandriva, dc=com
# Users location in the LDAP
baseUsersDN = ou=Users, %%(basedn)s
# Groups location in the LDAP
baseGroupsDN = ou=Groups, %%(basedn)s
# LDAP manager
rootName = cn=admin, %%(basedn)s
password = secret
# If enabled, the MMC will create/move/delete the home of the users
# Else will do nothing, but only write user informations into LDAP
userHomeAction = 1
# Skeleton directory to populate a new home directory
skelDir = /etc/skel
# If set, all new users will belong to this group when created
defaultUserGroup = Domain Users
# Default home directory for users
defaultHomeDir = /home
# user uid number start
uidStart = 10000
# group gid number start
gidStart = 10000
# LDAP log file path
logfile = %(ldap_logfile_path)s
# FDS log file path
# logfile = /opt/fedora-ds/slapd-hostname/logs/access
# you can specify here where creation of home directories is authorized
# default is your defaultHomeDir
# example:
# authorizedHomeDir = /home, /home2, /mnt/depot/newhome
# LDAP user password scheme to use
# Possible values are "ssha", "crypt" and "passmod"
# "passmod" uses the LDAP Password Modify Extended Operations to change
# password. The password encryption is done by the LDAP server.
passwordscheme = passmod
#[backup-tools]
## Path of the backup tools
#path = /usr/lib/mmc/backup-tools
## Where are put the archives
#destpath = /home/archives
# Computer inventory plugin to use (Pulse 2 related option)
# [computers]
# method = glpi
# method = inventory
# Audit system configuration
# If commented, the audit module will be disabled
# [audit]
# method = database
# dbhost = localhost
# MySQL and PostgreSQL backends are available
# dbdriver = postgres
# dbport = 5432
# dbdriver = mysql
# dbport = 3306
# dbuser = audit
# dbpassword = audit
# dbname = audit
# User authentication configuration
#[authentication]
# Authenticators chain
#method = baseldap externalldap
# baseldap authenticator configuration
#[authentication_baseldap]
# Set a list of logins that will only be authenticated using this authenticator
#authonly = root
# Externalldap authenticator configuration
#[authentication_externalldap]
# Login list that won't be authenticated with this authenticator.
#exclude =
# If set, only the specified logins will be authenticated with this
# authenticator.
#authonly =
# Set whether this authenticator is mandatory. If it is mandatory and can't be
# validated during the mmc-agent activation phase, the mmc-agent exits with an
# error.
#mandatory = True
# LDAP server URLs. The LDAP servers are selected in the given order when
# authenticating a user.
#ldapurl = ldap://192.168.0.1:389 ldap://192.168.0.2:389
# LDAP connection timeout in seconds. If the LDAP connection fails after this
# timeout, we try the next LDAP server in the list, or give up if it is the last.
#network_timeout =
# LDAP suffix where to search for user
#suffix = cn=Users,dc=mandriva,dc=com
# How to bind to the LDAP. Empty if anonymous
#bindname = cn=account, cn=Users, dc=linboxad, dc=com
#bindpasswd = s3cr3t
#bindname =
#bindpasswd =
# User filter
#filter = objectClass=*
# User attribute containing her/his login
#attr = cn
# User provisioning configuration
#[provisioning]
#method = externalldap
# externalldap provisioner configuration
#[provisioning_externalldap]
# Login list that won't be provisioned with this provisioner
#exclude = root
# These attributes are mandatory to create a user
#ldap_uid = cn
#ldap_givenName = sn
#ldap_sn = sn
# Other attributes to fill in
#ldap_mail = mail
#...
# We are able to fill in the ACL fields when the user logs in, according to
# the value of an attribute from the external LDAP.
# What is the field name ?
#profile_attr =
# Here we define two profiles: profile1 and profile2
# profile1 allows the user to log in and change her/his password in the web
# interface
#profile_acl_profile1= :base#users#passwd/
# profile2 disallows the user to do anything (no ACL defined)
#profile_acl_profile2 =
# ... You can define as much profile_acl_* options as you need
# For each profile, we can create a group of users, and put users with a given
# profile in the corresponding group automatically when they log in.
# Set the next line to True to activate profile to group mapping
#profile_group_mapping = False
# A prefix for the created group can be set
#profile_group_prefix =
# Example userdefault settings to support Kerberos
# [userdefault]
# objectClass = +krb5KDCEntry,krb5Principal
# krb5KeyVersionNumber = 1
# krb5KDCFlags = 126
# krb5PrincipalName = %uid%@DOMAIN
# Subscription informations
# [subscription]
# product_name = MDS
# vendor_name = Mandriva
# vendor_mail = sales@mandriva.com
# customer_name =
# customer_mail =
# comment =
# users = 0
# computers = 0
# # Support informations
# support_mail = customer@customercare.mandriva.com
# support_phone = 0810 LINBOX
# support_comment =
[main]
disable = 1
[ppolicy]
# Branch where the password policies are stored
ppolicyDN = ou=Password Policies, %(baseDN)s
# Name of the default password policy
ppolicyDefault = default
# These options are used only once to create the default password policy entry
# in the LDAP
[ppolicyattributes]
pwdAttribute = userPassword
pwdLockout = True
pwdMaxFailure = 5
pwdLockoutDuration = 900
# Password can't be changed before it is 25200 seconds (7 hours) old
pwdMinAge = 25200
# Password expiration is 42 days
pwdMaxAge = 3628800
pwdMinLength = 8
pwdInHistory = 5
pwdMustChange = True
# To check password quality
pwdCheckModule = mmc-check-password.so
pwdCheckQuality = 2
# ERP5 buildout my.cnf template based on my-huge.cnf shipped with mysql
# The MySQL server
[mysqld]
# ERP5 by default requires InnoDB storage. MySQL by default falls back to a
# different engine, like MyISAM. Such behaviour only generates problems when
# tables requested as InnoDB are silently created with the MyISAM engine.
#
# A loud failure is really required in such a case.
sql-mode="NO_ENGINE_SUBSTITUTION"
skip-show-database
port = %(tcp_port)s
bind-address = %(ip)s
socket = %(socket)s
datadir = %(data_directory)s
pid-file = %(pid_file)s
log-error = %(error_log)s
log-slow-file = %(slow_query_log)s
long_query_time = 5
max_allowed_packet = 128M
query_cache_size = 32M
plugin-load = ha_innodb_plugin.so
# The following are important to configure and depend a lot on the size of
# your database and the available resources.
#innodb_buffer_pool_size = 4G
#innodb_log_file_size = 256M
#innodb_log_buffer_size = 8M
# Some dangerous settings you may want to uncomment if you only want
# performance or less disk access. Useful for unit tests.
#innodb_flush_log_at_trx_commit = 0
#innodb_flush_method = nosync
#innodb_doublewrite = 0
#sync_frm = 0
# Uncomment the following if you need binary logging, which is recommended
# on production instances (either for replication or incremental backups).
#log-bin=mysql-bin
# Force utf8 usage
collation_server = utf8_unicode_ci
character_set_server = utf8
skip-character-set-client-handshake
[mysql]
no-auto-rehash
socket = %(socket)s
[mysqlhotcopy]
interactive-timeout
CREATE DATABASE IF NOT EXISTS %(database)s;
GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
GRANT SHOW DATABASES ON *.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
GRANT SHOW DATABASES ON *.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
FLUSH PRIVILEGES;
EXIT
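The GRANT script above is likewise a Python %-template. %(database)s and
%(user)s are inserted verbatim, while %(password)r goes through repr(), which
wraps the password in quotes for the IDENTIFIED BY clause. A rendering sketch
with hypothetical values:

sql_template = ("CREATE DATABASE IF NOT EXISTS %(database)s;\n"
                "GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@localhost "
                "IDENTIFIED BY %(password)r;\n")
print sql_template % {'database': 'erp5', 'user': 'erp5_user',
                      'password': 'insecure'}  # hypothetical credentials
# CREATE DATABASE IF NOT EXISTS erp5;
# GRANT ALL PRIVILEGES ON erp5.* TO erp5_user@localhost IDENTIFIED BY 'insecure';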
[PHP]
engine = On
safe_mode = Off
expose_php = On
error_reporting = E_ALL | E_STRICT
display_errors = On
display_startup_errors = On
log_errors = On
log_errors_max_len = 1024
ignore_repeated_errors = Off
ignore_repeated_source = Off
[main]
# Is the plugin disabled?
disable = 0
# Are dynamic groups enabled?
dynamic_enable = 1
# Are profiles enabled?
# profiles_enable = 0
# Preselected module in the dynamic group creation page
# default_module =
# Maximum number of elements in the static group creation list
# max_elements_for_static_list = 2000
[database]
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = dyngroup
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
[querymanager]
# can we query on group names ?
activate = 0
[main]
disable = 0
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = glpi
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# Allow users to filter computers list using an entity selector
localisation = True
# Accepted GLPI profiles. A user must have her/his GLPI profile in this list
# else she/he can't display any computers from the GLPI inventory
# active_profiles = profile1 profile2 profile3
# Only display computers with the specified state
# filter_on = state=3
# Give the uri to link to for a computer inventory
# glpi_computer_uri =
# should be something like that :
# glpi_computer_uri = http://localhost/glpi/front/computer.form.php?ID=
# Tell whether the query manager of the dyngroup plugin can use this module
[querymanager]
activate = True
# GLPI authentication configuration
#[authentication_glpi]
# URL to connect to the GLPI HTTP interface ?
#baseurl = http://glpi-server/glpi/
# GLPI provisioning configuration
#[provisioning_glpi]
# Users that will never be provisioned
#exclude = root
# Before provisioning, should we perform a GLPI authentication to create or
# update the user informations in the GLPI database ?
#doauth = 1
# MMC web interface ACLs definition according to the user GLPI profile
#profile_acl_profile1 = :##:base#main#default
#profile_acl_profile2 =
#profile_acl_profile3 =
# If the user belong to more than one profile, the first profile of this list
# will be used
#profiles_order = profile1 profile2 profile3
[main]
disable = 0
[database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = imaging
# dbuser = mmc
# dbpasswd = mmc
# dbsslenable = 0
# dbsslca = /etc/mmc/pulse2/imaging/cacert.pem
# dbsslcert = /etc/mmc/pulse2/imaging/cert.pem
# dbsslkey = /etc/mmc/pulse2/imaging/key.pem
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# [web]
# ##### Interface customization #####
# user may want to override the way dates are displayed (see http://www.php.net/date for more informations)
# web_def_date_fmt = "%Y-%m-%d %H:%M:%S"
# web_def_default_protocol = nfs
# Menu settings
# web_def_default_menu_name = Menu
# web_def_default_timeout = 60
# web_def_default_background_uri =
# web_def_default_message = Warning ! Your PC is being backed up or restored. Do not reboot !
# start options
# web_def_kernel_parameters = quiet
# backup/restore options
# web_def_image_parameters =
[main]
disable = 0
displayLocalisationBar = 0
# Example of software filter when querying a computer inventory.
# All softwares containing the KB string will be excluded
# software_filter = %KB%
[inventory]
dbdriver = mysql
dbhost = %(mysql_host)s
dbport = %(mysql_port)s
dbuser = %(mysql_user)s
dbpasswd = %(mysql_password)s
dbname = inventory
dbsslenable = 0
dbsslca = %(inventory_ssl_cacert)s
dbsslcert = %(inventory_ssl_cert)s
dbsslkey = %(inventory_ssl_key)s
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
[computers]
# display = cn::Computer Name||displayName::Description
# content =
[expert_mode]
Bios = BiosVersion|ChipSerial|BiosVendor|SmbManufacturer|SmbProduct|SmbVersion|SmbSerial|SmbUUID|SmbType|DateFirstSwitchOn
Network = CardType|MIB|Bandwidth|NetworkType|SubnetMask|State
Hardware = Build|Version|ProcessorCount|SwapSpace|User|Date|Workgroup|RegisteredName|RegisteredCompany|OSSerialNumber|Type|OsSerialKey|ProcessorFrequency|Host
Software = ProductPath|Type|Icon|UninstallPath|ExecutableSize|Application
Controller = ExpandedType|HardwareVersion|StandardType
Drive = DriveType|FileCount|FileSystem
Input = StandardDescription|ExpandedDescription|Connector
Memory = ExtendedDescription|SlotCount
Monitor = Stamp|Type|Serial|Manuf
Pci =
Port = Stamp
Printer =
Slot =
Sound = Description
Storage = ExtendedType|VolumeName|Media
VideoCard =
[graph]
Network = Gateway
Hardware = OperatingSystem|ProcessorType
Memory = Size
# [querymanager]
# list = Entity/Label||Software/ProductName||Hardware/ProcessorType||Hardware/OperatingSystem||Drive/TotalSpace||Inventory/Date
# double = Software/Products::Software/ProductName##Software/ProductVersion
# halfstatic = Registry/Value/display name::Path##DisplayName
# extended = Inventory/Date||Drive/TotalSpace
# [provisioning_inventory]
# Users that will never be provisioned
# exclude = root
# A user can be automatically linked to a list of entities according to his
# profile.
# What is the LDAP field name that defines its profile name ?
# profile_attr =
# Here are the possible notations for profile to entities mapping:
# A simple list of the entities names
# profile_entity_profile1 = entityA entityB
# The dot char is the root entity
# profile_entity_profile2 = .
# In this example the content of the multi-valued 'pulse2entity' LDAP attribute
# will be used
# profile_entity_profile3 = %pulse2entity%
# Here the provisioning plugin 'network_to_entity' will be used
# profile_entity_profile4 = plugin:network_to_entity
[main]
disable = 0
[msc]
# repopath = /var/lib/pulse2/packages
# qactionspath = /var/lib/pulse2/qactions
# download_directory_path = /var/lib/pulse2/downloads
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = msc
# dbuser = mmc
# dbpasswd = mmc
# dbdebug = ERROR
# Database connection lifetime
# dbpoolrecycle = 60
# Database connection pool size
# dbpoolsize = 5
# SSL support
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
# Computer's IP addresses filtering
# ignore_non_rfc2780 = 1
# ignore_non_rfc1918 = 0
# Comma separated values of excluded or included IP addresses or ranges
# For example: exclude_ipaddr = 192.168.0.1,10.0.0.0/10.255.255.255
# Included addresses are never filtered.
# exclude_ipaddr =
# include_ipaddr =
# Computer's host name filtering
# ignore_non_fqdn = 0
# ignore_invalid_hostname = 0
# Space separated list regexp for rejected or accepted host name
# Host name matching the regexp in include_hostname are always accepted
# For example: exclude_hostname = ^computer[0-9]*$ ^server[0-9]*$
# exclude_hostname =
# include_hostname =
# Computer's MAC addresses filtering
# wol_macaddr_blacklist =
# default scheduler used by the msc
default_scheduler = scheduler_01
[scheduler_api]
host = %(ipv4)s
port = 9990
username =
password =
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[scheduler_01]
host = %(ipv4)s
port = 8000
username = username
password = password
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[web]
# ##### Interface customization #####
# user may want to override the way dates are displayed (see http://www.php.net/date for more informations)
# web_def_date_fmt = "%Y-%m-%d %H:%M:%S"
#
# ##### Deployment default settings #####
# pre-check the "awake" checkbox in 'standard' deploy mode
# web_def_awake = 1
# pre-check the "do inventory" checkbox in 'standard' deploy mode
# web_def_inventory = 1
# hidden option
# web_show_reboot = 0
# pre-select the following mode in 'advanced' deploy mode
# the two available modes are push and push_pull
# web_def_mode = push
# if set to False, the following setting allow the user to toggle between push and push/pull mode in advanced mode
# web_force_mode = True
# web_def_maxbw = 0
# web_def_delay = 60
# web_def_attempts = 3
# web_def_deployment_intervals =
# web_dlpath =
# Max bandwidth for file download in Kbit/s
# web_def_dlmaxbw = 0
#
# ##### Proxy default settings ####
# May the local proxy system be used ?
# web_allow_local_proxy = False
# default proxy mode, default "multiple", other possible value "single"
# web_def_local_proxy_mode = multiple
# Max number of clients per proxy in proxy mode
# web_def_max_clients_per_proxy = 10
# Number of auto-selected proxy in semi-auto mode
# web_def_proxy_number = 2
# default mode (semi_auto / manual)
# web_def_proxy_selection_mode = semi_auto
#
# ##### VNC Applet default settings ####
# Here we may define the VNC applet behavior (in some way)
# may the VNC applet be used ? (this setting simply (en/dis)ables the display of the VNC action button)
# vnc_show_icon = True
# allow user to interact with remote desktop ?
# vnc_view_only = True
# use the following VNC client pre-defined rules,
# currently available profiles:
# fiber: for high speed local networks (low latency, 10 Mb/s per connection)
# lan: for 100 Mb local networks (low latency, 3 Mb/s per connection)
# cable: for high-end broadband links (high latency, 400 kb/s per connection)
# dsl: for low-end broadband links (high latency, 120 kb/s per connection)
# isdn: (high latency, 75 kb/s)
# vnc_network_connectivity = lan
# display applet control to user
# vnc_allow_user_control = False
# the port to use to connect to a VNC
# vnc_port = 5900
#
# ##### Client probing behavior ####
# the LED which represents the client status can take four colors:
# black => no probe done
# red => all probes failed
# orange => minimal probe succeeded (ping), maximal probe failed (ssh)
# green => all probes succeeded
# available probes are: none (field is empty), ping, ssh, ping_ssh (ie. both)
# for networks where icmp is not allowed, ping may be disabled: probe_order=ssh
# to speed-up display, ssh may be disabled: probe_order=ping
# to fully disable probe: probe_order=
# default conf: ping_ssh, in other terms: ping = orange, ssh = green
# probe_order=ping_ssh
[package_api]
# mserver = 127.0.0.1
# mport = 9990
# mmountpoint = /rpc
# enablessl = 1
# verifypeer = 0
# localcert =
# cacert =
[main]
disable = 0
[user_package_api]
server = localhost
port = 9990
mountpoint = /upaa
username =
password =
enablessl = 1
# verifypeer = 0
# cacert =
# localcert =
[main]
disable = 0
# [database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = pulse2
# dbuser = mmc
# dbpasswd = mmc
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
^/tftpboot/revoboot/bin/revoboot.pxe$ bootloader/pxe_boot
^/bootloader/pxe_boot$ bootloader/pxe_boot
^/bootloader/bootsplash.xpm$ bootloader/bootsplash.xpm
^/bootmenus/default bootmenus/default
^/bootmenus/([0-9A-F]{12}) bootmenus/$1
^/bootmenus/([0-9A-F]{8}) bootmenus/$1
^/custom/(.+) custom/$1
^/diskless/kernel$ diskless/kernel
^/diskless/initrd$ diskless/initrd
^/diskless/memtest$ diskless/memtest
^/tools/memtest$ diskless/memtest
^/masters/(.+) masters/$1
^/(.*)$ ?
.* ?
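These rules map the paths requested by network-booting clients onto the package
server's imaging tree: boot menus are looked up either by a 12-hex-digit
MAC-based name or by an 8-hex-digit name, and any unmatched request resolves to
"?" (nothing served). A rough Python illustration of the matching logic with a
subset of the rules, not the actual implementation:

import re

RULES = [  # first matching rule wins (assumption for this sketch)
  (r'^/bootmenus/([0-9A-F]{12})', r'bootmenus/\1'),
  (r'^/bootmenus/([0-9A-F]{8})', r'bootmenus/\1'),
  (r'^/diskless/kernel$', r'diskless/kernel'),
  (r'^/(.*)$', r'?'),
]

def resolve(path):
  for pattern, target in RULES:
    match = re.match(pattern, path)
    if match:
      return match.expand(target)
  return '?'

print resolve('/bootmenus/0011AABBCCDD')  # -> bootmenus/0011AABBCCDD
print resolve('/unknown/thing')           # -> ?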
[main]
# We will bind on this address ...
# host = 0.0.0.0
# ... and this UDP port
# port = 1001
# required password to record a client, not checked if empty
# adminpass =
[daemon]
# as we are a service, we need some info to be run, such as:
# - the identity we will run into
# user = root
# group = root
# - our umask
# umask = 0077
# and where to record our PID
# pidfile= /var/run/pulse2-imaging-server.pid
[package-server]
# here are described how to talk to our referent package server
#
# host = 127.0.0.1
# port = 9990
# mount_point = /imaging_api
# enablessl = True
# username = username
# password = password
# cacert = /etc/mmc/pulse2/imaging-server/keys/cacert.pem
# localcert = /etc/mmc/pulse2/imaging-server/keys/privkey.pem
# verifypeer = False
[hooks]
# hooks_dir = /usr/lib/pulse2/imaging-server/hooks
#
# 0xAD => "I'm new" action : menu creation request, triggered by the bootloader
# arg[1] = source MAC (short)
# arg[2] = given ID (short)
# arg[3] = given PASSWORD (optional)
# exit 0 on success
# create_client_path = create_client
#
# 0xAA => "I just booted" action : menu update request + inventory processing request, triggered by the bootloader
# arg[1] = source MAC (short)
# boot_client_path = boot_client
# arg[1] = source MAC (short)
# arg[2] = where the inventory is temporarily stored
# process_inventory_path = process_inventory
#
# 0xEC => "I just started a backup" : start a backup request, triggered by revoinc
# arg[1] = source MAC (short)
# arg[2] = kind of backup (L = image, B = Master)
# exit 0 on success
# start_image_path = start_image
#
# 0xED => "I just finished a backup" : end a backup request, triggered by revodoneimage
# end_image_path = end_image
#
# 0xCD => "Change my default menu" => change default menu request, triggered by revodefault
# arg[1] = source MAC (short)
# arg[2] = item to use
# exit 0 on success
# change_default_path = change_default
#
# 0x4C => log stuff
# arg[1] = source MAC (short)
# arg[2] = action :
# 0 => booted
# 1 => took item arg[3]
# 2 => starting restoration (more info in arg[3])
# 3 => finished restoration (more info in arg[3])
# 4 => starting backup (more info in arg[3])
# 5 => finished backup (more info in arg[3])
# 6 => started postinst
# 7 => finished postinst
# 8 => critical error
# arg[3] = optional, see upper
# exit 0 on success
# log_action_path = log_action
#
# 0x1A => asks for its UUID
# arg[1] = source MAC (short)
# exit 0 on success, UUID is the last line on stdout
# get_uuid_path = get_uuid
#
# 0x1B => asks for its Hostname
# arg[1] = source MAC (short)
# exit 0 on success, hostname is the last line on stdout
# get_hostname_path = get_hostname
#
# 0x54 => ask for time sync (for mtftp)
# arg[1] = source MAC (short)
# arg[2] = computed sync
# exit 0 on success
# mtftp_sync_path = mtftp_sync
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("/var/log/mmc/pulse2-imaging-server.log",)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
[main]
# host =
# port = 9999
# ocsmapping = /etc/mmc/pulse2/inventory-server/OcsNGMap.xml
# enablessl = False
# verifypeer = False
# cacert = /etc/mmc/pulse2/inventory-server/keys/cacert.pem
# localcert = /etc/mmc/pulse2/inventory-server/keys/privkey.pem
# to put the data from the registry as hostname
# hostname = Hardware/Host # by default
# default_entity = .
# entities_rules_file =
[database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = inventory
# dbuser = mmc
# dbpasswd = mmc
# dbpoolrecycle = 60
# dbsslenable = 0
# dbsslca = /etc/mmc/pulse2/inventory/cacert.pem
# dbsslcert = /etc/mmc/pulse2/inventory/cert.pem
# dbsslkey = /etc/mmc/pulse2/inventory/key.pem
[daemon]
# pidfile = /var/run/pulse2-inventory-server.pid
# user = root
# group = root
# umask = 0077
# Example of non-root execution settings:
# user = mmc
# group = mmc
# umask = 0007
# [option_01]
# NAME = REGISTRY
# PARAM_01 = NAME::srvcomment||REGKEY::SYSTEM\CurrentControlSet\Services\lanmanserver\parameters||REGTREE::2##srvcomment
# PARAM_02 = NAME::DisplayName||REGKEY::SYSTEM\CurrentControlSet\Services\lanmanserver||REGTREE::2##DisplayName
[loggers]
keys=root
[handlers]
keys=hand01,hand02
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("/var/log/mmc/pulse2-inventory-server.log",)
[handler_hand02]
class=StreamHandler
level=DEBUG
args=(sys.stderr,)
[formatter_form01]
format=%(asctime)s %(levelname)s %(message)s
[launchers]
# Pulse2 Launcher path
# launcher_path = /usr/sbin/pulse2-launcher
## Misc locations ##
# Source directory for push mode
# source_path = /var/lib/pulse2/packages
## Workflow-related commands ##
# global ping command
# ping_path = /usr/sbin/pulse2-ping
# global inventory command
# inventory_command = export P2SRV=`echo $SSH_CONNECTION | cut -f1 -d\ `; export P2PORT=9999; export http_proxy=""; export ftp_proxy=""; ( [ -x /cygdrive/c/Program\ Files/OCS\ Inventory\ Agent/OCSInventory.exe ] && /cygdrive/c/Program\ Files/OCS\ Inventory\ Agent/OCSInventory.exe /np /server:$P2SRV /pnum:$P2PORT ) || ( [ -x /usr/bin/ocsinventory-agent ] && /usr/bin/ocsinventory-agent --server=http://$P2SRV:$P2PORT ) || ( [ -x /usr/sbin/ocsinventory-agent ] && /usr/sbin/ocsinventory-agent --server=http://$P2SRV:$P2PORT ) || ( [ -x /usr/local/sbin/ocs_mac_agent.php ] && /usr/local/sbin/ocs_mac_agent.php )
# global reboot command
# reboot_command = /bin/shutdown.exe -f -r 1 || shutdown -r now
# global halt command
# halt_command = /bin/shutdown.exe -f -s 1 || shutdown -h now
## Target on client ##
# target directory on client
# target_path = /tmp
# temp folder name prefix on client
# temp_folder_prefix = MDVPLS
## Fool-proofing ##
# Above this amount of seconds the command will automatically be killed.
# max_command_age = 86400
# Above this amount of seconds a computer will be considered as
# unreachable.
# max_ping_time = 4
# Above this amount of seconds a computer will be considered as without
# ssh installed
# max_probe_time = 20
[daemon]
# the place where we put the daemons' PID files
# pidfile = /var/run/pulse2
# user = root
# group = root
# umask = 0077
# Example of non-root execution settings:
# user = mmc
# group = mmc
# umask = 0007
[wrapper]
# wrapper path
# path = /usr/sbin/pulse2-output-wrapper
# cap the amount of logs a wrapper can generate (in bytes)
# max_log_size = 512000
# wrapper will quit if the process takes longer than this to complete (in seconds)
# max_exec_time = 21600
[ssh]
# defaultkey is the name of the default SSH key
# default_key = default
# openssh binaries path
# scp_path = /usr/bin/scp
# ssh_path = /usr/bin/ssh
# ssh_agent_path = /usr/bin/ssh-agent
# sshkey_default = /root/.ssh/id_dsa
# sshkey_mysecondkey = /somewhere/my_second_key
# options passed to SSH via "-o"
# ssh_options = LogLevel=ERROR UserKnownHostsFile=/dev/null StrictHostKeyChecking=no Batchmode=yes PasswordAuthentication=no ServerAliveInterval=10 CheckHostIP=no ConnectTimeout=10
# ssh's keyforwarding control: never, always, or let (aka 'let the scheduler decide')
# forward_key = let
[wget]
# wget binary path (on client)
# wget_path = /usr/bin/wget
# wget_options =
# check_certs = False
# resume = True
[rsync]
# rsync_path = /usr/bin/rsync
# resume = True
# set_executable can be yes, no or keep
# set_executable = yes
# set_access can be private, restricted or public
# set_access = private
[wol]
# Pulse2 WOL Path
# wol_path = /usr/sbin/pulse2-wol
# wol_port = 40000
# wol_bcast = 255.255.255.255
[tcp_sproxy]
# Pulse2 SSH Proxy path
# tcp_sproxy_path = /usr/sbin/pulse2-tcp-sproxy
# The "external" VNC IP adress
# tcp_sproxy_host =
# The TCP port range to assign to the proxy
# tcp_sproxy_port_range = 8100-8200
# the initial ssh connection to the client timeout
# tcp_sproxy_establish_delay = 20
# the initial VNC connection to the proxy timeout
# tcp_sproxy_connect_delay = 60
# the number of seconds a connection will stay open after the initial handshake
# tcp_sproxy_session_lenght = 3600
[smart_cleaner]
# Pulse2 Smart Cleaner path (on the target); if empty we do not use it
# smart_cleaner_path = /usr/bin/pulse2-smart-cleaner.sh
# cleaner options, space separated, see doc
# smart_cleaner_options =
[scheduler_01]
# host = 127.0.0.1
# port = 8000
# enablessl = True
# username = username
# password = password
# periodically talk to our referent scheduler
# awake_time = 600
# add some randomness in our wake-ups
# awake_incertitude_factor = .2
# if set to yes, do not send results directly after a command completion
# defer_results = no
# Launchers sections, one per launcher
[launcher_01]
# bind = 127.0.0.1
port = 8001
# slots = 300
# username = username
# password = password
# scheduler = scheduler_01
enablessl = True
cacert = /etc/mmc/pulse2/launchers/keys/cacert.pem
localcert = /etc/mmc/pulse2/launchers/keys/privkey.pem
verifypeer = False
# You can override the logger configuration of this launcher with the
# loggconffile option, else the logger configuration from the current file will
# be used.
# logconffile = /etc/mmc/pulse2/launchers/log_launcher_01.ini
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("/var/log/mmc/pulse2-launchers.log",)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("/var/log/mmc/pulse2-launcher-01.log",)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
[main]
server =
port = 9999
command_name = C:\Program Files\OCS Inventory Agent\OCSInventory.exe
command_attr = /SERVER:127.0.0.1 /PNUM:9999
enablessl = True
verifypeer = False
cacert = cacert.pem
localcert = privkey.pem
# [polling]
# activate = 0
# type = reg
# time = 600
# path = HKEY_LOCAL_MACHINE\Software\Mandriva\Inventory\Client\do_inventory
[loggers]
keys = root
[handlers]
keys = hand01
[formatters]
keys = form01
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = handlers.NTEventLogHandler
level = INFO
formatter = form01
args = ("Pulse 2 Proxy SSL",)
# [handler_hand01]
# class = handlers.RotatingFileHandler
# level = DEBUG
# formatter = form01
# args = ("C:\\Program Files\\Mandriva\\Pulse2 Inventory SSL Proxy\\log.txt", "a", 100*1024, 5)
[formatter_form01]
format = %(asctime)s %(levelname)s %(message)s
# Xml update add extra information to OCS XML output
[xmlupdate]
# execute the update
enable = False
# keep a local copy of the updated XML
keepxmlupdate = False
# notify software updates
updatedetection = False
# add software icons to XML output
addicon = False
# add OCS execution debug to OCS XML output
[ocsdebug]
enable = False
[main]
# server =
# port = 9999
# path = /
# tmpdirname = /tmp/Pulse2InventoryProxy
# command_name = /usr/local/bin/ocsinventory-agent
# command_attr = -l /tmp/Pulse2InventoryProxy
# enablessl = True
# key_file = conf/key/privkey.pem
# cert_file = conf/key/cacert.pem
[main]
# ----------
# NETWORKING
# ----------
# port = 9990
# host =
# The public IP sent when a client agent asks how to reach this package server; set to the "host" parameter value if not defined
# public_ip =
#
# --------
# PACKAGES
# --------
# Is package autodetection activated ?
# package_detect_activate = 0
# Time between two loops of detection
# package_detect_loop = 60
# methods in none, last_time_modification, check_size
# for more than 1 method, separate with ","
# package_detect_smart_method = none
# package_detect_smart_time = 60
# The package API can synchronise package data to other servers
# package_mirror_loop = 5
# package synchronisation targets
# package_mirror_target =
# package synchronisation state file, used only if package_mirror_target is defined
# File where pending syncs are written so that they can be finished on package server restart.
# package_mirror_status_file = /var/data/mmc/status
# package synchronisation command to use
# package_mirror_command = /usr/bin/rsync
# package synchronisation command options
# package_mirror_command_options = -ar --delete
# package synchronisation command on only one level options
# package_mirror_level0_command_options = -d --delete
# options passed to SSH via "-o" if specified --rsh is automatically added to package_mirror_command_options
# package_mirror_command_options_ssh_options =
# for example
# package_mirror_command_options_ssh_options = IdentityFile=/root/.ssh/id_dsa StrictHostKeyChecking=no Batchmode=yes PasswordAuthentication=no ServerAliveInterval=10 CheckHostIP=no ConnectTimeout=10
# loop for the sync of the whole package directory
# can only be activated when package_mirror_target is given
# package_global_mirror_activate = 1
# package_global_mirror_loop = 3600
# package_global_mirror_command_options = -ar --delete
# real package deletion
# real_package_deletion = 0
#
# ----------
# MISC STUFF
# ----------
# machine/mirror assign algo
# mm_assign_algo = default
# user/packageput assign algo
# up_assign_algo = default
# tmp_input_dir = /tmp/packages/default
[daemon]
# the place where we put the daemons' PID files
# pidfile = /var/run/pulse2-package-server.pid
# user = root
# group = root
# umask = 0022
# Example of non-root execution settings:
# user = mmc
# group = mmc
# umask = 0002
[ssl]
# enablessl = 1
# username =
# password =
# certfile = /etc/mmc/pulse2/package-server/keys/cacert.pem
# privkey = /etc/mmc/pulse2/package-server/keys/privkey.pem
# verifypeer = 0
[mmc_agent]
# We sometimes need to talk to our referent agent
# here are some key to speak with it
# host = 127.0.0.1
# port = 7080
# username = mmc
# password = s3cr3t
# enablessl = True
# verifypeer = False
# cacert = /etc/mmc/pulse2/package-server/keys/cacert.pem
# localcert = /etc/mmc/pulse2/package-server/keys/privkey.pem
[mirror_api]
# mount_point = /rpc
[user_packageapi_api]
# mount_point = /upaa
[scheduler_api]
# mount_point = /scheduler_api
# schedulers = scheduler_01
# [imaging_api]
# mount_point = /imaging_api
# uuid = PLEASE_PUT_A_UUID_FOR_THAT_SERVER
##################
# Some locations #
##################
### main tree
# base_folder = /var/lib/pulse2/imaging
### Bootloaders
# Where bootloader (and bootsplash) is stored, relative to "base_folder"
# bootloader_folder = bootloader # Where bootloader (and bootsplash) is stored, relative to "base_folder"
# pxe_bootloader = pxe_boot # the network bootloader
# cd_bootloader = cd_boot # the cdrom bootloader
# bootsplash_file = bootsplash.xpm # the bootloader splashscreen
### Bootmenus
# Where boot menus are generated / being served, relative to "base_folder"
# bootmenus_folder = bootmenus
### Diskless
# Where kernel, initrd and other official diskless tools are stored, relative to "base_folder"
# diskless_folder = diskless # Where kernel, initrd and other official diskless tools are stored, relative to "base_folder"
# diskless_kernel = kernel # Name of the diskless kernel to run
# diskless_initrd = initrd # Name of the diskless initrd to boot (core)
# diskless_initrdcd = initrdcd # Name of the diskless initrd to boot (addon to boot on CD)
# diskless_memtest = memtest # diskless memtest tool
### Inventories
# Where inventories are stored / retrieved, relative to "base_folder"
# inventories_folder = inventories
### NFS
# Where additional material (hdmap, exclude) is stored / retrieved, relative to "base_folder"
# (a.k.a. /revoinfo client side)
# computers_folder = computers
# Where images are stored, relative to "base_folder"
# (a.k.a. /revoinfo client side)
# masters_folder = masters
# Where postinst tools are stored, relative to "base_folder"
# (a.k.a. /opt client side)
# postinst_folder = postinst
# will contain archived computer imaging data
# archives_folder = archives
### ISO images generation
# isos_folder = /var/lib/pulse2/imaging/isos # will contain generated ISO images
# isogen_tool = /usr/bin/mkisofs # tool used to generate ISO file
### RPC offline mode
# RPC replay file name, relative to "base_folder"
# rpc_replay_file = rpc-replay.pck
# RPC replay loop timer in seconds
# rpc_loop_timer = 60
# RPC to replay at each loop
# rpc_count = 10
# Interval in seconds between two RPCs
# rpc_interval = 2
# Our UUID cache *inside* base_folder
# uuid_cache_file = uuid-cache.txt
# Our UUID cache lifetime
# uuid_cache_lifetime = 300
[mirror:01]
mount_point = /mirror1
src = /var/lib/pulse2/packages
[package_api_put:01]
mount_point = /package_api_get1
src = /var/lib/pulse2/packages
tmp_input_dir = /tmp/package_tmp/put1
# Logger configuration
[loggers]
keys = root,imaging
[handlers]
keys = hand01,hand_imaging
[formatters]
keys = form01,form_imaging
[logger_root]
level = NOTSET
handlers = hand01
[handler_hand01]
class = FileHandler
level = INFO
formatter = form01
args = ("%s" % %(pulse2_log_file_package_server)s,)
[formatter_form01]
format = %%(asctime)s %%(levelname)s %%(message)s
# Imaging API logger configuration
[logger_imaging]
level = NOTSET
handlers = hand_imaging
propagate = 0
qualname = imaging
[handler_hand_imaging]
class = FileHandler
level = INFO
formatter = form_imaging
args = ("%s" % %(pulse2_log_file_package_server)s,)
[formatter_form_imaging]
format = %%(asctime)s %%(levelname)s Imaging: %%(message)s
#[main]
#dbdriver = mysql
#dbhost = localhost
#dbname = inventory
#dbuser = mmc
#dbpasswd = mmc
#dbpoolrecycle =
#dbport =
#dbsslenable = 0
#dbsslca =
#dbsslcert =
#dbsslkey =
#[associations:XX]
# the mirror url you want to declare
#mirror = MIRROR_URL
# the type of terminal (SAGEM or )
#terminal_types = TERMINAL_TYPE
# the type of api it is (package_api or mirror)
#kind = KIND
[scheduler]
# This scheduler name
id = scheduler_01
## MANAGING ##
# scheduler_path = /usr/sbin/pulse2-scheduler
## NETWORK ##
# port = 8000
# host = 127.0.0.1
# username = username
# password = password
# enablessl = True
# If verifypeer is set, the scheduler will also connect to all the launchers
# using the following certificates.
# verifypeer = False
# cacert = /etc/mmc/pulse2/scheduler/keys/cacert.pem
# localcert = /etc/mmc/pulse2/scheduler/keys/privkey.pem
## SCHEDULING ##
# for some functions below, we add this factor, to prevent load peaks
# incertitude_factor = .2
#
# initial wait time, in seconds, to make stuff stabilize
# initial_wait = 2
#
# attempt to start (and stop) commands every n seconds
# awake_time = 600
#
# preemption settings: every N seconds, run M commands
# preempt_amount = 50
# preempt_period = 1
#
# status settings: check every N seconds, +/- incertitude_factor %
# checkstatus_period = 900
#
# health logging : log some stats every N seconds, +/- incertitude_factor %
# loghealth_period = 60
#
# once per day, at the hour specified below (HH:MM:SS), the scheduler will
# analyse the database, looking for weird / broken commands;
# you should enable this if you see stalled commands in the logs;
# in this case, please specify an hour when the scheduler is
# not heavily used (8 pm can be a good choice: analyse_hour = 20:00:00)
# set to empty by default to disable analysis
# analyse_hour =
#
# attempt to clean states in commands every n seconds
# clean_states_time = 3600
# states that are going to be cleaned
# (from the launcher's point of view, i.e.:
# * stop -> the command is not running on the launcher but its state in the db says it is
# * run -> the command is running on the launcher but its state in the db says it is not)
# possible states: run, stop
# active_clean_states =
#
# default database encoding (used to inject log files)
# dbencoding = utf-8
#
# scheduler mode, see doc !
# mode = async
#
# max number of slots to use, for all launchers
# max_slots = 300
#
# locking system
# please use with caution: the only reason to activate this feature
# is for systems under heavy load; the risk of double-preemption is
# drastically reduced by using this, but your system will be even slower
# lock_processed_commands = False
## Commands life ##
# Maximum default authorized time for a command (for each stage)
# max_command_time = 3600
#
# Maximum upload time for a command (aka 'upload' stage); overrides the previous one for very long uploads
# max_upload_time = 21600
#
# Maximum WOL wait-for-completion time for a command: wait this many seconds before going further
# max_wol_time = 300
## NETWORKING ##
# preferred method to reach a client
# available:
# - fqdn: attempt to perform a DNS query on FQDN
# - hosts: same, no DNS query
# - ip: just try IP address
# - netbios: attempt to do an nmblookup on the IP address
# tests are performed using ping
# resolv_order = fqdn hosts netbios ip
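A rough sketch of what this resolution order could look like, with the fqdn/hosts distinction collapsed onto the system resolver and the netbios branch omitted; this is an illustration, not the scheduler's actual code.
import socket
import subprocess

def resolve(target, resolv_order="fqdn hosts netbios ip"):
    """Return the first address that answers a ping, trying methods in order."""
    for method in resolv_order.split():
        ip = None
        if method in ("fqdn", "hosts"):
            # simplified: the system resolver covers both DNS and /etc/hosts here
            try:
                ip = socket.gethostbyname(target["name"])
            except socket.error:
                pass
        elif method == "ip":
            ip = target["ipaddr"]
        elif method == "netbios":
            pass   # would shell out to nmblookup on the IP address; omitted in this sketch
        if ip and subprocess.call(["ping", "-c", "1", "-W", "1", ip],
                                  stdout=subprocess.DEVNULL) == 0:
            return ip
    return None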
## CHECKING ##
# in some situations it may be useful to do some pre-checks on clients
# checks can be done against any information recorded in the "target" table
# the following information is available:
# target_name => contains the target hostname
# target_uuid => contains the target UUID
# target_ipaddr => contains the target IP addresses; only the first one is used
# target_macaddr => contains the target MAC addresses; only the first one is used
# The two following options are formatted as follows:
# *_check = <data-to-ask>=<expected-value>,...
# for example:
# server_check = IP=ipaddr
# client_check = UUID=uuid,HOSTNAME=name
# => will ask the client whether its UUID is 'target_uuid' and its HOSTNAME is 'target_name',
# and tell it to give us its IP and check that the IP's value is 'target_ipaddr'
# While checking things, we may also want to announce what we are currently trying to
# do on the client, for each stage, for example TRANFERT while transferring something:
# announce_check = transfert=TRANFERT (comma-separated list as for previous options)
# currently available keywords: transfert, execute, delete, inventory
# announce_check =
# server_check =
# client_check =
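The <data-to-ask>=<expected-value>,... syntax above is just a comma-separated mapping; a small parsing sketch (not the scheduler's real parser):
def parse_check(option):
    """Turn 'UUID=uuid,HOSTNAME=name' into {'UUID': 'uuid', 'HOSTNAME': 'name'}."""
    result = {}
    for item in option.split(","):
        if not item.strip():
            continue
        key, _, value = item.partition("=")
        result[key.strip()] = value.strip()
    return result

# parse_check("UUID=uuid,HOSTNAME=name") -> {'UUID': 'uuid', 'HOSTNAME': 'name'}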
## Computer / Group assignment ##
# mg_assign_algo = default
[database]
# dbdriver = mysql
# dbhost = localhost
# dbport = 3306
# dbname = msc
# dbuser = mmc
# dbpasswd = mmc
# dbdebug = ERROR
# dbpoolrecycle = 60
# dbpooltimeout = 30
# SSL support
# dbsslenable = 0
# dbsslca =
# dbsslcert =
# dbsslkey =
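The db* options above translate directly into a database URL and pool settings; a hedged sketch assuming an SQLAlchemy-style engine (which the dbpoolrecycle/dbpooltimeout options hint at), with the commented defaults used as illustrative values only:
from sqlalchemy import create_engine

# Illustrative values mirroring the commented defaults above.
cfg = {"dbdriver": "mysql", "dbuser": "mmc", "dbpasswd": "mmc",
       "dbhost": "localhost", "dbport": 3306, "dbname": "msc",
       "dbpoolrecycle": 60, "dbpooltimeout": 30}

url = "%(dbdriver)s://%(dbuser)s:%(dbpasswd)s@%(dbhost)s:%(dbport)s/%(dbname)s" % cfg
engine = create_engine(url,
                       pool_recycle=cfg["dbpoolrecycle"],   # recycle stale MySQL connections
                       pool_timeout=cfg["dbpooltimeout"])   # max wait for a pooled connection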
[daemon]
# pidfile = /var/run/pulse2
# user = root
# group = root
# umask = 0077
# Example of non-root execution settings:
# user = mmc
# group = mmc
# umask = 0007
# The setrlimit option allows setting limits on the process's system resource usage. The syntax is:
# setrlimit = limit1 soft1 hard1 limit2 soft2 hard2 ...
# For example:
# setrlimit = RLIMIT_NOFILE 2048 2048 RLIMIT_CORE 0 0
# setrlimit =
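The setrlimit string is a flat list of (limit, soft, hard) triplets; a sketch of how such a value could be applied with Python's resource module (a standalone illustration, not the daemon's own code):
import resource

def apply_setrlimit(option):
    """Apply a 'RLIMIT_NOFILE 2048 2048 RLIMIT_CORE 0 0' style value."""
    tokens = option.split()
    for i in range(0, len(tokens), 3):
        name, soft, hard = tokens[i], int(tokens[i + 1]), int(tokens[i + 2])
        resource.setrlimit(getattr(resource, name), (soft, hard))

# apply_setrlimit("RLIMIT_NOFILE 2048 2048 RLIMIT_CORE 0 0")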
# Our launchers
[launcher_01]
host=%(ipv4)s
port=8001
username = username
password = password
enablessl = True
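Each [launcher_XX] section describes an RPC endpoint driven by the scheduler; purely as an illustration, and assuming an XML-RPC endpoint with HTTP basic authentication (the real client code and method names are not shown in this file):
import xmlrpc.client

# Hypothetical connection built from the [launcher_01] options above; the real
# scheduler uses its own client code and RPC method names, which are not shown here.
url = "https://%s:%s@%s:%d/" % ("username", "password", "127.0.0.1", 8001)
launcher = xmlrpc.client.ServerProxy(url)
# launcher.some_method()   # placeholder: no actual launcher method name is assumed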
[loggers]
keys=root
[handlers]
keys=hand01
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=FileHandler
level=INFO
formatter=form01
args=("%s" % %(pulse2_log_file_scheduler)s,)
[formatter_form01]
format=%%(asctime)s %%(levelname)s %%(message)s