Commit 1d1d3127 authored by Rafael Monnerat

Merge branch 'master' into '1.0'

Update 1.0 branch for Next Release



See merge request !32
parents b32f30c3 7f4aa2b5
# Go language - https://golang.org/
[buildout]
parts = golang
[golang]
<= golang15
[golang-common]
recipe = slapos.recipe.cmmi
configure-command = :
location = ${buildout:parts-directory}/${:_buildout_section_name_}
make-binary =
make-targets= cd src && ./all.bash && cp -alf .. ${:location}
environment =
  GOROOT_FINAL=${:location}
  ${:environment-extra}
[golang14]
<= golang-common
url = https://storage.googleapis.com/golang/go1.4.3.src.tar.gz
md5sum = dfb604511115dd402a77a553a5923a04
environment-extra =
[golang15]
<= golang-common
url = https://storage.googleapis.com/golang/go1.5.1.src.tar.gz
md5sum = 4adfbdfca523cc1c229be8a321f3602f
# go1.5 needs go1.4 to bootstrap
environment-extra =
  GOROOT_BOOTSTRAP=${golang14:location}
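# Illustration (not part of the recipe): GOROOT_BOOTSTRAP points all.bash at
# the pre-built go1.4 toolchain. A hedged Python sketch of what the generated
# build step amounts to, with an illustrative part location:
#
#   import os, subprocess
#   env = dict(os.environ, GOROOT_BOOTSTRAP='/opt/slapos/parts/golang14')
#   # all.bash compiles the go1.5 toolchain with the pre-built go1.4 compiler
#   subprocess.check_call(['./all.bash'], cwd='go/src', env=env)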
@@ -11,8 +11,8 @@ parts = haproxy
[haproxy]
recipe = slapos.recipe.cmmi
url = http://www.haproxy.org/download/1.5/src/haproxy-1.5.14.tar.gz
md5sum = ad9d7262b96ba85a0f8c6acc6cb9edde
url = http://www.haproxy.org/download/1.5/src/haproxy-1.5.15.tar.gz
md5sum = eeaa35744f84c92184cd735ee56dd0a3
configure-command = true
# If the system is running on Linux 2.6, we use "linux26" as the TARGET,
# otherwise use "generic".
# helloweb - programs to say hello to the Web in various languages
[buildout]
extends =
  ../git/buildout.cfg
  ../ruby/buildout.cfg
  ../golang/buildout.cfg

parts =
  helloweb-python
  helloweb-ruby
  helloweb-go
# repository with examples
[helloweb-repository]
recipe = slapos.recipe.build:gitclone
git-executable = ${git:location}/bin/git
repository = https://lab.nexedi.com/nexedi/helloweb.git
revision = 0487fa7bc88e83ce8b97d784ab11354aa8fac603
location = ${buildout:parts-directory}/helloweb
# -*- python -*-
[helloweb-egg]
recipe = zc.recipe.egg:develop
egg = helloweb
setup = ${helloweb-repository:location}/python/
[helloweb-python]
recipe = zc.recipe.egg:scripts
eggs = ${helloweb-egg:egg}
scripts = helloweb=helloweb-python
# -*- ruby -*-
# if a ruby program is available as an already-released gem, we can install
# it with `gem install ...` (via rubygemsrecipe).
#
# Alternatively, if we need to
# install the program from a source checkout, the Ruby way is to use bundler:
# install the program's dependencies via it, and run the program itself via it.
#
# Since for helloweb.rb we have a source checkout, we go the second - bundler - way.
# bundler, which we'll use to install gems and run binaries (via `bundle exec ...`)
[bundler]
# rubygemsrecipe with fixed url and this way pinned rubygems version
recipe = rubygemsrecipe
url = https://rubygems.org/rubygems/rubygems-2.4.8.zip
ruby-location = ${ruby2.1:location}
ruby-executable = ${:ruby-location}/bin/ruby
gems = bundler==1.10.6
# bin installed here
bundle = ${buildout:bin-directory}/bundle
# install with the path to ruby enabled
# (reason: rubygemsrecipe hardcodes PATH inside the generated bin/* and it is
# impossible to adjust it later)
#
# bundle exec <smth> ; <smth> starts with `#!/usr/bin/env ruby`, as rubygems
# binaries do, so ruby has to be findable via PATH
environment =
  PATH = ${:ruby-location}/bin:%(PATH)s
[helloweb-ruby-bundle]
recipe = slapos.recipe.cmmi
path = ${helloweb-repository:location}/ruby/
configure-command = :
make-binary =
make-targets= cd ${:path} && ${bundler:bundle} install
[helloweb-ruby]
recipe = slapos.cookbook:wrapper
wrapper-path = ${buildout:bin-directory}/${:_buildout_section_name_}
environment =
  BUNDLE_GEMFILE = ${helloweb-ruby-bundle:path}/Gemfile
command-line =
  ${bundler:bundle} exec sh -c 'helloweb.rb "$@"' ${:_buildout_section_name_}
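# Note: the trailing ${:_buildout_section_name_} becomes $0 of the `sh -c`
# script, so the wrapper's own arguments land in "$@" and reach helloweb.rb
# unchanged. A hedged Python sketch of the same forwarding (the echo script
# is illustrative only):
#
#   import subprocess, sys
#   # 'helloweb-ruby' fills $0; everything after it becomes "$@"
#   subprocess.call(
#       ['sh', '-c', 'echo "$0 received: $@"', 'helloweb-ruby'] + sys.argv[1:])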
# -*- go -*-
[helloweb-go]
recipe = slapos.recipe.cmmi
path = ${helloweb-repository:location}/go/
go = ${golang15:location}/bin/go
configure-command = :
make-binary =
make-targets= cd ${:path} &&
  ${:go} build
    -o ${buildout:bin-directory}/${:_buildout_section_name_}
    helloweb.go
@@ -18,15 +18,15 @@ environment =
[libpng12]
<= libpng-common
url = http://download.sourceforge.net/libpng/libpng-1.2.53.tar.xz
md5sum = 7d18a74e6fd2029aee76ccd00e00a9e6
url = http://download.sourceforge.net/libpng/libpng-1.2.54.tar.xz
md5sum = bbb7a7264f1c7d9c444fd16bf6f89832
[libpng15]
<= libpng-common
url = http://download.sourceforge.net/libpng/libpng-1.5.23.tar.xz
md5sum = 725f9b98143450df03decf08b4bc42e3
url = http://download.sourceforge.net/libpng/libpng-1.5.24.tar.xz
md5sum = b7f828f2907214fe1e8951a3b9d4623a
[libpng]
<= libpng-common
url = http://download.sourceforge.net/libpng/libpng-1.6.18.tar.xz
md5sum = 6a57c8e0f5469b9c9949a4b43d57b3a1
url = http://download.sourceforge.net/libpng/libpng-1.6.19.tar.xz
md5sum = 1e6a458429e850fc93c1f3b6dc00a48f
@@ -11,8 +11,8 @@ parts =
[libxml2]
recipe = slapos.recipe.cmmi
url = ftp://ftp.xmlsoft.org/libxml2/libxml2-2.9.2.tar.gz
md5sum = 9e6a9aca9d155737868b3dc5fd82f788
url = http://xmlsoft.org/sources/libxml2-2.9.3.tar.gz
md5sum = daece17e045f1c107610e137ab50c179
configure-options =
  --disable-static
  --without-python
@@ -20,9 +20,9 @@ parts =
[mariadb]
recipe = slapos.recipe.cmmi
version = 10.1.8
version = 10.1.9
url = https://downloads.mariadb.org/f/mariadb-${:version}/source/mariadb-${:version}.tar.gz/from/http:/ftp.osuosl.org/pub/mariadb
md5sum = 9ba0aaabba40153d83e70edcc1aa43a8
md5sum = 71d0e2408a671e1125c1310ce657f688
location = ${buildout:parts-directory}/${:_buildout_section_name_}
patch-options = -p0
patches =
@@ -19,6 +19,8 @@ md5sum = 27322fbb4b265c0e0cc548f5e6b7f201
configure-options=
  --with-ipv6
  --with-http_ssl_module
  --with-http_spdy_module
  --with-http_gzip_static_module
  --with-mail
  --with-mail_ssl_module
  --with-ld-opt="-L ${openssl:location}/lib -L ${pcre:location}/lib -L ${zlib:location}/lib -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${zlib:location}/lib"
@@ -36,6 +38,8 @@ mode = 0644
configure-options =
  --with-ipv6
  --with-http_ssl_module
  --with-http_spdy_module
  --with-http_gzip_static_module
  --with-mail
  --with-mail_ssl_module
  --error-log-path=var/log/nginx.error.log
[buildout]
parts =
packer
[packer]
recipe = slapos.recipe.build
# per-architecture download urls
url_x86-64 = https://dl.bintray.com/mitchellh/packer/packer_0.7.5_linux_amd64.zip
url_x86 = https://dl.bintray.com/mitchellh/packer/packer_0.7.5_linux_386.zip
# supported architectures md5sums
md5sum_x86 = a545108a0ccfde7c1e74de6c4e6fdded
md5sum_x86-64 = f343d709b84db494e8d6ec38259aa4a6
# script to install.
script =
  location = %(location)r
  self.failIfPathExists(location)
  import sys
  ARCH_DIR_MAP = { 'x86': 'x86', 'x86-64': 'x86_64' }
  WK_SUFIX_MAP = { 'x86': '386', 'x86-64': 'amd64' }
  platform = guessPlatform()
  url = self.options['url_' + platform]
  md5sum = self.options['md5sum_' + platform]
  extract_dir = self.extract(self.download(url, md5sum))
  shutil.move(extract_dir, location)
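# Illustration (not part of the recipe): a standalone sketch of the same
# select-by-architecture logic, assuming a guess_platform() helper analogous
# to the guessPlatform() provided by slapos.recipe.build:
#
#   import platform
#   def guess_platform():
#       # map the machine architecture to the option-name suffixes used above
#       return {'x86_64': 'x86-64', 'i686': 'x86', 'i586': 'x86'}[platform.machine()]
#   options = {'url_x86-64': '...amd64.zip', 'url_x86': '...386.zip'}
#   url = options['url_' + guess_platform()]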
@@ -12,21 +12,26 @@ parts = postgresql
<= postgresql92
[postgresql91]
[postgresql-common]
recipe = slapos.recipe.cmmi
url = http://ftp.postgresql.org/pub/source/v9.1.13/postgresql-9.1.13.tar.bz2
md5sum = f50e201b4ef7e0581bf32a1a32c9f14c
configure-options = --with-openssl --with-perl
configure-options = --with-openssl
environment =
CPPFLAGS=-I${zlib:location}/include -I${readline:location}/include -I${openssl:location}/include -I${ncurses:location}/lib
LDFLAGS=-L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${perl:location}/libs-c -Wl,-rpath=${perl:location}/libs-c
[postgresql91]
<= postgresql-common
url = http://ftp.postgresql.org/pub/source/v9.1.13/postgresql-9.1.13.tar.bz2
md5sum = f50e201b4ef7e0581bf32a1a32c9f14c
configure-options += --with-perl
[postgresql92]
recipe = slapos.recipe.cmmi
url = http://ftp.postgresql.org/pub/source/v9.2.8/postgresql-9.2.8.tar.bz2
md5sum = c5c65a9b45ee53ead0b659be21ca1b97
configure-options = --with-openssl
environment =
CPPFLAGS=-I${zlib:location}/include -I${readline:location}/include -I${openssl:location}/include -I${ncurses:location}/lib
LDFLAGS=-L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${perl:location}/libs-c -Wl,-rpath=${perl:location}/libs-c
<= postgresql-common
url = http://ftp.postgresql.org/pub/source/v9.2.14/postgresql-9.2.14.tar.bz2
md5sum = ce2e50565983a14995f5dbcd3c35b627
# ld: .../perl/libs-c/libperl.a(op.o): relocation R_X86_64_32 against `.rodata.str1.8' can not be used when making a shared object; recompile with -fPIC
# .../parts/perl/libs-c/libperl.a: could not read symbols: Bad value
# (because libperl.a is not compiled with fPIC)
# -> no --with-perl
From 4e23f7038ae9bdffeab84f1abb5aaf9552ab580e Mon Sep 17 00:00:00 2001
From: Kirill Smelkov <kirr@nexedi.com>
Date: Mon, 2 Nov 2015 20:22:33 +0300
Subject: [PATCH] tests: Disable replication-psync for now
This test fails on multi-cpu machines for almost all known Redis versions.
A lot of people reported it to the upstream author, and I too provided
detailed feedback about how it fails and on which hardware, and offered
ssh access to debug:
https://github.com/antirez/redis/issues/2715#issuecomment-151608948
The issue has been known there for ~ 3 months, with several people
offering to help, but so far it remains broken.
Since we don't use Redis replication, it is ok for us to disable the
test and move on.
NOTE: comments in tcl don't work inside lists (the construct inside
{ ... }) - that's why the entry was moved out of the list.
---
tests/test_helper.tcl | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
index d1ebde1..72e7022 100644
--- a/tests/test_helper.tcl
+++ b/tests/test_helper.tcl
@@ -11,6 +11,9 @@ source tests/support/tmpfile.tcl
source tests/support/test.tcl
source tests/support/util.tcl
+# fails on multi-cpu machines
+# https://github.com/antirez/redis/issues/2715#issuecomment-151608948
+# integration/replication-psync
set ::all_tests {
unit/printver
unit/auth
@@ -33,7 +36,6 @@ set ::all_tests {
integration/replication-2
integration/replication-3
integration/replication-4
- integration/replication-psync
integration/aof
integration/rdb
integration/convert-zipmap-hash-on-load
--
2.6.2.521.g3f87150
@@ -6,9 +6,12 @@ parts =
extends = ../tcl/buildout.cfg
[redis]
recipe = hexagonit.recipe.cmmi
url = https://github.com/geoffgarside/redis/archive/ipv6.tar.gz
md5sum = 0eb594bd4e8ee746a0e2ef67b28cb5d8
<= redis28
[redis28]
recipe = slapos.recipe.cmmi
url = http://download.redis.io/releases/redis-2.8.23.tar.gz
md5sum = ac7f43f845d0eedb8ae3e5e217b34c61
configure-command = true
prefix =
make-options =
@@ -19,3 +22,6 @@ make-targets =
  test
environment =
  PATH=${tcl:location}/bin:%(PATH)s
patch-options = -p1
patches =
  ${:_profile_base_location_}/0001-tests-Disable-replication-psync-for-now.patch#1ed899443ed70fce02d9b95f5e7ca046
@@ -26,8 +26,8 @@ environment =
[ruby2.1]
<= ruby-common
url = http://ftp.ruby-lang.org/pub/ruby/2.1/ruby-2.1.6.tar.xz
md5sum = ec6f10ca331ce947802ede86259513a8
url = http://ftp.ruby-lang.org/pub/ruby/2.1/ruby-2.1.7.tar.xz
md5sum = 2e43a1d32cc16975a6b5d3ffce399199
[ruby2.2]
@@ -28,7 +28,7 @@ from setuptools import setup, find_packages
import glob
import os
version = '1.0.15'
version = '1.0.17.dev0'
name = 'slapos.cookbook'
long_description = open("README.txt").read() + "\n" + \
open("CHANGES.txt").read() + "\n"
@@ -71,7 +71,6 @@ setup(name=name,
'zc.buildout': [
'addresiliency = slapos.recipe.addresiliency:Recipe',
'accords = slapos.recipe.accords:Recipe',
'agent = slapos.recipe.agent:Recipe',
'apache.zope.backend = slapos.recipe.apache_zope_backend:Recipe',
'apacheperl = slapos.recipe.apacheperl:Recipe',
'apachephp = slapos.recipe.apachephp:Recipe',
##############################################################################
#
# Copyright (c) 2012 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#############################################################################
import zc.buildout
from slapos.recipe.librecipe import GenericBaseRecipe
import sys
class Recipe(GenericBaseRecipe):

  def install(self):
    path_list = []
    configuration_path = self.options["config"]
    header = """[DEFAULT]
master_url = %s
key = %s
cert = %s
max_install_duration = %s
max_uninstall_duration = %s
max_request_duration = %s
max_destroy_duration = %s
""" % (self.options["master-url"],
       "\n ".join(self.options["key"].split("\n")),
       "\n ".join(self.options["cert"].split("\n")),
       self.options["default_max_install_duration"],
       self.options["default_max_uninstall_duration"],
       self.options["default_max_request_duration"],
       self.options["default_max_destroy_duration"])
    with open(configuration_path, "w") as configuration:
      configuration.write(header + self.options["configuration"])
    path_list.append(self.createPythonScript(
      self.options['wrapper'],
      'slapos.recipe.librecipe.execute.execute',
      [self.options["agent_binary"], '--pidfile=%s' % self.options["pidfile"],
       "--log=%s" % self.options["log"], configuration_path]))
    path_list.append(configuration_path)
    return path_list
import os
import sys
import time

def catdatefile(args):
  directory = args[0]
  try:
    suffix = args[1]
  except IndexError:
    suffix = '.log'
  f = open(os.path.join(directory,
    time.strftime('%Y-%m-%d.%H:%M.%s') + suffix), 'a')
  for line in sys.stdin:
    f.write(line)
  f.close()
@@ -9,10 +9,10 @@ expected = "%(expected-value)s"
not_expected = "%(expected-not-value)s"
if expected != "" and value != expected:
print "FAIL: %s != %s" % (value, expected)
print "FAIL: %%s != %%s" %% (value, expected)
sys.exit(127)
if not_expected != "" and value == not_expected:
print "FAIL: %s == %s" % (value, not_expected)
print "FAIL: %%s == %%s" %% (value, not_expected)
sys.exit(127)
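# Why the doubling above: this script body is itself rendered with one round
# of Python %-substitution (which fills %(expected-value)s and turns %% into
# %) before it runs. A tiny illustration, assuming that rendering step:
#
#   template = 'expected = "%(expected-value)s"\nprint "FAIL: %%s != %%s" %% (value, expected)'
#   print template % {'expected-value': '42'}
#   # expected = "42"
#   # print "FAIL: %s != %s" % (value, expected)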
@@ -37,7 +37,8 @@ class Recipe(GenericBaseRecipe):
'url': self.options['url'],
'shell_path': self.options['dash_path'],
'curl_path': self.options['curl_path'],
'check_secure': self.options.get('check-secure', 0)
'check_secure': self.options.get('check-secure', 0),
'http_code': self.options.get('http_code', '200'),
}
# XXX-Cedric in this script, curl won't check certificate
@@ -4,7 +4,7 @@
URL="%(url)s"
if [ -z $URL ]; then
if [ -z "$URL" ]; then
echo "No URL specified." >&2
exit 3
fi
@@ -38,7 +38,7 @@ if [ %(check_secure)s -eq 1 ]; then
fi
fi
if ! [ $CODE -eq 200 ]; then
echo "$URL is not available (returned $CODE)." >&2
if ! [ $CODE -eq %(http_code)s ]; then
echo "$URL is not available (returned $CODE, expected %(http_code)s)." >&2
exit 2
fi
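# Illustration (not part of the script): a hedged Python equivalent of the
# parameterized check above, with an illustrative URL and expected code:
#
#   import sys, urllib2
#   url, expected = 'http://localhost:8080/', 200
#   try:
#       code = urllib2.urlopen(url).getcode()
#   except urllib2.HTTPError, e:
#       code = e.code  # non-2xx responses still carry a status code
#   if code != expected:
#       print '%s is not available (returned %s, expected %s)' % (url, code, expected)
#       sys.exit(2)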
@@ -66,6 +66,8 @@ def updateMysql(args):
assert 'mysql_script' not in conf
with open(script_filename) as script_file:
conf['mysql_script'] = script_file.read()
is_succeeded = False
while True:
while True:
mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--user=root']
if 'socket' in conf:
@@ -74,13 +76,13 @@ def updateMysql(args):
result = mysql_upgrade.communicate()[0]
if mysql_upgrade.returncode is None:
mysql_upgrade.kill()
if mysql_upgrade.returncode != 0 and not 'is already upgraded' in result:
print "Command %r failed with result:\n%s" % (mysql_upgrade_list, result)
else:
if mysql_upgrade.returncode == 0:
print "MySQL database upgraded with result:\n%s" % result
else:
elif 'is already upgraded' in result:
print "No need to upgrade MySQL database"
else:
print "Command %r failed with result:\n%s" % (mysql_upgrade_list, result)
break
mysql_list = [conf['mysql_binary'].strip(), '-B', '--user=root']
if 'socket' in conf:
mysql_list.append('--socket=' + conf['socket'])
@@ -91,7 +93,7 @@ def updateMysql(args):
mysql.kill()
if mysql.returncode != 0:
print 'Command %r failed with:\n%s' % (mysql_list, result)
else:
break
# import timezone database
mysql_tzinfo_to_sql_binary = os.path.join(
os.path.dirname(conf['mysql_binary'].strip()), 'mysql_tzinfo_to_sql')
@@ -100,9 +102,9 @@ def updateMysql(args):
mysql_tzinfo_to_sql = subprocess.Popen(mysql_tzinfo_to_sql_list, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
timezone_sql = mysql_tzinfo_to_sql.communicate()[0]
if mysql.returncode != 0:
if mysql_tzinfo_to_sql.returncode != 0:
print 'Command %r failed with:\n%s' % (mysql_tzinfo_to_sql_list, result)
else:
break
mysql = subprocess.Popen(mysql_list + ['mysql',], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = mysql.communicate(timezone_sql)[0]
@@ -110,8 +112,10 @@ def updateMysql(args):
mysql.kill()
if mysql.returncode != 0:
print 'Command %r failed with:\n%s' % (mysql_list, result)
is_succeed = True
if is_succeed:
break
is_succeeded = True
break
if is_succeeded:
print 'SlapOS initialisation script successfully applied on database.'
break
print 'Sleeping for %ss and retrying' % sleep
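# The refactored flow above reduces to: run every step and, only when all of
# them succeed, report success and stop; otherwise sleep and retry from the
# top. A hedged, simplified sketch (the step callables are placeholders):

import time

def run_until_success(steps, sleep=30):
  while True:
    if all(step() for step in steps):
      print 'SlapOS initialisation script successfully applied on database.'
      return
    print 'Sleeping for %ss and retrying' % sleep
    time.sleep(sleep)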
@@ -257,8 +257,11 @@ class Recipe(GenericBaseRecipe):
ipv6_file = os.path.join(token_list_path, '%s.ipv6' % slave_reference)
ipv6 = self.readFile(ipv6_file) or '::'
ipv4_file = os.path.join(token_list_path, '%s.ipv4' % slave_reference)
node_ipv4 = self.readFile(ipv4_file) or '0.0.0.0'
computer_partition.setConnectionDict(
{'token':token, '1_info':msg, 'ipv6': ipv6},
{'token':token, '1_info':msg, 'ipv6': ipv6, 'ipv4': node_ipv4},
slave_reference)
except Exception:
self.logger.fatal("Error while sending slave %s information: %s",
@@ -6,13 +6,37 @@ import time
import sqlite3
import slapos
import traceback
from re6st import registry, utils, x509
import logging
import socket
import select
from re6st import tunnel, ctl, registry, utils, x509
from OpenSSL import crypto
log = logging.getLogger('SLAPOS-RE6STNET')
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logging.trace = logging.debug
class iterRoutes(object):

  _waiting = True

  def __new__(cls, control_socket, network):
    self = object.__new__(cls)
    c = ctl.Babel(control_socket, self, network)
    c.request_dump()
    while self._waiting:
      args = {}, {}, ()
      c.select(*args)
      utils.select(*args)
    return (prefix
      for neigh_routes in c.neighbours.itervalues()
      for prefix in neigh_routes[1]
      if prefix)

  def babel_dump(self):
    self._waiting = False
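# Note: __new__ above returns a generator rather than an instance, so
# iterRoutes(...) acts as a factory (when __new__ returns a non-instance,
# __init__ is never called). A minimal illustration of the pattern:

class IterSquares(object):
  def __new__(cls, numbers):
    # returning a non-instance skips __init__; the class is just a factory
    return (n * n for n in numbers)

# list(IterSquares([1, 2, 3])) == [1, 4, 9]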
def loadJsonFile(path):
if os.path.exists(path):
@@ -185,10 +209,67 @@ def dumpIPv6Network(slave_reference, db, network, ipv6_file):
subnet = network + utils.binFromSubnet(cn)
ipv6 = utils.ipFromBin(subnet)
writeFile(ipv6_file, ipv6)
return ipv6, utils.binFromSubnet(cn)
except Exception:
log.debug('XXX for %s... \n %s' % (slave_reference,
traceback.format_exc()))
def sendto(sock, prefix, code):
  return sock.sendto("%s\0%c" % (prefix, code), ('::1', tunnel.PORT))

def recv(sock, code):
  try:
    prefix, msg = sock.recv(1<<16).split('\0', 1)
    int(prefix, 2)
  except ValueError:
    pass
  else:
    if msg and ord(msg[0]) == code:
      return prefix, msg[1:]
  return None, None
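# The two helpers above implement a tiny datagram protocol: a prefix written
# as binary digits, a NUL separator, then a one-byte code. A self-contained
# illustration of the framing (values are made up):
#
#   prefix, code = '0010010110', 1
#   datagram = "%s\0%c" % (prefix, code)   # as sent by sendto()
#   p, msg = datagram.split('\0', 1)       # as parsed by recv()
#   int(p, 2)                              # validates the binary prefix
#   assert ord(msg[0]) == code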
def dumpIPv4Network(ipv6_prefix, network, ipv4_file, sock, peer_prefix_list):
  try:
    if ipv6_prefix == "00000000000000000000000000000000":
      # workaround to ignore the first node
      ipv4 = "0.0.0.0"
      writeFile(ipv4_file, ipv4)
      return
    peers = []
    peer_list = [prefix for prefix in peer_prefix_list if prefix == ipv6_prefix]
    if len(peer_list) == 0:
      raise ValueError("Unable to find such a prefix in the database")
    peer = peer_list[0]
    sendto(sock, peer, 1)
    s = sock,
    timeout = 15
    end = timeout + time.time()
    while select.select(s, (), (), timeout)[0]:
      prefix, msg = recv(sock, 1)
      if prefix == peer:
        break
      timeout = max(0, end - time.time())
    else:
      logging.info("Timeout while querying address for %s/%s", int(peer, 2), len(peer))
      msg = ""
    if "," in msg:
      ipv4 = msg.split(',')[0]
    else:
      ipv4 = "0.0.0.0"
    writeFile(ipv4_file, ipv4)
  except Exception:
    log.debug('XXX for %s... \n %s' % (ipv6_prefix,
      traceback.format_exc()))
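# The receive loop above is a classic overall-deadline pattern: keep
# select()ing with a shrinking timeout until the expected reply arrives or
# the deadline passes. Sketched in isolation (a hedged simplification):

def recv_with_deadline(sock, want_prefix, timeout=15):
  end = time.time() + timeout
  while select.select((sock,), (), (), timeout)[0]:
    prefix, msg = recv(sock, 1)  # recv() as defined above
    if prefix == want_prefix:
      return msg
    timeout = max(0, end - time.time())
  return None  # deadline reached without the expected reply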
def checkService(args, can_bang=True):
base_token_path = args['token_base_path']
token_dict = loadJsonFile(args['token_json'])
@@ -206,17 +287,25 @@ def checkService(args, can_bang=True):
ca = client.getCa()
network = x509.networkFromCa(crypto.load_certificate(crypto.FILETYPE_PEM, ca))
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
peer_prefix_list = [prefix for prefix in
iterRoutes("/var/run/re6stnet/babeld.sock", network)]
# Check token status
for slave_reference, token in token_dict.iteritems():
status_file = os.path.join(base_token_path, '%s.status' % slave_reference)
ipv6_file = os.path.join(base_token_path, '%s.ipv6' % slave_reference)
ipv4_file = os.path.join(base_token_path, '%s.ipv4' % slave_reference)
if not os.path.exists(status_file):
# This token is not added yet!
continue
msg = readFile(status_file)
if msg == 'TOKEN_USED':
dumpIPv6Network(slave_reference, db, network, ipv6_file)
ipv6, ipv6_prefix = dumpIPv6Network(slave_reference, db, network, ipv6_file)
dumpIPv4Network(ipv6_prefix, network, ipv4_file, sock, peer_prefix_list)
continue
# Check if token is not in the database
@@ -45,6 +45,11 @@ class Recipe(GenericBaseRecipe):
log_file=self.options['log_file'],
master_passwd=master_passwd
)
if self.options.get('unixsocket'):
unixsocket = "unixsocket %s\nunixsocketperm 700" % self.options['unixsocket']
else:
unixsocket = ""
configuration['unixsocket'] = unixsocket
config = self.createFile(config_file,
self.substituteTemplate(self.getTemplateFilename('redis.conf.in'),
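# The recipe substitutes either a complete stanza or an empty string, so the
# rendered redis.conf only mentions the socket when one was requested. A
# standalone sketch of this optional-stanza pattern (names assumed):

def unixsocket_stanza(options):
  if options.get('unixsocket'):
    return "unixsocket %s\nunixsocketperm 700" % options['unixsocket']
  return ""

# 'port %(port)s\n%(unixsocket)s' % dict(port=6379,
#                                        unixsocket=unixsocket_stanza({}))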
@@ -63,7 +68,8 @@ class Recipe(GenericBaseRecipe):
promise = self.createPythonScript(
promise_script,
'%s.promise.main' % __name__,
dict(host=self.options['ipv6'], port=self.options['port'])
dict(host=self.options['ipv6'], port=self.options['port'],
unixsocket = self.options.get('unixsocket') )
)
path_list.append(promise)
@@ -7,11 +7,11 @@ import sys
def main(args):
host = args['host']
port = int(args['port'])
unixsocket = args['unixsocket']
try:
pool = redis.ConnectionPool(host=host, port=port, db=0)
r = redis.Redis(connection_pool=pool)
r = redis.Redis(host=host, port=port, unix_socket_path=unixsocket, db=0)
r.publish("Promise-Service","SlapOS Promise")
pool.disconnect()
r.connection_pool.disconnect()
sys.exit(0)
except Exception, e:
print str(e)
# Redis configuration file example
# Redis configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
@@ -12,6 +17,26 @@
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
@@ -22,35 +47,61 @@ pidfile %(pid_file)s
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port6 %(port)s
port 0
port %(port)s
# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
# Bind can also be an IPv6 address
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# bind ::1
bind6 %(ipv6)s
bind %(ipv6)s
# Specify the path for the unix socket that will be used to listen for
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
%(unixsocket)s
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# Set server verbosity to 'debug'
# it can be one of:
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile %(log_file)s
@@ -70,7 +121,7 @@ logfile %(log_file)s
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
@@ -84,7 +135,7 @@ databases 16
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
@@ -98,16 +149,16 @@ save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in an hard way) that data is not persisting
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# distater will happen.
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usually even if there are problems with disk,
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
@@ -117,7 +168,7 @@ stop-writes-on-bgsave-error yes
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since verison 5 of RDB a CRC64 checksum is placed at the end of the file.
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%%) when saving and loading RDB files, so you can disable it
# for maximum performances.
@@ -134,7 +185,7 @@ dbfilename dump.rdb
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# Also the Append Only File will be created inside this directory.
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir %(server_dir)s
@@ -142,9 +193,18 @@ dir %(server_dir)s
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
@@ -155,14 +215,14 @@ dir %(server_dir)s
#
%(master_passwd)s
# When a slave lost the connection with the master, or when the replication
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale data is set to 'no' the slave will reply with
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
@@ -179,19 +239,65 @@ slave-serve-stale-data yes
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Replication SYNC strategy: disk or socket.
#
# -------------------------------------------------------
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
# -------------------------------------------------------
#
# New slaves and reconnecting slaves that are not able to continue the replication
# process just receiving differences, need to do what is called a "full
# synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the slaves incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# RDB file to slave sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more slaves
# can be queued and served with the RDB file as soon as the current child producing
# the RDB file finishes its work. With diskless replication instead once
# the transfer starts, new slaves arriving will be queued and a new transfer
# will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple slaves
# will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the slaves.
#
# This is important since once the transfer starts, it is not possible to serve
# new slaves arriving, that will be queued for the next RDB transfer, so the server
# waits a delay in order to let more slaves arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
@@ -199,6 +305,80 @@ slave-read-only yes
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
@@ -218,23 +398,26 @@ slave-read-only yes
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# of hard to guess so that it will be still available for internal-use
# tools but not available for general clients.
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command renaming it into
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able ot configure the process file limit to allow for the specified limit
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
@@ -245,7 +428,7 @@ slave-read-only yes
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# accordingly to the eviction policy selected (see maxmemmory-policy).
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
@@ -253,7 +436,7 @@ slave-read-only yes
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# an hard memory limit for an instance (using the 'noeviction' policy).
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
@@ -269,19 +452,19 @@ slave-read-only yes
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached? You can select among five behavior:
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with all the kind of policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing this commands are: set setnx setex append
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
@@ -322,19 +505,20 @@ slave-read-only yes
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
@@ -362,21 +546,22 @@ appendfsync everysec
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving the durability of Redis is
# the same as "appendfsync none", that in practical terms means that it is
# possible to lost up to 30 seconds of log in the worst scenario (with the
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size will growth by the specified percentage.
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (or if no rewrite happened since the restart, the size of
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
@@ -391,6 +576,30 @@ no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user requires
# to fix the AOF file using the "redis-check-aof" utility before to restart
# the server.
#
# Note that if the AOF file will be found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
@@ -399,35 +608,16 @@ auto-aof-rewrite-min-size 64mb
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
#
# Normal Redis instances can't be part of a Redis Cluster, only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node enable the cluster support uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system does not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
# In order to setup your cluster make sure to read the documentation
# available at http://redis.io web site.
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
@@ -452,6 +642,73 @@ slowlog-log-slower-than 10000
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# of zero or multiple characters. The empty string means that notifications
# are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
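# Illustration (not part of this file): a redis-py client subscribed to
# expired-key events, assuming the server runs with
# "notify-keyspace-events Ex" (connection details are illustrative):
#
#   import redis
#   r = redis.Redis(host='localhost', port=6379)
#   p = r.pubsub()
#   p.psubscribe('__keyevent@0__:expired')
#   for message in p.listen():
#       if message['type'] == 'pmessage':
#           print message['data']  # the expired key's name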
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
@@ -467,7 +724,7 @@ list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
@@ -479,20 +736,34 @@ set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply form time to time
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
@@ -506,9 +777,9 @@ activerehashing yes
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subcribed to at least one pubsub channel or pattern
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
@@ -531,17 +802,30 @@ activerehashing yes
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled just setting it to zero.
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# include /path/to/local.conf
# include /path/to/other.conf
\ No newline at end of file
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
testing agent
=============
The testing agent simulates a normal user interacting with the vifib master. It randomly requests software release installations or software instance instantiations from time to time. SlapOS clients then run these commands remotely, and it is the testing agent that determines whether an error occurs.
Usage
=====
Request a testing agent instance from vifib with the following parameters:
<?xml version='1.0' encoding='utf-8'?>
<instance>
<parameter id="configuration">[DEFAULT]
# ConfigParser's magic section.
computer_list = ["COMP-607"]
master_url = https://slap.vifib.com/
# Note that certificates are now literally in the configuration, meaning
# you may decide to specify different ones for each test. Likewise for
# master_url.
key = -----BEGIN PRIVATE KEY-----
MII[...]
[...]
-----END PRIVATE KEY-----
cert = -----BEGIN CERTIFICATE-----
MII[...]
[...]
-----END CERTIFICATE-----
[agent]
# This section is special: it contains configuration.
# Does not make use of values coming from [DEFAULT] (well, it
# necessarily contains them, but they are not used).
node_title = ...
test_title = ...
project_title = ...
task_count = 2 # Number of tests to run concurrently
report_url = # URL to which test reports are sent; see ERP5 for details
# All other sections are individual tests, whatever they are named.
[test-apache]
# Software release URL
url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
# Optional request_kw parameters: if not provided, only the SR build will
# be tested. Depending on your slap version, it may be required to provide
# a "software_type" parameter, even if you want the default type.
request_kw = {
"filter_kw": {"computer_guid": "..."},
"partition_parameter_kw": {
"domain": "example.com"
}
}
# All are in seconds.
max_install_duration = 3000
max_uninstall_duration = 360
max_request_duration = 700
max_destroy_duration = 360
[impossible-apache]
url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
max_install_duration = 1
max_uninstall_duration = 1
[impossible-apache-2]
url = http://git.erp5.org/gitweb/slapos.git/blob_plain/HEAD:/software/apache-frontend/software.cfg
max_install_duration = 660
max_uninstall_duration = 1
</parameter>
</instance>
[agent]
timeout = ${slap-parameter:timeout}
node_title = ${slap-parameter:node_title}
project_title = ${slap-parameter:project_title}
task_count = ${slap-parameter:task_count}
report_url = ${slap-parameter:report_url}
working_directory = ${directory:testnode}
......@@ -6,28 +6,37 @@ eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
offline = true
[directory]
recipe = slapos.cookbook:mkdirectory
etc = $${buildout:directory}/etc
run = $${:etc}/run
service = $${buildout:directory}/etc/service
run = $${buildout:directory}/etc/run
agentlog = $${buildout:directory}/var/log/agent
srv = $${buildout:directory}/srv
bin = $${buildout:directory}/bin
testnode = $${buildout:directory}/srv/testnode
[instance]
recipe = slapos.cookbook:agent
agent_binary = ${buildout:directory}/bin/agent
pidfile = $${directory:srv}/agent.pid
log = $${directory:agentlog}/agent.log
wrapper = $${directory:run}/agent
config = $${directory:etc}/agent.cfg
master-url = $${slap-parameter:master-url}
key = $${slap-parameter:userkey}
cert = $${slap-parameter:usercertificate}
configuration = $${slap-parameter:configuration}
default_max_install_duration = $${slap-parameter:default_max_install_duration}
default_max_uninstall_duration = $${slap-parameter:default_max_uninstall_duration}
default_max_request_duration = $${slap-parameter:default_max_request_duration}
default_max_destroy_duration = $${slap-parameter:default_max_destroy_duration}
recipe = slapos.cookbook:wrapper
command-line =
${buildout:bin-directory}/agent
--pidfile=$${buildout:directory}/srv/agent.pid
--log=$${buildout:directory}/var/log/agent/agent.log
$${agent-cfg:output}
wrapper-path = $${directory:service}/agent
output = $${:wrapper-path}
[agent-cfg]
recipe = slapos.recipe.template
url = ${agent.cfg.in:target}
output = $${directory:etc}/agent.cfg
[slap-parameter]
timeout = 3600
node_title =
test_title =
project_title =
task_count = 1
report_url =
......@@ -4,41 +4,13 @@ extends =
../../component/git/buildout.cfg
../../stack/slapos.cfg
develop =
${:parts-directory}/slapos.cookbook-repository
${:parts-directory}/slapos.toolbox-repository
parts =
agent.cfg.in
template
template-agent
slapos.cookbook-repository
slapos.toolbox-repository
check-recipe
slapos-cookbook
script
# Local development
[slapos.cookbook-repository]
recipe = slapos.recipe.build:gitclone
repository = http://git.erp5.org/repos/slapos.git
branch = agent
git-executable = ${git:location}/bin/git
[slapos.toolbox-repository]
recipe = slapos.recipe.build:gitclone
repository = https://lab.nexedi.com/nexedi/slapos.toolbox.git
branch = agent3
git-executable = ${git:location}/bin/git
[check-recipe]
recipe = plone.recipe.command
stop-on-error = true
update-command = ${:command}
command =
grep parts ${buildout:develop-eggs-directory}/slapos.cookbook.egg-link;
grep parts ${buildout:develop-eggs-directory}/slapos.toolbox.egg-link
[template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance.cfg
......@@ -50,7 +22,13 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-agent.cfg
output = ${buildout:directory}/template-agent.cfg
md5sum = 7c5c43eb98d5a11961d72fce97a8e67b
md5sum = 797e80e43bbf9c0d0e0c5db88a091667
mode = 0644
[agent.cfg.in]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/agent.cfg.in
md5sum = 056b1bb005080c4324cd9d755d254131
mode = 0644
[script]
......@@ -60,3 +38,39 @@ eggs =
slapos.core
slapos.toolbox[agent]
erp5.util
[versions]
apache-libcloud = 0.18.0
ecdsa = 0.13
erp5.util = 0.4.43
gitdb = 0.6.4
pycrypto = 2.6.1
slapos.recipe.download = 1.0
slapos.recipe.template = 2.8
slapos.toolbox = 0.53
smmap = 0.9.0
# Required by:
# slapos.toolbox==0.52
GitPython = 1.0.1
# Required by:
# slapos.toolbox==0.52
atomize = 0.2.0
# Required by:
# slapos.toolbox==0.52
feedparser = 5.2.1
# Required by:
# slapos.toolbox==0.52
lockfile = 0.10.2
# Required by:
# slapos.toolbox==0.52
paramiko = 1.15.2
# Required by:
# slapos.toolbox==0.52
rpdb = 0.1.5
......@@ -11,7 +11,7 @@ plone.recipe.command = 1.1
pycrypto = 2.6.1
rdiff-backup = 1.0.5
slapos.recipe.template = 2.8
slapos.toolbox = 0.52
slapos.toolbox = 0.53
smmap = 0.9.0
# Required by:
......
[buildout]
parts =
request-re6stnet-token-slave
request-frontend-token-slave
request-monitor-test-distributor-slave
publish-connection-informations
connection-parameter-http-checker
connection-parameter-checker
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
......@@ -11,6 +13,7 @@ offline = true
[directory]
recipe = slapos.cookbook:mkdirectory
home = $${buildout:directory}
promise = $${:home}/etc/promise
etc = $${:home}/etc/
var = $${:home}/var/
srv = $${:home}/srv/
......@@ -21,14 +24,15 @@ tmp = $${:home}/tmp/
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = WebSite Frontend
# XXX We have hardcoded SR URL here.
software-url = product.frontend
software-type = custom-personal
slave = true
{% if slapparameter_dict.get('frontend-custom_domain', '80') != '443' %}
config-url = https://[$${request-re6stnet-token-slave:connection-ipv6}1]:{{ slapparameter_dict.get('port', '80') }}/
{% if slapparameter_dict.get('port', '80') != '443' %}
config-url = http://[$${request-re6stnet-token-slave:connection-ipv6}1]:{{ slapparameter_dict.get('port', '80') }}/
{% endif -%}
{% if slapparameter_dict.get('frontend-custom_domain', '80') == '443' %}
{% if slapparameter_dict.get('port', '80') == '443' %}
config-url = https://[$${request-re6stnet-token-slave:connection-ipv6}1]:{{ slapparameter_dict.get('port', '443') }}/
{% endif -%}
......@@ -96,7 +100,6 @@ config-prefer-gzip-encoding-to-backend = {{ slapparameter_dict.get('frontend-pre
config-disabled-cookie-list = {{ slapparameter_dict.get('frontend-disabled-cookie-list', '') }}
{% endif -%}
return = site_url domain
[request-re6stnet-token-slave]
......@@ -106,13 +109,53 @@ name = Re6st token Frontend
# XXX We have hardcoded SR URL here.
software-url = product.re6st
slave = true
return = token info_1 ipv6
return = token info_1 ipv6 ipv4
[request-monitor-test-distributor-slave]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Monitor Test
# XXX We have hardcoded SR URL here.
software-url = product.monitor
software-type = distributor
slave = true
config-ping6_ip_list = $${request-re6stnet-token-slave:connection-ipv6}
config-ping_ip_list = $${request-re6stnet-token-slave:connection-ipv4}
return = site_url
[monitor-frontend]
<= slap-connection
recipe = slapos.cookbook:requestoptional
name = Monitor Frontend
# XXX We have hardcoded SR URL here.
software-url = product.frontend
software-type = custom-personal
slave = true
config-url = $${request-monitor-test-distributor-slave:connection-site_url}
return = site_url domain
[publish-connection-informations]
recipe = slapos.cookbook:publish
url = https://$${request-frontend-token-slave:connection-domain}
url-https = https://$${request-frontend-token-slave:connection-domain}
url = http://$${request-frontend-token-slave:connection-domain}
token = $${request-re6stnet-token-slave:connection-token}
ipv6 = $${request-re6stnet-token-slave:connection-ipv6}
info_1 = $${request-re6stnet-token-slave:info_1}
ipv4 = $${request-re6stnet-token-slave:connection-ipv4}
monitor_v6_url = $${request-monitor-test-distributor-slave:connection-site_url}
monitor_url = https://$${monitor-frontend:connection-domain}
info_1 = $${request-re6stnet-token-slave:connection-info_1}
[connection-parameter-checker]
recipe = slapos.cookbook:check_parameter
path = $${directory:promise}/check_re6stnet_ipv6
value = $${request-re6stnet-token-slave:connection-ipv6}
expected-not-value = ::
expected-value =
[connection-parameter-http-checker]
recipe = slapos.cookbook:check_parameter
path = $${directory:promise}/check_re6stnet_http
value = $${publish-connection-informations:url}
expected-not-value = http://
expected-value =
......@@ -21,12 +21,9 @@ mode = 0644
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-cdn-request.cfg.jinja2
output = ${buildout:directory}/template-cdn-request.cfg.jinja2
md5sum = b450c721194eb0834e3738158195975a
md5sum = 11c1bb2dca674d2efe41f8cb0389ec4c
mode = 0644
[slapos.cookbook-repository]
branch = request.product
[eggs]
recipe = zc.recipe.egg
eggs =
......
#!{{ python_executable }}
"""Simple web-server that says "Hello World" for every path
hello-web [--logfile <logfile>] <bind-ip> <bind-port> ...
"""
import sys
import time
import argparse
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from socket import AF_INET6
class WebHello(BaseHTTPRequestHandler):

    def do_GET(self):
        self.send_response(200)     # ok
        self.send_header("Content-type", "text/plain")
        self.end_headers()

        print >>self.wfile, \
            "Hello %s at `%s` ; %s" % (
                ' '.join(self.server.webhello_argv) or 'world',
                self.path, time.asctime())
class HTTPServerV6(HTTPServer):
    address_family = AF_INET6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--logfile', dest='logfile')
    parser.add_argument('bind_ip')
    parser.add_argument('bind_port', type=int)
    parser.add_argument('argv_extra', metavar='...', nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # HTTPServer logs to sys.stderr - override it if we have --logfile
    if args.logfile:
        f = open(args.logfile, 'a', buffering=1)
        sys.stderr = f

    print >>sys.stderr, '* %s Hello-Web starting at %s' % (
        time.asctime(), (args.bind_ip, args.bind_port))

    # TODO autodetect ipv6/ipv4
    httpd = HTTPServerV6( (args.bind_ip, args.bind_port), WebHello)
    httpd.webhello_argv = args.argv_extra
    httpd.serve_forever()
if __name__ == '__main__':
    main()
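
# A quick manual check, with hypothetical address/port/name values: start the
# server as `hello-web ::1 7777 test`, then fetch a page from it with:
#
#   import urllib2
#   print urllib2.urlopen('http://[::1]:7777/').read()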
......@@ -6,15 +6,13 @@
[buildout]
parts =
directory
hello-world
hello-world-promise
publish-connection-parameter
# Define egg directories to be the one from Software Release
# (/opt/slapgrid/...)
# Always the same.
eggs-directory = ${buildout:eggs-directory}
develop-eggs-directory = ${buildout:develop-eggs-directory}
eggs-directory = {{ buildout['eggs-directory'] }}
develop-eggs-directory = {{ buildout['develop-eggs-directory'] }}
offline = true
......@@ -23,15 +21,15 @@ offline = true
# We use the slapconfiguration recipe with a few parameters (partition id,
# computer id, certificate, etc).
# It will then authenticate to SlapOS Master and fetch the instance parameters.
# The parameters are accessible from $${instance-parameter:configuration.name-of-parameter}
# The parameters are accessible from ${instance-parameter:configuration.name-of-parameter}
# Always the same. Just copy/paste.
# See docstring of slapos.cookbook:slapconfiguration for more information.
recipe = slapos.cookbook:slapconfiguration
computer = $${slap_connection:computer_id}
partition = $${slap_connection:partition_id}
url = $${slap_connection:server_url}
key = $${slap_connection:key_file}
cert = $${slap_connection:cert_file}
computer = ${slap_connection:computer_id}
partition = ${slap_connection:partition_id}
url = ${slap_connection:server_url}
key = ${slap_connection:key_file}
cert = ${slap_connection:cert_file}
# Define default parameter(s) that will be used later, in case user didn't
# specify it.
......@@ -46,22 +44,22 @@ configuration.name = John Doe
# Create all needed directories, depending on your needs
[directory]
recipe = slapos.cookbook:mkdirectory
home = $${buildout:directory}
etc = $${:home}/etc
var = $${:home}/var
home = ${buildout:directory}
etc = ${:home}/etc
var = ${:home}/var
# Executables put here will be started but not monitored (for startup scripts)
script = $${:etc}/run/
script = ${:etc}/run/
# Executables put here will be started and monitored (for daemons)
service = $${:etc}/service
service = ${:etc}/service
# Executables put here will be launched after buildout has completed to see
# if instance is running
promise = $${:etc}/promise/
# Path of the log directory used by our service (see [hello-world])
log = $${:var}/log
promise = ${:etc}/promise/
# Path of the log directory used by our service (see [helloweb])
log = ${:var}/log
# Create a simple web server that says "hello <configuration.name>" to the web.
[hello-world]
[helloweb]
# helloworld service is listening on:
# - global IPv6 address, and
# - fixed port
......@@ -69,32 +67,59 @@ log = $${:var}/log
# NOTE because every computer partition is allocated its own global IPv6
# address, it is ok to fix the port - different hello-world instances will have
# different IPv6 addresses and they all will be accessible at the same time.
ipv6 = $${instance-parameter:ipv6-random}
port = 7777
ipv6 = ${instance-parameter:ipv6-random}
# full URL - for convenience
url = http://[$${:ipv6}]:$${:port}
url = http://[${:ipv6}]:${:port}
# the service will log here
logfile = $${directory:log}/hello-world.log
logfile = ${directory:log}/helloweb-${:kind}.log
# Actual script that starts the service:
# This recipe will try to "exec" the command-line after separating parameters.
recipe = slapos.cookbook:wrapper
command-line =
${hello-web-bin:rendered} --logfile $${hello-world:logfile}
$${:ipv6} $${:port} $${instance-parameter:configuration.name}
{{ buildout['bin-directory'] }}/helloweb-${:kind} --logfile ${:logfile}
${:ipv6} ${:port} ${instance-parameter:configuration.name}
# Put this shell script in the "etc/service" directory. Each executable of this
# repository will be started and monitored by supervisord. If a service
# exits/crashes, it will trigger a "bang" and cause a re-run of the instance.
wrapper-path = $${directory:service}/hello-world
wrapper-path = ${directory:service}/helloweb-${:kind}
# promise, that checks that hello-world service is alive
[hello-world-promise]
# promise, that checks that helloweb service is alive
[helloweb-promise]
recipe = slapos.cookbook:check_port_listening
path = $${directory:promise}/hello-world
hostname= $${hello-world:ipv6}
port = $${hello-world:port}
path = ${directory:promise}/helloweb-${:kind}
{# macro to instantiate service of `kind` to listen on `port` #}
{% set service_list = [] %}
{% macro hellowebsrv(kind, port) %}
{% do service_list.append(kind) %}
[helloweb-{{ kind }}]
<= helloweb
kind = {{ kind }}
port = {{ port }}
[helloweb-{{ kind }}-promise]
<= helloweb-promise
kind = {{ kind }}
hostname= ${helloweb-{{ kind }}:ipv6}
port = {{ port }}
{% endmacro %}
# services instantiation
{{ hellowebsrv('python', 7777) }}
{{ hellowebsrv('ruby', 7778) }}
{{ hellowebsrv('go', 7779) }}
# register all services/promises to buildout parts
[buildout]
parts +=
{%- for kind in service_list %}
helloweb-{{ kind }}
helloweb-{{ kind }}-promise
{%- endfor %}
# Publish all the parameters needed for the user to connect to the instance.
......@@ -102,5 +127,7 @@ port = $${hello-world:port}
# Here we'll just echo back the entered name as instance parameter
[publish-connection-parameter]
recipe = slapos.cookbook:publish
name = Hello $${instance-parameter:configuration.name}!
url = $${hello-world:url}
name = Hello ${instance-parameter:configuration.name}!
{%- for kind in service_list %}
url.{{ kind }} = ${helloweb-{{ kind }}:url}
{%- endfor %}
......@@ -6,10 +6,10 @@ extends =
../../stack/slapos.cfg
# Extend here component profiles, like openssl, apache, mariadb, curl...
# Or/and extend a stack (lamp, tomcat) that does most of the work for you
# In this example we don't need anything more than python which is provided by
# above stack/slapos.cfg
# In this example we extend from helloweb component.
# ../../component/component1/buildout.cfg
# ../../component/component2/buildout.cfg
../../component/helloweb/buildout.cfg
parts =
# Call installation of slapos.cookbook egg defined in stack/slapos.cfg (needed
......@@ -19,35 +19,22 @@ parts =
# instance
instance-profile
# "build" python program (install + correct shebang for our python)
hello-web-bin
# build helloweb programs
helloweb-python
helloweb-ruby
helloweb-go
# Download instance.cfg.in (buildout profile used to deployment of instance),
# replace all ${foo:bar} parameters by real values, and change $${foo:bar} to
# ${foo:bar}
[instance-profile]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance.cfg.in
output = ${buildout:directory}/instance.cfg
recipe = slapos.recipe.template:jinja2
template = ${:_profile_base_location_}/instance.cfg.in
rendered = ${buildout:directory}/instance.cfg
# MD5 checksum can be skipped for development (easier to develop), but must be filled for production
md5sum = 968bea0fc81dc604a874c53648b7d13f
md5sum = 6567f8dedb5cdd93542dc29e96edb547
mode = 0644
# install hello-web with correct python_executable
[hello-web-bin]
recipe = slapos.recipe.template:jinja2
filename = hello-web
md5sum = da4a93ff679d40c6682859476dcf4ce0
template = ${:_profile_base_location_}/${:filename}.in
rendered = ${buildout:bin-directory}/${:filename}
mode = 0755
# XXX python_executable should be ${${buildout:python}:executable}
# but buildout cannot support such indirection.
#
# in real cases, python software is usually installed with zc.recipe.egg,
# which takes care of specifying the correct python interpreter for
# entry-points automatically.
extensions = jinja2.ext.do
context =
raw python_executable ${buildout:executable}
section buildout buildout
......@@ -90,7 +90,7 @@ command =
[template]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance.cfg.in
md5sum = 41eb12b66bd1af86b1b98b2166e86417
md5sum = ac94fdcf8e3db4bdb2dff4478426595d
output = ${buildout:directory}/template.cfg
mode = 0644
......@@ -98,7 +98,7 @@ mode = 0644
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/instance-kvm.cfg.jinja2
mode = 644
md5sum = 90309e9a1efe6d6b981a27f503c86277
md5sum = 4056df213786fd87b60efd3d6f1f2bec
download-only = true
on-update = true
......@@ -106,7 +106,7 @@ on-update = true
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/instance-kvm-cluster.cfg.jinja2.in
mode = 644
md5sum = 0e06ef2879454eee140fe00e167a01d5
md5sum = 8e84c7a4e7be009021243c14707e0a1e
download-only = true
on-update = true
......@@ -114,7 +114,7 @@ on-update = true
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/instance-kvm-resilient.cfg.jinja2
mode = 644
md5sum = 450a50069f5617993ac8de47d533d653
md5sum = 7564bfbb74e6557e1041e9d6d1bc5d14
download-only = true
on-update = true
......@@ -179,6 +179,24 @@ mode = 0644
download-only = true
filename = ansible-promise.in
[template-kvm-run]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/template/template-kvm-run.in
mode = 644
filename = template-kvm-run.in
md5sum = 38265d52fdc03589081cc7dd13999020
download-only = true
on-update = true
[template-kvm-controller]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/template/kvm-controller-run.in
mode = 644
filename = kvm-controller-run.in
md5sum = b61ef9c54d912fdbfed3899fa985f79c
download-only = true
on-update = true
[template-apache-conf]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/template/apache.conf.in
......
......@@ -242,23 +242,6 @@
"minimum": 1,
"maximum": 65535
},
"nbd2-host": {
"title": "Second NBD hostname or IP",
"description": "hostname (or IP) of the second NBD server (containing drivers for example).",
"type": "string",
"format": [
"host-name",
"ip-address",
"ipv6"
]
},
"nbd2-port": {
"title": "Second NBD port",
"description": "Port of the second NBD server containing the boot image.",
"type": "integer",
"minimum": 1,
"maximum": 65535
},
"virtual-hard-drive-url": {
"title": "Existing disk image URL",
"description": "If specified, will download an existing disk image (qcow2, raw, ...), and will use it as main virtual hard drive. Can be used to download and use an already installed and customized virtual hard drive.",
......@@ -276,6 +259,12 @@
"type": "boolean",
"default": false
},
"hard-drive-url-check-certificate": {
"title": "Check certificate when downloading virtual hard drive from https.",
"description": "Define if certificate should be checked when downloading virtual hard drive from https url.",
"type": "boolean",
"default": true
},
"external-disk-number": {
"title": "Number of additional disk to create for virtual machine",
"description": "Specify the number of additional disk to create for virtual machine in data folder of SlapOS Node. Requires instance_storage_home to be configured on SlapOS Node.",
......
......@@ -60,6 +60,7 @@ config-enable-vhost = {{ dumps(kvm_parameter_dict.get('enable-vhost', False)) }}
config-virtual-hard-drive-url = {{ dumps(kvm_parameter_dict.get('virtual-hard-drive-url', '')) }}
config-virtual-hard-drive-md5sum = {{ dumps(kvm_parameter_dict.get('virtual-hard-drive-md5sum', '')) }}
config-virtual-hard-drive-gzipped = {{ dumps(kvm_parameter_dict.get('virtual-hard-drive-gzipped', False)) }}
config-hard-drive-url-check-certificate = {{ dumps(kvm_parameter_dict.get('hard-drive-url-check-certificate', True)) }}
config-external-disk-number = {{ dumps(kvm_parameter_dict.get('external-disk-number', 0)) }}
config-external-disk-size = {{ dumps(kvm_parameter_dict.get('external-disk-size', 20)) }}
config-external-disk-format = {{ dumps(kvm_parameter_dict.get('external-disk-format', 'qcow2')) }}
......
......@@ -98,6 +98,12 @@
"type": "boolean",
"default": false
},
"hard-drive-url-check-certificate": {
"title": "Check certificate when downloading virtual hard drive from https.",
"description": "Define if certificate should be checked when downloading virtual hard drive from https url.",
"type": "boolean",
"default": true
},
"external-disk-number": {
"title": "Number of additional disk to create for virtual machine",
......
......@@ -13,7 +13,7 @@ offline = true
# += because we need to take up parts (like instance-custom, slapmonitor etc) from the profile we extended
parts +=
{{ parts.replicate("kvm", backup_amount) }}
publish-connection-informations
publish-connection-information
kvm-frontend-url-promise
kvm-backend-url-promise
......@@ -55,6 +55,6 @@ mode = 700
# Check that backend url is reachable
recipe = slapos.cookbook:check_url_available
path = ${directory:promises}/frontend_promise
url = ${publish-connection-informations:url}
url = ${publish-connection-information:url}
dash_path = /bin/sh
curl_path = {{ curl_executable_location }}
......@@ -3,77 +3,15 @@
{% set use_nat = slapparameter_dict.get('use-nat', 'True').lower() -%}
{% set name = slapparameter_dict.get('name', 'localhost') -%}
{% set monitor = slapparameter_dict.get('enable-monitor', 'True').lower() -%}
{% set disable_ansible_promise = slapparameter_dict.get('disable-ansible-promise', 'False').lower() -%}
{% set frontend_software_type = 'default' -%}
{% set extends_list = [] -%}
{% set part_list = [] -%}
{% if monitor -%}
{% do extends_list.append(template_monitor) -%}
{% endif -%}
{% do extends_list.append(logrotate_cfg) -%}
#############################
#
# Instanciate kvm
#
#############################
[buildout]
parts =
certificate-authority
publish-connection-information
kvm-vnc-promise
kvm-disk-image-corruption-promise
websockify-sighandler
novnc-promise
# kvm-monitor
cron
# cron-entry-monitor
frontend-promise
{% if monitor -%}
# monitor parts
cron-entry-monitor
cron-entry-rss
deploy-index
deploy-status-history-cgi
deploy-status-cgi
# deploy-logfile-cgi
# deploy-resource-consumption-monitoring-cgi
setup-static-files
public-symlink
cgi-httpd-wrapper
cgi-httpd-graceful-wrapper
monitor-promise
monitor-instance-log-access
monitor-access-log
monitor-access-public
# monitor-frontend-promise
{% endif -%}
{% if slapparameter_dict.get('document-host', '') %}
cluster-url-path
{% endif -%}
{% if enable_http == 'true' %}
httpd
httpd-promise
publish-host-config
{% if slapparameter_dict.get('data-to-vm', '') %}
vm-data-content
{% endif -%}
{% if use_tap == 'true' and tap_network_dict.has_key('ipv4') %}
ansible-vm-promise
logrotate-vm-bootstrap
{% endif -%}
{% if slapparameter_dict.get('authorized-key', '') %}
get-authorized-key
{% endif -%}
{% endif -%}
extends =
# Add extends list
{{ extends_list | join('\n ') }}
# {{ template_httpd_cfg }}
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[directory]
recipe = slapos.cookbook:mkdirectory
......@@ -106,22 +44,19 @@ recipe = slapos.cookbook:generate.password
storage-path = ${directory:srv}/passwd
bytes = 8
[kvm-instance]
# XXX-Cedric: change "KVM" recipe to simple "create wrappers". No need for this
# specific code. It needs Jinja.
recipe = slapos.cookbook:kvm
[kvm-controller-parameter-dict]
python-path = {{ python_executable }}
vnc-passwd = ${gen-passwd:passwd}
socket-path = ${directory:var}/qmp_socket
[kvm-parameter-dict]
python-path = {{ python_executable }}
ipv4 = ${slap-network-information:local-ipv4}
ipv6 = ${slap-network-information:global-ipv6}
vnc-ip = ${:ipv4}
vnc-port = 5901
# XXX-Cedric: should be named "default-cdrom-iso"
default-disk-image = {{ debian_amd64_netinst_location }}
default-cdrom-iso = {{ debian_amd64_netinst_location }}
nbd-host = ${slap-parameter:nbd-host}
nbd-port = ${slap-parameter:nbd-port}
nbd2-host = ${slap-parameter:nbd2-host}
......@@ -133,8 +68,9 @@ disk-path = ${directory:srv}/virtual.qcow2
disk-size = ${slap-parameter:disk-size}
disk-type = ${slap-parameter:disk-type}
socket-path = ${directory:var}/qmp_socket
pid-file-path = ${directory:run}/pid_file
socket-path = ${kvm-controller-parameter-dict:socket-path}
smp-count = ${slap-parameter:cpu-count}
smp-options = ${slap-parameter:cpu-options}
......@@ -143,24 +79,19 @@ numa = ${slap-parameter:numa}
mac-address = ${create-mac:mac-address}
tap-mac-address = ${create-tap-mac:mac-address}
# XXX-Cedric: should be named runner-wrapper-path and controller-wrapper-path
runner-path = ${directory:services}/kvm
controller-path = ${directory:scripts}/kvm_controller
use-tap = ${slap-parameter:use-tap}
use-nat = ${slap-parameter:use-nat}
nat-rules = ${slap-parameter:nat-rules}
enable-vhost = ${slap-parameter:enable-vhost}
6tunnel-wrapper-path = ${directory:services}/6tunnel
virtual-hard-drive-url = ${slap-parameter:virtual-hard-drive-url}
virtual-hard-drive-md5sum = ${slap-parameter:virtual-hard-drive-md5sum}
virtual-hard-drive-gzipped = ${slap-parameter:virtual-hard-drive-gzipped}
hard-drive-url-check-certificate = ${slap-parameter:hard-drive-url-check-certificate}
shell-path = {{ dash_executable_location }}
qemu-path = {{ qemu_executable_location }}
qemu-img-path = {{ qemu_img_executable_location }}
6tunnel-path = {{ sixtunnel_executable_location }}
etc-directory = ${directory:etc}
disk-storage-list =
......@@ -188,11 +119,55 @@ cluster-doc-port = 0
netcat-binary = {{ netcat_bin }}
language = ${slap-parameter:keyboard-layout-language}
[kvm-run]
recipe = slapos.recipe.template:jinja2
template = {{ template_kvm_run }}
rendered = ${directory:bin}/kvm_raw
mode = 700
context =
section parameter_dict kvm-parameter-dict
[kvm-contoller]
recipe = slapos.recipe.template:jinja2
template = {{ template_kvm_controller_run }}
rendered = ${directory:scripts}/kvm_controller
mode = 700
context =
section parameter_dict kvm-controller-parameter-dict
[tunnel-6to4-base]
recipe = slapos.cookbook:wrapper
ipv4 = ${slap-network-information:local-ipv4}
ipv6 = ${slap-network-information:global-ipv6}
wrapper-path = ${directory:services}/6tunnel-${:ipv6-port}
command-line = {{ sixtunnel_executable_location }} -6 -4 -d -l ${:ipv6} ${:ipv6-port} ${:ipv4} ${:ipv4-port}
{% if use_nat == 'true' -%}
{% set nat_rule_list = slapparameter_dict.get('nat-rules', '22 80 443') %}
{% for port in nat_rule_list.split(' ') -%}
{% set external_port = 10000 + port|int() -%}
{% set section_name = '6tunnel-' ~ external_port -%}
[{{ section_name }}]
<= tunnel-6to4-base
ipv4-port = {{ external_port }}
ipv6-port = {{ external_port }}
{% do part_list.append(section_name) -%}
{% endfor -%}
{% endif -%}
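# For example, with the default nat-rules '22 80 443' the loop above emits
# sections [6tunnel-10022], [6tunnel-10080] and [6tunnel-10443], each
# forwarding port 10000 + <rule port> from the global IPv6 address to the
# same port on the local IPv4 address.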
[kvm-instance]
recipe = slapos.cookbook:wrapper
socket-path = ${kvm-controller-parameter-dict:socket-path}
wrapper-path = ${directory:services}/kvm
command-line = ${kvm-run:rendered}
kvm-controller = ${kvm-contoller:rendered}
[kvm-vnc-promise]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promises}/vnc_promise
hostname = ${kvm-instance:vnc-ip}
port = ${kvm-instance:vnc-port}
hostname = ${kvm-parameter-dict:vnc-ip}
port = ${kvm-parameter-dict:vnc-port}
[kvm-disk-image-corruption-promise]
# Check that disk image is not corrupted
......@@ -201,7 +176,7 @@ input = inline:#!/bin/sh
# Return code 0 is "OK"
# Return code 3 is "found leaks, but image is OK"
# http://git.qemu.org/?p=qemu.git;a=blob;f=qemu-img.c;h=4e9a7f5741c9cb863d978225829e68fefcae3947;hb=HEAD#l702
${kvm-instance:qemu-img-path} check ${kvm-instance:disk-path}
${kvm-parameter-dict:qemu-img-path} check ${kvm-parameter-dict:disk-path}
RETURN_CODE=$?
if [ $RETURN_CODE -eq 0 ] || [ $RETURN_CODE -eq 3 ]; then
exit 0
......@@ -217,8 +192,8 @@ recipe = slapos.cookbook:novnc
path = ${ca-novnc:executable}
ip = ${slap-network-information:global-ipv6}
port = 6080
vnc-ip = ${kvm-instance:vnc-ip}
vnc-port = ${kvm-instance:vnc-port}
vnc-ip = ${kvm-parameter-dict:vnc-ip}
vnc-port = ${kvm-parameter-dict:vnc-port}
novnc-location = {{ novnc_location }}
websockify-path = {{ websockify_executable_location }}
ssl-key-path = ${ca-novnc:key-file}
......@@ -366,8 +341,8 @@ check-secure = 1
[publish-connection-information]
recipe = slapos.cookbook:publish
ipv6 = ${slap-network-information:global-ipv6}
backend-url = https://[${novnc-instance:ip}]:${novnc-instance:port}/vnc_auto.html?host=[${novnc-instance:ip}]&port=${novnc-instance:port}&encrypt=1&password=${kvm-instance:vnc-passwd}
url = ${request-slave-frontend:connection-url}/vnc_auto.html?host=${request-slave-frontend:connection-domainname}&port=${request-slave-frontend:connection-port}&encrypt=1&path=${request-slave-frontend:connection-resource}&password=${kvm-instance:vnc-passwd}
backend-url = https://[${novnc-instance:ip}]:${novnc-instance:port}/vnc_auto.html?host=[${novnc-instance:ip}]&port=${novnc-instance:port}&encrypt=1&password=${kvm-controller-parameter-dict:vnc-passwd}
url = ${request-slave-frontend:connection-url}/vnc_auto.html?host=${request-slave-frontend:connection-domainname}&port=${request-slave-frontend:connection-port}&encrypt=1&path=${request-slave-frontend:connection-resource}&password=${kvm-controller-parameter-dict:vnc-passwd}
{% set disk_number = len(storage_dict) -%}
maximum-extra-disk-amount = {{ disk_number }}
{% set iface = 'eth0' -%}
......@@ -378,9 +353,9 @@ maximum-extra-disk-amount = {{ disk_number }}
{% set nat_rule_list = slapparameter_dict.get('nat-rules', '22 80 443') %}
{% for port in nat_rule_list.split(' ') -%}
{% set external_port = 10000 + port|int() -%}
nat-rule-port-{{port}} = ${slap-network-information:global-ipv6} : {{external_port}}
nat-rule-port-{{port}} = ${slap-network-information:global-ipv6} : ${6tunnel-{{external_port}}:ipv6-port}
{% if slapparameter_dict.get('publish-nat-url', False) -%}
nat-rule-url-{{port}} = [${slap-network-information:global-ipv6}]:{{external_port}}
nat-rule-url-{{port}} = [${slap-network-information:global-ipv6}]:${6tunnel-{{external_port}}:ipv6-port}
{% endif -%}
{% endfor -%}
{% endif -%}
......@@ -555,6 +530,8 @@ enable-vhost = False
virtual-hard-drive-url =
virtual-hard-drive-md5sum =
virtual-hard-drive-gzipped = False
# if virtual-hard-drive-url use https, then specify if https certificate should be checked or not
hard-drive-url-check-certificate = True
external-disk-number = 0
external-disk-size = 20
......@@ -571,3 +548,75 @@ data-to-vm =
# Change keyboard layout language
keyboard-layout-language =
#############################
#
# Instanciate kvm (Buildout Section)
#
#############################
# Set Additionals parts
{% if slapparameter_dict.get('document-host', '') %}
{% do part_list.append('cluster-url-path') -%}
{% endif -%}
{% if enable_http == 'true' %}
{% do part_list.extend(['httpd', 'httpd-promise', 'publish-host-config']) -%}
{% if slapparameter_dict.get('data-to-vm', '') %}
{% do part_list.append('vm-data-content') -%}
{% endif -%}
{% if use_tap == 'true' and tap_network_dict.has_key('ipv4') and disable_ansible_promise == 'false' %}
{% do part_list.extend(['ansible-vm-promise', 'logrotate-vm-bootstrap']) -%}
{% endif -%}
{% if slapparameter_dict.get('authorized-key', '') %}
{% do part_list.append('get-authorized-key') -%}
{% endif -%}
{% endif -%}
[buildout]
parts =
certificate-authority
publish-connection-information
kvm-instance
kvm-contoller
kvm-vnc-promise
kvm-disk-image-corruption-promise
websockify-sighandler
novnc-promise
# kvm-monitor
cron
# cron-entry-monitor
frontend-promise
{% if monitor -%}
# monitor parts
cron-entry-monitor
cron-entry-rss
deploy-index
deploy-status-history-cgi
deploy-status-cgi
# deploy-logfile-cgi
# deploy-resource-consumption-monitoring-cgi
setup-static-files
public-symlink
cgi-httpd-wrapper
cgi-httpd-graceful-wrapper
monitor-promise
monitor-instance-log-access
monitor-access-log
monitor-access-public
# monitor-frontend-promise
{% endif -%}
# Complete parts with sections
{{ part_list | join('\n ') }}
extends =
# Add extends list
{{ extends_list | join('\n ') }}
# {{ template_httpd_cfg }}
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
\ No newline at end of file
......@@ -92,11 +92,14 @@ context =
raw novnc_location ${noVNC:location}
raw netcat_bin ${netcat:location}/bin/netcat
raw openssl_executable_location ${openssl:location}/bin/openssl
raw python_executable ${buildout:executable}
raw qemu_executable_location ${kvm:location}/bin/qemu-system-x86_64
raw qemu_img_executable_location ${kvm:location}/bin/qemu-img
raw sixtunnel_executable_location ${6tunnel:location}/bin/6tunnel
raw template_httpd_cfg ${template-httpd:rendered}
raw template_content ${template-content:location}/${template-content:filename}
raw template_kvm_controller_run ${template-kvm-controller:location}/${template-kvm-controller:filename}
raw template_kvm_run ${template-kvm-run:location}/${template-kvm-run:filename}
raw template_monitor ${monitor-template:output}
raw websockify_executable_location ${buildout:directory}/bin/websockify
template-parts-destination = ${template-parts:destination}
......
#!{{ parameter_dict.get('python-path') }}
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
# Echo client program
import socket
import time
# XXX: to be factored with slapos.toolbox qemu qmp wrapper.
socket_path = '{{ parameter_dict.get("socket-path") }}'
vnc_password = '{{ parameter_dict.get("vnc-passwd") }}'
# Connect to KVM qmp socket
so = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
    try:
        so.connect(socket_path)
    except socket.error:
        time.sleep(1)
    else:
        connected = True
data = so.recv(1024)
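# (this first recv drains the greeting banner the QMP server sends on connect)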
# Enable qmp
so.send('{ "execute": "qmp_capabilities" }')
data = so.recv(1024)
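# (a successful reply has the form {"return": {}})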
# Set VNC password
so.send('{ "execute": "change", ' \
'"arguments": { "device": "vnc", "target": "password", ' \
' "arg": "' + vnc_password + '" } }')
data = so.recv(1024)
# Finish
so.close()
#!{{ parameter_dict.get('python-path') }}
# BEWARE: This file is operated by slapgrid
# BEWARE: It will be overwritten automatically
import hashlib
import os
import socket
import subprocess
import urllib
import gzip
import shutil
from random import shuffle
import glob
import re
import ssl
# XXX: give all of this through parameter, don't use this as template, but as module
qemu_img_path = '{{ parameter_dict.get("qemu-img-path") }}'
qemu_path = '{{ parameter_dict.get("qemu-path") }}'
disk_size = '{{ parameter_dict.get("disk-size") }}'
disk_type = '{{ parameter_dict.get("disk-type") }}'
socket_path = '{{ parameter_dict.get("socket-path") }}'
nbd_list = (('{{ parameter_dict.get("nbd-host") }}',
{{ parameter_dict.get("nbd-port") }}),
('{{ parameter_dict.get("nbd2-host") }}',
{{ parameter_dict.get("nbd2-port") }}))
default_cdrom_iso = '{{ parameter_dict.get("default-cdrom-iso") }}'
disk_path = '{{ parameter_dict.get("disk-path") }}'
virtual_hard_drive_url = '{{ parameter_dict.get("virtual-hard-drive-url") }}'.strip()
virtual_hard_drive_md5sum = '{{ parameter_dict.get("virtual-hard-drive-md5sum") }}'.strip()
virtual_hard_drive_gzipped = '{{ parameter_dict.get("virtual-hard-drive-gzipped") }}'.strip().lower()
nat_rules = '{{ parameter_dict.get("nat-rules") }}'.strip()
use_tap = '{{ parameter_dict.get("use-tap") }}'.lower()
use_nat = '{{ parameter_dict.get("use-nat") }}'.lower()
enable_vhost = '{{ parameter_dict.get("enable-vhost") }}'.lower()
tap_interface = '{{ parameter_dict.get("tap-interface") }}'
listen_ip = '{{ parameter_dict.get("ipv4") }}'
mac_address = '{{ parameter_dict.get("mac-address") }}'
tap_mac_address = '{{ parameter_dict.get("tap-mac-address") }}'
smp_count = '{{ parameter_dict.get("smp-count") }}'
smp_options = '{{ parameter_dict.get("smp-options") }}'.strip()
numa_list = '{{ parameter_dict.get("numa") }}'.split()
ram_size = '{{ parameter_dict.get("ram-size") }}'
pid_file_path = '{{ parameter_dict.get("pid-file-path") }}'
external_disk_number = {{ parameter_dict.get("external-disk-number") }}
external_disk_size = '{{ parameter_dict.get("external-disk-size") }}'
external_disk_format = '{{ parameter_dict.get("external-disk-format") }}'
disk_storage_dict = {}
disk_storage_list = """{{ parameter_dict.get("disk-storage-list") }}""".split('\n')
map_storage_list = []
etc_directory = '{{ parameter_dict.get("etc-directory") }}'.strip()
httpd_port = {{ parameter_dict.get("httpd-port") }}
netcat_bin = '{{ parameter_dict.get("netcat-binary") }}'.strip()
cluster_doc_host = '{{ parameter_dict.get("cluster-doc-host") }}'
cluster_doc_port = {{ parameter_dict.get("cluster-doc-port") }}
language = '{{ parameter_dict.get("language") }}'
language_list = ['ar', 'da', 'de', 'de-ch', 'en-gb', 'en-us', 'es', 'et', 'fi',
'fo', 'fr', 'fr-be', 'fr-ca', 'fr-ch', 'hr', 'hu', 'is', 'it', 'ja', 'lt',
'lv', 'mk', 'nl', 'nl-be', 'no', 'pl', 'pt', 'pt-br', 'ru', 'sl', 'sv',
'th', 'tr']
url_check_certificate = '{{ parameter_dict.get("hard-drive-url-check-certificate", "true") }}'.lower()
if hasattr(ssl, '_create_unverified_context') and url_check_certificate == 'false':
    opener = urllib.FancyURLopener(context=ssl._create_unverified_context())
else:
    opener = urllib.FancyURLopener({})
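# Stream the file in 8 KiB chunks while computing its md5, so that a
# multi-gigabyte disk image is never held in memory at once.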
def md5Checksum(file_path):
    with open(file_path, 'rb') as fh:
        m = hashlib.md5()
        while True:
            data = fh.read(8192)
            if not data:
                break
            m.update(data)
        return m.hexdigest()
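# Return a socket connected to (host, port), trying each address returned by
# getaddrinfo in turn; None means the endpoint is unreachable.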
def getSocketStatus(host, port):
    s = None
    for res in socket.getaddrinfo(host, port,
                                  socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        try:
            s = socket.socket(af, socktype, proto)
        except socket.error, msg:
            s = None
            continue
        try:
            s.connect(sa)
        except socket.error, msg:
            s.close()
            s = None
            continue
        break
    return s
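# Map storage ids to disk slots, persisting the mapping (and the number of
# disks) under etc_directory so each disk keeps its slot across restarts.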
def getMapStorageList(disk_storage_dict, external_disk_number):
    map_disk_file = os.path.join(etc_directory, '.data-disk-ids')
    last_disk_num_f = os.path.join(etc_directory, '.data-disk-amount')
    id_list = []
    last_amount = 0
    map_f_exist = os.path.exists(map_disk_file)
    if os.path.exists(last_disk_num_f):
        with open(last_disk_num_f, 'r') as lf:
            last_amount = int(lf.readline())
    if map_f_exist:
        with open(map_disk_file, 'r') as mf:
            # IDs are written on one line: data1 data3 data2 ...
            content = mf.readline()
            for id in content.split(' '):
                if disk_storage_dict.has_key(id):
                    id_list.append(id)
                else:
                    # Means that this disk path has been removed (disk unmounted)
                    last_amount -= 1
    for key in disk_storage_dict:
        if not key in id_list:
            id_list.append(key)
    if id_list:
        if not map_f_exist:
            # shuffle the list so disks are not always written to data1, data2, ... in the same order
            shuffle(id_list)
        if external_disk_number < last_amount:
            # dropping disks that were already created is not allowed
            external_disk_number = last_amount
        with open(map_disk_file, 'w') as mf:
            mf.write(' '.join(id_list))
        with open(last_disk_num_f, 'w') as lf:
            lf.write('%s' % external_disk_number)
    return id_list, external_disk_number
# Download existing hard drive if needed at first boot
if not os.path.exists(disk_path) and virtual_hard_drive_url != '':
    print('Downloading virtual hard drive...')
    try:
        downloaded_disk = disk_path
        if virtual_hard_drive_gzipped == 'true':
            downloaded_disk = '%s.gz' % disk_path
        opener.retrieve(virtual_hard_drive_url, downloaded_disk)
    except:
        if os.path.exists(downloaded_disk):
            os.remove(downloaded_disk)
        raise
    md5sum = virtual_hard_drive_md5sum.strip()
    if md5sum:
        print('Checking MD5 checksum...')
        local_md5sum = md5Checksum(downloaded_disk)
        if local_md5sum != md5sum:
            os.remove(downloaded_disk)
            raise Exception('MD5 mismatch. MD5 of local file is %s, specified MD5 is %s.' % (
                local_md5sum, md5sum))
        print('MD5sum check passed.')
    else:
        print('Warning: no checksum specified.')
    if downloaded_disk.endswith('.gz'):
        try:
            with open(disk_path, 'w') as disk:
                with gzip.open(downloaded_disk, 'rb') as disk_gz:
                    shutil.copyfileobj(disk_gz, disk)
        except Exception:
            if os.path.exists(disk_path):
                os.remove(disk_path)
            raise
        os.remove(downloaded_disk)
# Create disk if it doesn't exist
# XXX: move to Buildout profile
if not os.path.exists(disk_path):
    print('Creating virtual hard drive...')
    subprocess.check_call([qemu_img_path, 'create', '-f', 'qcow2',
        disk_path, '%sG' % disk_size])
    print('Done.')
# Check and create external disk
additional_disk_list = []
for storage in disk_storage_list:
    if storage:
        key, val = storage.split(' ')
        disk_storage_dict[key.strip()] = val.strip()
if not external_disk_format in ['qcow2', 'raw', 'vdi', 'vmdk', 'cloop', 'qed']:
    external_disk_format = 'qcow2'
map_storage_list, external_disk_number = getMapStorageList(disk_storage_dict,
                                                           int(external_disk_number))
assert len(map_storage_list) == len(disk_storage_dict)
if disk_storage_dict:
    if external_disk_number > 0:
        index = 0
        while (index < len(disk_storage_dict)) and (index < external_disk_number):
            path = disk_storage_dict[map_storage_list[index]]
            if os.path.exists(path):
                disk_filepath = os.path.join(path,
                    'kvm_virtual_disk.%s' % external_disk_format)
                disk_list = glob.glob('%s.*' % os.path.join(path, 'kvm_virtual_disk'))
                if disk_list == []:
                    print('Creating one additional virtual hard drive...')
                    process = subprocess.check_call([qemu_img_path, 'create', '-f', '%s' % external_disk_format,
                        disk_filepath, '%sG' % external_disk_size])
                else:
                    # Cannot change or recreate the disk if it already exists
                    disk_filepath = disk_list[0]
                additional_disk_list.append(disk_filepath)
            else:
                print('Data folder %s was not used to create external disk %s' % (path, index + 1))
            index += 1
# Generate network parameters
# XXX: use_tap should be a boolean
tap_network_parameter = []
nat_network_parameter = []
numa_parameter = []
number = -1
if use_nat == 'true':
    number += 1
    rules = 'user,id=lan%s,' % number + ','.join('hostfwd=tcp:%s:%s-:%s' % (listen_ip,
        int(port) + 10000, port) for port in nat_rules.split())
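    # e.g. nat_rules = '22 80' yields
    # 'user,id=lan0,hostfwd=tcp:<ip>:10022-:22,hostfwd=tcp:<ip>:10080-:80'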
    if httpd_port > 0:
        rules += ',guestfwd=tcp:10.0.2.100:80-cmd:%s %s %s' % (netcat_bin,
            listen_ip, httpd_port)
    if cluster_doc_host and cluster_doc_port > 0:
        rules += ',guestfwd=tcp:10.0.2.101:443-cmd:%s %s %s' % (netcat_bin,
            cluster_doc_host, cluster_doc_port)
    nat_network_parameter = ['-netdev', rules,
        '-device', 'virtio-net-pci,netdev=lan%s,mac=%s' % (number, mac_address)]

if use_tap == 'true':
    number += 1
    vhost = ''
    if enable_vhost == 'true':
        vhost = ',vhost=on'
    tap_network_parameter = ['-netdev',
        'tap,id=lan%s,ifname=%s,script=no,downscript=no%s' % (number,
            tap_interface, vhost),
        '-device', 'virtio-net-pci,netdev=lan%s,mac=%s' % (number, tap_mac_address)]
smp = smp_count
if smp_options:
    for option in smp_options.split(','):
        key, val = option.split('=')
        if key in ('cores', 'threads', 'sockets', 'maxcpus') and val.isdigit():
            smp += ',%s=%s' % (key, val)
kvm_argument_list = [qemu_path,
'-enable-kvm', '-smp', smp,
'-m', ram_size, '-vga', 'std',
'-drive', 'file=%s,if=%s' % (disk_path, disk_type),
'-vnc', '%s:1,ipv4,password' % listen_ip,
'-boot', 'order=cd,menu=on',
'-qmp', 'unix:%s,server' % socket_path,
'-pidfile', pid_file_path,
]
rgx = re.compile('^[\w*\,][\=\d+\-\,\w]*$')
for numa in numa_list:
    if rgx.match(numa):
        numa_parameter.extend(['-numa', numa])
kvm_argument_list += numa_parameter
if tap_network_parameter == [] and nat_network_parameter == []:
    print 'Warning: no network interface defined.'
else:
    kvm_argument_list += nat_network_parameter + tap_network_parameter

if language in language_list:
    kvm_argument_list.extend(['-k', language])
for disk in additional_disk_list:
    kvm_argument_list.extend([
        '-drive', 'file=%s,if=%s' % (disk, disk_type)])
# Try to connect to NBD server (and second nbd if defined).
# If not available, don't even specify it in qemu command line parameters.
# Reason: if qemu starts with unavailable NBD drive, it will just crash.
for nbd_ip, nbd_port in nbd_list:
    if nbd_ip and nbd_port:
        s = getSocketStatus(nbd_ip, nbd_port)
        if s is None:
            # NBD is not available: launch kvm without it
            print 'Warning: NBD is not available.'
        else:
            # NBD is available
            kvm_argument_list.extend([
                '-drive',
                'file=nbd:[%s]:%s,media=cdrom' % (nbd_ip, nbd_port)])
    # If no NBD is specified/available: use internal disk image
    else:
        kvm_argument_list.extend([
            '-drive', 'file=%s,media=cdrom' % default_cdrom_iso
            ])
print 'Starting KVM: \n %s' % ' '.join(kvm_argument_list)
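# os.execv replaces this wrapper process with qemu itself, so the service
# supervisor ends up monitoring the VM process directly.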
os.execv(qemu_path, kvm_argument_list)
......@@ -78,7 +78,7 @@ Alias /{{ monitor_private_hash }} {{ directory.get('private-directory') }}/
</Files>
</Directory>
{% set slave_list = json_module.loads(slave_information.get('slave_instance_list')) -%}
{% set slave_list = json_module.loads(slave_information.get('slave_instance_list', '{}')) -%}
{% for slave_instance in slave_list -%}
alias /{{ slave_instance.get('slave_reference') }} {{ directory.get('private-directory') }}/network-user-logs/{{ slave_instance.get('slave_reference') }}
......
......@@ -23,7 +23,7 @@
path {{ crawl_log_directory }}/*/*ping.log.20*
pos_file {{ crawl_log_directory }}/tail_in_ping.pos
tag slapos.monitor.networktest.ping.ipv4
format /^(?<time>[^;]*);(?<logtype>[^;]*);(?<computer_name>[^;]*);(?<type>[^;]*);(?<name_or_ip>[^;]*);(?<code>[^;]*);(?<average>[^;]*);(?<packet_lost>[^;]*);(?<extra>[^;]*)$/
format /^(?<time>[^;]*);(?<computer_name>[^;]*);(?<type>[^;]*);(?<name_or_ip>[^;]*);(?<code>[^;]*);(?<average>[^;]*);(?<packet_lost>[^;]*);(?<extra>[^;]*)$/
read_from_head true
</source>
......@@ -32,7 +32,7 @@
path {{ crawl_log_directory }}/*/*ping6.log.20*
pos_file {{ crawl_log_directory }}/tail_in_ping6.pos
tag slapos.monitor.networktest.ping.ipv6
format /^(?<time>[^;]*);(?<logtype>[^;]*);(?<computer_name>[^;]*);(?<type>[^;]*);(?<name_or_ip>[^;]*);(?<code>[^;]*);(?<average>[^;]*);(?<packet_lost>[^;]*);(?<extra>[^;]*)$/
format /^(?<time>[^;]*);(?<computer_name>[^;]*);(?<type>[^;]*);(?<name_or_ip>[^;]*);(?<code>[^;]*);(?<average>[^;]*);(?<packet_lost>[^;]*);(?<extra>[^;]*)$/
read_from_head true
</source>
......
{% set slave_list = json_module.loads(slave_information.get('slave_instance_list')) -%}
{% set slave_list = json_module.loads(slave_information.get('slave_instance_list', '{}')) -%}
{% set ipv4_list = [] -%}
{% set ipv6_list = [] -%}
{% set url_list = [] -%}
{% set name_list = [] -%}
{% for slave_instance in slave_list -%}
{% if slave_instance.get('ping_ip_list') not in [None, "", "0.0.0.0"] -%}
{% do ipv4_list.append(slave_instance.get('ping_ip_list')) -%}
{% endif -%}
{% if slave_instance.get('ping6_ip_list') not in [None, "", "::"] -%}
{% do ipv6_list.append(slave_instance.get('ping6_ip_list')) -%}
{% endif -%}
{% if slave_instance.get('test_http_url_list') not in [None, ""] -%}
{% do url_list.append(slave_instance.get('test_http_url_list')) -%}
{% endif -%}
{% if slave_instance.get('test_name_list') not in [None, ""] -%}
{% do name_list.append(slave_instance.get('test_name_list')) -%}
{% endif -%}
{% endfor -%}
{
"id" :
[
......@@ -11,57 +30,49 @@
{% endif -%}
{% endfor -%}
],
"ipv4" :
"ping" :
[
{% for slave_instance in slave_list -%}
{% if slave_instance.get('ping_ip_list') not in [None, "", "0.0.0.0"] -%}
{% if slave_instance != slave_list[-1] -%}
"{{ slave_instance.get('ping_ip_list') }}",
{% for ipv4 in ipv4_list -%}
{% if ipv4 != ipv4_list[-1] -%}
"{{ ipv4 }}",
{% endif %}
{% if slave_instance == slave_list[-1] -%}
"{{ slave_instance.get('ping_ip_list') }}"
{% endif -%}
{% if ipv4 == ipv4_list[-1] -%}
"{{ ipv4 }}"
{% endif -%}
{% endfor -%}
],
"ipv6" :
"ping6" :
[
{% for slave_instance in slave_list -%}
{% if slave_instance.get('ping6_ip_list') not in [None, "", "0.0.0.0"] -%}
{% if slave_instance != slave_list[-1] -%}
"{{ slave_instance.get('ping6_ip_list') }}",
{% endif -%}
{% if slave_instance == slave_list[-1] -%}
"{{ slave_instance.get('ping6_ip_list') }}"
{% endif -%}
{% for ipv6 in ipv6_list -%}
{% if ipv6 != ipv6_list[-1] -%}
"{{ ipv6 }}",
{% endif %}
{% if ipv6 == ipv6_list[-1] -%}
"{{ ipv6 }}"
{% endif -%}
{% endfor -%}
],
"url" :
[
{% for slave_instance in slave_list -%}
{% if slave_instance.get('test_http_url_list') not in [None, ""] -%}
{% if slave_instance != slave_list[-1] -%}
"{{ slave_instance.get('test_http_url_list') }}",
{% endif -%}
{% if slave_instance == slave_list[-1] -%}
"{{ slave_instance.get('test_http_url_list') }}"
{% endif -%}
{% for url in url_list -%}
{% if url != url_list[-1] -%}
"{{ url }}",
{% endif %}
{% if url == url_list[-1] -%}
"{{ url }}"
{% endif -%}
{% endfor -%}
],
"dns" :
[
{% for slave_instance in slave_list -%}
{% if slave_instance.get('test_name_list') not in [None, ""] -%}
{% if slave_instance != slave_list[-1] -%}
"{{ slave_instance.get('test_name_list') }}",
{% endif -%}
{% if slave_instance == slave_list[-1] -%}
"{{ slave_instance.get('test_name_list') }}"
{% endif -%}
{% for name in name_list -%}
{% if name != name_list[-1] -%}
"{{ name }}",
{% endif %}
{% if name == name_list[-1] -%}
"{{ name }}"
{% endif -%}
{% endfor -%}
]
......
......@@ -45,7 +45,7 @@ mode = 0644
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/json-test-template.json.in.jinja2
destination = ${buildout:directory}/json-test-template.json.in.jinja2
md5sum = d9c576a2182fc429416aec892fe080f2
md5sum = 2eb5596544d9c341acf653d4f7ce2680
mode = 0644
......@@ -56,7 +56,7 @@ md5sum = 876f18b159fbd9325332d0f42e9172ac
[monitor-httpd-template]
url = ${:_profile_base_location_}/${:filename}
md5sum = 22133e9aa5f52d7818b2fd1fd0415a00
md5sum = e89b66a90409bb8e9099aa197803d337
[network-bench-cfg]
recipe = slapos.recipe.template
......@@ -68,7 +68,7 @@ mode = 0644
[fluentd-agent-conf]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/fluentd-agent.conf.jinja2.in
md5sum = c9da9f22a1cd1fedcf7d659cba916400
md5sum = 4b81ddcbe2f16d23013caac37151d396
output = ${buildout:directory}/fluentd-agent.conf.jinja2.in
mode = 0644
......@@ -109,7 +109,7 @@ apache-libcloud = 0.16.0
ecdsa = 0.11
gitdb = 0.6.0
pycrypto = 2.6.1
slapos.toolbox = 0.52
slapos.toolbox = 0.53
smmap = 0.8.3
# Required by:
......
[buildout]
extends =
../../stack/slapos.cfg
../../component/qemu-kvm/buildout.cfg
../../component/packer/buildout.cfg
parts +=
qemu
\ No newline at end of file
......@@ -7,7 +7,7 @@ extends =
parts =
slapos-cookbook
instance
postgresql92
postgresql
#----------------
......
LoadModule unixd_module modules/mod_unixd.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
LoadModule ssl_module modules/mod_ssl.so
LoadModule mime_module modules/mod_mime.so
LoadModule dav_module modules/mod_dav.so
LoadModule dav_fs_module modules/mod_dav_fs.so
LoadModule negotiation_module modules/mod_negotiation.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
PidFile "{{ parameter_dict['pid-file'] }}"
ServerAdmin admin@
TypesConfig conf/mime.types
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
ServerTokens Prod
ServerSignature Off
TraceEnable Off
TimeOut {{ parameter_dict['timeout'] }}
SSLCertificateFile {{ parameter_dict['cert'] }}
SSLCertificateKeyFile {{ parameter_dict['key'] }}
SSLRandomSeed startup builtin
SSLRandomSeed connect builtin
SSLProtocol -ALL +SSLv3 +TLSv1
SSLHonorCipherOrder On
{% if parameter_dict['cipher'] -%}
SSLCipherSuite {{ parameter_dict['cipher'] }}
{% else -%}
SSLCipherSuite RC4-SHA:HIGH:!ADH
{%- endif %}
SSLSessionCache shmcb:{{ parameter_dict['ssl-session-cache'] }}(512000)
SSLProxyEngine On
# As backend is trusting REMOTE_USER header unset it always
RequestHeader unset REMOTE_USER
ErrorLog "{{ parameter_dict['error-log'] }}"
# Default apache log format with request time in microsecond at the end
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined
CustomLog "{{ parameter_dict['access-log'] }}" combined
<Directory />
Options FollowSymLinks
AllowOverride None
Allow from all
</Directory>
RewriteEngine On
{% for port, _, backend, authentication in parameter_dict['backend-list'] -%}
{% for ip in parameter_dict['ip-list'] -%}
Listen {{ ip }}:{{ port }}
{% endfor -%}
<VirtualHost *:{{ port }}>
{% if authentication -%}
SSLVerifyClient require
RequestHeader set REMOTE_USER %{SSL_CLIENT_S_DN_CN}s
SSLCACertificateFile {{ parameter_dict['ca-cert'] }}
SSLCARevocationPath {{ parameter_dict['crl'] }}
{% endif -%}
SSLEngine on
RewriteRule ^/(.*) {{ backend }}/$1 [L,P]
</VirtualHost>
{% endfor -%}
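# For example (hypothetical values), a backend entry
# (8080, _, 'http://10.0.0.1:2150/erp5', False) would make apache listen on
# port 8080 on every configured IP and proxy each request to
# http://10.0.0.1:2150/erp5/<path> via the RewriteRule above.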
{% set part_list = [] -%}
{% set ssl_parameter_dict = slapparameter_dict.get('ssl', {}) %}
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set use_ipv6 = slapparameter_dict.get('use-ipv6', False) -%}
{#
XXX: This template only supports exactly one IPv4 and (if ipv6 is used) one IPv6
per partition. No more (undefined result), no less (IndexError).
-#}
# TODO: insert varnish between apache & haproxy.
# And think of a way to specify which URLs go through varnish and which go
# directly to haproxy (maybe just by passing a literal configuration file chunk).
{% set ipv4 = (ipv4_set | list)[0] -%}
{% set apache_ip_list = [ipv4] -%}
{% if ipv6_set -%}
{% set ipv6 = (ipv6_set | list)[0] -%}
{% do apache_ip_list.append('[' ~ ipv6 ~ ']') -%}
{% endif -%}
{% if use_ipv6 -%}
[zope-tunnel-base]
recipe = slapos.cookbook:ipv4toipv6
runner-path = ${directory:services}/${:base-name}
6tunnel-path = {{ parameter_dict['6tunnel'] }}/bin/6tunnel
shell-path = {{ parameter_dict['dash'] }}/bin/dash
ipv4 = {{ ipv4 }}
{% endif -%}
{% set haproxy_dict = {} -%}
{% set apache_dict = {} -%}
{% set next_port = slapparameter_dict['tcpv4-port'] -%}
{% for family_name, parameter_id_list in slapparameter_dict['zope-family-dict'].items() -%}
{% set zope_family_address_list = [] -%}
{% set has_webdav = [] -%}
{% for parameter_id in parameter_id_list -%}
{% set zope_address_list = slapparameter_dict[parameter_id] -%}
{% for zope_address, maxconn, webdav in zope_address_list -%}
{% if webdav -%}
{% do has_webdav.append(None) %}
{% endif -%}
{% if use_ipv6 -%}
[{{ section('zope-tunnel-' ~ next_port) }}]
< = zope-tunnel-base
base-name = {{ 'zeo-tunnel-' ~ next_port }}
ipv4-port = {{ next_port }}
ipv6-port = {{ zope_address.split(']:')[1] }}
ipv6 = {{ zope_address.split(']:')[0][1:] }}
{% set zope_effective_address = ipv4 ~ ":" ~ next_port -%}
{% set next_port = next_port + 1 -%}
{% else -%}
{% set zope_effective_address = zope_address -%}
{% endif -%}
{% do zope_family_address_list.append((zope_effective_address, maxconn, webdav)) -%}
{% endfor -%}
{% endfor -%}
{# Make rendering fail artificially if any family has no known backend.
# This is useful as haproxy's hot-reconfiguration mechanism is
# supervisord-incompatible.
# As jinja2 postpones the KeyError until the placeholder value is actually
# used, do a no-op getitem.
-#}
{% do zope_family_address_list[0][0] -%}
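{# e.g. if every partition of a family published an empty zope-address-list,
   zope_family_address_list stays empty and the getitem above raises
   UndefinedError at render time, as intended. -#}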
{% set haproxy_port = next_port -%}
{% set next_port = next_port + 1 -%}
{% do haproxy_dict.__setitem__(family_name, (haproxy_port, zope_family_address_list)) -%}
{% if has_webdav -%}
{% set internal_scheme = 'http' -%}{# mod_rewrite does not recognise webdav scheme -#}
{% set external_scheme = 'webdavs' -%}
{% else %}
{% set internal_scheme = 'http' -%}
{% set external_scheme = 'https' -%}
{% endif -%}
{% set backend_path = slapparameter_dict['backend-path-dict'][family_name] -%}
{% set ssl_authentication = slapparameter_dict['ssl-authentication-dict'][family_name] -%}
{% do apache_dict.__setitem__(family_name, (next_port, external_scheme, internal_scheme ~ '://' ~ ipv4 ~ ':' ~ haproxy_port ~ backend_path, ssl_authentication)) -%}
{% set next_port = next_port + 1 -%}
{% endfor -%}
[apache-certificate-authority]
recipe = slapos.cookbook:certificate_authority
openssl-binary = {{ parameter_dict['openssl'] }}/bin/openssl
ca-dir = ${directory:ca-dir}
requests-directory = ${directory:requests}
wrapper = ${directory:services}/apache-ca
ca-private = ${directory:private}
ca-certs = ${directory:certs}
ca-newcerts = ${directory:newcerts}
ca-crl = ${directory:crl}
country-code = {{ slapparameter_dict['country-code'] }}
email = {{ slapparameter_dict['email'] }}
state = {{ slapparameter_dict['state'] }}
city = {{ slapparameter_dict['city'] }}
company = {{ slapparameter_dict['company'] }}
[haproxy-cfg-parameter-dict]
socket-path = ${directory:run}/haproxy.sock
server-check-path = {{ dumps(slapparameter_dict['haproxy-server-check-path']) }}
backend-dict = {{ dumps(haproxy_dict) }}
ip = {{ ipv4 }}
[haproxy-cfg]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-haproxy-cfg'] }}
rendered = ${directory:etc}/haproxy.cfg
context = section parameter_dict haproxy-cfg-parameter-dict
extensions = jinja2.ext.do
[{{ section('haproxy') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/haproxy
command-line = "{{ parameter_dict['haproxy'] }}/sbin/haproxy" -f "${haproxy-cfg:rendered}"
{# TODO: build socat and wrap it as "${directory:bin}/haproxy-ctl" to connect to "${haproxy-cfg-parameter-dict:socket-path}" #}
[apache-conf-ssl]
cert = ${directory:apache-conf}/apache.crt
key = ${directory:apache-conf}/apache.pem
ca-cert = ${directory:apache-conf}/ca.crt
crl = ${directory:apache-conf}/crl.pem
[apache-conf-parameter-dict]
backend-list = {{ dumps(apache_dict.values()) }}
ip-list = {{ dumps(apache_ip_list) }}
pid-file = ${directory:run}/apache.pid
error-log = ${directory:log}/apache-error.log
access-log = ${directory:log}/apache-access.log
# Apache 2.4's default value (60 seconds) can be a bit too short
timeout = 300
# Basic SSL server configuration
cert = ${apache-ssl:cert}
key = ${apache-ssl:key}
cipher =
ssl-session-cache = ${directory:log}/apache-ssl-session-cache
# Client x509 auth
{% if ssl_parameter_dict.get('ca-cert') and ssl_parameter_dict.get('ca-crl') -%}
ca-cert = {{ dumps(ssl_parameter_dict.get('ca-cert')) }}
crl = {{ dumps(ssl_parameter_dict.get('ca-crl')) }}
{% else -%}
ca-cert = ${apache-certificate-authority:ca-dir}/cacert.pem
crl = ${apache-certificate-authority:ca-crl}
{% endif -%}
[apache-conf]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-apache-conf'] }}
rendered = ${directory:apache-conf}/apache.conf
context = section parameter_dict apache-conf-parameter-dict
[{{ section('apache') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/apache
command-line = "{{ parameter_dict['apache'] }}/bin/httpd" -f "${apache-conf:rendered}" -DFOREGROUND
[{{ section('apache-promise') }}]
# Check one Apache port on IPv4; expect other ports and IPv6 to behave consistently
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/apache
hostname = {{ ipv4 }}
port = {{ apache_dict.values()[0][0] }}
[publish]
recipe = slapos.cookbook:publish.serialised
{% for family_name, (apache_port, scheme, _, _) in apache_dict.items() -%}
{{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %}
{{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }}
{% endfor -%}
[apache-ssl]
recipe = plone.recipe.command
command = "{{ parameter_dict['openssl'] }}/bin/openssl" req -newkey rsa -batch -new -x509 -days 3650 -nodes -keyout "${:key}" -out "${:cert}"
key = ${apache-conf-ssl:key}
cert = ${apache-conf-ssl:cert}
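# Note: -days 3650 -nodes produces a self-signed certificate valid for about
# ten years, with an unencrypted private key.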
[logrotate-apache]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = apache
log = ${apache-conf-parameter-dict:error-log} ${apache-conf-parameter-dict:access-log}
post = {{ parameter_dict['bin-directory'] }}/slapos-kill --pidfile ${apache-conf-parameter-dict:pid-file} -s USR1
[directory]
recipe = slapos.cookbook:mkdirectory
apache-conf = ${:etc}/apache
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
promise = ${directory:etc}/promise
services = ${:etc}/run
var = ${buildout:directory}/var
run = ${:var}/run
log = ${:var}/log
ca-dir = ${buildout:directory}/srv/ssl
requests = ${:ca-dir}/requests
private = ${:ca-dir}/private
certs = ${:ca-dir}/certs
newcerts = ${:ca-dir}/newcerts
crl = ${:ca-dir}/crl
[buildout]
extends = {{ logrotate_cfg }}
parts +=
publish
logrotate-apache
apache-certificate-authority
{{ part_list | join('\n ') }}
[directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
services = ${:etc}/run
promise = ${:etc}/promise
[erp5-bootstrap]
recipe = slapos.cookbook:erp5.bootstrap
runner-path = ${directory:services}/erp5-bootstrap
{# Note: an arbitrary domain name is picked if several point to the same IP -#}
{% set reverse_hosts = {} -%}
{% for x, y in publish['hosts-dict'].iteritems() -%}
{% do reverse_hosts.__setitem__(y, x) -%}
{% endfor -%}
{# XXX: Expect the first database to be the one to use for catalog. -#}
{% set mysql_parsed = urlparse.urlparse(publish['mariadb-database-list'][0]) -%}
mysql-url = {{ dumps(urlparse.urlunparse(mysql_parsed[:1] + (mysql_parsed.username + ":" + mysql_parsed.password + "@" + reverse_hosts.get(mysql_parsed.hostname, mysql_parsed.hostname) + ':' ~ mysql_parsed.port, ) + mysql_parsed[2:])) }}
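{# For illustration (hypothetical values): a published database url of
   mysql://user:pass@10.0.0.2:2099/erp5 whose host appears in hosts-dict as
   {'mariadb1': '10.0.0.2'} is rewritten to
   mysql://user:pass@mariadb1:2099/erp5. -#}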
{# Pick the first http[s] family found; they should all be equivalent anyway. -#}
{# Don't pick an http[s] family configured with ssl-authentication=true. By convention, such family names contain 'service'. -#}
{% set family_list = [] -%}
{% for key, value in publish.items() -%}
{% if key.startswith('family-') and not 'service' in key and value.startswith('http') -%}
{% do family_list.append(value.split('://', 1)) -%}
{% endif -%}
{% endfor -%}
zope-url = {{ dumps(family_list[0][0] + '://' + publish['inituser-login'] + ':' + publish['inituser-password'] + '@' + family_list[0][1] + '/' + publish['site-id']) }}
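{# e.g. (hypothetical values) family-default = https://10.0.0.1:2201 yields
   zope-url = https://zope:<inituser-password>@10.0.0.1:2201/erp5 -#}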
[promise-erp5-site]
recipe = slapos.cookbook:check_url_available
url = ${erp5-bootstrap:zope-url}
path = ${directory:promise}/erp5-site
dash_path = {{ parameter_dict['dash-location'] }}/bin/dash
curl_path = {{ parameter_dict['curl-location'] }}/bin/curl
[buildout]
parts = promise-erp5-site
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
\ No newline at end of file
{% set frontend_dict = slapparameter_dict.get('frontend', {}) %}
{% set has_frontend = frontend_dict.get('software-url', '') != '' -%}
{% set site_id = slapparameter_dict.get('site-id', 'erp5') -%}
{% set inituser_login = slapparameter_dict.get('inituser-login', 'zope') -%}
{% set publish_dict = {'site-id': site_id, 'inituser-login': inituser_login} -%}
[request-common]
recipe = slapos.cookbook:request.serialised
software-url = ${slap-connection:software-release-url}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
config-use-ipv6 = {{ dumps(slapparameter_dict.get('use-ipv6', False)) }}
{% macro request(name, software_type, config_key, config, ret={'url': True}) -%}
{% do config.update(slapparameter_dict.get(config_key, {})) -%}
{% set section = 'request-' ~ name -%}
[{{ section }}]
< = request-common
name = {{ name }}
software-type = {{ software_type }}
return = {{ ret.keys() | join(' ') }}
{% for ret, publish in ret.items() -%}
{% if publish -%}
{% do publish_dict.__setitem__(name ~ '-' ~ ret, '${' ~ section ~ ':connection-' ~ ret ~ '}')%}
{% endif -%}
{% endfor -%}
sla-computer_guid = {{ dumps(slapparameter_dict.get(config_key + '-computer-guid', computer_id)) }}
{% for option, value in config.items() -%}
config-{{ option }} = {{ dumps(value) }}
{% endfor -%}
{% endmacro -%}
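{# For illustration, the first call below expands to roughly:
     [request-memcached-persistent]
     < = request-common
     name = memcached-persistent
     software-type = kumofs
     return = url
     sla-computer_guid = ...
     config-tcpv4-port = 2000
   and registers memcached-persistent-url in publish_dict. -#}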
{{ request('memcached-persistent', 'kumofs', 'kumofs', {'tcpv4-port': 2000}) }}
{{ request('memcached-volatile', 'kumofs', 'memcached', {'tcpv4-port': 2010, 'ram-storage-size': 64}) }}
{{ request('cloudooo', 'cloudooo', 'cloudooo', {'tcpv4-port': 2020}) }}
{{ request('mariadb', 'mariadb', 'mariadb', {'tcpv4-port': 2099}, {'database-list': True, 'test-database-list': True}) }}
{# Fail early if an unexpected value is provided -#}
{% set zodb_type = slapparameter_dict.get('zodb-software-type') -%}
{% set zodb_extern = slapparameter_dict.get('zodb-extern') -%}
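{# The dict lookup below doubles as validation: any zodb-software-type other
   than 'zeo' or unset raises KeyError here, aborting rendering early. -#}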
{% if {'zeo': 1, None: 0}[zodb_type] -%}
{{ request('zodb', 'zodb-' ~ zodb_type, 'zodb', {'tcpv4-port': 2100, 'zodb-dict': {'root': {}}}, {'zodb-storage-type': False, 'zodb-dict': False, 'tidstorage-ip': False, 'tidstorage-port': False}) }}
{% else -%}
{% do zodb_extern[0] %}
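{# Same fail-early intent: without a zeo zodb, a non-empty zodb-extern
   parameter is mandatory; the indexing above is meant to abort rendering
   otherwise. -#}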
{% endif -%}
[inituser-password]
{% set inituser_password = slapparameter_dict.get('inituser-password') -%}
{% if inituser_password -%}
passwd = {{ dumps(inituser_password) }}
{% else -%}
recipe = slapos.cookbook:generate.password
{% endif -%}
[deadlock-debugger-password]
{% set deadlock_debugger_password = slapparameter_dict.get('deadlock-debugger-password') -%}
{% if deadlock_debugger_password -%}
passwd = {{ dumps(deadlock_debugger_password) }}
{% else -%}
recipe = slapos.cookbook:generate.password
{% endif -%}
[request-zope-base]
< = request-common
return =
zope-address-list
hosts-dict
config-bt5 = {{ dumps(slapparameter_dict.get('bt5', 'erp5_full_text_myisam_catalog erp5_configurator_standard erp5_configurator_maxma_demo erp5_configurator_ung erp5_configurator_run_my_doc slapos_configurator')) }}
config-bt5-repository-url = {{ dumps(slapparameter_dict.get('bt5-repository-url', local_bt5_repository)) }}
config-cloudooo-url = ${request-cloudooo:connection-url}
config-deadlock-debugger-password = ${deadlock-debugger-password:passwd}
config-developer-list = {{ dumps(slapparameter_dict.get('developer-list', [inituser_login])) }}
config-hosts-dict = {{ dumps(slapparameter_dict.get('hosts-dict', {})) }}
config-inituser-login = {{ dumps(inituser_login) }}
config-inituser-password = ${inituser-password:passwd}
config-kumofs-url = ${request-memcached-persistent:connection-url}
config-memcached-url = ${request-memcached-volatile:connection-url}
config-mysql-test-url-list = ${request-mariadb:connection-test-database-list}
config-mysql-url-list = ${request-mariadb:connection-database-list}
config-site-id = {{ dumps(site_id) }}
config-smtp-url = {{ dumps(slapparameter_dict.get('smtp-url', 'smtp://localhost:25/')) }}
config-timezone = {{ dumps(slapparameter_dict.get('timezone', 'UTC')) }}
{% if zodb_type -%}
config-tidstorage-ip = ${request-zodb:connection-tidstorage-ip}
config-tidstorage-port = ${request-zodb:connection-tidstorage-port}
config-zodb-dict = ${request-zodb:connection-zodb-dict}
config-zodb-storage-type = ${request-zodb:connection-zodb-storage-type}
{% endif -%}
{% if zodb_extern -%}
config-zodb-extern = {{ dumps(zodb_extern) }}
{% endif -%}
software-type = zope
{% set zope_family_dict = {} -%}
{% set zope_backend_path_dict = {} -%}
{% set ssl_authentication_dict = {} -%}
{% for custom_name, zope_parameter_dict in slapparameter_dict.get('zope-partition-dict', {'1': {}}).items() -%}
{% set partition_name = 'zope-' ~ custom_name -%}
{% set section_name = 'request-' ~ partition_name -%}
{% set backend_path = zope_parameter_dict.get('backend-path', '/') % {'site-id': site_id} %}
{% do zope_family_dict.setdefault(zope_parameter_dict.get('family', 'default'), []).append(section_name) -%}
{% do zope_backend_path_dict.setdefault(zope_parameter_dict.get('family', 'default'), backend_path) -%}
{% do ssl_authentication_dict.setdefault(zope_parameter_dict.get('family', 'default'), zope_parameter_dict.get('ssl-authentication', False)) -%}
[{{ section_name }}]
< = request-zope-base
name = {{ partition_name }}
config-name = {{ dumps(custom_name) }}
config-instance-count = {{ dumps(zope_parameter_dict.get('instance-count', 1)) }}
config-thread-amount = {{ dumps(zope_parameter_dict.get('thread-amount', 1)) }}
config-timerserver-interval = {{ dumps(zope_parameter_dict.get('timerserver-interval', 5)) }}
config-longrequest-logger-interval = {{ dumps(zope_parameter_dict.get('longrequest-logger-interval', -1)) }}
config-longrequest-logger-timeout = {{ dumps(zope_parameter_dict.get('longrequest-logger-timeout', 1)) }}
config-port-base = {{ dumps(zope_parameter_dict.get('port-base', 2200)) }}
config-webdav = {{ dumps(zope_parameter_dict.get('webdav', False)) }}
sla-computer_guid = {{ dumps(zope_parameter_dict.get('computer-guid', computer_id)) }}
{% endfor -%}
{# We need to concatenate lists that we cannot read as lists, so this gets hairy. -#}
{% set zope_address_list_id_dict = {} -%}
{% set zope_family_parameter_dict = {} -%}
{% for family_name, zope_section_id_list in zope_family_dict.items() -%}
{% for zope_section_id in zope_section_id_list -%}
{% set parameter_name = 'zope-family-entry-' ~ zope_section_id -%}
{% do zope_address_list_id_dict.__setitem__(zope_section_id, parameter_name) -%}
{% do zope_family_parameter_dict.setdefault(family_name, []).append(parameter_name) -%}
{% endfor -%}
{% if has_frontend -%}
{% set frontend_name = 'frontend-' ~ family_name -%}
{% set publishable = frontend_name ~ ':connection-site_url' -%}
[{{ frontend_name }}]
< = request-frontend-base
name = {{ frontend_name }}
config-url = ${request-balancer:{{ family_name }}-v6}
{% else -%}
{% set publishable = 'request-balancer:connection-' ~ family_name -%}
{% endif -%}
{% do publish_dict.__setitem__('family-' ~ family_name, '${' ~ publishable ~ '}' ) -%}
{% endfor -%}
{% set balancer_dict = slapparameter_dict.get('balancer', {}) -%}
[request-balancer]
< = request-common
name = balancer
software-type = balancer
sla-computer_guid = {{ dumps(slapparameter_dict.get('balancer-computer-guid', computer_id)) }}
return =
{%- for family in zope_family_dict %}
{{ family }}
{{ family }}-v6
{% endfor -%}
config-zope-family-dict = {{ dumps(zope_family_parameter_dict) }}
config-backend-path-dict = {{ dumps(zope_backend_path_dict) }}
config-ssl-authentication-dict = {{ dumps(ssl_authentication_dict) }}
config-tcpv4-port = {{ dumps(balancer_dict.get('tcpv4-port', 2150)) }}
{% for zope_section_id, name in zope_address_list_id_dict.items() -%}
config-{{ name }} = {{ ' ${' ~ zope_section_id ~ ':connection-zope-address-list}' }}
{% endfor -%}
# XXX: should those really be the same for all families?
config-haproxy-server-check-path = {{ dumps(balancer_dict.get('haproxy-server-check-path', '/') % {'site-id': site_id}) }}
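{# e.g. a haproxy-server-check-path of '/%(site-id)s/getId' expands to
   '/erp5/getId' with the default site id. -#}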
config-ssl = {{ dumps(balancer_dict.get('ssl', {})) }}
config-country-code = {{ slapparameter_dict.get('country-code', 'ZZ') }}
config-email = {{ slapparameter_dict.get('email', 'nobody@example.com') }}
config-state = {{ slapparameter_dict.get('state', "('State',)") }}
config-city = {{ slapparameter_dict.get('city', 'City') }}
config-company = {{ slapparameter_dict.get('company', 'Company') }}
[request-frontend-base]
{% if has_frontend -%}
< = request-common
software-url = {{ dumps(frontend_dict['software-url']) }}
software-type = {{ dumps(frontend_dict.get('software-type', 'RootSoftwareInstance')) }}
sla-instance_guid = {{ dumps(frontend_dict['instance-guid']) }}
slave = true
{% set config_dict = {
'type': 'zope',
} -%}
{% if frontend_dict.get('domain') -%}
{% do config_dict.__setitem__('custom_domain', frontend_dict['domain']) -%}
{% endif -%}
{% for name, value in config_dict.items() -%}
config-{{ name }} = {{ value }}
{% endfor -%}
return = site_url
{% endif -%}
[publish]
recipe = slapos.cookbook:publish.serialised
deadlock-debugger-password = ${deadlock-debugger-password:passwd}
inituser-password = ${inituser-password:passwd}
{#
Pick any published hosts-dict; they are expected to be identical, and there is
no way to check that here.
-#}
hosts-dict = {{ '${' ~ zope_address_list_id_dict.keys()[0] ~ ':connection-hosts-dict}' }}
{% for name, value in publish_dict.items() -%}
{{ name }} = {{ value }}
{% endfor -%}
[buildout]
parts = publish
......@@ -24,7 +24,7 @@ repository_id_list = erp5 vifib/master
[erp5]
recipe = slapos.recipe.build:gitclone
repository = http://git.erp5.org/repos/erp5.git
branch = interaction-drop
branch = erp5-vifib
git-executable = ${git:location}/bin/git
[vifib]
......@@ -54,150 +54,9 @@ dummy +=
extra-paths +=
${vifib:location}/master
[template-erp5]
md5sum = 6ada1fd4af0a451516443bfb6d00b717
[template-balancer]
md5sum = 818ab59ae966114735866aecef7a8563
[template-apache-conf]
md5sum = bb329fc28bef095a01efc901d2f84149
[template-create-erp5-site-real]
md5sum = 61824aab2172d21f1d6403a35cab47cd
[versions]
python-memcached = 1.47
facebook-sdk = 0.4.0
google-api-python-client = 1.2
erp5diff = 0.8.1.6
[networkcache]
# signature certificates of the following uploaders.
# Romain Courteaud
# Sebastien Robin
# Kazuhiko Shiozaki
# Cedric de Saint Martin
# Yingjie Xu
# Gabriel Monnerat
# Łukasz Nowak
# Test Agent (Automatic update from tests)
# Aurélien Calonne
signature-certificate-list =
-----BEGIN CERTIFICATE-----
MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE
CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5
MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl
ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF
AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw
boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX
Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA
ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX
mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC
q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g
QUUGLQ==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw
DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+
YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN
XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR
L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU
/4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t
LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda
FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd
R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU
hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB7jCCAVegAwIBAgIJAJWA0jQ4o9DGMA0GCSqGSIb3DQEBBQUAMA8xDTALBgNV
BAMMBHg2MXMwIBcNMTExMTI0MTAyNDQzWhgPMjExMTEwMzExMDI0NDNaMA8xDTAL
BgNVBAMMBHg2MXMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANdJNiFsRlkH
vq2kHP2zdxEyzPAWZH3CQ3Myb3F8hERXTIFSUqntPXDKXDb7Y/laqjMXdj+vptKk
3Q36J+8VnJbSwjGwmEG6tym9qMSGIPPNw1JXY1R29eF3o4aj21o7DHAkhuNc5Tso
67fUSKgvyVnyH4G6ShQUAtghPaAwS0KvAgMBAAGjUDBOMB0GA1UdDgQWBBSjxFUE
RfnTvABRLAa34Ytkhz5vPzAfBgNVHSMEGDAWgBSjxFUERfnTvABRLAa34Ytkhz5v
PzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAFLDS7zNhlrQYSQO5KIj
z2RJe3fj4rLPklo3TmP5KLvendG+LErE2cbKPqnhQ2oVoj6u9tWVwo/g03PMrrnL
KrDm39slYD/1KoE5kB4l/p6KVOdeJ4I6xcgu9rnkqqHzDwI4v7e8/D3WZbpiFUsY
vaZhjNYKWQf79l6zXfOvphzJ
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAO4V/jiMoICoMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMjMyMCAXDTEyMDIxNjExMTAyM1oYDzIxMTIwMTIzMTExMDIzWjAT
MREwDwYDVQQDDAhDT01QLTIzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
wi/3Z8W9pUiegUXIk/AiFDQ0UJ4JFAwjqr+HSRUirlUsHHT+8DzH/hfcTDX1I5BB
D1ADk+ydXjMm3OZrQcXjn29OUfM5C+g+oqeMnYQImN0DDQIOcUyr7AJc4xhvuXQ1
P2pJ5NOd3tbd0kexETa1LVhR6EgBC25LyRBRae76qosCAwEAAaNQME4wHQYDVR0O
BBYEFMDmW9aFy1sKTfCpcRkYnP6zUd1cMB8GA1UdIwQYMBaAFMDmW9aFy1sKTfCp
cRkYnP6zUd1cMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAskbFizHr
b6d3iIyN+wffxz/V9epbKIZVEGJd/6LrTdLiUfJPec7FaxVCWNyKBlCpINBM7cEV
Gn9t8mdVQflNqOlAMkOlUv1ZugCt9rXYQOV7rrEYJBWirn43BOMn9Flp2nibblby
If1a2ZoqHRxoNo2yTmm7TSYRORWVS+vvfjY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAIlBksrZVkK8MA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMzU3MCAXDTEyMDEyNjEwNTUyOFoYDzIxMTIwMTAyMTA1NTI4WjAT
MREwDwYDVQQDDAhDT01QLTM1NzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ts+iGUwi44vtIfwXR8DCnLtHV4ydl0YTK2joJflj0/Ws7mz5BYkxIU4fea/6+VF3
i11nwBgYgxQyjNztgc9u9O71k1W5tU95yO7U7bFdYd5uxYA9/22fjObaTQoC4Nc9
mTu6r/VHyJ1yRsunBZXvnk/XaKp7gGE9vNEyJvPn2bkCAwEAAaNQME4wHQYDVR0O
BBYEFKuGIYu8+6aEkTVg62BRYaD11PILMB8GA1UdIwQYMBaAFKuGIYu8+6aEkTVg
62BRYaD11PILMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAMoTRpBxK
YLEZJbofF7gSrRIcrlUJYXfTfw1QUBOKkGFFDsiJpEg4y5pUk1s5Jq9K3SDzNq/W
it1oYjOhuGg3al8OOeKFrU6nvNTF1BAvJCl0tr3POai5yXyN5jlK/zPfypmQYxE+
TaqQSGBJPVXYt6lrq/PRD9ciZgKLOwEqK8w=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAPHoWu90gbsgMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMMCXZpZmlibm9kZTAeFw0xMjAzMTkyMzIwNTVaFw0xMzAzMTkyMzIwNTVaMBQx
EjAQBgNVBAMMCXZpZmlibm9kZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ozBijpO8PS5RTeKTzA90vi9ezvv4vVjNaguqT4UwP9+O1+i6yq1Y2W5zZxw/Klbn
oudyNzie3/wqs9VfPmcyU9ajFzBv/Tobm3obmOqBN0GSYs5fyGw+O9G3//6ZEhf0
NinwdKmrRX+d0P5bHewadZWIvlmOupcnVJmkks852BECAwEAAaNQME4wHQYDVR0O
BBYEFF9EtgfZZs8L2ZxBJxSiY6eTsTEwMB8GA1UdIwQYMBaAFF9EtgfZZs8L2ZxB
JxSiY6eTsTEwMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAc43YTfc6
baSemaMAc/jz8LNLhRE5dLfLOcRSoHda8y0lOrfe4lHT6yP5l8uyWAzLW+g6s3DA
Yme/bhX0g51BmI6gjKJo5DoPtiXk/Y9lxwD3p7PWi+RhN+AZQ5rpo8UfwnnN059n
yDuimQfvJjBFMVrdn9iP6SfMjxKaGk6gVmI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAMNZBmoIOXPBMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMTMyMCAXDTEyMDUwMjEyMDQyNloYDzIxMTIwNDA4MTIwNDI2WjAT
MREwDwYDVQQDDAhDT01QLTEzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
6peZQt1sAmMAmSG9BVxxcXm8x15kE9iAplmANYNQ7z2YO57c10jDtlYlwVfi/rct
xNUOKQtc8UQtV/fJWP0QT0GITdRz5X/TkWiojiFgkopza9/b1hXs5rltYByUGLhg
7JZ9dZGBihzPfn6U8ESAKiJzQP8Hyz/o81FPfuHCftsCAwEAAaNQME4wHQYDVR0O
BBYEFNuxsc77Z6/JSKPoyloHNm9zF9yqMB8GA1UdIwQYMBaAFNuxsc77Z6/JSKPo
yloHNm9zF9yqMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAl4hBaJy1
cgiNV2+Z5oNTrHgmzWvSY4duECOTBxeuIOnhql3vLlaQmo0p8Z4c13kTZq2s3nhd
Loe5mIHsjRVKvzB6SvIaFUYq/EzmHnqNdpIGkT/Mj7r/iUs61btTcGUCLsUiUeci
Vd0Ozh79JSRpkrdI8R/NRQ2XPHAo+29TT70=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAKRvzcy7OH0UMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtNzcyMCAXDTEyMDgxMDE1NDI1MVoYDzIxMTIwNzE3MTU0MjUxWjAT
MREwDwYDVQQDDAhDT01QLTc3MjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
o7aipd6MbnuGDeR1UJUjuMLQUariAyQ2l2ZDS6TfOwjHiPw/mhzkielgk73kqN7A
sUREx41eTcYCXzTq3WP3xCLE4LxLg1eIhd4nwNHj8H18xR9aP0AGjo4UFl5BOMa1
mwoyBt3VtfGtUmb8whpeJgHhqrPPxLoON+i6fIbXDaUCAwEAAaNQME4wHQYDVR0O
BBYEFEfjy3OopT2lOksKmKBNHTJE2hFlMB8GA1UdIwQYMBaAFEfjy3OopT2lOksK
mKBNHTJE2hFlMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAaNRx6YN2
M/p3R8/xS6zvH1EqJ3FFD7XeAQ52WuQnKSREzuw0dsw12ClxjcHiQEFioyTiTtjs
5pW18Ry5Ie7iFK4cQMerZwWPxBodEbAteYlRsI6kePV7Gf735Y1RpuN8qZ2sYL6e
x2IMeSwJ82BpdEI5niXxB+iT0HxhmR+XaMI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB+DCCAWGgAwIBAgIJAKGd0vpks6T/MA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMMCUNPTVAtMTU4NDAgFw0xMzA2MjAxMjE5MjBaGA8yMTEzMDUyNzEyMTkyMFow
FDESMBAGA1UEAwwJQ09NUC0xNTg0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQDZTH9etPUC+wMZQ3UIiOwyyCfHsJ+7duCFYjuo1uZrhtDt/fp8qb8qK9ob+df3
EEYgA0IgI2j/9jNUEnKbc5+OrfKznzXjrlrH7zU8lKBVNCLzQuqBKRNajZ+UvO8R
nlqK2jZCXP/p3HXDYUTEwIR5W3tVCEn/Vda4upTLcPVE5wIDAQABo1AwTjAdBgNV
HQ4EFgQU7KXaNDheQWoy5uOU01tn1M5vNkEwHwYDVR0jBBgwFoAU7KXaNDheQWoy
5uOU01tn1M5vNkEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQASmqCU
Znbvu6izdicvjuE3aKnBa7G++Fdp2bdne5VCwVbVLYCQWatB+n4crKqGdnVply/u
+uZ16u1DbO9rYoKgWqjLk1GfiLw5v86pd5+wZd5I9QJ0/Sbz2vZk5S4ciMIGwArc
m711+GzlW5xe6GyH9SZaGOPAdUbI6JTDwLzEgA==
-----END CERTIFICATE-----
......@@ -156,7 +156,7 @@ mode = 0644
[template-supervisord]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/template/${:filename}
md5sum = 069e593e50204b227bdb08d29d7292fd
md5sum = d294d0dafd265048399de6da8c96345f
location = ${buildout:parts-directory}/${:_buildout_section_name_}
filename = supervisord.conf.in
download-only = true
......
......@@ -19,7 +19,7 @@ gunicorn = 19.3.0
prettytable = 0.7.2
pycrypto = 2.6.1
slapos.recipe.template = 2.8
slapos.toolbox = 0.52
slapos.toolbox = 0.53
smmap = 0.9.0
# Required by:
......
......@@ -26,7 +26,9 @@ autorestart = {{ supervisord['autorestart'] }}
stdout_logfile = {{ supervisord['no_logfile'] }}
stderr_logfile = {{ supervisord['no_logfile'] }}
directory = {{ supervisord['directory'] }}
environment = PATH="{{- supervisord['path'] -}}",MAKEFLAGS="-j{{- '%d' % builtin.max(1, (multiprocessing.cpu_count() / builtin.int(slapparameter_dict.get('cpu-usage-ratio', 4)))) -}}"
{# how many parallel build jobs to spawn when compiling software -#}
{% set njobs = builtin.max(1, (multiprocessing.cpu_count() // builtin.int(slapparameter_dict.get('cpu-usage-ratio', 4)))) -%}
environment = PATH="{{- supervisord['path'] -}}",MAKEFLAGS="-j{{ njobs }}",NPY_NUM_BUILD_JOBS="{{ njobs }}",BUNDLE_JOBS="{{ njobs }}"
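{# e.g. 8 CPU cores with the default cpu-usage-ratio of 4 give -j2; a
   single-core machine still gets -j1 thanks to max(). -#}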
[program:{{- supervisord['slapgrid-cp'] -}}]
command = {{ supervisord['slapgrid-cp-command'] }}
......