Commit bb55ce61 authored by Kazuhiko Shiozaki

Merge remote-tracking branch 'origin/master' into erp5-component

parents 1c985f2f 94b0ad8d
Changes
=======
0.84.1 (2013-10-03)
-------------------
* Resiliency: PBS: promise should NOT bang. [64886cd]
0.84 (2013-09-30)
-----------------
* Request.py: improve instance-state handling. [ba5f160]
* Resilient recipe: remove hashing of urls/names. [ee2aec8]
* Resilient pbs recipe: recover from rdiff-backup failures. [be7f2fc, 92ee0c3]
* Resilience: add pidfiles in PBS. [0b3ad5c]
* Resilient: don't hide exception, print it. [05b3d64, d2b0494]
* Resiliency: Only keep 10 increments of backup. [4e89e33]
* KVM SR: add fallback in case of download exception. [de8d796]
* slaprunner: don't check certificate for importer. [53dc772]
0.83.1 (2013-09-10)
-------------------
......
@@ -17,6 +17,7 @@ extends =
   ../pkgconfig/buildout.cfg
   ../popt/buildout.cfg
   ../python-2.7/buildout.cfg
+  ../python-openssl/buildout.cfg
   ../readline/buildout.cfg
   ../sqlite3/buildout.cfg
   ../swig/buildout.cfg
@@ -90,10 +91,14 @@ output = ${buildout:directory}/environment.sh
 [lxml-python]
 python = python2.7

+[python-openssl]
+python = python2.7
+
 [slapos]
 recipe = z3c.recipe.scripts
 python = python2.7
 eggs =
+  ${python-openssl:egg}
   slapos.libnetworkcache
   zc.buildout
   ${lxml-python:egg}
@@ -131,46 +136,64 @@ scripts = py
 [versions]
 # Use our own buildout version
-zc.buildout = 1.6.0-dev-SlapOS-010
+zc.buildout = 1.6.0-dev-SlapOS-012
 # Force to use zc.recipe.egg 1.x
 zc.recipe.egg = 1.3.2
 # Use own version of h.r.download to be able to open archives not supported by python2.x: .xz
-hexagonit.recipe.download = 1.6nxd002
+hexagonit.recipe.download = 1.7nxd002

-Jinja2 = 2.7
+Jinja2 = 2.7.1
 MarkupSafe = 0.18
-Werkzeug = 0.8.3
+Pygments = 1.6
+Werkzeug = 0.9.4
 buildout-versions = 1.7
+cmd2 = 0.6.7
 collective.recipe.template = 1.10
-lxml = 3.1.2
+itsdangerous = 0.23
+lxml = 3.2.3
 meld3 = 0.6.10
 netaddr = 0.7.10
+prettytable = 0.7.2
+pyOpenSSL = 0.13.1
+pyparsing = 2.0.1
+setuptools = 1.1.6
+slapos.core = 1.0.0rc6
 slapos.libnetworkcache = 0.13.4
+slapos.recipe.cmmi = 0.1.1
 xml-marshaller = 0.9.7
 z3c.recipe.scripts = 1.0.1

 # Required by:
-# slapos.core==0.35.2-dev
-Flask = 0.9
+# slapos.core==1.0.0rc6
+Flask = 0.10.1

+# Required by:
+# slapos.core==1.0.0rc6
+bpython = 0.12
+
+# Required by:
+# slapos.core==1.0.0rc6
+cliff = 1.4.5
+
+# Required by:
+# slapos.core==1.0.0rc6
+ipython = 1.1.0
+
 # Required by:
-# slapos.core==0.35.2-dev
+# slapos.core==1.0.0rc6
 netifaces = 0.8

 # Required by:
-# slapos.core==0.35.2-dev
-# slapos.libnetworkcache==0.13.3
-# supervisor==3.0b1
-# zc.buildout==1.6.0-dev-SlapOS-010
-# zope.interface==4.0.5
-setuptools = 0.6c12dev-r88846
+# slapos.core==1.0.0rc6
+requests = 2.0.0

 # Required by:
-# slapos.core==0.35.2-dev
-supervisor = 3.0b2
+# slapos.core==1.0.0rc6
+supervisor = 3.0

 # Required by:
-# slapos.core==0.35.2-dev
+# slapos.core==1.0.0rc6
 zope.interface = 4.0.5
# This file is used to install testing, not-stable-yet, version of SlapOS Node
[buildout]
extends =
  buildout.cfg

# Add hosting location of testing version of slapos.core
find-links +=
  http://www.nexedi.org/static/packages/source/slapos.core-testing/

[versions]
slapos.core =
@@ -28,7 +28,7 @@ from setuptools import setup, find_packages
 import glob
 import os

-version = '0.83.1'
+version = '0.84.1'
 name = 'slapos.cookbook'
 long_description = open("README.txt").read() + "\n" + \
     open("CHANGES.txt").read() + "\n"
......
 # -*- coding: utf-8 -*-
 import logging
 import time
+import traceback

 import slapos
 from slapos.slap.slap import NotFoundError
@@ -65,6 +66,7 @@ def takeover(server_url, key_file, cert_file, computer_guid,
       cp_winner.rename(new_name=cp_exporter_ref)
       break
     except NotFoundError:
+      traceback.print_exc()
       log.warning('Impossible to rename. Retrying in a few seconds...')
   log.debug('Renamed.')
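The print_exc call above lands inside the retry loop that keeps trying to rename the winning partition. A rough standalone sketch of that pattern; the loop shape, sleep interval and helper name are illustrative assumptions, not the actual takeover code:

import time
import traceback

from slapos.slap.slap import NotFoundError

def rename_until_accepted(cp_winner, cp_exporter_ref, log, delay=10):
    # Keep retrying until the SlapOS master accepts the rename.
    while True:
        try:
            cp_winner.rename(new_name=cp_exporter_ref)
            break
        except NotFoundError:
            # Print the full traceback instead of hiding the exception,
            # then retry after a short pause.
            traceback.print_exc()
            log.warning('Impossible to rename. Retrying in a few seconds...')
            time.sleep(delay)
    log.debug('Renamed.')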
......
@@ -60,7 +60,11 @@ def getSocketStatus(host, port):
 # Download existing hard drive if needed at first boot
 if not os.path.exists(disk_path) and virtual_hard_drive_url != '':
   print('Downloading virtual hard drive...')
-  urllib.urlretrieve(virtual_hard_drive_url, disk_path)
+  try:
+    urllib.urlretrieve(virtual_hard_drive_url, disk_path)
+  except:
+    os.remove(disk_path)
+    raise
   md5sum = virtual_hard_drive_md5sum.strip()
   if md5sum:
     print('Checking MD5 checksum...')
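The new try/except guarantees that a partially downloaded image is deleted before the exception propagates, so the os.path.exists(disk_path) guard cannot mistake a broken file for a valid disk on the next run. A self-contained sketch of the same pattern, including the MD5 check hinted at above; the helper function and names are illustrative, not the recipe's exact code:

import hashlib
import os
import urllib

def fetch_virtual_hard_drive(url, disk_path, expected_md5=''):
    if not os.path.exists(disk_path) and url != '':
        print('Downloading virtual hard drive...')
        try:
            urllib.urlretrieve(url, disk_path)
        except Exception:
            # Drop the partial download so the next run starts from scratch.
            os.remove(disk_path)
            raise
    if expected_md5.strip():
        print('Checking MD5 checksum...')
        with open(disk_path, 'rb') as f:
            if hashlib.md5(f.read()).hexdigest() != expected_md5.strip():
                raise ValueError('MD5 mismatch for %s' % disk_path)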
......
@@ -50,6 +50,9 @@ class Recipe(GenericBaseRecipe):
 class Callback(GenericBaseRecipe):

   def createCallback(self, notification_id, callback):
+    # XXX: hashing the name here and in
+    # slapos.toolbox/slapos/pubsub/__init__.py is completely messed up and
+    # prevent any debug.
     callback_id = sha512(notification_id).hexdigest()

     filepath = os.path.join(self.options['callbacks'], callback_id)
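The XXX comment points at the coupling hidden behind that sha512 call: this recipe and the pubsub server in slapos.toolbox each hash the notification id on their own, so both must derive exactly the same file name, and the opaque digest makes it hard to map a callback file back to its notification when debugging. Roughly (a sketch of the naming scheme, not the toolbox code):

from hashlib import sha512
import os

def callback_path(callbacks_directory, notification_id):
    # Both the producer writing the callback and the consumer looking it up
    # must compute this identically, otherwise the callback is never found.
    callback_id = sha512(notification_id).hexdigest()
    return os.path.join(callbacks_directory, callback_id)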
......
@@ -25,43 +25,24 @@
 #
 ##############################################################################

-import hashlib
 import json
 import os
-import signal
 import subprocess
 import sys
+import textwrap
 import urlparse

 from slapos.recipe.librecipe import GenericSlapRecipe
 from slapos.recipe.dropbear import KnownHostsFile
 from slapos.recipe.notifier import Notify
 from slapos.recipe.notifier import Callback
-from slapos import slap as slapmodule


 def promise(args):
-  def failed_ssh():
-    sys.stderr.write("SSH Connection failed\n")
-    partition = slap.registerComputerPartition(args['computer_id'],
-                                                args['partition_id'])
-    partition.bang("SSH Connection failed. rdiff-backup is unusable.")
-
-  def sigterm_handler(signum, frame):
-    failed_ssh()
-  signal.signal(signal.SIGTERM, sigterm_handler)
-
-  slap = slapmodule.slap()
-  slap.initializeConnection(args['server_url'],
-                            key_file=args.get('key_file'),
-                            cert_file=args.get('cert_file'))
-
-  ssh = subprocess.Popen([args['ssh_client'], '%(user)s@%(host)s/%(port)s' % args],
-                         stdin=subprocess.PIPE,
-                         stdout=open(os.devnull, 'w'),
-                         stderr=open(os.devnull, 'w'))
+  ssh = subprocess.Popen(
+      [args['ssh_client'], '%(user)s@%(host)s/%(port)s' % args],
+      stdin=subprocess.PIPE, stdout=None, stderr=None
+  )

   # Rdiff Backup protocol quit command
   quitcommand = 'q' + chr(255) + chr(0) * 7
@@ -74,7 +55,7 @@ def promise(args):
   if ssh.poll() is None:
     return 1
   if ssh.returncode != 0:
-    failed_ssh()
+    sys.stderr.write("SSH Connection failed\n")
   return ssh.returncode
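After this change the promise is self-contained: it no longer registers with the SlapOS master or bangs the partition, it simply opens the ssh connection, speaks just enough of the rdiff-backup protocol to say goodbye, and reports failure through stderr and its exit code. A condensed sketch of that flow; the stdin write and the fixed sleep are assumptions about the elided middle of the function, not copied from the recipe:

import subprocess
import sys
import time

def promise(args):
    # Open the ssh connection exactly as rdiff-backup would use it.
    ssh = subprocess.Popen(
        [args['ssh_client'], '%(user)s@%(host)s/%(port)s' % args],
        stdin=subprocess.PIPE, stdout=None, stderr=None
    )
    # Rdiff Backup protocol quit command
    quitcommand = 'q' + chr(255) + chr(0) * 7
    # Assumed plumbing: send the quit command, give the server a moment,
    # then inspect how the connection ended.
    ssh.stdin.write(quitcommand)
    ssh.stdin.flush()
    ssh.stdin.close()
    time.sleep(10)
    if ssh.poll() is None:
        return 1
    if ssh.returncode != 0:
        sys.stderr.write("SSH Connection failed\n")
    return ssh.returncode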
@@ -87,16 +68,19 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
     url = entry.get('url')
     if not url:
       raise ValueError('Missing URL parameter for PBS recipe')

-    # We assume that thanks to sha512 there's no collisions
-    url_hash = hashlib.sha512(url).hexdigest()
-    name_hash = hashlib.sha512(entry['name']).hexdigest()
-
-    promise_path = os.path.join(self.options['promises-directory'],
-                                url_hash)
-    parsed_url = urlparse.urlparse(url)
-    promise_dict = self.promise_base_dict.copy()
-    promise_dict.update(user=parsed_url.username,
+    parsed_url = urlparse.urlparse(url)
+
+    slave_type = entry['type']
+    if not slave_type in ['pull', 'push']:
+      raise ValueError('type parameter must be either pull or push.')
+    slave_id = entry['notification-id']
+
+    print 'Processing PBS slave %s with type %s' % (slave_id, slave_type)
+
+    promise_path = os.path.join(self.options['promises-directory'], slave_id)
+    promise_dict = dict(ssh_client=self.options['sshclient-binary'],
+                        user=parsed_url.username,
                         host=parsed_url.hostname,
                         port=parsed_url.port)
     promise = self.createPythonScript(promise_path,
@@ -104,72 +88,109 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
                                       promise_dict)
     path_list.append(promise)

     host = parsed_url.hostname
     known_hosts_file[host] = entry['server-key']

+    notifier_wrapper_path = os.path.join(self.options['wrappers-directory'], slave_id)
+    rdiff_wrapper_path = notifier_wrapper_path + '_raw'
+
+    # Create the rdiff-backup wrapper
+    # It is useful to separate it from the notifier so that we can run it
+    # Manually.
+    rdiffbackup_parameter_list = []
+
     # XXX use -y because the host might not yet be in the
     # trusted hosts file until the next time slapgrid is run.
-    remote_schema = '%(ssh)s -y -p %%s %(user)s@%(host)s' % \
-      {
+    rdiffbackup_remote_schema = '%(ssh)s -y -p %%s %(user)s@%(host)s' % {
         'ssh': self.options['sshclient-binary'],
         'user': parsed_url.username,
         'host': parsed_url.hostname,
     }
-    parameters = ['--remote-schema', remote_schema]

     remote_directory = '%(port)s::%(path)s' % {'port': parsed_url.port,
                                                'path': parsed_url.path}

-    local_directory = self.createDirectory(self.options['directory'],
-                                           name_hash)
+    local_directory = self.createDirectory(self.options['directory'], entry['name'])

-    if entry['type'] == 'push':
-      parameters.extend(['--restore-as-of', 'now'])
-      parameters.append('--force')
-      parameters.extend([local_directory, remote_directory])
-      comments = ['','Push data to a PBS *-import instance.','']
-    else:
-      parameters.extend([remote_directory, local_directory])
-      comments = ['','Pull data from a PBS *-export instance.','']
-
-    wrapper_basepath = os.path.join(self.options['wrappers-directory'],
-                                    url_hash)
-
-    if 'notify' in entry:
-      wrapper_path = wrapper_basepath + '_raw'
-    else:
-      wrapper_path = wrapper_basepath
-
-    wrapper = self.createWrapper(name=wrapper_path,
-                                 command=self.options['rdiffbackup-binary'],
-                                 parameters=parameters,
-                                 comments = comments)
-    path_list.append(wrapper)
+    if slave_type == 'push':
+      # Create a simple rdiff-backup wrapper that will push
+      rdiffbackup_parameter_list.extend(['--remote-schema', rdiffbackup_remote_schema])
+      rdiffbackup_parameter_list.extend(['--restore-as-of', 'now'])
+      rdiffbackup_parameter_list.append('--force')
+      rdiffbackup_parameter_list.append(local_directory)
+      rdiffbackup_parameter_list.append(remote_directory)
+      comments = ['', 'Push data to a PBS *-import instance.', '']
+      rdiff_wrapper = self.createWrapper(
+          name=rdiff_wrapper_path,
+          command=self.options['rdiffbackup-binary'],
+          parameters=rdiffbackup_parameter_list,
+          comments=comments,
+          pidfile=os.path.join(self.options['run-directory'], '%s_raw.pid' % slave_id),
+      )
+
+    elif slave_type == 'pull':
+      # Wrap rdiff-backup call into a script that checks consistency of backup
+      # We need to manually escape the remote schema
+      rdiffbackup_parameter_list.extend(['--remote-schema', '"%s"' % rdiffbackup_remote_schema])
+      rdiffbackup_parameter_list.append(remote_directory)
+      rdiffbackup_parameter_list.append(local_directory)
+      comments = ['', 'Pull data from a PBS *-export instance.', '']
+
+      rdiff_wrapper_template = textwrap.dedent("""\
+          #!/bin/sh
+          # %(comment)s
+          RDIFF_BACKUP="%(rdiffbackup_binary)s"
+          $RDIFF_BACKUP %(rdiffbackup_parameter)s
+          if [ ! $? -eq 0 ]; then
+            # Check the backup, go to the last consistent backup, so that next
+            # run will be okay.
+            echo "Checking backup directory..."
+            $RDIFF_BACKUP --check-destination-dir %(local_directory)s
+            if [ ! $? -eq 0 ]; then
+              # Here, two possiblities:
+              # * The first backup failed. It is safe to remove it since there is nothing valuable there.
+              # * The backup has been complete, but is now in a really weird state. Not safe to remove it.
+              echo "Impossible to check backup: we move it to a safe place."
+              # XXX: bang
+              mv %(local_directory)s %(local_directory)s.$(date +%%s)
+            fi
+          else
+            # Everything's okay, cleaning up...
+            $RDIFF_BACKUP --remove-older-than %(remove_backup_older_than)s --force %(local_directory)s
+          fi
+          """)
+
+      rdiff_wrapper_content = rdiff_wrapper_template % {
+          'comment': comments,
+          'rdiffbackup_binary': self.options['rdiffbackup-binary'],
+          'local_directory': local_directory,
+          'rdiffbackup_parameter': ' \\\n '.join(rdiffbackup_parameter_list),
+          # XXX: only 10 increments is not enough by default.
+          'remove_backup_older_than': entry.get('remove-backup-older-than', '3B')
+      }
+
+      rdiff_wrapper = self.createFile(
+          name=rdiff_wrapper_path,
+          content=rdiff_wrapper_content,
+          mode=0700
+      )
+
+    path_list.append(rdiff_wrapper)

-    if 'notify' in entry:
-      feed_url = '%s/get/%s' % (self.options['notifier-url'],
-                                entry['notification-id'])
-      wrapper = self.createNotifier(notifier_binary=self.options['notifier-binary'],
-                                    wrapper=wrapper_basepath,
-                                    executable=wrapper_path,
-                                    log=os.path.join(self.options['feeds'], entry['notification-id']),
-                                    title=entry.get('title', 'Untitled'),
-                                    notification_url=entry['notify'],
-                                    feed_url=feed_url,
-                                    )
-      path_list.append(wrapper)
-      #self.setConnectionDict(dict(feed_url=feed_url), entry['slave_reference'])
+    # Create notifier wrapper
+    notifier_wrapper = self.createNotifier(
+        notifier_binary=self.options['notifier-binary'],
+        wrapper=notifier_wrapper_path,
+        executable=rdiff_wrapper,
+        log=os.path.join(self.options['feeds'], entry['notification-id']),
+        title=entry.get('title', slave_id),
+        notification_url=entry['notify'],
+        feed_url='%s/get/%s' % (self.options['notifier-url'], entry['notification-id']),
+        pidfile=os.path.join(self.options['run-directory'], '%s.pid' % slave_id)
+    )
+    path_list.append(notifier_wrapper)

     if 'on-notification' in entry:
       path_list.append(self.createCallback(str(entry['on-notification']),
-                                           wrapper))
+                                           notifier_wrapper))
     else:
-      cron_entry = os.path.join(self.options['cron-entries'], url_hash)
+      cron_entry = os.path.join(self.options['cron-entries'], slave_id)
       with open(cron_entry, 'w') as cron_entry_file:
-        cron_entry_file.write('%s %s' % (entry['frequency'], wrapper))
+        cron_entry_file.write('%s %s' % (entry['frequency'], notifier_wrapper))
       path_list.append(cron_entry)

     return path_list
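Each element of slave-instance-list that add_slave receives is a plain dict decoded from JSON. The keys read above are url, type, name, notification-id, server-key, notify, plus either on-notification (callback mode) or frequency (cron mode), and the optional title and remove-backup-older-than. An illustrative pull entry, with every value invented for the example:

slave_entry = {
    'url': 'ssh://backup@[2001:db8::1]:22220/srv/backup/kvm-0',
    'type': 'pull',
    'name': 'COMP-0-slappart0-kvm-1',
    'notification-id': 'COMP-0-slappart0-kvm-1-pull',
    'server-key': 'ssh-rsa AAAAB3Nza... backup@example',
    'notify': 'http://[2001:db8::2]:8088/notify',
    'on-notification': 'http://[2001:db8::2]:8088/get/COMP-0-slappart0-kvm-1-push',
    'title': 'Pulling from kvm',
    'remove-backup-older-than': '3B',
}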
@@ -181,20 +202,9 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
     if self.optionIsTrue('client', True):
       self.logger.info("Client mode")

-      slap_connection = self.buildout['slap-connection']
-      self.promise_base_dict = {
-          'server_url': slap_connection['server-url'],
-          'computer_id': slap_connection['computer-id'],
-          'cert_file': slap_connection.get('cert-file'),
-          'key_file': slap_connection.get('key-file'),
-          'partition_id': slap_connection['partition-id'],
-          'ssh_client': self.options['sshclient-binary'],
-      }
-
       slaves = json.loads(self.options['slave-instance-list'])
       known_hosts = KnownHostsFile(self.options['known-hosts'])
       with known_hosts:
-        # XXX this API could be cleaner
         for slave in slaves:
           path_list.extend(self.add_slave(slave, known_hosts))
     else:
@@ -209,4 +219,3 @@ class Recipe(GenericSlapRecipe, Notify, Callback):
     path_list.append(wrapper)

     return path_list
@@ -119,8 +119,11 @@ class Recipe(object):
         ))

     slave = options.get('slave', 'false').lower() in \
         librecipe.GenericBaseRecipe.TRUE_VALUES

     # By default XXXX Way of doing it is ugly and dangerous
     requested_state = options.get('state', buildout['slap-connection'].get('requested','started'))
+    options['requested-state'] = requested_state

     slap = slapmodule.slap()
     slap.initializeConnection(
         options['server-url'],
@@ -134,6 +137,7 @@ class Recipe(object):
     self._raise_request_exception = None
     self._raise_request_exception_formatted = None
     self.instance = None

+    # Try to do the request and fetch parameter dict...
     try:
       self.instance = request(software_url, software_type,
@@ -141,14 +145,17 @@ class Recipe(object):
           filter_kw=filter_kw, shared=slave, state=requested_state)
       return_parameter_dict = self._getReturnParameterDict(self.instance,
                                                            return_parameters)
+      # Fetch the instance-guid and the instance-state
+      # Note: SlapOS Master does not support it for slave instances
       if not slave:
         try:
           options['instance-guid'] = self.instance.getInstanceGuid()
           # XXX: deprecated, to be removed
           options['instance_guid'] = self.instance.getInstanceGuid()
+          options['instance-state'] = self.instance.getState()
         except (slapmodule.ResourceNotReady, AttributeError):
           # Backward compatibility. Old SlapOS master and core don't know this.
-          self.logger.warning("Impossible to fetch instance GUID.")
+          self.logger.warning("Impossible to fetch instance GUID nor state.")
     except (slapmodule.NotFoundError, slapmodule.ServerError, slapmodule.ResourceNotReady) as exc:
       self._raise_request_exception = exc
       self._raise_request_exception_formatted = traceback.format_exc()
@@ -162,13 +169,6 @@ class Recipe(object):
       except KeyError:
         if self.failed is None:
           self.failed = param

-    options['requested-state'] = requested_state
-
-    try:
-      options['instance-state'] = self.instance.getState()
-    except slapmodule.ResourceNotReady:
-      # Odd case: SlapOS Master doesn't send the state of a slave partition.
-      # XXX Should be fixed in the SlapOS Master, we should not care here.
-      pass
-
   def _filterForStorage(self, partition_parameter_kw):
     return partition_parameter_kw
......
@@ -124,8 +124,8 @@ class ImportRecipe(GenericBaseRecipe):
       ifs=$IFS IFS=';'
       read user pass remaining < %(etc-directory)s/.users
       IFS=$ifs
-      %(curl-binary)s -vg6L -F clogin="$user" -F cpwd="$pass" --dump-header login_cookie %(backend-url)s/doLogin;
-      %(curl-binary)s -vg6LX POST --cookie login_cookie --max-time 5 %(backend-url)s/runSoftwareProfile;
+      %(curl-binary)s --insecure -vg6L -F clogin="$user" -F cpwd="$pass" --dump-header login_cookie %(backend-url)s/doLogin;
+      %(curl-binary)s --insecure -vg6LX POST --cookie login_cookie --max-time 5 %(backend-url)s/runSoftwareProfile;
       rm -f login_cookie
       """ % self.options)
     self.createExecutable(wrapper, content=content)
......
@@ -94,15 +94,17 @@ mode = 0644
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/instance-kvm-resilient.cfg.jinja2
 mode = 644
-md5sum = 6753004b582c0470bd028253ce1964ad
+#md5sum = 6753004b582c0470bd028253ce1964ad
 download-only = true
+on-update = true

 [template-kvm-resilient-test]
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/instance-kvm-resilient-test.cfg.jinja2
-md5sum = 027d68d9decbc6aec59365fa723975d7
+#md5sum = 027d68d9decbc6aec59365fa723975d7
 mode = 0644
 download-only = true
+on-update = true

 [template-kvm-import]
 recipe = slapos.recipe.template
@@ -115,7 +117,7 @@ mode = 0644
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/template/kvm-import.sh.in
 filename = kvm-import.sh.in
-md5sum = a731372420dc59c0b5ba7bc5f39a14ad
+md5sum = e03ed049cddd8d157228b09e1ebc071a
 download-only = true
 mode = 0755
......
@@ -42,15 +42,19 @@ recipe = slapos.cookbook:request
 software-url = ${slap-connection:software-release-url}
 software-type = kvm-resilient
 name = Resilient KVM (Root Instance)
-config = virtual-hard-drive-url virtual-hard-drive-md5sum resiliency-backup-periodicity
+{% set cluster_parameter_dict = slapparameter_dict.get('cluster', {}) -%}
+config = virtual-hard-drive-url virtual-hard-drive-md5sum resiliency-backup-periodicity {{ cluster_parameter_dict.keys() | join(' ') }}
+{% for key, value in cluster_parameter_dict.items() -%}
+config-{{ key }} = {{ dumps(value) }}
+{% endfor -%}
 config-virtual-hard-drive-url = ${slap-parameter:virtual-hard-drive-url}
 config-virtual-hard-drive-md5sum = ${slap-parameter:virtual-hard-drive-md5sum}
 config-resiliency-backup-periodicity = */5
 # We don't use url parameter, but we want it to be there to make sure root instance is ready.
 return = url
 # XXX What to do?
-#sla = instance_guid
-#sla-instance_guid = ${slap-parameter:frontend-instance-guid}
+sla = computer_guid
+sla-computer_guid = ${slap-connection:computer-id}

 [slap-parameter]
 virtual-hard-drive-url = https://softinst43236.host.vifib.net/data/public/8e2138.php?dl=true
......
@@ -13,7 +13,7 @@ parts +=
 {{ parts.replicate("kvm", "3") }}
   publish-connection-informations

-{{ replicated.replicate("kvm", "3", "kvm-export", "kvm-import") }}
+{{ replicated.replicate("kvm", "3", "kvm-export", "kvm-import", slapparameter_dict=slapparameter_dict) }}

 # Bubble down the parameters of the requested instance to the user
 [request-kvm]
......
@@ -34,6 +34,7 @@ cert = $${slap-connection:cert-file}
 recipe = slapos.recipe.template:jinja2
 template = ${template-kvm-resilient:location}/instance-kvm-resilient.cfg.jinja2
 rendered = $${buildout:directory}/template-kvm-resilient.cfg
+extensions = jinja2.ext.do
 context =
   key develop_eggs_directory buildout:develop-eggs-directory
   key eggs_directory buildout:eggs-directory
......
@@ -48,53 +48,58 @@ signature-certificate-list =
   -----END CERTIFICATE-----

 [versions]
-Werkzeug = 0.9.3
-apache-libcloud = 0.13.0
+Werkzeug = 0.9.4
+apache-libcloud = 0.13.2
 async = 0.6.1
 buildout-versions = 1.7
+erp5.util = 0.4.36
 gitdb = 0.5.4
-itsdangerous = 0.22
+itsdangerous = 0.23
 lxml = 3.2.3
 meld3 = 0.6.10
 plone.recipe.command = 1.1
+psutil = 1.1.0
 pycrypto = 2.6
 rdiff-backup = 1.0.5
-slapos.cookbook = 0.79
+slapos.cookbook = 0.84
 slapos.recipe.cmmi = 0.2
 slapos.recipe.download = 1.0.dev-r4053
-slapos.recipe.template = 2.4.2
-slapos.toolbox = 0.35.0
+slapos.toolbox = 0.37
 smmap = 0.8.2
 websockify = 0.5.1
 z3c.recipe.scripts = 1.0.1

 # Required by:
 # slapos.core==0.35.1
-# slapos.toolbox==0.35.0
+# slapos.toolbox==0.37
 Flask = 0.10.1

 # Required by:
-# slapos.toolbox==0.35.0
+# slapos.toolbox==0.37
 GitPython = 0.3.2.RC1

 # Required by:
-# slapos.toolbox==0.35.0
+# slapos.toolbox==0.37
 atomize = 0.1.1

 # Required by:
-# slapos.toolbox==0.35.0
+# paramiko==1.12.0
+ecdsa = 0.8
+
+# Required by:
+# slapos.toolbox==0.37
 feedparser = 5.1.3

 # Required by:
-# slapos.cookbook==0.79
+# slapos.cookbook==0.84
 inotifyx = 0.2.0-1

 # Required by:
-# slapos.cookbook==0.79
+# slapos.cookbook==0.84
 lock-file = 2.0

 # Required by:
-# slapos.cookbook==0.79
+# slapos.cookbook==0.84
 netaddr = 0.7.10

 # Required by:
@@ -106,37 +111,33 @@ netifaces = 0.8-1
 numpy = 1.7.1

 # Required by:
-# slapos.toolbox==0.35.0
-paramiko = 1.11.0
+# slapos.toolbox==0.37
+paramiko = 1.12.0

-# Required by:
-# slapos.toolbox==0.35.0
-psutil = 1.0.1
-
 # Required by:
 # slapos.core==0.35.1
 pyflakes = 0.7.3

 # Required by:
-# slapos.cookbook==0.79
-pytz = 2013b
+# slapos.cookbook==0.84
+pytz = 2013d

 # Required by:
-# slapos.cookbook==0.79
-# slapos.toolbox==0.35.0
+# slapos.cookbook==0.84
+# slapos.toolbox==0.37
 slapos.core = 0.35.1

 # Required by:
 # slapos.core==0.35.1
-supervisor = 3.0b2
+supervisor = 3.0

 # Required by:
 # slapos.core==0.35.1
 unittest2 = 0.5.1

 # Required by:
-# slapos.cookbook==0.79
-# slapos.toolbox==0.35.0
+# slapos.cookbook==0.84
+# slapos.toolbox==0.37
 xml-marshaller = 0.9.7

 # Required by:
......
@@ -3,5 +3,5 @@ DISK_PATH=${:disk-path}
 BACKUP_PATH=${:backup-disk-path}

 # TODO: Use rdiff
-rm $DISK_PATH && \
+rm $DISK_PATH
 cp $BACKUP_PATH $DISK_PATH
@@ -94,7 +94,7 @@ mode = 0644
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/instance-runner-export.cfg.in
 output = ${buildout:directory}/instance-runner-export.cfg
-md5sum = 7e71622c09271790b5cef21c8613b8ac
+md5sum = 9ce3482e64a0c957f7a5f25ad0dc03ae
 mode = 0644

 [template-resilient]
@@ -107,7 +107,7 @@ mode = 0644
 [template-resilient-test]
 recipe = slapos.recipe.download
 url = ${:_profile_base_location_}/instance-resilient-test.cfg.jinja2
-#md5sum = 0ee2cea5239278a8c1572d7a04798fdc
+md5sum = ac772d3a1cce4072acfabd563df449bb
 filename = instance-resilient-test.cfg.jinja2
 mode = 0644
......
@@ -39,8 +39,7 @@ git-executable = ${git:location}/bin/git
 [erp5.util-repository]
 recipe = slapos.recipe.build:gitclone
 repository = http://git.erp5.org/repos/erp5.git
-#branch = scalability-master2
-revision = f10da882ab5e1dc03a812f3d0e7390dc8da2b59
+branch = scalability-master2
 git-executable = ${git:location}/bin/git

 #[slapos.core-repository]
......
@@ -29,7 +29,7 @@ command-line = {{ bin_directory }}/runResiliencyTest ${:testnode-parameters} ${:
 wrapper-path = ${directory:scripts}/runResiliencyTestSuite

 [deploy-standalone-resiliency-test]
-# Used to manually run the KVM test if we don't have a running testnode.
+# Used to manually run the resilient test if we don't have a running testnode.
 recipe = slapos.cookbook:wrapper
 test-suite-title = slaprunner
 command-line = {{ bin_directory }}/runStandaloneResiliencyTest --test-suite-title=${:test-suite-title} ${deploy-resiliency-test:test-parameters}
@@ -41,7 +41,11 @@ recipe = slapos.cookbook:request
 software-url = ${slap-connection:software-release-url}
 software-type = resilient
 name = Resilient Instance (Root Instance)
-config = resiliency-backup-periodicity frontend-domain cloud9-frontend-domain
+{% set cluster_parameter_dict = slapparameter_dict.get('cluster', {}) -%}
+config = resiliency-backup-periodicity frontend-domain cloud9-frontend-domain {{ cluster_parameter_dict.keys() | join(' ') }}
+{% for key, value in cluster_parameter_dict.items() -%}
+config-{{ key }} = {{ dumps(value) }}
+{% endfor -%}
 config-resiliency-backup-periodicity = *
 # XXX hardcoded
 config-frontend-domain = google.com
......
@@ -22,7 +22,7 @@ parts +=
   symlinks
   node-frontend-promise
   nginx-promise
-  urls
+  publish-connection-informations
   cron-entry-backup

 [exporter]
@@ -36,11 +36,5 @@ rsync-binary = ${rsync:location}/bin/rsync

 # Extends publish section with resilient parameters
-[urls]
+[publish-connection-informations]
 <= resilient-publish-connection-parameter
-backend_url = $${slaprunner:access-url}
-url = https://$${request-frontend:connection-domain}
-cloud9_backend_url = $${node-frontend:access-url}
-cloud9_url = https://$${request-cloud9-frontend:connection-domain}
-ssh_command = ssh $${dropbear-runner-server:host} -p $${dropbear-runner-server:port}
-password_recovery_code = $${recovery-code:passwd}
* Report, from pbs and from clone, when a backup failed
* Make sure, when a takeover is done, that "importer" script finishes to run while importer instance is changed into exporter.
* Test that, after a successful backup/takeover, another backup is possible and will be successful.
* PBSs and mirrors should monitor/replace themselves
* Report errors from backup
* If a PBS master is down and then back again, it might want to participate in the ongoing election, then.. what happens?
* If the network is partitioned (the two backups don't see each other, but each can see the slapos master) there will be two concurrent elections taking place, with two winners and two renames.
* How to know that a backup is working? define "check that it works". Does it deploys? But then, how to ensure data integrity? By application?
* How to ensure "synchronization" between two main instances? example: Wordpress: mysql is down, then replaced, then inconsistency between apache and the new mysql
* How to deal with big data? I.e how to have working backup/restore system of 1TB data with slow connection?
* How to be sure that elected importer contains a/ the latest data and b/ has finished to pull. We should prevent importer not having a/ and b/ to become the main.
* How to say "I don't want this instance to be here" + Be able to define "here". Allows to automate deployment of PBS and backup instances
* Should we crypt backed up data?
* If a PBS is lost, a new PBS should be created from another one, in order ot keep history
......
@@ -30,7 +30,7 @@ parts =
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/pbsready.cfg.in
 output = ${buildout:directory}/pbsready.cfg
-md5sum = 9f4212a79f10bee8f6d75061943110e2
+md5sum = 570e0b54c97d510befa2ea981c1e90e0
 mode = 0644

 [pbsready-import]
@@ -39,7 +39,7 @@ mode = 0644
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/pbsready-import.cfg.in
 output = ${buildout:directory}/pbsready-import.cfg
-md5sum = 3c2e73f49abdc52282fc045e6d91f3e9
+md5sum = cc9c776500ccd07cb51969beb68ffcda
 mode = 0644

 [pbsready-export]
@@ -48,20 +48,20 @@ mode = 0644
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/pbsready-export.cfg.in
 output = ${buildout:directory}/pbsready-export.cfg
-md5sum = 5e27c391ceafb6a58032f1f87fba7826
+md5sum = 25d05b3929fb4c6cf275866bad678d6a
 mode = 0644

 [template-pull-backup]
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/instance-pull-backup.cfg.in
 output = ${buildout:directory}/instance-pull-backup.cfg
-md5sum = c67a9dad66490ae264f9e7003521bf59
+md5sum = c49e5911b94078d87b94507fb4efc93b
 mode = 0644

 [template-replicated]
 recipe = slapos.recipe.download
 url = ${:_profile_base_location_}/template-replicated.cfg.in
-#md5sum = 9e20f283bf709c63c9c6692d5e1f8972
+md5sum = c762a625f65193bc8a570b4d56a0d08c
 mode = 0644
 destination = ${buildout:directory}/template-replicated.cfg.in
......
@@ -75,7 +75,7 @@ callbacks = $${directory:notifier-callbacks}
 id-file = $${rootdirectory:etc}/notifier.id
 equeue-socket = $${equeue:socket}
 host = $${slap-network-information:global-ipv6}
-port = 8080
+port = 8088
 wrapper = $${basedirectory:services}/notifier
 server-binary = ${buildout:bin-directory}/pubsubserver
 notifier-binary = ${buildout:bin-directory}/pubsubnotifier
@@ -105,6 +105,7 @@ promises-directory = $${basedirectory:promises}
 directory = $${directory:pbs-backup}
 cron-entries = $${cron:cron-entries}
 wrappers-directory = $${directory:pbs-wrappers}
+run-directory = $${basedirectory:run}
 # XXX: this should be named "notifier-host"
 notifier-url = http://[$${notifier:host}]:$${notifier:port}
 slave-instance-list = $${slap-parameter:slave_instance_list}
......
@@ -25,6 +25,12 @@ home = $${buildout:directory}
 var = $${:home}/var
 pid = $${:var}/pid

+# Define port of ssh server. It has to be different from import so that it
+# supports export/import using same IP (slaprunner, slapos-in-partition,
+# ipv4...)
+[dropbear-server]
+port = 22221
+
 [resilient-publish-connection-parameter]
 notification-id = http://[$${notifier:host}]:$${notifier:port}/get/$${notifier-exporter:name}
......
@@ -23,6 +23,12 @@ parts =
 [resilient-publish-connection-parameter]
 notification-url = http://[$${notifier:host}]:$${notifier:port}/notify

+# Define port of ssh server. It has to be different from import so that it
+# supports export/import using same IP (slaprunner, slapos-in-partition,
+# ipv4...)
+[dropbear-server]
+port = 22220
+
 [import-on-notification]
 # notifier.callback runs a script when a notification (sent by a parent PBS)
 # is received
......
@@ -213,7 +213,8 @@ wrapper = $${basedirectory:services}/sshd
 [dropbear-server]
 recipe = slapos.cookbook:dropbear
 host = $${slap-network-information:global-ipv6}
-port = 2222
+# Explicitely excludes to define "port" argument. It will be defined in
+# pbs-ready-import.cfg.in and pbs-ready-export.cfg.in
 home = $${directory:ssh}
 wrapper = $${rootdirectory:bin}/raw_sshd
 shell = $${rdiff-backup-server:wrapper}
......
@@ -79,7 +79,7 @@ software-url = ${slap-connection:software-release-url}
 software-type = {{typeimport}}
 return = ssh-public-key ssh-url notification-url ip

-pbs-notification-id = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-push
+pbs-notification-id = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-{{id}}-push

 config = number authorized-key on-notification ip-list namebase
 config-number = {{id}}
@@ -133,8 +133,6 @@ config-ip-list = ${request-{{namebase}}:connection-ip}{% for j in range(1,nbback
 ## Having 3 backups pulling from the same PBS provides
 ##only availability, not resiliency
-## WARNING : SLAVES ARE ALLOCATED AT RANDOM, THIS NEEDS TO BE FIXED.

 [request-pbs-common]
 <= slap-connection
 recipe = slapos.cookbook:request
@@ -173,15 +171,16 @@ sla-{{ key }} = {{ value }}
 [request-pull-backup-server-{{namebase}}-{{id}}]
 <= request-pbs-common
 name = PBS {{id}} pulling from ${request-{{namebase}}:name}
-config = url name type server-key on-notification notify notification-id title
+config = url name type server-key on-notification notify notification-id title remove-backup-older-than
 config-url = ${request-{{namebase}}:connection-ssh-url}
+config-name = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-{{id}}
 config-type = pull
 config-server-key = ${request-{{namebase}}:connection-ssh-public-key}
 config-on-notification = ${request-{{namebase}}:connection-notification-id}
 config-notify = ${request-pbs-{{namebase}}-{{id}}:connection-notification-url}
 config-notification-id = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-{{id}}-pull
-config-name = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-{{id}}
 config-title = Pulling from {{namebase}}
+config-remove-backup-older-than = {{ slapparameter_dict.get('remove-backup-older-than', '3B') }}
 slave = true
 sla = instance_guid
 sla-instance_guid = ${request-pbs-{{namebase}}-{{id}}:instance_guid}
@@ -191,12 +190,12 @@ sla-instance_guid = ${request-pbs-{{namebase}}-{{id}}:instance_guid}
 name = PBS pushing on ${request-{{namebase}}-pseudo-replicating-{{id}}:name}
 config = url name type server-key on-notification notify notification-id title
 config-url = ${request-{{namebase}}-pseudo-replicating-{{id}}:connection-ssh-url}
+config-name = ${request-pull-backup-server-{{namebase}}-{{id}}:config-name}
 config-type = push
 config-server-key = ${request-{{namebase}}-pseudo-replicating-{{id}}:connection-ssh-public-key}
 config-on-notification = ${request-pbs-{{namebase}}-{{id}}:connection-feeds-url}${request-pull-backup-server-{{namebase}}-{{id}}:config-notification-id}
 config-notify = ${request-{{namebase}}-pseudo-replicating-{{id}}:connection-notification-url}
 config-notification-id = ${request-{{namebase}}-pseudo-replicating-{{id}}:pbs-notification-id}
-config-name = ${slap-connection:computer-id}-${slap-connection:partition-id}-{{namebase}}-{{id}}
 config-title = Pushing to {{namebase}} backup {{id}}
 slave = true
 sla = instance_guid
......
@@ -12,6 +12,7 @@ extensions +=
 # Use shacache and lxml
 extends =
   ../component/lxml-python/buildout.cfg
+  ../component/python-openssl/buildout.cfg

 # Separate from site eggs
 allowed-eggs-from-site-packages =
@@ -59,6 +60,7 @@ networkcache-section = networkcache
 recipe = zc.recipe.egg
 eggs =
   ${lxml-python:egg}
+  ${python-openssl:egg}
   slapos.cookbook
   cliff
   hexagonit.recipe.download
......