#!{{ shell_binary }}

# Restore a SlapOS WebRunner from its backup directory (template rendered
# by buildout; {{ ... }} placeholders are filled at instantiation time).

LC_ALL=C
export LC_ALL

# Everything created during the restore is private to this user.
umask 077

# Exit on any error, to prevent inconsistent backup
# Error on unset variable expansion
set -eu

# Redirect output to log (stdout is tee'd to the log file, stderr follows stdout)
exec > >(tee -ai '{{ output_log_file }}')
exec 2>&1

echo -e "\n\n$0 run at : $(date)"

# Directories rendered by buildout.
srv_directory='{{ directory["srv"] }}'
backup_directory='{{ directory["backup"] }}'
etc_directory='{{ directory["etc"] }}'

# Status files read by the promise/monitor: overall exit code plus the
# human-readable message of the step that failed (if any).
RESTORE_EXIT_CODE_FILE='{{ restore_exit_code_file }}'
RESTORE_ERROR_MESSAGE_FILE='{{ restore_error_message_file }}'
# Name of the step currently running; updated by log_message below.
ERROR_MESSAGE=""

# ERR-trap handler: record a failing status and the name of the step that
# was running (ERROR_MESSAGE), then abort the restore.
fail_with_exit_code () {
  echo 1 > "$RESTORE_EXIT_CODE_FILE"
  echo -e "Failure during step : $ERROR_MESSAGE" > "$RESTORE_ERROR_MESSAGE_FILE"
  exit 1
}
trap fail_with_exit_code ERR

# Log a progress message and remember it as the current step name, so that
# fail_with_exit_code can report which step failed.
log_message () {
    ERROR_MESSAGE=$1
    # Quoted so inner whitespace is preserved and glob characters in the
    # message are not expanded (the original unquoted $1 word-split).
    echo -e "$1"
}
# Delete the error message file, to not keep it even after a successful build
rm "$RESTORE_ERROR_MESSAGE_FILE" || true

# Wrapper that deliberately shadows plain "rsync": always use the
# buildout-provided binary with a fixed option set (-rlptgo = recursive,
# preserve symlinks/perms/times/group/owner; -v verbose; --stats;
# --safe-links to ignore symlinks pointing outside the tree; --delete to
# mirror removals), and trace the exact command line via set -x for the log.
rsync () {
  set -x
  '{{ rsync_binary }}' -rlptgov --stats --safe-links --delete "$@"
  set +x
}

log_message "Restoring WebRunner content..."
(
  # XXX: code duplication with runner-export.sh.jinja2
  path=$srv_directory/runner
  backup_path=$backup_directory/runner/
  cd "$backup_path"

  if [ -d instance ]; then
    # Concatenate the exclude file of each partition of webrunner
    # to create a global exclude file.
    # Also, ignore all buildout-managed files.
    exclude=$({{ sys.executable }} - "$path" <<EOF
if 1:
        import glob, errno, os, sys
        sys.path[:0] = {{ repr(easy_install.buildout_and_setuptools_path) }}
        from zc.buildout.configparser import parse
        path = sys.argv[1]

        def print_relative(path_list):
            for p in path_list:
                p = p.strip()
                if p:
                    print(os.path.relpath(p, path))
        print("*.sock")
        print("*.socket")
        print("*.pid")
        print(".installed*.cfg")
        for partition in glob.glob(path + "/instance/slappart*"):
            os.chdir(partition)
            try:
                with open("srv/exporter.exclude") as f:
                    exclude = f.readlines()
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise
            else:
                print_relative(exclude)
            for installed in glob.glob(".installed*.cfg"):
                try:
                    with open(installed) as f:
                        installed = parse(f, installed)
                except IOError as e:
                    if e.errno != errno.ENOENT:
                        raise
                else:
                    # NOTE(review): itervalues() is Python 2 only — confirm
                    # the interpreter rendered above is a Python 2.
                    for section in installed.itervalues():
                        print_relative(section.get(
                            '__buildout_installed__', '').splitlines())
EOF
)
    echo "$exclude" |rsync --exclude-from=- instance "$path"
  fi

  # NOTE(review): if proxy.db is absent, the last "test -f && rsync" makes
  # this subshell exit non-zero and trips the ERR trap — presumably the
  # backup always contains proxy.db at this point; confirm.
  test -d project  && rsync project "$path"
  test -d public  && rsync public "$path"
  test -f proxy.db && rsync proxy.db "$path"
)

log_message "Restoring WebRunner config (etc directory)..."
(
  cd "$backup_directory"/etc/
  rsync config.json "$etc_directory"
  # Hidden files are related to the webrunner's internals
  cp -r .??* "$etc_directory"
)

# Invoke arbitrary script to perform specific restoration
# procedure.
runner_import_restore=$srv_directory/runner-import-restore
if [ -x "$runner_import_restore" ]; then
  log_message "Running $runner_import_restore..."
  "$runner_import_restore"
fi

# If no "etc/.project" neither "srv/runner/proxy.db", we can safely assume
# that there is no instance deployed on runner0.
# (two [ ] tests joined with && instead of the obsolescent -a operator)
if [ ! -f "$etc_directory/.project" ] && [ ! -f "$srv_directory/runner/proxy.db" ]; then
  log_message "No Software Requested... Writing status file... End"
  echo 0 > "$RESTORE_EXIT_CODE_FILE"
  exit 0
fi

log_message "Updating slapproxy database..."
HOME='{{ directory["home"] }}'
# XXX Hardcoded
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
export MAKEFLAGS=-j4
SLAPOS='{{ directory["bin"] }}'/slapos
# XXX hardcoded
SQLITE3="$HOME/software_release/parts/sqlite3/bin/sqlite3"
DATABASE="$HOME/srv/runner/proxy.db"
# Run one SQL statement (passed as a single quoted argument) against the
# slapproxy database via the sqlite3 CLI.
db_query () {
  # Try opening locked tables for 5 seconds to prevent "database is locked" error
"$SQLITE3" "$DATABASE" <<EOF
.timeout 5000
$@
EOF
}
# If slapproxy database is empty then no software release was opened
if [ ! -s "$DATABASE" ]; then
  log_message "Slapproxy database empty, no Software Requested... Writing status file... End"
  echo 0 > "$RESTORE_EXIT_CODE_FILE"
  exit 0
fi
# Change slapproxy database to point instances to new software release
# XXX hardcoded
PARTITION=$(basename "$HOME")
OLD_SOFTWARE_RELEASE=$(db_query "select software_release from partition11 where reference='slappart0';")
# Rewrite the partition segment (/slappartN/ or /test0-N/) of the old URL
# to point at the current partition.
SOFTWARE_RELEASE=$(echo "$OLD_SOFTWARE_RELEASE" | sed 's/\/\(slappart\|test0-\)[0-9][0-9]*\//\/'"$PARTITION"'\//')
db_query "update partition11 set software_release='$SOFTWARE_RELEASE' where software_release NOT NULL;"
db_query "update software11 set url='$SOFTWARE_RELEASE' where url='$OLD_SOFTWARE_RELEASE';" || db_query "delete from software11 where url='$OLD_SOFTWARE_RELEASE';"
# Change slapproxy database to have all instances stopped
db_query "update partition11 set requested_state='stopped';"
# Change slapproxy database to get correct IPs
IPV4='{{ ipv4 }}'
IPV6='{{ ipv6 }}'
db_query "update partition_network11 set address='$IPV4' where netmask='255.255.255.255';"
db_query "update partition_network11 set address='$IPV6' where netmask='ffff:ffff:ffff::';"

MASTERURL='http://{{ ipv4 }}:{{ proxy_port }}'

log_message "Removing old supervisord service description files..."
# XXX: Path hardcoded in slapos.core
rm '{{ instance_folder }}'/etc/supervisord.conf.d/* || true

SLAPOSCFG='{{ supervisord["slapos-cfg"] }}'
SLAPGRIDSRLOG='{{ supervisord["slapgrid-sr-log"] }}'
SLAPGRIDCPLOG='{{ supervisord["slapgrid-cp-log"] }}'

# Check whether at least one software release is registered in the proxy;
# if none, there is nothing to rebuild and the restore ends successfully.
contain_software_release=0
SOFTWARE_RELEASES_COUNT=$("$SQLITE3" "$DATABASE" 'SELECT count(1) FROM software11 WHERE url != "";')
if [ "$SOFTWARE_RELEASES_COUNT" -gt 0 ]; then
  contain_software_release=1
fi

if [ $contain_software_release -eq 0 ]; then
  log_message "No Software Release were deployed, so skip to continue..."
  echo 0 > "$RESTORE_EXIT_CODE_FILE"
  exit 0
fi

log_message "Building newest Software Release..."
# Up to 3 attempts; on final failure show the tail of the build log and
# force a non-zero status (the "&& false") so the ERR trap fires.
"$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 ||
"$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 ||
"$SLAPOS" node software --cfg "$SLAPOSCFG" --all --master-url="$MASTERURL" --logfile "$SLAPGRIDSRLOG" >/dev/null 2>&1 ||
(tail -n 200 "$SLAPGRIDSRLOG" && false)

# Detect whether any partition actually holds a deployed instance
# (presence of a buildout.cfg in at least one slappart).
contain_instance=0
for folder in "$srv_directory"/runner/instance/slappart*/; do
  if [ -f "$folder/buildout.cfg" ]; then
    contain_instance=1
  fi
done

# If instance do not contains template.cfg it means the user contains no instance.
# so it is safer to assume that he is using slaprunner for develop buildout rather them slapos.
if [ $contain_instance -eq 0 ]; then
  log_message "None Instance were deployed with this software release, so skip to continue..."
  echo 0 > "$RESTORE_EXIT_CODE_FILE"
  exit 0
fi

# Remove defined scripts to force buildout to recreate them to have updated paths
rm "$srv_directory"/runner/instance/slappart*/srv/runner-import-restore || true
log_message "Fixing Instances as needed after import..."
# XXX hardcoded
# --master-url quoted for consistency with the "node software" calls above.
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
"$SLAPOS" node instance --cfg "$SLAPOSCFG" --master-url="$MASTERURL" --logfile "$SLAPGRIDCPLOG" >/dev/null 2>&1 ||
(tail -n 200 "$SLAPGRIDCPLOG" && false)

# Invoke defined scripts for each partition inside of slaprunner
log_message "Invoke custom import scripts defined by each instances..."
for partition in "$srv_directory"/runner/instance/slappart*/
do
  script=$partition/srv/runner-import-restore
  if [ -x "$script" ]; then
    log_message "Running custom instance script : $script..."
    "$script"
  fi
done

# Change back slapproxy database to have all instances started
log_message "Set instances as to start after takeover..."
db_query "update partition11 set requested_state='started';"

# Write exit code to an arbitrary file that will be checked by promise/monitor
log_message "Writing status file... End"
echo 0 > "$RESTORE_EXIT_CODE_FILE"
exit 0