Kirill Smelkov / neo / Commits / fc261214

Commit fc261214 authored Sep 18, 2017 by Kirill Smelkov
.

parent 104985ea
Showing 3 changed files with 48 additions and 49 deletions (+48 -49):

    go/neo/t/t.sh       +46  -47
    go/neo/t/zhash.go    +1   -1
    go/neo/t/zhash.py    +1   -1
go/neo/t/t.sh  (view file @ fc261214)
 #!/bin/bash -e
 # run tests and benchmarks against FileStorage, ZEO and various NEO/py{sql,sqlite}, NEO/go clusters
+# XXX neo/go - must be `go install'ed`
+# XXX use `go run ...` so it does not need go install?
+# XXX neo/py, wendelin.core, ... - must be pip install'ed
+# XXX neo/py: run via relative path to neomaster? (../../neo/neomaster) so we do not need to `pip install -e` ?

 # port allocations
-Abind=127.0.0.1:5551
-Mbind=127.0.0.1:5552
-Sbind=127.0.0.1:5553
-Zbind=127.0.0.1:5554
+# XXX 127.0.0.1 -> `hostname`? (or `hostname -i`)? or use @addr option? (-> -bindif=...)
+Abind=127.0.0.1:5551	# NEO admin
+Mbind=127.0.0.1:5552	# NEO master
+Zbind=127.0.0.1:5553	# ZEO
+# NEO storage. bind not strictly needed but we make sure no 2 storages are
+# started at the same time
+Sbind=127.0.0.1:5554

 # disk allocation
 log=`pwd`/log; mkdir -p $log
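As an aside, one way to verify that these four ports are actually free before spawning a cluster (assuming iproute2's `ss` is available; this check is not part of the script):

	ss -tln | grep -E '127\.0\.0\.1:555[1-4]' || echo "ports 5551-5554 are free"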
@@ -48,16 +56,14 @@ kill $j' EXIT

 # M{py,go} ...	- spawn master
 Mpy() {
-	# XXX run via relative path to neomaster? (../../neo/neomaster) so we do not need to `pip install -e` ?
-	# XXX --autostart=1 ?
+	# --autostart=1
 	exec -a Mpy \
 		neomaster --cluster=$cluster --bind=$Mbind --masters=$Mbind -r 1 -p 1 \
 		--logfile=$log/Mpy.log "$@" &
 }

 Mgo() {
 	exec -a Mgo \
-		neo --log_dir=$log master -cluster=$cluster -bind=$Mbind &
+		neo --log_dir=$log master -cluster=$cluster -bind=$Mbind "$@" &
 }

 # Spy ...	- spawn NEO/py storage
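Aside: bash's `exec -a NAME` sets argv[0] of the program being exec'ed, so the daemons spawned above show up as Mpy, Mgo, Spy, ... in process listings, and the trailing `&` makes each exec run in a background subshell. A quick illustration with a stand-in command:

	$ (exec -a Mpy sleep 100) &
	$ ps -o args= -p $!
	Mpy 100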
@@ -71,8 +77,6 @@ Spy() {

 # Sgo <data.fs>	- spawn NEO/go storage
 Sgo() {
-	# XXX use `go run ...` so it does not need go install?
 	# -alsologtostderr
 	# -cpuprofile cpu.out
 	# -trace trace.out
@@ -80,7 +84,6 @@ Sgo() {
 		neo -log_dir=$log storage -cluster=$cluster -bind=$Sbind -masters=$Mbind "$@" &
 }

 # Apy ...	- spawn NEO/py admin
 Apy() {
 	exec -a Apy \
@@ -110,7 +113,7 @@ NEOpylite() {
 	Apy
 }

-# spawn neo/py cluster working on mariadb
+# spawn NEO/py cluster working on mariadb
 NEOpysql() {
 	MDB
 	sleep 1	# XXX fragile
@@ -188,8 +191,8 @@ EOF
 export WENDELIN_CORE_ZBLK_FMT=ZBlk1

 # XXX 32 temp - raise
-#work=32	# array size generated (MB)
-work=64
+work=32	# array size generated (MB)
+#work=64
 #work=512	# array size generated (MB)

 # generate data in data.fs
@@ -273,13 +276,16 @@ sync
 go build -o zhash_go zhash.go

 # run benchmarks
-N=`seq 1`	# XXX repeat benchmarks N time
-#hashfunc=sha1
-#hashfunc=adler32
-#hashfunc=crc32
-hashfunc=null
+Nrun=4	# repeat benchmarks N time
+Npar=8	# run so many parallel clients in parallel phase
+
+# nrun ...	- run ... Nrun times
+nrun() {
+	for i in `seq $Nrun`; do
+		"$@"
+	done
+}

 # runpar ...	- run several program instances in parallel
 runpar() {
 	local jobv
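For context, `nrun` simply re-executes its argument vector $Nrun times, so benchmark repetition now lives in this driver script rather than inside zhash itself. A quick sanity check with a stand-in command (not part of the change):

	$ Nrun=3 nrun echo ping
	ping
	ping
	ping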
@@ -290,12 +296,17 @@ runpar() {
 	wait $jobv
 }

-# bench1 <url>	- run benchmarks on the URL once
-bench1() {
+#hashfunc=sha1
+#hashfunc=adler32
+#hashfunc=crc32
+hashfunc=null
+
+# bench <url>	- run benchmarks against URL
+bench() {
 	url=$1
-	# time demo-zbigarray read $url
-	./zhash.py --$hashfunc $url
+	#nrun time demo-zbigarray read $url
+	nrun ./zhash.py --$hashfunc $url
 	# echo -e "\n# ${Npar} clients in parallel"
 	# runpar ./zhash.py --$hashfunc $url
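The body of `runpar` is elided by the diff; only `local jobv` and the final `wait $jobv` are visible. Under the assumption that it mirrors that scaffolding, a minimal sketch of such a helper (not the actual code) would be:

	runpar() {
		local jobv
		for i in `seq $Npar`; do
			"$@" &
			jobv="$jobv $!"
		done
		wait $jobv
	}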
@@ -304,62 +315,50 @@ bench1() {
 		return
 	fi
 	echo
-	bench1_go $url
+	bench_go $url
 }

-# go-only part of bench1
-bench1_go() {
+# go-only part of bench
+bench_go() {
 	url=$1
-	./zhash_go --log_dir=$log -$hashfunc $url
-	# ./zhash_go --log_dir=$log -$hashfunc -useprefetch $url
+	nrun ./zhash_go --log_dir=$log -$hashfunc $url
+	#nrun ./zhash_go --log_dir=$log -$hashfunc -useprefetch $url
 	# echo -e "\n# ${Npar} clients in parallel"
 	# runpar ./zhash_go --log_dir=$log -$hashfunc $url
 }

 echo -e "\n*** FileStorage"
-for i in $N; do
-	bench1 $fs1/data.fs
-done
+bench $fs1/data.fs

 echo -e "\n*** ZEO"
 Zpy $fs1/data.fs
-for i in $N; do
-	bench1 zeo://$Zbind
-done
+bench zeo://$Zbind
 killall runzeo
 wait

 echo -e "\n*** NEO/py sqlite"
 NEOpylite
-for i in $N; do
-	bench1 neo://$cluster@$Mbind
-done
+bench neo://$cluster@$Mbind
 xneoctl set cluster stopping
 wait

 echo -e "\n*** NEO/py sql"
 NEOpysql
-for i in $N; do
-	bench1 neo://$cluster@$Mbind
-done
+bench neo://$cluster@$Mbind
 xneoctl set cluster stopping
 xmysql -e "SHUTDOWN"
 wait

 echo -e "\n*** NEO/go"
 NEOgo
-for i in $N; do
-	bench1 neo://$cluster@$Mbind
-done
+bench neo://$cluster@$Mbind
 xneoctl set cluster stopping
 wait

 echo -e "\n*** NEO/go (sha1 disabled)"
 X_NEOGO_SHA1_SKIP=y NEOgo
-for i in $N; do
-	X_NEOGO_SHA1_SKIP=y bench1_go neo://$cluster@$Mbind
-done
+X_NEOGO_SHA1_SKIP=y bench_go neo://$cluster@$Mbind
 xneoctl set cluster stopping
 wait
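Taken together with the zhash changes below, the 4x repetition hard-coded inside zhash.go/zhash.py moves out into this script, where `nrun` drives it. For example, with hashfunc=null as set above, each storage URL now effectively gets (hypothetical direct invocation):

	nrun ./zhash.py --null neo://$cluster@$Mbind
	nrun ./zhash_go --log_dir=$log -null neo://$cluster@$Mbind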
go/neo/t/zhash.go  (view file @ fc261214)
@@ -162,7 +162,7 @@ func zhash(ctx context.Context, url string, h hasher, useprefetch bool) (err err
 		//defer profile.Start(profile.CPUProfile).Stop()
 	}

-	for qqq := 0; qqq < 4; qqq++ {
+	for qqq := 0; qqq < 1; qqq++ {
 		tstart := time.Now()

 		h.Reset()	// XXX temp
go/neo/t/zhash.py  (view file @ fc261214)
@@ -103,7 +103,7 @@ def main():
     last_tid = stor.lastTransaction()
     before = p64(u64(last_tid) + 1)

-    for zzz in range(4):
+    for zzz in range(1):
         tstart = time()

         # vvv h.reset() XXX temp