Commit f696b55f authored by Joanne Hugé

Merge branch 'master' into packet-exchange

parents feebe168 7aa27106
#!/usr/bin/env python3
import statistics
import json
import markdown_table
import argparse
import os
import parse
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
class MeasureSetHandler:
measures_dir = "measures"
graphs_dir = "{}/graphs".format(measures_dir)
measure_sets_file_name = "measure_sets.json"
measure_sets_path = measures_dir + "/" + measure_sets_file_name
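# On-disk layout managed by this class:
#   measures/measure_sets.json  - index per measure type, e.g. {"cyclictest_wake-up_latency": {"ids": [25, 26], "next_id": 28}}
#   measures/<type><id>.json    - one exported MeasureSet per id listed in the index
#   measures/graphs/            - PNG graphs generated from the measure sets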
def __init__(self):
with open(MeasureSetHandler.measure_sets_path) as measure_sets_file:
self.measure_sets = json.load(measure_sets_file)
def __str__(self):
s = "List of measures:\n\n"
for measure_set in self.measure_sets:
s += " {} : {}".format(measure_set, self.measure_sets[measure_set])
return s
def save(self):
remove_list = []
for mtype in self.measure_sets:
if len(self.measure_sets[mtype]['ids']) == 0:
remove_list.append(mtype)
for mtype in remove_list:
del self.measure_sets[mtype]
with open(MeasureSetHandler.measure_sets_path, 'w') as measure_sets_file:
json.dump(self.measure_sets, measure_sets_file)
def get_measure_set(self, measure_name):
measure_set = MeasureSet()
measure_path = "{}/{}.json".format(self.measures_dir, measure_name)
measure_set.import_from_json(measure_path, False)
return measure_set
def add_measure_set(self, measure_set):
mtype = measure_set.measure_type
if mtype not in self.measure_sets:
self.measure_sets[mtype] = {'ids': [], 'next_id': 0}
next_id = self.measure_sets[mtype]['next_id']
measure_file_name = "{}/{}{}.json".format(MeasureSetHandler.measures_dir, mtype, next_id)
measure_set.export_to_json(measure_file_name)
self.measure_sets[mtype]['ids'].append(next_id)
self.measure_sets[mtype]['next_id'] += 1
self.save()
print("Saved measure as {}{}".format(mtype, next_id))
def remove_measure_set(self, mtype, mid):
if mtype in self.measure_sets and len(self.measure_sets[mtype]['ids']) > 0:
self.measure_sets[mtype]['ids'].remove(mid)
measure_file_name = "{}/{}{}.json".format(MeasureSetHandler.measures_dir, mtype, mid)
os.remove(measure_file_name)
self.save()
print("Removed measure {}{}".format(mtype, mid))
def remove_all(self):
# Iterate over copies: remove_measure_set() calls save(), which drops a measure type
# once its id list is empty and would otherwise invalidate the dict/list being iterated.
for mtype in list(self.measure_sets):
for mid in list(self.measure_sets[mtype]['ids']):
print(" Deleting {}{}...".format(mtype, mid))
self.remove_measure_set(mtype, mid)
print("Removed all measures")
def generate_graphs(self, metadata_masks):
# List of colors to use to superimpose the different plots
colors = ['red', 'blue', 'green', 'purple', 'yellow']
# For each type of measure set
for mtype in self.measure_sets:
# List all the measures of this type
measures = []
for mid in self.measure_sets[mtype]['ids']:
measures.append(self.get_measure_set("{}{}".format(mtype, mid)))
if len(measures) == 0:
continue
# Get the type and number of properties, which are all identical for
# measures of the same type
props_type = measures[0].props_type
nb_props = len(measures[0].props)
# For each property draw a graph
for i in range(nb_props):
prop_name = measures[0].props_names[i]
subplots = []
metadata_infos = []
# Superimpose all the different measures of the same type and property type
for j, mid in enumerate(self.measure_sets[mtype]['ids']):
measure = self.get_measure_set("{}{}".format(mtype, mid))
graph_name = "{}{}".format(mtype, mid)
# Generate the plot name from the metadata mask
metadata_info = ["{}".format(measure.metadata[metadata_name]) for metadata_name in metadata_masks[mtype]]
metadata_infos.append(", ".join(metadata_info))
if props_type == 'histogram':
subplots.append(measure.generate_histogram(i, colors[j], metadata_info))
else:
subplots.append(measure.generate_chrono_graph(i, colors[j], metadata_info))
fig, ax = plt.gcf(), plt.gca()
# Set meta graph information
if props_type == 'histogram':
ax.set_xlabel('Latency (us)')
ax.set_ylabel('Number of latency samples')
ax.set_title('{}, {} histogram'.format(mtype, prop_name))
else:
ax.set_xlabel('Time (us)')
ax.set_ylabel(prop_name)
ax.set_title('{}, {} graph'.format(mtype, prop_name))
plt.legend(subplots, metadata_infos)
#ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
fig.set_size_inches(11.0, 5.5)
# Save the graph
plt.savefig("{}/{}{}.png".format(MeasureSetHandler.graphs_dir, mtype, prop_name))
plt.clf()
def generate_metadata_text(self):
metadata_masks = {}
common_metadata_strs = {}
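# Example with the two bundled measure sets: they only differ in their 'board' metadata
# ("Slate" vs "Emerald"), so the mask for that type becomes ['board'] and every other
# field (kernel, interval, load, ...) goes into the common metadata string.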
for mtype in self.measure_sets:
metadata_mask = []
measures = []
for mid in self.measure_sets[mtype]['ids']:
measures.append(self.get_measure_set("{}{}".format(mtype, mid)))
if len(measures) == 0:
continue
first_metadata = measures[0].metadata
# Generate the metadata mask, by grouping the identical metadata
for measure in measures[1:]:
for metadata_name in measure.metadata:
# If it is not already in the metadata mask
if metadata_name not in metadata_mask:
# If there are two different metadata, they are added to the mask
if measure.metadata[metadata_name] != first_metadata[metadata_name]:
metadata_mask.append(metadata_name)
metadata_masks[mtype] = metadata_mask
# Compute the identical metadata
common_metadata = []
for metadata_name in first_metadata:
if metadata_name not in metadata_mask:
if metadata_name in MeasureSet.abbreviations:
common_metadata.append("{}: {}".format(MeasureSet.abbreviations[metadata_name], first_metadata[metadata_name]))
else:
common_metadata.append("{}: {}".format(metadata_name, first_metadata[metadata_name]))
common_metadata_strs[mtype] = ", ".join(common_metadata)
return (metadata_masks, common_metadata_strs)
def generate_report(self, include_graphs=True):
metadata_masks, common_metadata_strs = self.generate_metadata_text()
if include_graphs:
self.generate_graphs(metadata_masks)
with open(self.measures_dir + "/" + "measure-report.md", 'w+') as report:
report.write("## Measurements\n\n")
report.write("### Abbreviations used\n\n")
for abbr_name in MeasureSet.abbreviations:
report.write("* {}: {}\n".format(abbr_name, MeasureSet.abbreviations[abbr_name]))
report.write("\n")
for mtype in self.measure_sets:
need_header = True
props_lens = []
report.write("### {} results\n\n".format(mtype))
measures = []
for mid in self.measure_sets[mtype]['ids']:
measures.append(self.get_measure_set("{}{}".format(mtype, mid)))
if len(measures) == 0:
continue
first_metadata = measures[0].metadata
# Write the identical metadata before the table
report.write("**Common test metadata:** {}\n\n".format(common_metadata_strs[mtype]))
for mid in self.measure_sets[mtype]['ids']:
measure = self.get_measure_set("{}{}".format(mtype, mid))
if need_header:
table_str, props_lens = measure.generate_table(headers=True, metadata_mask=metadata_masks[mtype])
report.write(table_str)
need_header = False
else:
report.write(measure.generate_table(headers=False, props_lens=props_lens, metadata_mask=metadata_masks[mtype])[0])
report.write("\n")
# Include the graphs
if include_graphs:
for i in range(len(measures[0].props)):
report.write('\n![alt text](graphs/{}{}.png "{} Graph")\n'.format(mtype,
measures[0].props_names[i],
mtype))
report.write("\n")
report.write("\n")
class MeasureSet:
abbreviations = {
'ker': 'Linux kernel version',
'prio': 'Task priority',
'i': 'Interval',
'board': 'Board name',
'boot_p': 'Boot Parameters',
'delta': 'ETF qdisc delta',
'load': 'Device and processor load',
'duration': 'Test duration',
}
def __init__(self):
self.metadata = {}
self.metadata['board'] = "Emerald"
self.metadata['ker'] = "4.19"
self.metadata['boot_p'] = "isolcpus"
self.metadata['i'] = "200us"
self.metadata['delta'] = "200us"
self.metadata['prio'] = "99"
self.metadata['load'] = "ssh"
self.metadata['duration'] = "24h"
self.props = []
self.props_names = []
self.units = []
def __str__(self):
return "Cols: " + str(self.props) + "\n"
def input_metadata(self):
metadata = {}
metadata.update(self.metadata)
metadata_name = ""
while True:
print("Current metadata:\n")
for metadata_name in metadata:
print(" {}: {}".format(metadata_name, metadata[metadata_name]))
metadata_name = input('Enter metadata name (type "done" to exit): ')
if metadata_name == "done":
break
metadata_value = input('Enter metadata value (type "done" to exit, "cancel" to cancel current metadata): ')
if metadata_value == "done":
break
if metadata_value == "cancel":
continue
metadata[metadata_name] = metadata_value
return metadata
def add_metadata(self, measure_type, units, metadata):
self.measure_type = measure_type
self.units = units
self.metadata.update(metadata)
def add_chronological(self, props_names, props):
self.props = props
self.props_names = props_names
self.props_type = 'chronological'
self.max = [max(prop) for prop in props]
self.min = [min(prop) for prop in props]
self.avg = [statistics.mean(prop) for prop in props]
self.var = [statistics.variance(prop) for prop in props]
def add_histogram(self, props_names, props):
self.props = props
self.props_names = props_names
self.props_type = 'histogram'
self.max = []
self.min = []
self.avg = []
self.var = []
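# Each prop is a list of per-bin sample counts (bin index = latency in us), so the statistics
# are computed from the counts: mean = sum(x * count(x)) / N and
# variance = sum(count(x) * x^2) / N - mean^2 (the E[X^2] - E[X]^2 shortcut).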
for prop in props:
enumerate_prop = list(enumerate(prop))
# only consider bins that actually contain samples (otherwise the empty bin 0 forces the min to 0)
self.max.append(max((x[0] for x in enumerate_prop if x[1]), default=0))
self.min.append(min((x[0] for x in enumerate_prop if x[1]), default=0))
sum_prop = sum(prop)
avg = sum(map(lambda x: x[0]*x[1], enumerate_prop)) / sum_prop
var = 0
for x, p in enumerate_prop:
var += p * x**2
var /= sum_prop
var -= avg**2
self.avg.append(avg)
self.var.append(var)
def export_to_json(self, path):
with open(path, 'w') as outfile:
json.dump({'measure_type': self.measure_type,
'props_names': self.props_names,
'units': self.units,
'props': self.props,
'props_type': self.props_type,
'metadata': self.metadata}, outfile)
@staticmethod
def parse_cyclictest(infile):
data = {}
data['measure_type'] = 'cyclictest_wake-up_latency'
data['props_type'] = 'histogram'
data['props_names'] = ['wake-up latency']
data['units'] = ['us']
data['props'] = [[]]
lines = [line for line in infile]
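# Assumes a cyclictest histogram dump: two header lines, then one "<latency_us> <count>" line
# per bin (e.g. "000014 3"), with trailing "#" summary lines marking the end of the data.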
for line in lines[2:]:
if line[0] == '#':
break
i, x = parse.parse('{:d} {:d}', line)
data['props'][0].append(x)
return data
def import_from_json(self, path, flat=False, cyclictest=False):
with open(path) as infile:
if cyclictest:
data = MeasureSet.parse_cyclictest(infile)
data['metadata'] = self.input_metadata()
else:
data = json.load(infile)
measure_type = data['measure_type']
units = data['units']
metadata = data['metadata']
self.add_metadata(measure_type, units, metadata)
props_names = data['props_names']
if data['props_type'] == 'histogram':
props = data['props']
self.add_histogram(props_names, props)
else:
if flat:
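# 'flat' input stores a single interleaved 'values' list; de-interleave it into one list per prop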
values = data['values']
nb_props = len(props_names)
props = [[] for c in range(nb_props)]
for i,value in enumerate(values):
props[i % nb_props].append(value)
else:
props = data['props']
self.add_chronological(props_names, props)
def generate_histogram(self, i, color, name):
bins = list(range(self.max[i] + 1))
vals = self.props[i][:self.max[i] + 1]
# bump tiny non-zero bins up to ~1/140 of the tallest bin so they stay visible on the plot
min_val = max(vals) / 140
vals = list(map(lambda x: min_val if x != 0 and x < min_val else x, vals))
return plt.bar(bins, vals, color=color, alpha=0.4)
def generate_chrono_graph(self, i, color, name):
prop = self.props[i]
x = list(range(len(prop)))
# plt.plot() returns a list of Line2D objects; return the single line so plt.legend() gets a proper handle
return plt.plot(x, prop, color=color)[0]
def generate_table(self, headers=True, values=True, metadata_mask=[], props_lens=[]):
if headers == False and values == False:
return ""
table = []
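# Rows follow the same layout as the generated measure-report.md, e.g.:
#   Metadata  | Min                 | Max | ...
#   **board** | **wake-up latency** | ... | ...
#   Slate     | 0.0000us            | ... | ...
# When a measure set has several props, each cell joins their values with " - ".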
if headers:
headers = ["Min", "Max", "Avg", "Var"]
if metadata_mask != []:
table += [["Metadata"] + headers]
table += [["**" + ", ".join(metadata_mask) + "**"] + ["**" + " - ".join(self.props_names) + "**"] * len(headers)]
else:
table += [headers]
table += [["**" + " - ".join(self.props_names) + "**"] * len(headers)]
if values:
m = [self.min, self.max, self.avg, self.var]
values = [[ (format(m[i][j], '.4f') + self.units[j]) for j in range(len(m[0]))] for i in range(len(m))]
if metadata_mask != []:
metadata_info = ["{}".format(self.metadata[metadata_name]) for metadata_name in metadata_mask]
table += [[", ".join(metadata_info)] + [" - ".join(values[i]) for i in range(len(values))]]
else:
table += [[" - ".join(values[i]) for i in range(len(values))]]
if props_lens == []:
props_lens = [max([len(table[i][j]) for i in range(len(table))]) for j in range(len(table[0]))]
table = [[ table[i][j].ljust(props_lens[j]) for j in range(len(table[0]))] for i in range(len(table))]
table_str = ""
if headers:
table_str += " | ".join(table[0]) + "\n"
table_str += " | ".join([ ("-" * props_lens[i]) for i in range(len(props_lens)) ]) + "\n"
if values:
table_str += "\n".join([" | ".join(line) for line in table[1:]])
else:
table_str += "\n".join([" | ".join(line) for line in table])
return (table_str, props_lens)
def parse_args():
parser = argparse.ArgumentParser(description='Measure analysis')
parser.add_argument('-i', nargs=1, required=False, help='import file')
parser.add_argument('-c', action='store_true', required=False, help='parse cyclictest histogram')
parser.add_argument('--remove', nargs=1, required=False, help='remove a measure')
parser.add_argument('--remove-all', action='store_true', help='remove all measure sets')
parser.add_argument('-t', nargs='?', const='input_file', required=False, help='generate table')
parser.add_argument('-R', action='store_true', required=False, help='generate full measure report')
parser.add_argument('-G', action='store_true', required=False, help='generate all graphs')
parser.add_argument('-s', action='store_true', help='show measures')
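# Typical invocations (the script name used here is only illustrative):
#   python3 measures.py -i hist.txt -c        # import a cyclictest histogram, prompting for metadata
#   python3 measures.py -R                    # write measures/measure-report.md plus the graphs
#   python3 measures.py --remove cyclictest_wake-up_latency25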
args = parser.parse_args()
ms_handler = MeasureSetHandler()
if args.remove is not None:
file_name = args.remove[0]
mid_start = 0
for c in range(len(file_name)-1, -1, -1):
if not str.isdigit(file_name[c]):
mid_start = c+1
break
mtype = file_name[:mid_start]
mid = int(file_name[mid_start:])
ms_handler.remove_measure_set(mtype, mid)
if args.i is not None:
measure_set = MeasureSet()
if args.c:
measure_set.import_from_json(args.i[0], cyclictest=True)
else:
measure_set.import_from_json(args.i[0], flat=True)
ms_handler.add_measure_set(measure_set)
if args.t is not None:
print(measure_set.generate_table()[0])
elif args.t is not None and args.t != "input_file":
measure_set = ms_handler.get_measure_set(args.t)
print(measure_set.generate_table(True, True, ['board', 'ker', 'boot_p'])[0])
if args.R:
ms_handler.generate_report()
if args.G:
# generate_graphs() needs the metadata masks to build the plot legends
metadata_masks, _ = ms_handler.generate_metadata_text()
ms_handler.generate_graphs(metadata_masks)
if args.remove_all:
confirm = input("Are you sure all measure sets should be removed? [Yes] / [No]: ")
if confirm == "Yes":
ms_handler.remove_all()
if args.s:
print(ms_handler)
parse_args()
{"measure_type": "cyclictest_wake-up_latency", "props_names": ["wake-up latency"], "units": ["us"], "props": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 71354, 227648, 2119727, 18949203, 57902118, 83646995, 73486890, 51000779, 35574312, 25745841, 17325399, 10116697, 5008749, 2095840, 741363, 226972, 63008, 17855, 6353, 3275, 2017, 1396, 1075, 934, 884, 817, 701, 668, 563, 472, 396, 401, 330, 328, 318, 271, 240, 160, 160, 137, 97, 57, 50, 27, 22, 10, 8, 6, 1, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "props_type": "histogram", "metadata": {"board": "Slate", "ker": "4.19", "boot_p": "isolcpus", "i": "200us", "delta": "200us", "prio": "99", "load": "hackbench", "duration": "21h22"}}
{"measure_type": "cyclictest_wake-up_latency", "props_names": ["wake-up latency"], "units": ["us"], "props": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 89, 226068, 4712348, 41843151, 88650170, 88277162, 60997921, 37379247, 25081959, 17888586, 11083137, 5453618, 2109893, 665225, 176194, 42654, 10652, 3837, 1956, 1390, 1139, 1034, 902, 823, 716, 602, 504, 508, 441, 360, 359, 335, 296, 285, 253, 203, 201, 146, 121, 118, 81, 48, 46, 33, 18, 11, 10, 3, 4, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "props_type": "histogram", "metadata": {"board": "Emerald", "ker": "4.19", "boot_p": "isolcpus", "i": "200us", "delta": "200us", "prio": "99", "load": "hackbench", "duration": "21h22"}}
## Measurements

### Abbreviations used

* ker: Linux kernel version
* prio: Task priority
* i: Interval
* board: Board name
* boot_p: Boot Parameters
* delta: ETF qdisc delta
* load: Device and processor load
* duration: Test duration

### cyclictest_wake-up_latency results

**Common test metadata:** Linux kernel version: 4.19, Boot Parameters: isolcpus, Interval: 200us, ETF qdisc delta: 200us, Task priority: 99, Device and processor load: hackbench, Test duration: 21h22

Metadata | Min | Max | Avg | Var
--------- | ------------------- | ------------------- | ------------------- | -------------------
**board** | **wake-up latency** | **wake-up latency** | **wake-up latency** | **wake-up latency**
Slate | 0.0000us | 65.0000us | 21.2779us | 4.7291us
Emerald | 0.0000us | 112.0000us | 19.5352us | 4.0545us

![alt text](graphs/cyclictest_wake-up_latencywake-up latency.png "cyclictest_wake-up_latency Graph")
{"cyclictest_wake-up_latency": {"ids": [25, 26], "next_id": 28}}