Commit d2949726 authored by Georgios Dagkakis

Merge branch 'batchesTabExit'

parents a374dd32 cf627e2f
...@@ -126,6 +126,10 @@ class AddBatchStations(plugin.InputPreparationPlugin):
# add an edge from batchReassembly to destination
self.addEdge(data, batchReassemblyId, destination)
# set all the Queue types to gather wip data
for node in data["graph"]["node"].values():
if node['_class'] in ['Dream.Queue', 'Dream.LineClearance', 'Dream.RoutingQueue']:
node['gatherWipStat'] = 1
return data
...
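The block added above simply flags every queue-like node so that WIP statistics are collected for it. A minimal sketch of the effect on a toy model, assuming the same data layout the plugin receives (the node ids and the Dream.Machine class here are illustrative only):

# Toy input with one queue and one machine (ids are made up for illustration).
data = {
    "graph": {
        "node": {
            "Q1": {"_class": "Dream.Queue"},
            "M1": {"_class": "Dream.Machine"},
        }
    }
}

# Same loop as in the diff: mark every queue-like node, presumably so that
# the simulation records its WIP time series for later reporting.
for node in data["graph"]["node"].values():
    if node["_class"] in ["Dream.Queue", "Dream.LineClearance", "Dream.RoutingQueue"]:
        node["gatherWipStat"] = 1

assert data["graph"]["node"]["Q1"]["gatherWipStat"] == 1   # the queue is flagged
assert "gatherWipStat" not in data["graph"]["node"]["M1"]  # the machine is not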
from copy import copy
import json
import time
import random
import operator
import StringIO
import xlrd
import numpy
from dream.plugins import plugin
class BatchesTabularExit(plugin.OutputPreparationPlugin):
""" Output the exit stats in a tab
"""
def postprocess(self, data):
numberOfReplications=int(data['general']['numberOfReplications'])
confidenceLevel=float(data['general']['confidenceLevel'])
maxSimTime=data['general']['maxSimTime']
timeUnit=data['general']['timeUnit']
if numberOfReplications==1:
# create the titles of the columns
data['result']['result_list'][0]['exit_output'] = [['KPI','Unit','Value']]
# loop the results and search for elements that have 'Exit' as family
for record in data['result']['result_list'][-1]['elementList']:
family=record.get('family',None)
# when found, add a row with the results of the specific exit
if family=='Exit':
batchesThroughput=record['results']['throughput'][0]
data['result']['result_list'][0]['exit_output'].append(['Number of batches produced','Batches',
batchesThroughput])
unitsThroughput=record['results']['unitsThroughput'][0]
data['result']['result_list'][0]['exit_output'].append(['Number of units produced','Units',
unitsThroughput])
lineThroughput=batchesThroughput/float(maxSimTime)
data['result']['result_list'][0]['exit_output'].append(['Line throughput','Batches/'+timeUnit,
"%.2f" % lineThroughput])
unitDepartureRate=unitsThroughput/float(maxSimTime)
data['result']['result_list'][0]['exit_output'].append(['Average Unit Departure Rate',
'Units/'+timeUnit,
"%.2f" % unitDepartureRate])
avgCycleTime=record['results']['lifespan'][0]
data['result']['result_list'][0]['exit_output'].append(['Average Cycle Time',
timeUnit,
"%.2f" % avgCycleTime])
elif numberOfReplications>1:
# create the titles of the columns
data['result']['result_list'][0]['exit_output'] = [['KPI','Unit','Average','Std Dev','Min','Max',
str(float(confidenceLevel)*100)+'% CI LB ',
str(float(confidenceLevel)*100)+'% CI UB']]
for record in data['result']['result_list'][0]['elementList']:
family=record.get('family',None)
# when found, add a row with the results of the specific exit
if family=='Exit':
batchesThroughputList=record['results']['throughput']
batchesThroughputCI=self.getConfidenceInterval(batchesThroughputList,confidenceLevel)
data['result']['result_list'][0]['exit_output'].append(['Number of batches produced','Batches',
"%.2f" % self.getAverage(batchesThroughputList),
"%.2f" % self.getStDev(batchesThroughputList),
min(batchesThroughputList),
max(batchesThroughputList),
"%.2f" % batchesThroughputCI['lb'],
"%.2f" % batchesThroughputCI['ub']]
)
unitsThroughputList=record['results']['unitsThroughput']
unitsThroughputCI=self.getConfidenceInterval(unitsThroughputList,confidenceLevel)
data['result']['result_list'][0]['exit_output'].append(['Number of units produced','Units',
"%.2f" % self.getAverage(unitsThroughputList),
"%.2f" % self.getStDev(unitsThroughputList),
min(unitsThroughputList),
max(unitsThroughputList),
"%.2f" % unitsThroughputCI['lb'],
"%.2f" % unitsThroughputCI['ub']]
)
lineThroughputList=[x/float(maxSimTime) for x in batchesThroughputList]
lineThroughputCI=self.getConfidenceInterval(lineThroughputList,confidenceLevel)
data['result']['result_list'][0]['exit_output'].append(['Line throughput','Batches/'+timeUnit,
"%.2f" % self.getAverage(lineThroughputList),
"%.2f" % self.getStDev(lineThroughputList),
"%.2f" % min(lineThroughputList),
"%.2f" % max(lineThroughputList),
"%.2f" % lineThroughputCI['lb'],
"%.2f" % lineThroughputCI['ub']]
)
unitDepartureRateList=[x/float(maxSimTime) for x in unitsThroughputList]
unitDepartureRateCI=self.getConfidenceInterval(unitDepartureRateList,confidenceLevel)
data['result']['result_list'][0]['exit_output'].append(['Unit Departure Rate',
'Units/'+timeUnit,
"%.2f" % self.getAverage(unitDepartureRateList),
"%.2f" % self.getStDev(unitDepartureRateList),
"%.2f" % min(unitDepartureRateList),
"%.2f" % max(unitDepartureRateList),
"%.2f" % unitDepartureRateCI['lb'],
"%.2f" % unitDepartureRateCI['ub']]
)
avgCycleTimeList=record['results']['lifespan']
avgCycleTimeCI=self.getConfidenceInterval(avgCycleTimeList,confidenceLevel)
data['result']['result_list'][0]['exit_output'].append(['Cycle Time',timeUnit,
"%.2f" % self.getAverage(avgCycleTimeList),
"%.2f" % self.getStDev(avgCycleTimeList),
"%.2f" % min(avgCycleTimeList),
"%.2f" % max(avgCycleTimeList),
"%.2f" % avgCycleTimeCI['lb'],
"%.2f" % avgCycleTimeCI['ub']]
)
return data
\ No newline at end of file
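For the single-replication branch above, the KPI arithmetic reduces to a couple of divisions over the Exit element's results. A short worked example with made-up numbers (not taken from any real model), following the same formulas:

maxSimTime = 1440.0        # e.g. one simulated day, in minutes
batchesThroughput = 48     # 'throughput' reported by the Exit element
unitsThroughput = 4800     # 'unitsThroughput' reported by the Exit element

lineThroughput = batchesThroughput / float(maxSimTime)    # batches per time unit
unitDepartureRate = unitsThroughput / float(maxSimTime)   # units per time unit

print("%.2f" % lineThroughput)     # 0.03
print("%.2f" % unitDepartureRate)  # 3.33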
from copy import copy
import json
import time
import random
import operator
import StringIO
import xlrd
import math
from dream.plugins import plugin
class BatchesTabularQueues(plugin.OutputPreparationPlugin):
""" Output the exit stats in a tab
"""
def postprocess(self, data):
numberOfReplications=int(data['general']['numberOfReplications'])
confidenceLevel=float(data['general']['confidenceLevel'])
maxSimTime=float(data['general']['maxSimTime'])
if numberOfReplications==1:
# create the titles of the columns
data['result']['result_list'][-1]['buffer_output'] = [['Buffer','Final Value','Average',
'Std Dev','Min','Max',]]
# loop the results and search for elements that have 'Buffer' as family
for record in data['result']['result_list'][-1]['elementList']:
family=record.get('family',None)
# when found, add a row with the results of the specific buffer
if family=='Buffer':
bufferId=record['id']
wip_stat_list=record['results']['wip_stat_list'][0]
bufferLevels=[int(x[1]) for x in wip_stat_list]
maxLevel=max(bufferLevels)
finalValue=wip_stat_list[-1][1]
timeListDict=self.createTimeListDict(wip_stat_list,maxSimTime)
totalLevel=0
minLevel=float('inf')
# find the minimum level that was held for a non-zero duration
for level, duration in timeListDict.iteritems():
if duration and (level<minLevel):
minLevel=level
for level, duration in timeListDict.iteritems():
totalLevel+=level*duration
averageLevel=totalLevel/float(maxSimTime)
totalDistance=0
for level, duration in timeListDict.iteritems():
totalDistance+=((level-averageLevel)*(level-averageLevel))*duration
stdevLevel=math.sqrt(totalDistance/float(maxSimTime-1))
data['result']['result_list'][-1]['buffer_output'].append([bufferId,finalValue,
"%.2f" % averageLevel,
"%.2f" % stdevLevel,
minLevel,maxLevel])
elif numberOfReplications>1:
# results for multiple replications are not handled yet
pass
return data
# takes the time list that ManPy outputs and creates a dict keyed by level, so that averages etc. are easier to compute
def createTimeListDict(self, timeList,maxSimTime):
timeListDict={}
i=0
for record in timeList:
time=record[0]
level=int(record[1])
try:
nextTime=timeList[i+1][0]
except IndexError:
nextTime=maxSimTime
i+=1
if not (level in timeListDict.keys()):
timeListDict[level]=0
timeListDict[level]+=nextTime-time
return timeListDict
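The buffer statistics above are time-weighted: createTimeListDict turns the [time, level] records into a mapping from level to the total time spent at that level, holding the last record until maxSimTime. A standalone sketch of the same idea on a toy wip_stat_list (the numbers are illustrative only):

wip_stat_list = [[0, 0], [2, 3], [5, 1], [8, 4]]   # [time, level] records
maxSimTime = 10.0

# Mirror createTimeListDict: level -> total duration spent at that level.
timeListDict = {}
for i, (t, level) in enumerate(wip_stat_list):
    nextTime = wip_stat_list[i + 1][0] if i + 1 < len(wip_stat_list) else maxSimTime
    timeListDict[int(level)] = timeListDict.get(int(level), 0) + (nextTime - t)

# Time-weighted average level, as in the plugin.
averageLevel = sum(level * duration for level, duration in timeListDict.items()) / maxSimTime
print(timeListDict)           # e.g. {0: 2, 3: 3, 1: 3, 4: 2.0} (key order may vary)
print("%.2f" % averageLevel)  # 2.00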
from copy import deepcopy
import json
import numpy
from zope.dottedname.resolve import resolve
...@@ -32,6 +33,25 @@ class Plugin(object):
successors.append(edge['destination'])
return successors
# calculates the confidence interval for a list and a confidence level
def getConfidenceInterval(self, value_list, confidenceLevel):
from dream.KnowledgeExtraction.ConfidenceIntervals import Intervals
from dream.KnowledgeExtraction.StatisticalMeasures import BasicStatisticalMeasures
BSM=BasicStatisticalMeasures()
lb, ub = Intervals().ConfidIntervals(value_list, confidenceLevel)
return {'lb': lb,
'ub': ub,
'avg': BSM.mean(value_list)
}
# return the average of a list
def getAverage(self, value_list):
return sum(value_list) / float(len(value_list))
# return the standard deviation of a list
def getStDev(self, value_list):
return numpy.std(value_list)
# returns name of a node given its id
def getNameFromId(self, data, node_id):
return data['graph']['node'][node_id]['name']
...
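A rough standalone illustration of what the three new helpers compute, using a plain normal-approximation interval; the project's Intervals class may use a different formula (e.g. a t-based one), so this is only a sketch of the idea, not the actual implementation:

import math
import numpy

value_list = [46, 48, 51, 47, 50]   # e.g. batch throughput per replication

avg = sum(value_list) / float(len(value_list))   # what getAverage returns
stdev = numpy.std(value_list)                    # what getStDev returns (population std dev)

# Normal-approximation 95% confidence interval around the mean.
halfWidth = 1.96 * stdev / math.sqrt(len(value_list))
lb, ub = avg - halfWidth, avg + halfWidth
print("%.2f %.2f %.2f" % (avg, lb, ub))          # 48.40 46.77 50.03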
...@@ -201,24 +201,24 @@
}
},
"output": {
- "view_exit_stats": {
-   "configuration": {
-     "properties": {
-       "lifespan": {
-         "type": "number"
-       },
-       "taktTime": {
-         "type": "number"
-       },
-       "throughput": {
-         "type": "number"
-       }
-     }
-   },
-   "gadget": "Output_viewExitStatistics",
-   "title": "Exit Statistics",
-   "type": "object_view"
- },
+ "view_exit_results": {
+   "configuration": {
+     "handsontable_options": {},
+     "output_id": "exit_output"
+   },
+   "gadget": "Output_viewSpreadsheet",
+   "title": "Exit statistics",
+   "type": "object_view"
+ },
+ "view_buffer_state": {
+   "configuration": {
+     "handsontable_options": {},
+     "output_id": "buffer_output"
+   },
+   "gadget": "Output_viewSpreadsheet",
+   "title": "Buffer Levels",
+   "type": "object_view"
+ },
"view_operator_gantt": {
"configuration": {
"data": {
...@@ -257,8 +257,15 @@
{
"_class": "dream.plugins.PostProcessQueueStatistics.PostProcessQueueStatistics",
"output_id": "queue_statistics"
+ },
+ {
+   "_class": "dream.plugins.BatchesTabularExit.BatchesTabularExit",
+   "output_id": "exit_output"
+ },
+ {
+   "_class": "dream.plugins.BatchesTabularQueues.BatchesTabularQueues",
+   "output_id": "buffer_output"
}
]
},
"pre_processing": {
...
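The configuration above appears to be glued together by output_id: each post_processing plugin writes its table under that key in the result, and the Output_viewSpreadsheet gadget configured with the same output_id renders it. A toy illustration of the resulting structure (values made up):

result = {
    "result_list": [{
        "elementList": [],
        "exit_output": [
            ["KPI", "Unit", "Value"],
            ["Number of batches produced", "Batches", 48],
        ],
        "buffer_output": [
            ["Buffer", "Final Value", "Average", "Std Dev", "Min", "Max"],
        ],
    }]
}

# The gadget configured with "output_id": "exit_output" would read this table.
table = result["result_list"][0]["exit_output"]
print(table[0])   # ['KPI', 'Unit', 'Value']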
This source diff could not be displayed because it is too large.