Commit 3baf1efd authored by Georgios Dagkakis

Merge branch 'ketool3'

parents fc8b96eb 18c17e72
'''
Created on 24 Sep 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
from StatisticalMeasures import BasicStatisticalMeasures
#The HandleOutliers object
class HandleOutliers(BasicStatisticalMeasures):
    #Two approaches to handle outliers are included in this object:
    #the first deletes both the mild and the extreme outliers, while the second deletes only the extreme outliers in the given data set
    def DeleteOutliers(self,mylist):    #Delete the outliers (both mild and extreme) in a given data set
        A= BasicStatisticalMeasures()   #Call BasicStatisticalMeasures to calculate the quartiles and the interquartile range
        Q1= A.quantile(mylist)[1]
        Q3= A.quantile(mylist)[3]
        IQ= A.IQR(mylist)
        LIF= Q1 - 1.5*IQ    #Calculate the lower inner fence
        UIF= Q3 + 1.5*IQ    #Calculate the upper inner fence
        LOF= Q1 - 3*IQ      #Calculate the lower outer fence
        UOF= Q3 + 3*IQ      #Calculate the upper outer fence
        listx=[]
        for value in mylist:
            #A value beyond the inner fence [LIF,UIF] on either side is a mild outlier, and a value beyond the outer fence [LOF,UOF] on either side is an extreme outlier; both fail the check and are discarded
            if not ((value<LOF or value>UOF) or (value<LIF or value>UIF)):
                listx.append(value)
        return listx
    def DeleteExtremeOutliers(self,mylist):    #Delete only the extreme outliers in a given data set
        A= BasicStatisticalMeasures()
        Q1= A.quantile(mylist)[1]
        Q3= A.quantile(mylist)[3]
        IQ= A.IQR(mylist)
        LOF= Q1 - 3*IQ      #Calculate the lower outer fence
        UOF= Q3 + 3*IQ      #Calculate the upper outer fence
        listx=[]
        for value in mylist:
            #A value beyond the outer fence [LOF,UOF] on either side is an extreme outlier; it fails the check and is discarded
            if not (value<LOF or value>UOF):
                listx.append(value)
        return listx
\ No newline at end of file
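For reference, a minimal standalone sketch of the same fence rule: k=1.5 times the interquartile range gives the inner fence (mild outliers), k=3 gives the outer fence (extreme outliers). The quartile interpolation below is an assumption for illustration and need not match BasicStatisticalMeasures exactly.

# Sketch: IQR-fence outlier removal without the BasicStatisticalMeasures dependency.
def remove_outliers(values, k=1.5):
    data = sorted(values)
    n = len(data)
    def quartile(q):                      # simple linear interpolation between order statistics
        pos = q * (n - 1)
        lo = int(pos)
        hi = min(lo + 1, n - 1)
        return data[lo] + (data[hi] - data[lo]) * (pos - lo)
    q1, q3 = quartile(0.25), quartile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - k * iqr, q3 + k * iqr
    return [v for v in values if lower <= v <= upper]

sample = [5, 6, 7, 6, 5, 50]              # 50 lies far beyond both fences
print(remove_outliers(sample))            # inner fence: drops mild and extreme outliers
print(remove_outliers(sample, k=3))       # outer fence: drops only extreme outliers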
......@@ -234,7 +234,7 @@ class DistFittest:
except RRuntimeError:
return None
gam=self.Gam
self.Gamtest= rkstest(data,"pgamma",scale=gam[0][1],shape=gam[0][0])
self.Gamtest= rkstest(data,"pgamma",rate=gam[0][1],shape=gam[0][0])
return self.Gamtest
def Weib_kstest(self,data):
......
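The change above switches the gamma goodness-of-fit call from R's scale= to rate= argument. In R, pgamma accepts either, with rate = 1/scale, so the KS test must use whichever parameterization the fitted values were estimated in (MASS::fitdistr reports shape and rate for the gamma); passing a rate estimate as scale tests against the wrong distribution. A sketch of the equivalent call through rpy2, with illustrative data and parameter values:

# Sketch: KS test of sample data against a fitted gamma, using R's rate parameterization via rpy2.
from rpy2 import robjects

rkstest = robjects.r['ks.test']                          # R's ks.test function
data = robjects.FloatVector([4.1, 5.3, 3.8, 6.0, 4.7, 5.1])
shape, rate = 20.0, 4.0                                  # assumed fitted values, e.g. from MASS::fitdistr
result = rkstest(data, "pgamma", shape=shape, rate=rate)
print(result.rx2('p.value')[0])                          # p-value of the Kolmogorov-Smirnov test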
......@@ -239,11 +239,20 @@ class Output(BasicStatisticalMeasures,DistFittest):
del A['min']
del A['max']
del A['distributionType']
sheet2.write(14,15,(A.keys()[0]))
sheet2.write(14,16,(A.keys()[1]))
sheet2.write(15,15,(A.values()[0]))
sheet2.write(15,16,(A.values()[1]))
elif A['distributionType']=='Exp' or A['distributionType']=='Poisson' or A['distributionType']=='Geometric':
del A['distributionType']
sheet2.write(14,15,(A.keys()[0]))
sheet2.write(15,15,(A.values()[0]))
else:
del A['distributionType']
sheet2.write(14,15,(A.keys()[0]))
sheet2.write(14,16,(A.keys()[1]))
sheet2.write(15,15,(A.values()[0]))
sheet2.write(15,16,(A.values()[1]))
sheet2.write(14,15,(A.keys()[0]))
sheet2.write(14,16,(A.keys()[1]))
sheet2.write(15,15,(A.values()[0]))
sheet2.write(15,16,(A.values()[1]))
book.save(fileName) #Save the excel document
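The branches above differ only in how many parameters remain in A after the del statements before they are written to the sheet; looping over the remaining items handles one- and two-parameter distributions uniformly. A sketch with xlwt (workbook, sheet, and the example contents of A are placeholders; note that indexing dict.keys() directly, as the code above does, only works on Python 2):

# Sketch: write the surviving distribution parameters as a name row (row 14) and a value row (row 15).
import xlwt

book = xlwt.Workbook()
sheet2 = book.add_sheet('DistributionFitting')
A = {'distributionType': 'Normal', 'mean': 5.2, 'stdev': 1.1}   # assumed example content
A.pop('distributionType')                                       # keep only the parameter entries
for offset, (name, value) in enumerate(sorted(A.items())):
    sheet2.write(14, 15 + offset, name)      # parameter names starting at column 15
    sheet2.write(15, 15 + offset, value)     # parameter values directly below
book.save('DistributionFittingResults_example.xls')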
......@@ -359,14 +359,14 @@ def CMSD_example(list1,list2):
Name.text=str(list1['P1']['distributionType'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='mean'
Name.text='shape'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P1']['mean'])
Value.text=str(list1['P1']['shape'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Name.text='rate'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P1']['stdev'])
Value.text=str(list1['P1']['rate'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -420,14 +420,14 @@ def CMSD_example(list1,list2):
Name.text=str(list1['P2']['distributionType'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='mean'
Name.text='shape'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P2']['mean'])
Value.text=str(list1['P2']['shape'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Name.text='rate'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P2']['stdev'])
Value.text=str(list1['P2']['rate'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -606,14 +606,14 @@ def CMSD_example(list1,list2):
Name.text=str(list1['P5']['distributionType'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='mean'
Name.text='shape'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P5']['mean'])
Value.text=str(list1['P5']['shape'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Name.text='rate'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P5']['mean'])
Value.text=str(list1['P5']['rate'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -792,14 +792,14 @@ def CMSD_example(list1,list2):
Name.text=str(list1['P8']['distributionType'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='mean'
Name.text='shape'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P8']['mean'])
Value.text=str(list1['P8']['shape'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Name.text='rate'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P8']['stdev'])
Value.text=str(list1['P8']['rate'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -854,14 +854,14 @@ def CMSD_example(list1,list2):
Name.text=str(list1['P9']['distributionType'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='mean'
Name.text='shape'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P9']['mean'])
Value.text=str(list1['P9']['shape'])
DistributionParameter=SubElement(Distribution,'DistributionParameter')
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Name.text='rate'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P9']['stdev'])
Value.text=str(list1['P9']['rate'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -923,7 +923,7 @@ def CMSD_example(list1,list2):
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P9']['mean'])
Value.text=str(list1['P10']['stdev'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......@@ -985,7 +985,7 @@ def CMSD_example(list1,list2):
Name=SubElement(DistributionParameter,'Name')
Name.text='stdev'
Value=SubElement(DistributionParameter,'Value')
Value.text=str(list1['P11']['mean'])
Value.text=str(list1['P11']['stdev'])
Property=SubElement(Process,'Property')
Name=SubElement(Property,'Name')
......
'''
Created on 19 Feb 2014
Created on 9 Oct 2014
@author: Panos
'''
......@@ -24,7 +24,6 @@ Created on 19 Feb 2014
import json
def JSON_example(list1,list2):
jsonFile= open('JSON_example.json','r')
......@@ -36,73 +35,16 @@ def JSON_example(list1,list2):
name=element.get('name')
scrapQuantity=element.get('scrapQuantity',{})
processingTime=element.get('processingTime',{})
if name =='P1':
scrapQuantity['mean']=str(list2['P1'])
processingTime['distributionType']=str(list1['P1']['distributionType'])
processingTime['mean']=str(list1['P1']['mean'])
processingTime['stdev']=str(list1['P1']['stdev'])
elif name=='P4':
scrapQuantity['mean']=str(list2['P4'])
processingTime['distributionType']=str(list1['P4']['distributionType'])
processingTime['mean']=str(list1['P4']['mean'])
processingTime['stdev']=str(list1['P4']['stdev'])
elif name=='P2':
scrapQuantity['mean']=str(list2['P2'])
processingTime['distributionType']=str(list1['P2']['distributionType'])
processingTime['mean']=str(list1['P2']['mean'])
processingTime['stdev']=str(list1['P2']['stdev'])
elif name=='P5':
scrapQuantity['mean']=str(list2['P5'])
processingTime['distributionType']=str(list1['P5']['distributionType'])
processingTime['mean']=str(list1['P5']['mean'])
processingTime['stdev']=str(list1['P5']['stdev'])
elif name=='P3':
scrapQuantity['mean']=str(list2['P3'])
processingTime['distributionType']=str(list1['P3']['distributionType'])
processingTime['mean']=str(list1['P3']['mean'])
processingTime['stdev']=str(list1['P3']['stdev'])
elif name=='P6':
scrapQuantity['mean']=str(list2['P6'])
processingTime['distributionType']=str(list1['P6']['distributionType'])
processingTime['mean']=str(list1['P6']['mean'])
processingTime['stdev']=str(list1['P6']['stdev'])
elif name=='P7':
scrapQuantity['mean']=str(list2['P7'])
processingTime['distributionType']=str(list1['P7']['distributionType'])
processingTime['mean']=str(list1['P7']['mean'])
processingTime['stdev']=str(list1['P7']['stdev'])
elif name=='P8':
scrapQuantity['mean']=str(list2['P8'])
processingTime['distributionType']=str(list1['P8']['distributionType'])
processingTime['mean']=str(list1['P8']['mean'])
processingTime['stdev']=str(list1['P8']['stdev'])
elif name=='P9':
scrapQuantity['mean']=str(list2['P9'])
processingTime['distributionType']=str(list1['P9']['distributionType'])
processingTime['mean']=str(list1['P9']['mean'])
processingTime['stdev']=str(list1['P9']['stdev'])
elif name=='P10':
scrapQuantity['mean']=str(list2['P10'])
processingTime['distributionType']=str(list1['P10']['distributionType'])
processingTime['mean']=str(list1['P10']['mean'])
processingTime['stdev']=str(list1['P10']['stdev'])
elif name=='P11':
scrapQuantity['mean']=str(list2['P11'])
processingTime['distributionType']=str(list1['P11']['distributionType'])
processingTime['mean']=str(list1['P11']['mean'])
processingTime['stdev']=str(list1['P11']['stdev'])
if name in list1.keys():
element['processingTime']= list1[name]
else:
continue
if name in list2.keys():
element['scrapQuantity']= list2[name]
else:
continue
continue
jsonFile = open('JSON_exampleOutput.json',"w")
jsonFile.write(json.dumps(data, indent=True))
jsonFile.close()
return json.dumps(data, indent=True)
return json.dumps(data, indent=True)
\ No newline at end of file
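The refactor above replaces the station-by-station if/elif chain with a membership test that assigns the whole fitted block at once. A self-contained sketch of the same pattern; the element structure and station names here are illustrative and not taken from the real JSON_example.json:

# Sketch: update only the stations for which fitted data exists, copying each block wholesale.
import json

list1 = {'P1': {'distributionType': 'Normal', 'mean': 5.0, 'stdev': 1.2}}   # fitted processing times
list2 = {'P1': 3}                                                           # mean scrap quantities
data = {'elementList': [{'name': 'P1', 'processingTime': {}, 'scrapQuantity': {}},
                        {'name': 'P2', 'processingTime': {}, 'scrapQuantity': {}}]}

for element in data['elementList']:
    name = element.get('name')
    if name in list1:
        element['processingTime'] = list1[name]    # whole distribution dictionary in one assignment
    if name in list2:
        element['scrapQuantity'] = list2[name]
print(json.dumps(data, indent=True))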
......@@ -24,6 +24,7 @@ Created on 19 Feb 2014
from StatisticalMeasures import BasicStatisticalMeasures
from DataManipulation import DataManagement
from DistributionFitting import DistFittest
from DistributionFitting import Distributions
from CMSD_Output import CMSD_example
from JSON_Output import JSON_example
......@@ -36,7 +37,7 @@ import dream.simulation.LineGenerationJSON as ManPyMain
#================= Main script of KE tool =====================================#
#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('inputData.xls')
workbook = xlrd.open_workbook('input_Data.xls')
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[1] #Define the worksheet with the Processing times data
worksheet_ScrapQuantity = worksheets[0] #Define the worksheet with the Scrap Quantity data
......@@ -46,38 +47,57 @@ ProcessingTimes= A.Input_data(worksheet_ProcessingTimes, workbook) #Create the
ScrapQuantity=A.Input_data(worksheet_ScrapQuantity, workbook) #Create the Scrap Quantity dictionary with keys the different stations in the line and values the scrap quantity data of different batches in these stations
##Get from the Scrap Quantity dictionary the different keys and define the following lists with the scrap quantity data of the different stations in the topology
P7_Scrap = ScrapQuantity.get('P7',[])
P1_Scrap = ScrapQuantity.get('P1',[])
P2_Scrap= ScrapQuantity.get('P3',[])
P3_Scrap=ScrapQuantity.get('P3',[])
P8_Scrap=ScrapQuantity.get('P8',[])
P1_Scrap= ScrapQuantity.get('P1',[])
P2_Scrap= ScrapQuantity.get('P2',[])
P3_Scrap= ScrapQuantity.get('P3',[])
P4_Scrap= ScrapQuantity.get('P4',[])
P5_Scrap= ScrapQuantity.get('P5',[])
P6_Scrap= ScrapQuantity.get('P6',[])
P7_Scrap= ScrapQuantity.get('P7',[])
P8_Scrap= ScrapQuantity.get('P8',[])
P9_Scrap= ScrapQuantity.get('P9',[])
P10_Scrap= ScrapQuantity.get('P10',[])
P11_Scrap= ScrapQuantity.get('P11',[])
##Get from the Processing times dictionary the different keys and define the following lists with the processing times data of the different stations in the topology
P7_Proc = ProcessingTimes.get('P7',[])
P1_Proc = ProcessingTimes.get('P1',[])
P1_Proc= ProcessingTimes.get('P1',[])
P2_Proc= ProcessingTimes.get('P2',[])
P3_Proc=ProcessingTimes.get('P3',[])
P8_Proc=ProcessingTimes.get('P8',[])
P3_Proc= ProcessingTimes.get('P3',[])
P4_Proc= ProcessingTimes.get('P4',[])
P5_Proc= ProcessingTimes.get('P5',[])
P6_Proc= ProcessingTimes.get('P6',[])
P7_Proc= ProcessingTimes.get('P7',[])
P8_Proc= ProcessingTimes.get('P8',[])
P9_Proc= ProcessingTimes.get('P9',[])
P10_Proc= ProcessingTimes.get('P10',[])
P11_Proc= ProcessingTimes.get('P11',[])
#Call the HandleMissingValues object and replace with zero the missing values in the lists with the scrap quantity data
B=HandleMissingValues()
P7_Scrap= B.ReplaceWithZero(P7_Scrap)
P1_Scrap= B.ReplaceWithZero(P1_Scrap)
P2_Scrap= B.ReplaceWithZero(P2_Scrap)
P3_Scrap= B.ReplaceWithZero(P3_Scrap)
P4_Scrap= B.ReplaceWithZero(P4_Scrap)
P5_Scrap= B.ReplaceWithZero(P5_Scrap)
P6_Scrap= B.ReplaceWithZero(P6_Scrap)
P7_Scrap= B.ReplaceWithZero(P7_Scrap)
P8_Scrap= B.ReplaceWithZero(P8_Scrap)
P9_Scrap= B.ReplaceWithZero(P9_Scrap)
P10_Scrap= B.ReplaceWithZero(P10_Scrap)
P11_Scrap= B.ReplaceWithZero(P11_Scrap)
#Call the BasicStatisticalMeasures object
C=BasicStatisticalMeasures()
#Create a list with values the calculated mean value of scrap quantity on the different stations in the line
listScrap=[C.mean(P1_Scrap),C.mean(P2_Scrap),C.mean(P3_Scrap),C.mean(P1_Scrap),C.mean(P2_Scrap),C.mean(P3_Scrap),C.mean(P7_Scrap),C.mean(P8_Scrap),C.mean(P8_Scrap),C.mean(P9_Scrap), C.mean(P9_Scrap)]
listScrap=[C.mean(P1_Scrap),C.mean(P2_Scrap),C.mean(P3_Scrap),C.mean(P4_Scrap),C.mean(P5_Scrap),C.mean(P6_Scrap),C.mean(P7_Scrap),C.mean(P8_Scrap),C.mean(P9_Scrap),C.mean(P10_Scrap), C.mean(P11_Scrap)]
F=DataManagement()
D= DataManagement()
listScrap=F.round(listScrap) #Round the mean values of the list so as to get integers
listScrap=D.round(listScrap) #Round the mean values of the list so as to get integers
dictScrap={}
dictScrap['P1']= listScrap[0]
......@@ -92,35 +112,30 @@ dictScrap['P9']= listScrap[8]
dictScrap['P10']= listScrap[9]
dictScrap['P11']= listScrap[10]
#Create a tuple with the Processing times data lists of the different stations
a=(P1_Proc,P2_Proc,P3_Proc,P1_Proc,P2_Proc,P3_Proc,P7_Proc,P8_Proc,P8_Proc,P9_Proc,P9_Proc)
E=Distributions() #Call the DistFittest object
E= DistFittest()
dictProc={}
dictProc['P1']= E.Normal_distrfit(P1_Proc)
dictProc['P2']= E.Normal_distrfit(P2_Proc)
dictProc['P3']= E.Normal_distrfit(P3_Proc)
dictProc['P4']= E.Normal_distrfit(P1_Proc)
dictProc['P5']= E.Normal_distrfit(P2_Proc)
dictProc['P6']= E.Normal_distrfit(P3_Proc)
dictProc['P7']= E.Normal_distrfit(P7_Proc)
dictProc['P8']= E.Normal_distrfit(P8_Proc)
dictProc['P9']= E.Normal_distrfit(P8_Proc)
dictProc['P10']= E.Normal_distrfit(P9_Proc)
dictProc['P11']= E.Normal_distrfit(P9_Proc)
D=Output()
D.PrintDistributionFit(P2_Proc,"DistributionFittingResults_P2Proc.xls")
D.PrintStatisticalMeasures(P2_Proc, "StatisticalMeasuresResults_P2Proc.xls")
dictProc['P1']= E.ks_test(P1_Proc)
dictProc['P2']= E.ks_test(P2_Proc)
dictProc['P3']= E.ks_test(P3_Proc)
dictProc['P4']= E.ks_test(P4_Proc)
dictProc['P5']= E.ks_test(P5_Proc)
dictProc['P6']= E.ks_test(P6_Proc)
dictProc['P7']= E.ks_test(P7_Proc)
dictProc['P8']= E.ks_test(P8_Proc)
dictProc['P9']= E.ks_test(P9_Proc)
dictProc['P10']= E.ks_test(P10_Proc)
dictProc['P11']= E.ks_test(P11_Proc)
F= Output()
F.PrintDistributionFit(P2_Proc,"DistributionFittingResults_P2Proc.xls")
F.PrintStatisticalMeasures(P2_Proc, "StatisticalMeasuresResults_P2Proc.xls")
CMSD_example(dictProc,dictScrap) #Print the CMSD document, calling the CMSD_example method with arguments the dictProc and dictScrap dictionaries
JSON_example(dictProc,dictScrap) #Print the JSON file, calling the JSON_example method
#calls ManPy main script with the input
simulationOutput=ManPyMain.main(input_data=str((JSON_example(dictProc,dictScrap))))
# save the simulation output
jsonFile = open('ManPyOutput.json',"w") #It opens the JSON file
......
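The repeated per-station lookups, ReplaceWithZero calls, and ks_test fits above (P1 through P11) can also be written as loops over the station names. A sketch that assumes the objects B (HandleMissingValues), C (BasicStatisticalMeasures), D (DataManagement), E (DistFittest) and the two input dictionaries are defined exactly as in the script above:

# Sketch: build dictScrap and dictProc for all stations in two passes instead of per-station lines.
stations = ['P%d' % i for i in range(1, 12)]                                   # P1 .. P11

scrapLists = dict((name, B.ReplaceWithZero(ScrapQuantity.get(name, []))) for name in stations)
listScrap = D.round([C.mean(scrapLists[name]) for name in stations])           # rounded mean scrap per station
dictScrap = dict(zip(stations, listScrap))

dictProc = dict((name, E.ks_test(ProcessingTimes.get(name, []))) for name in stations)   # fitted distributions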
{
"capacity_by_project_spreadsheet": [
[
"Project Name",
"Sequence",
"Capacity Requirements"
],
[
null,
null,
null
]
],
"capacity_by_station_spreadsheet": [
[
"Machine",
"Day 0",
"Day 1",
"Day 2",
"Day 3",
null
],
[
null,
null,
null,
null,
null,
null
]
],
"edges": {
"con_10": [
"Q1",
"St1",
{}
],
"con_15": [
"Q1",
"St2",
{}
],
"con_20": [
"St1",
"E1",
{}
],
"con_25": [
"St2",
"E1",
{}
],
"con_5": [
"S1",
"Q1",
{}
]
},
"general": {
"confidenceLevel": 0.95,
"currentDate": "2014/06/16",
"maxSimTime": 1000,
"numberOfReplications": 5,
"processTimeout": 10,
"seed": "",
"trace": "No"
},
"nodes": {
"E1": {
"_class": "Dream.Exit",
"element_id": "DreamNode_5",
"name": "Exit"
},
"Q1": {
"_class": "Dream.Queue",
"capacity": 1,
"element_id": "DreamNode_2",
"name": "Queue",
"schedulingRule": "FIFO"
},
"S1": {
"_class": "Dream.BatchSource",
"batchNumberOfUnits": 80,
"element_id": "DreamNode_1",
"entity": "Dream.Batch",
"interarrivalTime": {
"distributionType": "Fixed",
"mean": 1
},
"name": "Source"
},
"St1": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_3",
"failures": {
},
"name": "Milling1",
"processingTime": {
"distributionType": "Fixed",
"max": "",
"mean": 0.75,
"min": "",
"stdev": ""
}
},
"St2": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_4",
"failures": {
},
"name": "Milling2",
"processingTime": {
"distributionType": "Fixed",
"max": "",
"mean": 0.75,
"min": "",
"stdev": ""
}
}
},
"preference": {
"coordinates": {
"E1": {
"left": 0.838470725910555,
"top": 0.5011550303652008
},
"Q1": {
"left": 0.28592454969899506,
"top": 0.4898081240173095
},
"S1": {
"left": 0.053083038762682624,
"top": 0.48224351978538194
},
"St1": {
"left": 0.5199724933344594,
"top": 0.22882927801580868
},
"St2": {
"left": 0.5157499788874278,
"top": 0.7243108552070638
}
}
},
"shift_spreadsheet": [
[
"Day",
"Machines",
"Start",
"End"
],
[
null,
null,
null,
null
]
],
"wip_part_spreadsheet": [
[
"Order ID",
"Due Date",
"Priority",
"Project Manager",
"Part",
"Part Type",
"Sequence",
"Processing Times",
"Prerequisites Parts"
],
[
null,
null,
null,
null,
null,
null,
null,
null,
null
]
]
}
\ No newline at end of file
'''
Created on 20 Jun 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
from DistributionFitting import DistFittest
from DistributionFitting import Distributions
from ImportExceldata import Import_Excel
from ExcelOutput import Output
from ReplaceMissingValues import HandleMissingValues
import xlrd
import json
import dream.simulation.LineGenerationJSON as ManPyMain #import ManPy main JSON script
#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('inputData.xls')
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[0] #Define the worksheet with the Processing times data
inputData = Import_Excel() #Call the Python object Import_Excel
ProcessingTimes = inputData.Input_data(worksheet_ProcessingTimes, workbook) #Create the Processing Times dictionary with keys the machines M1 and M2 and values the processing time data
##Get from the above dictionaries the M1 key and define the following lists with data
M1_ProcTime = ProcessingTimes.get('M1',[])
M2_ProcTime = ProcessingTimes.get('M2',[])
#Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
misValues =HandleMissingValues()
M1_ProcTime = misValues.ReplaceWithMean(M1_ProcTime)
M2_ProcTime = misValues.ReplaceWithMean(M2_ProcTime)
MLE = Distributions() #Call the Distributions object (Maximum Likelihood Estimation - MLE)
KS = DistFittest() #Call the DistFittest object (Kolmogorov-Smirnov test)
M1ProcTime_dist = KS.ks_test(M1_ProcTime)
M2ProcTime_dist = MLE.Normal_distrfit(M2_ProcTime)
#======================= Output preparation: output the updated values in the JSON file of this example ================================#
jsonFile = open('JSON_TwoParallelStations.json','r') #It opens the JSON file
data = json.load(jsonFile) #It loads the file
jsonFile.close()
nodes = data.get('nodes',[]) #It creates a variable that holds the 'nodes' dictionary
for element in nodes:
    processingTime = nodes[element].get('processingTime',{})   #It creates a variable that gets the element attribute 'processingTime'
    if element == 'St1':
        nodes['St1']['processingTime'] = M1ProcTime_dist    #If the element is 'St1', assign the distribution fitted to the M1 data
    elif element == 'St2':
        nodes['St2']['processingTime'] = M2ProcTime_dist    #If the element is 'St2', assign the distribution fitted to the M2 data
jsonFile = open('JSON_ParallelStations_Output.json',"w") #It opens the JSON file
jsonFile.write(json.dumps(data, indent=True)) #It writes the updated data to the JSON file
jsonFile.close() #It closes the file
#=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
export=Output()
export.PrintStatisticalMeasures(M1_ProcTime,'M1_ProcTime_StatResults.xls')
export.PrintStatisticalMeasures(M2_ProcTime,'M2_ProcTime_StatResults.xls')
export.PrintDistributionFit(M1_ProcTime,'M1_ProcTime_DistFitResults.xls')
export.PrintDistributionFit(M2_ProcTime,'M2_ProcTime_DistFitResults.xls')
#calls ManPy main script with the input
simulationOutput=ManPyMain.main(input_data=json.dumps(data))
# save the simulation output
jsonFile = open('ManPyOutput.json',"w") #It opens the JSON file
jsonFile.write(simulationOutput) #It writes the updated data to the JSON file
jsonFile.close() #It closes the file
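As a quick check on the saved result, the file written above can be parsed back; this only assumes that ManPy returned valid JSON:

# Sketch: re-open the saved simulation output and list its top-level keys.
import json

with open('ManPyOutput.json', 'r') as jsonFile:
    output = json.load(jsonFile)
print(sorted(output.keys()))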