Commit 2a7fed31 authored by Roque Porchetto

erp5_wendelin_telecom_ingestion: minor fix in pending file metadata and logging references besides ids
parent f10ce0bb
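
Every hunk below applies the same pattern: log lines that previously reported only a document's catalog id now also report its reference, since split ingestions share one reference while each chunk gets its own id. A minimal before/after sketch of the pattern, assuming the usual ERP5 script bindings (a context.logEntry helper and a catalog document exposing getId/getReference):

    # Before: only the per-chunk id was logged.
    context.logEntry("Data Ingestion %s stopped." % data_ingestion.getId())
    # After: the stable reference is logged besides the id.
    context.logEntry("Data Ingestion %s stopped. Reference: %s."
                     % (data_ingestion.getId(), data_ingestion.getReference()))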
@@ -5,7 +5,7 @@ log("Data_chunk size: %s" % str(len(data_chunk)))
 decoded = base64.b64decode(data_chunk)
 log("Decoded data_chunk size: %s" % str(len(decoded)))
-log("Appending to data stream: %s." % data_stream)
+log("Appending to data stream: %s - reference: %s" % (data_stream, data_stream.getReference()))
 data_stream.appendData(decoded)
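
The append path in this hunk is a plain base64 round trip. A self-contained sketch of the decoding step (appendData on a Data Stream is as used above; the payload here is made up):

    import base64

    payload = b"\x00\x01 raw chunk bytes"   # hypothetical payload
    data_chunk = base64.b64encode(payload)  # what the caller ships
    decoded = base64.b64decode(data_chunk)  # what the script appends
    assert decoded == payload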
@@ -37,7 +37,7 @@ for line_data_ingestion in portal_catalog(**query_dict):
       destination_project = data_ingestion.getDestinationProject())
     context.logEntry("Data Analyisis created for Data Ingestion %s (ID: %s)" % (str(data_ingestion.getReference()), data_analysis.getId()))
   except:
-    context.logEntry("[ERROR] Error creating Data Analysis for Data Ingestion '%s'. Script returned" % data_ingestion.getId())
+    context.logEntry("[ERROR] Error creating Data Analysis for Data Ingestion '%s' (ID: %s). Script returned" % (str(data_ingestion.getReference()), data_analysis.getId()))
     return # Data Analysis was already created
   # create input and output lines
@@ -70,7 +70,7 @@ for line_data_ingestion in portal_catalog(**query_dict):
         aggregate_set.update(related_line.getAggregateSet())
         related_line.getParentValue().deliver()
         log("DATA INGESTION DELIVERED")
-        context.logEntry("Data Ingestion '%s' delivered." % data_ingestion.getId())
+        context.logEntry("Data Ingestion '%s' delivered. (ID: %s)" % (str(data_ingestion.getReference()), data_analysis.getId()))
       else:
         # it is an output line
         # create new item based on item_type: data array, stream, descriptor, etc.
@@ -101,4 +101,4 @@ for line_data_ingestion in portal_catalog(**query_dict):
     data_analysis.plan()
     log("DATA ANALYSIS PLANNED")
   except Exception as e:
-    context.logEntry("[ERROR] Error creating Data Analysis for Data Ingestion '%s': %s" % (data_ingestion.getId(), str(e)))
+    context.logEntry("[ERROR] Error creating Data Analysis for Data Ingestion '%s' (ID: %s): %s" % (data_ingestion.getReference(), data_ingestion.getId(), str(e)))
@@ -33,7 +33,7 @@ for data_ingestion in portal_catalog(portal_type = "Data Ingestion",
   related_split_ingestions = portal_catalog(portal_type = "Data Ingestion",
                                             reference = data_ingestion.getReference())
   if len(related_split_ingestions) == 1:
-    context.logEntry("Started single ingestion (not split) found: " + data_ingestion.getId())
+    context.logEntry("Started single ingestion (not split) found: %s - reference: %s" % (data_ingestion.getId(), data_ingestion.getReference()))
     data_stream = portal_catalog.getResultValue(
       portal_type = 'Data Stream',
       reference = data_ingestion.getReference())
@@ -44,14 +44,14 @@ for data_ingestion in portal_catalog(portal_type = "Data Ingestion",
       data_stream.validate()
     if data_ingestion.getSimulationState() == "started":
       data_ingestion.stop()
-      context.logEntry("Data Ingestion %s stopped." % data_ingestion.getId())
+      context.logEntry("Data Ingestion %s stopped. Reference: %s." % (data_ingestion.getId(), data_ingestion.getReference()))
 
 # append split ingestions
 for data_ingestion in portal_catalog(portal_type = "Data Ingestion",
                                      simulation_state = "started",
                                      id = "%001"):
   if not data_ingestion.getReference().endswith("_invalid"):
-    context.logEntry("Started split ingestion found: " + data_ingestion.getId())
+    context.logEntry("Started split ingestion found: %s - reference: %s" % (data_ingestion.getId(), data_ingestion.getReference()))
     try:
       last_data_stream_id = ""
       query = Query(portal_type="Data Stream", reference=data_ingestion.getReference(), validation_state="draft")
@@ -85,7 +85,7 @@ for data_ingestion in portal_catalog(portal_type = "Data Ingestion",
       else:
         ingestion.setReference(ingestion.getReference() + "_invalid")
         ingestion.deliver()
-      context.logEntry("Chunks of split ingestion where appended into Data Stream %s. Corresponding Data Ingestion stopped." % full_data_stream.getId())
+      context.logEntry("Chunks of split ingestion where appended into Data Stream %s. Reference: %s. Corresponding Data Ingestion stopped." % (full_data_stream.getId(), full_data_stream.getReference()))
     except Exception as e:
-      context.logEntry("ERROR appending split data streams for ingestion: %s" % data_ingestion.getReference())
+      context.logEntry("ERROR appending split data streams for ingestion: %s - reference: %s." % (data_ingestion.getId(), data_ingestion.getReference()))
       context.logEntry(e)
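
The id = "%001" filter above uses the catalog's SQL LIKE semantics, % matching any prefix, so the loop picks up the first chunk of each split ingestion; sibling chunks share its reference. A hedged sketch of gathering the remaining draft chunks in id order (sort_on is the standard catalog sort parameter; the concatenation shown simplifies the script rather than reproducing it):

    query = Query(portal_type="Data Stream",
                  reference=data_ingestion.getReference(),
                  validation_state="draft")
    full_data_stream = None
    for chunk in portal_catalog(query=query, sort_on=(("id", "ascending"),)):
      if full_data_stream is None:
        full_data_stream = chunk                      # first chunk keeps the data
      else:
        full_data_stream.appendData(chunk.getData())  # append the rest in order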
@@ -55,7 +55,6 @@ try:
                                  validation_state = 'validated')]
 
   # create a new data ingestion
-  context.logEntry("Data Ingestion created. ID: %s" % data_ingestion_id)
   data_ingestion = portal.data_ingestion_module.newContent(
     id = data_ingestion_id,
     portal_type = "Data Ingestion",
@@ -63,6 +62,7 @@ try:
     reference = data_ingestion_reference,
     start_date = now,
     specialise_value_list = specialise_value_list)
+  context.logEntry("Data Ingestion created. ID: %s - Reference: %s" % (data_ingestion_id, data_ingestion_reference))
 
   property_list = ["title",
                    "source",
@@ -111,7 +111,7 @@ try:
       title = "%s%s" % (data_ingestion.getTitle(), "."+extension if extension != "none" else ""),
       reference = data_ingestion_reference)
-    context.logEntry("Data Stream created. ID: %s" % data_stream.getId())
+    context.logEntry("Data Stream created. ID: %s - Reference: %s" % (data_stream.getId(), data_ingestion_reference))
     input_line.setDefaultAggregateValue(data_stream)
 
     if dataset_reference is not None:
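
The title expression in this hunk appends the file extension only when one exists; "none" is the sentinel the script uses for extension-less files. A standalone check of the expression:

    def make_title(base_title, extension):
      # Mirrors the hunk's expression: append "." + extension unless the
      # sentinel string "none" was supplied.
      return "%s%s" % (base_title, "." + extension if extension != "none" else "")

    assert make_title("sample", "csv") == "sample.csv"
    assert make_title("sample", "none") == "sample"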
@@ -14,6 +14,18 @@ ing_dict = {
 ingestions = portal_catalog(**ing_dict)
 if len(ingestions) == 1:
   data_ingestion = ingestions[0]
+elif len(ingestions) == 0:
+  ing_dict = {
+    "simulation_state": "started",
+    "portal_type": "Data Ingestion",
+    "id": "%END",
+    "reference": reference}
+  single_started_ingestions = portal_catalog(**ing_dict)
+  if len(single_started_ingestions) == 1:
+    return '{"metadata":"Metadata not ready yet, please wait some minutes."}'
+  else:
+    context.logEntry("ERROR getting Data Ingestion of file %s. The file does not have a unique data ingestion in correct state." % reference)
+    return '{"metadata":"No metadata available for this type of file yet"}'
 else:
   context.logEntry("ERROR getting Data Ingestion of file %s. The file does not have a unique data ingestion in correct state." % reference)
   return '{"metadata":"No metadata available for this type of file yet"}'
@@ -36,7 +48,7 @@ try:
     data_descriptor = context.restrictedTraverse(url)
   except Exception as e:
     # backward compatibility
-    context.logEntry("ERROR while looking for data descriptor with id %s : %s" % (str(data_ingestion.getId()), str(e)))
+    context.logEntry("ERROR while looking for data descriptor with id %s (reference: %s) : %s" % (str(data_ingestion.getId()), data_ingestion.getReference(), str(e)))
     query = Query(portal_type="Data Descriptor")
     data_descriptor = None
     for document in portal_catalog(query=query):
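
restrictedTraverse resolves a slash-separated path from the current context with Zope security checks and raises when the path is stale; the except branch above turns that into a catalog scan over Data Descriptors. A sketch of the pattern with a hypothetical path:

    try:
      # url is built earlier in the script; this literal path is illustrative only.
      data_descriptor = context.restrictedTraverse("data_descriptor_module/example-id")
    except Exception:
      data_descriptor = None  # fall back to scanning the catalog, as above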