Commit c0170922 authored by Martin Manchev, committed by Ivan Tyagov

Revert "Changes in 'erp5_wendelin' bt ..."

This reverts commit e6f7b0097f7c27109fd61c1692c3588b625bd90e.
parent 8ca6ce99
@@ -124,7 +124,7 @@ class DataArray(BigFile):
     zarray = self.getArray()
     if zarray is not None:
       return zarray.dtype
   security.declareProtected(Permissions.AccessContentsInformation, 'getArrayDtypeNames')
   def getArrayDtypeNames(self):
     """
@@ -142,7 +142,7 @@ class DataArray(BigFile):
     self.getArray().dtype.names = names
   security.declareProtected(Permissions.View, 'index_html')
-  def index_html(self, REQUEST, RESPONSE, format=_MARKER, inline=_MARKER, **kw):
+  def index_html(self, REQUEST, RESPONSE, format=_MARKER, inline=_MARKER, **kw): # pylint:disable=redefined-builtin
     """
       Support streaming
     """
@@ -174,15 +174,15 @@ class DataArray(BigFile):
       RESPONSE.write(self.getArray()[tuple(slice_index_list)].tobytes())
       return True
-    range = REQUEST.get_header('Range', None)
+    http_range = REQUEST.get_header('Range', None)
     request_range = REQUEST.get_header('Request-Range', None)
     if request_range is not None:
       # Netscape 2 through 4 and MSIE 3 implement a draft version
       # Later on, we need to serve a different mime-type as well.
-      range = request_range
+      http_range = request_range
     if_range = REQUEST.get_header('If-Range', None)
-    if range is not None:
-      ranges = HTTPRangeSupport.parseRange(range)
+    if http_range is not None:
+      ranges = HTTPRangeSupport.parseRange(http_range)
       array = self.getArray()
@@ -200,7 +200,7 @@ class DataArray(BigFile):
           # Date
           date = if_range.split( ';')[0]
           try: mod_since=long(DateTime(date).timeTime())
-          except: mod_since=None
+          except Exception: mod_since=None
           if mod_since is not None:
             last_mod = self._data_mtime()
             if last_mod is None:
@@ -291,3 +291,4 @@ class DataArray(BigFile):
     data = '{}\r\n--{}--\r\n'.format(data, boundary)
     RESPONSE.setBody(data, lock=True)
     return True
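The rename from 'range' to 'http_range' restored above is what clears the redefined-builtin warnings removed in the next file. A minimal sketch, not from this commit, of the failure mode pylint guards against:

def handler_with_shadowing(header_value):
  range = header_value  # shadows the builtin for the rest of the function
  try:
    list(range(3))      # TypeError: 'str' object is not callable
  except TypeError as e:
    return str(e)

def handler_renamed(header_value):
  http_range = header_value  # the builtin range stays usable
  return list(range(3))      # [0, 1, 2]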
@@ -39,11 +39,7 @@
       <item>
         <key> <string>text_content_warning_message</string> </key>
         <value>
-          <tuple>
-            <string>W:145, 42: Redefining built-in \'format\' (redefined-builtin)</string>
-            <string>W:177,  4: Redefining built-in \'range\' (redefined-builtin)</string>
-            <string>W:203, 10: No exception type(s) specified (bare-except)</string>
-          </tuple>
+          <tuple/>
         </value>
       </item>
       <item>
...
@@ -73,7 +73,7 @@ class DataArrayView(DataArray):
     Data Array like view on one or multiple Data Arrays
   """
-  def initArray(self, shape, dtype):
+  def initArray(self, shape, dimensional_type):
     """
       Not Implemented.
     """
...
@@ -126,10 +126,10 @@ class DataBucketStream(Document):
       PropertySheet.SortIndex
     )
-  def __init__(self, id, **kw):
+  def __init__(self, identifier, **kw):
     self.initBucketTree()
     self.initIndexTree()
-    Document.__init__(self, id, **kw)
+    Document.__init__(self, identifier, **kw)
   def __len__(self):
     return len(self._tree)
@@ -192,7 +192,7 @@ class DataBucketStream(Document):
     except ValueError:
       return None
-  def _getOb(self, id, *args, **kw):
+  def _getOb(self, identifier, *args, **kw):
     return None
   def getBucketByKey(self, key=None):
...
@@ -6,12 +6,6 @@
   </pickle>
   <pickle>
     <dictionary>
-      <item>
-        <key> <string>_recorded_property_dict</string> </key>
-        <value>
-          <persistent> <string encoding="base64">AAAAAAAAAAI=</string> </persistent>
-        </value>
-      </item>
       <item>
         <key> <string>default_reference</string> </key>
         <value> <string>DataBucketStream</string> </value>
@@ -45,10 +39,7 @@
       <item>
         <key> <string>text_content_warning_message</string> </key>
         <value>
-          <tuple>
-            <string>W:124, 21: Redefining built-in \'id\' (redefined-builtin)</string>
-            <string>W:180, 19: Redefining built-in \'id\' (redefined-builtin)</string>
-          </tuple>
+          <tuple/>
         </value>
       </item>
       <item>
@@ -58,28 +49,13 @@
       <item>
         <key> <string>workflow_history</string> </key>
         <value>
-          <persistent> <string encoding="base64">AAAAAAAAAAM=</string> </persistent>
+          <persistent> <string encoding="base64">AAAAAAAAAAI=</string> </persistent>
         </value>
       </item>
     </dictionary>
   </pickle>
 </record>
 <record id="2" aka="AAAAAAAAAAI=">
-  <pickle>
-    <global name="PersistentMapping" module="Persistence.mapping"/>
-  </pickle>
-  <pickle>
-    <dictionary>
-      <item>
-        <key> <string>data</string> </key>
-        <value>
-          <dictionary/>
-        </value>
-      </item>
-    </dictionary>
-  </pickle>
-</record>
-<record id="3" aka="AAAAAAAAAAM=">
   <pickle>
     <global name="PersistentMapping" module="Persistence.mapping"/>
   </pickle>
@@ -92,7 +68,7 @@
       <item>
         <key> <string>component_validation_workflow</string> </key>
         <value>
-          <persistent> <string encoding="base64">AAAAAAAAAAQ=</string> </persistent>
+          <persistent> <string encoding="base64">AAAAAAAAAAM=</string> </persistent>
         </value>
       </item>
     </dictionary>
@@ -101,7 +77,7 @@
     </dictionary>
   </pickle>
 </record>
-<record id="4" aka="AAAAAAAAAAQ=">
+<record id="3" aka="AAAAAAAAAAM=">
   <pickle>
     <global name="WorkflowHistoryList" module="Products.ERP5Type.Workflow"/>
   </pickle>
...
"""
Get a chunks of data from a Data Stream, convert it to numpy array
and return proper start and end for next record.
This script assumes stream has following format.
{dict1}{dict2}
{dict3}
And it's possible that last chunk in its last line is incomplete dictionary
thus correction needed.
"""
import json
chunk_text = ''.join(chunk_list)
#context.log('%s %s %s' %(start, end, len(chunk_text)))
# remove last line as it might be uncomplete and correct start and end offsets
line_list = chunk_text.split('\n')
last_line = line_list[-1]
line_list.pop(-1)
for line in line_list:
# must have proper format
assert line.endswith('}')
assert line.startswith('{')
# fix ' -> "
line = line.replace("'", '"')
if line.count('{') > 1:
# multiple concatenated dictionaries in one line, bad format ignore for now
pass
else:
d = json.loads(line)
# xxx: save this value as a Data Array identified by data_array_reference
# start and enf offsets may not match existing record structure in stream
# thus corrections in start and end offsets is needed thus we
# return transformed values which is just last line length
start -= len(last_line)
end -= len(last_line)
return start, end
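A hypothetical, self-contained illustration of the offset correction performed above; the sample records and offsets are invented, and the script's variable names are reused:

chunk_list = ['{"a": 1}\n{"b": 2}\n', '{"c": 3}\n{"d"']  # tail is a partial record
start, end = 100, 100 + len(''.join(chunk_list))
last_line = ''.join(chunk_list).split('\n')[-1]  # '{"d"' is incomplete
# both offsets are pulled back by the partial line's length, so the next
# pass re-reads '{"d"' together with the rest of that record
start -= len(last_line)
end -= len(last_line)
print(start, end)  # 96 127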
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="_reconstructor" module="copy_reg"/>
</klass>
<tuple>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
<global name="object" module="__builtin__"/>
<none/>
</tuple>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>chunk_list, start, end, data_array_reference=None</string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>DataStream_convertoNumpyArray</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
 from DateTime import DateTime
 from erp5.component.module.DateUtils import addToDate
-from Products.ZSQLCatalog.SQLCatalog import Query, SimpleQuery
+from Products.ZSQLCatalog.SQLCatalog import Query
 portal_catalog = context.getPortalObject().portal_catalog
@@ -55,6 +55,6 @@ if len(parent_uid_list) != 0:
     # we need to wait until there are 2 batches until we can stop it
     # TODO: this should be implemented in transformation, not here
     continue
   data_ingestion.setStopDate(DateTime())
   data_ingestion.stop()
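For context, a hedged sketch, not from this commit, of how the remaining Query import is typically used against portal_catalog in ERP5 scripts; the portal type and workflow states below are illustrative:

from Products.ZSQLCatalog.SQLCatalog import Query

portal_catalog = context.getPortalObject().portal_catalog
# Query allows one criterion to match several values; here it selects
# Data Ingestions whose simulation_state is either started or stopped.
data_ingestion_list = portal_catalog(
  portal_type='Data Ingestion',
  query=Query(simulation_state=('started', 'stopped')),
)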