Commit d63569f9
authored Jan 15, 2024 by Ivan Tyagov
Moved tests to their bt5s.
parent ae45e8ac
Showing 2 changed files with 931 additions and 15 deletions
bt5/erp5_wendelin/TestTemplateItem/portal_components/test.erp5.testWendelin.py  +1 -9
bt5/erp5_wendelin/TestTemplateItem/portal_components/test.erp5.testWendelin.xml  +930 -6
bt5/erp5_wendelin/TestTemplateItem/portal_components/test.erp5.testWendelin.py
...
@@ -162,7 +162,7 @@ class Test(ERP5TypeTestCase):
     real_data = '\n'.join(number_string_list)
 
     data_stream = portal.data_stream_module.newContent(
       portal_type = 'Data Stream',
-f      reference = reference)
+      reference = reference)
     data_stream.appendData(real_data)
     data_stream.validate()
...
@@ -295,16 +295,8 @@ class Test(ERP5TypeTestCase):
     # the default json ingestion is used in HowTo / Docs
     self.assertNotEqual(None,
       getattr(self.portal.portal_ingestion_policies, "default", None))
-    self.assertNotEqual(None,
-      getattr(self.portal.portal_ingestion_policies, "default_ebulk", None))
-    self.assertNotEqual(None,
-      getattr(self.portal.portal_ingestion_policies, "default_mqtt", None))
     self.assertNotEqual(None,
       getattr(self.portal.data_supply_module, "default", None))
-    self.assertNotEqual(None,
-      getattr(self.portal.data_supply_module, "default_ebulk", None))
-    self.assertNotEqual(None,
-      getattr(self.portal.data_supply_module, "default_mqtt", None))
 
   def test_07_LinkedDataStreamList(self):
     """
...
bt5/erp5_wendelin/TestTemplateItem/portal_components/test.erp5.testWendelin.xml
...
@@ -6,6 +6,12 @@
   </pickle>
   <pickle>
     <dictionary>
+      <item>
+          <key> <string>_recorded_property_dict</string> </key>
+          <value>
+            <persistent> <string encoding="base64">AAAAAAAAAAI=</string> </persistent>
+          </value>
+      </item>
       <item>
           <key> <string>default_reference</string> </key>
           <value> <string>testWendelin</string> </value>
...
@@ -33,7 +39,9 @@
       <item>
           <key> <string>text_content_error_message</string> </key>
           <value>
-            <tuple/>
+            <tuple>
+              <string>E:166, 0: invalid syntax (syntax-error)</string>
+            </tuple>
           </value>
       </item>
       <item>
...
@@ -49,13 +57,929 @@
       <item>
           <key> <string>workflow_history</string> </key>
           <value>
-            <persistent> <string encoding="base64">AAAAAAAAAAI=</string> </persistent>
+            <persistent> <string encoding="base64">AAAAAAAAAAM=</string> </persistent>
          </value>
      </item>
    </dictionary>
  </pickle>
 </record>
 <record id="2" aka="AAAAAAAAAAI=">
+  <pickle>
+    <global name="PersistentMapping" module="Persistence.mapping"/>
+  </pickle>
+  <pickle>
+    <dictionary>
+      <item>
+          <key> <string>data</string> </key>
+          <value>
+            <dictionary>
+              <item>
+                  <key> <string>text_content</string> </key>
+                  <value> <string encoding="cdata"><![CDATA[
##############################################################################\n
#\n
# Copyright (c) 2002-2015 Nexedi SA and Contributors. All Rights Reserved.\n
#\n
# This program is free software: you can Use, Study, Modify and Redistribute\n
# it under the terms of the GNU General Public License version 3, or (at your\n
# option) any later version, as published by the Free Software Foundation.\n
#\n
# You can also Link and Combine this program with other software covered by\n
# the terms of any of the Free Software licenses or any of the Open Source\n
# Initiative approved licenses and Convey the resulting work. Corresponding\n
# source of such a combination shall include the source code for all other\n
# software used.\n
#\n
# This program is distributed WITHOUT ANY WARRANTY; without even the implied\n
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n
#\n
# See COPYING file for full licensing terms.\n
# See https://www.nexedi.com/licensing for rationale and options.\n
#\n
##############################################################################\n
\n
from cStringIO import StringIO\n
import base64\n
import binascii\n
from httplib import NO_CONTENT\n
import msgpack\n
import numpy as np\n
import string\n
import random\n
import struct\n
import textwrap\n
import urllib\n
import uuid\n
from zExceptions import BadRequest\n
\n
from Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\n
from Products.ERP5Type.tests.utils import createZODBPythonScript, removeZODBPythonScript\n
from wendelin.bigarray.array_zodb import ZBigArray\n
\n
from App.version_txt import getZopeVersion\n
if getZopeVersion() < (4, ): # BBB Zope2\n
# Zope set http status code 204 for empty response, but\n
# on Zope 2 this is not correctly reflected in ERP5TypeTestCase.publish,\n
# for such responses the status is set to 200 (but for "real" requests\n
# it is set to 204, this only affected the testing)\n
NO_CONTENT = 200\n
\n
\n
def getRandomString():\n
return \'test_%s\' %\'\'.join([random.choice(string.ascii_letters + string.digits) \\\n
for _ in xrange(32)])\n
\n
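# e.g. chunks([0, 1, 2, 3, 4], 2) yields [0, 1], [2, 3] and finally [4]\n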
def chunks(l, n):\n
"""Yield successive n-sized chunks from l."""\n
for i in xrange(0, len(l), n):\n
yield l[i:i+n]\n
\n
class Test(ERP5TypeTestCase):\n
"""\n
Wendelin Test\n
"""\n
\n
def getTitle(self):\n
return "Wendelin Test"\n
\n
def createAndRunScript(self, code, expected=None):\n
# we do not care the script name for security test thus use uuid1\n
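# e.g. self.createAndRunScript(\'return 1 + 1\', 2) creates the script, calls it and asserts on its return value\n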
name = str(uuid.uuid1())\n
script_container = self.portal.portal_skins.custom\n
try:\n
createZODBPythonScript(script_container, name, \'**kw\', textwrap.dedent(code))\n
self.assertEqual(getattr(self.portal, name)(), expected)\n
finally:\n
removeZODBPythonScript(script_container, name)\n
\n
def test_01_IngestionFromFluentd(self, old_fluentd=False):\n
"""\n
Test ingestion using a POST Request containing a msgpack encoded message\n
simulating input from fluentd.\n
"""\n
portal = self.portal\n
\n
# add brand new ingestion\n
reference = getRandomString()\n
ingestion_policy, data_supply, _ = portal.portal_ingestion_policies.IngestionPolicyTool_addIngestionPolicy(\n
reference = reference,\n
title = reference,\n
batch_mode=1)\n
self.tic()\n
\n
number_string_list = []\n
for my_list in list(chunks(range(0, 100001), 10)):\n
number_string_list.append(\',\'.join([str(x) for x in my_list]))\n
real_data = \'\\n\'.join(number_string_list)\n
# make sure real_data tail is also a full line\n
real_data += \'\\n\'\n
\n
# simulate fluentd\n
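# (the body below mirrors what a fluentd output plugin would POST: one msgpack-encoded [timestamp, data] pair, with 0 as a dummy timestamp)\n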
body = msgpack.packb([0, real_data], use_bin_type=True)\n
if old_fluentd:\n
env = {\'CONTENT_TYPE\': \'application/x-www-form-urlencoded\'}\n
body = urllib.urlencode({\'data_chunk\': body})\n
else:\n
env = {\'CONTENT_TYPE\': \'application/octet-stream\'}\n
path = ingestion_policy.getPath() + \'/ingest?reference=\' + reference\n
publish_kw = dict(user=\'ERP5TypeTestCase\', env=env,\n
request_method=\'POST\', stdin=StringIO(body))\n
response = self.publish(path, **publish_kw)\n
self.assertEqual(NO_CONTENT, response.getStatus())\n
# at every ingestion if no specialised Data Ingestion exists it is created\n
# thus it is needed to wait for server side activities to be processed\n
self.tic()\n
\n
# get related Data ingestion\n
data_ingestion = data_supply.Base_getRelatedObjectList(portal_type=\'Data Ingestion\')[0]\n
self.assertNotEqual(None, data_ingestion)\n
data_ingestion_line = [x for x in data_ingestion.objectValues() if x.getReference() == \'out_stream\'][0]\n
\n
data_stream = data_ingestion_line.getAggregateValue()\n
self.assertEqual(\'Data Stream\', data_stream.getPortalType())\n
\n
data_stream_data = data_stream.getData()\n
self.assertEqual(real_data, data_stream_data)\n
\n
# try sample transformation\n
data_array = portal.data_array_module.newContent(\n
portal_type = \'Data Array\',\n
reference = reference)\n
data_array.validate()\n
self.tic()\n
\n
data_stream.DataStream_transform(\\\n
chunk_length = 10450, \\\n
transform_script_id = \'DataStream_copyCSVToDataArray\',\n
data_array_reference = reference)\n
self.tic()\n
\n
# test that extracted array contains same values as input CSV\n
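# (np.arange(100001) has mean 50000.0, so the average comparison is a cheap first check before the exact array_equal one)\n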
zarray = data_array.getArray()\n
self.assertEqual(np.average(zarray), np.average(np.arange(100001)))\n
self.assertTrue(np.array_equal(zarray, np.arange(100001)))\n
\n
# clean up\n
data_array.invalidate()\n
data_stream.setData(None)\n
self.tic()\n
\n
def test_01_1_IngestionFromOldFluentd(self):\n
self.test_01_IngestionFromFluentd(True)\n
\n
def test_01_02_ParallelTransformation(self):\n
"""\n
test parallel execution.\n
Note: determining row length is important in this case\n
"""\n
portal = self.portal\n
reference = getRandomString()\n
\n
row = \',\'.join([\'%s\' %x for x in range(1000)])\n
number_string_list = [row]*20\n
real_data = \'\\n\'.join(number_string_list)\n
\n
data_stream = portal.data_stream_module.newContent(\n
portal_type = \'Data Stream\',\n
reference = reference)\n
data_stream.appendData(real_data)\n
data_stream.validate()\n
data_array = portal.data_array_module.newContent(\n
portal_type = \'Data Array\',\n
reference = reference)\n
data_array.validate()\n
self.tic()\n
\n
data_stream.DataStream_transform(\\\n
chunk_length = len(row), \\\n
transform_script_id = \'DataStream_copyCSVToDataArray\',\n
data_array_reference = reference,\n
parallelize = 1)\n
\n
self.tic()\n
\n
def test_02_Examples(self):\n
"""\n
Test we can use python scientific libraries by using directly created\n
Wendelin examples.\n
"""\n
portal = self.portal\n
portal.game_of_life()\n
portal.game_of_life_out_of_core()\n
portal.game_of_life_out_of_core_activities()\n
\n
def test_03_DataArray(self):\n
"""\n
Test persistently saving a ZBig Array to a Data Array.\n
"""\n
data_array = self.portal.data_array_module.newContent( \\\n
portal_type = \'Data Array\')\n
self.assertEqual(None, data_array.getArray())\n
data_array.initArray((3, 3), np.uint8)\n
self.tic()\n
\n
# test array stored and we return ZBig Array instance\n
persistent_zbig_array = data_array.getArray()\n
self.assertEqual(ZBigArray, persistent_zbig_array.__class__)\n
\n
# try to resize its numpy "view" and check that persistent one is not saved\n
# as these are differerent objects\n
pure_numpy_array = persistent_zbig_array[:,:] # ZBigArray -> ndarray view of it\n
pure_numpy_array = np.resize(pure_numpy_array, (4, 4))\n
self.assertNotEquals(pure_numpy_array.shape, persistent_zbig_array.shape)\n
\n
# test copy numpy -> wendelin but first resize persistent one (add new one)\n
data_array.initArray((4, 4), np.uint8)\n
persistent_zbig_array = data_array.getArray()\n
new_array = np.arange(1,17).reshape((4,4))\n
persistent_zbig_array[:,:] = new_array\n
self.assertEquals(new_array.shape, persistent_zbig_array.shape)\n
self.assertTrue(np.array_equal(new_array, persistent_zbig_array))\n
\n
# test set element in zbig array\n
persistent_zbig_array[:2, 2] = 0\n
self.assertFalse(np.array_equal(new_array, persistent_zbig_array))\n
\n
# resize Zbig Array\n
persistent_zbig_array = np.resize(persistent_zbig_array, (100,100))\n
self.assertNotEquals(pure_numpy_array.shape, persistent_zbig_array.shape)\n
\n
# get array slice (fails)\n
data_array = self.portal.data_array_module.newContent( \\\n
portal_type = \'Data Array\')\n
shape = (1000,)\n
data_array.initArray(shape, np.uint8)\n
self.tic()\n
\n
persistent_zbig_array = data_array.getArray()\n
new_array = np.arange(1000)\n
new_array.resize(shape)\n
\n
self.assertEquals(new_array.shape, persistent_zbig_array.shape)\n
\n
persistent_zbig_array[:,] = new_array\n
self.tic()\n
\n
self.assertTrue(\n
np.array_equal(data_array.getArraySlice(0,100), \\\n
new_array[:100]))\n
\n
def test_04_DataBucket(self):\n
"""\n
Test data bucket\n
"""\n
bucket_stream = self.portal.data_stream_module.newContent( \\\n
portal_type = \'Data Bucket Stream\')\n
self.tic()\n
\n
self.assertEqual(0, len(bucket_stream))\n
\n
# test set and get\n
bin_string = "1"*100000\n
key = len(bucket_stream) + 1\n
bucket_stream.insertBucket(key, bin_string )\n
self.assertEqual(bin_string, bucket_stream.getBucketByKey(key))\n
\n
# test sequence\n
self.assertEqual(1, len(bucket_stream))\n
\n
# test delete bucket by key\n
bucket_stream.delBucketByKey(key)\n
self.assertEqual(0, len(bucket_stream))\n
\n
# set many buckets\n
for i in range(100):\n
bucket_stream.insertBucket(i, i*10000)\n
\n
self.assertEqual(100, len(bucket_stream))\n
self.assertEqual(range(100), bucket_stream.getKeyList())\n
\n
# test as sequence\n
bucket = bucket_stream.getBucketKeyItemSequenceByKey(start_key=10, count=1)[0]\n
self.assertEqual(100000, bucket[1].value)\n
\n
def test_05_DataAnalyses(self):\n
"""\n
Test data analyses\' default configuration.\n
By default we have no Data Analyses configured thus test is minimal.\n
"""\n
self.portal.Alarm_handleAnalysis()\n
self.tic()\n
\n
def test_06_DefaultWendelinConfigurationExistency(self):\n
"""\n
Test that nobody accidently removes needed by HowTo\'s default configurations.\n
"""\n
# the default json ingestion is used in HowTo / Docs\n
self.assertNotEqual(None,\n
getattr(self.portal.portal_ingestion_policies, "default", None))\n
self.assertNotEqual(None,\n
getattr(self.portal.portal_ingestion_policies, "default_ebulk", None))\n
self.assertNotEqual(None,\n
getattr(self.portal.portal_ingestion_policies, "default_mqtt", None))\n
self.assertNotEqual(None,\n
getattr(self.portal.data_supply_module, "default", None))\n
self.assertNotEqual(None,\n
getattr(self.portal.data_supply_module, "default_ebulk", None))\n
self.assertNotEqual(None,\n
getattr(self.portal.data_supply_module, "default_mqtt", None))\n
\n
def test_07_LinkedDataStreamList(self):\n
"""\n
Test linked Data Streams\n
"""\n
data_stream_1 = self.portal.data_stream_module.newContent(title = "Data Stream 1", \\\n
portal_type = "Data Stream")\n
data_stream_2 = self.portal.data_stream_module.newContent(title = "Data Stream 2", \\\n
portal_type = "Data Stream")\n
data_stream_3 = self.portal.data_stream_module.newContent(title = "Data Stream 3", \\\n
portal_type = "Data Stream")\n
data_stream_4 = self.portal.data_stream_module.newContent(title = "Data Stream 4", \\\n
portal_type = "Data Stream")\n
data_stream_5 = self.portal.data_stream_module.newContent(title = "Data Stream 5", \\\n
portal_type = "Data Stream")\n
\n
# test nothing linked\n
self.assertSameSet([], data_stream_2.getRecursiveSuccessorValueList())\n
self.assertSameSet([], data_stream_2.getRecursivePredecessorValueList())\n
\n
# set linked data streams (1 <--> 2 <--> 3 <--> 4 <--> 5)\n
data_stream_1.setSuccessorValue(data_stream_2)\n
data_stream_2.setSuccessorValue(data_stream_3)\n
data_stream_3.setSuccessorValue(data_stream_4)\n
data_stream_4.setSuccessorValue(data_stream_5)\n
\n
# set predecessor\n
data_stream_2.setPredecessorValue(data_stream_1)\n
data_stream_3.setPredecessorValue(data_stream_2)\n
data_stream_4.setPredecessorValue(data_stream_3)\n
data_stream_5.setPredecessorValue(data_stream_4)\n
\n
# test successor\n
self.assertSameSet(data_stream_2.getRecursiveSuccessorValueList(), \\\n
[data_stream_3, data_stream_4, data_stream_5])\n
self.assertSameSet(data_stream_5.getRecursiveSuccessorValueList(), \\\n
[])\n
\n
# test predecessor\n
self.assertSameSet(data_stream_1.getRecursivePredecessorValueList(), \\\n
[])\n
self.assertSameSet(data_stream_2.getRecursivePredecessorValueList(), \\\n
[data_stream_1])\n
self.assertSameSet(data_stream_5.getRecursivePredecessorValueList(), \\\n
[data_stream_4, data_stream_3, data_stream_2, data_stream_1])\n
\n
def test_08_ImportSklearn(self):\n
"""\n
Test import of Scikit-learn and minimal example of usage.\n
"""\n
\n
from sklearn.linear_model import LinearRegression\n
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n
\n
# y = 1 * x_0 + 2 * x_1 + 3\n
y = np.dot(X, np.array([1, 2])) + 3\n
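# so for x = [4, 10] the fitted model should predict 1*4 + 2*10 + 3 = 27\n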
\n
reg = LinearRegression().fit(X, y)\n
predicted = reg.predict(np.array([[4, 10]]))\n
self.assertEqual(predicted.all(),np.array([27.]).all())\n
\n
def test_09_IngestionFromFluentdStoreMsgpack(self, old_fluentd=False):\n
"""\n
Test ingestion using a POST Request containing a msgpack encoded message\n
simulating input from fluentd.\n
"""\n
from datetime import datetime\n
import time\n
\n
portal = self.portal\n
now = datetime.now()\n
\n
reference="test_sensor.test_product"\n
ingestion_policy = portal.portal_ingestion_policies[\'default\']\n
data_supply = portal.data_supply_module["default"]\n
\n
data_list = []\n
int_date = int(time.mktime(now.timetuple()))\n
real_data = []\n
\n
# create data for ingestion in [date, value] format\n
for x in range(0, 10001):\n
data_list = []\n
data_list = [int_date, x]\n
real_data.append(data_list)\n
int_date = int_date + 1000\n
\n
# simulate fluentd\n
body = msgpack.packb(real_data, use_bin_type=True)\n
env = {\'CONTENT_TYPE\': \'application/octet-stream\'}\n
\n
path = ingestion_policy.getPath() + \'/ingest?reference=\' + reference\n
publish_kw = dict(user=\'ERP5TypeTestCase\', env=env,\n
request_method=\'POST\', stdin=StringIO(body))\n
response = self.publish(path, **publish_kw)\n
\n
self.assertEqual(NO_CONTENT, response.getStatus())\n
\n
self.tic()\n
\n
# get related Data ingestion\n
data_ingestion = data_supply.Base_getRelatedObjectList(portal_type=\'Data Ingestion\')[0]\n
self.assertNotEqual(None, data_ingestion)\n
data_ingestion_line = [x for x in data_ingestion.objectValues() if x.getReference() == \'out_stream\'][0]\n
\n
data_stream = data_ingestion_line.getAggregateValue()\n
self.assertEqual(\'Data Stream\', data_stream.getPortalType())\n
\n
data_stream_data = data_stream.getData()\n
# body is msgpacked real data.\n
self.assertEqual(body, data_stream_data)\n
\n
# unpack data\n
start = 0\n
end = len(data_stream_data)\n
unpacked, end = data_stream.readMsgpackChunkList(start, end)\n
# compare unpacked data with real data\n
self.assertEqual([real_data], unpacked)\n
\n
# extract dates and compare with real dates\n
f = data_stream.extractDateTime\n
for i in range(0, len(unpacked[0])):\n
self.assertEqual(np.datetime64(real_data[i][0], \'s\'), f(unpacked[0][i][0]))\n
\n
# clean up\n
data_stream.setData(None)\n
self.tic()\n
\n
def _removeDocument(self, document_to_remove):\n
path = document_to_remove.getRelativeUrl()\n
container, _, object_id = path.rpartition(\'/\')\n
parent = self.portal.unrestrictedTraverse(container)\n
parent.manage_delObjects([object_id])\n
self.commit()\n
\n
def _addDataIngestionToDataSupply(self, data_supply):\n
portal = self.portal\n
\n
data_supply_line = data_supply.objectValues()[0]\n
data_ingestion = portal.data_ingestion_module.newContent(\n
portal_type="Data Ingestion",\n
title="TestDI_%s" % data_supply.getTitle(),\n
reference="Test_DI_%s" % data_supply.getTitle(),\n
specialise=data_supply.getRelativeUrl(),\n
source=data_supply.getSource(),\n
source_section=data_supply.getSourceSection(),\n
destination="organisation_module/test_wendelin_destination",\n
destination_section="organisation_module/test_wendelin_destination_section",\n
start_date=data_supply.getCreationDate())\n
\n
self.addCleanup(self._removeDocument, data_ingestion)\n
\n
data_ingestion.newContent(\n
portal_type="Data Ingestion Line",\n
title="Ingest Data",\n
reference="ingestion_operation",\n
aggregate=data_supply_line.getSource(),\n
resource="data_operation_module/wendelin_ingest_data",\n
int_index=1\n
)\n
\n
data_ingestion.newContent(\n
portal_type="Data Ingestion Line",\n
title="test_wendelin_data_product",\n
reference="out_stream",\n
aggregate_list=[data_supply_line.getSource(), data_supply_line.getDestination()],\n
resource="data_product_module/test_wendelin_data_product",\n
use="use/big_data/ingestion/stream",\n
quantity=1,\n
int_index=2\n
)\n
\n
data_ingestion.start()\n
\n
return data_ingestion\n
\n
def test_10_DataAnalysesCreation(self):\n
"""\n
Test data ingestion and analyses execution.\n
"""\n
portal = self.portal\n
\n
# test DataSupply_viewAddRelatedDataIngestionActionDialog dialog\n
# this test code will create a Data Ingestion which itself will\n
# be used in later tests of ERP5Site_createDataAnalysisList\n
data_supply = portal.data_supply_module.test_10_DataAnalysesCreation_data_supply\n
data_ingestion = self._addDataIngestionToDataSupply(data_supply)\n
\n
self.assertNotEqual(None, data_ingestion)\n
self.tic()\n
\n
before_data_analysis_list = portal.portal_catalog(\n
portal_type = "Data Analysis",\n
simulation_state = "started")\n
\n
# create DA from DI\n
portal.ERP5Site_createDataAnalysisList()\n
self.tic()\n
\n
# check new three Data Arrays created\n
after_data_analysis_list = portal.portal_catalog(\n
portal_type = "Data Analysis",\n
simulation_state = "started")\n
\n
before_data_analysis_list = set([x.getObject() for x in before_data_analysis_list])\n
after_data_analysis_list = set([x.getObject() for x in after_data_analysis_list])\n
self.assertEqual(1, len(after_data_analysis_list) - len(before_data_analysis_list))\n
\n
# check properly created\n
to_delete_data_analysis = after_data_analysis_list - before_data_analysis_list\n
\n
for data_analysis in list(to_delete_data_analysis):\n
if data_ingestion == data_analysis.getCausalityValue():\n
to_delete_data_analysis = data_analysis\n
break\n
\n
self.addCleanup(self._removeDocument, to_delete_data_analysis)\n
\n
data_transformation = portal.data_transformation_module.test_wendelin_data_transformation\n
\n
self.assertEqual(data_ingestion, to_delete_data_analysis.getCausalityValue())\n
self.assertSameSet(\n
[data_supply, data_transformation],\n
to_delete_data_analysis.getSpecialiseValueList()\n
)\n
# all lines should be properly created\n
self.assertEqual(len(data_transformation.objectValues()),\n
len(to_delete_data_analysis.objectValues()))\n
self.assertEqual("started", to_delete_data_analysis.getSimulationState())\n
\n
def test_11_temporaryDataArray(self):\n
"""\n
Test if temporary Data Array is functional.\n
"""\n
portal = self.portal\n
ndarray = np.array([[0, 1], [2, 3]])\n
temporary_data_array = portal.data_array_module.newContent(\n
portal_type=\'Data Array\',\n
temp_object=True\n
)\n
zbigarray = temporary_data_array.initArray(shape=ndarray.shape, dtype=ndarray.dtype)\n
zbigarray.append(ndarray)\n
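# initArray above already allocated a (2, 2) block, so the appended copy of ndarray starts at row index 2\n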
self.assertTrue(np.array_equal(zbigarray[2:], ndarray))\n
\n
def test_12_numpyWendelinConversion(self):\n
"""\n
Test if conversion from numpy arrays to wendelin data works.\n
"""\n
portal = self.portal\n
ndarray = np.array([[0, 1], [2, 3]])\n
wendelin_data = portal.Base_numpyToWendelinData(ndarray)\n
reconverted_ndarray = portal.Base_wendelinDataToNumpy(wendelin_data)\n
\n
self.assertIsInstance(wendelin_data, bytes)\n
\n
# Check for header\n
self.assertEqual(wendelin_data[:4], b\'\\x92WEN\')\n
\n
# Test checksum\n
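# (as exercised here: bytes [0:4] are the \'\\x92WEN\' magic, bytes [6:10] a little-endian crc32 of the payload that starts at byte 10)\n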
checksum = struct.unpack(\'<i\', wendelin_data[6:10])[0]\n
self.assertEqual(checksum, binascii.crc32(wendelin_data[10:]))\n
\n
# Test inverse conversion works\n
self.assertTrue(np.array_equal(ndarray, reconverted_ndarray))\n
\n
self.assertTrue(\n
np.array_equal(\n
portal.Base_wendelinTextToNumpy(\n
"kldFTgABq96QqZNOVU1QWQEARgB7J2Rlc2NyJzogJzxmOCcsICdmb3J0cmFuX29yZGVyJzogRmFsc2UsICdzaGFwZSc6ICgwLCksIH0gICAgICAgICAgICAK="\n
),\n
np.array([]),\n
)\n
)\n
\n
def test_13_unpackLazy(self):\n
"""\n
Ensure unpackLazy is available and functional in restricted python\n
"""\n
ingestion_policy_id = "test_13_unpackLazy_IngestionPolicy"\n
try:\n
ingestion_policy = self.getPortal().portal_ingestion_policies.newContent(\n
id=ingestion_policy_id,\n
title=ingestion_policy_id,\n
portal_type=\'Ingestion Policy\',\n
reference=ingestion_policy_id,\n
version = \'001\',\n
)\n
# ingestion_policy still exists from previous failed test run\n
except BadRequest:\n
ingestion_policy = self.portal.get(ingestion_policy_id)\n
\n
self.assertNotEqual(ingestion_policy, None)\n
\n
self.commit()\n
self.tic()\n
\n
self.addCleanup(self._removeDocument, ingestion_policy)\n
\n
code = r"""\n
ingestion_policy = context.portal_ingestion_policies.get("{}")\n
result = [x for x in ingestion_policy.unpackLazy(\'b"\\x93\\x01\\x02\\x03"\')]\n
return result\n
""".format(ingestion_policy_id)\n
\n
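# the expected list reads the script literal byte by byte as msgpack: \'b\' -> 98, \'"\' -> 34, \'\\x93\\x01\\x02\\x03\' -> [1, 2, 3], closing \'"\' -> 34\n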
self.createAndRunScript(code, [98, 34, [1, 2, 3], 34])\n
\n
def test_14_IndexSequenceInRestrictedPython(self):\n
"""\n
Ensure its possible to iterate over return values of DataBucketStream methods\n
in restricted python.\n
"""\n
\n
code = r"""\n
data_bucket_stream = context.portal_catalog.data_stream_module.newContent(\n
portal_type=\'Data Bucket Stream\'\n
)\n
data_bucket_stream.insertBucket(1, "1" * 100000)\n
result = [x for x in data_bucket_stream.getBucketIndexKeySequenceByIndex()]\n
"""\n
\n
self.createAndRunScript(code)\n
\n
def test_15_setArrayDtypeNames(self):\n
"""\n
Test Data Array method "setArrayDtypeNames"\n
"""\n
data_array = self.portal.data_array_module.newContent(\n
portal_type="Data Array"\n
)\n
self.addCleanup(self._removeDocument, data_array)\n
dtype_name0, dtype_name1 = "my_dtype_name", "my_new_dtype_name"\n
data_array.initArray((3,), np.dtype([(dtype_name0, np.float64)]))\n
self.assertEqual(data_array.getArrayDtypeNames(), (dtype_name0,))\n
data_array.setArrayDtypeNames((dtype_name1,))\n
self.assertEqual(data_array.getArrayDtypeNames(), (dtype_name1,))\n
\n
def test_16_createDataAnalysisFromDataTransformationWithoutResolution(self):\n
"""\n
Ensure data analysis are created from data transformation without any specified\n
variation categories.\n
"""\n
portal = self.portal\n
title_prefix = "Wendelin Test 16"\n
\n
test_function_to_organisation = {}\n
for test_function in ("source", "destination"):\n
organisation = portal.organisation_module.newContent(\n
portal_typle="Organisation",\n
title="%s %s" % (title_prefix, test_function),\n
)\n
self.addCleanup(self._removeDocument, organisation)\n
organisation.validate()\n
test_function_to_organisation.update({test_function: organisation})\n
\n
data_operation = portal.data_operation_module.newContent(\n
portal_typle="Data Operation",\n
title="%s Data Operation" % title_prefix,\n
script_id="DataAnalysisLine_testWendelinConvertAToB",\n
)\n
self.addCleanup(self._removeDocument, data_operation)\n
data_operation.validate()\n
\n
resource = portal.data_product_module.newContent(\n
portal_type="Data Product",\n
title="%s Data Product" % title_prefix,\n
individual_variation_base_category_list=["resolution"],\n
quantity_unit="unit/piece"\n
)\n
self.addCleanup(self._removeDocument, resource)\n
resource.validate()\n
resource_resolution = resource.newContent(\n
portal_type="Product Individual Variation",\n
title="20S",\n
)\n
\n
specialise_data_transformation = portal.data_transformation_module.newContent(\n
portal_type="Data Transformation",\n
title="%s Specialise Data Transformation" % title_prefix,\n
resource=resource.getRelativeUrl(),\n
)\n
self.addCleanup(self._removeDocument, specialise_data_transformation)\n
specialise_data_transformation.validate()\n
\n
specialise_data_supply = portal.data_supply_module.newContent(\n
portal_type="Data Supply",\n
title="%s Specialise Data Supply" % title_prefix,\n
)\n
self.addCleanup(self._removeDocument, specialise_data_supply)\n
specialise_data_supply.validate()\n
\n
initial_data_analysis = portal.data_analysis_module.newContent(\n
portal_type="Data Analysis",\n
title="%s Import Raw Data" % title_prefix,\n
reference="wendelin.test.16.initial.data.analysis",\n
resource=resource.getRelativeUrl(),\n
source=test_function_to_organisation[\'source\'].getRelativeUrl(),\n
destination=test_function_to_organisation[\'destination\'].getRelativeUrl(),\n
specialise_value_list=[\n
specialise_data_supply.getRelativeUrl(),\n
specialise_data_transformation.getRelativeUrl()\n
],\n
)\n
self.addCleanup(self._removeDocument, initial_data_analysis)\n
\n
initial_data_analysis.start()\n
initial_data_analysis.newContent(\n
portal_type="Data Analysis Line",\n
title="Raw Array",\n
reference="out_array",\n
resource=resource.getRelativeUrl(),\n
quantity=1,\n
quantity_unit="unit/piece",\n
variation_category_list=[\n
"resolution/%s" % resource_resolution.getRelativeUrl(),\n
"resource/%s" % resource.getRelativeUrl(),\n
],\n
use="use/big_data/ingestion/stream",\n
)\n
initial_data_analysis.newContent(\n
portal_type="Data Analysis Line",\n
title="Convert A to B",\n
reference="data_operation",\n
resource=data_operation.getRelativeUrl(),\n
quantity=1,\n
quantity_unit="unit/piece",\n
)\n
\n
data_transformation = portal.data_transformation_module.newContent(\n
portal_type="Data Transformation",\n
title="%s Data Transformation" % title_prefix,\n
resource=resource.getRelativeUrl(),\n
)\n
self.addCleanup(self._removeDocument, data_transformation)\n
data_transformation.validate()\n
data_transformation.newContent(\n
portal_type="Data Transformation Operation Line",\n
title="Convert A to B",\n
reference="data_operation",\n
resource=data_operation.getRelativeUrl(),\n
quantity=1,\n
quantity_unit="unit/piece",\n
)\n
\n
def getDataAnalysisByTitle(title):\n
return portal.portal_catalog.getResultValue(\n
portal_type="Data Analysis",\n
title=title\n
)\n
\n
data_analysis_title_list = [specialise_data_transformation.getTitle(), data_transformation.getTitle()]\n
for data_analysis_title in data_analysis_title_list:\n
self.assertEqual(getDataAnalysisByTitle(data_analysis_title), None)\n
\n
self.commit()\n
self.tic()\n
\n
self.portal.portal_alarms.wendelin_handle_analysis.activeSense()\n
self.tic()\n
\n
for data_analysis_title in data_analysis_title_list:\n
data_analysis = getDataAnalysisByTitle(data_analysis_title)\n
self.assertNotEqual(data_analysis, None)\n
self.addCleanup(self._removeDocument, data_analysis)\n
\n
def test_17_DataMapping(self):\n
"""\n
"""\n
portal = self.portal\n
data_mapping = portal.data_mapping_module.newContent(portal_type=\'Data Mapping\')\n
self.assertEqual(0, data_mapping.getSize())\n
data_list = [\n
(\'/usr/bin/2to3-2.7\', \'3ea002bead53f6bdf7\', \'fade8568285eb14146a7244\', \'f631570af55ee08ecef78f3\'),\n
(\'/usr/bin/R\', \'b4c48d52345ae2eb7ca0455db\', \'59441ddbc00b6521da571\', \'a92be1a7acc03f3846\'),\n
(\'/usr/bin/Rscript\', \'e97842e556f90be5f7e5\', \'806725443a01bcae802\',\'1829d887e0c3380ec8f463527\'),\n
(\'/usr/bin/[\', \'5c348873d0e0abe26d56cc752f0\', \'ebb7ae1a78018b62224\',\'9162fdf5b9598c9e9d\'),\n
(\'/usr/bin/aa-enabled\', \'c2336b14b7e9d1407d9\', \'c8c7af8e6d14a4d1061\', \'09726542c6c4ca907417c676b4\'),\n
(\'/usr/bin/aa-exec\', \'7f66cd90da4b27703de299\', \'b4bfe90f53a690deb2687342\', \'ad36c9002fa84d4e86\'),\n
(\'/usr/bin/aclocal-1.16\', \'6ba134fb4f97d79a5\', \'64e518309fc9544b01c164c2e48\', \'cc4595fba3251aaa9b48\'),\n
(\'/usr/bin/activate-global-python-argcomplete3\', \'4007899eba237603a\', \'8a2584480e9094f9e4761f7\', \'893ebeeca071795ecd457ed2\'),\n
(\'/usr/bin/addpart\', \'078a10c409b8b21c3b5137d\', \'481766f1aa1d393e4a3a\', \'668374ce14a57b2dd14bb\'),\n
(\'/usr/bin/ansible\', \'cb8a161dabae51cf2616c4a85\', \'31173931d09486697c05\', \'00ee4c55e8b46f69a54\'),\n
(\'/usr/bin/ansible-connection\', \'ffbef34c8d2ae633031f4e8\', \'61cf10e6b6e4ff33c7ded21244\', \'c670ce6af9b645c\'),\n
(\'/usr/bin/ansible-test\', \'40f528bebb16199d2b11a43b\', \'ea6d8680d833fb36e9548a\', \'a2c1b309b22cd4285a\'),\n
(\'/usr/bin/appres\', \'7ccb78e306838a87b68d2c\', \'7d089e41ee491fc64b2b\', \'c3e24ec3fee558e9f06bd0\'),\n
(\'/usr/bin/apt\', \'3ca60d2b26761b7a50d812\', \'8b624ea3f31040bd838dfc\', \'36b5070dd02c87b47512\'),\n
(\'/usr/bin/apt-cache\', \'f0e82d30aa1d8a80e\', \'7da7f514d8056033c4844a6f0\', \'be07d8cb7140399252c51c\'),\n
(\'/usr/bin/apt-cdrom\', \'073483cad694b013a1\', \'fb7b20b9450ab7abb4bcc\', \'2ad240a6670f486e2e1da9daa\'),\n
(\'/usr/bin/apt-config\', \'32856a9a4e703346d6b8\', \'407560704903fb078e45dac\', \'ee559cd54c2a34f3ad3d4\')\n
]\n
data_mapped_list = []\n
\n
# each different object return a different value\n
for data in data_list:\n
data_mapped_list.append(data_mapping.addObject(data))\n
self.assertEqual(len(data_mapped_list), len(data_list))\n
self.assertEqual(len(data_list), data_mapping.getSize())\n
\n
# ensure add again same data return always samething\n
tmp_list = []\n
for data in data_list:\n
tmp_list.append(data_mapping.addObject(data))\n
self.assertEqual(tmp_list, data_mapped_list)\n
# size still same\n
self.assertEqual(len(data_list), data_mapping.getSize())\n
\n
# ensure we can get original value\n
for index in range(len(data_mapped_list)):\n
self.assertEqual(data_mapping.getObjectFromValue(data_mapped_list[index]), data_list[index])\n
\n
# ensure we can get mapped value\n
for index in range(len(data_mapped_list)):\n
self.assertEqual(data_mapping.getValueFromObject(data_list[index]), data_mapped_list[index])\n
\n
# another data list, /usr/bin/2to3-2.7, /usr/bin/appres, /usr/bin/aclocal-1.16 \'s value are different compare to previous data\n
# so 3 values are different\n
another_data_list = [\n
(\'/usr/bin/2to3-2.7\', \'ModifiedValue\', \'fade8568285eb14146a7244\', \'f631570af55ee08ecef78f3\'),\n
(\'/usr/bin/R\', \'b4c48d52345ae2eb7ca0455db\', \'59441ddbc00b6521da571\', \'a92be1a7acc03f3846\'),\n
(\'/usr/bin/Rscript\', \'e97842e556f90be5f7e5\', \'806725443a01bcae802\',\'1829d887e0c3380ec8f463527\'),\n
(\'/usr/bin/[\', \'5c348873d0e0abe26d56cc752f0\', \'ebb7ae1a78018b62224\',\'9162fdf5b9598c9e9d\'),\n
(\'/usr/bin/aa-enabled\', \'c2336b14b7e9d1407d9\', \'c8c7af8e6d14a4d1061\', \'09726542c6c4ca907417c676b4\'),\n
(\'/usr/bin/aa-exec\', \'7f66cd90da4b27703de299\', \'b4bfe90f53a690deb2687342\', \'ad36c9002fa84d4e86\'),\n
(\'/usr/bin/aclocal-1.16\', \'6ba134fb4f97d79a5\', \'ModifiedValue\', \'cc4595fba3251aaa9b48\'),\n
(\'/usr/bin/activate-global-python-argcomplete3\', \'4007899eba237603a\', \'8a2584480e9094f9e4761f7\', \'893ebeeca071795ecd457ed2\'),\n
(\'/usr/bin/addpart\', \'078a10c409b8b21c3b5137d\', \'481766f1aa1d393e4a3a\', \'668374ce14a57b2dd14bb\'),\n
(\'/usr/bin/ansible\', \'cb8a161dabae51cf2616c4a85\', \'31173931d09486697c05\', \'00ee4c55e8b46f69a54\'),\n
(\'/usr/bin/ansible-connection\', \'ffbef34c8d2ae633031f4e8\', \'61cf10e6b6e4ff33c7ded21244\', \'c670ce6af9b645c\'),\n
(\'/usr/bin/ansible-test\', \'40f528bebb16199d2b11a43b\', \'ea6d8680d833fb36e9548a\', \'a2c1b309b22cd4285a\'),\n
(\'/usr/bin/appres\', \'7ccb78e306838a87b68d2c\', \'ModifiedValue\', \'c3e24ec3fee558e9f06bd0\'),\n
(\'/usr/bin/apt\', \'3ca60d2b26761b7a50d812\', \'8b624ea3f31040bd838dfc\', \'36b5070dd02c87b47512\'),\n
(\'/usr/bin/apt-cache\', \'f0e82d30aa1d8a80e\', \'7da7f514d8056033c4844a6f0\', \'be07d8cb7140399252c51c\'),\n
(\'/usr/bin/apt-cdrom\', \'073483cad694b013a1\', \'fb7b20b9450ab7abb4bcc\', \'2ad240a6670f486e2e1da9daa\'),\n
(\'/usr/bin/apt-config\', \'32856a9a4e703346d6b8\', \'407560704903fb078e45dac\', \'ee559cd54c2a34f3ad3d4\')\n
]\n
\n
another_data_mapped_list = []\n
\n
for data in another_data_list:\n
another_data_mapped_list.append(data_mapping.addObject(data))\n
self.assertEqual(len(another_data_mapped_list), len(data_list))\n
\n
array = np.array(data_mapped_list)\n
another_array = np.array(another_data_mapped_list)\n
# simply call setdiff1d to get the different between two datas\n
diff_array = np.setdiff1d(another_array, array)\n
self.assertEqual(diff_array.size, 3)\n
diff_object_list = []\n
for value in diff_array:\n
diff_object_list.append(data_mapping.getObjectFromValue(value))\n
self.assertEqual(diff_object_list, [(\'/usr/bin/2to3-2.7\', \'ModifiedValue\', \'fade8568285eb14146a7244\', \'f631570af55ee08ecef78f3\'),\n
(\'/usr/bin/aclocal-1.16\', \'6ba134fb4f97d79a5\', \'ModifiedValue\', \'cc4595fba3251aaa9b48\'),\n
(\'/usr/bin/appres\', \'7ccb78e306838a87b68d2c\', \'ModifiedValue\', \'c3e24ec3fee558e9f06bd0\')])\n
# same data value as "another_data_list" but in other format\n
other_format_data_list = [\n
[\'/usr/bin/2to3-2.7\', \'ModifiedValue\', \'fade8568285eb14146a7244\', \'f631570af55ee08ecef78f3\'],\n
[\'/usr/bin/R\', \'b4c48d52345ae2eb7ca0455db\', \'59441ddbc00b6521da571\', \'a92be1a7acc03f3846\'],\n
[\'/usr/bin/Rscript\', \'e97842e556f90be5f7e5\', \'806725443a01bcae802\',\'1829d887e0c3380ec8f463527\']\n
]\n
other_format_data_mapped_list = []\n
original_size = data_mapping.getSize()\n
for data in other_format_data_list:\n
other_format_data_mapped_list.append(data_mapping.addObject(data))\n
self.assertEqual(original_size + 3, data_mapping.getSize())\n
other_format_array = np.array(other_format_data_mapped_list)\n
# ensure "even data values are same but format is different" is considered different\n
diff_array = np.setdiff1d(other_format_array, another_array)\n
self.assertEqual(diff_array.size, 3)\n
diff_object_list = []\n
for value in diff_array:\n
diff_object_list.append(data_mapping.getObjectFromValue(value))\n
self.assertEqual(diff_object_list, [[\'/usr/bin/2to3-2.7\', \'ModifiedValue\', \'fade8568285eb14146a7244\', \'f631570af55ee08ecef78f3\'],\n
[\'/usr/bin/R\', \'b4c48d52345ae2eb7ca0455db\', \'59441ddbc00b6521da571\', \'a92be1a7acc03f3846\'],\n
[\'/usr/bin/Rscript\', \'e97842e556f90be5f7e5\', \'806725443a01bcae802\',\'1829d887e0c3380ec8f463527\']])\n
\n
\n
def test_18_wendelinTextToNumpySecurity(self):\n
"""\n
Test that we do not load pickles when converting encoded wendelin data to numpy.\n
"""\n
portal = self.portal\n
ndarray = np.array([[0, 1], [2, object()]])\n
wendelin_text = base64.b64encode(portal.Base_numpyToWendelinData(ndarray))\n
self.assertRaises(ValueError,\n
portal.Base_wendelinTextToNumpy,\n
wendelin_text)\n
]]></string>
</value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</pickle>
</record>
<record id="3" aka="AAAAAAAAAAM=">
  <pickle>
    <global name="PersistentMapping" module="Persistence.mapping"/>
  </pickle>
...
@@ -68,7 +992,7 @@
       <item>
           <key> <string>component_validation_workflow</string> </key>
           <value>
-            <persistent> <string encoding="base64">AAAAAAAAAAM=</string> </persistent>
+            <persistent> <string encoding="base64">AAAAAAAAAAQ=</string> </persistent>
          </value>
      </item>
    </dictionary>
...
@@ -77,7 +1001,7 @@
     </dictionary>
   </pickle>
 </record>
-<record id="3" aka="AAAAAAAAAAM=">
+<record id="4" aka="AAAAAAAAAAQ=">
   <pickle>
     <global name="WorkflowHistoryList" module="Products.ERP5Type.Workflow"/>
   </pickle>
...
@@ -90,11 +1014,11 @@
             <dictionary>
               <item>
                   <key> <string>action</string> </key>
-                  <value> <string>validate</string> </value>
+                  <value> <string>modify</string> </value>
               </item>
               <item>
                   <key> <string>validation_state</string> </key>
-                  <value> <string>validated</string> </value>
+                  <value> <string>modified</string> </value>
               </item>
             </dictionary>
           </list>
...