Commit 96472d18 authored by Ivan Tyagov

Allow the API to pass the reference of the object (i.e. a Data Array) in which the transformation result is expected to be stored.
parent c850a14d
......@@ -87,7 +87,7 @@ for line in line_list:
pass
else:
d = json.loads(line)
# xxx: save this value as a numpy array
# xxx: save this value as a Data Array identified by data_array_reference

# start and end offsets may not match the existing record structure in the stream
# thus corrections to the start and end offsets are needed, so we
......@@ -102,7 +102,7 @@ return start, end
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>chunk_list, start, end</string> </value>
<value> <string>chunk_list, start, end, data_array_reference=None</string> </value>
</item>
<item>
<key> <string>id</string> </key>
......
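As a sketch of how the new parameter could be used inside such a transform script, the plain-Python fragment below parses the JSON lines and stores the decoded values into the Data Array found by data_array_reference. The catalog lookup, the explicit context argument, the function wrapper and the array layout are illustrative assumptions, not part of this commit; the real code runs as an ERP5 Python Script with its own bound names.

import json

def transform_chunk_list(context, chunk_list, start, end, data_array_reference=None):
    # Join the raw chunks and split them back into individual JSON lines.
    value_list = []
    for line in ''.join(chunk_list).splitlines():
        if not line.strip():
            continue
        d = json.loads(line)
        value_list.append(d)

    if data_array_reference is not None:
        # Hypothetical lookup: resolve the Data Array by its reference and
        # write the parsed values into its array (layout is application-specific).
        data_array = context.portal_catalog.getResultValue(
            portal_type='Data Array',
            reference=data_array_reference)
        if data_array is not None:
            zarray = data_array.getArray()
            # ... store value_list into zarray here

    # start and end may still need to be corrected to full record boundaries.
    return start, end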
......@@ -62,7 +62,10 @@ data_stream_chunk_list = data_stream.readChunkList(start, end)
if transform_script_id is not None:
transform_script = getattr(data_stream, transform_script_id, None)
if transform_script is not None:
start, end = transform_script(data_stream_chunk_list, start, end)
start, end = transform_script(data_stream_chunk_list, \
start, \
end, \
data_array_reference)

# [warning] store current position offset in Data Stream, this can easily cause
# ConflictErrors and it spawns re-index activities on the DataStream
......@@ -91,7 +94,7 @@ if start < total_stream_length:
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>data_stream_relative_url, start, end, chunk_length, transform_script_id=None</string> </value>
<value> <string>data_stream_relative_url, start, end, chunk_length, transform_script_id=None, data_array_reference=None</string> </value>
</item>
<item>
<key> <string>id</string> </key>
......
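Put together, the calling side shown in the hunk above amounts to the condensed sketch below; only the readChunkList call, the getattr dispatch and the extended transform_script call appear in the diff, the surrounding variables are assumed context. Note that the API forwards a plain reference string rather than the Data Array object itself, leaving it to the transform script to resolve the target document.

# Condensed sketch of the dispatcher logic above (surrounding code assumed).
data_stream_chunk_list = data_stream.readChunkList(start, end)

if transform_script_id is not None:
    transform_script = getattr(data_stream, transform_script_id, None)
    if transform_script is not None:
        # The transform script may correct start/end to record boundaries and
        # may store its output in the Data Array named by data_array_reference.
        start, end = transform_script(data_stream_chunk_list,
                                      start,
                                      end,
                                      data_array_reference)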
......@@ -73,7 +73,7 @@ return data_length
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>chunk_length=1048576, transform_script_id=None</string> </value>
<value> <string>chunk_length=1048576, transform_script_id=None, data_array_reference=None</string> </value>
</item>
<item>
<key> <string>id</string> </key>
......
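A possible caller of the entry point whose signature is extended above might look as follows; the script ids, module name and reference are illustrative assumptions, and only the parameter names and the 1048576-byte default chunk size come from the diff.

# Hypothetical usage (names are assumptions, not from this commit).
# The Data Array that should receive the transformation result is created
# first, and only its reference is passed through the API.
data_array = portal.data_array_module.newContent(
    portal_type='Data Array',
    reference='example-transformed-array')

data_stream.DataStream_transformChunks(      # assumed script id
    chunk_length=1048576,                    # default from the signature above
    transform_script_id='DataStream_parseJSONChunk',  # assumed transform script
    data_array_reference=data_array.getReference())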
......@@ -115,15 +115,17 @@ class Test(ERP5TypeTestCase):
# test copy numpy -> wendelin but first resize persistent one (add new one)
data_array.initArray((4, 4), np.uint8)
persistent_zbig_array = data_array.getArray()
rows = [0,1]
cols = [2,2]
new_array = np.arange(1,17).reshape((4,4))
persistent_zbig_array[:,:] = new_array
self.assertEquals(new_array.shape, persistent_zbig_array.shape)
# (enable when new wendelin.core released as it can kill system)
#self.assertTrue(np.array_equal(a, persistent_zbig_array))
#self.assertTrue(np.array_equal(new_array, persistent_zbig_array))
# test set element in zbig array
persistent_zbig_array[:2, 2] = 0
#self.assertFalse(np.array_equal(new_array, persistent_zbig_array))
# resize Zbig Array (enable when new wendelin.core released as it can kill system)
#persistent_zbig_array = np.resize(persistent_zbig_array, (100,100))
#self.assertNotEquals(pure_numpy_array.shape, persistent_zbig_array.shape)
\ No newline at end of file
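The assertions exercised above follow plain NumPy semantics on the persistent ZBigArray; as a standalone illustration (no wendelin.core involved), the same checks on an ordinary ndarray would read:

import numpy as np

# NumPy-only illustration of the checks in the test above; the real test
# operates on a persistent ZBigArray obtained via data_array.getArray().
persistent_like_array = np.zeros((4, 4), dtype=np.uint8)

new_array = np.arange(1, 17).reshape((4, 4))
persistent_like_array[:, :] = new_array
assert new_array.shape == persistent_like_array.shape
assert np.array_equal(new_array, persistent_like_array)

# Setting a slice of elements breaks equality with the source array.
persistent_like_array[:2, 2] = 0
assert not np.array_equal(new_array, persistent_like_array)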
......@@ -48,8 +48,6 @@
<tuple>
<string>W: 53, 4: Unused variable 'scipy' (unused-variable)</string>
<string>W: 54, 4: Unused variable 'sklearn' (unused-variable)</string>
<string>W:118, 4: Unused variable 'rows' (unused-variable)</string>
<string>W:119, 4: Unused variable 'cols' (unused-variable)</string>
</tuple>
</value>
</item>
......