Commit 9a84ef83 authored by Boxiang Sun's avatar Boxiang Sun Committed by Tristan Cavelier

erp5_officejs_support_request_ui: Speedup the Support Request worklist searching.

Use a custom ERP5 script instead of the global worklist searching.
parent 7783be22
/*global window, rJS, RSVP, Handlebars, URI */
/*global window, rJS, RSVP, Handlebars */
/*jslint nomen: true, indent: 2, maxerr: 3 */
(function (window, rJS, RSVP, Handlebars, URI) {
(function (window, rJS, RSVP, Handlebars) {
"use strict";
/////////////////////////////////////////////////////////////////
......@@ -20,6 +20,7 @@
.declareAcquiredMethod("jio_getAttachment", "jio_getAttachment")
.declareAcquiredMethod("translateHtml", "translateHtml")
.declareAcquiredMethod("updateHeader", "updateHeader")
.declareAcquiredMethod("getSetting", "getSetting")
.declareAcquiredMethod("getUrlFor", "getUrlFor")
/////////////////////////////////////////////////////////////////
......@@ -33,58 +34,43 @@
page_icon: 'clipboard'
})
.push(function () {
return gadget.jio_getAttachment(
'portal_workflow',
'links'
);
return gadget.getSetting("hateoas_url");
})
.push(function (result) {
.push(function (hateoas_url) {
return gadget.jio_getAttachment(
// result.data.rows[0].id,
'portal_workflow',
result._links.action_worklist.href
'support_request_module',
hateoas_url + 'support_request_module'
+ "/SupportRequestModule_getWorklistAsJson"
);
})
.push(function (links) {
.push(function (result) {
/*jslint continue:true*/
var action_list = links.worklist,
query_string,
promise_list = [],
var promise_list = [],
display_options,
i;
for (i = 0; i < action_list.length; i += 1) {
query_string = new URI(action_list[i].href).query(true).query;
if (query_string.indexOf('portal_type:"Support Request"') === -1) {
for (i = 0; i < result.length; i += 1) {
if (result[i].action_count === 0) {
continue;
}
display_options = {extended_search: query_string};
if (action_list[i].hasOwnProperty('module')) {
display_options = {
jio_key: new URI(action_list[i].module).segment(2),
extended_search: query_string,
jio_key: "support_request_module",
extended_search: result[i].query_string,
page: 'form',
view: 'view'
};
} else {
display_options = {
extended_search: query_string,
page: 'search'
};
}
promise_list.push(RSVP.all([
gadget.getUrlFor({command: 'display', options: display_options}),
// Remove the counter from the title
action_list[i].name,
action_list[i].count
result[i].action_name,
result[i].action_count
]));
}
return RSVP.all(promise_list);
})
.push(function (result_list) {
var line_list = [],
i;
var line_list = [], i;
for (i = 0; i < result_list.length; i += 1) {
line_list.push({
link: result_list[i][0],
......@@ -97,5 +83,4 @@
});
});
});
}(window, rJS, RSVP, Handlebars, URI));
\ No newline at end of file
}(window, rJS, RSVP, Handlebars));
\ No newline at end of file
......@@ -230,7 +230,7 @@
</item>
<item>
<key> <string>serial</string> </key>
<value> <string>962.13948.13888.3242</string> </value>
<value> <string>962.26499.5927.24507</string> </value>
</item>
<item>
<key> <string>state</string> </key>
......@@ -248,7 +248,7 @@
</tuple>
<state>
<tuple>
<float>1506505200.17</float>
<float>1506651141.01</float>
<string>UTC</string>
</tuple>
</state>
......
import json
portal = context.getPortalObject()
count_list = []
state_dict = {
"submitted": "Support Request to Open",
"draft": "Support Request to Submit",
"validated": "Support Request to Close",
"suspended": "Suspended Support Requests"
}
# XXX hardcoded, these lines below reflect portal_workflow/ticket_workflow worklists
count_list.append({
'query_string': 'portal_type:"Support Request" AND simulation_state:"draft" AND local_roles:"Owner"',
'action_name': state_dict["draft"],
'action_count': portal.support_request_module.countFolder(portal_type="Support Request", simulation_state="draft", local_roles="Owner")[0][0]})
count_list.append({
'query_string': 'portal_type:"Support Request" AND simulation_state:"submitted" AND local_roles:"Assignor"',
'action_name': state_dict["submitted"],
'action_count': portal.support_request_module.countFolder(portal_type="Support Request", simulation_state="submitted", local_roles="Assignor")[0][0]})
count_list.append({
'query_string': 'portal_type:"Support Request" AND simulation_state:"validated" AND local_roles:("Assignee" OR "Assignor")',
'action_name': state_dict["validated"],
'action_count': portal.support_request_module.countFolder(portal_type="Support Request", simulation_state="validated", local_roles=("Assignee", "Assignor"))[0][0]})
count_list.append({
'query_string': 'portal_type:"Support Request" AND simulation_state:"suspended" AND local_roles:("Assignee" OR "Assignor")',
'action_name': state_dict["suspended"],
'action_count': portal.support_request_module.countFolder(portal_type="Support Request", simulation_state="suspended", local_roles=("Assignee", "Assignor"))[0][0]})
return json.dumps(count_list)
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>Script_magic</string> </key>
<value> <int>3</int> </value>
</item>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
</klass>
<tuple/>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string></string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>SupportRequestModule_getWorklistAsJson</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
  • I do not see how this could improve performance any better than erp5_workflist_cache.

    Please explain and provide performance figures showing that it is indeed beneficial.

  • mentioned in commit 2f0e02f1

    Toggle commit list
  • mentioned in commit 831fb18b

    Toggle commit list
  • mentioned in commit 3dd9c853

    Toggle commit list
  • mentioned in commit 08238d06

    Toggle commit list
  • mentioned in merge request !760 (merged)

    Toggle commit list
  • It has been a long time since I wrote this commit. I tried to recall the scenario and check the current implementation. If I recall correctly, the script SupportRequestModule_getWorklistAsJson is for getting specific worklists for Support Request, whereas the normal way gets all worklists. We thought this could "save" some time.

    The improvement written by @jerome (!760 (merged)) is better. He uses ERP5Site_getTicketWorkflowWorklistInfoDict instead of a hardcoded query string. This is the first time that I have met the getVarMatchKeys API... But is it worth looping over all objects in workflow.worklists?

    cc @tc

    Edited by Boxiang Sun
  • Thanks for feedback @Daetalus

    But is this worth to loop all objects in the workflow.worklists?

    In ERP5Site_getTicketWorkflowWorklistInfoDict, this is only the worklists from ticket workflow (there are 4 worklists), so I don't think it's an issue.

    From my experience with user feedback, even if worklists with portal_workflow are very efficient in terms of server resources, users will say "it's slow" because they have to wait 5 minutes because of the cache. Some users admitted to me that as a consequence of this, they were not using worklists at all. And that's a pity because worklists are really excellent in a well configured ERP5.

    I feel what we should try to improve is reducing this 5 minute delay. I had one idea: indexing in a worklist table all documents that are currently in a worklist for a user - and deleting records from this table when documents are not in any worklist. As long as users "do their job" and worklists are well configured it should be fast, but otherwise it would become very slow, so it's not really better...

    Another thing, which @tc already applied in Nexedi ERP5, is that we don't need to calculate worklists on all pages, only when users request it - but it would also be nice if there could be some kind of notification "hey, there's a new support request to close" based on worklists, or if the worklist view could highlight the "new" entries in worklists.

    Anyway, thanks for feedback and hopefully one day we'll improve user experience on worklists globally.

  • To reduce the effect of the cache, in a worklist sql cached setup (as opposed to the basic "each user gets their own 5 minutes Zope-level cache"), a JP-validated but never implemented idea is to feed a new table with deltas each time a document changes state, and to use that table to update worklists. Then, this table would be flushed when we refresh workflist cache, to catch any incremental inconsistency. This should improve user experience.

  • Yes, there was also this idea. There's one thing I don't understand though, is how when we index document in new state we can calculate the delta against the old state ?

    If we take the example of system state where we have 10 submitted support requests:

    portal_type state count
    Support Request submitted 10

    when one of those 10 submitted support request is open, we want to insert two lines so that we have:

    portal_type state count
    Support Request submitted 10
    Support Request submitted -1
    Support Request open 1

    but when indexing the new support request it's already in open state, so I don't know how we can figure out we should insert -1 for submitted state. Maybe selecting it from catalog table before updating this table ?

    I could not find a good way, so I started thinking at other ideas, but there might be a way. The delta approach seems much better.

  • There is another complication for the delta approach: security. If any catalog security column changes, the lines must also change. While I can see how state could work (pre- transition interaction + post-transition interaction), security I have no idea.

  • pre- transition interaction + post-transition interaction ... ah yes that was the missing point for me. Then if we assume all changes to security on the document goes through the same API call, we could probably do the same dance of pre/post interactions.

  • Also, these approaches would need an alarm to "compress" the table, isn't it ?

  • To me compression would just be the normal sql worklist cache alarm: every 5 minutes, refresh the "normal" table and remove all deltas: we are back in sync.

    Another idea: update table in-place instead of inserting deltas elsewhere. But this may cause divergences as I don't know if "x = x + " is atomic (what if commit-time "x" is different from query-time "x" ?). [EDIT]: by which I mean, what if another transaction did a change on the same row ?

    Edited by Vincent Pelletier
  • To me compression would just be the normal sql worklist cache alarm: every 5 minutes, refresh the "normal" table and remove all deltas: we are back in sync.

    Nice. I feel we also have to take care of concurrent transactions in that case, so that no transaction inserts into the worklist table between the moment the alarm reads from the catalog and the moment it writes to the worklist table. But since this happens only every 5 minutes, a big lock on the table during alarm processing is probably OK if that's needed.

Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment